diff --git a/src/app/api/integrations/route.ts b/src/app/api/integrations/route.ts index 6f7713e..4dd9927 100644 --- a/src/app/api/integrations/route.ts +++ b/src/app/api/integrations/route.ts @@ -45,6 +45,7 @@ const INTEGRATIONS: IntegrationDef[] = [ { id: 'anthropic', name: 'Anthropic', category: 'ai', envVars: ['ANTHROPIC_API_KEY'], vaultItem: 'openclaw-anthropic-api-key', testable: true }, { id: 'openai', name: 'OpenAI', category: 'ai', envVars: ['OPENAI_API_KEY'], vaultItem: 'openclaw-openai-api-key', testable: true }, { id: 'openrouter', name: 'OpenRouter', category: 'ai', envVars: ['OPENROUTER_API_KEY'], vaultItem: 'openclaw-openrouter-api-key', testable: true }, + { id: 'venice', name: 'Venice AI', category: 'ai', envVars: ['VENICE_API_KEY'], vaultItem: 'openclaw-venice-api-key', testable: true }, { id: 'nvidia', name: 'NVIDIA', category: 'ai', envVars: ['NVIDIA_API_KEY'], vaultItem: 'openclaw-nvidia-api-key' }, { id: 'moonshot', name: 'Moonshot / Kimi', category: 'ai', envVars: ['MOONSHOT_API_KEY'], vaultItem: 'openclaw-moonshot-api-key' }, { id: 'ollama', name: 'Ollama (Local)', category: 'ai', envVars: ['OLLAMA_API_KEY'], vaultItem: 'openclaw-ollama-api-key' }, @@ -735,6 +736,19 @@ async function handleTest( break } + case 'venice': { + const key = getEffectiveEnvValue(envMap, 'VENICE_API_KEY') + if (!key) return NextResponse.json({ ok: false, detail: 'API key not set' }) + const res = await fetch('https://api.venice.ai/api/v1/models', { + headers: { Authorization: `Bearer ${key}` }, + signal: AbortSignal.timeout(5000), + }) + result = res.ok + ? 
{ ok: true, detail: 'API key valid' } + : { ok: false, detail: `HTTP ${res.status}` } + break + } + case 'hyperbrowser': { const key = getEffectiveEnvValue(envMap, 'HYPERBROWSER_API_KEY') if (!key) return NextResponse.json({ ok: false, detail: 'API key not set' }) diff --git a/src/lib/__tests__/token-pricing.test.ts b/src/lib/__tests__/token-pricing.test.ts index 73de005..159d3c9 100644 --- a/src/lib/__tests__/token-pricing.test.ts +++ b/src/lib/__tests__/token-pricing.test.ts @@ -34,6 +34,7 @@ describe('token pricing', () => { it('maps providers from model prefixes and names', () => { expect(getProviderFromModel('openai/gpt-4.1')).toBe('openai') expect(getProviderFromModel('anthropic/claude-sonnet-4-5')).toBe('anthropic') + expect(getProviderFromModel('venice/llama-3.3-70b')).toBe('venice') expect(getProviderFromModel('gateway::codex-mini')).toBe('openai') }) }) diff --git a/src/lib/__tests__/token-utils.test.ts b/src/lib/__tests__/token-utils.test.ts index f7c8475..8c170c5 100644 --- a/src/lib/__tests__/token-utils.test.ts +++ b/src/lib/__tests__/token-utils.test.ts @@ -11,6 +11,7 @@ describe('detectProvider', () => { ['mistral-large', 'Mistral'], ['llama-3', 'Meta'], ['deepseek-coder', 'DeepSeek'], + ['venice/llama-3.3-70b', 'Venice AI'], ['unknown-model', 'Other'], ])('%s -> %s', (model, expected) => { expect(detectProvider(model)).toBe(expected) diff --git a/src/lib/models.ts b/src/lib/models.ts index ca10a79..1f8936d 100644 --- a/src/lib/models.ts +++ b/src/lib/models.ts @@ -14,6 +14,7 @@ export const MODEL_CATALOG: ModelConfig[] = [ { alias: 'groq-fast', name: 'groq/llama-3.1-8b-instant', provider: 'groq', description: '840 tok/s, ultra fast', costPer1k: 0.05 }, { alias: 'groq', name: 'groq/llama-3.3-70b-versatile', provider: 'groq', description: 'Fast + quality balance', costPer1k: 0.59 }, { alias: 'kimi', name: 'moonshot/kimi-k2.5', provider: 'moonshot', description: 'Alternative provider', costPer1k: 1.0 }, + { alias: 'venice-llama-3.3-70b', name: 
'venice/llama-3.3-70b', provider: 'venice', description: 'Venice AI Llama 3.3 70B', costPer1k: 0.7 }, { alias: 'minimax', name: 'minimax/minimax-m2.1', provider: 'minimax', description: 'Cost-effective (1/10th price), strong coding', costPer1k: 0.3 }, ] diff --git a/src/lib/token-pricing.ts b/src/lib/token-pricing.ts index fc7a0b4..13e1168 100644 --- a/src/lib/token-pricing.ts +++ b/src/lib/token-pricing.ts @@ -33,6 +33,7 @@ const MODEL_PRICING: Record<string, { inputPerMTok: number; outputPerMTok: number }> = { 'groq/llama-3.1-8b-instant': { inputPerMTok: 0.05, outputPerMTok: 0.05 }, 'groq/llama-3.3-70b-versatile': { inputPerMTok: 0.59, outputPerMTok: 0.59 }, 'moonshot/kimi-k2.5': { inputPerMTok: 1.0, outputPerMTok: 1.0 }, + 'venice/llama-3.3-70b': { inputPerMTok: 0.7, outputPerMTok: 2.8 }, 'minimax/minimax-m2.1': { inputPerMTok: 0.3, outputPerMTok: 0.3 }, 'ollama/deepseek-r1:14b': { inputPerMTok: 0.0, outputPerMTok: 0.0 }, 'ollama/qwen2.5-coder:7b': { inputPerMTok: 0.0, outputPerMTok: 0.0 }, diff --git a/src/lib/token-utils.ts b/src/lib/token-utils.ts index ae0f25d..dc3e540 100644 --- a/src/lib/token-utils.ts +++ b/src/lib/token-utils.ts @@ -4,6 +4,7 @@ export function detectProvider(model: string): string { if (lower.includes('gpt') || lower.includes('o1') || lower.includes('o3') || lower.includes('o4') || lower.includes('openai')) return 'OpenAI' if (lower.includes('gemini') || lower.includes('google')) return 'Google' if (lower.includes('mistral') || lower.includes('mixtral')) return 'Mistral' + if (lower.includes('venice')) return 'Venice AI' if (lower.includes('llama') || lower.includes('meta')) return 'Meta' if (lower.includes('deepseek')) return 'DeepSeek' if (lower.includes('command') || lower.includes('cohere')) return 'Cohere'