Add Venice AI integration and model support (#301)

Co-authored-by: Cursor Agent <cursoragent@cursor.com>
Co-authored-by: Joshua Mo <joshua-mo-143@users.noreply.github.com>
This commit is contained in:
Joshua Mo 2026-03-12 15:14:38 +00:00 committed by GitHub
parent eaf0bb149e
commit cfe7525200
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
6 changed files with 19 additions and 0 deletions

View File

@@ -45,6 +45,7 @@ const INTEGRATIONS: IntegrationDef[] = [
{ id: 'anthropic', name: 'Anthropic', category: 'ai', envVars: ['ANTHROPIC_API_KEY'], vaultItem: 'openclaw-anthropic-api-key', testable: true },
{ id: 'openai', name: 'OpenAI', category: 'ai', envVars: ['OPENAI_API_KEY'], vaultItem: 'openclaw-openai-api-key', testable: true },
{ id: 'openrouter', name: 'OpenRouter', category: 'ai', envVars: ['OPENROUTER_API_KEY'], vaultItem: 'openclaw-openrouter-api-key', testable: true },
{ id: 'venice', name: 'Venice AI', category: 'ai', envVars: ['VENICE_API_KEY'], vaultItem: 'openclaw-venice-api-key', testable: true },
{ id: 'nvidia', name: 'NVIDIA', category: 'ai', envVars: ['NVIDIA_API_KEY'], vaultItem: 'openclaw-nvidia-api-key' },
{ id: 'moonshot', name: 'Moonshot / Kimi', category: 'ai', envVars: ['MOONSHOT_API_KEY'], vaultItem: 'openclaw-moonshot-api-key' },
{ id: 'ollama', name: 'Ollama (Local)', category: 'ai', envVars: ['OLLAMA_API_KEY'], vaultItem: 'openclaw-ollama-api-key' },
@@ -735,6 +736,19 @@ async function handleTest(
break
}
case 'venice': {
const key = getEffectiveEnvValue(envMap, 'VENICE_API_KEY')
if (!key) return NextResponse.json({ ok: false, detail: 'API key not set' })
const res = await fetch('https://api.venice.ai/api/v1/models', {
headers: { Authorization: `Bearer ${key}` },
signal: AbortSignal.timeout(5000),
})
result = res.ok
? { ok: true, detail: 'API key valid' }
: { ok: false, detail: `HTTP ${res.status}` }
break
}
case 'hyperbrowser': {
const key = getEffectiveEnvValue(envMap, 'HYPERBROWSER_API_KEY')
if (!key) return NextResponse.json({ ok: false, detail: 'API key not set' })

View File

@@ -34,6 +34,7 @@ describe('token pricing', () => {
it('maps providers from model prefixes and names', () => {
expect(getProviderFromModel('openai/gpt-4.1')).toBe('openai')
expect(getProviderFromModel('anthropic/claude-sonnet-4-5')).toBe('anthropic')
expect(getProviderFromModel('venice/llama-3.3-70b')).toBe('venice')
expect(getProviderFromModel('gateway::codex-mini')).toBe('openai')
})
})

View File

@@ -11,6 +11,7 @@ describe('detectProvider', () => {
['mistral-large', 'Mistral'],
['llama-3', 'Meta'],
['deepseek-coder', 'DeepSeek'],
['venice/llama-3.3-70b', 'Venice AI'],
['unknown-model', 'Other'],
])('%s -> %s', (model, expected) => {
expect(detectProvider(model)).toBe(expected)

View File

@@ -14,6 +14,7 @@ export const MODEL_CATALOG: ModelConfig[] = [
{ alias: 'groq-fast', name: 'groq/llama-3.1-8b-instant', provider: 'groq', description: '840 tok/s, ultra fast', costPer1k: 0.05 },
{ alias: 'groq', name: 'groq/llama-3.3-70b-versatile', provider: 'groq', description: 'Fast + quality balance', costPer1k: 0.59 },
{ alias: 'kimi', name: 'moonshot/kimi-k2.5', provider: 'moonshot', description: 'Alternative provider', costPer1k: 1.0 },
{ alias: 'venice-llama-3.3-70b', name: 'venice/llama-3.3-70b', provider: 'venice', description: 'Venice AI Llama 3.3 70B', costPer1k: 0.7 },
{ alias: 'minimax', name: 'minimax/minimax-m2.1', provider: 'minimax', description: 'Cost-effective (1/10th price), strong coding', costPer1k: 0.3 },
]

View File

@@ -33,6 +33,7 @@ const MODEL_PRICING: Record<string, ModelPricing> = {
'groq/llama-3.1-8b-instant': { inputPerMTok: 0.05, outputPerMTok: 0.05 },
'groq/llama-3.3-70b-versatile': { inputPerMTok: 0.59, outputPerMTok: 0.59 },
'moonshot/kimi-k2.5': { inputPerMTok: 1.0, outputPerMTok: 1.0 },
'venice/llama-3.3-70b': { inputPerMTok: 0.7, outputPerMTok: 2.8 },
'minimax/minimax-m2.1': { inputPerMTok: 0.3, outputPerMTok: 0.3 },
'ollama/deepseek-r1:14b': { inputPerMTok: 0.0, outputPerMTok: 0.0 },
'ollama/qwen2.5-coder:7b': { inputPerMTok: 0.0, outputPerMTok: 0.0 },

View File

@@ -4,6 +4,7 @@ export function detectProvider(model: string): string {
if (lower.includes('gpt') || lower.includes('o1') || lower.includes('o3') || lower.includes('o4') || lower.includes('openai')) return 'OpenAI'
if (lower.includes('gemini') || lower.includes('google')) return 'Google'
if (lower.includes('mistral') || lower.includes('mixtral')) return 'Mistral'
if (lower.includes('venice')) return 'Venice AI'
if (lower.includes('llama') || lower.includes('meta')) return 'Meta'
if (lower.includes('deepseek')) return 'DeepSeek'
if (lower.includes('command') || lower.includes('cohere')) return 'Cohere'