- import type { ProviderName, ModelInfo, ProviderSettings } from "@roo-code/types"
- import {
- anthropicDefaultModelId,
- azureDefaultModelId,
- bedrockDefaultModelId,
- deepSeekDefaultModelId,
- moonshotDefaultModelId,
- geminiDefaultModelId,
- mistralDefaultModelId,
- openAiNativeDefaultModelId,
- qwenCodeDefaultModelId,
- vertexDefaultModelId,
- xaiDefaultModelId,
- sambaNovaDefaultModelId,
- internationalZAiDefaultModelId,
- mainlandZAiDefaultModelId,
- fireworksDefaultModelId,
- minimaxDefaultModelId,
- basetenDefaultModelId,
- } from "@roo-code/types"
- import { MODELS_BY_PROVIDER } from "../constants"
/**
 * Display metadata for a model provider shown in the settings UI:
 * the human-readable service name and the URL of the provider's
 * console / sign-up page.
 */
export interface ProviderServiceConfig {
	serviceName: string
	serviceUrl: string
}
- export const PROVIDER_SERVICE_CONFIG: Partial<Record<ProviderName, ProviderServiceConfig>> = {
- anthropic: { serviceName: "Anthropic", serviceUrl: "https://console.anthropic.com" },
- azure: {
- serviceName: "Azure AI Foundry",
- serviceUrl: "https://azure.microsoft.com/en-us/products/ai-foundry/models/openai",
- },
- bedrock: { serviceName: "Amazon Bedrock", serviceUrl: "https://aws.amazon.com/bedrock" },
- deepseek: { serviceName: "DeepSeek", serviceUrl: "https://platform.deepseek.com" },
- moonshot: { serviceName: "Moonshot", serviceUrl: "https://platform.moonshot.cn" },
- gemini: { serviceName: "Google Gemini", serviceUrl: "https://ai.google.dev" },
- mistral: { serviceName: "Mistral", serviceUrl: "https://console.mistral.ai" },
- "openai-native": { serviceName: "OpenAI", serviceUrl: "https://platform.openai.com" },
- "qwen-code": { serviceName: "Qwen Code", serviceUrl: "https://dashscope.console.aliyun.com" },
- vertex: { serviceName: "GCP Vertex AI", serviceUrl: "https://console.cloud.google.com/vertex-ai" },
- xai: { serviceName: "xAI", serviceUrl: "https://x.ai" },
- sambanova: { serviceName: "SambaNova", serviceUrl: "https://sambanova.ai" },
- zai: { serviceName: "Z.ai", serviceUrl: "https://z.ai" },
- fireworks: { serviceName: "Fireworks AI", serviceUrl: "https://fireworks.ai" },
- minimax: { serviceName: "MiniMax", serviceUrl: "https://minimax.chat" },
- baseten: { serviceName: "Baseten", serviceUrl: "https://baseten.co" },
- ollama: { serviceName: "Ollama", serviceUrl: "https://ollama.ai" },
- lmstudio: { serviceName: "LM Studio", serviceUrl: "https://lmstudio.ai/docs" },
- "vscode-lm": {
- serviceName: "VS Code LM",
- serviceUrl: "https://code.visualstudio.com/api/extension-guides/language-model",
- },
- }
/**
 * Default model id for each provider, sourced from `@roo-code/types`.
 * Z.ai intentionally maps to the international default here; the
 * mainland-China variant is resolved at call time by
 * `getDefaultModelIdForProvider` based on `zaiApiLine`.
 */
export const PROVIDER_DEFAULT_MODEL_IDS: Partial<Record<ProviderName, string>> = {
	anthropic: anthropicDefaultModelId,
	azure: azureDefaultModelId,
	bedrock: bedrockDefaultModelId,
	deepseek: deepSeekDefaultModelId,
	moonshot: moonshotDefaultModelId,
	gemini: geminiDefaultModelId,
	mistral: mistralDefaultModelId,
	"openai-native": openAiNativeDefaultModelId,
	"qwen-code": qwenCodeDefaultModelId,
	vertex: vertexDefaultModelId,
	xai: xaiDefaultModelId,
	sambanova: sambaNovaDefaultModelId,
	zai: internationalZAiDefaultModelId,
	fireworks: fireworksDefaultModelId,
	minimax: minimaxDefaultModelId,
	baseten: basetenDefaultModelId,
}
- export const getProviderServiceConfig = (provider: ProviderName): ProviderServiceConfig => {
- return PROVIDER_SERVICE_CONFIG[provider] ?? { serviceName: provider, serviceUrl: "" }
- }
- export const getDefaultModelIdForProvider = (provider: ProviderName, apiConfiguration?: ProviderSettings): string => {
- // Handle Z.ai's China/International entrypoint distinction
- if (provider === "zai" && apiConfiguration) {
- return apiConfiguration.zaiApiLine === "china_coding"
- ? mainlandZAiDefaultModelId
- : internationalZAiDefaultModelId
- }
- return PROVIDER_DEFAULT_MODEL_IDS[provider] ?? ""
- }
- export const getStaticModelsForProvider = (
- provider: ProviderName,
- customArnLabel?: string,
- ): Record<string, ModelInfo> => {
- const models = MODELS_BY_PROVIDER[provider] ?? {}
- // Add custom-arn option for Bedrock
- if (provider === "bedrock") {
- return {
- ...models,
- "custom-arn": {
- maxTokens: 0,
- contextWindow: 0,
- supportsPromptCache: false,
- description: customArnLabel ?? "Use Custom ARN",
- },
- }
- }
- return models
- }
- /**
- * Checks if a provider uses static models from MODELS_BY_PROVIDER
- */
- export const isStaticModelProvider = (provider: ProviderName): boolean => {
- return provider in MODELS_BY_PROVIDER
- }
/**
 * Providers that render their own bespoke model-selection UI and therefore
 * must NOT use the generic ModelPicker in ApiOptions (see
 * `shouldUseGenericModelPicker`).
 */
export const PROVIDERS_WITH_CUSTOM_MODEL_UI: ProviderName[] = [
	"openrouter",
	"requesty",
	"openai", // OpenAI Compatible
	"openai-codex", // OpenAI Codex has custom UI with auth and rate limits
	"litellm",
	"vercel-ai-gateway",
	"roo",
	"ollama",
	"lmstudio",
	"vscode-lm",
]
- /**
- * Checks if a provider should use the generic ModelPicker
- */
- export const shouldUseGenericModelPicker = (provider: ProviderName): boolean => {
- return isStaticModelProvider(provider) && !PROVIDERS_WITH_CUSTOM_MODEL_UI.includes(provider)
- }
- /**
- * Handles provider-specific side effects when a model is changed.
- * Centralizes provider-specific logic to keep it out of the ApiOptions template.
- */
- export const handleModelChangeSideEffects = <K extends keyof ProviderSettings>(
- provider: ProviderName,
- modelId: string,
- setApiConfigurationField: (field: K, value: ProviderSettings[K]) => void,
- ): void => {
- // Bedrock: Clear custom ARN if not using custom ARN option
- if (provider === "bedrock" && modelId !== "custom-arn") {
- setApiConfigurationField("awsCustomArn" as K, "" as ProviderSettings[K])
- }
- // All providers: Clear reasoning effort when switching models to allow
- // the new model's default to take effect. Different models within the
- // same provider can have different reasoning effort defaults/options.
- setApiConfigurationField("reasoningEffort" as K, undefined as ProviderSettings[K])
- }
|