// providerModelConfig.ts
  1. import type { ProviderName, ModelInfo, ProviderSettings } from "@roo-code/types"
  2. import {
  3. anthropicDefaultModelId,
  4. azureDefaultModelId,
  5. bedrockDefaultModelId,
  6. deepSeekDefaultModelId,
  7. moonshotDefaultModelId,
  8. geminiDefaultModelId,
  9. mistralDefaultModelId,
  10. openAiNativeDefaultModelId,
  11. qwenCodeDefaultModelId,
  12. vertexDefaultModelId,
  13. xaiDefaultModelId,
  14. sambaNovaDefaultModelId,
  15. internationalZAiDefaultModelId,
  16. mainlandZAiDefaultModelId,
  17. fireworksDefaultModelId,
  18. minimaxDefaultModelId,
  19. basetenDefaultModelId,
  20. } from "@roo-code/types"
  21. import { MODELS_BY_PROVIDER } from "../constants"
  22. export interface ProviderServiceConfig {
  23. serviceName: string
  24. serviceUrl: string
  25. }
  26. export const PROVIDER_SERVICE_CONFIG: Partial<Record<ProviderName, ProviderServiceConfig>> = {
  27. anthropic: { serviceName: "Anthropic", serviceUrl: "https://console.anthropic.com" },
  28. azure: {
  29. serviceName: "Azure AI Foundry",
  30. serviceUrl: "https://azure.microsoft.com/en-us/products/ai-foundry/models/openai",
  31. },
  32. bedrock: { serviceName: "Amazon Bedrock", serviceUrl: "https://aws.amazon.com/bedrock" },
  33. deepseek: { serviceName: "DeepSeek", serviceUrl: "https://platform.deepseek.com" },
  34. moonshot: { serviceName: "Moonshot", serviceUrl: "https://platform.moonshot.cn" },
  35. gemini: { serviceName: "Google Gemini", serviceUrl: "https://ai.google.dev" },
  36. mistral: { serviceName: "Mistral", serviceUrl: "https://console.mistral.ai" },
  37. "openai-native": { serviceName: "OpenAI", serviceUrl: "https://platform.openai.com" },
  38. "qwen-code": { serviceName: "Qwen Code", serviceUrl: "https://dashscope.console.aliyun.com" },
  39. vertex: { serviceName: "GCP Vertex AI", serviceUrl: "https://console.cloud.google.com/vertex-ai" },
  40. xai: { serviceName: "xAI", serviceUrl: "https://x.ai" },
  41. sambanova: { serviceName: "SambaNova", serviceUrl: "https://sambanova.ai" },
  42. zai: { serviceName: "Z.ai", serviceUrl: "https://z.ai" },
  43. fireworks: { serviceName: "Fireworks AI", serviceUrl: "https://fireworks.ai" },
  44. minimax: { serviceName: "MiniMax", serviceUrl: "https://minimax.chat" },
  45. baseten: { serviceName: "Baseten", serviceUrl: "https://baseten.co" },
  46. ollama: { serviceName: "Ollama", serviceUrl: "https://ollama.ai" },
  47. lmstudio: { serviceName: "LM Studio", serviceUrl: "https://lmstudio.ai/docs" },
  48. "vscode-lm": {
  49. serviceName: "VS Code LM",
  50. serviceUrl: "https://code.visualstudio.com/api/extension-guides/language-model",
  51. },
  52. }
  53. export const PROVIDER_DEFAULT_MODEL_IDS: Partial<Record<ProviderName, string>> = {
  54. anthropic: anthropicDefaultModelId,
  55. azure: azureDefaultModelId,
  56. bedrock: bedrockDefaultModelId,
  57. deepseek: deepSeekDefaultModelId,
  58. moonshot: moonshotDefaultModelId,
  59. gemini: geminiDefaultModelId,
  60. mistral: mistralDefaultModelId,
  61. "openai-native": openAiNativeDefaultModelId,
  62. "qwen-code": qwenCodeDefaultModelId,
  63. vertex: vertexDefaultModelId,
  64. xai: xaiDefaultModelId,
  65. sambanova: sambaNovaDefaultModelId,
  66. zai: internationalZAiDefaultModelId,
  67. fireworks: fireworksDefaultModelId,
  68. minimax: minimaxDefaultModelId,
  69. baseten: basetenDefaultModelId,
  70. }
  71. export const getProviderServiceConfig = (provider: ProviderName): ProviderServiceConfig => {
  72. return PROVIDER_SERVICE_CONFIG[provider] ?? { serviceName: provider, serviceUrl: "" }
  73. }
  74. export const getDefaultModelIdForProvider = (provider: ProviderName, apiConfiguration?: ProviderSettings): string => {
  75. // Handle Z.ai's China/International entrypoint distinction
  76. if (provider === "zai" && apiConfiguration) {
  77. return apiConfiguration.zaiApiLine === "china_coding"
  78. ? mainlandZAiDefaultModelId
  79. : internationalZAiDefaultModelId
  80. }
  81. return PROVIDER_DEFAULT_MODEL_IDS[provider] ?? ""
  82. }
  83. export const getStaticModelsForProvider = (
  84. provider: ProviderName,
  85. customArnLabel?: string,
  86. ): Record<string, ModelInfo> => {
  87. const models = MODELS_BY_PROVIDER[provider] ?? {}
  88. // Add custom-arn option for Bedrock
  89. if (provider === "bedrock") {
  90. return {
  91. ...models,
  92. "custom-arn": {
  93. maxTokens: 0,
  94. contextWindow: 0,
  95. supportsPromptCache: false,
  96. description: customArnLabel ?? "Use Custom ARN",
  97. },
  98. }
  99. }
  100. return models
  101. }
  102. /**
  103. * Checks if a provider uses static models from MODELS_BY_PROVIDER
  104. */
  105. export const isStaticModelProvider = (provider: ProviderName): boolean => {
  106. return provider in MODELS_BY_PROVIDER
  107. }
  108. /**
  109. * List of providers that have their own custom model selection UI
  110. * and should not use the generic ModelPicker in ApiOptions
  111. */
  112. export const PROVIDERS_WITH_CUSTOM_MODEL_UI: ProviderName[] = [
  113. "openrouter",
  114. "requesty",
  115. "openai", // OpenAI Compatible
  116. "openai-codex", // OpenAI Codex has custom UI with auth and rate limits
  117. "litellm",
  118. "vercel-ai-gateway",
  119. "roo",
  120. "ollama",
  121. "lmstudio",
  122. "vscode-lm",
  123. ]
  124. /**
  125. * Checks if a provider should use the generic ModelPicker
  126. */
  127. export const shouldUseGenericModelPicker = (provider: ProviderName): boolean => {
  128. return isStaticModelProvider(provider) && !PROVIDERS_WITH_CUSTOM_MODEL_UI.includes(provider)
  129. }
  130. /**
  131. * Handles provider-specific side effects when a model is changed.
  132. * Centralizes provider-specific logic to keep it out of the ApiOptions template.
  133. */
  134. export const handleModelChangeSideEffects = <K extends keyof ProviderSettings>(
  135. provider: ProviderName,
  136. modelId: string,
  137. setApiConfigurationField: (field: K, value: ProviderSettings[K]) => void,
  138. ): void => {
  139. // Bedrock: Clear custom ARN if not using custom ARN option
  140. if (provider === "bedrock" && modelId !== "custom-arn") {
  141. setApiConfigurationField("awsCustomArn" as K, "" as ProviderSettings[K])
  142. }
  143. // All providers: Clear reasoning effort when switching models to allow
  144. // the new model's default to take effect. Different models within the
  145. // same provider can have different reasoning effort defaults/options.
  146. setApiConfigurationField("reasoningEffort" as K, undefined as ProviderSettings[K])
  147. }