// AUTO-GENERATED FILE - DO NOT MODIFY DIRECTLY // Generated by scripts/generate-provider-definitions.mjs // Source: src/shared/api.ts // // ============================================================================ // DATA CONTRACT & DOCUMENTATION // ============================================================================ // // This file provides structured provider metadata extracted from TypeScript source. // It serves as the bridge between the VSCode extension's TypeScript API definitions // and the CLI's Go-based setup wizard. // // CORE STRUCTURES // =============== // // ConfigField: Individual configuration fields with type, category, and validation metadata // - Name: Field name as it appears in ApiHandlerOptions (e.g., "cerebrasApiKey") // - Type: TypeScript type (e.g., "string", "number") // - Comment: Inline comment from TypeScript source // - Category: Provider categorization (e.g., "cerebras", "general") // - Required: Whether this field MUST be collected for any provider // - FieldType: UI field type hint ("password", "url", "string", "select") // - Placeholder: Suggested placeholder text for UI input // // ModelInfo: Model capabilities, pricing, and limits // - MaxTokens: Maximum output tokens // - ContextWindow: Total context window size // - SupportsImages: Whether model accepts image inputs // - SupportsPromptCache: Whether model supports prompt caching // - InputPrice: Cost per 1M input tokens (USD) // - OutputPrice: Cost per 1M output tokens (USD) // - CacheWritesPrice: Cost per 1M cached tokens written (USD) // - CacheReadsPrice: Cost per 1M cached tokens read (USD) // - Description: Human-readable model description // // ProviderDefinition: Complete provider metadata including required/optional fields // - ID: Provider identifier (e.g., "cerebras", "anthropic") // - Name: Human-readable display name (e.g., "Cerebras", "Anthropic (Claude)") // - RequiredFields: Fields that MUST be collected (filtered by category + overrides) // - 
OptionalFields: Fields that MAY be collected (filtered by category + overrides) // - Models: Map of model IDs to ModelInfo // - DefaultModelID: Recommended default model from TypeScript source // - HasDynamicModels: Whether provider supports runtime model discovery // - SetupInstructions: User-facing setup guidance // // FIELD FILTERING LOGIC // ===================== // // Fields are categorized during parsing based on provider-specific prefixes in field names: // - "cerebrasApiKey" → category="cerebras" // - "awsAccessKey" → category="aws" (used by bedrock) // - "requestTimeoutMs" → category="general" (applies to all providers) // // The getFieldsByProvider() function filters fields using this priority: // 1. Check field_overrides.go via GetFieldOverride() for manual corrections // 2. Match field.Category against provider ID (primary filtering) // 3. Apply hardcoded switch cases for complex provider relationships // 4. Include universal fields (requestTimeoutMs, ulid, clineAccountId) for all providers // // Required vs Optional: // - Fields are marked as required if they appear in the providerRequiredFields map // in the generator script (scripts/generate-provider-definitions.mjs) // - getFieldsByProvider() respects the required parameter to separate required/optional // // MODEL SELECTION // =============== // // DefaultModelID extraction priority: // 1. Exact match from TypeScript constant (e.g., cerebrasDefaultModelId = "llama-3.3-70b") // 2. Pattern matching on model IDs ("latest", "default", "sonnet", "gpt-4", etc.) // 3. First model in the models map // // Models map contains full capability and pricing data extracted from TypeScript model // definitions (e.g., cerebrasModels, anthropicModels). // // HasDynamicModels indicates providers that support runtime model discovery via API // (e.g., OpenRouter, Ollama, LM Studio). For these providers, the models map may be // incomplete or a representative sample. 
//
// USAGE EXAMPLE
// =============
//
// def, err := GetProviderDefinition("cerebras")
// if err != nil {
//     return err
// }
//
// // Collect required fields from user
// for _, field := range def.RequiredFields {
//     value := promptUser(field.Name, field.Placeholder, field.FieldType == "password")
//     config[field.Name] = value
// }
//
// // Use default model or let user choose
// if def.DefaultModelID != "" {
//     config["modelId"] = def.DefaultModelID
// }
//
// EXTENDING & OVERRIDING
// ======================
//
// DO NOT modify this generated file directly. Changes will be lost on regeneration.
//
// To fix incorrect field categorization:
// - Edit cli/pkg/generated/field_overrides.go
// - Add entries to GetFieldOverride() function
// - Example: Force "awsSessionToken" to be relevant for "bedrock"
//
// To change required fields:
// - Edit providerRequiredFields map in scripts/generate-provider-definitions.mjs
// - Rerun: npm run generate-provider-definitions
//
// To add new providers:
// - Add to ApiProvider type in src/shared/api.ts
// - Add fields to ApiHandlerOptions with provider-specific prefixes
// - Optionally add model definitions (e.g., export const newProviderModels = {...})
// - Rerun generator
//
// To fix default model extraction:
// - Ensure TypeScript source has: export const <provider>DefaultModelId = "model-id"
// - Or update extractDefaultModelIds() patterns in generator script
//
// For upstream changes:
// - Submit pull request to src/shared/api.ts in the main repository
//
// ============================================================================

package generated

import (
	"encoding/json"
	"fmt"
	"strings"
)

// Provider identifier constants.
// Each value is a canonical provider ID string; they must stay in sync with
// AllProviders below and with the keys assembled in GetProviderDefinitions().
// Non-MixedCaps names are kept exactly as emitted by the generator.
const (
	ANTHROPIC     = "anthropic"
	OPENROUTER    = "openrouter"
	BEDROCK       = "bedrock"
	OPENAI        = "openai"
	OLLAMA        = "ollama"
	GEMINI        = "gemini"
	OPENAI_NATIVE = "openai-native"
	XAI           = "xai"
	CEREBRAS      = "cerebras"
	OCA           = "oca"
	NOUSRESEARCH  = "nousResearch"
)

// AllProviders returns a slice of enabled provider IDs for the CLI build.
// This is a filtered subset of all providers available in the VSCode extension.
// To modify which providers are included, edit ENABLED_PROVIDERS in scripts/cli-providers.mjs
var AllProviders = []string{
	"anthropic",
	"openrouter",
	"bedrock",
	"openai",
	"ollama",
	"gemini",
	"openai-native",
	"xai",
	"cerebras",
	"oca",
	"nousResearch",
}

// ConfigField represents a configuration field requirement
type ConfigField struct {
	Name        string `json:"name"`        // field name as it appears in ApiHandlerOptions (e.g. "cerebrasApiKey")
	Type        string `json:"type"`        // TypeScript type from the source (e.g. "string", "number")
	Comment     string `json:"comment"`     // inline comment carried over from the TypeScript source
	Category    string `json:"category"`    // provider categorization derived from the field-name prefix (e.g. "cerebras", "general")
	Required    bool   `json:"required"`    // whether this field MUST be collected (see providerRequiredFields in the generator)
	FieldType   string `json:"fieldType"`   // UI hint: "password", "url", "string", or "select"
	Placeholder string `json:"placeholder"` // suggested placeholder text for UI input
}

// ModelInfo represents model capabilities and pricing
type ModelInfo struct {
	MaxTokens           int     `json:"maxTokens,omitempty"`        // maximum output tokens
	ContextWindow       int     `json:"contextWindow,omitempty"`    // total context window size
	SupportsImages      bool    `json:"supportsImages"`             // whether the model accepts image inputs
	SupportsPromptCache bool    `json:"supportsPromptCache"`        // whether the model supports prompt caching
	InputPrice          float64 `json:"inputPrice,omitempty"`       // USD per 1M input tokens
	OutputPrice         float64 `json:"outputPrice,omitempty"`      // USD per 1M output tokens
	CacheWritesPrice    float64 `json:"cacheWritesPrice,omitempty"` // USD per 1M cached tokens written
	CacheReadsPrice     float64 `json:"cacheReadsPrice,omitempty"`  // USD per 1M cached tokens read
	Description         string  `json:"description,omitempty"`      // human-readable model description
}

// ProviderDefinition represents a provider's metadata and requirements
type ProviderDefinition struct {
	ID                string               `json:"id"`                // provider identifier (e.g. "cerebras", "anthropic")
	Name              string               `json:"name"`              // human-readable display name
	RequiredFields    []ConfigField        `json:"requiredFields"`    // fields that MUST be collected (category filter + overrides)
	OptionalFields    []ConfigField        `json:"optionalFields"`    // fields that MAY be collected (category filter + overrides)
	Models            map[string]ModelInfo `json:"models"`            // model ID -> capabilities/pricing; may be nil for dynamic providers
	DefaultModelID    string               `json:"defaultModelId"`    // recommended default model from the TypeScript source
	HasDynamicModels  bool                 `json:"hasDynamicModels"`  // provider supports runtime model discovery via API
	SetupInstructions string               `json:"setupInstructions"` // user-facing setup guidance
}

// Raw configuration fields data (parsed from TypeScript)
var rawConfigFields = `[
{ "name": "apiKey", "type": "string", "comment": "anthropic", "category": "anthropic", "required": true, "fieldType": "password", "placeholder": "Enter your API key" },
{ "name": "awsAccessKey", "type": "string", "comment": "", "category": "bedrock", "required": true,
"fieldType": "password", "placeholder": "Enter your API key" }, { "name": "awsSecretKey", "type": "string", "comment": "", "category": "bedrock", "required": true, "fieldType": "password", "placeholder": "Enter your API key" }, { "name": "openRouterApiKey", "type": "string", "comment": "", "category": "openrouter", "required": true, "fieldType": "password", "placeholder": "Enter your API key" }, { "name": "awsSessionToken", "type": "string", "comment": "", "category": "bedrock", "required": true, "fieldType": "password", "placeholder": "Enter your API key" }, { "name": "awsBedrockApiKey", "type": "string", "comment": "", "category": "bedrock", "required": true, "fieldType": "password", "placeholder": "Enter your API key" }, { "name": "openAiApiKey", "type": "string", "comment": "", "category": "openai", "required": true, "fieldType": "password", "placeholder": "Enter your API key" }, { "name": "geminiApiKey", "type": "string", "comment": "", "category": "gemini", "required": true, "fieldType": "password", "placeholder": "Enter your API key" }, { "name": "openAiNativeApiKey", "type": "string", "comment": "", "category": "openai-native", "required": true, "fieldType": "password", "placeholder": "Enter your API key" }, { "name": "ollamaApiKey", "type": "string", "comment": "", "category": "ollama", "required": true, "fieldType": "password", "placeholder": "Enter your API key" }, { "name": "authNonce", "type": "string", "comment": "", "category": "general", "required": true, "fieldType": "password", "placeholder": "Enter your API key" }, { "name": "xaiApiKey", "type": "string", "comment": "", "category": "xai", "required": true, "fieldType": "password", "placeholder": "Enter your API key" }, { "name": "cerebrasApiKey", "type": "string", "comment": "", "category": "cerebras", "required": true, "fieldType": "password", "placeholder": "Enter your API key" }, { "name": "nousResearchApiKey", "type": "string", "comment": "", "category": "nousResearch", "required": true, 
"fieldType": "password", "placeholder": "Enter your API key" }, { "name": "ulid", "type": "string", "comment": "Used to identify the task in API requests", "category": "general", "required": false, "fieldType": "string", "placeholder": "" }, { "name": "openAiHeaders", "type": "Record", "comment": "Custom headers for OpenAI requests", "category": "openai", "required": false, "fieldType": "string", "placeholder": "" }, { "name": "anthropicBaseUrl", "type": "string", "comment": "", "category": "anthropic", "required": false, "fieldType": "url", "placeholder": "https://api.example.com" }, { "name": "openRouterProviderSorting", "type": "string", "comment": "", "category": "openrouter", "required": false, "fieldType": "string", "placeholder": "" }, { "name": "openAiBaseUrl", "type": "string", "comment": "", "category": "openai", "required": false, "fieldType": "url", "placeholder": "https://api.example.com" }, { "name": "ollamaBaseUrl", "type": "string", "comment": "", "category": "ollama", "required": false, "fieldType": "url", "placeholder": "https://api.example.com" }, { "name": "ollamaApiOptionsCtxNum", "type": "string", "comment": "", "category": "ollama", "required": false, "fieldType": "string", "placeholder": "" }, { "name": "geminiBaseUrl", "type": "string", "comment": "", "category": "gemini", "required": false, "fieldType": "url", "placeholder": "https://api.example.com" }, { "name": "azureApiVersion", "type": "string", "comment": "", "category": "general", "required": false, "fieldType": "string", "placeholder": "" }, { "name": "requestTimeoutMs", "type": "number", "comment": "", "category": "general", "required": false, "fieldType": "string", "placeholder": "" }, { "name": "sapAiResourceGroup", "type": "string", "comment": "", "category": "general", "required": false, "fieldType": "string", "placeholder": "" }, { "name": "onRetryAttempt", "type": "(attempt: number, maxRetries: number, delay: number, error: any) => void", "comment": "", "category": "general", 
"required": false, "fieldType": "string", "placeholder": "" }, { "name": "ocaBaseUrl", "type": "string", "comment": "", "category": "general", "required": false, "fieldType": "url", "placeholder": "https://api.example.com" }, { "name": "minimaxApiLine", "type": "string", "comment": "", "category": "general", "required": false, "fieldType": "string", "placeholder": "" }, { "name": "ocaMode", "type": "string", "comment": "", "category": "general", "required": false, "fieldType": "string", "placeholder": "" }, { "name": "hicapApiKey", "type": "string", "comment": "", "category": "general", "required": true, "fieldType": "password", "placeholder": "Enter your API key" }, ]` // Raw model definitions data (parsed from TypeScript) var rawModelDefinitions = ` { "anthropic": { "claude-sonnet-4-5-20250929": { "maxTokens": 8192, "contextWindow": 200000, "inputPrice": 3, "outputPrice": 15, "cacheWritesPrice": 3, "cacheReadsPrice": 0, "supportsImages": true, "supportsPromptCache": true }, "claude-sonnet-4-5-20250929:1m": { "maxTokens": 8192, "contextWindow": 1000000, "inputPrice": 3, "outputPrice": 15, "cacheWritesPrice": 3, "cacheReadsPrice": 0, "supportsImages": true, "supportsPromptCache": true }, "claude-haiku-4-5-20251001": { "maxTokens": 8192, "contextWindow": 200000, "inputPrice": 1, "outputPrice": 5, "cacheWritesPrice": 1, "cacheReadsPrice": 0, "supportsImages": true, "supportsPromptCache": true }, "claude-sonnet-4-20250514": { "maxTokens": 8192, "contextWindow": 200000, "inputPrice": 3, "outputPrice": 15, "cacheWritesPrice": 3, "cacheReadsPrice": 0, "supportsImages": true, "supportsPromptCache": true }, "claude-sonnet-4-20250514:1m": { "maxTokens": 8192, "contextWindow": 1000000, "inputPrice": 3, "outputPrice": 15, "cacheWritesPrice": 3, "cacheReadsPrice": 0, "supportsImages": true, "supportsPromptCache": true }, "claude-opus-4-1-20250805": { "maxTokens": 8192, "contextWindow": 200000, "inputPrice": 15, "outputPrice": 75, "cacheWritesPrice": 18, "cacheReadsPrice": 1, 
"supportsImages": true, "supportsPromptCache": true }, "claude-opus-4-20250514": { "maxTokens": 8192, "contextWindow": 200000, "inputPrice": 15, "outputPrice": 75, "cacheWritesPrice": 18, "cacheReadsPrice": 1, "supportsImages": true, "supportsPromptCache": true }, "claude-3-7-sonnet-20250219": { "maxTokens": 8192, "contextWindow": 200000, "inputPrice": 3, "outputPrice": 15, "cacheWritesPrice": 3, "cacheReadsPrice": 0, "supportsImages": true, "supportsPromptCache": true }, "claude-3-5-sonnet-20241022": { "maxTokens": 8192, "contextWindow": 200000, "inputPrice": 3, "outputPrice": 15, "cacheWritesPrice": 3, "cacheReadsPrice": 0, "supportsImages": true, "supportsPromptCache": true }, "claude-3-5-haiku-20241022": { "maxTokens": 8192, "contextWindow": 200000, "inputPrice": 0, "outputPrice": 4, "cacheWritesPrice": 1, "cacheReadsPrice": 0, "supportsImages": false, "supportsPromptCache": true }, "claude-3-opus-20240229": { "maxTokens": 4096, "contextWindow": 200000, "inputPrice": 15, "outputPrice": 75, "cacheWritesPrice": 18, "cacheReadsPrice": 1, "supportsImages": true, "supportsPromptCache": true }, "claude-3-haiku-20240307": { "maxTokens": 4096, "contextWindow": 200000, "inputPrice": 0, "outputPrice": 1, "cacheWritesPrice": 0, "cacheReadsPrice": 0, "supportsImages": true, "supportsPromptCache": true } }, "bedrock": { "anthropic.claude-sonnet-4-5-20250929-v1:0": { "maxTokens": 8192, "contextWindow": 200000, "inputPrice": 3, "outputPrice": 15, "cacheWritesPrice": 3, "cacheReadsPrice": 0, "supportsImages": true, "supportsPromptCache": true }, "anthropic.claude-sonnet-4-5-20250929-v1:0:1m": { "maxTokens": 8192, "contextWindow": 1000000, "inputPrice": 3, "outputPrice": 15, "cacheWritesPrice": 3, "cacheReadsPrice": 0, "supportsImages": true, "supportsPromptCache": true }, "anthropic.claude-haiku-4-5-20251001-v1:0": { "maxTokens": 8192, "contextWindow": 200000, "inputPrice": 1, "outputPrice": 5, "cacheWritesPrice": 1, "cacheReadsPrice": 0, "supportsImages": true, 
"supportsPromptCache": true }, "anthropic.claude-sonnet-4-20250514-v1:0": { "maxTokens": 8192, "contextWindow": 200000, "inputPrice": 3, "outputPrice": 15, "cacheWritesPrice": 3, "cacheReadsPrice": 0, "supportsImages": true, "supportsPromptCache": true }, "anthropic.claude-sonnet-4-20250514-v1:0:1m": { "maxTokens": 8192, "contextWindow": 1000000, "inputPrice": 3, "outputPrice": 15, "cacheWritesPrice": 3, "cacheReadsPrice": 0, "supportsImages": true, "supportsPromptCache": true }, "anthropic.claude-opus-4-20250514-v1:0": { "maxTokens": 8192, "contextWindow": 200000, "inputPrice": 15, "outputPrice": 75, "cacheWritesPrice": 18, "cacheReadsPrice": 1, "supportsImages": true, "supportsPromptCache": true }, "anthropic.claude-opus-4-1-20250805-v1:0": { "maxTokens": 8192, "contextWindow": 200000, "inputPrice": 15, "outputPrice": 75, "cacheWritesPrice": 18, "cacheReadsPrice": 1, "supportsImages": true, "supportsPromptCache": true }, "amazon.nova-premier-v1:0": { "maxTokens": 10000, "contextWindow": 1000000, "inputPrice": 2, "outputPrice": 12, "supportsImages": true, "supportsPromptCache": false }, "amazon.nova-pro-v1:0": { "maxTokens": 5000, "contextWindow": 300000, "inputPrice": 0, "outputPrice": 3, "cacheWritesPrice": 3, "cacheReadsPrice": 0, "supportsImages": true, "supportsPromptCache": true }, "amazon.nova-lite-v1:0": { "maxTokens": 5000, "contextWindow": 300000, "inputPrice": 0, "outputPrice": 0, "cacheWritesPrice": 0, "cacheReadsPrice": 0, "supportsImages": true, "supportsPromptCache": true }, "amazon.nova-micro-v1:0": { "maxTokens": 5000, "contextWindow": 128000, "inputPrice": 0, "outputPrice": 0, "cacheWritesPrice": 0, "cacheReadsPrice": 0, "supportsImages": false, "supportsPromptCache": true }, "anthropic.claude-3-7-sonnet-20250219-v1:0": { "maxTokens": 8192, "contextWindow": 200000, "inputPrice": 3, "outputPrice": 15, "cacheWritesPrice": 3, "cacheReadsPrice": 0, "supportsImages": true, "supportsPromptCache": true }, "anthropic.claude-3-5-sonnet-20241022-v2:0": { 
"maxTokens": 8192, "contextWindow": 200000, "inputPrice": 3, "outputPrice": 15, "cacheWritesPrice": 3, "cacheReadsPrice": 0, "supportsImages": true, "supportsPromptCache": true }, "anthropic.claude-3-5-haiku-20241022-v1:0": { "maxTokens": 8192, "contextWindow": 200000, "inputPrice": 0, "outputPrice": 4, "cacheWritesPrice": 1, "cacheReadsPrice": 0, "supportsImages": true, "supportsPromptCache": true }, "anthropic.claude-3-5-sonnet-20240620-v1:0": { "maxTokens": 8192, "contextWindow": 200000, "inputPrice": 3, "outputPrice": 15, "supportsImages": true, "supportsPromptCache": false }, "anthropic.claude-3-opus-20240229-v1:0": { "maxTokens": 4096, "contextWindow": 200000, "inputPrice": 15, "outputPrice": 75, "supportsImages": true, "supportsPromptCache": false }, "anthropic.claude-3-sonnet-20240229-v1:0": { "maxTokens": 4096, "contextWindow": 200000, "inputPrice": 3, "outputPrice": 15, "supportsImages": true, "supportsPromptCache": false }, "anthropic.claude-3-haiku-20240307-v1:0": { "maxTokens": 4096, "contextWindow": 200000, "inputPrice": 0, "outputPrice": 1, "supportsImages": true, "supportsPromptCache": false }, "deepseek.r1-v1:0": { "maxTokens": 8000, "contextWindow": 64000, "inputPrice": 1, "outputPrice": 5, "supportsImages": false, "supportsPromptCache": false }, "openai.gpt-oss-120b-1:0": { "maxTokens": 8192, "contextWindow": 128000, "inputPrice": 0, "outputPrice": 0, "supportsImages": false, "supportsPromptCache": false, "description": "A state-of-the-art 120B open-weight Mixture-of-Experts language model optimized for strong reasoning, tool use, and efficient deployment on large GPUs" }, "openai.gpt-oss-20b-1:0": { "maxTokens": 8192, "contextWindow": 128000, "inputPrice": 0, "outputPrice": 0, "supportsImages": false, "supportsPromptCache": false, "description": "A compact 20B open-weight Mixture-of-Experts language model designed for strong reasoning and tool use, ideal for edge devices and local inference." 
}, "qwen.qwen3-coder-30b-a3b-v1:0": { "maxTokens": 8192, "contextWindow": 262144, "inputPrice": 0, "outputPrice": 0, "supportsImages": false, "supportsPromptCache": false, "description": "Qwen3 Coder 30B MoE model with 3.3B activated parameters, optimized for code generation and analysis with 256K context window." }, "qwen.qwen3-coder-480b-a35b-v1:0": { "maxTokens": 8192, "contextWindow": 262144, "inputPrice": 0, "outputPrice": 1, "supportsImages": false, "supportsPromptCache": false, "description": "Qwen3 Coder 480B flagship MoE model with 35B activated parameters, designed for complex coding tasks with advanced reasoning capabilities and 256K context window." } }, "gemini": { "gemini-2.5-pro": { "maxTokens": 65536, "contextWindow": 1048576, "inputPrice": 2, "outputPrice": 15, "cacheReadsPrice": 0, "supportsImages": true, "supportsPromptCache": true }, "gemini-2.5-flash-lite-preview-06-17": { "maxTokens": 64000, "contextWindow": 1000000, "inputPrice": 0, "outputPrice": 0, "cacheReadsPrice": 0, "supportsImages": true, "supportsPromptCache": true, "description": "Preview version - may not be available in all regions" }, "gemini-2.5-flash": { "maxTokens": 65536, "contextWindow": 1048576, "inputPrice": 0, "outputPrice": 2, "cacheReadsPrice": 0, "supportsImages": true, "supportsPromptCache": true }, "gemini-2.0-flash-001": { "maxTokens": 8192, "contextWindow": 1048576, "inputPrice": 0, "outputPrice": 0, "cacheWritesPrice": 1, "cacheReadsPrice": 0, "supportsImages": true, "supportsPromptCache": true }, "gemini-2.0-flash-lite-preview-02-05": { "maxTokens": 8192, "contextWindow": 1048576, "inputPrice": 0, "outputPrice": 0, "supportsImages": true, "supportsPromptCache": false }, "gemini-2.0-pro-exp-02-05": { "maxTokens": 8192, "contextWindow": 2097152, "inputPrice": 0, "outputPrice": 0, "supportsImages": true, "supportsPromptCache": false }, "gemini-2.0-flash-thinking-exp-01-21": { "maxTokens": 65536, "contextWindow": 1048576, "inputPrice": 0, "outputPrice": 0, 
"supportsImages": true, "supportsPromptCache": false }, "gemini-2.0-flash-thinking-exp-1219": { "maxTokens": 8192, "contextWindow": 32767, "inputPrice": 0, "outputPrice": 0, "supportsImages": true, "supportsPromptCache": false }, "gemini-2.0-flash-exp": { "maxTokens": 8192, "contextWindow": 1048576, "inputPrice": 0, "outputPrice": 0, "supportsImages": true, "supportsPromptCache": false }, "gemini-1.5-flash-002": { "maxTokens": 8192, "contextWindow": 1048576, "inputPrice": 0, "outputPrice": 0, "cacheWritesPrice": 1, "cacheReadsPrice": 0, "supportsImages": true, "supportsPromptCache": true }, "gemini-1.5-flash-exp-0827": { "maxTokens": 8192, "contextWindow": 1048576, "inputPrice": 0, "outputPrice": 0, "supportsImages": true, "supportsPromptCache": false }, "gemini-1.5-flash-8b-exp-0827": { "maxTokens": 8192, "contextWindow": 1048576, "inputPrice": 0, "outputPrice": 0, "supportsImages": true, "supportsPromptCache": false }, "gemini-1.5-pro-002": { "maxTokens": 8192, "contextWindow": 2097152, "inputPrice": 0, "outputPrice": 0, "supportsImages": true, "supportsPromptCache": false }, "gemini-1.5-pro-exp-0827": { "maxTokens": 8192, "contextWindow": 2097152, "inputPrice": 0, "outputPrice": 0, "supportsImages": true, "supportsPromptCache": false }, "gemini-exp-1206": { "maxTokens": 8192, "contextWindow": 2097152, "inputPrice": 0, "outputPrice": 0, "supportsImages": true, "supportsPromptCache": false } }, "openai-native": { "gpt-5-2025-08-07": { "maxTokens": 8192, "contextWindow": 272000, "inputPrice": 1, "outputPrice": 10, "cacheReadsPrice": 0, "supportsImages": true, "supportsPromptCache": true }, "gpt-5-mini-2025-08-07": { "maxTokens": 8192, "contextWindow": 272000, "inputPrice": 0, "outputPrice": 2, "cacheReadsPrice": 0, "supportsImages": true, "supportsPromptCache": true }, "gpt-5-nano-2025-08-07": { "maxTokens": 8192, "contextWindow": 272000, "inputPrice": 0, "outputPrice": 0, "cacheReadsPrice": 0, "supportsImages": true, "supportsPromptCache": true }, 
"gpt-5-chat-latest": { "maxTokens": 8192, "contextWindow": 400000, "inputPrice": 1, "outputPrice": 10, "cacheReadsPrice": 0, "supportsImages": true, "supportsPromptCache": true }, "o4-mini": { "maxTokens": 100000, "contextWindow": 200000, "inputPrice": 1, "outputPrice": 4, "cacheReadsPrice": 0, "supportsImages": true, "supportsPromptCache": true }, "gpt-4.1": { "maxTokens": 32768, "contextWindow": 1047576, "inputPrice": 2, "outputPrice": 8, "cacheReadsPrice": 0, "supportsImages": true, "supportsPromptCache": true }, "gpt-4.1-mini": { "maxTokens": 32768, "contextWindow": 1047576, "inputPrice": 0, "outputPrice": 1, "cacheReadsPrice": 0, "supportsImages": true, "supportsPromptCache": true }, "gpt-4.1-nano": { "maxTokens": 32768, "contextWindow": 1047576, "inputPrice": 0, "outputPrice": 0, "cacheReadsPrice": 0, "supportsImages": true, "supportsPromptCache": true }, "o3-mini": { "maxTokens": 100000, "contextWindow": 200000, "inputPrice": 1, "outputPrice": 4, "cacheReadsPrice": 0, "supportsImages": false, "supportsPromptCache": true }, "o1-preview": { "maxTokens": 32768, "contextWindow": 128000, "inputPrice": 15, "outputPrice": 60, "cacheReadsPrice": 7, "supportsImages": true, "supportsPromptCache": true }, "o1-mini": { "maxTokens": 65536, "contextWindow": 128000, "inputPrice": 1, "outputPrice": 4, "cacheReadsPrice": 0, "supportsImages": true, "supportsPromptCache": true }, "gpt-4o": { "maxTokens": 4096, "contextWindow": 128000, "inputPrice": 2, "outputPrice": 10, "cacheReadsPrice": 1, "supportsImages": true, "supportsPromptCache": true }, "gpt-4o-mini": { "maxTokens": 16384, "contextWindow": 128000, "inputPrice": 0, "outputPrice": 0, "cacheReadsPrice": 0, "supportsImages": true, "supportsPromptCache": true }, "chatgpt-4o-latest": { "maxTokens": 16384, "contextWindow": 128000, "inputPrice": 5, "outputPrice": 15, "supportsImages": true, "supportsPromptCache": false } }, "xai": { "grok-4-fast-reasoning": { "maxTokens": 30000, "contextWindow": 2000000, "inputPrice": 0, 
"outputPrice": 0, "cacheReadsPrice": 0, "supportsImages": true, "supportsPromptCache": false, "description": "xAI's Grok 4 Fast (free) multimodal model with 2M context." }, "grok-4": { "maxTokens": 8192, "contextWindow": 262144, "inputPrice": 3, "outputPrice": 15, "cacheReadsPrice": 0, "supportsImages": true, "supportsPromptCache": true }, "grok-3-beta": { "maxTokens": 8192, "contextWindow": 131072, "inputPrice": 3, "outputPrice": 15, "supportsImages": false, "supportsPromptCache": true, "description": "X AI's Grok-3 beta model with 131K context window" }, "grok-3-fast-beta": { "maxTokens": 8192, "contextWindow": 131072, "inputPrice": 5, "outputPrice": 25, "supportsImages": false, "supportsPromptCache": true, "description": "X AI's Grok-3 fast beta model with 131K context window" }, "grok-3-mini-beta": { "maxTokens": 8192, "contextWindow": 131072, "inputPrice": 0, "outputPrice": 0, "supportsImages": false, "supportsPromptCache": true, "description": "X AI's Grok-3 mini beta model with 131K context window" }, "grok-3-mini-fast-beta": { "maxTokens": 8192, "contextWindow": 131072, "inputPrice": 0, "outputPrice": 4, "supportsImages": false, "supportsPromptCache": true, "description": "X AI's Grok-3 mini fast beta model with 131K context window" }, "grok-3": { "maxTokens": 8192, "contextWindow": 131072, "inputPrice": 3, "outputPrice": 15, "supportsImages": false, "supportsPromptCache": true, "description": "X AI's Grok-3 model with 131K context window" }, "grok-3-fast": { "maxTokens": 8192, "contextWindow": 131072, "inputPrice": 5, "outputPrice": 25, "supportsImages": false, "supportsPromptCache": true, "description": "X AI's Grok-3 fast model with 131K context window" }, "grok-3-mini": { "maxTokens": 8192, "contextWindow": 131072, "inputPrice": 0, "outputPrice": 0, "supportsImages": false, "supportsPromptCache": true, "description": "X AI's Grok-3 mini model with 131K context window" }, "grok-3-mini-fast": { "maxTokens": 8192, "contextWindow": 131072, "inputPrice": 0, 
"outputPrice": 4, "supportsImages": false, "supportsPromptCache": true, "description": "X AI's Grok-3 mini fast model with 131K context window" }, "grok-2-latest": { "maxTokens": 8192, "contextWindow": 131072, "inputPrice": 2, "outputPrice": 10, "supportsImages": false, "supportsPromptCache": false, "description": "X AI's Grok-2 model - latest version with 131K context window" }, "grok-2": { "maxTokens": 8192, "contextWindow": 131072, "inputPrice": 2, "outputPrice": 10, "supportsImages": false, "supportsPromptCache": false, "description": "X AI's Grok-2 model with 131K context window" }, "grok-2-1212": { "maxTokens": 8192, "contextWindow": 131072, "inputPrice": 2, "outputPrice": 10, "supportsImages": false, "supportsPromptCache": false, "description": "X AI's Grok-2 model (version 1212) with 131K context window" }, "grok-2-vision-latest": { "maxTokens": 8192, "contextWindow": 32768, "inputPrice": 2, "outputPrice": 10, "supportsImages": true, "supportsPromptCache": false, "description": "X AI's Grok-2 Vision model - latest version with image support and 32K context window" }, "grok-2-vision": { "maxTokens": 8192, "contextWindow": 32768, "inputPrice": 2, "outputPrice": 10, "supportsImages": true, "supportsPromptCache": false, "description": "X AI's Grok-2 Vision model with image support and 32K context window" }, "grok-2-vision-1212": { "maxTokens": 8192, "contextWindow": 32768, "inputPrice": 2, "outputPrice": 10, "supportsImages": true, "supportsPromptCache": false, "description": "X AI's Grok-2 Vision model (version 1212) with image support and 32K context window" }, "grok-vision-beta": { "maxTokens": 8192, "contextWindow": 8192, "inputPrice": 5, "outputPrice": 15, "supportsImages": true, "supportsPromptCache": false, "description": "X AI's Grok Vision Beta model with image support and 8K context window" }, "grok-beta": { "maxTokens": 8192, "contextWindow": 131072, "inputPrice": 5, "outputPrice": 15, "supportsImages": false, "supportsPromptCache": false, 
"description": "X AI's Grok Beta model (legacy) with 131K context window" } }, "cerebras": { "gpt-oss-120b": { "maxTokens": 65536, "contextWindow": 128000, "inputPrice": 0, "outputPrice": 0, "supportsImages": false, "supportsPromptCache": false, "description": "Intelligent general purpose model with 3,000 tokens/s" }, "qwen-3-coder-480b-free": { "maxTokens": 40000, "contextWindow": 64000, "inputPrice": 0, "outputPrice": 0, "supportsImages": false, "supportsPromptCache": false, "description": "SOTA coding model with ~2000 tokens/s ($0 free tier)\\n\\n• Use this if you don't have a Cerebras subscription\\n• 64K context window\\n• Rate limits: 150K TPM, 1M TPH/TPD, 10 RPM, 100 RPH/RPD\\n\\nUpgrade for higher limits: [https://cloud.cerebras.ai/?utm=cline](https://cloud.cerebras.ai/?utm=cline)" }, "qwen-3-coder-480b": { "maxTokens": 40000, "contextWindow": 128000, "inputPrice": 0, "outputPrice": 0, "supportsImages": false, "supportsPromptCache": false, "description": "SOTA coding model with ~2000 tokens/s ($50/$250 paid tiers)\\n\\n• Use this if you have a Cerebras subscription\\n• 131K context window with higher rate limits" }, "qwen-3-235b-a22b-instruct-2507": { "maxTokens": 64000, "contextWindow": 64000, "inputPrice": 0, "outputPrice": 0, "supportsImages": false, "supportsPromptCache": false, "description": "Intelligent model with ~1400 tokens/s" }, "llama-3.3-70b": { "maxTokens": 64000, "contextWindow": 64000, "inputPrice": 0, "outputPrice": 0, "supportsImages": false, "supportsPromptCache": false, "description": "Powerful model with ~2600 tokens/s" }, "qwen-3-32b": { "maxTokens": 64000, "contextWindow": 64000, "inputPrice": 0, "outputPrice": 0, "supportsImages": false, "supportsPromptCache": false, "description": "SOTA coding performance with ~2500 tokens/s" }, "qwen-3-235b-a22b-thinking-2507": { "maxTokens": 32000, "contextWindow": 65000, "inputPrice": 0, "outputPrice": 0, "supportsImages": false, "supportsPromptCache": false, "description": "SOTA performance with 
~1500 tokens/s" } }, "nousResearch": { "Hermes-4-405B": { "maxTokens": 8192, "contextWindow": 128000, "inputPrice": 0, "outputPrice": 0, "supportsImages": false, "supportsPromptCache": false, "description": "This is the largest model in the Hermes 4 family, and it is the fullest expression of our design, focused on advanced reasoning and creative depth rather than optimizing inference speed or cost." }, "Hermes-4-70B": { "maxTokens": 8192, "contextWindow": 128000, "inputPrice": 0, "outputPrice": 0, "supportsImages": false, "supportsPromptCache": false, "description": "This incarnation of Hermes 4 balances scale and size. It handles complex reasoning tasks, while staying fast and cost effective. A versatile choice for many use cases." } } }`

// GetConfigFields parses the embedded rawConfigFields JSON and returns every
// configuration field known to the generator.
func GetConfigFields() ([]ConfigField, error) {
	var fields []ConfigField
	if err := json.Unmarshal([]byte(rawConfigFields), &fields); err != nil {
		return nil, fmt.Errorf("failed to parse config fields: %w", err)
	}
	return fields, nil
}

// GetModelDefinitions parses the embedded rawModelDefinitions JSON and returns
// all model definitions, keyed first by provider ID and then by model ID.
func GetModelDefinitions() (map[string]map[string]ModelInfo, error) {
	var models map[string]map[string]ModelInfo
	if err := json.Unmarshal([]byte(rawModelDefinitions), &models); err != nil {
		return nil, fmt.Errorf("failed to parse model definitions: %w", err)
	}
	return models, nil
}

// GetProviderDefinition returns the definition for a single provider, or an
// error if providerID is not one of the known providers.
func GetProviderDefinition(providerID string) (*ProviderDefinition, error) {
	definitions, err := GetProviderDefinitions()
	if err != nil {
		return nil, err
	}
	def, exists := definitions[providerID]
	if !exists {
		return nil, fmt.Errorf("provider %s not found", providerID)
	}
	return &def, nil
}

// newProviderDefinition assembles one ProviderDefinition, wiring in the
// required/optional fields filtered for this provider and the provider's
// entry from the parsed model definitions. Extracted to remove the eleven
// near-identical struct literals previously inlined in GetProviderDefinitions.
func newProviderDefinition(
	id, name, defaultModelID string,
	hasDynamicModels bool,
	setupInstructions string,
	configFields []ConfigField,
	modelDefinitions map[string]map[string]ModelInfo,
) ProviderDefinition {
	return ProviderDefinition{
		ID:                id,
		Name:              name,
		RequiredFields:    getFieldsByProvider(id, configFields, true),
		OptionalFields:    getFieldsByProvider(id, configFields, false),
		Models:            modelDefinitions[id],
		DefaultModelID:    defaultModelID,
		HasDynamicModels:  hasDynamicModels,
		SetupInstructions: setupInstructions,
	}
}

// GetProviderDefinitions returns the full set of provider definitions,
// combining the parsed configuration fields and model definitions with the
// per-provider metadata (display name, default model, dynamic-model flag,
// setup instructions) baked in by the generator.
func GetProviderDefinitions() (map[string]ProviderDefinition, error) {
	configFields, err := GetConfigFields()
	if err != nil {
		return nil, err
	}
	modelDefinitions, err := GetModelDefinitions()
	if err != nil {
		return nil, err
	}

	definitions := make(map[string]ProviderDefinition)
	definitions["anthropic"] = newProviderDefinition(
		"anthropic", "Anthropic (Claude)", "claude-sonnet-4-5-20250929", false,
		`Get your API key from https://console.anthropic.com/`,
		configFields, modelDefinitions)
	definitions["openrouter"] = newProviderDefinition(
		"openrouter", "OpenRouter", "", true,
		`Get your API key from https://openrouter.ai/keys`,
		configFields, modelDefinitions)
	definitions["bedrock"] = newProviderDefinition(
		"bedrock", "AWS Bedrock", "anthropic.claude-sonnet-4-20250514-v1", false,
		`Configure AWS credentials with Bedrock access permissions`,
		configFields, modelDefinitions)
	definitions["openai"] = newProviderDefinition(
		"openai", "OpenAI Compatible", "", true,
		`Get your API key from https://platform.openai.com/api-keys`,
		configFields, modelDefinitions)
	definitions["ollama"] = newProviderDefinition(
		"ollama", "Ollama", "", true,
		`Install Ollama locally and ensure it's running on the specified port`,
		configFields, modelDefinitions)
	definitions["gemini"] = newProviderDefinition(
		"gemini", "Google Gemini", "gemini-2.5-pro", false,
		`Get your API key from https://makersuite.google.com/app/apikey`,
		configFields, modelDefinitions)
	definitions["openai-native"] = newProviderDefinition(
		"openai-native", "OpenAI", "gpt-5-chat-latest", true,
		`Get your API key from your API provider`,
		configFields, modelDefinitions)
	definitions["xai"] = newProviderDefinition(
		"xai", "X AI (Grok)", "grok-4", false,
		`Get your API key from https://console.x.ai/`,
		configFields, modelDefinitions)
	definitions["cerebras"] = newProviderDefinition(
		"cerebras", "Cerebras", "qwen-3-coder-480b-free", false,
		`Get your API key from https://cloud.cerebras.ai/`,
		configFields, modelDefinitions)
	definitions["oca"] = newProviderDefinition(
		"oca", "Oca", "", false,
		`Configure Oca API credentials`,
		configFields, modelDefinitions)
	definitions["nousResearch"] = newProviderDefinition(
		"nousResearch", "NousResearch", "Hermes-4-405B", false,
		`Configure NousResearch API credentials`,
		configFields, modelDefinitions)

	return definitions, nil
}

// IsValidProvider reports whether providerID appears in AllProviders.
func IsValidProvider(providerID string) bool {
	for _, p := range AllProviders {
		if p == providerID {
			return true
		}
	}
	return false
}

// GetProviderDisplayName returns a human-readable name for a provider,
// falling back to the raw providerID for unknown providers.
func GetProviderDisplayName(providerID string) string {
	displayNames := map[string]string{
		"anthropic":     "Anthropic (Claude)",
		"openrouter":    "OpenRouter",
		"bedrock":       "AWS Bedrock",
		"openai":        "OpenAI Compatible",
		"ollama":        "Ollama",
		"gemini":        "Google Gemini",
		"openai-native": "OpenAI",
		"xai":           "X AI (Grok)",
		"cerebras":      "Cerebras",
		"oca":           "Oca",
		"nousResearch":  "NousResearch",
	}
	if name, exists := displayNames[providerID]; exists {
		return name
	}
	return providerID
}

// getFieldsByProvider filters configuration fields by provider and by the
// required flag. Relevance is decided in priority order:
//  1. manual overrides via GetFieldOverride (an override of false excludes
//     the field even if later rules would include it);
//  2. a direct match between the field's category and the provider ID;
//  3. hardcoded cross-provider relationships ("aws" fields belong to
//     bedrock, "openai" fields to openai-native);
//  4. universal "general"-category fields that apply to every provider.
//     Note: ulid is deliberately excluded — it is auto-generated and users
//     should not set it.
//
// Only fields whose Required flag equals the required parameter are returned.
func getFieldsByProvider(providerID string, allFields []ConfigField, required bool) []ConfigField {
	// Loop-invariant: lowercase the provider ID once, not per field.
	providerName := strings.ToLower(providerID)

	// Universal fields (lowercased names) included for every provider.
	universalFields := map[string]bool{
		"requesttimeoutms": true,
		"clineaccountid":   true,
	}

	var fields []ConfigField
	for _, field := range allFields {
		fieldName := strings.ToLower(field.Name)
		fieldCategory := strings.ToLower(field.Category)

		isRelevant := false
		if override, hasOverride := GetFieldOverride(providerID, field.Name); hasOverride {
			// Priority 1: manual overrides win over all category logic.
			isRelevant = override
		} else {
			switch {
			case fieldCategory == providerName:
				// Priority 2: direct category match (primary mechanism).
				isRelevant = true
			case fieldCategory == "aws" && providerID == "bedrock":
				// Priority 3: AWS fields are used by the Bedrock provider.
				isRelevant = true
			case fieldCategory == "openai" && providerID == "openai-native":
				// OpenAI fields are used by the openai-native provider.
				isRelevant = true
			case fieldCategory == "general":
				// Priority 4: universal fields for all providers.
				isRelevant = universalFields[fieldName]
			}
		}

		if isRelevant && field.Required == required {
			fields = append(fields, field)
		}
	}
	return fields
}