Browse Source

Get the model id property for a given provider (#8009)

Chris Estreich 3 months ago
Parent
Commit
a255c95bd0

+ 1 - 1
packages/types/npm/package.metadata.json

@@ -1,6 +1,6 @@
 {
 	"name": "@roo-code/types",
-	"version": "1.78.0",
+	"version": "1.79.0",
 	"description": "TypeScript type definitions for Roo Code.",
 	"publishConfig": {
 		"access": "public",

+ 165 - 55
packages/types/src/provider-settings.ts

@@ -27,53 +27,126 @@ import {
 	internationalZAiModels,
 } from "./providers/index.js"
 
+/**
+ * constants
+ */
+
+export const DEFAULT_CONSECUTIVE_MISTAKE_LIMIT = 3
+
+/**
+ * DynamicProvider
+ *
+ * Dynamic providers require external API calls in order to get the model list.
+ */
+
+export const dynamicProviders = [
+	"openrouter",
+	"vercel-ai-gateway",
+	"huggingface",
+	"litellm",
+	"deepinfra",
+	"io-intelligence",
+	"requesty",
+	"unbound",
+	"glama",
+] as const
+
+export type DynamicProvider = (typeof dynamicProviders)[number]
+
+export const isDynamicProvider = (key: string): key is DynamicProvider =>
+	dynamicProviders.includes(key as DynamicProvider)
+
+/**
+ * LocalProvider
+ *
+ * Local providers require localhost API calls in order to get the model list.
+ */
+
+export const localProviders = ["ollama", "lmstudio"] as const
+
+export type LocalProvider = (typeof localProviders)[number]
+
+export const isLocalProvider = (key: string): key is LocalProvider => localProviders.includes(key as LocalProvider)
+
+/**
+ * InternalProvider
+ *
+ * Internal providers require internal VSCode API calls in order to get the
+ * model list.
+ */
+
+export const internalProviders = ["vscode-lm"] as const
+
+export type InternalProvider = (typeof internalProviders)[number]
+
+export const isInternalProvider = (key: string): key is InternalProvider =>
+	internalProviders.includes(key as InternalProvider)
+
+/**
+ * CustomProvider
+ *
+ * Custom providers are completely configurable within Roo Code settings.
+ */
+
+export const customProviders = ["openai"] as const
+
+export type CustomProvider = (typeof customProviders)[number]
+
+export const isCustomProvider = (key: string): key is CustomProvider => customProviders.includes(key as CustomProvider)
+
+/**
+ * FauxProvider
+ *
+ * Faux providers do not make external inference calls and therefore do not have
+ * model lists.
+ */
+
+export const fauxProviders = ["fake-ai", "human-relay"] as const
+
+export type FauxProvider = (typeof fauxProviders)[number]
+
+export const isFauxProvider = (key: string): key is FauxProvider => fauxProviders.includes(key as FauxProvider)
+
 /**
  * ProviderName
  */
 
 export const providerNames = [
+	...dynamicProviders,
+	...localProviders,
+	...internalProviders,
+	...customProviders,
+	...fauxProviders,
 	"anthropic",
-	"claude-code",
-	"glama",
-	"openrouter",
 	"bedrock",
-	"vertex",
-	"openai",
-	"ollama",
-	"vscode-lm",
-	"lmstudio",
+	"cerebras",
+	"chutes",
+	"claude-code",
+	"doubao",
+	"deepseek",
+	"featherless",
+	"fireworks",
 	"gemini",
 	"gemini-cli",
-	"openai-native",
+	"groq",
 	"mistral",
 	"moonshot",
-	"deepseek",
-	"deepinfra",
-	"doubao",
+	"openai-native",
 	"qwen-code",
-	"unbound",
-	"requesty",
-	"human-relay",
-	"fake-ai",
-	"xai",
-	"groq",
-	"chutes",
-	"litellm",
-	"huggingface",
-	"cerebras",
+	"roo",
 	"sambanova",
+	"vertex",
+	"xai",
 	"zai",
-	"fireworks",
-	"featherless",
-	"io-intelligence",
-	"roo",
-	"vercel-ai-gateway",
 ] as const
 
 export const providerNamesSchema = z.enum(providerNames)
 
 export type ProviderName = z.infer<typeof providerNamesSchema>
 
+export const isProviderName = (key: unknown): key is ProviderName =>
+	typeof key === "string" && providerNames.includes(key as ProviderName)
+
 /**
  * ProviderSettingsEntry
  */
@@ -91,11 +164,6 @@ export type ProviderSettingsEntry = z.infer<typeof providerSettingsEntrySchema>
  * ProviderSettings
  */
 
-/**
- * Default value for consecutive mistake limit
- */
-export const DEFAULT_CONSECUTIVE_MISTAKE_LIMIT = 3
-
 const baseProviderSettingsSchema = z.object({
 	includeMaxTokens: z.boolean().optional(),
 	diffEnabled: z.boolean().optional(),
@@ -124,7 +192,7 @@ const anthropicSchema = apiModelIdProviderModelSchema.extend({
 	apiKey: z.string().optional(),
 	anthropicBaseUrl: z.string().optional(),
 	anthropicUseAuthToken: z.boolean().optional(),
-	anthropicBeta1MContext: z.boolean().optional(), // Enable 'context-1m-2025-08-07' beta for 1M context window
+	anthropicBeta1MContext: z.boolean().optional(), // Enable 'context-1m-2025-08-07' beta for 1M context window.
 })
 
 const claudeCodeSchema = apiModelIdProviderModelSchema.extend({
@@ -160,7 +228,7 @@ const bedrockSchema = apiModelIdProviderModelSchema.extend({
 	awsModelContextWindow: z.number().optional(),
 	awsBedrockEndpointEnabled: z.boolean().optional(),
 	awsBedrockEndpoint: z.string().optional(),
-	awsBedrock1MContext: z.boolean().optional(), // Enable 'context-1m-2025-08-07' beta for 1M context window
+	awsBedrock1MContext: z.boolean().optional(), // Enable 'context-1m-2025-08-07' beta for 1M context window.
 })
 
 const vertexSchema = apiModelIdProviderModelSchema.extend({
@@ -335,7 +403,7 @@ const qwenCodeSchema = apiModelIdProviderModelSchema.extend({
 })
 
 const rooSchema = apiModelIdProviderModelSchema.extend({
-	// No additional fields needed - uses cloud authentication
+	// No additional fields needed - uses cloud authentication.
 })
 
 const vercelAiGatewaySchema = baseProviderSettingsSchema.extend({
@@ -440,7 +508,11 @@ export type ProviderSettingsWithId = z.infer<typeof providerSettingsWithIdSchema
 
 export const PROVIDER_SETTINGS_KEYS = providerSettingsSchema.keyof().options
 
-export const MODEL_ID_KEYS: Partial<keyof ProviderSettings>[] = [
+/**
+ * ModelIdKey
+ */
+
+export const modelIdKeys = [
 	"apiModelId",
 	"glamaModelId",
 	"openRouterModelId",
@@ -455,13 +527,63 @@ export const MODEL_ID_KEYS: Partial<keyof ProviderSettings>[] = [
 	"ioIntelligenceModelId",
 	"vercelAiGatewayModelId",
 	"deepInfraModelId",
-]
+] as const satisfies readonly (keyof ProviderSettings)[]
+
+export type ModelIdKey = (typeof modelIdKeys)[number]
 
 export const getModelId = (settings: ProviderSettings): string | undefined => {
-	const modelIdKey = MODEL_ID_KEYS.find((key) => settings[key])
-	return modelIdKey ? (settings[modelIdKey] as string) : undefined
+	const modelIdKey = modelIdKeys.find((key) => settings[key])
+	return modelIdKey ? settings[modelIdKey] : undefined
+}
+
+/**
+ * TypicalProvider
+ */
+
+export type TypicalProvider = Exclude<ProviderName, InternalProvider | CustomProvider | FauxProvider>
+
+export const isTypicalProvider = (key: unknown): key is TypicalProvider =>
+	isProviderName(key) && !isInternalProvider(key) && !isCustomProvider(key) && !isFauxProvider(key)
+
+export const modelIdKeysByProvider: Record<TypicalProvider, ModelIdKey> = {
+	anthropic: "apiModelId",
+	"claude-code": "apiModelId",
+	glama: "glamaModelId",
+	openrouter: "openRouterModelId",
+	bedrock: "apiModelId",
+	vertex: "apiModelId",
+	"openai-native": "openAiModelId",
+	ollama: "ollamaModelId",
+	lmstudio: "lmStudioModelId",
+	gemini: "apiModelId",
+	"gemini-cli": "apiModelId",
+	mistral: "apiModelId",
+	moonshot: "apiModelId",
+	deepseek: "apiModelId",
+	deepinfra: "deepInfraModelId",
+	doubao: "apiModelId",
+	"qwen-code": "apiModelId",
+	unbound: "unboundModelId",
+	requesty: "requestyModelId",
+	xai: "apiModelId",
+	groq: "apiModelId",
+	chutes: "apiModelId",
+	litellm: "litellmModelId",
+	huggingface: "huggingFaceModelId",
+	cerebras: "apiModelId",
+	sambanova: "apiModelId",
+	zai: "apiModelId",
+	fireworks: "apiModelId",
+	featherless: "apiModelId",
+	"io-intelligence": "ioIntelligenceModelId",
+	roo: "apiModelId",
+	"vercel-ai-gateway": "vercelAiGatewayModelId",
 }
 
+/**
+ * ANTHROPIC_STYLE_PROVIDERS
+ */
+
 // Providers that use Anthropic-style API protocol.
 export const ANTHROPIC_STYLE_PROVIDERS: ProviderName[] = ["anthropic", "claude-code", "bedrock"]
 
@@ -482,6 +604,10 @@ export const getApiProtocol = (provider: ProviderName | undefined, modelId?: str
 	return "openai"
 }
 
+/**
+ * MODELS_BY_PROVIDER
+ */
+
 export const MODELS_BY_PROVIDER: Record<
 	Exclude<ProviderName, "fake-ai" | "human-relay" | "gemini-cli" | "lmstudio" | "openai" | "ollama">,
 	{ id: ProviderName; label: string; models: string[] }
@@ -579,19 +705,3 @@ export const MODELS_BY_PROVIDER: Record<
 	deepinfra: { id: "deepinfra", label: "DeepInfra", models: [] },
 	"vercel-ai-gateway": { id: "vercel-ai-gateway", label: "Vercel AI Gateway", models: [] },
 }
-
-export const dynamicProviders = [
-	"glama",
-	"huggingface",
-	"litellm",
-	"openrouter",
-	"requesty",
-	"unbound",
-	"deepinfra",
-	"vercel-ai-gateway",
-] as const satisfies readonly ProviderName[]
-
-export type DynamicProvider = (typeof dynamicProviders)[number]
-
-export const isDynamicProvider = (key: string): key is DynamicProvider =>
-	dynamicProviders.includes(key as DynamicProvider)

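For illustration only (not part of this commit), a minimal TypeScript sketch of how the new provider classification guards and the `modelIdKeysByProvider` map compose; the `resolveModelId` helper and the import path are assumptions:

import {
	type ProviderSettings,
	isTypicalProvider,
	modelIdKeysByProvider,
	getModelId,
} from "@roo-code/types"

// Typical providers have a dedicated model id key; everything else falls back
// to scanning `modelIdKeys` via `getModelId`.
function resolveModelId(settings: ProviderSettings): string | undefined {
	const provider = settings.apiProvider

	if (isTypicalProvider(provider)) {
		return settings[modelIdKeysByProvider[provider]]
	}

	return getModelId(settings)
}
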
+ 22 - 58
src/api/providers/fetchers/huggingface.ts

@@ -1,17 +1,16 @@
 import axios from "axios"
 import { z } from "zod"
-import type { ModelInfo } from "@roo-code/types"
+
 import {
+	type ModelInfo,
 	HUGGINGFACE_API_URL,
 	HUGGINGFACE_CACHE_DURATION,
 	HUGGINGFACE_DEFAULT_MAX_TOKENS,
 	HUGGINGFACE_DEFAULT_CONTEXT_WINDOW,
 } from "@roo-code/types"
+
 import type { ModelRecord } from "../../../shared/api"
 
-/**
- * HuggingFace Provider Schema
- */
 const huggingFaceProviderSchema = z.object({
 	provider: z.string(),
 	status: z.enum(["live", "staging", "error"]),
@@ -27,7 +26,8 @@ const huggingFaceProviderSchema = z.object({
 })
 
 /**
- * Represents a provider that can serve a HuggingFace model
+ * Represents a provider that can serve a HuggingFace model.
+ *
  * @property provider - The provider identifier (e.g., "sambanova", "together")
  * @property status - The current status of the provider
  * @property supports_tools - Whether the provider supports tool/function calling
@@ -37,9 +37,6 @@ const huggingFaceProviderSchema = z.object({
  */
 export type HuggingFaceProvider = z.infer<typeof huggingFaceProviderSchema>
 
-/**
- * HuggingFace Model Schema
- */
 const huggingFaceModelSchema = z.object({
 	id: z.string(),
 	object: z.literal("model"),
@@ -50,6 +47,7 @@ const huggingFaceModelSchema = z.object({
 
 /**
  * Represents a HuggingFace model available through the router API
+ *
  * @property id - The unique identifier of the model
  * @property object - The object type (always "model")
  * @property created - Unix timestamp of when the model was created
@@ -58,26 +56,13 @@ const huggingFaceModelSchema = z.object({
  */
 export type HuggingFaceModel = z.infer<typeof huggingFaceModelSchema>
 
-/**
- * HuggingFace API Response Schema
- */
 const huggingFaceApiResponseSchema = z.object({
 	object: z.string(),
 	data: z.array(huggingFaceModelSchema),
 })
 
-/**
- * Represents the response from the HuggingFace router API
- * @property object - The response object type
- * @property data - Array of available models
- */
 type HuggingFaceApiResponse = z.infer<typeof huggingFaceApiResponseSchema>
 
-/**
- * Cache entry for storing fetched models
- * @property data - The cached model records
- * @property timestamp - Unix timestamp of when the cache was last updated
- */
 interface CacheEntry {
 	data: ModelRecord
 	rawModels?: HuggingFaceModel[]
@@ -87,13 +72,14 @@ interface CacheEntry {
 let cache: CacheEntry | null = null
 
 /**
- * Parse a HuggingFace model into ModelInfo format
+ * Parse a HuggingFace model into ModelInfo format.
+ *
  * @param model - The HuggingFace model to parse
  * @param provider - Optional specific provider to use for capabilities
  * @returns ModelInfo object compatible with the application's model system
  */
 function parseHuggingFaceModel(model: HuggingFaceModel, provider?: HuggingFaceProvider): ModelInfo {
-	// Use provider-specific values if available, otherwise find first provider with values
+	// Use provider-specific values if available, otherwise find first provider with values.
 	const contextLength =
 		provider?.context_length ||
 		model.providers.find((p) => p.context_length)?.context_length ||
@@ -101,13 +87,13 @@ function parseHuggingFaceModel(model: HuggingFaceModel, provider?: HuggingFacePr
 
 	const pricing = provider?.pricing || model.providers.find((p) => p.pricing)?.pricing
 
-	// Include provider name in description if specific provider is given
+	// Include provider name in description if specific provider is given.
 	const description = provider ? `${model.id} via ${provider.provider}` : `${model.id} via HuggingFace`
 
 	return {
 		maxTokens: Math.min(contextLength, HUGGINGFACE_DEFAULT_MAX_TOKENS),
 		contextWindow: contextLength,
-		supportsImages: false, // HuggingFace API doesn't provide this info yet
+		supportsImages: false, // HuggingFace API doesn't provide this info yet.
 		supportsPromptCache: false,
 		supportsComputerUse: false,
 		inputPrice: pricing?.input,
@@ -125,7 +111,6 @@ function parseHuggingFaceModel(model: HuggingFaceModel, provider?: HuggingFacePr
 export async function getHuggingFaceModels(): Promise<ModelRecord> {
 	const now = Date.now()
 
-	// Check cache
 	if (cache && now - cache.timestamp < HUGGINGFACE_CACHE_DURATION) {
 		return cache.data
 	}
@@ -144,7 +129,7 @@ export async function getHuggingFaceModels(): Promise<ModelRecord> {
 				Pragma: "no-cache",
 				"Cache-Control": "no-cache",
 			},
-			timeout: 10000, // 10 second timeout
+			timeout: 10000,
 		})
 
 		const result = huggingFaceApiResponseSchema.safeParse(response.data)
@@ -157,38 +142,31 @@ export async function getHuggingFaceModels(): Promise<ModelRecord> {
 		const validModels = result.data.data.filter((model) => model.providers.length > 0)
 
 		for (const model of validModels) {
-			// Add the base model
+			// Add the base model.
 			models[model.id] = parseHuggingFaceModel(model)
 
-			// Add provider-specific variants for all live providers
+			// Add provider-specific variants for all live providers.
 			for (const provider of model.providers) {
 				if (provider.status === "live") {
 					const providerKey = `${model.id}:${provider.provider}`
 					const providerModel = parseHuggingFaceModel(model, provider)
 
-					// Always add provider variants to show all available providers
+					// Always add provider variants to show all available providers.
 					models[providerKey] = providerModel
 				}
 			}
 		}
 
-		// Update cache
-		cache = {
-			data: models,
-			rawModels: validModels,
-			timestamp: now,
-		}
+		cache = { data: models, rawModels: validModels, timestamp: now }
 
 		return models
 	} catch (error) {
 		console.error("Error fetching HuggingFace models:", error)
 
-		// Return cached data if available
 		if (cache) {
 			return cache.data
 		}
 
-		// Re-throw with more context
 		if (axios.isAxiosError(error)) {
 			if (error.response) {
 				throw new Error(
@@ -208,45 +186,35 @@ export async function getHuggingFaceModels(): Promise<ModelRecord> {
 }
 
 /**
- * Get cached models without making an API request
+ * Get cached models without making an API request.
  */
 export function getCachedHuggingFaceModels(): ModelRecord | null {
 	return cache?.data || null
 }
 
 /**
- * Get cached raw models for UI display
+ * Get cached raw models for UI display.
  */
 export function getCachedRawHuggingFaceModels(): HuggingFaceModel[] | null {
 	return cache?.rawModels || null
 }
 
-/**
- * Clear the cache
- */
 export function clearHuggingFaceCache(): void {
 	cache = null
 }
 
-/**
- * HuggingFace Models Response Interface
- */
 export interface HuggingFaceModelsResponse {
 	models: HuggingFaceModel[]
 	cached: boolean
 	timestamp: number
 }
 
-/**
- * Get HuggingFace models with response metadata
- * This function provides a higher-level API that includes cache status and timestamp
- */
 export async function getHuggingFaceModelsWithMetadata(): Promise<HuggingFaceModelsResponse> {
 	try {
-		// First, trigger the fetch to populate cache
+		// First, trigger the fetch to populate cache.
 		await getHuggingFaceModels()
 
-		// Get the raw models from cache
+		// Get the raw models from cache.
 		const cachedRawModels = getCachedRawHuggingFaceModels()
 
 		if (cachedRawModels) {
@@ -257,7 +225,7 @@ export async function getHuggingFaceModelsWithMetadata(): Promise<HuggingFaceMod
 			}
 		}
 
-		// If no cached raw models, fetch directly from API
+		// If no cached raw models, fetch directly from API.
 		const response = await axios.get(HUGGINGFACE_API_URL, {
 			headers: {
 				"Upgrade-Insecure-Requests": "1",
@@ -281,10 +249,6 @@ export async function getHuggingFaceModelsWithMetadata(): Promise<HuggingFaceMod
 		}
 	} catch (error) {
 		console.error("Failed to get HuggingFace models:", error)
-		return {
-			models: [],
-			cached: false,
-			timestamp: Date.now(),
-		}
+		return { models: [], cached: false, timestamp: Date.now() }
 	}
 }
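
As a usage sketch (assumed call sites, not part of the diff), the fetcher is cache-first: `getHuggingFaceModels` populates the module-level cache, and the cached accessors avoid further network calls:

import {
	getHuggingFaceModels,
	getCachedHuggingFaceModels,
	getHuggingFaceModelsWithMetadata,
} from "./huggingface"

// Populates the cache (or returns it if still fresh).
const models = await getHuggingFaceModels()

// Synchronous lookup; returns null until the first successful fetch.
const cached = getCachedHuggingFaceModels()

// Higher-level variant that also reports cache status and a timestamp.
const { models: rawModels, cached: fromCache, timestamp } = await getHuggingFaceModelsWithMetadata()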

+ 6 - 34
src/api/providers/fetchers/io-intelligence.ts

@@ -1,12 +1,10 @@
 import axios from "axios"
 import { z } from "zod"
-import type { ModelInfo } from "@roo-code/types"
-import { IO_INTELLIGENCE_CACHE_DURATION } from "@roo-code/types"
+
+import { type ModelInfo, IO_INTELLIGENCE_CACHE_DURATION } from "@roo-code/types"
+
 import type { ModelRecord } from "../../../shared/api"
 
-/**
- * IO Intelligence Model Schema
- */
 const ioIntelligenceModelSchema = z.object({
 	id: z.string(),
 	object: z.literal("model"),
@@ -35,9 +33,6 @@ const ioIntelligenceModelSchema = z.object({
 
 export type IOIntelligenceModel = z.infer<typeof ioIntelligenceModelSchema>
 
-/**
- * IO Intelligence API Response Schema
- */
 const ioIntelligenceApiResponseSchema = z.object({
 	object: z.literal("list"),
 	data: z.array(ioIntelligenceModelSchema),
@@ -45,9 +40,6 @@ const ioIntelligenceApiResponseSchema = z.object({
 
 type IOIntelligenceApiResponse = z.infer<typeof ioIntelligenceApiResponseSchema>
 
-/**
- * Cache entry for storing fetched models
- */
 interface CacheEntry {
 	data: ModelRecord
 	timestamp: number
@@ -66,21 +58,15 @@ const MODEL_CONTEXT_LENGTHS: Record<string, number> = {
 	"openai/gpt-oss-120b": 131072,
 }
 
-/**
- * Vision models that support images
- */
 const VISION_MODELS = new Set([
 	"Qwen/Qwen2.5-VL-32B-Instruct",
 	"meta-llama/Llama-3.2-90B-Vision-Instruct",
 	"meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
 ])
 
-/**
- * Parse an IO Intelligence model into ModelInfo format
- */
 function parseIOIntelligenceModel(model: IOIntelligenceModel): ModelInfo {
 	const contextLength = MODEL_CONTEXT_LENGTHS[model.id] || 8192
-	// Cap maxTokens at 32k for very large context windows, or 20% of context length, whichever is smaller
+	// Cap maxTokens at 32k for very large context windows, or 20% of context length, whichever is smaller.
 	const maxTokens = Math.min(contextLength, Math.ceil(contextLength * 0.2), 32768)
 	const supportsImages = VISION_MODELS.has(model.id)
 
@@ -101,7 +87,6 @@ function parseIOIntelligenceModel(model: IOIntelligenceModel): ModelInfo {
 export async function getIOIntelligenceModels(apiKey?: string): Promise<ModelRecord> {
 	const now = Date.now()
 
-	// Check cache
 	if (cache && now - cache.timestamp < IO_INTELLIGENCE_CACHE_DURATION) {
 		return cache.data
 	}
@@ -113,7 +98,6 @@ export async function getIOIntelligenceModels(apiKey?: string): Promise<ModelRec
 			"Content-Type": "application/json",
 		}
 
-		// Add authorization header if API key is provided
 		if (apiKey) {
 			headers.Authorization = `Bearer ${apiKey}`
 		} else {
@@ -125,7 +109,7 @@ export async function getIOIntelligenceModels(apiKey?: string): Promise<ModelRec
 			"https://api.intelligence.io.solutions/api/v1/models",
 			{
 				headers,
-				timeout: 10000, // 10 second timeout
+				timeout: 10_000,
 			},
 		)
 
@@ -140,22 +124,16 @@ export async function getIOIntelligenceModels(apiKey?: string): Promise<ModelRec
 			models[model.id] = parseIOIntelligenceModel(model)
 		}
 
-		// Update cache
-		cache = {
-			data: models,
-			timestamp: now,
-		}
+		cache = { data: models, timestamp: now }
 
 		return models
 	} catch (error) {
 		console.error("Error fetching IO Intelligence models:", error)
 
-		// Return cached data if available
 		if (cache) {
 			return cache.data
 		}
 
-		// Re-throw with more context
 		if (axios.isAxiosError(error)) {
 			if (error.response) {
 				throw new Error(
@@ -174,16 +152,10 @@ export async function getIOIntelligenceModels(apiKey?: string): Promise<ModelRec
 	}
 }
 
-/**
- * Get cached models without making an API request
- */
 export function getCachedIOIntelligenceModels(): ModelRecord | null {
 	return cache?.data || null
 }
 
-/**
- * Clear the cache
- */
 export function clearIOIntelligenceCache(): void {
 	cache = null
 }

+ 9 - 9
src/api/providers/fetchers/lmstudio.ts

@@ -1,27 +1,27 @@
-import { ModelInfo, lMStudioDefaultModelInfo } from "@roo-code/types"
-import { LLM, LLMInfo, LLMInstanceInfo, LMStudioClient } from "@lmstudio/sdk"
 import axios from "axios"
+import { LLM, LLMInfo, LLMInstanceInfo, LMStudioClient } from "@lmstudio/sdk"
+
+import { type ModelInfo, lMStudioDefaultModelInfo } from "@roo-code/types"
+
 import { flushModels, getModels } from "./modelCache"
 
 const modelsWithLoadedDetails = new Set<string>()
 
-export const hasLoadedFullDetails = (modelId: string): boolean => {
-	return modelsWithLoadedDetails.has(modelId)
-}
+export const hasLoadedFullDetails = (modelId: string): boolean => modelsWithLoadedDetails.has(modelId)
 
 export const forceFullModelDetailsLoad = async (baseUrl: string, modelId: string): Promise<void> => {
 	try {
-		// test the connection to LM Studio first
-		// errors will be caught further down
+		// Test the connection to LM Studio first
+		// Errors will be caught further down.
 		await axios.get(`${baseUrl}/v1/models`)
 		const lmsUrl = baseUrl.replace(/^http:\/\//, "ws://").replace(/^https:\/\//, "wss://")
 
 		const client = new LMStudioClient({ baseUrl: lmsUrl })
 		await client.llm.model(modelId)
 		await flushModels("lmstudio")
-		await getModels({ provider: "lmstudio" }) // force cache update now
+		await getModels({ provider: "lmstudio" }) // Force cache update now.
 
-		// Mark this model as having full details loaded
+		// Mark this model as having full details loaded.
 		modelsWithLoadedDetails.add(modelId)
 	} catch (error) {
 		if (error.code === "ECONNREFUSED") {

+ 20 - 9
src/api/providers/fetchers/modelCache.ts

@@ -2,11 +2,14 @@ import * as path from "path"
 import fs from "fs/promises"
 
 import NodeCache from "node-cache"
+
+import type { ProviderName } from "@roo-code/types"
+
 import { safeWriteJson } from "../../../utils/safeWriteJson"
 
 import { ContextProxy } from "../../../core/config/ContextProxy"
 import { getCacheDirectoryPath } from "../../../utils/storage"
-import { RouterName, ModelRecord } from "../../../shared/api"
+import type { RouterName, ModelRecord } from "../../../shared/api"
 import { fileExistsAtPath } from "../../../utils/fs"
 
 import { getOpenRouterModels } from "./openrouter"
@@ -20,6 +23,8 @@ import { getOllamaModels } from "./ollama"
 import { getLMStudioModels } from "./lmstudio"
 import { getIOIntelligenceModels } from "./io-intelligence"
 import { getDeepInfraModels } from "./deepinfra"
+import { getHuggingFaceModels } from "./huggingface"
+
 const memoryCache = new NodeCache({ stdTTL: 5 * 60, checkperiod: 5 * 60 })
 
 async function writeModels(router: RouterName, data: ModelRecord) {
@@ -49,7 +54,9 @@ async function readModels(router: RouterName): Promise<ModelRecord | undefined>
  */
 export const getModels = async (options: GetModelsOptions): Promise<ModelRecord> => {
 	const { provider } = options
+
 	let models = getModelsFromCache(provider)
+
 	if (models) {
 		return models
 	}
@@ -60,18 +67,18 @@ export const getModels = async (options: GetModelsOptions): Promise<ModelRecord>
 				models = await getOpenRouterModels()
 				break
 			case "requesty":
-				// Requesty models endpoint requires an API key for per-user custom policies
+				// Requesty models endpoint requires an API key for per-user custom policies.
 				models = await getRequestyModels(options.baseUrl, options.apiKey)
 				break
 			case "glama":
 				models = await getGlamaModels()
 				break
 			case "unbound":
-				// Unbound models endpoint requires an API key to fetch application specific models
+				// Unbound models endpoint requires an API key to fetch application specific models.
 				models = await getUnboundModels(options.apiKey)
 				break
 			case "litellm":
-				// Type safety ensures apiKey and baseUrl are always provided for litellm
+				// Type safety ensures apiKey and baseUrl are always provided for LiteLLM.
 				models = await getLiteLLMModels(options.apiKey, options.baseUrl)
 				break
 			case "ollama":
@@ -89,22 +96,25 @@ export const getModels = async (options: GetModelsOptions): Promise<ModelRecord>
 			case "vercel-ai-gateway":
 				models = await getVercelAiGatewayModels()
 				break
+			case "huggingface":
+				models = await getHuggingFaceModels()
+				break
 			default: {
-				// Ensures router is exhaustively checked if RouterName is a strict union
+				// Ensures router is exhaustively checked if RouterName is a strict union.
 				const exhaustiveCheck: never = provider
 				throw new Error(`Unknown provider: ${exhaustiveCheck}`)
 			}
 		}
 
-		// Cache the fetched models (even if empty, to signify a successful fetch with no models)
+		// Cache the fetched models (even if empty, to signify a successful fetch with no models).
 		memoryCache.set(provider, models)
+
 		await writeModels(provider, models).catch((err) =>
 			console.error(`[getModels] Error writing ${provider} models to file cache:`, err),
 		)
 
 		try {
 			models = await readModels(provider)
-			// console.log(`[getModels] read ${router} models from file cache`)
 		} catch (error) {
 			console.error(`[getModels] error reading ${provider} models from file cache`, error)
 		}
@@ -118,13 +128,14 @@ export const getModels = async (options: GetModelsOptions): Promise<ModelRecord>
 }
 
 /**
- * Flush models memory cache for a specific router
+ * Flush models memory cache for a specific router.
+ *
  * @param router - The router to flush models for.
  */
 export const flushModels = async (router: RouterName) => {
 	memoryCache.del(router)
 }
 
-export function getModelsFromCache(provider: string) {
+export function getModelsFromCache(provider: ProviderName) {
 	return memoryCache.get<ModelRecord>(provider)
 }
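
A hedged sketch of how callers hit the shared cache after this change (the call sites and placeholder values below are illustrative; option shapes follow `GetModelsOptions`):

import { getModels, flushModels, getModelsFromCache } from "./modelCache"

// HuggingFace now goes through the same memory + file cache as other dynamic providers.
const hfModels = await getModels({ provider: "huggingface" })

// LiteLLM requires both an API key and a base URL at the type level.
const litellmModels = await getModels({
	provider: "litellm",
	apiKey: "<api-key>",
	baseUrl: "http://localhost:4000",
})

// Subsequent lookups are keyed by provider name; entries can be flushed per router.
const cachedHf = getModelsFromCache("huggingface")
await flushModels("litellm")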

+ 3 - 1
src/api/providers/io-intelligence.ts

@@ -19,8 +19,10 @@ export class IOIntelligenceHandler extends BaseOpenAiCompatibleProvider<IOIntell
 			apiKey: options.ioIntelligenceApiKey,
 		})
 	}
+
 	override getModel() {
 		const modelId = this.options.ioIntelligenceModelId || (ioIntelligenceDefaultModelId as IOIntelligenceModelId)
+
 		const modelInfo =
 			this.providerModels[modelId as IOIntelligenceModelId] ?? this.providerModels[ioIntelligenceDefaultModelId]
 
@@ -28,7 +30,7 @@ export class IOIntelligenceHandler extends BaseOpenAiCompatibleProvider<IOIntell
 			return { id: modelId as IOIntelligenceModelId, info: modelInfo }
 		}
 
-		// Return the requested model ID even if not found, with fallback info
+		// Return the requested model ID even if not found, with fallback info.
 		return {
 			id: modelId as IOIntelligenceModelId,
 			info: {

+ 6 - 0
src/core/webview/__tests__/ClineProvider.spec.ts

@@ -2703,6 +2703,8 @@ describe("ClineProvider - Router Models", () => {
 				ollama: {},
 				lmstudio: {},
 				"vercel-ai-gateway": mockModels,
+				huggingface: {},
+				"io-intelligence": {},
 			},
 		})
 	})
@@ -2752,6 +2754,8 @@ describe("ClineProvider - Router Models", () => {
 				lmstudio: {},
 				litellm: {},
 				"vercel-ai-gateway": mockModels,
+				huggingface: {},
+				"io-intelligence": {},
 			},
 		})
 
@@ -2864,6 +2868,8 @@ describe("ClineProvider - Router Models", () => {
 				ollama: {},
 				lmstudio: {},
 				"vercel-ai-gateway": mockModels,
+				huggingface: {},
+				"io-intelligence": {},
 			},
 		})
 	})

+ 10 - 0
src/core/webview/__tests__/webviewMessageHandler.spec.ts

@@ -1,3 +1,5 @@
+// npx vitest core/webview/__tests__/webviewMessageHandler.spec.ts
+
 import type { Mock } from "vitest"
 
 // Mock dependencies - must come before imports
@@ -227,6 +229,8 @@ describe("webviewMessageHandler - requestRouterModels", () => {
 			apiKey: "litellm-key",
 			baseUrl: "http://localhost:4000",
 		})
+		// Note: huggingface is not fetched in requestRouterModels - it has its own handler
+		// Note: io-intelligence is not fetched because no API key is provided in the mock state
 
 		// Verify response was sent
 		expect(mockClineProvider.postMessageToWebview).toHaveBeenCalledWith({
@@ -241,6 +245,8 @@ describe("webviewMessageHandler - requestRouterModels", () => {
 				ollama: {},
 				lmstudio: {},
 				"vercel-ai-gateway": mockModels,
+				huggingface: {},
+				"io-intelligence": {},
 			},
 		})
 	})
@@ -330,6 +336,8 @@ describe("webviewMessageHandler - requestRouterModels", () => {
 				ollama: {},
 				lmstudio: {},
 				"vercel-ai-gateway": mockModels,
+				huggingface: {},
+				"io-intelligence": {},
 			},
 		})
 	})
@@ -371,6 +379,8 @@ describe("webviewMessageHandler - requestRouterModels", () => {
 				ollama: {},
 				lmstudio: {},
 				"vercel-ai-gateway": mockModels,
+				huggingface: {},
+				"io-intelligence": {},
 			},
 		})
 

+ 32 - 41
src/core/webview/webviewMessageHandler.ts

@@ -23,7 +23,7 @@ import { ClineProvider } from "./ClineProvider"
 import { handleCheckpointRestoreOperation } from "./checkpointRestoreHandler"
 import { changeLanguage, t } from "../../i18n"
 import { Package } from "../../shared/package"
-import { RouterName, toRouterName, ModelRecord } from "../../shared/api"
+import { type RouterName, type ModelRecord, toRouterName } from "../../shared/api"
 import { MessageEnhancer } from "./messageEnhancer"
 
 import {
@@ -756,15 +756,18 @@ export const webviewMessageHandler = async (
 		case "requestRouterModels":
 			const { apiConfiguration } = await provider.getState()
 
-			const routerModels: Partial<Record<RouterName, ModelRecord>> = {
+			const routerModels: Record<RouterName, ModelRecord> = {
 				openrouter: {},
+				"vercel-ai-gateway": {},
+				huggingface: {},
+				litellm: {},
+				deepinfra: {},
+				"io-intelligence": {},
 				requesty: {},
-				glama: {},
 				unbound: {},
-				litellm: {},
+				glama: {},
 				ollama: {},
 				lmstudio: {},
-				deepinfra: {},
 			}
 
 			const safeGetModels = async (options: GetModelsOptions): Promise<ModelRecord> => {
@@ -775,11 +778,12 @@ export const webviewMessageHandler = async (
 						`Failed to fetch models in webviewMessageHandler requestRouterModels for ${options.provider}:`,
 						error,
 					)
-					throw error // Re-throw to be caught by Promise.allSettled
+
+					throw error // Re-throw to be caught by Promise.allSettled.
 				}
 			}
 
-			const modelFetchPromises: Array<{ key: RouterName; options: GetModelsOptions }> = [
+			const modelFetchPromises: { key: RouterName; options: GetModelsOptions }[] = [
 				{ key: "openrouter", options: { provider: "openrouter" } },
 				{
 					key: "requesty",
@@ -802,8 +806,9 @@ export const webviewMessageHandler = async (
 				},
 			]
 
-			// Add IO Intelligence if API key is provided
+			// Add IO Intelligence if API key is provided.
 			const ioIntelligenceApiKey = apiConfiguration.ioIntelligenceApiKey
+
 			if (ioIntelligenceApiKey) {
 				modelFetchPromises.push({
 					key: "io-intelligence",
@@ -811,11 +816,12 @@ export const webviewMessageHandler = async (
 				})
 			}
 
-			// Don't fetch Ollama and LM Studio models by default anymore
-			// They have their own specific handlers: requestOllamaModels and requestLmStudioModels
+			// Don't fetch Ollama and LM Studio models by default anymore.
+			// They have their own specific handlers: requestOllamaModels and requestLmStudioModels.
 
 			const litellmApiKey = apiConfiguration.litellmApiKey || message?.values?.litellmApiKey
 			const litellmBaseUrl = apiConfiguration.litellmBaseUrl || message?.values?.litellmBaseUrl
+
 			if (litellmApiKey && litellmBaseUrl) {
 				modelFetchPromises.push({
 					key: "litellm",
@@ -826,24 +832,17 @@ export const webviewMessageHandler = async (
 			const results = await Promise.allSettled(
 				modelFetchPromises.map(async ({ key, options }) => {
 					const models = await safeGetModels(options)
-					return { key, models } // key is RouterName here
+					return { key, models } // The key is `ProviderName` here.
 				}),
 			)
 
-			const fetchedRouterModels: Partial<Record<RouterName, ModelRecord>> = {
-				...routerModels,
-				// Initialize ollama and lmstudio with empty objects since they use separate handlers
-				ollama: {},
-				lmstudio: {},
-			}
-
 			results.forEach((result, index) => {
-				const routerName = modelFetchPromises[index].key // Get RouterName using index
+				const routerName = modelFetchPromises[index].key
 
 				if (result.status === "fulfilled") {
-					fetchedRouterModels[routerName] = result.value.models
+					routerModels[routerName] = result.value.models
 
-					// Ollama and LM Studio settings pages still need these events
+					// Ollama and LM Studio settings pages still need these events.
 					if (routerName === "ollama" && Object.keys(result.value.models).length > 0) {
 						provider.postMessageToWebview({
 							type: "ollamaModels",
@@ -856,11 +855,11 @@ export const webviewMessageHandler = async (
 						})
 					}
 				} else {
-					// Handle rejection: Post a specific error message for this provider
+					// Handle rejection: Post a specific error message for this provider.
 					const errorMessage = result.reason instanceof Error ? result.reason.message : String(result.reason)
 					console.error(`Error fetching models for ${routerName}:`, result.reason)
 
-					fetchedRouterModels[routerName] = {} // Ensure it's an empty object in the main routerModels message
+					routerModels[routerName] = {} // Ensure it's an empty object in the main routerModels message.
 
 					provider.postMessageToWebview({
 						type: "singleRouterModelFetchResponse",
@@ -871,17 +870,13 @@ export const webviewMessageHandler = async (
 				}
 			})
 
-			provider.postMessageToWebview({
-				type: "routerModels",
-				routerModels: fetchedRouterModels as Record<RouterName, ModelRecord>,
-			})
-
+			provider.postMessageToWebview({ type: "routerModels", routerModels })
 			break
 		case "requestOllamaModels": {
-			// Specific handler for Ollama models only
+			// Specific handler for Ollama models only.
 			const { apiConfiguration: ollamaApiConfig } = await provider.getState()
 			try {
-				// Flush cache first to ensure fresh models
+				// Flush cache first to ensure fresh models.
 				await flushModels("ollama")
 
 				const ollamaModels = await getModels({
@@ -891,10 +886,7 @@ export const webviewMessageHandler = async (
 				})
 
 				if (Object.keys(ollamaModels).length > 0) {
-					provider.postMessageToWebview({
-						type: "ollamaModels",
-						ollamaModels: ollamaModels,
-					})
+					provider.postMessageToWebview({ type: "ollamaModels", ollamaModels: ollamaModels })
 				}
 			} catch (error) {
 				// Silently fail - user hasn't configured Ollama yet
@@ -903,10 +895,10 @@ export const webviewMessageHandler = async (
 			break
 		}
 		case "requestLmStudioModels": {
-			// Specific handler for LM Studio models only
+			// Specific handler for LM Studio models only.
 			const { apiConfiguration: lmStudioApiConfig } = await provider.getState()
 			try {
-				// Flush cache first to ensure fresh models
+				// Flush cache first to ensure fresh models.
 				await flushModels("lmstudio")
 
 				const lmStudioModels = await getModels({
@@ -921,7 +913,7 @@ export const webviewMessageHandler = async (
 					})
 				}
 			} catch (error) {
-				// Silently fail - user hasn't configured LM Studio yet
+				// Silently fail - user hasn't configured LM Studio yet.
 				console.debug("LM Studio models fetch failed:", error)
 			}
 			break
@@ -944,19 +936,18 @@ export const webviewMessageHandler = async (
 			provider.postMessageToWebview({ type: "vsCodeLmModels", vsCodeLmModels })
 			break
 		case "requestHuggingFaceModels":
+			// TODO: Why isn't this handled by `requestRouterModels` above?
 			try {
 				const { getHuggingFaceModelsWithMetadata } = await import("../../api/providers/fetchers/huggingface")
 				const huggingFaceModelsResponse = await getHuggingFaceModelsWithMetadata()
+
 				provider.postMessageToWebview({
 					type: "huggingFaceModels",
 					huggingFaceModels: huggingFaceModelsResponse.models,
 				})
 			} catch (error) {
 				console.error("Failed to fetch Hugging Face models:", error)
-				provider.postMessageToWebview({
-					type: "huggingFaceModels",
-					huggingFaceModels: [],
-				})
+				provider.postMessageToWebview({ type: "huggingFaceModels", huggingFaceModels: [] })
 			}
 			break
 		case "openImage":

+ 35 - 26
src/shared/api.ts

@@ -1,8 +1,12 @@
 import {
 	type ModelInfo,
 	type ProviderSettings,
+	type DynamicProvider,
+	type LocalProvider,
 	ANTHROPIC_DEFAULT_MAX_TOKENS,
 	CLAUDE_CODE_DEFAULT_MAX_OUTPUT_TOKENS,
+	isDynamicProvider,
+	isLocalProvider,
 } from "@roo-code/types"
 
 // ApiHandlerOptions
@@ -18,22 +22,9 @@ export type ApiHandlerOptions = Omit<ProviderSettings, "apiProvider"> & {
 
 // RouterName
 
-const routerNames = [
-	"openrouter",
-	"requesty",
-	"glama",
-	"unbound",
-	"litellm",
-	"ollama",
-	"lmstudio",
-	"io-intelligence",
-	"deepinfra",
-	"vercel-ai-gateway",
-] as const
+export type RouterName = DynamicProvider | LocalProvider
 
-export type RouterName = (typeof routerNames)[number]
-
-export const isRouterName = (value: string): value is RouterName => routerNames.includes(value as RouterName)
+export const isRouterName = (value: string): value is RouterName => isDynamicProvider(value) || isLocalProvider(value)
 
 export function toRouterName(value?: string): RouterName {
 	if (value && isRouterName(value)) {
@@ -144,14 +135,32 @@ export const getModelMaxOutputTokens = ({
 
 // GetModelsOptions
 
-export type GetModelsOptions =
-	| { provider: "openrouter" }
-	| { provider: "glama" }
-	| { provider: "requesty"; apiKey?: string; baseUrl?: string }
-	| { provider: "unbound"; apiKey?: string }
-	| { provider: "litellm"; apiKey: string; baseUrl: string }
-	| { provider: "ollama"; baseUrl?: string; apiKey?: string }
-	| { provider: "lmstudio"; baseUrl?: string }
-	| { provider: "deepinfra"; apiKey?: string; baseUrl?: string }
-	| { provider: "io-intelligence"; apiKey: string }
-	| { provider: "vercel-ai-gateway" }
+// Allow callers to always pass apiKey/baseUrl without excess property errors,
+// while still enforcing required fields per provider where applicable.
+type CommonFetchParams = {
+	apiKey?: string
+	baseUrl?: string
+}
+
+// Exhaustive, value-level map for all dynamic providers.
+// If a new dynamic provider is added in packages/types, this will fail to compile
+// until a corresponding entry is added here.
+const dynamicProviderExtras = {
+	openrouter: {} as {}, // eslint-disable-line @typescript-eslint/no-empty-object-type
+	"vercel-ai-gateway": {} as {}, // eslint-disable-line @typescript-eslint/no-empty-object-type
+	huggingface: {} as {}, // eslint-disable-line @typescript-eslint/no-empty-object-type
+	litellm: {} as { apiKey: string; baseUrl: string },
+	deepinfra: {} as { apiKey?: string; baseUrl?: string },
+	"io-intelligence": {} as { apiKey: string },
+	requesty: {} as { apiKey?: string; baseUrl?: string },
+	unbound: {} as { apiKey?: string },
+	glama: {} as {}, // eslint-disable-line @typescript-eslint/no-empty-object-type
+	ollama: {} as {}, // eslint-disable-line @typescript-eslint/no-empty-object-type
+	lmstudio: {} as {}, // eslint-disable-line @typescript-eslint/no-empty-object-type
+} as const satisfies Record<RouterName, object>
+
+// Build the dynamic options union from the map, intersected with CommonFetchParams
+// so extra fields are always allowed while required ones are enforced.
+export type GetModelsOptions = {
+	[P in keyof typeof dynamicProviderExtras]: ({ provider: P } & (typeof dynamicProviderExtras)[P]) & CommonFetchParams
+}[RouterName]
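
To make the compile-time behavior concrete, a small sketch (assumed, not in the diff) of what the derived union accepts and rejects; the import path is illustrative:

import type { GetModelsOptions } from "./api"

// OK: litellm's required fields are present.
const litellm: GetModelsOptions = { provider: "litellm", apiKey: "key", baseUrl: "http://localhost:4000" }

// OK: common fetch params are accepted even where a provider does not need them.
const openrouter: GetModelsOptions = { provider: "openrouter", baseUrl: "https://openrouter.ai/api/v1" }

// Type error: `apiKey` is required for io-intelligence.
// const ioIntelligence: GetModelsOptions = { provider: "io-intelligence" }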

+ 2 - 4
webview-ui/src/components/settings/ApiOptions.tsx

@@ -216,7 +216,7 @@ const ApiOptions = ({
 					values: {
 						baseUrl: apiConfiguration?.openAiBaseUrl,
 						apiKey: apiConfiguration?.openAiApiKey,
-						customHeaders: {}, // Reserved for any additional headers
+						customHeaders: {}, // Reserved for any additional headers.
 						openAiHeaders: headerObject,
 					},
 				})
@@ -226,9 +226,7 @@ const ApiOptions = ({
 				vscode.postMessage({ type: "requestLmStudioModels" })
 			} else if (selectedProvider === "vscode-lm") {
 				vscode.postMessage({ type: "requestVsCodeLmModels" })
-			} else if (selectedProvider === "litellm") {
-				vscode.postMessage({ type: "requestRouterModels" })
-			} else if (selectedProvider === "deepinfra") {
+			} else if (selectedProvider === "litellm" || selectedProvider === "deepinfra") {
 				vscode.postMessage({ type: "requestRouterModels" })
 			}
 		},

+ 3 - 3
webview-ui/src/components/settings/providers/HuggingFace.tsx

@@ -60,19 +60,19 @@ export const HuggingFace = ({ apiConfiguration, setApiConfigurationField }: Hugg
 		[setApiConfigurationField],
 	)
 
-	// Fetch models when component mounts
+	// Fetch models when component mounts.
 	useEffect(() => {
 		setLoading(true)
 		vscode.postMessage({ type: "requestHuggingFaceModels" })
 	}, [])
 
-	// Handle messages from extension
+	// Handle messages from extension.
 	const onMessage = useCallback((event: MessageEvent) => {
 		const message: ExtensionMessage = event.data
 
 		switch (message.type) {
 			case "huggingFaceModels":
-				setModels(message.huggingFaceModels || [])
+				setModels(message.huggingFaceModels?.sort((a, b) => a.id.localeCompare(b.id)) || [])
 				setLoading(false)
 				break
 		}

+ 1 - 0
webview-ui/src/components/settings/providers/LiteLLM.tsx

@@ -85,6 +85,7 @@ export const LiteLLM = ({
 			setRefreshError(t("settings:providers.refreshModels.missingConfig"))
 			return
 		}
+
 		vscode.postMessage({ type: "requestRouterModels", values: { litellmApiKey: key, litellmBaseUrl: url } })
 	}, [apiConfiguration, setRefreshStatus, setRefreshError, t])
 

+ 2 - 0
webview-ui/src/components/settings/providers/Unbound.tsx

@@ -90,11 +90,13 @@ export const Unbound = ({
 		const modelsPromise = new Promise<void>((resolve) => {
 			const messageHandler = (event: MessageEvent) => {
 				const message = event.data
+
 				if (message.type === "routerModels") {
 					window.removeEventListener("message", messageHandler)
 					resolve()
 				}
 			}
+
 			window.addEventListener("message", messageHandler)
 		})
 

+ 1 - 0
webview-ui/src/utils/__tests__/validate.test.ts

@@ -42,6 +42,7 @@ describe("Model Validation Functions", () => {
 		deepinfra: {},
 		"io-intelligence": {},
 		"vercel-ai-gateway": {},
+		huggingface: {},
 	}
 
 	const allowAllOrganization: OrganizationAllowList = {

+ 58 - 86
webview-ui/src/utils/validate.ts

@@ -1,8 +1,17 @@
 import i18next from "i18next"
 
-import type { ProviderSettings, OrganizationAllowList } from "@roo-code/types"
-
-import { isRouterName, RouterModels } from "@roo/api"
+import {
+	type ProviderSettings,
+	type OrganizationAllowList,
+	type ProviderName,
+	modelIdKeysByProvider,
+	isProviderName,
+	isDynamicProvider,
+	isFauxProvider,
+	isCustomProvider,
+} from "@roo-code/types"
+
+import type { RouterModels } from "@roo/api"
 
 export function validateApiConfiguration(
 	apiConfiguration: ProviderSettings,
@@ -10,6 +19,7 @@ export function validateApiConfiguration(
 	organizationAllowList?: OrganizationAllowList,
 ): string | undefined {
 	const keysAndIdsPresentErrorMessage = validateModelsAndKeysProvided(apiConfiguration)
+
 	if (keysAndIdsPresentErrorMessage) {
 		return keysAndIdsPresentErrorMessage
 	}
@@ -18,11 +28,12 @@ export function validateApiConfiguration(
 		apiConfiguration,
 		organizationAllowList,
 	)
+
 	if (organizationAllowListError) {
 		return organizationAllowListError.message
 	}
 
-	return validateModelId(apiConfiguration, routerModels)
+	return validateDynamicProviderModelId(apiConfiguration, routerModels)
 }
 
 function validateModelsAndKeysProvided(apiConfiguration: ProviderSettings): string | undefined {
@@ -161,9 +172,13 @@ function validateProviderAgainstOrganizationSettings(
 ): ValidationError | undefined {
 	if (organizationAllowList && !organizationAllowList.allowAll) {
 		const provider = apiConfiguration.apiProvider
-		if (!provider) return undefined
+
+		if (!provider) {
+			return undefined
+		}
 
 		const providerConfig = organizationAllowList.providers[provider]
+
 		if (!providerConfig) {
 			return {
 				message: i18next.t("settings:validation.providerNotAllowed", { provider }),
@@ -188,47 +203,28 @@ function validateProviderAgainstOrganizationSettings(
 	}
 }
 
-function getModelIdForProvider(apiConfiguration: ProviderSettings, provider: string): string | undefined {
-	switch (provider) {
-		case "openrouter":
-			return apiConfiguration.openRouterModelId
-		case "glama":
-			return apiConfiguration.glamaModelId
-		case "unbound":
-			return apiConfiguration.unboundModelId
-		case "requesty":
-			return apiConfiguration.requestyModelId
-		case "deepinfra":
-			return apiConfiguration.deepInfraModelId
-		case "litellm":
-			return apiConfiguration.litellmModelId
-		case "openai":
-			return apiConfiguration.openAiModelId
-		case "ollama":
-			return apiConfiguration.ollamaModelId
-		case "lmstudio":
-			return apiConfiguration.lmStudioModelId
-		case "vscode-lm":
-			// vsCodeLmModelSelector is an object, not a string
-			return apiConfiguration.vsCodeLmModelSelector?.id
-		case "huggingface":
-			return apiConfiguration.huggingFaceModelId
-		case "io-intelligence":
-			return apiConfiguration.ioIntelligenceModelId
-		case "vercel-ai-gateway":
-			return apiConfiguration.vercelAiGatewayModelId
-		default:
-			return apiConfiguration.apiModelId
+function getModelIdForProvider(apiConfiguration: ProviderSettings, provider: ProviderName): string | undefined {
+	if (provider === "vscode-lm") {
+		return apiConfiguration.vsCodeLmModelSelector?.id
 	}
+
+	if (isCustomProvider(provider) || isFauxProvider(provider)) {
+		return apiConfiguration.apiModelId
+	}
+
+	return apiConfiguration[modelIdKeysByProvider[provider]]
 }
+
 /**
- * Validates an Amazon Bedrock ARN format and optionally checks if the region in the ARN matches the provided region
+ * Validates an Amazon Bedrock ARN format and optionally checks if the region in
+ * the ARN matches the provided region.
+ *
  * @param arn The ARN string to validate
  * @param region Optional region to check against the ARN's region
  * @returns An object with validation results: { isValid, arnRegion, errorMessage }
  */
 export function validateBedrockArn(arn: string, region?: string) {
-	// Validate ARN format
+	// Validate ARN format.
 	const arnRegex = /^arn:aws:(?:bedrock|sagemaker):([^:]+):([^:]*):(?:([^/]+)\/([\w.\-:]+)|([^/]+))$/
 	const match = arn.match(arnRegex)
 
@@ -240,10 +236,10 @@ export function validateBedrockArn(arn: string, region?: string) {
 		}
 	}
 
-	// Extract region from ARN
+	// Extract region from ARN.
 	const arnRegion = match[1]
 
-	// Check if region in ARN matches provided region (if specified)
+	// Check if region in ARN matches provided region (if specified).
 	if (region && arnRegion !== region) {
 		return {
 			isValid: true,
@@ -252,51 +248,22 @@ export function validateBedrockArn(arn: string, region?: string) {
 		}
 	}
 
-	// ARN is valid and region matches (or no region was provided to check against)
+	// ARN is valid and region matches (or no region was provided to check against).
 	return { isValid: true, arnRegion, errorMessage: undefined }
 }
 
-export function validateModelId(apiConfiguration: ProviderSettings, routerModels?: RouterModels): string | undefined {
+function validateDynamicProviderModelId(
+	apiConfiguration: ProviderSettings,
+	routerModels?: RouterModels,
+): string | undefined {
 	const provider = apiConfiguration.apiProvider ?? ""
 
-	if (!isRouterName(provider)) {
+	// We only validate model ids from dynamic providers.
+	if (!isDynamicProvider(provider)) {
 		return undefined
 	}
 
-	let modelId: string | undefined
-
-	switch (provider) {
-		case "openrouter":
-			modelId = apiConfiguration.openRouterModelId
-			break
-		case "glama":
-			modelId = apiConfiguration.glamaModelId
-			break
-		case "unbound":
-			modelId = apiConfiguration.unboundModelId
-			break
-		case "requesty":
-			modelId = apiConfiguration.requestyModelId
-			break
-		case "deepinfra":
-			modelId = apiConfiguration.deepInfraModelId
-			break
-		case "ollama":
-			modelId = apiConfiguration.ollamaModelId
-			break
-		case "lmstudio":
-			modelId = apiConfiguration.lmStudioModelId
-			break
-		case "litellm":
-			modelId = apiConfiguration.litellmModelId
-			break
-		case "io-intelligence":
-			modelId = apiConfiguration.ioIntelligenceModelId
-			break
-		case "vercel-ai-gateway":
-			modelId = apiConfiguration.vercelAiGatewayModelId
-			break
-	}
+	const modelId = getModelIdForProvider(apiConfiguration, provider)
 
 	if (!modelId) {
 		return i18next.t("settings:validation.modelId")
@@ -312,39 +279,44 @@ export function validateModelId(apiConfiguration: ProviderSettings, routerModels
 }
 
 /**
- * Extracts model-specific validation errors from the API configuration
- * This is used to show model errors specifically in the model selector components
+ * Extracts model-specific validation errors from the API configuration.
+ * This is used to show model errors specifically in the model selector components.
  */
 export function getModelValidationError(
 	apiConfiguration: ProviderSettings,
 	routerModels?: RouterModels,
 	organizationAllowList?: OrganizationAllowList,
 ): string | undefined {
-	const modelId = getModelIdForProvider(apiConfiguration, apiConfiguration.apiProvider || "")
+	const modelId = isProviderName(apiConfiguration.apiProvider)
+		? getModelIdForProvider(apiConfiguration, apiConfiguration.apiProvider)
+		: apiConfiguration.apiModelId
+
 	const configWithModelId = {
 		...apiConfiguration,
 		apiModelId: modelId || "",
 	}
 
 	const orgError = validateProviderAgainstOrganizationSettings(configWithModelId, organizationAllowList)
+
 	if (orgError && orgError.code === "MODEL_NOT_ALLOWED") {
 		return orgError.message
 	}
 
-	return validateModelId(configWithModelId, routerModels)
+	return validateDynamicProviderModelId(configWithModelId, routerModels)
 }
 
 /**
- * Validates API configuration but excludes model-specific errors
+ * Validates API configuration but excludes model-specific errors.
  * This is used for the general API error display to prevent duplication
- * when model errors are shown in the model selector
+ * when model errors are shown in the model selector.
  */
 export function validateApiConfigurationExcludingModelErrors(
 	apiConfiguration: ProviderSettings,
-	_routerModels?: RouterModels, // keeping this for compatibility with the old function
+	_routerModels?: RouterModels, // Keeping this for compatibility with the old function.
 	organizationAllowList?: OrganizationAllowList,
 ): string | undefined {
 	const keysAndIdsPresentErrorMessage = validateModelsAndKeysProvided(apiConfiguration)
+
 	if (keysAndIdsPresentErrorMessage) {
 		return keysAndIdsPresentErrorMessage
 	}
@@ -354,11 +326,11 @@ export function validateApiConfigurationExcludingModelErrors(
 		organizationAllowList,
 	)
 
-	// only return organization errors if they're not model-specific
+	// Only return organization errors if they're not model-specific.
 	if (organizationAllowListError && organizationAllowListError.code === "PROVIDER_NOT_ALLOWED") {
 		return organizationAllowListError.message
 	}
 
-	// skip model validation errors as they'll be shown in the model selector
+	// Skip model validation errors as they'll be shown in the model selector.
 	return undefined
 }
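
As a closing sketch (illustrative only; the model id, router models, and import paths below are made up), the refactored validation resolves a dynamic provider's model id through `modelIdKeysByProvider` and checks it against the fetched router model lists:

import type { ProviderSettings, OrganizationAllowList } from "@roo-code/types"
import type { RouterModels } from "@roo/api"
import { validateApiConfiguration, getModelValidationError } from "./validate"

const apiConfiguration: ProviderSettings = {
	apiProvider: "openrouter",
	openRouterModelId: "example/model-id", // Hypothetical id for illustration.
}

const routerModels = { openrouter: { "example/model-id": {} } } as unknown as RouterModels
const allowList: OrganizationAllowList = { allowAll: true, providers: {} }

// Returns undefined when the configuration is valid, otherwise a localized message.
const configError = validateApiConfiguration(apiConfiguration, routerModels, allowList)

// Model-specific errors are surfaced separately so the model selector can show them.
const modelError = getModelValidationError(apiConfiguration, routerModels, allowList)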