Просмотр исходного кода

Add LiteLLM provider (#3242)

Co-authored-by: ellipsis-dev[bot] <65095814+ellipsis-dev[bot]@users.noreply.github.com>
John Richmond 8 месяцев назад
Родитель
Commit
0bbd3fd1df
34 измененных файлов с 366 добавлено и 5 удалено
  1. 3 0
      src/api/index.ts
  2. 15 1
      src/api/providers/fetchers/cache.ts
  3. 58 0
      src/api/providers/fetchers/litellm.ts
  4. 113 0
      src/api/providers/litellm.ts
  5. 1 1
      src/api/providers/router-provider.ts
  6. 3 1
      src/core/webview/webviewMessageHandler.ts
  7. 5 0
      src/exports/roo-code.d.ts
  8. 5 0
      src/exports/types.ts
  9. 11 0
      src/schemas/index.ts
  10. 15 1
      src/shared/api.ts
  11. 20 0
      webview-ui/src/components/settings/ApiOptions.tsx
  12. 1 1
      webview-ui/src/components/settings/ModelPicker.tsx
  13. 1 0
      webview-ui/src/components/settings/constants.ts
  14. 64 0
      webview-ui/src/components/settings/providers/LiteLLM.tsx
  15. 1 0
      webview-ui/src/components/settings/providers/index.ts
  16. 8 0
      webview-ui/src/components/ui/hooks/useSelectedModel.ts
  17. 2 0
      webview-ui/src/i18n/locales/ca/settings.json
  18. 2 0
      webview-ui/src/i18n/locales/de/settings.json
  19. 2 0
      webview-ui/src/i18n/locales/en/settings.json
  20. 2 0
      webview-ui/src/i18n/locales/es/settings.json
  21. 2 0
      webview-ui/src/i18n/locales/fr/settings.json
  22. 2 0
      webview-ui/src/i18n/locales/hi/settings.json
  23. 2 0
      webview-ui/src/i18n/locales/it/settings.json
  24. 2 0
      webview-ui/src/i18n/locales/ja/settings.json
  25. 2 0
      webview-ui/src/i18n/locales/ko/settings.json
  26. 2 0
      webview-ui/src/i18n/locales/nl/settings.json
  27. 2 0
      webview-ui/src/i18n/locales/pl/settings.json
  28. 2 0
      webview-ui/src/i18n/locales/pt-BR/settings.json
  29. 2 0
      webview-ui/src/i18n/locales/ru/settings.json
  30. 2 0
      webview-ui/src/i18n/locales/tr/settings.json
  31. 2 0
      webview-ui/src/i18n/locales/vi/settings.json
  32. 2 0
      webview-ui/src/i18n/locales/zh-CN/settings.json
  33. 2 0
      webview-ui/src/i18n/locales/zh-TW/settings.json
  34. 8 0
      webview-ui/src/utils/validate.ts

+ 3 - 0
src/api/index.ts

@@ -25,6 +25,7 @@ import { FakeAIHandler } from "./providers/fake-ai"
 import { XAIHandler } from "./providers/xai"
 import { GroqHandler } from "./providers/groq"
 import { ChutesHandler } from "./providers/chutes"
+import { LiteLLMHandler } from "./providers/litellm"
 
 export interface SingleCompletionHandler {
 	completePrompt(prompt: string): Promise<string>
@@ -94,6 +95,8 @@ export function buildApiHandler(configuration: ApiConfiguration): ApiHandler {
 			return new GroqHandler(options)
 		case "chutes":
 			return new ChutesHandler(options)
+		case "litellm":
+			return new LiteLLMHandler(options)
 		default:
 			return new AnthropicHandler(options)
 	}

+ 15 - 1
src/api/providers/fetchers/cache.ts

@@ -12,6 +12,7 @@ import { getOpenRouterModels } from "./openrouter"
 import { getRequestyModels } from "./requesty"
 import { getGlamaModels } from "./glama"
 import { getUnboundModels } from "./unbound"
+import { getLiteLLMModels } from "./litellm"
 
 const memoryCache = new NodeCache({ stdTTL: 5 * 60, checkperiod: 5 * 60 })
 
@@ -36,9 +37,15 @@ async function readModels(router: RouterName): Promise<ModelRecord | undefined>
  * 2. File cache - This is a file-based cache that is used to store models for a longer period of time.
  *
  * @param router - The router to fetch models from.
+ * @param apiKey - Optional API key for the provider.
+ * @param baseUrl - Optional base URL for the provider (currently used only for LiteLLM).
  * @returns The models from the cache or the fetched models.
  */
-export const getModels = async (router: RouterName, apiKey: string | undefined = undefined): Promise<ModelRecord> => {
+export const getModels = async (
+	router: RouterName,
+	apiKey: string | undefined = undefined,
+	baseUrl: string | undefined = undefined,
+): Promise<ModelRecord> => {
 	let models = memoryCache.get<ModelRecord>(router)
 	if (models) {
 		// console.log(`[getModels] NodeCache hit for ${router} -> ${Object.keys(models).length}`)
@@ -59,6 +66,13 @@ export const getModels = async (router: RouterName, apiKey: string | undefined =
 		case "unbound":
 			models = await getUnboundModels()
 			break
+		case "litellm":
+			if (apiKey && baseUrl) {
+				models = await getLiteLLMModels(apiKey, baseUrl)
+			} else {
+				models = {}
+			}
+			break
 	}
 
 	if (Object.keys(models).length > 0) {

+ 58 - 0
src/api/providers/fetchers/litellm.ts

@@ -0,0 +1,58 @@
+import axios from "axios"
+import { COMPUTER_USE_MODELS, ModelRecord } from "../../../shared/api"
+
+/**
+ * Fetches available models from a LiteLLM server
+ *
+ * @param apiKey The API key for the LiteLLM server
+ * @param baseUrl The base URL of the LiteLLM server
+ * @returns A promise that resolves to a record of model IDs to model info
+ */
+export async function getLiteLLMModels(apiKey: string, baseUrl: string): Promise<ModelRecord> {
+	try {
+		const headers: Record<string, string> = {
+			"Content-Type": "application/json",
+		}
+
+		if (apiKey) {
+			headers["Authorization"] = `Bearer ${apiKey}`
+		}
+
+		const response = await axios.get(`${baseUrl}/v1/model/info`, { headers })
+		const models: ModelRecord = {}
+
+		const computerModels = Array.from(COMPUTER_USE_MODELS)
+
+		// Process the model info from the response
+		if (response.data && response.data.data && Array.isArray(response.data.data)) {
+			for (const model of response.data.data) {
+				const modelName = model.model_name
+				const modelInfo = model.model_info
+				const litellmModelName = model?.litellm_params?.model as string | undefined
+
+				if (!modelName || !modelInfo || !litellmModelName) continue
+
+				models[modelName] = {
+					maxTokens: modelInfo.max_tokens || 8192,
+					contextWindow: modelInfo.max_input_tokens || 200000,
+					supportsImages: Boolean(modelInfo.supports_vision),
+					// litellm_params.model may have a prefix like openrouter/
+					supportsComputerUse: computerModels.some((computer_model) =>
+						litellmModelName.endsWith(computer_model),
+					),
+					supportsPromptCache: Boolean(modelInfo.supports_prompt_caching),
+					inputPrice: modelInfo.input_cost_per_token ? modelInfo.input_cost_per_token * 1000000 : undefined,
+					outputPrice: modelInfo.output_cost_per_token
+						? modelInfo.output_cost_per_token * 1000000
+						: undefined,
+					description: `${modelName} via LiteLLM proxy`,
+				}
+			}
+		}
+
+		return models
+	} catch (error) {
+		console.error("Error fetching LiteLLM models:", error)
+		return {}
+	}
+}

+ 113 - 0
src/api/providers/litellm.ts

@@ -0,0 +1,113 @@
+import OpenAI from "openai"
+import { Anthropic } from "@anthropic-ai/sdk" // Keep for type usage only
+
+import { ApiHandlerOptions, litellmDefaultModelId, litellmDefaultModelInfo } from "../../shared/api"
+import { ApiStream, ApiStreamUsageChunk } from "../transform/stream"
+import { convertToOpenAiMessages } from "../transform/openai-format"
+import { SingleCompletionHandler } from "../index"
+import { RouterProvider } from "./router-provider"
+
+/**
+ * LiteLLM provider handler
+ *
+ * This handler uses the LiteLLM API to proxy requests to various LLM providers.
+ * It follows the OpenAI API format for compatibility.
+ */
+export class LiteLLMHandler extends RouterProvider implements SingleCompletionHandler {
+	constructor(options: ApiHandlerOptions) {
+		super({
+			options,
+			name: "litellm",
+			baseURL: `${options.litellmBaseUrl || "http://localhost:4000"}`,
+			apiKey: options.litellmApiKey || "dummy-key",
+			modelId: options.litellmModelId,
+			defaultModelId: litellmDefaultModelId,
+			defaultModelInfo: litellmDefaultModelInfo,
+		})
+	}
+
+	override async *createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream {
+		const { id: modelId, info } = await this.fetchModel()
+
+		const openAiMessages: OpenAI.Chat.ChatCompletionMessageParam[] = [
+			{ role: "system", content: systemPrompt },
+			...convertToOpenAiMessages(messages),
+		]
+
+		// Required by some providers; others default to max tokens allowed
+		let maxTokens: number | undefined = info.maxTokens ?? undefined
+
+		const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming = {
+			model: modelId,
+			max_tokens: maxTokens,
+			messages: openAiMessages,
+			stream: true,
+			stream_options: {
+				include_usage: true,
+			},
+		}
+
+		if (this.supportsTemperature(modelId)) {
+			requestOptions.temperature = this.options.modelTemperature ?? 0
+		}
+
+		try {
+			const { data: completion } = await this.client.chat.completions.create(requestOptions).withResponse()
+
+			let lastUsage
+
+			for await (const chunk of completion) {
+				const delta = chunk.choices[0]?.delta
+				const usage = chunk.usage as OpenAI.CompletionUsage
+
+				if (delta?.content) {
+					yield { type: "text", text: delta.content }
+				}
+
+				if (usage) {
+					lastUsage = usage
+				}
+			}
+
+			if (lastUsage) {
+				const usageData: ApiStreamUsageChunk = {
+					type: "usage",
+					inputTokens: lastUsage.prompt_tokens || 0,
+					outputTokens: lastUsage.completion_tokens || 0,
+				}
+
+				yield usageData
+			}
+		} catch (error) {
+			if (error instanceof Error) {
+				throw new Error(`LiteLLM streaming error: ${error.message}`)
+			}
+			throw error
+		}
+	}
+
+	async completePrompt(prompt: string): Promise<string> {
+		const { id: modelId, info } = await this.fetchModel()
+
+		try {
+			const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming = {
+				model: modelId,
+				messages: [{ role: "user", content: prompt }],
+			}
+
+			if (this.supportsTemperature(modelId)) {
+				requestOptions.temperature = this.options.modelTemperature ?? 0
+			}
+
+			requestOptions.max_tokens = info.maxTokens
+
+			const response = await this.client.chat.completions.create(requestOptions)
+			return response.choices[0]?.message.content || ""
+		} catch (error) {
+			if (error instanceof Error) {
+				throw new Error(`LiteLLM completion error: ${error.message}`)
+			}
+			throw error
+		}
+	}
+}

+ 1 - 1
src/api/providers/router-provider.ts

@@ -44,7 +44,7 @@ export abstract class RouterProvider extends BaseProvider {
 	}
 
 	public async fetchModel() {
-		this.models = await getModels(this.name)
+		this.models = await getModels(this.name, this.client.apiKey, this.client.baseURL)
 		return this.getModel()
 	}
 

+ 3 - 1
src/core/webview/webviewMessageHandler.ts

@@ -289,11 +289,12 @@ export const webviewMessageHandler = async (provider: ClineProvider, message: We
 		case "requestRouterModels":
 			const { apiConfiguration } = await provider.getState()
 
-			const [openRouterModels, requestyModels, glamaModels, unboundModels] = await Promise.all([
+			const [openRouterModels, requestyModels, glamaModels, unboundModels, litellmModels] = await Promise.all([
 				getModels("openrouter", apiConfiguration.openRouterApiKey),
 				getModels("requesty", apiConfiguration.requestyApiKey),
 				getModels("glama", apiConfiguration.glamaApiKey),
 				getModels("unbound", apiConfiguration.unboundApiKey),
+				getModels("litellm", apiConfiguration.litellmApiKey, apiConfiguration.litellmBaseUrl),
 			])
 
 			provider.postMessageToWebview({
@@ -303,6 +304,7 @@ export const webviewMessageHandler = async (provider: ClineProvider, message: We
 					requesty: requestyModels,
 					glama: glamaModels,
 					unbound: unboundModels,
+					litellm: litellmModels,
 				},
 			})
 			break

+ 5 - 0
src/exports/roo-code.d.ts

@@ -23,6 +23,7 @@ type ProviderSettings = {
 				| "xai"
 				| "groq"
 				| "chutes"
+				| "litellm"
 		  )
 		| undefined
 	apiModelId?: string | undefined
@@ -123,6 +124,9 @@ type ProviderSettings = {
 	xaiApiKey?: string | undefined
 	groqApiKey?: string | undefined
 	chutesApiKey?: string | undefined
+	litellmBaseUrl?: string | undefined
+	litellmApiKey?: string | undefined
+	litellmModelId?: string | undefined
 	modelMaxTokens?: number | undefined
 	modelMaxThinkingTokens?: number | undefined
 	includeMaxTokens?: boolean | undefined
@@ -163,6 +167,7 @@ type GlobalSettings = {
 							| "xai"
 							| "groq"
 							| "chutes"
+							| "litellm"
 					  )
 					| undefined
 		  }[]

+ 5 - 0
src/exports/types.ts

@@ -24,6 +24,7 @@ type ProviderSettings = {
 				| "xai"
 				| "groq"
 				| "chutes"
+				| "litellm"
 		  )
 		| undefined
 	apiModelId?: string | undefined
@@ -124,6 +125,9 @@ type ProviderSettings = {
 	xaiApiKey?: string | undefined
 	groqApiKey?: string | undefined
 	chutesApiKey?: string | undefined
+	litellmBaseUrl?: string | undefined
+	litellmApiKey?: string | undefined
+	litellmModelId?: string | undefined
 	modelMaxTokens?: number | undefined
 	modelMaxThinkingTokens?: number | undefined
 	includeMaxTokens?: boolean | undefined
@@ -166,6 +170,7 @@ type GlobalSettings = {
 							| "xai"
 							| "groq"
 							| "chutes"
+							| "litellm"
 					  )
 					| undefined
 		  }[]

+ 11 - 0
src/schemas/index.ts

@@ -31,6 +31,7 @@ export const providerNames = [
 	"xai",
 	"groq",
 	"chutes",
+	"litellm",
 ] as const
 
 export const providerNamesSchema = z.enum(providerNames)
@@ -429,6 +430,10 @@ export const providerSettingsSchema = z.object({
 	groqApiKey: z.string().optional(),
 	// Chutes AI
 	chutesApiKey: z.string().optional(),
+	// LiteLLM
+	litellmBaseUrl: z.string().optional(),
+	litellmApiKey: z.string().optional(),
+	litellmModelId: z.string().optional(),
 	// Claude 3.7 Sonnet Thinking
 	modelMaxTokens: z.number().optional(),
 	modelMaxThinkingTokens: z.number().optional(),
@@ -538,6 +543,10 @@ const providerSettingsRecord: ProviderSettingsRecord = {
 	groqApiKey: undefined,
 	// Chutes AI
 	chutesApiKey: undefined,
+	// LiteLLM
+	litellmBaseUrl: undefined,
+	litellmApiKey: undefined,
+	litellmModelId: undefined,
 }
 
 export const PROVIDER_SETTINGS_KEYS = Object.keys(providerSettingsRecord) as Keys<ProviderSettings>[]
@@ -732,6 +741,7 @@ export type SecretState = Pick<
 	| "xaiApiKey"
 	| "groqApiKey"
 	| "chutesApiKey"
+	| "litellmApiKey"
 >
 
 type SecretStateRecord = Record<Keys<SecretState>, undefined>
@@ -753,6 +763,7 @@ const secretStateRecord: SecretStateRecord = {
 	xaiApiKey: undefined,
 	groqApiKey: undefined,
 	chutesApiKey: undefined,
+	litellmApiKey: undefined,
 }
 
 export const SECRET_STATE_KEYS = Object.keys(secretStateRecord) as Keys<SecretState>[]

+ 15 - 1
src/shared/api.ts

@@ -1136,6 +1136,20 @@ export const unboundDefaultModelInfo: ModelInfo = {
 	cacheReadsPrice: 0.3,
 }
 
+// LiteLLM
+// https://docs.litellm.ai/
+export const litellmDefaultModelId = "anthropic/claude-3-7-sonnet-20250219"
+export const litellmDefaultModelInfo: ModelInfo = {
+	maxTokens: 8192,
+	contextWindow: 200_000,
+	supportsImages: true,
+	supportsComputerUse: true,
+	supportsPromptCache: true,
+	inputPrice: 3.0,
+	outputPrice: 15.0,
+	cacheWritesPrice: 3.75,
+	cacheReadsPrice: 0.3,
+}
 // xAI
 // https://docs.x.ai/docs/api-reference
 export type XAIModelId = keyof typeof xaiModels
@@ -1731,7 +1745,7 @@ export const COMPUTER_USE_MODELS = new Set([
 	"anthropic/claude-3.7-sonnet:thinking",
 ])
 
-const routerNames = ["openrouter", "requesty", "glama", "unbound"] as const
+const routerNames = ["openrouter", "requesty", "glama", "unbound", "litellm"] as const
 
 export type RouterName = (typeof routerNames)[number]
 

+ 20 - 0
webview-ui/src/components/settings/ApiOptions.tsx

@@ -9,6 +9,7 @@ import {
 	requestyDefaultModelId,
 	glamaDefaultModelId,
 	unboundDefaultModelId,
+	litellmDefaultModelId,
 } from "@roo/shared/api"
 
 import { vscode } from "@src/utils/vscode"
@@ -27,6 +28,7 @@ import {
 	Glama,
 	Groq,
 	LMStudio,
+	LiteLLM,
 	Mistral,
 	Ollama,
 	OpenAI,
@@ -171,6 +173,8 @@ const ApiOptions = ({
 				vscode.postMessage({ type: "requestLmStudioModels", text: apiConfiguration?.lmStudioBaseUrl })
 			} else if (selectedProvider === "vscode-lm") {
 				vscode.postMessage({ type: "requestVsCodeLmModels" })
+			} else if (selectedProvider === "litellm") {
+				vscode.postMessage({ type: "requestRouterModels" })
 			}
 		},
 		250,
@@ -181,6 +185,8 @@ const ApiOptions = ({
 			apiConfiguration?.openAiApiKey,
 			apiConfiguration?.ollamaBaseUrl,
 			apiConfiguration?.lmStudioBaseUrl,
+			apiConfiguration?.litellmBaseUrl,
+			apiConfiguration?.litellmApiKey,
 			customHeaders,
 		],
 	)
@@ -233,6 +239,11 @@ const ApiOptions = ({
 						setApiConfigurationField("requestyModelId", requestyDefaultModelId)
 					}
 					break
+				case "litellm":
+					if (!apiConfiguration.litellmModelId) {
+						setApiConfigurationField("litellmModelId", litellmDefaultModelId)
+					}
+					break
 			}
 
 			setApiConfigurationField("apiProvider", value)
@@ -243,6 +254,7 @@ const ApiOptions = ({
 			apiConfiguration.glamaModelId,
 			apiConfiguration.unboundModelId,
 			apiConfiguration.requestyModelId,
+			apiConfiguration.litellmModelId,
 		],
 	)
 
@@ -395,6 +407,14 @@ const ApiOptions = ({
 				<Chutes apiConfiguration={apiConfiguration} setApiConfigurationField={setApiConfigurationField} />
 			)}
 
+			{selectedProvider === "litellm" && (
+				<LiteLLM
+					apiConfiguration={apiConfiguration}
+					setApiConfigurationField={setApiConfigurationField}
+					routerModels={routerModels}
+				/>
+			)}
+
 			{selectedProvider === "human-relay" && (
 				<>
 					<div className="text-sm text-vscode-descriptionForeground">

+ 1 - 1
webview-ui/src/components/settings/ModelPicker.tsx

@@ -26,7 +26,7 @@ import { ModelInfoView } from "./ModelInfoView"
 
 type ModelIdKey = keyof Pick<
 	ProviderSettings,
-	"glamaModelId" | "openRouterModelId" | "unboundModelId" | "requestyModelId" | "openAiModelId"
+	"glamaModelId" | "openRouterModelId" | "unboundModelId" | "requestyModelId" | "openAiModelId" | "litellmModelId"
 >
 
 interface ModelPickerProps {

+ 1 - 0
webview-ui/src/components/settings/constants.ts

@@ -50,6 +50,7 @@ export const PROVIDERS = [
 	{ value: "xai", label: "xAI (Grok)" },
 	{ value: "groq", label: "Groq" },
 	{ value: "chutes", label: "Chutes AI" },
+	{ value: "litellm", label: "LiteLLM" },
 ].sort((a, b) => a.label.localeCompare(b.label))
 
 export const VERTEX_REGIONS = [

+ 64 - 0
webview-ui/src/components/settings/providers/LiteLLM.tsx

@@ -0,0 +1,64 @@
+import { useCallback } from "react"
+import { VSCodeTextField } from "@vscode/webview-ui-toolkit/react"
+
+import { ApiConfiguration, RouterModels, litellmDefaultModelId } from "@roo/shared/api"
+
+import { useAppTranslation } from "@src/i18n/TranslationContext"
+
+import { inputEventTransform } from "../transforms"
+import { ModelPicker } from "../ModelPicker"
+
+type LiteLLMProps = {
+	apiConfiguration: ApiConfiguration
+	setApiConfigurationField: (field: keyof ApiConfiguration, value: ApiConfiguration[keyof ApiConfiguration]) => void
+	routerModels?: RouterModels
+}
+
+export const LiteLLM = ({ apiConfiguration, setApiConfigurationField, routerModels }: LiteLLMProps) => {
+	const { t } = useAppTranslation()
+
+	const handleInputChange = useCallback(
+		<K extends keyof ApiConfiguration, E>(
+			field: K,
+			transform: (event: E) => ApiConfiguration[K] = inputEventTransform,
+		) =>
+			(event: E | Event) => {
+				setApiConfigurationField(field, transform(event as E))
+			},
+		[setApiConfigurationField],
+	)
+
+	return (
+		<>
+			<VSCodeTextField
+				value={apiConfiguration?.litellmBaseUrl || "http://localhost:4000"}
+				onInput={handleInputChange("litellmBaseUrl")}
+				placeholder="http://localhost:4000"
+				className="w-full">
+				<label className="block font-medium mb-1">{t("settings:providers.litellmBaseUrl")}</label>
+			</VSCodeTextField>
+
+			<VSCodeTextField
+				value={apiConfiguration?.litellmApiKey || ""}
+				type="password"
+				onInput={handleInputChange("litellmApiKey")}
+				placeholder={t("settings:placeholders.apiKey")}
+				className="w-full">
+				<label className="block font-medium mb-1">{t("settings:providers.litellmApiKey")}</label>
+			</VSCodeTextField>
+			<div className="text-sm text-vscode-descriptionForeground -mt-2">
+				{t("settings:providers.apiKeyStorageNotice")}
+			</div>
+
+			<ModelPicker
+				apiConfiguration={apiConfiguration}
+				defaultModelId={litellmDefaultModelId}
+				models={routerModels?.litellm ?? {}}
+				modelIdKey="litellmModelId"
+				serviceName="LiteLLM"
+				serviceUrl="https://docs.litellm.ai/"
+				setApiConfigurationField={setApiConfigurationField}
+			/>
+		</>
+	)
+}

+ 1 - 0
webview-ui/src/components/settings/providers/index.ts

@@ -16,3 +16,4 @@ export { Unbound } from "./Unbound"
 export { Vertex } from "./Vertex"
 export { VSCodeLM } from "./VSCodeLM"
 export { XAI } from "./XAI"
+export { LiteLLM } from "./LiteLLM"

+ 8 - 0
webview-ui/src/components/ui/hooks/useSelectedModel.ts

@@ -30,6 +30,7 @@ import {
 	requestyDefaultModelId,
 	glamaDefaultModelId,
 	unboundDefaultModelId,
+	litellmDefaultModelId,
 } from "@roo/shared/api"
 
 import { useRouterModels } from "./useRouterModels"
@@ -82,6 +83,13 @@ function getSelectedModel({
 				? { id, info }
 				: { id: unboundDefaultModelId, info: routerModels.unbound[unboundDefaultModelId] }
 		}
+		case "litellm": {
+			const id = apiConfiguration.litellmModelId ?? litellmDefaultModelId
+			const info = routerModels.litellm[id]
+			return info
+				? { id, info }
+				: { id: litellmDefaultModelId, info: routerModels.litellm[litellmDefaultModelId] }
+		}
 		case "xai": {
 			const id = apiConfiguration.apiModelId ?? xaiDefaultModelId
 			const info = xaiModels[id as keyof typeof xaiModels]

+ 2 - 0
webview-ui/src/i18n/locales/ca/settings.json

@@ -142,6 +142,8 @@
 		"codestralBaseUrlDesc": "Establir una URL alternativa per al model Codestral.",
 		"xaiApiKey": "Clau API de xAI",
 		"getXaiApiKey": "Obtenir clau API de xAI",
+		"litellmApiKey": "Clau API de LiteLLM",
+		"litellmBaseUrl": "URL base de LiteLLM",
 		"awsCredentials": "Credencials d'AWS",
 		"awsProfile": "Perfil d'AWS",
 		"awsProfileName": "Nom del perfil d'AWS",

+ 2 - 0
webview-ui/src/i18n/locales/de/settings.json

@@ -142,6 +142,8 @@
 		"codestralBaseUrlDesc": "Legen Sie eine alternative URL für das Codestral-Modell fest.",
 		"xaiApiKey": "xAI API-Schlüssel",
 		"getXaiApiKey": "xAI API-Schlüssel erhalten",
+		"litellmApiKey": "LiteLLM API-Schlüssel",
+		"litellmBaseUrl": "LiteLLM Basis-URL",
 		"awsCredentials": "AWS Anmeldedaten",
 		"awsProfile": "AWS Profil",
 		"awsProfileName": "AWS Profilname",

+ 2 - 0
webview-ui/src/i18n/locales/en/settings.json

@@ -142,6 +142,8 @@
 		"codestralBaseUrlDesc": "Set an alternative URL for the Codestral model.",
 		"xaiApiKey": "xAI API Key",
 		"getXaiApiKey": "Get xAI API Key",
+		"litellmApiKey": "LiteLLM API Key",
+		"litellmBaseUrl": "LiteLLM Base URL",
 		"awsCredentials": "AWS Credentials",
 		"awsProfile": "AWS Profile",
 		"awsProfileName": "AWS Profile Name",

+ 2 - 0
webview-ui/src/i18n/locales/es/settings.json

@@ -142,6 +142,8 @@
 		"codestralBaseUrlDesc": "Establecer una URL alternativa para el modelo Codestral.",
 		"xaiApiKey": "Clave API de xAI",
 		"getXaiApiKey": "Obtener clave API de xAI",
+		"litellmApiKey": "Clave API de LiteLLM",
+		"litellmBaseUrl": "URL base de LiteLLM",
 		"awsCredentials": "Credenciales de AWS",
 		"awsProfile": "Perfil de AWS",
 		"awsProfileName": "Nombre del perfil de AWS",

+ 2 - 0
webview-ui/src/i18n/locales/fr/settings.json

@@ -142,6 +142,8 @@
 		"codestralBaseUrlDesc": "Définir une URL alternative pour le modèle Codestral.",
 		"xaiApiKey": "Clé API xAI",
 		"getXaiApiKey": "Obtenir la clé API xAI",
+		"litellmApiKey": "Clé API LiteLLM",
+		"litellmBaseUrl": "URL de base LiteLLM",
 		"awsCredentials": "Identifiants AWS",
 		"awsProfile": "Profil AWS",
 		"awsProfileName": "Nom du profil AWS",

+ 2 - 0
webview-ui/src/i18n/locales/hi/settings.json

@@ -142,6 +142,8 @@
 		"codestralBaseUrlDesc": "Codestral मॉडल के लिए वैकल्पिक URL सेट करें।",
 		"xaiApiKey": "xAI API कुंजी",
 		"getXaiApiKey": "xAI API कुंजी प्राप्त करें",
+		"litellmApiKey": "LiteLLM API कुंजी",
+		"litellmBaseUrl": "LiteLLM आधार URL",
 		"awsCredentials": "AWS क्रेडेंशियल्स",
 		"awsProfile": "AWS प्रोफाइल",
 		"awsProfileName": "AWS प्रोफाइल नाम",

+ 2 - 0
webview-ui/src/i18n/locales/it/settings.json

@@ -142,6 +142,8 @@
 		"codestralBaseUrlDesc": "Imposta un URL opzionale per i modelli Codestral.",
 		"xaiApiKey": "Chiave API xAI",
 		"getXaiApiKey": "Ottieni chiave API xAI",
+		"litellmApiKey": "Chiave API LiteLLM",
+		"litellmBaseUrl": "URL base LiteLLM",
 		"awsCredentials": "Credenziali AWS",
 		"awsProfile": "Profilo AWS",
 		"awsProfileName": "Nome profilo AWS",

+ 2 - 0
webview-ui/src/i18n/locales/ja/settings.json

@@ -142,6 +142,8 @@
 		"codestralBaseUrlDesc": "Codestralモデルの代替URLを設定します。",
 		"xaiApiKey": "xAI APIキー",
 		"getXaiApiKey": "xAI APIキーを取得",
+		"litellmApiKey": "LiteLLM APIキー",
+		"litellmBaseUrl": "LiteLLM ベースURL",
 		"awsCredentials": "AWS認証情報",
 		"awsProfile": "AWSプロファイル",
 		"awsProfileName": "AWSプロファイル名",

+ 2 - 0
webview-ui/src/i18n/locales/ko/settings.json

@@ -142,6 +142,8 @@
 		"codestralBaseUrlDesc": "Codestral 모델의 대체 URL을 설정합니다.",
 		"xaiApiKey": "xAI API 키",
 		"getXaiApiKey": "xAI API 키 받기",
+		"litellmApiKey": "LiteLLM API 키",
+		"litellmBaseUrl": "LiteLLM 기본 URL",
 		"awsCredentials": "AWS 자격 증명",
 		"awsProfile": "AWS 프로필",
 		"awsProfileName": "AWS 프로필 이름",

+ 2 - 0
webview-ui/src/i18n/locales/nl/settings.json

@@ -69,6 +69,8 @@
 		"codestralBaseUrlDesc": "Stel een alternatieve URL in voor het Codestral-model.",
 		"xaiApiKey": "xAI API-sleutel",
 		"getXaiApiKey": "xAI API-sleutel ophalen",
+		"litellmApiKey": "LiteLLM API-sleutel",
+		"litellmBaseUrl": "LiteLLM basis-URL",
 		"awsCredentials": "AWS-inloggegevens",
 		"awsProfile": "AWS-profiel",
 		"awsProfileName": "AWS-profielnaam",

+ 2 - 0
webview-ui/src/i18n/locales/pl/settings.json

@@ -142,6 +142,8 @@
 		"codestralBaseUrlDesc": "Ustaw opcjonalny URL dla modeli Codestral.",
 		"xaiApiKey": "Klucz API xAI",
 		"getXaiApiKey": "Uzyskaj klucz API xAI",
+		"litellmApiKey": "Klucz API LiteLLM",
+		"litellmBaseUrl": "URL bazowy LiteLLM",
 		"awsCredentials": "Poświadczenia AWS",
 		"awsProfile": "Profil AWS",
 		"awsProfileName": "Nazwa profilu AWS",

+ 2 - 0
webview-ui/src/i18n/locales/pt-BR/settings.json

@@ -142,6 +142,8 @@
 		"codestralBaseUrlDesc": "Defina uma URL alternativa para o modelo Codestral.",
 		"xaiApiKey": "Chave de API xAI",
 		"getXaiApiKey": "Obter chave de API xAI",
+		"litellmApiKey": "Chave API LiteLLM",
+		"litellmBaseUrl": "URL base LiteLLM",
 		"awsCredentials": "Credenciais AWS",
 		"awsProfile": "Perfil AWS",
 		"awsProfileName": "Nome do Perfil AWS",

+ 2 - 0
webview-ui/src/i18n/locales/ru/settings.json

@@ -142,6 +142,8 @@
 		"codestralBaseUrlDesc": "Укажите альтернативный URL для модели Codestral.",
 		"xaiApiKey": "xAI API-ключ",
 		"getXaiApiKey": "Получить xAI API-ключ",
+		"litellmApiKey": "API-ключ LiteLLM",
+		"litellmBaseUrl": "Базовый URL LiteLLM",
 		"awsCredentials": "AWS-учётные данные",
 		"awsProfile": "Профиль AWS",
 		"awsProfileName": "Имя профиля AWS",

+ 2 - 0
webview-ui/src/i18n/locales/tr/settings.json

@@ -142,6 +142,8 @@
 		"codestralBaseUrlDesc": "Codestral modeli için alternatif URL ayarlayın.",
 		"xaiApiKey": "xAI API Anahtarı",
 		"getXaiApiKey": "xAI API Anahtarı Al",
+		"litellmApiKey": "LiteLLM API Anahtarı",
+		"litellmBaseUrl": "LiteLLM Temel URL",
 		"awsCredentials": "AWS Kimlik Bilgileri",
 		"awsProfile": "AWS Profili",
 		"awsProfileName": "AWS Profil Adı",

+ 2 - 0
webview-ui/src/i18n/locales/vi/settings.json

@@ -137,6 +137,8 @@
 		"codestralBaseUrlDesc": "Đặt URL thay thế cho mô hình Codestral.",
 		"xaiApiKey": "Khóa API xAI",
 		"getXaiApiKey": "Lấy khóa API xAI",
+		"litellmApiKey": "Khóa API LiteLLM",
+		"litellmBaseUrl": "URL cơ sở LiteLLM",
 		"awsCredentials": "Thông tin xác thực AWS",
 		"awsProfile": "Hồ sơ AWS",
 		"awsProfileName": "Tên hồ sơ AWS",

+ 2 - 0
webview-ui/src/i18n/locales/zh-CN/settings.json

@@ -142,6 +142,8 @@
 		"codestralBaseUrlDesc": "为 Codestral 模型设置替代 URL。",
 		"xaiApiKey": "xAI API 密钥",
 		"getXaiApiKey": "获取 xAI API 密钥",
+		"litellmApiKey": "LiteLLM API 密钥",
+		"litellmBaseUrl": "LiteLLM 基础 URL",
 		"awsCredentials": "AWS 凭证",
 		"awsProfile": "AWS 配置文件",
 		"awsProfileName": "AWS 配置文件名称",

+ 2 - 0
webview-ui/src/i18n/locales/zh-TW/settings.json

@@ -142,6 +142,8 @@
 		"codestralBaseUrlDesc": "設定 Codestral 模型的替代 URL。",
 		"xaiApiKey": "xAI API 金鑰",
 		"getXaiApiKey": "取得 xAI API 金鑰",
+		"litellmApiKey": "LiteLLM API 金鑰",
+		"litellmBaseUrl": "LiteLLM 基礎 URL",
 		"awsCredentials": "AWS 認證",
 		"awsProfile": "AWS Profile",
 		"awsProfileName": "AWS Profile 名稱",

+ 8 - 0
webview-ui/src/utils/validate.ts

@@ -24,6 +24,11 @@ export function validateApiConfiguration(apiConfiguration: ApiConfiguration): st
 				return i18next.t("settings:validation.apiKey")
 			}
 			break
+		case "litellm":
+			if (!apiConfiguration.litellmApiKey) {
+				return i18next.t("settings:validation.apiKey")
+			}
+			break
 		case "anthropic":
 			if (!apiConfiguration.apiKey) {
 				return i18next.t("settings:validation.apiKey")
@@ -135,6 +140,9 @@ export function validateModelId(apiConfiguration: ApiConfiguration, routerModels
 		case "requesty":
 			modelId = apiConfiguration.requestyModelId
 			break
+		case "litellm":
+			modelId = apiConfiguration.litellmModelId
+			break
 	}
 
 	if (!modelId) {