소스 검색

Added support for dynamic litellm supports_computer_use (#4027)

slytechnical 7달 전
부모
커밋
9d4b4ebff0
2개의 변경된 파일에 29개의 추가 그리고 9개의 삭제
  1. +27 -3
      src/api/providers/fetchers/__tests__/litellm.test.ts
  2. +2 -6
      src/api/providers/fetchers/litellm.ts

+ 27 - 3
src/api/providers/fetchers/__tests__/litellm.test.ts

@@ -1,6 +1,5 @@
 import axios from "axios"
 import { getLiteLLMModels } from "../litellm"
-import { OPEN_ROUTER_COMPUTER_USE_MODELS } from "../../../../shared/api"
 
 // Mock axios
 jest.mock("axios")
@@ -26,6 +25,7 @@ describe("getLiteLLMModels", () => {
 							supports_prompt_caching: false,
 							input_cost_per_token: 0.000003,
 							output_cost_per_token: 0.000015,
+							supports_computer_use: true,
 						},
 						litellm_params: {
 							model: "anthropic/claude-3.5-sonnet",
@@ -40,6 +40,7 @@ describe("getLiteLLMModels", () => {
 							supports_prompt_caching: false,
 							input_cost_per_token: 0.00001,
 							output_cost_per_token: 0.00003,
+							supports_computer_use: false,
 						},
 						litellm_params: {
 							model: "openai/gpt-4-turbo",
@@ -105,7 +106,6 @@ describe("getLiteLLMModels", () => {
 	})
 
 	it("handles computer use models correctly", async () => {
-		const computerUseModel = Array.from(OPEN_ROUTER_COMPUTER_USE_MODELS)[0]
 		const mockResponse = {
 			data: {
 				data: [
@@ -115,9 +115,22 @@ describe("getLiteLLMModels", () => {
 							max_tokens: 4096,
 							max_input_tokens: 200000,
 							supports_vision: true,
+							supports_computer_use: true,
 						},
 						litellm_params: {
-							model: `anthropic/${computerUseModel}`,
+							model: `anthropic/test-computer-model`,
+						},
+					},
+					{
+						model_name: "test-non-computer-model",
+						model_info: {
+							max_tokens: 4096,
+							max_input_tokens: 200000,
+							supports_vision: false,
+							supports_computer_use: false,
+						},
+						litellm_params: {
+							model: `anthropic/test-non-computer-model`,
 						},
 					},
 				],
@@ -138,6 +151,17 @@ describe("getLiteLLMModels", () => {
 			outputPrice: undefined,
 			description: "test-computer-model via LiteLLM proxy",
 		})
+
+		expect(result["test-non-computer-model"]).toEqual({
+			maxTokens: 4096,
+			contextWindow: 200000,
+			supportsImages: false,
+			supportsComputerUse: false,
+			supportsPromptCache: false,
+			inputPrice: undefined,
+			outputPrice: undefined,
+			description: "test-non-computer-model via LiteLLM proxy",
+		})
 	})
 
 	it("throws error for unexpected response format", async () => {

+ 2 - 6
src/api/providers/fetchers/litellm.ts

@@ -1,6 +1,6 @@
 import axios from "axios"
 
-import { OPEN_ROUTER_COMPUTER_USE_MODELS, ModelRecord } from "../../../shared/api"
+import { ModelRecord } from "../../../shared/api"
 
 /**
  * Fetches available models from a LiteLLM server
@@ -23,8 +23,6 @@ export async function getLiteLLMModels(apiKey: string, baseUrl: string): Promise
 		const response = await axios.get(`${baseUrl}/v1/model/info`, { headers, timeout: 5000 })
 		const models: ModelRecord = {}
 
-		const computerModels = Array.from(OPEN_ROUTER_COMPUTER_USE_MODELS)
-
 		// Process the model info from the response
 		if (response.data && response.data.data && Array.isArray(response.data.data)) {
 			for (const model of response.data.data) {
@@ -39,9 +37,7 @@ export async function getLiteLLMModels(apiKey: string, baseUrl: string): Promise
 					contextWindow: modelInfo.max_input_tokens || 200000,
 					supportsImages: Boolean(modelInfo.supports_vision),
 					// litellm_params.model may have a prefix like openrouter/
-					supportsComputerUse: computerModels.some((computer_model) =>
-						litellmModelName.endsWith(computer_model),
-					),
+					supportsComputerUse: Boolean(modelInfo.supports_computer_use),
 					supportsPromptCache: Boolean(modelInfo.supports_prompt_caching),
 					inputPrice: modelInfo.input_cost_per_token ? modelInfo.input_cost_per_token * 1000000 : undefined,
 					outputPrice: modelInfo.output_cost_per_token