Explorar o código

Fix LiteLLM test failures after merge (#8870)

* Use monotonic clock for rate limiting

* Fix LiteLLM test failures after merge

- Remove supportsComputerUse from the LiteLLM implementation, as it is no longer part of the ModelInfo interface
- Update test expectations to include cacheWritesPrice and cacheReadsPrice fields
- Fix the test covering the max_output_tokens preference (max_output_tokens is preferred over max_tokens)

---------

Co-authored-by: Christiaan Arnoldus <[email protected]>
Daniel, 4 months ago
parent
commit
fceb413047

+ 6 - 3
src/api/providers/fetchers/__tests__/litellm.spec.ts

@@ -645,10 +645,11 @@ describe("getLiteLLMModels", () => {
 			maxTokens: 64000,
 			contextWindow: 200000,
 			supportsImages: true,
-			supportsComputerUse: true,
 			supportsPromptCache: false,
 			inputPrice: undefined,
 			outputPrice: undefined,
+			cacheWritesPrice: undefined,
+			cacheReadsPrice: undefined,
 			description: "claude-3-5-sonnet-4-5 via LiteLLM proxy",
 		})
 
@@ -657,10 +658,11 @@ describe("getLiteLLMModels", () => {
 			maxTokens: 8192,
 			contextWindow: 128000,
 			supportsImages: false,
-			supportsComputerUse: false,
 			supportsPromptCache: false,
 			inputPrice: undefined,
 			outputPrice: undefined,
+			cacheWritesPrice: undefined,
+			cacheReadsPrice: undefined,
 			description: "model-with-only-max-tokens via LiteLLM proxy",
 		})
 
@@ -669,10 +671,11 @@ describe("getLiteLLMModels", () => {
 			maxTokens: 16384,
 			contextWindow: 100000,
 			supportsImages: false,
-			supportsComputerUse: false,
 			supportsPromptCache: false,
 			inputPrice: undefined,
 			outputPrice: undefined,
+			cacheWritesPrice: undefined,
+			cacheReadsPrice: undefined,
 			description: "model-with-only-max-output-tokens via LiteLLM proxy",
 		})
 	})

+ 0 - 1
src/api/providers/fetchers/litellm.ts

@@ -44,7 +44,6 @@ export async function getLiteLLMModels(apiKey: string, baseUrl: string): Promise
 					maxTokens: modelInfo.max_output_tokens || modelInfo.max_tokens || 8192,
 					contextWindow: modelInfo.max_input_tokens || 200000,
 					supportsImages: Boolean(modelInfo.supports_vision),
-					// litellm_params.model may have a prefix like openrouter/
 					supportsPromptCache: Boolean(modelInfo.supports_prompt_caching),
 					inputPrice: modelInfo.input_cost_per_token ? modelInfo.input_cost_per_token * 1000000 : undefined,
 					outputPrice: modelInfo.output_cost_per_token