Просмотр исходного кода

Support 128k max tokens for openrouter thinking

Matt Rubens 10 месяцев назад
Родитель
Commit
63cecf8b22
2 измененных файлов с 2 добавлено и 2 удалено
  1. 1 1
      src/api/providers/__tests__/openrouter.test.ts
  2. 1 1
      src/api/providers/openrouter.ts

+ 1 - 1
src/api/providers/__tests__/openrouter.test.ts

@@ -72,7 +72,7 @@ describe("OpenRouterHandler", () => {
 			openRouterModelId: "test-model",
 			openRouterModelInfo: {
 				...mockOpenRouterModelInfo,
-				maxTokens: 64_000,
+				maxTokens: 128_000,
 				thinking: true,
 			},
 			modelMaxTokens: 32_768,

+ 1 - 1
src/api/providers/openrouter.ts

@@ -263,7 +263,7 @@ export async function getOpenRouterModels() {
 					modelInfo.supportsPromptCache = true
 					modelInfo.cacheWritesPrice = 3.75
 					modelInfo.cacheReadsPrice = 0.3
-					modelInfo.maxTokens = rawModel.id === "anthropic/claude-3.7-sonnet:thinking" ? 64_000 : 16_384
+					modelInfo.maxTokens = rawModel.id === "anthropic/claude-3.7-sonnet:thinking" ? 128_000 : 16_384
 					break
 				case rawModel.id.startsWith("anthropic/claude-3.5-sonnet-20240620"):
 					modelInfo.supportsPromptCache = true