Procházet zdrojové kódy

fix: copy model-level capabilities to OpenRouter endpoint models (#9483)

Daniel před 1 měsícem
rodič
revize
f6d3fdfff6

+ 166 - 0
src/api/providers/fetchers/__tests__/modelEndpointCache.spec.ts

@@ -0,0 +1,166 @@
+// npx vitest run api/providers/fetchers/__tests__/modelEndpointCache.spec.ts
+
+import { vi, describe, it, expect, beforeEach } from "vitest"
+import { getModelEndpoints } from "../modelEndpointCache"
+import * as modelCache from "../modelCache"
+import * as openrouter from "../openrouter"
+
+vi.mock("../modelCache")
+vi.mock("../openrouter")
+
+describe("modelEndpointCache", () => {
+	beforeEach(() => {
+		vi.clearAllMocks()
+	})
+
+	describe("getModelEndpoints", () => {
+		it("should copy model-level capabilities from parent model to endpoints", async () => {
+			// Mock the parent model data with native tools support
+			const mockParentModels = {
+				"anthropic/claude-sonnet-4": {
+					maxTokens: 8192,
+					contextWindow: 200000,
+					supportsImages: true,
+					supportsPromptCache: true,
+					supportsNativeTools: true, // Parent supports native tools
+					supportsReasoningEffort: true,
+					supportedParameters: ["max_tokens", "temperature", "reasoning"] as any,
+					inputPrice: 3,
+					outputPrice: 15,
+				},
+			}
+
+			// Mock endpoint data WITHOUT capabilities (as returned by API)
+			const mockEndpoints = {
+				anthropic: {
+					maxTokens: 8192,
+					contextWindow: 200000,
+					supportsImages: true,
+					supportsPromptCache: true,
+					inputPrice: 3,
+					outputPrice: 15,
+					// Note: No supportsNativeTools, supportsReasoningEffort, or supportedParameters
+				},
+				"amazon-bedrock": {
+					maxTokens: 8192,
+					contextWindow: 200000,
+					supportsImages: true,
+					supportsPromptCache: true,
+					inputPrice: 3,
+					outputPrice: 15,
+				},
+			}
+
+			vi.spyOn(modelCache, "getModels").mockResolvedValue(mockParentModels as any)
+			vi.spyOn(openrouter, "getOpenRouterModelEndpoints").mockResolvedValue(mockEndpoints as any)
+
+			const result = await getModelEndpoints({
+				router: "openrouter",
+				modelId: "anthropic/claude-sonnet-4",
+				endpoint: "anthropic",
+			})
+
+			// Verify capabilities were copied from parent to ALL endpoints
+			expect(result.anthropic.supportsNativeTools).toBe(true)
+			expect(result.anthropic.supportsReasoningEffort).toBe(true)
+			expect(result.anthropic.supportedParameters).toEqual(["max_tokens", "temperature", "reasoning"])
+
+			expect(result["amazon-bedrock"].supportsNativeTools).toBe(true)
+			expect(result["amazon-bedrock"].supportsReasoningEffort).toBe(true)
+			expect(result["amazon-bedrock"].supportedParameters).toEqual(["max_tokens", "temperature", "reasoning"])
+		})
+
+		it("should create independent array copies to avoid shared references", async () => {
+			const mockParentModels = {
+				"test/model": {
+					maxTokens: 1000,
+					contextWindow: 10000,
+					supportsPromptCache: false,
+					supportsNativeTools: true,
+					supportedParameters: ["max_tokens", "temperature"] as any,
+				},
+			}
+
+			const mockEndpoints = {
+				"endpoint-1": {
+					maxTokens: 1000,
+					contextWindow: 10000,
+					supportsPromptCache: false,
+				},
+				"endpoint-2": {
+					maxTokens: 1000,
+					contextWindow: 10000,
+					supportsPromptCache: false,
+				},
+			}
+
+			vi.spyOn(modelCache, "getModels").mockResolvedValue(mockParentModels as any)
+			vi.spyOn(openrouter, "getOpenRouterModelEndpoints").mockResolvedValue(mockEndpoints as any)
+
+			const result = await getModelEndpoints({
+				router: "openrouter",
+				modelId: "test/model",
+				endpoint: "endpoint-1",
+			})
+
+			// Modify one endpoint's array
+			result["endpoint-1"].supportedParameters?.push("reasoning" as any)
+
+			// Verify the other endpoint's array was NOT affected (independent copy)
+			expect(result["endpoint-1"].supportedParameters).toHaveLength(3)
+			expect(result["endpoint-2"].supportedParameters).toHaveLength(2)
+		})
+
+		it("should handle missing parent model gracefully", async () => {
+			const mockParentModels = {}
+			const mockEndpoints = {
+				anthropic: {
+					maxTokens: 8192,
+					contextWindow: 200000,
+					supportsImages: true,
+					supportsPromptCache: true,
+				},
+			}
+
+			vi.spyOn(modelCache, "getModels").mockResolvedValue(mockParentModels as any)
+			vi.spyOn(openrouter, "getOpenRouterModelEndpoints").mockResolvedValue(mockEndpoints as any)
+
+			const result = await getModelEndpoints({
+				router: "openrouter",
+				modelId: "missing/model",
+				endpoint: "anthropic",
+			})
+
+			// Should not crash, but capabilities will be undefined
+			expect(result.anthropic).toBeDefined()
+			expect(result.anthropic.supportsNativeTools).toBeUndefined()
+		})
+
+		it("should return empty object for non-openrouter providers", async () => {
+			const result = await getModelEndpoints({
+				router: "vercel-ai-gateway",
+				modelId: "claude-sonnet-4",
+				endpoint: "default",
+			})
+
+			expect(result).toEqual({})
+		})
+
+		it("should return empty object when modelId or endpoint is missing", async () => {
+			const result1 = await getModelEndpoints({
+				router: "openrouter",
+				modelId: undefined,
+				endpoint: "anthropic",
+			})
+
+			const result2 = await getModelEndpoints({
+				router: "openrouter",
+				modelId: "anthropic/claude-sonnet-4",
+				endpoint: undefined,
+			})
+
+			expect(result1).toEqual({})
+			expect(result2).toEqual({})
+		})
+	})
+})

+ 158 - 8
src/api/providers/fetchers/__tests__/openrouter.spec.ts

@@ -82,9 +82,78 @@ describe("OpenRouter API", () => {
 
 
 	describe("getOpenRouterModelEndpoints", () => {
 		it("fetches model endpoints and validates schema", async () => {
-			const { nockDone } = await nockBack("openrouter-model-endpoints.json")
+			const mockEndpointsResponse = {
+				data: {
+					data: {
+						id: "google/gemini-2.5-pro-preview",
+						name: "Gemini 2.5 Pro Preview",
+						architecture: {
+							input_modalities: ["text", "image"],
+							output_modalities: ["text"],
+						},
+						endpoints: [
+							{
+								provider_name: "Google Vertex",
+								tag: "google-vertex",
+								context_length: 1048576,
+								max_completion_tokens: 65535,
+								pricing: {
+									prompt: "0.00000125",
+									completion: "0.00001",
+									input_cache_write: "0.000001625",
+									input_cache_read: "0.00000031",
+								},
+							},
+							{
+								provider_name: "Google AI Studio",
+								tag: "google-ai-studio",
+								context_length: 1048576,
+								max_completion_tokens: 65536,
+								pricing: {
+									prompt: "0.00000125",
+									completion: "0.00001",
+									input_cache_write: "0.000001625",
+									input_cache_read: "0.00000031",
+								},
+							},
+						],
+					},
+				},
+			}
+
+			// Mock cached parent model data
+			const mockCachedModels = {
+				"google/gemini-2.5-pro-preview": {
+					maxTokens: 65536,
+					contextWindow: 1048576,
+					supportsImages: true,
+					supportsPromptCache: true,
+					supportsReasoningBudget: true,
+					inputPrice: 1.25,
+					outputPrice: 10,
+					cacheWritesPrice: 1.625,
+					cacheReadsPrice: 0.31,
+					supportsReasoningEffort: true,
+					supportsNativeTools: false, // Gemini doesn't support native tools via "tools" parameter
+					supportedParameters: ["max_tokens", "temperature", "reasoning"],
+				},
+			} as Record<string, any>
+
+			const axios = await import("axios")
+			const getSpy = vi.spyOn(axios.default, "get").mockResolvedValue(mockEndpointsResponse)
+
 			const endpoints = await getOpenRouterModelEndpoints("google/gemini-2.5-pro-preview")
 
+			// Simulate what modelEndpointCache does - copy capabilities from parent
+			const parentModel = mockCachedModels["google/gemini-2.5-pro-preview"]
+			if (parentModel) {
+				for (const key of Object.keys(endpoints)) {
+					endpoints[key].supportsNativeTools = parentModel.supportsNativeTools
+					endpoints[key].supportsReasoningEffort = parentModel.supportsReasoningEffort
+					endpoints[key].supportedParameters = parentModel.supportedParameters
+				}
+			}
+
 			expect(endpoints).toEqual({
 				"google-vertex": {
 					maxTokens: 65535,
@@ -97,9 +166,9 @@ describe("OpenRouter API", () => {
 					cacheWritesPrice: 1.625,
 					cacheReadsPrice: 0.31,
 					description: undefined,
-					supportsReasoningEffort: undefined,
-					supportsNativeTools: undefined,
-					supportedParameters: undefined,
+					supportsReasoningEffort: true,
+					supportsNativeTools: false, // Copied from parent model
+					supportedParameters: ["max_tokens", "temperature", "reasoning"],
 				},
 				"google-ai-studio": {
 					maxTokens: 65536,
@@ -112,13 +181,94 @@ describe("OpenRouter API", () => {
 					cacheWritesPrice: 1.625,
 					cacheReadsPrice: 0.31,
 					description: undefined,
-					supportsReasoningEffort: undefined,
-					supportsNativeTools: undefined,
-					supportedParameters: undefined,
+					supportsReasoningEffort: true,
+					supportsNativeTools: false, // Copied from parent model
+					supportedParameters: ["max_tokens", "temperature", "reasoning"],
 				},
 			})
 
-			nockDone()
+			getSpy.mockRestore()
+		})
+
+		it("copies model-level capabilities from parent model to endpoint models", async () => {
+			const mockEndpointsResponse = {
+				data: {
+					data: {
+						id: "anthropic/claude-sonnet-4",
+						name: "Claude Sonnet 4",
+						description: "Latest Claude model",
+						architecture: {
+							input_modalities: ["text", "image"],
+							output_modalities: ["text"],
+						},
+						endpoints: [
+							{
+								provider_name: "Anthropic",
+								name: "Claude Sonnet 4",
+								context_length: 200000,
+								max_completion_tokens: 8192,
+								pricing: {
+									prompt: "0.000003",
+									completion: "0.000015",
+									input_cache_write: "0.00000375",
+									input_cache_read: "0.0000003",
+								},
+							},
+						],
+					},
+				},
+			}
+
+			// Mock cached parent model with native tools support
+			const mockCachedModels = {
+				"anthropic/claude-sonnet-4": {
+					maxTokens: 8192,
+					contextWindow: 200000,
+					supportsImages: true,
+					supportsPromptCache: true,
+					supportsReasoningBudget: true,
+					inputPrice: 3,
+					outputPrice: 15,
+					cacheWritesPrice: 3.75,
+					cacheReadsPrice: 0.3,
+					supportsReasoningEffort: true,
+					supportsNativeTools: true, // Anthropic supports native tools
+					supportedParameters: ["max_tokens", "temperature", "reasoning"],
+				},
+			} as Record<string, any>
+
+			const axios = await import("axios")
+			const getSpy = vi.spyOn(axios.default, "get").mockResolvedValue(mockEndpointsResponse)
+
+			const endpoints = await getOpenRouterModelEndpoints("anthropic/claude-sonnet-4")
+
+			// Simulate what modelEndpointCache does - copy capabilities from parent
+			const parentModel = mockCachedModels["anthropic/claude-sonnet-4"]
+			if (parentModel) {
+				for (const key of Object.keys(endpoints)) {
+					endpoints[key].supportsNativeTools = parentModel.supportsNativeTools
+					endpoints[key].supportsReasoningEffort = parentModel.supportsReasoningEffort
+					endpoints[key].supportedParameters = parentModel.supportedParameters
+				}
+			}
+
+			expect(endpoints["Anthropic"]).toEqual({
+				maxTokens: 8192,
+				contextWindow: 200000,
+				supportsImages: true,
+				supportsPromptCache: true,
+				inputPrice: 3,
+				outputPrice: 15,
+				cacheWritesPrice: 3.75,
+				cacheReadsPrice: 0.3,
+				description: undefined,
+				supportsReasoningBudget: true,
+				supportsReasoningEffort: true,
+				supportsNativeTools: true, // Copied from parent model
+				supportedParameters: ["max_tokens", "temperature", "reasoning"],
+			})
+
+			getSpy.mockRestore()
 		})
 	})
 

+ 20 - 0
src/api/providers/fetchers/modelEndpointCache.ts

@@ -11,6 +11,7 @@ import { RouterName, ModelRecord } from "../../../shared/api"
 import { fileExistsAtPath } from "../../../utils/fs"
 
 import { getOpenRouterModelEndpoints } from "./openrouter"
+import { getModels } from "./modelCache"
 
 const memoryCache = new NodeCache({ stdTTL: 5 * 60, checkperiod: 5 * 60 })
 
@@ -55,6 +56,25 @@ export const getModelEndpoints = async ({
 
 	modelProviders = await getOpenRouterModelEndpoints(modelId)
 
+	// Copy model-level capabilities from the parent model to each endpoint
+	// These are capabilities that don't vary by provider (tools, reasoning, etc.)
+	if (Object.keys(modelProviders).length > 0) {
+		const parentModels = await getModels({ provider: "openrouter" })
+		const parentModel = parentModels[modelId]
+
+		if (parentModel) {
+			// Copy model-level capabilities to all endpoints
+			// Clone arrays to avoid shared mutable references
+			for (const endpointKey of Object.keys(modelProviders)) {
+				modelProviders[endpointKey].supportsNativeTools = parentModel.supportsNativeTools
+				modelProviders[endpointKey].supportsReasoningEffort = parentModel.supportsReasoningEffort
+				modelProviders[endpointKey].supportedParameters = parentModel.supportedParameters
+					? [...parentModel.supportedParameters]
+					: undefined
+			}
+		}
+	}
+
 	if (Object.keys(modelProviders).length > 0) {
 		// console.log(`[getModelProviders] API fetch for ${key} -> ${Object.keys(modelProviders).length}`)
 		memoryCache.set(key, modelProviders)