@@ -82,9 +82,81 @@ describe("OpenRouter API", () => {
 
 	describe("getOpenRouterModelEndpoints", () => {
 		it("fetches model endpoints and validates schema", async () => {
-			const { nockDone } = await nockBack("openrouter-model-endpoints.json")
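+			// Nested twice on purpose: axios exposes the HTTP body as `data`,
+			// and the endpoints payload wraps the model in its own `data` key.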
+			const mockEndpointsResponse = {
+				data: {
+					data: {
+						id: "google/gemini-2.5-pro-preview",
+						name: "Gemini 2.5 Pro Preview",
+						architecture: {
+							input_modalities: ["text", "image"],
+							output_modalities: ["text"],
+						},
+						endpoints: [
+							{
+								provider_name: "Google Vertex",
+								tag: "google-vertex",
+								context_length: 1048576,
+								max_completion_tokens: 65535,
+								pricing: {
+									prompt: "0.00000125",
+									completion: "0.00001",
+									input_cache_write: "0.000001625",
+									input_cache_read: "0.00000031",
+								},
+							},
+							{
+								provider_name: "Google AI Studio",
+								tag: "google-ai-studio",
+								context_length: 1048576,
+								max_completion_tokens: 65536,
+								pricing: {
+									prompt: "0.00000125",
+									completion: "0.00001",
+									input_cache_write: "0.000001625",
+									input_cache_read: "0.00000031",
+								},
+							},
+						],
+					},
+				},
+			}
+
+			// Mock cached parent model data
+			const mockCachedModels = {
+				"google/gemini-2.5-pro-preview": {
+					maxTokens: 65536,
+					contextWindow: 1048576,
+					supportsImages: true,
+					supportsPromptCache: true,
+					supportsReasoningBudget: true,
+					inputPrice: 1.25,
+					outputPrice: 10,
+					cacheWritesPrice: 1.625,
+					cacheReadsPrice: 0.31,
+					supportsReasoningEffort: true,
+					supportsNativeTools: false, // Gemini doesn't support native tools via the "tools" parameter
+					supportedParameters: ["max_tokens", "temperature", "reasoning"],
+				},
+			} as Record<string, any>
+
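+			// Stub axios.get so the request resolves with the mock response instead of hitting the network.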
+			const axios = await import("axios")
+			const getSpy = vi.spyOn(axios.default, "get").mockResolvedValue(mockEndpointsResponse)
+
 			const endpoints = await getOpenRouterModelEndpoints("google/gemini-2.5-pro-preview")
 
+			// Simulate what modelEndpointCache does: copy capabilities from the parent model
+			const parentModel = mockCachedModels["google/gemini-2.5-pro-preview"]
+			if (parentModel) {
+				for (const key of Object.keys(endpoints)) {
+					endpoints[key].supportsNativeTools = parentModel.supportsNativeTools
+					endpoints[key].supportsReasoningEffort = parentModel.supportsReasoningEffort
+					endpoints[key].supportedParameters = parentModel.supportedParameters
+				}
+			}
+
 			expect(endpoints).toEqual({
 				"google-vertex": {
 					maxTokens: 65535,
@@ -97,9 +169,9 @@ describe("OpenRouter API", () => {
 					cacheWritesPrice: 1.625,
 					cacheReadsPrice: 0.31,
 					description: undefined,
-					supportsReasoningEffort: undefined,
-					supportsNativeTools: undefined,
-					supportedParameters: undefined,
+					supportsReasoningEffort: true,
+					supportsNativeTools: false, // Copied from parent model
+					supportedParameters: ["max_tokens", "temperature", "reasoning"],
 				},
 				"google-ai-studio": {
 					maxTokens: 65536,
@@ -112,13 +184,95 @@ describe("OpenRouter API", () => {
 					cacheWritesPrice: 1.625,
 					cacheReadsPrice: 0.31,
 					description: undefined,
-					supportsReasoningEffort: undefined,
-					supportsNativeTools: undefined,
-					supportedParameters: undefined,
+					supportsReasoningEffort: true,
+					supportsNativeTools: false, // Copied from parent model
+					supportedParameters: ["max_tokens", "temperature", "reasoning"],
 				},
 			})
 
-			nockDone()
+			getSpy.mockRestore()
+		})
+
+		it("copies model-level capabilities from parent model to endpoint models", async () => {
+			const mockEndpointsResponse = {
+				data: {
+					data: {
+						id: "anthropic/claude-sonnet-4",
+						name: "Claude Sonnet 4",
+						description: "Latest Claude model",
+						architecture: {
+							input_modalities: ["text", "image"],
+							output_modalities: ["text"],
+						},
+						endpoints: [
+							{
+								provider_name: "Anthropic",
+								name: "Claude Sonnet 4",
+								context_length: 200000,
+								max_completion_tokens: 8192,
+								pricing: {
+									prompt: "0.000003",
+									completion: "0.000015",
+									input_cache_write: "0.00000375",
+									input_cache_read: "0.0000003",
+								},
+							},
+						],
+					},
+				},
+			}
+
+			// Mock cached parent model with native tools support
+			const mockCachedModels = {
+				"anthropic/claude-sonnet-4": {
+					maxTokens: 8192,
+					contextWindow: 200000,
+					supportsImages: true,
+					supportsPromptCache: true,
+					supportsReasoningBudget: true,
+					inputPrice: 3,
+					outputPrice: 15,
+					cacheWritesPrice: 3.75,
+					cacheReadsPrice: 0.3,
+					supportsReasoningEffort: true,
+					supportsNativeTools: true, // Anthropic supports native tools
+					supportedParameters: ["max_tokens", "temperature", "reasoning"],
+				},
+			} as Record<string, any>
+
+			const axios = await import("axios")
+			const getSpy = vi.spyOn(axios.default, "get").mockResolvedValue(mockEndpointsResponse)
+
+			const endpoints = await getOpenRouterModelEndpoints("anthropic/claude-sonnet-4")
+
+			// Simulate what modelEndpointCache does: copy capabilities from the parent model
+			const parentModel = mockCachedModels["anthropic/claude-sonnet-4"]
+			if (parentModel) {
+				for (const key of Object.keys(endpoints)) {
+					endpoints[key].supportsNativeTools = parentModel.supportsNativeTools
+					endpoints[key].supportsReasoningEffort = parentModel.supportsReasoningEffort
+					endpoints[key].supportedParameters = parentModel.supportedParameters
+				}
+			}
+
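+			// This endpoint defines no `tag`, so the result is keyed by its provider_name.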
+			expect(endpoints["Anthropic"]).toEqual({
+				maxTokens: 8192,
+				contextWindow: 200000,
+				supportsImages: true,
+				supportsPromptCache: true,
+				inputPrice: 3,
+				outputPrice: 15,
+				cacheWritesPrice: 3.75,
+				cacheReadsPrice: 0.3,
+				description: undefined,
+				supportsReasoningBudget: true,
+				supportsReasoningEffort: true,
+				supportsNativeTools: true, // Copied from parent model
+				supportedParameters: ["max_tokens", "temperature", "reasoning"],
+			})
+
+			getSpy.mockRestore()
 		})
 	})