@@ -267,6 +267,56 @@ describe("ProviderTransform.maxOutputTokens", () => {
       expect(result).toBe(OUTPUT_TOKEN_MAX)
     })
   })
+
+  describe("openai-compatible with thinking options (snake_case)", () => {
+    test("returns 32k when budget_tokens + 32k <= modelLimit", () => {
+      const modelLimit = 100000
+      const options = {
+        thinking: {
+          type: "enabled",
+          budget_tokens: 10000,
+        },
+      }
+      const result = ProviderTransform.maxOutputTokens("@ai-sdk/openai-compatible", options, modelLimit, OUTPUT_TOKEN_MAX)
+      expect(result).toBe(OUTPUT_TOKEN_MAX)
+    })
+
+    test("returns modelLimit - budget_tokens when budget_tokens + 32k > modelLimit", () => {
+      const modelLimit = 50000
+      const options = {
+        thinking: {
+          type: "enabled",
+          budget_tokens: 30000,
+        },
+      }
+      const result = ProviderTransform.maxOutputTokens("@ai-sdk/openai-compatible", options, modelLimit, OUTPUT_TOKEN_MAX)
+      expect(result).toBe(20000)
+    })
+
+    test("returns 32k when thinking type is not enabled", () => {
+      const modelLimit = 100000
+      const options = {
+        thinking: {
+          type: "disabled",
+          budget_tokens: 10000,
+        },
+      }
+      const result = ProviderTransform.maxOutputTokens("@ai-sdk/openai-compatible", options, modelLimit, OUTPUT_TOKEN_MAX)
+      expect(result).toBe(OUTPUT_TOKEN_MAX)
+    })
+
+    test("returns 32k when budget_tokens is 0", () => {
+      const modelLimit = 100000
+      const options = {
+        thinking: {
+          type: "enabled",
+          budget_tokens: 0,
+        },
+      }
+      const result = ProviderTransform.maxOutputTokens("@ai-sdk/openai-compatible", options, modelLimit, OUTPUT_TOKEN_MAX)
+      expect(result).toBe(OUTPUT_TOKEN_MAX)
+    })
+  })
 })

 describe("ProviderTransform.schema - gemini array items", () => {
@@ -1494,6 +1544,67 @@ describe("ProviderTransform.variants", () => {
     expect(result.low).toEqual({ reasoningEffort: "low" })
     expect(result.high).toEqual({ reasoningEffort: "high" })
   })
+
+  test("Claude via LiteLLM returns thinking with snake_case budget_tokens", () => {
+    const model = createMockModel({
+      id: "anthropic/claude-sonnet-4-5",
+      providerID: "anthropic",
+      api: {
+        id: "claude-sonnet-4-5-20250929",
+        url: "http://localhost:4000",
+        npm: "@ai-sdk/openai-compatible",
+      },
+    })
+    const result = ProviderTransform.variants(model)
+    expect(Object.keys(result)).toEqual(["high", "max"])
+    expect(result.high).toEqual({
+      thinking: {
+        type: "enabled",
+        budget_tokens: 16000,
+      },
+    })
+    expect(result.max).toEqual({
+      thinking: {
+        type: "enabled",
+        budget_tokens: 31999,
+      },
+    })
+  })
+
+  test("Claude model (by model.id) via openai-compatible uses snake_case", () => {
+    const model = createMockModel({
+      id: "litellm/claude-3-opus",
+      providerID: "litellm",
+      api: {
+        id: "claude-3-opus-20240229",
+        url: "http://localhost:4000",
+        npm: "@ai-sdk/openai-compatible",
+      },
+    })
+    const result = ProviderTransform.variants(model)
+    expect(Object.keys(result)).toEqual(["high", "max"])
+    expect(result.high).toEqual({
+      thinking: {
+        type: "enabled",
+        budget_tokens: 16000,
+      },
+    })
+  })
+
+  test("Anthropic model (by model.api.id) via openai-compatible uses snake_case", () => {
+    const model = createMockModel({
+      id: "custom/my-model",
+      providerID: "custom",
+      api: {
+        id: "anthropic.claude-sonnet",
+        url: "http://localhost:4000",
+        npm: "@ai-sdk/openai-compatible",
+      },
+    })
+    const result = ProviderTransform.variants(model)
+    expect(Object.keys(result)).toEqual(["high", "max"])
+    expect(result.high.thinking.budget_tokens).toBe(16000)
+  })
 })

 describe("@ai-sdk/azure", () => {