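// Tests for ProviderTransform.maxOutputTokens. Judging from the expectations below,
// the function caps a request's max output tokens at the smaller of the model's own
// output limit and the global OUTPUT_TOKEN_MAX, with a provider-specific adjustment
// for Anthropic when extended thinking is enabled.
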
import { describe, expect, test } from "bun:test"
import { ProviderTransform } from "../../src/provider/transform"

const OUTPUT_TOKEN_MAX = 32000

describe("ProviderTransform.maxOutputTokens", () => {
  test("returns 32k when modelLimit > 32k", () => {
    const modelLimit = 100000
    const result = ProviderTransform.maxOutputTokens("@ai-sdk/openai", {}, modelLimit, OUTPUT_TOKEN_MAX)
    expect(result).toBe(OUTPUT_TOKEN_MAX)
  })

  test("returns modelLimit when modelLimit < 32k", () => {
    const modelLimit = 16000
    const result = ProviderTransform.maxOutputTokens("@ai-sdk/openai", {}, modelLimit, OUTPUT_TOKEN_MAX)
    expect(result).toBe(16000)
  })

  describe("azure", () => {
    test("returns 32k when modelLimit > 32k", () => {
      const modelLimit = 100000
      const result = ProviderTransform.maxOutputTokens("@ai-sdk/azure", {}, modelLimit, OUTPUT_TOKEN_MAX)
      expect(result).toBe(OUTPUT_TOKEN_MAX)
    })

    test("returns modelLimit when modelLimit < 32k", () => {
      const modelLimit = 16000
      const result = ProviderTransform.maxOutputTokens("@ai-sdk/azure", {}, modelLimit, OUTPUT_TOKEN_MAX)
      expect(result).toBe(16000)
    })
  })

  describe("bedrock", () => {
    test("returns 32k when modelLimit > 32k", () => {
      const modelLimit = 100000
      const result = ProviderTransform.maxOutputTokens("@ai-sdk/amazon-bedrock", {}, modelLimit, OUTPUT_TOKEN_MAX)
      expect(result).toBe(OUTPUT_TOKEN_MAX)
    })

    test("returns modelLimit when modelLimit < 32k", () => {
      const modelLimit = 16000
      const result = ProviderTransform.maxOutputTokens("@ai-sdk/amazon-bedrock", {}, modelLimit, OUTPUT_TOKEN_MAX)
      expect(result).toBe(16000)
    })
  })

  describe("anthropic without thinking options", () => {
    test("returns 32k when modelLimit > 32k", () => {
      const modelLimit = 100000
      const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", {}, modelLimit, OUTPUT_TOKEN_MAX)
      expect(result).toBe(OUTPUT_TOKEN_MAX)
    })

    test("returns modelLimit when modelLimit < 32k", () => {
      const modelLimit = 16000
      const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", {}, modelLimit, OUTPUT_TOKEN_MAX)
      expect(result).toBe(16000)
    })
  })
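
  // Based on the expectations in the next block, when Anthropic extended thinking is
  // enabled the thinking budget counts against the model's output limit: the cap stays
  // at OUTPUT_TOKEN_MAX while budgetTokens + OUTPUT_TOKEN_MAX fits within modelLimit,
  // and otherwise drops to modelLimit - budgetTokens.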
- describe("anthropic with thinking options", () => {
- test("returns 32k when budgetTokens + 32k <= modelLimit", () => {
- const modelLimit = 100000
- const options = {
- thinking: {
- type: "enabled",
- budgetTokens: 10000,
- },
- }
- const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", options, modelLimit, OUTPUT_TOKEN_MAX)
- expect(result).toBe(OUTPUT_TOKEN_MAX)
- })
- test("returns modelLimit - budgetTokens when budgetTokens + 32k > modelLimit", () => {
- const modelLimit = 50000
- const options = {
- thinking: {
- type: "enabled",
- budgetTokens: 30000,
- },
- }
- const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", options, modelLimit, OUTPUT_TOKEN_MAX)
- expect(result).toBe(20000)
- })
- test("returns 32k when thinking type is not enabled", () => {
- const modelLimit = 100000
- const options = {
- thinking: {
- type: "disabled",
- budgetTokens: 10000,
- },
- }
- const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", options, modelLimit, OUTPUT_TOKEN_MAX)
- expect(result).toBe(OUTPUT_TOKEN_MAX)
- })
- })
- })