// transform.test.ts
  1. import { describe, expect, test } from "bun:test"
  2. import { ProviderTransform } from "../../src/provider/transform"
  3. const OUTPUT_TOKEN_MAX = 32000
  4. describe("ProviderTransform.maxOutputTokens", () => {
  5. test("returns 32k when modelLimit > 32k", () => {
  6. const modelLimit = 100000
  7. const result = ProviderTransform.maxOutputTokens("@ai-sdk/openai", {}, modelLimit, OUTPUT_TOKEN_MAX)
  8. expect(result).toBe(OUTPUT_TOKEN_MAX)
  9. })
  10. test("returns modelLimit when modelLimit < 32k", () => {
  11. const modelLimit = 16000
  12. const result = ProviderTransform.maxOutputTokens("@ai-sdk/openai", {}, modelLimit, OUTPUT_TOKEN_MAX)
  13. expect(result).toBe(16000)
  14. })
  15. describe("azure", () => {
  16. test("returns 32k when modelLimit > 32k", () => {
  17. const modelLimit = 100000
  18. const result = ProviderTransform.maxOutputTokens("@ai-sdk/azure", {}, modelLimit, OUTPUT_TOKEN_MAX)
  19. expect(result).toBe(OUTPUT_TOKEN_MAX)
  20. })
  21. test("returns modelLimit when modelLimit < 32k", () => {
  22. const modelLimit = 16000
  23. const result = ProviderTransform.maxOutputTokens("@ai-sdk/azure", {}, modelLimit, OUTPUT_TOKEN_MAX)
  24. expect(result).toBe(16000)
  25. })
  26. })
  27. describe("bedrock", () => {
  28. test("returns 32k when modelLimit > 32k", () => {
  29. const modelLimit = 100000
  30. const result = ProviderTransform.maxOutputTokens("@ai-sdk/amazon-bedrock", {}, modelLimit, OUTPUT_TOKEN_MAX)
  31. expect(result).toBe(OUTPUT_TOKEN_MAX)
  32. })
  33. test("returns modelLimit when modelLimit < 32k", () => {
  34. const modelLimit = 16000
  35. const result = ProviderTransform.maxOutputTokens("@ai-sdk/amazon-bedrock", {}, modelLimit, OUTPUT_TOKEN_MAX)
  36. expect(result).toBe(16000)
  37. })
  38. })
  39. describe("anthropic without thinking options", () => {
  40. test("returns 32k when modelLimit > 32k", () => {
  41. const modelLimit = 100000
  42. const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", {}, modelLimit, OUTPUT_TOKEN_MAX)
  43. expect(result).toBe(OUTPUT_TOKEN_MAX)
  44. })
  45. test("returns modelLimit when modelLimit < 32k", () => {
  46. const modelLimit = 16000
  47. const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", {}, modelLimit, OUTPUT_TOKEN_MAX)
  48. expect(result).toBe(16000)
  49. })
  50. })
  51. describe("anthropic with thinking options", () => {
  52. test("returns 32k when budgetTokens + 32k <= modelLimit", () => {
  53. const modelLimit = 100000
  54. const options = {
  55. thinking: {
  56. type: "enabled",
  57. budgetTokens: 10000,
  58. },
  59. }
  60. const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", options, modelLimit, OUTPUT_TOKEN_MAX)
  61. expect(result).toBe(OUTPUT_TOKEN_MAX)
  62. })
  63. test("returns modelLimit - budgetTokens when budgetTokens + 32k > modelLimit", () => {
  64. const modelLimit = 50000
  65. const options = {
  66. thinking: {
  67. type: "enabled",
  68. budgetTokens: 30000,
  69. },
  70. }
  71. const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", options, modelLimit, OUTPUT_TOKEN_MAX)
  72. expect(result).toBe(20000)
  73. })
  74. test("returns 32k when thinking type is not enabled", () => {
  75. const modelLimit = 100000
  76. const options = {
  77. thinking: {
  78. type: "disabled",
  79. budgetTokens: 10000,
  80. },
  81. }
  82. const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", options, modelLimit, OUTPUT_TOKEN_MAX)
  83. expect(result).toBe(OUTPUT_TOKEN_MAX)
  84. })
  85. })
  86. })