transform.test.ts

import { describe, expect, test } from "bun:test"
import { ProviderTransform } from "../../src/provider/transform"

const OUTPUT_TOKEN_MAX = 32000
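
// OUTPUT_TOKEN_MAX is passed to maxOutputTokens as the hard cap; the suites
// below check how each provider path combines it with the model's own
// output limit.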
describe("ProviderTransform.maxOutputTokens", () => {
  test("returns 32k when modelLimit > 32k", () => {
    const modelLimit = 100000
    const result = ProviderTransform.maxOutputTokens("@ai-sdk/openai", {}, modelLimit, OUTPUT_TOKEN_MAX)
    expect(result).toBe(OUTPUT_TOKEN_MAX)
  })

  test("returns modelLimit when modelLimit < 32k", () => {
    const modelLimit = 16000
    const result = ProviderTransform.maxOutputTokens("@ai-sdk/openai", {}, modelLimit, OUTPUT_TOKEN_MAX)
    expect(result).toBe(16000)
  })
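
  // Azure and Bedrock are expected to clamp exactly like the OpenAI path
  // above: the 32k cap applies when the model limit exceeds it, otherwise
  // the model limit is returned as-is.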
  describe("azure", () => {
    test("returns 32k when modelLimit > 32k", () => {
      const modelLimit = 100000
      const result = ProviderTransform.maxOutputTokens("@ai-sdk/azure", {}, modelLimit, OUTPUT_TOKEN_MAX)
      expect(result).toBe(OUTPUT_TOKEN_MAX)
    })

    test("returns modelLimit when modelLimit < 32k", () => {
      const modelLimit = 16000
      const result = ProviderTransform.maxOutputTokens("@ai-sdk/azure", {}, modelLimit, OUTPUT_TOKEN_MAX)
      expect(result).toBe(16000)
    })
  })

  describe("bedrock", () => {
    test("returns 32k when modelLimit > 32k", () => {
      const modelLimit = 100000
      const result = ProviderTransform.maxOutputTokens("@ai-sdk/amazon-bedrock", {}, modelLimit, OUTPUT_TOKEN_MAX)
      expect(result).toBe(OUTPUT_TOKEN_MAX)
    })

    test("returns modelLimit when modelLimit < 32k", () => {
      const modelLimit = 16000
      const result = ProviderTransform.maxOutputTokens("@ai-sdk/amazon-bedrock", {}, modelLimit, OUTPUT_TOKEN_MAX)
      expect(result).toBe(16000)
    })
  })

  describe("anthropic without thinking options", () => {
    test("returns 32k when modelLimit > 32k", () => {
      const modelLimit = 100000
      const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", {}, modelLimit, OUTPUT_TOKEN_MAX)
      expect(result).toBe(OUTPUT_TOKEN_MAX)
    })

    test("returns modelLimit when modelLimit < 32k", () => {
      const modelLimit = 16000
      const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", {}, modelLimit, OUTPUT_TOKEN_MAX)
      expect(result).toBe(16000)
    })
  })
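
  // With extended thinking enabled, the thinking budget must fit inside the
  // model's output window, so the expectation is effectively
  // min(OUTPUT_TOKEN_MAX, modelLimit - budgetTokens); a thinking block whose
  // type is not "enabled" falls back to the plain clamp.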
  describe("anthropic with thinking options", () => {
    test("returns 32k when budgetTokens + 32k <= modelLimit", () => {
      const modelLimit = 100000
      const options = {
        thinking: {
          type: "enabled",
          budgetTokens: 10000,
        },
      }
      const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", options, modelLimit, OUTPUT_TOKEN_MAX)
      expect(result).toBe(OUTPUT_TOKEN_MAX)
    })

    test("returns modelLimit - budgetTokens when budgetTokens + 32k > modelLimit", () => {
      const modelLimit = 50000
      const options = {
        thinking: {
          type: "enabled",
          budgetTokens: 30000,
        },
      }
      const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", options, modelLimit, OUTPUT_TOKEN_MAX)
      expect(result).toBe(20000)
    })

    test("returns 32k when thinking type is not enabled", () => {
      const modelLimit = 100000
      const options = {
        thinking: {
          type: "disabled",
          budgetTokens: 10000,
        },
      }
      const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", options, modelLimit, OUTPUT_TOKEN_MAX)
      expect(result).toBe(OUTPUT_TOKEN_MAX)
    })
  })
})
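
// The message() suite covers DeepSeek reasoning_content handling: for any
// model whose id contains "deepseek" (matched case-insensitively, regardless
// of the provider serving it), an assistant message carrying tool calls has
// its reasoning part stripped from content and the text re-emitted through
// providerOptions.openaiCompatible.reasoning_content; every other model
// passes through unchanged.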
describe("ProviderTransform.message - DeepSeek reasoning content", () => {
  test("DeepSeek with tool calls includes reasoning_content in providerOptions", () => {
    const msgs = [
      {
        role: "assistant",
        content: [
          { type: "reasoning", text: "Let me think about this..." },
          {
            type: "tool-call",
            toolCallId: "test",
            toolName: "bash",
            input: { command: "echo hello" },
          },
        ],
      },
    ] as any[]
    const result = ProviderTransform.message(msgs, {
      id: "deepseek/deepseek-chat",
      providerID: "deepseek",
      api: {
        id: "deepseek-chat",
        url: "https://api.deepseek.com",
        npm: "@ai-sdk/openai-compatible",
      },
      name: "DeepSeek Chat",
      capabilities: {
        temperature: true,
        reasoning: true,
        attachment: false,
        toolcall: true,
        input: { text: true, audio: false, image: false, video: false, pdf: false },
        output: { text: true, audio: false, image: false, video: false, pdf: false },
        interleaved: false,
      },
      cost: {
        input: 0.001,
        output: 0.002,
        cache: { read: 0.0001, write: 0.0002 },
      },
      limit: {
        context: 128000,
        output: 8192,
      },
      status: "active",
      options: {},
      headers: {},
    })
    expect(result).toHaveLength(1)
    expect(result[0].content).toEqual([
      {
        type: "tool-call",
        toolCallId: "test",
        toolName: "bash",
        input: { command: "echo hello" },
      },
    ])
    expect(result[0].providerOptions?.openaiCompatible?.reasoning_content).toBe("Let me think about this...")
  })
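
  // The match is keyed on the model id alone, so a third-party provider
  // serving a DeepSeek model gets the same rewrite.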
  test("DeepSeek model ID containing 'deepseek' matches (case insensitive)", () => {
    const msgs = [
      {
        role: "assistant",
        content: [
          { type: "reasoning", text: "Thinking..." },
          {
            type: "tool-call",
            toolCallId: "test",
            toolName: "get_weather",
            input: { location: "Hangzhou" },
          },
        ],
      },
    ] as any[]
    const result = ProviderTransform.message(msgs, {
      id: "someprovider/deepseek-reasoner",
      providerID: "someprovider",
      api: {
        id: "deepseek-reasoner",
        url: "https://api.someprovider.com",
        npm: "@ai-sdk/openai-compatible",
      },
      name: "SomeProvider DeepSeek Reasoner",
      capabilities: {
        temperature: true,
        reasoning: true,
        attachment: false,
        toolcall: true,
        input: { text: true, audio: false, image: false, video: false, pdf: false },
        output: { text: true, audio: false, image: false, video: false, pdf: false },
        interleaved: false,
      },
      cost: {
        input: 0.001,
        output: 0.002,
        cache: { read: 0.0001, write: 0.0002 },
      },
      limit: {
        context: 128000,
        output: 8192,
      },
      status: "active",
      options: {},
      headers: {},
    })
    expect(result[0].providerOptions?.openaiCompatible?.reasoning_content).toBe("Thinking...")
  })

  test("Non-DeepSeek providers leave reasoning content unchanged", () => {
    const msgs = [
      {
        role: "assistant",
        content: [
          { type: "reasoning", text: "Should not be processed" },
          { type: "text", text: "Answer" },
        ],
      },
    ] as any[]
    const result = ProviderTransform.message(msgs, {
      id: "openai/gpt-4",
      providerID: "openai",
      api: {
        id: "gpt-4",
        url: "https://api.openai.com",
        npm: "@ai-sdk/openai",
      },
      name: "GPT-4",
      capabilities: {
        temperature: true,
        reasoning: false,
        attachment: true,
        toolcall: true,
        input: { text: true, audio: false, image: true, video: false, pdf: false },
        output: { text: true, audio: false, image: false, video: false, pdf: false },
        interleaved: false,
      },
      cost: {
        input: 0.03,
        output: 0.06,
        cache: { read: 0.001, write: 0.002 },
      },
      limit: {
        context: 128000,
        output: 4096,
      },
      status: "active",
      options: {},
      headers: {},
    })
    expect(result[0].content).toEqual([
      { type: "reasoning", text: "Should not be processed" },
      { type: "text", text: "Answer" },
    ])
    expect(result[0].providerOptions?.openaiCompatible?.reasoning_content).toBeUndefined()
  })
})
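
// The three message() tests above repeat a near-identical model-info
// literal. A fixture factory could centralize the shared defaults; this is
// a sketch only: `testModel` is hypothetical, not used by the suite as
// written, and call sites would still cast the result to the expected
// model-info type (mirroring the `as any` casts the tests already use).
function testModel(overrides: Record<string, unknown>) {
  return {
    status: "active",
    options: {},
    headers: {},
    cost: { input: 0.001, output: 0.002, cache: { read: 0.0001, write: 0.0002 } },
    limit: { context: 128000, output: 8192 },
    ...overrides,
  }
}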