// transform.test.ts

import { describe, expect, test } from "bun:test"
import { ProviderTransform } from "../../src/provider/transform"

const OUTPUT_TOKEN_MAX = 32000

describe("ProviderTransform.maxOutputTokens", () => {
  test("returns 32k when modelLimit > 32k", () => {
    const modelLimit = 100000
    const result = ProviderTransform.maxOutputTokens("@ai-sdk/openai", {}, modelLimit, OUTPUT_TOKEN_MAX)
    expect(result).toBe(OUTPUT_TOKEN_MAX)
  })

  test("returns modelLimit when modelLimit < 32k", () => {
    const modelLimit = 16000
    const result = ProviderTransform.maxOutputTokens("@ai-sdk/openai", {}, modelLimit, OUTPUT_TOKEN_MAX)
    expect(result).toBe(16000)
  })

  describe("azure", () => {
    test("returns 32k when modelLimit > 32k", () => {
      const modelLimit = 100000
      const result = ProviderTransform.maxOutputTokens("@ai-sdk/azure", {}, modelLimit, OUTPUT_TOKEN_MAX)
      expect(result).toBe(OUTPUT_TOKEN_MAX)
    })

    test("returns modelLimit when modelLimit < 32k", () => {
      const modelLimit = 16000
      const result = ProviderTransform.maxOutputTokens("@ai-sdk/azure", {}, modelLimit, OUTPUT_TOKEN_MAX)
      expect(result).toBe(16000)
    })
  })

  describe("bedrock", () => {
    test("returns 32k when modelLimit > 32k", () => {
      const modelLimit = 100000
      const result = ProviderTransform.maxOutputTokens("@ai-sdk/amazon-bedrock", {}, modelLimit, OUTPUT_TOKEN_MAX)
      expect(result).toBe(OUTPUT_TOKEN_MAX)
    })

    test("returns modelLimit when modelLimit < 32k", () => {
      const modelLimit = 16000
      const result = ProviderTransform.maxOutputTokens("@ai-sdk/amazon-bedrock", {}, modelLimit, OUTPUT_TOKEN_MAX)
      expect(result).toBe(16000)
    })
  })

  describe("anthropic without thinking options", () => {
    test("returns 32k when modelLimit > 32k", () => {
      const modelLimit = 100000
      const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", {}, modelLimit, OUTPUT_TOKEN_MAX)
      expect(result).toBe(OUTPUT_TOKEN_MAX)
    })

    test("returns modelLimit when modelLimit < 32k", () => {
      const modelLimit = 16000
      const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", {}, modelLimit, OUTPUT_TOKEN_MAX)
      expect(result).toBe(16000)
    })
  })

  describe("anthropic with thinking options", () => {
    test("returns 32k when budgetTokens + 32k <= modelLimit", () => {
      const modelLimit = 100000
      const options = {
        thinking: {
          type: "enabled",
          budgetTokens: 10000,
        },
      }
      const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", options, modelLimit, OUTPUT_TOKEN_MAX)
      expect(result).toBe(OUTPUT_TOKEN_MAX)
    })

    test("returns modelLimit - budgetTokens when budgetTokens + 32k > modelLimit", () => {
      const modelLimit = 50000
      const options = {
        thinking: {
          type: "enabled",
          budgetTokens: 30000,
        },
      }
      const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", options, modelLimit, OUTPUT_TOKEN_MAX)
      expect(result).toBe(20000)
    })

    test("returns 32k when thinking type is not enabled", () => {
      const modelLimit = 100000
      const options = {
        thinking: {
          type: "disabled",
          budgetTokens: 10000,
        },
      }
      const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", options, modelLimit, OUTPUT_TOKEN_MAX)
      expect(result).toBe(OUTPUT_TOKEN_MAX)
    })
  })
})
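
// For reference, a minimal sketch of the cap logic the tests above pin down.
// This is inferred from the expectations, not taken from the actual
// implementation in src/provider/transform; the function body here is an
// assumption:
//
//   function maxOutputTokens(npm: string, options: any, modelLimit: number, cap: number): number {
//     // Anthropic with extended thinking must leave room for the thinking budget
//     if (npm === "@ai-sdk/anthropic" && options?.thinking?.type === "enabled") {
//       return Math.min(cap, modelLimit - options.thinking.budgetTokens)
//     }
//     return Math.min(cap, modelLimit)
//   }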
describe("ProviderTransform.message - DeepSeek reasoning content", () => {
  test("DeepSeek with tool calls includes reasoning_content in providerOptions", () => {
    const msgs = [
      {
        role: "assistant",
        content: [
          { type: "reasoning", text: "Let me think about this..." },
          {
            type: "tool-call",
            toolCallId: "test",
            toolName: "bash",
            input: { command: "echo hello" },
          },
        ],
      },
    ] as any[]
    const result = ProviderTransform.message(msgs, {
      id: "deepseek/deepseek-chat",
      providerID: "deepseek",
      api: {
        id: "deepseek-chat",
        url: "https://api.deepseek.com",
        npm: "@ai-sdk/openai-compatible",
      },
      name: "DeepSeek Chat",
      capabilities: {
        temperature: true,
        reasoning: true,
        attachment: false,
        toolcall: true,
        input: { text: true, audio: false, image: false, video: false, pdf: false },
        output: { text: true, audio: false, image: false, video: false, pdf: false },
      },
      cost: {
        input: 0.001,
        output: 0.002,
        cache: { read: 0.0001, write: 0.0002 },
      },
      limit: {
        context: 128000,
        output: 8192,
      },
      status: "active",
      options: {},
      headers: {},
    })
    expect(result).toHaveLength(1)
    expect(result[0].content).toEqual([
      {
        type: "tool-call",
        toolCallId: "test",
        toolName: "bash",
        input: { command: "echo hello" },
      },
    ])
    expect(result[0].providerOptions?.openaiCompatible?.reasoning_content).toBe("Let me think about this...")
  })

  test("DeepSeek without tool calls strips reasoning from content", () => {
    const msgs = [
      {
        role: "assistant",
        content: [
          { type: "reasoning", text: "Let me think about this..." },
          { type: "text", text: "Final answer" },
        ],
      },
    ] as any[]
    const result = ProviderTransform.message(msgs, {
      id: "deepseek/deepseek-chat",
      providerID: "deepseek",
      api: {
        id: "deepseek-chat",
        url: "https://api.deepseek.com",
        npm: "@ai-sdk/openai-compatible",
      },
      name: "DeepSeek Chat",
      capabilities: {
        temperature: true,
        reasoning: true,
        attachment: false,
        toolcall: true,
        input: { text: true, audio: false, image: false, video: false, pdf: false },
        output: { text: true, audio: false, image: false, video: false, pdf: false },
      },
      cost: {
        input: 0.001,
        output: 0.002,
        cache: { read: 0.0001, write: 0.0002 },
      },
      limit: {
        context: 128000,
        output: 8192,
      },
      status: "active",
      options: {},
      headers: {},
    })
    expect(result).toHaveLength(1)
    expect(result[0].content).toEqual([{ type: "text", text: "Final answer" }])
    expect(result[0].providerOptions?.openaiCompatible?.reasoning_content).toBeUndefined()
  })

  test("DeepSeek model ID containing 'deepseek' matches (case insensitive)", () => {
    const msgs = [
      {
        role: "assistant",
        content: [
          { type: "reasoning", text: "Thinking..." },
          {
            type: "tool-call",
            toolCallId: "test",
            toolName: "get_weather",
            input: { location: "Hangzhou" },
          },
        ],
      },
    ] as any[]
    const result = ProviderTransform.message(msgs, {
      id: "someprovider/deepseek-reasoner",
      providerID: "someprovider",
      api: {
        id: "deepseek-reasoner",
        url: "https://api.someprovider.com",
        npm: "@ai-sdk/openai-compatible",
      },
      name: "SomeProvider DeepSeek Reasoner",
      capabilities: {
        temperature: true,
        reasoning: true,
        attachment: false,
        toolcall: true,
        input: { text: true, audio: false, image: false, video: false, pdf: false },
        output: { text: true, audio: false, image: false, video: false, pdf: false },
      },
      cost: {
        input: 0.001,
        output: 0.002,
        cache: { read: 0.0001, write: 0.0002 },
      },
      limit: {
        context: 128000,
        output: 8192,
      },
      status: "active",
      options: {},
      headers: {},
    })
    expect(result[0].providerOptions?.openaiCompatible?.reasoning_content).toBe("Thinking...")
  })

  test("Non-DeepSeek providers leave reasoning content unchanged", () => {
    const msgs = [
      {
        role: "assistant",
        content: [
          { type: "reasoning", text: "Should not be processed" },
          { type: "text", text: "Answer" },
        ],
      },
    ] as any[]
    const result = ProviderTransform.message(msgs, {
      id: "openai/gpt-4",
      providerID: "openai",
      api: {
        id: "gpt-4",
        url: "https://api.openai.com",
        npm: "@ai-sdk/openai",
      },
      name: "GPT-4",
      capabilities: {
        temperature: true,
        reasoning: false,
        attachment: true,
        toolcall: true,
        input: { text: true, audio: false, image: true, video: false, pdf: false },
        output: { text: true, audio: false, image: false, video: false, pdf: false },
      },
      cost: {
        input: 0.03,
        output: 0.06,
        cache: { read: 0.001, write: 0.002 },
      },
      limit: {
        context: 128000,
        output: 4096,
      },
      status: "active",
      options: {},
      headers: {},
    })
    expect(result[0].content).toEqual([
      { type: "reasoning", text: "Should not be processed" },
      { type: "text", text: "Answer" },
    ])
    expect(result[0].providerOptions?.openaiCompatible?.reasoning_content).toBeUndefined()
  })
})
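
// A rough sketch of the message transform the cases above describe
// (hypothetical, inferred from the assertions rather than taken from
// src/provider/transform): for models whose ID contains "deepseek"
// (case-insensitive), reasoning parts are stripped from assistant content,
// and when the message also carries tool calls the reasoning text appears to
// be forwarded as providerOptions.openaiCompatible.reasoning_content:
//
//   if (model.id.toLowerCase().includes("deepseek")) {
//     const reasoning = content.filter((p) => p.type === "reasoning")
//     const rest = content.filter((p) => p.type !== "reasoning")
//     msg.content = rest
//     if (reasoning.length && rest.some((p) => p.type === "tool-call")) {
//       msg.providerOptions = {
//         openaiCompatible: { reasoning_content: reasoning.map((p) => p.text).join("") },
//       }
//     }
//   }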