// transform.test.ts
  1. import { describe, expect, test } from "bun:test"
  2. import { ProviderTransform } from "../../src/provider/transform"
  3. const OUTPUT_TOKEN_MAX = 32000
  4. describe("ProviderTransform.maxOutputTokens", () => {
  5. test("returns 32k when modelLimit > 32k", () => {
  6. const modelLimit = 100000
  7. const result = ProviderTransform.maxOutputTokens("@ai-sdk/openai", {}, modelLimit, OUTPUT_TOKEN_MAX)
  8. expect(result).toBe(OUTPUT_TOKEN_MAX)
  9. })
  10. test("returns modelLimit when modelLimit < 32k", () => {
  11. const modelLimit = 16000
  12. const result = ProviderTransform.maxOutputTokens("@ai-sdk/openai", {}, modelLimit, OUTPUT_TOKEN_MAX)
  13. expect(result).toBe(16000)
  14. })
  15. describe("azure", () => {
  16. test("returns 32k when modelLimit > 32k", () => {
  17. const modelLimit = 100000
  18. const result = ProviderTransform.maxOutputTokens("@ai-sdk/azure", {}, modelLimit, OUTPUT_TOKEN_MAX)
  19. expect(result).toBe(OUTPUT_TOKEN_MAX)
  20. })
  21. test("returns modelLimit when modelLimit < 32k", () => {
  22. const modelLimit = 16000
  23. const result = ProviderTransform.maxOutputTokens("@ai-sdk/azure", {}, modelLimit, OUTPUT_TOKEN_MAX)
  24. expect(result).toBe(16000)
  25. })
  26. })
  27. describe("bedrock", () => {
  28. test("returns 32k when modelLimit > 32k", () => {
  29. const modelLimit = 100000
  30. const result = ProviderTransform.maxOutputTokens("@ai-sdk/amazon-bedrock", {}, modelLimit, OUTPUT_TOKEN_MAX)
  31. expect(result).toBe(OUTPUT_TOKEN_MAX)
  32. })
  33. test("returns modelLimit when modelLimit < 32k", () => {
  34. const modelLimit = 16000
  35. const result = ProviderTransform.maxOutputTokens("@ai-sdk/amazon-bedrock", {}, modelLimit, OUTPUT_TOKEN_MAX)
  36. expect(result).toBe(16000)
  37. })
  38. })
  39. describe("anthropic without thinking options", () => {
  40. test("returns 32k when modelLimit > 32k", () => {
  41. const modelLimit = 100000
  42. const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", {}, modelLimit, OUTPUT_TOKEN_MAX)
  43. expect(result).toBe(OUTPUT_TOKEN_MAX)
  44. })
  45. test("returns modelLimit when modelLimit < 32k", () => {
  46. const modelLimit = 16000
  47. const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", {}, modelLimit, OUTPUT_TOKEN_MAX)
  48. expect(result).toBe(16000)
  49. })
  50. })
  51. describe("anthropic with thinking options", () => {
  52. test("returns 32k when budgetTokens + 32k <= modelLimit", () => {
  53. const modelLimit = 100000
  54. const options = {
  55. thinking: {
  56. type: "enabled",
  57. budgetTokens: 10000,
  58. },
  59. }
  60. const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", options, modelLimit, OUTPUT_TOKEN_MAX)
  61. expect(result).toBe(OUTPUT_TOKEN_MAX)
  62. })
  63. test("returns modelLimit - budgetTokens when budgetTokens + 32k > modelLimit", () => {
  64. const modelLimit = 50000
  65. const options = {
  66. thinking: {
  67. type: "enabled",
  68. budgetTokens: 30000,
  69. },
  70. }
  71. const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", options, modelLimit, OUTPUT_TOKEN_MAX)
  72. expect(result).toBe(20000)
  73. })
  74. test("returns 32k when thinking type is not enabled", () => {
  75. const modelLimit = 100000
  76. const options = {
  77. thinking: {
  78. type: "disabled",
  79. budgetTokens: 10000,
  80. },
  81. }
  82. const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", options, modelLimit, OUTPUT_TOKEN_MAX)
  83. expect(result).toBe(OUTPUT_TOKEN_MAX)
  84. })
  85. })
  86. })
  87. describe("ProviderTransform.message - DeepSeek reasoning content", () => {
  88. test("DeepSeek with tool calls includes reasoning_content in providerOptions", () => {
  89. const msgs = [
  90. {
  91. role: "assistant",
  92. content: [
  93. { type: "reasoning", text: "Let me think about this..." },
  94. {
  95. type: "tool-call",
  96. toolCallId: "test",
  97. toolName: "bash",
  98. input: { command: "echo hello" },
  99. },
  100. ],
  101. },
  102. ] as any[]
  103. const result = ProviderTransform.message(msgs, {
  104. id: "deepseek/deepseek-chat",
  105. providerID: "deepseek",
  106. api: {
  107. id: "deepseek-chat",
  108. url: "https://api.deepseek.com",
  109. npm: "@ai-sdk/openai-compatible",
  110. },
  111. name: "DeepSeek Chat",
  112. capabilities: {
  113. temperature: true,
  114. reasoning: true,
  115. attachment: false,
  116. toolcall: true,
  117. input: { text: true, audio: false, image: false, video: false, pdf: false },
  118. output: { text: true, audio: false, image: false, video: false, pdf: false },
  119. interleaved: false,
  120. },
  121. cost: {
  122. input: 0.001,
  123. output: 0.002,
  124. cache: { read: 0.0001, write: 0.0002 },
  125. },
  126. limit: {
  127. context: 128000,
  128. output: 8192,
  129. },
  130. status: "active",
  131. options: {},
  132. headers: {},
  133. })
  134. expect(result).toHaveLength(1)
  135. expect(result[0].content).toEqual([
  136. {
  137. type: "tool-call",
  138. toolCallId: "test",
  139. toolName: "bash",
  140. input: { command: "echo hello" },
  141. },
  142. ])
  143. expect(result[0].providerOptions?.openaiCompatible?.reasoning_content).toBe("Let me think about this...")
  144. })
  145. test("DeepSeek without tool calls strips reasoning from content", () => {
  146. const msgs = [
  147. {
  148. role: "assistant",
  149. content: [
  150. { type: "reasoning", text: "Let me think about this..." },
  151. { type: "text", text: "Final answer" },
  152. ],
  153. },
  154. ] as any[]
  155. const result = ProviderTransform.message(msgs, {
  156. id: "deepseek/deepseek-chat",
  157. providerID: "deepseek",
  158. api: {
  159. id: "deepseek-chat",
  160. url: "https://api.deepseek.com",
  161. npm: "@ai-sdk/openai-compatible",
  162. },
  163. name: "DeepSeek Chat",
  164. capabilities: {
  165. temperature: true,
  166. reasoning: true,
  167. attachment: false,
  168. toolcall: true,
  169. input: { text: true, audio: false, image: false, video: false, pdf: false },
  170. output: { text: true, audio: false, image: false, video: false, pdf: false },
  171. interleaved: false,
  172. },
  173. cost: {
  174. input: 0.001,
  175. output: 0.002,
  176. cache: { read: 0.0001, write: 0.0002 },
  177. },
  178. limit: {
  179. context: 128000,
  180. output: 8192,
  181. },
  182. status: "active",
  183. options: {},
  184. headers: {},
  185. })
  186. expect(result).toHaveLength(1)
  187. expect(result[0].content).toEqual([{ type: "text", text: "Final answer" }])
  188. expect(result[0].providerOptions?.openaiCompatible?.reasoning_content).toBeUndefined()
  189. })
  190. test("DeepSeek model ID containing 'deepseek' matches (case insensitive)", () => {
  191. const msgs = [
  192. {
  193. role: "assistant",
  194. content: [
  195. { type: "reasoning", text: "Thinking..." },
  196. {
  197. type: "tool-call",
  198. toolCallId: "test",
  199. toolName: "get_weather",
  200. input: { location: "Hangzhou" },
  201. },
  202. ],
  203. },
  204. ] as any[]
  205. const result = ProviderTransform.message(msgs, {
  206. id: "someprovider/deepseek-reasoner",
  207. providerID: "someprovider",
  208. api: {
  209. id: "deepseek-reasoner",
  210. url: "https://api.someprovider.com",
  211. npm: "@ai-sdk/openai-compatible",
  212. },
  213. name: "SomeProvider DeepSeek Reasoner",
  214. capabilities: {
  215. temperature: true,
  216. reasoning: true,
  217. attachment: false,
  218. toolcall: true,
  219. input: { text: true, audio: false, image: false, video: false, pdf: false },
  220. output: { text: true, audio: false, image: false, video: false, pdf: false },
  221. interleaved: false,
  222. },
  223. cost: {
  224. input: 0.001,
  225. output: 0.002,
  226. cache: { read: 0.0001, write: 0.0002 },
  227. },
  228. limit: {
  229. context: 128000,
  230. output: 8192,
  231. },
  232. status: "active",
  233. options: {},
  234. headers: {},
  235. })
  236. expect(result[0].providerOptions?.openaiCompatible?.reasoning_content).toBe("Thinking...")
  237. })
  238. test("Non-DeepSeek providers leave reasoning content unchanged", () => {
  239. const msgs = [
  240. {
  241. role: "assistant",
  242. content: [
  243. { type: "reasoning", text: "Should not be processed" },
  244. { type: "text", text: "Answer" },
  245. ],
  246. },
  247. ] as any[]
  248. const result = ProviderTransform.message(msgs, {
  249. id: "openai/gpt-4",
  250. providerID: "openai",
  251. api: {
  252. id: "gpt-4",
  253. url: "https://api.openai.com",
  254. npm: "@ai-sdk/openai",
  255. },
  256. name: "GPT-4",
  257. capabilities: {
  258. temperature: true,
  259. reasoning: false,
  260. attachment: true,
  261. toolcall: true,
  262. input: { text: true, audio: false, image: true, video: false, pdf: false },
  263. output: { text: true, audio: false, image: false, video: false, pdf: false },
  264. interleaved: false,
  265. },
  266. cost: {
  267. input: 0.03,
  268. output: 0.06,
  269. cache: { read: 0.001, write: 0.002 },
  270. },
  271. limit: {
  272. context: 128000,
  273. output: 4096,
  274. },
  275. status: "active",
  276. options: {},
  277. headers: {},
  278. })
  279. expect(result[0].content).toEqual([
  280. { type: "reasoning", text: "Should not be processed" },
  281. { type: "text", text: "Answer" },
  282. ])
  283. expect(result[0].providerOptions?.openaiCompatible?.reasoning_content).toBeUndefined()
  284. })
  285. })