// transform.test.ts

import { describe, expect, test } from "bun:test"
import { ProviderTransform } from "../../src/provider/transform"

const OUTPUT_TOKEN_MAX = 32000
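
// maxOutputTokens should return whichever is smaller: the model's own output limit or the 32k ceiling passed in as OUTPUT_TOKEN_MAX.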
describe("ProviderTransform.maxOutputTokens", () => {
  test("returns 32k when modelLimit > 32k", () => {
    const modelLimit = 100000
    const result = ProviderTransform.maxOutputTokens("@ai-sdk/openai", {}, modelLimit, OUTPUT_TOKEN_MAX)
    expect(result).toBe(OUTPUT_TOKEN_MAX)
  })

  test("returns modelLimit when modelLimit < 32k", () => {
    const modelLimit = 16000
    const result = ProviderTransform.maxOutputTokens("@ai-sdk/openai", {}, modelLimit, OUTPUT_TOKEN_MAX)
    expect(result).toBe(16000)
  })

  describe("azure", () => {
    test("returns 32k when modelLimit > 32k", () => {
      const modelLimit = 100000
      const result = ProviderTransform.maxOutputTokens("@ai-sdk/azure", {}, modelLimit, OUTPUT_TOKEN_MAX)
      expect(result).toBe(OUTPUT_TOKEN_MAX)
    })

    test("returns modelLimit when modelLimit < 32k", () => {
      const modelLimit = 16000
      const result = ProviderTransform.maxOutputTokens("@ai-sdk/azure", {}, modelLimit, OUTPUT_TOKEN_MAX)
      expect(result).toBe(16000)
    })
  })

  describe("bedrock", () => {
    test("returns 32k when modelLimit > 32k", () => {
      const modelLimit = 100000
      const result = ProviderTransform.maxOutputTokens("@ai-sdk/amazon-bedrock", {}, modelLimit, OUTPUT_TOKEN_MAX)
      expect(result).toBe(OUTPUT_TOKEN_MAX)
    })

    test("returns modelLimit when modelLimit < 32k", () => {
      const modelLimit = 16000
      const result = ProviderTransform.maxOutputTokens("@ai-sdk/amazon-bedrock", {}, modelLimit, OUTPUT_TOKEN_MAX)
      expect(result).toBe(16000)
    })
  })

  describe("anthropic without thinking options", () => {
    test("returns 32k when modelLimit > 32k", () => {
      const modelLimit = 100000
      const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", {}, modelLimit, OUTPUT_TOKEN_MAX)
      expect(result).toBe(OUTPUT_TOKEN_MAX)
    })

    test("returns modelLimit when modelLimit < 32k", () => {
      const modelLimit = 16000
      const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", {}, modelLimit, OUTPUT_TOKEN_MAX)
      expect(result).toBe(16000)
    })
  })
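
  // With extended thinking enabled, the thinking budget counts against the model's output limit,
  // so the returned cap must leave room for budgetTokens.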
  describe("anthropic with thinking options", () => {
    test("returns 32k when budgetTokens + 32k <= modelLimit", () => {
      const modelLimit = 100000
      const options = {
        thinking: {
          type: "enabled",
          budgetTokens: 10000,
        },
      }
      const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", options, modelLimit, OUTPUT_TOKEN_MAX)
      expect(result).toBe(OUTPUT_TOKEN_MAX)
    })

    test("returns modelLimit - budgetTokens when budgetTokens + 32k > modelLimit", () => {
      const modelLimit = 50000
      const options = {
        thinking: {
          type: "enabled",
          budgetTokens: 30000,
        },
      }
      const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", options, modelLimit, OUTPUT_TOKEN_MAX)
      expect(result).toBe(20000)
    })

    test("returns 32k when thinking type is not enabled", () => {
      const modelLimit = 100000
      const options = {
        thinking: {
          type: "disabled",
          budgetTokens: 10000,
        },
      }
      const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", options, modelLimit, OUTPUT_TOKEN_MAX)
      expect(result).toBe(OUTPUT_TOKEN_MAX)
    })
  })
})
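
// For DeepSeek-style models, reasoning parts emitted alongside tool calls are expected to be
// removed from the message content and carried in providerOptions.openaiCompatible.reasoning_content.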
describe("ProviderTransform.message - DeepSeek reasoning content", () => {
  test("DeepSeek with tool calls includes reasoning_content in providerOptions", () => {
    const msgs = [
      {
        role: "assistant",
        content: [
          { type: "reasoning", text: "Let me think about this..." },
          {
            type: "tool-call",
            toolCallId: "test",
            toolName: "bash",
            input: { command: "echo hello" },
          },
        ],
      },
    ] as any[]

    const result = ProviderTransform.message(msgs, {
      id: "deepseek/deepseek-chat",
      providerID: "deepseek",
      api: {
        id: "deepseek-chat",
        url: "https://api.deepseek.com",
        npm: "@ai-sdk/openai-compatible",
      },
      name: "DeepSeek Chat",
      capabilities: {
        temperature: true,
        reasoning: true,
        attachment: false,
        toolcall: true,
        input: { text: true, audio: false, image: false, video: false, pdf: false },
        output: { text: true, audio: false, image: false, video: false, pdf: false },
        interleaved: false,
      },
      cost: {
        input: 0.001,
        output: 0.002,
        cache: { read: 0.0001, write: 0.0002 },
      },
      limit: {
        context: 128000,
        output: 8192,
      },
      status: "active",
      options: {},
      headers: {},
      release_date: "2023-04-01",
    })

    expect(result).toHaveLength(1)
    expect(result[0].content).toEqual([
      {
        type: "tool-call",
        toolCallId: "test",
        toolName: "bash",
        input: { command: "echo hello" },
      },
    ])
    expect(result[0].providerOptions?.openaiCompatible?.reasoning_content).toBe("Let me think about this...")
  })

  test("DeepSeek model ID containing 'deepseek' matches (case insensitive)", () => {
    const msgs = [
      {
        role: "assistant",
        content: [
          { type: "reasoning", text: "Thinking..." },
          {
            type: "tool-call",
            toolCallId: "test",
            toolName: "get_weather",
            input: { location: "Hangzhou" },
          },
        ],
      },
    ] as any[]

    const result = ProviderTransform.message(msgs, {
      id: "someprovider/deepseek-reasoner",
      providerID: "someprovider",
      api: {
        id: "deepseek-reasoner",
        url: "https://api.someprovider.com",
        npm: "@ai-sdk/openai-compatible",
      },
      name: "SomeProvider DeepSeek Reasoner",
      capabilities: {
        temperature: true,
        reasoning: true,
        attachment: false,
        toolcall: true,
        input: { text: true, audio: false, image: false, video: false, pdf: false },
        output: { text: true, audio: false, image: false, video: false, pdf: false },
        interleaved: false,
      },
      cost: {
        input: 0.001,
        output: 0.002,
        cache: { read: 0.0001, write: 0.0002 },
      },
      limit: {
        context: 128000,
        output: 8192,
      },
      status: "active",
      options: {},
      headers: {},
      release_date: "2023-04-01",
    })

    expect(result[0].providerOptions?.openaiCompatible?.reasoning_content).toBe("Thinking...")
  })

  test("Non-DeepSeek providers leave reasoning content unchanged", () => {
    const msgs = [
      {
        role: "assistant",
        content: [
          { type: "reasoning", text: "Should not be processed" },
          { type: "text", text: "Answer" },
        ],
      },
    ] as any[]

    const result = ProviderTransform.message(msgs, {
      id: "openai/gpt-4",
      providerID: "openai",
      api: {
        id: "gpt-4",
        url: "https://api.openai.com",
        npm: "@ai-sdk/openai",
      },
      name: "GPT-4",
      capabilities: {
        temperature: true,
        reasoning: false,
        attachment: true,
        toolcall: true,
        input: { text: true, audio: false, image: true, video: false, pdf: false },
        output: { text: true, audio: false, image: false, video: false, pdf: false },
        interleaved: false,
      },
      cost: {
        input: 0.03,
        output: 0.06,
        cache: { read: 0.001, write: 0.002 },
      },
      limit: {
        context: 128000,
        output: 4096,
      },
      status: "active",
      options: {},
      headers: {},
      release_date: "2023-04-01",
    })

    expect(result[0].content).toEqual([
      { type: "reasoning", text: "Should not be processed" },
      { type: "text", text: "Answer" },
    ])
    expect(result[0].providerOptions?.openaiCompatible?.reasoning_content).toBeUndefined()
  })
})
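
// Empty base64 image parts are expected to be replaced with an explanatory error text part,
// while valid images pass through unchanged.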
describe("ProviderTransform.message - empty image handling", () => {
  const mockModel = {
    id: "anthropic/claude-3-5-sonnet",
    providerID: "anthropic",
    api: {
      id: "claude-3-5-sonnet-20241022",
      url: "https://api.anthropic.com",
      npm: "@ai-sdk/anthropic",
    },
    name: "Claude 3.5 Sonnet",
    capabilities: {
      temperature: true,
      reasoning: false,
      attachment: true,
      toolcall: true,
      input: { text: true, audio: false, image: true, video: false, pdf: true },
      output: { text: true, audio: false, image: false, video: false, pdf: false },
      interleaved: false,
    },
    cost: {
      input: 0.003,
      output: 0.015,
      cache: { read: 0.0003, write: 0.00375 },
    },
    limit: {
      context: 200000,
      output: 8192,
    },
    status: "active",
    options: {},
    headers: {},
  } as any

  test("should replace empty base64 image with error text", () => {
    const msgs = [
      {
        role: "user",
        content: [
          { type: "text", text: "What is in this image?" },
          { type: "image", image: "data:image/png;base64," },
        ],
      },
    ] as any[]

    const result = ProviderTransform.message(msgs, mockModel)

    expect(result).toHaveLength(1)
    expect(result[0].content).toHaveLength(2)
    expect(result[0].content[0]).toEqual({ type: "text", text: "What is in this image?" })
    expect(result[0].content[1]).toEqual({
      type: "text",
      text: "ERROR: Image file is empty or corrupted. Please provide a valid image.",
    })
  })

  test("should keep valid base64 images unchanged", () => {
    const validBase64 =
      "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNk+M9QDwADhgGAWjR9awAAAABJRU5ErkJggg=="
    const msgs = [
      {
        role: "user",
        content: [
          { type: "text", text: "What is in this image?" },
          { type: "image", image: `data:image/png;base64,${validBase64}` },
        ],
      },
    ] as any[]

    const result = ProviderTransform.message(msgs, mockModel)

    expect(result).toHaveLength(1)
    expect(result[0].content).toHaveLength(2)
    expect(result[0].content[0]).toEqual({ type: "text", text: "What is in this image?" })
    expect(result[0].content[1]).toEqual({ type: "image", image: `data:image/png;base64,${validBase64}` })
  })

  test("should handle mixed valid and empty images", () => {
    const validBase64 =
      "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNk+M9QDwADhgGAWjR9awAAAABJRU5ErkJggg=="
    const msgs = [
      {
        role: "user",
        content: [
          { type: "text", text: "Compare these images" },
          { type: "image", image: `data:image/png;base64,${validBase64}` },
          { type: "image", image: "data:image/jpeg;base64," },
        ],
      },
    ] as any[]

    const result = ProviderTransform.message(msgs, mockModel)

    expect(result).toHaveLength(1)
    expect(result[0].content).toHaveLength(3)
    expect(result[0].content[0]).toEqual({ type: "text", text: "Compare these images" })
    expect(result[0].content[1]).toEqual({ type: "image", image: `data:image/png;base64,${validBase64}` })
    expect(result[0].content[2]).toEqual({
      type: "text",
      text: "ERROR: Image file is empty or corrupted. Please provide a valid image.",
    })
  })
})