  1. import { describe, expect, test } from "bun:test"
  2. import { ProviderTransform } from "../../src/provider/transform"
  3. const OUTPUT_TOKEN_MAX = 32000
  4. describe("ProviderTransform.options - setCacheKey", () => {
  5. const sessionID = "test-session-123"
  6. const mockModel = {
  7. id: "anthropic/claude-3-5-sonnet",
  8. providerID: "anthropic",
  9. api: {
  10. id: "claude-3-5-sonnet-20241022",
  11. url: "https://api.anthropic.com",
  12. npm: "@ai-sdk/anthropic",
  13. },
  14. name: "Claude 3.5 Sonnet",
  15. capabilities: {
  16. temperature: true,
  17. reasoning: false,
  18. attachment: true,
  19. toolcall: true,
  20. input: { text: true, audio: false, image: true, video: false, pdf: true },
  21. output: { text: true, audio: false, image: false, video: false, pdf: false },
  22. interleaved: false,
  23. },
  24. cost: {
  25. input: 0.003,
  26. output: 0.015,
  27. cache: { read: 0.0003, write: 0.00375 },
  28. },
  29. limit: {
  30. context: 200000,
  31. output: 8192,
  32. },
  33. status: "active",
  34. options: {},
  35. headers: {},
  36. } as any
  37. test("should set promptCacheKey when providerOptions.setCacheKey is true", () => {
  38. const result = ProviderTransform.options({
  39. model: mockModel,
  40. sessionID,
  41. providerOptions: { setCacheKey: true },
  42. })
  43. expect(result.promptCacheKey).toBe(sessionID)
  44. })
  45. test("should not set promptCacheKey when providerOptions.setCacheKey is false", () => {
  46. const result = ProviderTransform.options({
  47. model: mockModel,
  48. sessionID,
  49. providerOptions: { setCacheKey: false },
  50. })
  51. expect(result.promptCacheKey).toBeUndefined()
  52. })
  53. test("should not set promptCacheKey when providerOptions is undefined", () => {
  54. const result = ProviderTransform.options({
  55. model: mockModel,
  56. sessionID,
  57. providerOptions: undefined,
  58. })
  59. expect(result.promptCacheKey).toBeUndefined()
  60. })
  61. test("should not set promptCacheKey when providerOptions does not have setCacheKey", () => {
  62. const result = ProviderTransform.options({ model: mockModel, sessionID, providerOptions: {} })
  63. expect(result.promptCacheKey).toBeUndefined()
  64. })
  65. test("should set promptCacheKey for openai provider regardless of setCacheKey", () => {
  66. const openaiModel = {
  67. ...mockModel,
  68. providerID: "openai",
  69. api: {
  70. id: "gpt-4",
  71. url: "https://api.openai.com",
  72. npm: "@ai-sdk/openai",
  73. },
  74. }
  75. const result = ProviderTransform.options({ model: openaiModel, sessionID, providerOptions: {} })
  76. expect(result.promptCacheKey).toBe(sessionID)
  77. })
  78. test("should set store=false for openai provider", () => {
  79. const openaiModel = {
  80. ...mockModel,
  81. providerID: "openai",
  82. api: {
  83. id: "gpt-4",
  84. url: "https://api.openai.com",
  85. npm: "@ai-sdk/openai",
  86. },
  87. }
  88. const result = ProviderTransform.options({
  89. model: openaiModel,
  90. sessionID,
  91. providerOptions: {},
  92. })
  93. expect(result.store).toBe(false)
  94. })
  95. })
  96. describe("ProviderTransform.options - gpt-5 textVerbosity", () => {
  97. const sessionID = "test-session-123"
  98. const createGpt5Model = (apiId: string) =>
  99. ({
  100. id: `openai/${apiId}`,
  101. providerID: "openai",
  102. api: {
  103. id: apiId,
  104. url: "https://api.openai.com",
  105. npm: "@ai-sdk/openai",
  106. },
  107. name: apiId,
  108. capabilities: {
  109. temperature: true,
  110. reasoning: true,
  111. attachment: true,
  112. toolcall: true,
  113. input: { text: true, audio: false, image: true, video: false, pdf: false },
  114. output: { text: true, audio: false, image: false, video: false, pdf: false },
  115. interleaved: false,
  116. },
  117. cost: { input: 0.03, output: 0.06, cache: { read: 0.001, write: 0.002 } },
  118. limit: { context: 128000, output: 4096 },
  119. status: "active",
  120. options: {},
  121. headers: {},
  122. }) as any
  123. test("gpt-5.2 should have textVerbosity set to low", () => {
  124. const model = createGpt5Model("gpt-5.2")
  125. const result = ProviderTransform.options({ model, sessionID, providerOptions: {} })
  126. expect(result.textVerbosity).toBe("low")
  127. })
  128. test("gpt-5.1 should have textVerbosity set to low", () => {
  129. const model = createGpt5Model("gpt-5.1")
  130. const result = ProviderTransform.options({ model, sessionID, providerOptions: {} })
  131. expect(result.textVerbosity).toBe("low")
  132. })
  133. test("gpt-5.2-chat-latest should NOT have textVerbosity set (only supports medium)", () => {
  134. const model = createGpt5Model("gpt-5.2-chat-latest")
  135. const result = ProviderTransform.options({ model, sessionID, providerOptions: {} })
  136. expect(result.textVerbosity).toBeUndefined()
  137. })
  138. test("gpt-5.1-chat-latest should NOT have textVerbosity set (only supports medium)", () => {
  139. const model = createGpt5Model("gpt-5.1-chat-latest")
  140. const result = ProviderTransform.options({ model, sessionID, providerOptions: {} })
  141. expect(result.textVerbosity).toBeUndefined()
  142. })
  143. test("gpt-5.2-chat should NOT have textVerbosity set", () => {
  144. const model = createGpt5Model("gpt-5.2-chat")
  145. const result = ProviderTransform.options({ model, sessionID, providerOptions: {} })
  146. expect(result.textVerbosity).toBeUndefined()
  147. })
  148. test("gpt-5-chat should NOT have textVerbosity set", () => {
  149. const model = createGpt5Model("gpt-5-chat")
  150. const result = ProviderTransform.options({ model, sessionID, providerOptions: {} })
  151. expect(result.textVerbosity).toBeUndefined()
  152. })
  153. test("gpt-5.2-codex should NOT have textVerbosity set (codex models excluded)", () => {
  154. const model = createGpt5Model("gpt-5.2-codex")
  155. const result = ProviderTransform.options({ model, sessionID, providerOptions: {} })
  156. expect(result.textVerbosity).toBeUndefined()
  157. })
  158. })
  159. describe("ProviderTransform.maxOutputTokens", () => {
  160. test("returns 32k when modelLimit > 32k", () => {
  161. const modelLimit = 100000
  162. const result = ProviderTransform.maxOutputTokens("@ai-sdk/openai", {}, modelLimit, OUTPUT_TOKEN_MAX)
  163. expect(result).toBe(OUTPUT_TOKEN_MAX)
  164. })
  165. test("returns modelLimit when modelLimit < 32k", () => {
  166. const modelLimit = 16000
  167. const result = ProviderTransform.maxOutputTokens("@ai-sdk/openai", {}, modelLimit, OUTPUT_TOKEN_MAX)
  168. expect(result).toBe(16000)
  169. })
  170. describe("azure", () => {
  171. test("returns 32k when modelLimit > 32k", () => {
  172. const modelLimit = 100000
  173. const result = ProviderTransform.maxOutputTokens("@ai-sdk/azure", {}, modelLimit, OUTPUT_TOKEN_MAX)
  174. expect(result).toBe(OUTPUT_TOKEN_MAX)
  175. })
  176. test("returns modelLimit when modelLimit < 32k", () => {
  177. const modelLimit = 16000
  178. const result = ProviderTransform.maxOutputTokens("@ai-sdk/azure", {}, modelLimit, OUTPUT_TOKEN_MAX)
  179. expect(result).toBe(16000)
  180. })
  181. })
  182. describe("bedrock", () => {
  183. test("returns 32k when modelLimit > 32k", () => {
  184. const modelLimit = 100000
  185. const result = ProviderTransform.maxOutputTokens("@ai-sdk/amazon-bedrock", {}, modelLimit, OUTPUT_TOKEN_MAX)
  186. expect(result).toBe(OUTPUT_TOKEN_MAX)
  187. })
  188. test("returns modelLimit when modelLimit < 32k", () => {
  189. const modelLimit = 16000
  190. const result = ProviderTransform.maxOutputTokens("@ai-sdk/amazon-bedrock", {}, modelLimit, OUTPUT_TOKEN_MAX)
  191. expect(result).toBe(16000)
  192. })
  193. })
  194. describe("anthropic without thinking options", () => {
  195. test("returns 32k when modelLimit > 32k", () => {
  196. const modelLimit = 100000
  197. const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", {}, modelLimit, OUTPUT_TOKEN_MAX)
  198. expect(result).toBe(OUTPUT_TOKEN_MAX)
  199. })
  200. test("returns modelLimit when modelLimit < 32k", () => {
  201. const modelLimit = 16000
  202. const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", {}, modelLimit, OUTPUT_TOKEN_MAX)
  203. expect(result).toBe(16000)
  204. })
  205. })
  206. describe("anthropic with thinking options", () => {
  207. test("returns 32k when budgetTokens + 32k <= modelLimit", () => {
  208. const modelLimit = 100000
  209. const options = {
  210. thinking: {
  211. type: "enabled",
  212. budgetTokens: 10000,
  213. },
  214. }
  215. const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", options, modelLimit, OUTPUT_TOKEN_MAX)
  216. expect(result).toBe(OUTPUT_TOKEN_MAX)
  217. })
  218. test("returns modelLimit - budgetTokens when budgetTokens + 32k > modelLimit", () => {
  219. const modelLimit = 50000
  220. const options = {
  221. thinking: {
  222. type: "enabled",
  223. budgetTokens: 30000,
  224. },
  225. }
  226. const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", options, modelLimit, OUTPUT_TOKEN_MAX)
  227. expect(result).toBe(20000)
  228. })
  229. test("returns 32k when thinking type is not enabled", () => {
  230. const modelLimit = 100000
  231. const options = {
  232. thinking: {
  233. type: "disabled",
  234. budgetTokens: 10000,
  235. },
  236. }
  237. const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", options, modelLimit, OUTPUT_TOKEN_MAX)
  238. expect(result).toBe(OUTPUT_TOKEN_MAX)
  239. })
  240. })
  241. })
  242. describe("ProviderTransform.schema - gemini array items", () => {
  243. test("adds missing items for array properties", () => {
  244. const geminiModel = {
  245. providerID: "google",
  246. api: {
  247. id: "gemini-3-pro",
  248. },
  249. } as any
  250. const schema = {
  251. type: "object",
  252. properties: {
  253. nodes: { type: "array" },
  254. edges: { type: "array", items: { type: "string" } },
  255. },
  256. } as any
  257. const result = ProviderTransform.schema(geminiModel, schema) as any
  258. expect(result.properties.nodes.items).toBeDefined()
  259. expect(result.properties.edges.items.type).toBe("string")
  260. })
  261. })
  262. describe("ProviderTransform.schema - gemini non-object properties removal", () => {
  263. const geminiModel = {
  264. providerID: "google",
  265. api: {
  266. id: "gemini-3-pro",
  267. },
  268. } as any
  269. test("removes properties from non-object types", () => {
  270. const schema = {
  271. type: "object",
  272. properties: {
  273. data: {
  274. type: "string",
  275. properties: { invalid: { type: "string" } },
  276. },
  277. },
  278. } as any
  279. const result = ProviderTransform.schema(geminiModel, schema) as any
  280. expect(result.properties.data.type).toBe("string")
  281. expect(result.properties.data.properties).toBeUndefined()
  282. })
  283. test("removes required from non-object types", () => {
  284. const schema = {
  285. type: "object",
  286. properties: {
  287. data: {
  288. type: "array",
  289. items: { type: "string" },
  290. required: ["invalid"],
  291. },
  292. },
  293. } as any
  294. const result = ProviderTransform.schema(geminiModel, schema) as any
  295. expect(result.properties.data.type).toBe("array")
  296. expect(result.properties.data.required).toBeUndefined()
  297. })
  298. test("removes properties and required from nested non-object types", () => {
  299. const schema = {
  300. type: "object",
  301. properties: {
  302. outer: {
  303. type: "object",
  304. properties: {
  305. inner: {
  306. type: "number",
  307. properties: { bad: { type: "string" } },
  308. required: ["bad"],
  309. },
  310. },
  311. },
  312. },
  313. } as any
  314. const result = ProviderTransform.schema(geminiModel, schema) as any
  315. expect(result.properties.outer.properties.inner.type).toBe("number")
  316. expect(result.properties.outer.properties.inner.properties).toBeUndefined()
  317. expect(result.properties.outer.properties.inner.required).toBeUndefined()
  318. })
  319. test("keeps properties and required on object types", () => {
  320. const schema = {
  321. type: "object",
  322. properties: {
  323. data: {
  324. type: "object",
  325. properties: { name: { type: "string" } },
  326. required: ["name"],
  327. },
  328. },
  329. } as any
  330. const result = ProviderTransform.schema(geminiModel, schema) as any
  331. expect(result.properties.data.type).toBe("object")
  332. expect(result.properties.data.properties).toBeDefined()
  333. expect(result.properties.data.required).toEqual(["name"])
  334. })
  335. test("does not affect non-gemini providers", () => {
  336. const openaiModel = {
  337. providerID: "openai",
  338. api: {
  339. id: "gpt-4",
  340. },
  341. } as any
  342. const schema = {
  343. type: "object",
  344. properties: {
  345. data: {
  346. type: "string",
  347. properties: { invalid: { type: "string" } },
  348. },
  349. },
  350. } as any
  351. const result = ProviderTransform.schema(openaiModel, schema) as any
  352. expect(result.properties.data.properties).toBeDefined()
  353. })
  354. })
  355. describe("ProviderTransform.message - DeepSeek reasoning content", () => {
  356. test("DeepSeek with tool calls includes reasoning_content in providerOptions", () => {
  357. const msgs = [
  358. {
  359. role: "assistant",
  360. content: [
  361. { type: "reasoning", text: "Let me think about this..." },
  362. {
  363. type: "tool-call",
  364. toolCallId: "test",
  365. toolName: "bash",
  366. input: { command: "echo hello" },
  367. },
  368. ],
  369. },
  370. ] as any[]
  371. const result = ProviderTransform.message(
  372. msgs,
  373. {
  374. id: "deepseek/deepseek-chat",
  375. providerID: "deepseek",
  376. api: {
  377. id: "deepseek-chat",
  378. url: "https://api.deepseek.com",
  379. npm: "@ai-sdk/openai-compatible",
  380. },
  381. name: "DeepSeek Chat",
  382. capabilities: {
  383. temperature: true,
  384. reasoning: true,
  385. attachment: false,
  386. toolcall: true,
  387. input: { text: true, audio: false, image: false, video: false, pdf: false },
  388. output: { text: true, audio: false, image: false, video: false, pdf: false },
  389. interleaved: {
  390. field: "reasoning_content",
  391. },
  392. },
  393. cost: {
  394. input: 0.001,
  395. output: 0.002,
  396. cache: { read: 0.0001, write: 0.0002 },
  397. },
  398. limit: {
  399. context: 128000,
  400. output: 8192,
  401. },
  402. status: "active",
  403. options: {},
  404. headers: {},
  405. release_date: "2023-04-01",
  406. },
  407. {},
  408. )
  409. expect(result).toHaveLength(1)
  410. expect(result[0].content).toEqual([
  411. {
  412. type: "tool-call",
  413. toolCallId: "test",
  414. toolName: "bash",
  415. input: { command: "echo hello" },
  416. },
  417. ])
  418. expect(result[0].providerOptions?.openaiCompatible?.reasoning_content).toBe("Let me think about this...")
  419. })
  420. test("Non-DeepSeek providers leave reasoning content unchanged", () => {
  421. const msgs = [
  422. {
  423. role: "assistant",
  424. content: [
  425. { type: "reasoning", text: "Should not be processed" },
  426. { type: "text", text: "Answer" },
  427. ],
  428. },
  429. ] as any[]
  430. const result = ProviderTransform.message(
  431. msgs,
  432. {
  433. id: "openai/gpt-4",
  434. providerID: "openai",
  435. api: {
  436. id: "gpt-4",
  437. url: "https://api.openai.com",
  438. npm: "@ai-sdk/openai",
  439. },
  440. name: "GPT-4",
  441. capabilities: {
  442. temperature: true,
  443. reasoning: false,
  444. attachment: true,
  445. toolcall: true,
  446. input: { text: true, audio: false, image: true, video: false, pdf: false },
  447. output: { text: true, audio: false, image: false, video: false, pdf: false },
  448. interleaved: false,
  449. },
  450. cost: {
  451. input: 0.03,
  452. output: 0.06,
  453. cache: { read: 0.001, write: 0.002 },
  454. },
  455. limit: {
  456. context: 128000,
  457. output: 4096,
  458. },
  459. status: "active",
  460. options: {},
  461. headers: {},
  462. release_date: "2023-04-01",
  463. },
  464. {},
  465. )
  466. expect(result[0].content).toEqual([
  467. { type: "reasoning", text: "Should not be processed" },
  468. { type: "text", text: "Answer" },
  469. ])
  470. expect(result[0].providerOptions?.openaiCompatible?.reasoning_content).toBeUndefined()
  471. })
  472. })
  473. describe("ProviderTransform.message - empty image handling", () => {
  474. const mockModel = {
  475. id: "anthropic/claude-3-5-sonnet",
  476. providerID: "anthropic",
  477. api: {
  478. id: "claude-3-5-sonnet-20241022",
  479. url: "https://api.anthropic.com",
  480. npm: "@ai-sdk/anthropic",
  481. },
  482. name: "Claude 3.5 Sonnet",
  483. capabilities: {
  484. temperature: true,
  485. reasoning: false,
  486. attachment: true,
  487. toolcall: true,
  488. input: { text: true, audio: false, image: true, video: false, pdf: true },
  489. output: { text: true, audio: false, image: false, video: false, pdf: false },
  490. interleaved: false,
  491. },
  492. cost: {
  493. input: 0.003,
  494. output: 0.015,
  495. cache: { read: 0.0003, write: 0.00375 },
  496. },
  497. limit: {
  498. context: 200000,
  499. output: 8192,
  500. },
  501. status: "active",
  502. options: {},
  503. headers: {},
  504. } as any
  505. test("should replace empty base64 image with error text", () => {
  506. const msgs = [
  507. {
  508. role: "user",
  509. content: [
  510. { type: "text", text: "What is in this image?" },
  511. { type: "image", image: "data:image/png;base64," },
  512. ],
  513. },
  514. ] as any[]
  515. const result = ProviderTransform.message(msgs, mockModel, {})
  516. expect(result).toHaveLength(1)
  517. expect(result[0].content).toHaveLength(2)
  518. expect(result[0].content[0]).toEqual({ type: "text", text: "What is in this image?" })
  519. expect(result[0].content[1]).toEqual({
  520. type: "text",
  521. text: "ERROR: Image file is empty or corrupted. Please provide a valid image.",
  522. })
  523. })
  524. test("should keep valid base64 images unchanged", () => {
  525. const validBase64 =
  526. "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNk+M9QDwADhgGAWjR9awAAAABJRU5ErkJggg=="
  527. const msgs = [
  528. {
  529. role: "user",
  530. content: [
  531. { type: "text", text: "What is in this image?" },
  532. { type: "image", image: `data:image/png;base64,${validBase64}` },
  533. ],
  534. },
  535. ] as any[]
  536. const result = ProviderTransform.message(msgs, mockModel, {})
  537. expect(result).toHaveLength(1)
  538. expect(result[0].content).toHaveLength(2)
  539. expect(result[0].content[0]).toEqual({ type: "text", text: "What is in this image?" })
  540. expect(result[0].content[1]).toEqual({ type: "image", image: `data:image/png;base64,${validBase64}` })
  541. })
  542. test("should handle mixed valid and empty images", () => {
  543. const validBase64 =
  544. "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNk+M9QDwADhgGAWjR9awAAAABJRU5ErkJggg=="
  545. const msgs = [
  546. {
  547. role: "user",
  548. content: [
  549. { type: "text", text: "Compare these images" },
  550. { type: "image", image: `data:image/png;base64,${validBase64}` },
  551. { type: "image", image: "data:image/jpeg;base64," },
  552. ],
  553. },
  554. ] as any[]
  555. const result = ProviderTransform.message(msgs, mockModel, {})
  556. expect(result).toHaveLength(1)
  557. expect(result[0].content).toHaveLength(3)
  558. expect(result[0].content[0]).toEqual({ type: "text", text: "Compare these images" })
  559. expect(result[0].content[1]).toEqual({ type: "image", image: `data:image/png;base64,${validBase64}` })
  560. expect(result[0].content[2]).toEqual({
  561. type: "text",
  562. text: "ERROR: Image file is empty or corrupted. Please provide a valid image.",
  563. })
  564. })
  565. })
  566. describe("ProviderTransform.message - anthropic empty content filtering", () => {
  567. const anthropicModel = {
  568. id: "anthropic/claude-3-5-sonnet",
  569. providerID: "anthropic",
  570. api: {
  571. id: "claude-3-5-sonnet-20241022",
  572. url: "https://api.anthropic.com",
  573. npm: "@ai-sdk/anthropic",
  574. },
  575. name: "Claude 3.5 Sonnet",
  576. capabilities: {
  577. temperature: true,
  578. reasoning: false,
  579. attachment: true,
  580. toolcall: true,
  581. input: { text: true, audio: false, image: true, video: false, pdf: true },
  582. output: { text: true, audio: false, image: false, video: false, pdf: false },
  583. interleaved: false,
  584. },
  585. cost: {
  586. input: 0.003,
  587. output: 0.015,
  588. cache: { read: 0.0003, write: 0.00375 },
  589. },
  590. limit: {
  591. context: 200000,
  592. output: 8192,
  593. },
  594. status: "active",
  595. options: {},
  596. headers: {},
  597. } as any
  598. test("filters out messages with empty string content", () => {
  599. const msgs = [
  600. { role: "user", content: "Hello" },
  601. { role: "assistant", content: "" },
  602. { role: "user", content: "World" },
  603. ] as any[]
  604. const result = ProviderTransform.message(msgs, anthropicModel, {})
  605. expect(result).toHaveLength(2)
  606. expect(result[0].content).toBe("Hello")
  607. expect(result[1].content).toBe("World")
  608. })
  609. test("filters out empty text parts from array content", () => {
  610. const msgs = [
  611. {
  612. role: "assistant",
  613. content: [
  614. { type: "text", text: "" },
  615. { type: "text", text: "Hello" },
  616. { type: "text", text: "" },
  617. ],
  618. },
  619. ] as any[]
  620. const result = ProviderTransform.message(msgs, anthropicModel, {})
  621. expect(result).toHaveLength(1)
  622. expect(result[0].content).toHaveLength(1)
  623. expect(result[0].content[0]).toEqual({ type: "text", text: "Hello" })
  624. })
  625. test("filters out empty reasoning parts from array content", () => {
  626. const msgs = [
  627. {
  628. role: "assistant",
  629. content: [
  630. { type: "reasoning", text: "" },
  631. { type: "text", text: "Answer" },
  632. { type: "reasoning", text: "" },
  633. ],
  634. },
  635. ] as any[]
  636. const result = ProviderTransform.message(msgs, anthropicModel, {})
  637. expect(result).toHaveLength(1)
  638. expect(result[0].content).toHaveLength(1)
  639. expect(result[0].content[0]).toEqual({ type: "text", text: "Answer" })
  640. })
  641. test("removes entire message when all parts are empty", () => {
  642. const msgs = [
  643. { role: "user", content: "Hello" },
  644. {
  645. role: "assistant",
  646. content: [
  647. { type: "text", text: "" },
  648. { type: "reasoning", text: "" },
  649. ],
  650. },
  651. { role: "user", content: "World" },
  652. ] as any[]
  653. const result = ProviderTransform.message(msgs, anthropicModel, {})
  654. expect(result).toHaveLength(2)
  655. expect(result[0].content).toBe("Hello")
  656. expect(result[1].content).toBe("World")
  657. })
  658. test("keeps non-text/reasoning parts even if text parts are empty", () => {
  659. const msgs = [
  660. {
  661. role: "assistant",
  662. content: [
  663. { type: "text", text: "" },
  664. { type: "tool-call", toolCallId: "123", toolName: "bash", input: { command: "ls" } },
  665. ],
  666. },
  667. ] as any[]
  668. const result = ProviderTransform.message(msgs, anthropicModel, {})
  669. expect(result).toHaveLength(1)
  670. expect(result[0].content).toHaveLength(1)
  671. expect(result[0].content[0]).toEqual({
  672. type: "tool-call",
  673. toolCallId: "123",
  674. toolName: "bash",
  675. input: { command: "ls" },
  676. })
  677. })
  678. test("keeps messages with valid text alongside empty parts", () => {
  679. const msgs = [
  680. {
  681. role: "assistant",
  682. content: [
  683. { type: "reasoning", text: "Thinking..." },
  684. { type: "text", text: "" },
  685. { type: "text", text: "Result" },
  686. ],
  687. },
  688. ] as any[]
  689. const result = ProviderTransform.message(msgs, anthropicModel, {})
  690. expect(result).toHaveLength(1)
  691. expect(result[0].content).toHaveLength(2)
  692. expect(result[0].content[0]).toEqual({ type: "reasoning", text: "Thinking..." })
  693. expect(result[0].content[1]).toEqual({ type: "text", text: "Result" })
  694. })
  695. test("does not filter for non-anthropic providers", () => {
  696. const openaiModel = {
  697. ...anthropicModel,
  698. providerID: "openai",
  699. api: {
  700. id: "gpt-4",
  701. url: "https://api.openai.com",
  702. npm: "@ai-sdk/openai",
  703. },
  704. }
  705. const msgs = [
  706. { role: "assistant", content: "" },
  707. {
  708. role: "assistant",
  709. content: [{ type: "text", text: "" }],
  710. },
  711. ] as any[]
  712. const result = ProviderTransform.message(msgs, openaiModel, {})
  713. expect(result).toHaveLength(2)
  714. expect(result[0].content).toBe("")
  715. expect(result[1].content).toHaveLength(1)
  716. })
  717. })
  718. describe("ProviderTransform.message - strip openai metadata when store=false", () => {
  719. const openaiModel = {
  720. id: "openai/gpt-5",
  721. providerID: "openai",
  722. api: {
  723. id: "gpt-5",
  724. url: "https://api.openai.com",
  725. npm: "@ai-sdk/openai",
  726. },
  727. name: "GPT-5",
  728. capabilities: {
  729. temperature: true,
  730. reasoning: true,
  731. attachment: true,
  732. toolcall: true,
  733. input: { text: true, audio: false, image: true, video: false, pdf: false },
  734. output: { text: true, audio: false, image: false, video: false, pdf: false },
  735. interleaved: false,
  736. },
  737. cost: { input: 0.03, output: 0.06, cache: { read: 0.001, write: 0.002 } },
  738. limit: { context: 128000, output: 4096 },
  739. status: "active",
  740. options: {},
  741. headers: {},
  742. } as any
  743. test("preserves itemId and reasoningEncryptedContent when store=false", () => {
  744. const msgs = [
  745. {
  746. role: "assistant",
  747. content: [
  748. {
  749. type: "reasoning",
  750. text: "thinking...",
  751. providerOptions: {
  752. openai: {
  753. itemId: "rs_123",
  754. reasoningEncryptedContent: "encrypted",
  755. },
  756. },
  757. },
  758. {
  759. type: "text",
  760. text: "Hello",
  761. providerOptions: {
  762. openai: {
  763. itemId: "msg_456",
  764. },
  765. },
  766. },
  767. ],
  768. },
  769. ] as any[]
  770. const result = ProviderTransform.message(msgs, openaiModel, { store: false }) as any[]
  771. expect(result).toHaveLength(1)
  772. expect(result[0].content[0].providerOptions?.openai?.itemId).toBe("rs_123")
  773. expect(result[0].content[1].providerOptions?.openai?.itemId).toBe("msg_456")
  774. })
  775. test("preserves itemId and reasoningEncryptedContent when store=false even when not openai", () => {
  776. const zenModel = {
  777. ...openaiModel,
  778. providerID: "zen",
  779. }
  780. const msgs = [
  781. {
  782. role: "assistant",
  783. content: [
  784. {
  785. type: "reasoning",
  786. text: "thinking...",
  787. providerOptions: {
  788. openai: {
  789. itemId: "rs_123",
  790. reasoningEncryptedContent: "encrypted",
  791. },
  792. },
  793. },
  794. {
  795. type: "text",
  796. text: "Hello",
  797. providerOptions: {
  798. openai: {
  799. itemId: "msg_456",
  800. },
  801. },
  802. },
  803. ],
  804. },
  805. ] as any[]
  806. const result = ProviderTransform.message(msgs, zenModel, { store: false }) as any[]
  807. expect(result).toHaveLength(1)
  808. expect(result[0].content[0].providerOptions?.openai?.itemId).toBe("rs_123")
  809. expect(result[0].content[1].providerOptions?.openai?.itemId).toBe("msg_456")
  810. })
  811. test("preserves other openai options including itemId", () => {
  812. const msgs = [
  813. {
  814. role: "assistant",
  815. content: [
  816. {
  817. type: "text",
  818. text: "Hello",
  819. providerOptions: {
  820. openai: {
  821. itemId: "msg_123",
  822. otherOption: "value",
  823. },
  824. },
  825. },
  826. ],
  827. },
  828. ] as any[]
  829. const result = ProviderTransform.message(msgs, openaiModel, { store: false }) as any[]
  830. expect(result[0].content[0].providerOptions?.openai?.itemId).toBe("msg_123")
  831. expect(result[0].content[0].providerOptions?.openai?.otherOption).toBe("value")
  832. })
  833. test("preserves metadata for openai package when store is true", () => {
  834. const msgs = [
  835. {
  836. role: "assistant",
  837. content: [
  838. {
  839. type: "text",
  840. text: "Hello",
  841. providerOptions: {
  842. openai: {
  843. itemId: "msg_123",
  844. },
  845. },
  846. },
  847. ],
  848. },
  849. ] as any[]
  850. // openai package preserves itemId regardless of store value
  851. const result = ProviderTransform.message(msgs, openaiModel, { store: true }) as any[]
  852. expect(result[0].content[0].providerOptions?.openai?.itemId).toBe("msg_123")
  853. })
  854. test("preserves metadata for non-openai packages when store is false", () => {
  855. const anthropicModel = {
  856. ...openaiModel,
  857. providerID: "anthropic",
  858. api: {
  859. id: "claude-3",
  860. url: "https://api.anthropic.com",
  861. npm: "@ai-sdk/anthropic",
  862. },
  863. }
  864. const msgs = [
  865. {
  866. role: "assistant",
  867. content: [
  868. {
  869. type: "text",
  870. text: "Hello",
  871. providerOptions: {
  872. openai: {
  873. itemId: "msg_123",
  874. },
  875. },
  876. },
  877. ],
  878. },
  879. ] as any[]
  880. // store=false preserves metadata for non-openai packages
  881. const result = ProviderTransform.message(msgs, anthropicModel, { store: false }) as any[]
  882. expect(result[0].content[0].providerOptions?.openai?.itemId).toBe("msg_123")
  883. })
  884. test("preserves metadata using providerID key when store is false", () => {
  885. const opencodeModel = {
  886. ...openaiModel,
  887. providerID: "opencode",
  888. api: {
  889. id: "opencode-test",
  890. url: "https://api.opencode.ai",
  891. npm: "@ai-sdk/openai-compatible",
  892. },
  893. }
  894. const msgs = [
  895. {
  896. role: "assistant",
  897. content: [
  898. {
  899. type: "text",
  900. text: "Hello",
  901. providerOptions: {
  902. opencode: {
  903. itemId: "msg_123",
  904. otherOption: "value",
  905. },
  906. },
  907. },
  908. ],
  909. },
  910. ] as any[]
  911. const result = ProviderTransform.message(msgs, opencodeModel, { store: false }) as any[]
  912. expect(result[0].content[0].providerOptions?.opencode?.itemId).toBe("msg_123")
  913. expect(result[0].content[0].providerOptions?.opencode?.otherOption).toBe("value")
  914. })
  915. test("preserves itemId across all providerOptions keys", () => {
  916. const opencodeModel = {
  917. ...openaiModel,
  918. providerID: "opencode",
  919. api: {
  920. id: "opencode-test",
  921. url: "https://api.opencode.ai",
  922. npm: "@ai-sdk/openai-compatible",
  923. },
  924. }
  925. const msgs = [
  926. {
  927. role: "assistant",
  928. providerOptions: {
  929. openai: { itemId: "msg_root" },
  930. opencode: { itemId: "msg_opencode" },
  931. extra: { itemId: "msg_extra" },
  932. },
  933. content: [
  934. {
  935. type: "text",
  936. text: "Hello",
  937. providerOptions: {
  938. openai: { itemId: "msg_openai_part" },
  939. opencode: { itemId: "msg_opencode_part" },
  940. extra: { itemId: "msg_extra_part" },
  941. },
  942. },
  943. ],
  944. },
  945. ] as any[]
  946. const result = ProviderTransform.message(msgs, opencodeModel, { store: false }) as any[]
  947. expect(result[0].providerOptions?.openai?.itemId).toBe("msg_root")
  948. expect(result[0].providerOptions?.opencode?.itemId).toBe("msg_opencode")
  949. expect(result[0].providerOptions?.extra?.itemId).toBe("msg_extra")
  950. expect(result[0].content[0].providerOptions?.openai?.itemId).toBe("msg_openai_part")
  951. expect(result[0].content[0].providerOptions?.opencode?.itemId).toBe("msg_opencode_part")
  952. expect(result[0].content[0].providerOptions?.extra?.itemId).toBe("msg_extra_part")
  953. })
  954. test("does not strip metadata for non-openai packages when store is not false", () => {
  955. const anthropicModel = {
  956. ...openaiModel,
  957. providerID: "anthropic",
  958. api: {
  959. id: "claude-3",
  960. url: "https://api.anthropic.com",
  961. npm: "@ai-sdk/anthropic",
  962. },
  963. }
  964. const msgs = [
  965. {
  966. role: "assistant",
  967. content: [
  968. {
  969. type: "text",
  970. text: "Hello",
  971. providerOptions: {
  972. openai: {
  973. itemId: "msg_123",
  974. },
  975. },
  976. },
  977. ],
  978. },
  979. ] as any[]
  980. const result = ProviderTransform.message(msgs, anthropicModel, {}) as any[]
  981. expect(result[0].content[0].providerOptions?.openai?.itemId).toBe("msg_123")
  982. })
  983. })
  984. describe("ProviderTransform.message - providerOptions key remapping", () => {
  985. const createModel = (providerID: string, npm: string) =>
  986. ({
  987. id: `${providerID}/test-model`,
  988. providerID,
  989. api: {
  990. id: "test-model",
  991. url: "https://api.test.com",
  992. npm,
  993. },
  994. name: "Test Model",
  995. capabilities: {
  996. temperature: true,
  997. reasoning: false,
  998. attachment: true,
  999. toolcall: true,
  1000. input: { text: true, audio: false, image: true, video: false, pdf: true },
  1001. output: { text: true, audio: false, image: false, video: false, pdf: false },
  1002. interleaved: false,
  1003. },
  1004. cost: { input: 0.001, output: 0.002, cache: { read: 0.0001, write: 0.0002 } },
  1005. limit: { context: 128000, output: 8192 },
  1006. status: "active",
  1007. options: {},
  1008. headers: {},
  1009. }) as any
  1010. test("azure keeps 'azure' key and does not remap to 'openai'", () => {
  1011. const model = createModel("azure", "@ai-sdk/azure")
  1012. const msgs = [
  1013. {
  1014. role: "user",
  1015. content: "Hello",
  1016. providerOptions: {
  1017. azure: { someOption: "value" },
  1018. },
  1019. },
  1020. ] as any[]
  1021. const result = ProviderTransform.message(msgs, model, {})
  1022. expect(result[0].providerOptions?.azure).toEqual({ someOption: "value" })
  1023. expect(result[0].providerOptions?.openai).toBeUndefined()
  1024. })
  1025. test("copilot remaps providerID to 'copilot' key", () => {
  1026. const model = createModel("github-copilot", "@ai-sdk/github-copilot")
  1027. const msgs = [
  1028. {
  1029. role: "user",
  1030. content: "Hello",
  1031. providerOptions: {
  1032. copilot: { someOption: "value" },
  1033. },
  1034. },
  1035. ] as any[]
  1036. const result = ProviderTransform.message(msgs, model, {})
  1037. expect(result[0].providerOptions?.copilot).toEqual({ someOption: "value" })
  1038. expect(result[0].providerOptions?.["github-copilot"]).toBeUndefined()
  1039. })
  1040. test("bedrock remaps providerID to 'bedrock' key", () => {
  1041. const model = createModel("my-bedrock", "@ai-sdk/amazon-bedrock")
  1042. const msgs = [
  1043. {
  1044. role: "user",
  1045. content: "Hello",
  1046. providerOptions: {
  1047. "my-bedrock": { someOption: "value" },
  1048. },
  1049. },
  1050. ] as any[]
  1051. const result = ProviderTransform.message(msgs, model, {})
  1052. expect(result[0].providerOptions?.bedrock).toEqual({ someOption: "value" })
  1053. expect(result[0].providerOptions?.["my-bedrock"]).toBeUndefined()
  1054. })
  1055. })
  1056. describe("ProviderTransform.message - claude w/bedrock custom inference profile", () => {
  1057. test("adds cachePoint", () => {
  1058. const model = {
  1059. id: "amazon-bedrock/custom-claude-sonnet-4.5",
  1060. providerID: "amazon-bedrock",
  1061. api: {
  1062. id: "arn:aws:bedrock:xxx:yyy:application-inference-profile/zzz",
  1063. url: "https://api.test.com",
  1064. npm: "@ai-sdk/amazon-bedrock",
  1065. },
  1066. name: "Custom inference profile",
  1067. capabilities: {},
  1068. options: {},
  1069. headers: {},
  1070. } as any
  1071. const msgs = [
  1072. {
  1073. role: "user",
  1074. content: "Hello",
  1075. },
  1076. ] as any[]
  1077. const result = ProviderTransform.message(msgs, model, {})
  1078. expect(result[0].providerOptions?.bedrock).toEqual(
  1079. expect.objectContaining({
  1080. cachePoint: {
  1081. type: "default",
  1082. },
  1083. }),
  1084. )
  1085. })
  1086. })
// ProviderTransform.variants maps a model to its reasoning-effort variant
// presets. The expected shape is keyed off the SDK package (model.api.npm),
// and within a package sometimes off the model id or release_date.
describe("ProviderTransform.variants", () => {
  // Reasoning-capable model stub; tests override id/providerID/api per case.
  const createMockModel = (overrides: Partial<any> = {}): any => ({
    id: "test/test-model",
    providerID: "test",
    api: {
      id: "test-model",
      url: "https://api.test.com",
      npm: "@ai-sdk/openai",
    },
    name: "Test Model",
    capabilities: {
      temperature: true,
      reasoning: true,
      attachment: true,
      toolcall: true,
      input: { text: true, audio: false, image: true, video: false, pdf: false },
      output: { text: true, audio: false, image: false, video: false, pdf: false },
      interleaved: false,
    },
    cost: {
      input: 0.001,
      output: 0.002,
      cache: { read: 0.0001, write: 0.0002 },
    },
    limit: {
      context: 200_000,
      output: 64_000,
    },
    status: "active",
    options: {},
    headers: {},
    release_date: "2024-01-01",
    ...overrides,
  })
  test("returns empty object when model has no reasoning capabilities", () => {
    const model = createMockModel({
      capabilities: { reasoning: false },
    })
    const result = ProviderTransform.variants(model)
    expect(result).toEqual({})
  })
  // The following providers expose no effort variants even with reasoning on.
  test("deepseek returns empty object", () => {
    const model = createMockModel({
      id: "deepseek/deepseek-chat",
      providerID: "deepseek",
      api: {
        id: "deepseek-chat",
        url: "https://api.deepseek.com",
        npm: "@ai-sdk/openai-compatible",
      },
    })
    const result = ProviderTransform.variants(model)
    expect(result).toEqual({})
  })
  test("minimax returns empty object", () => {
    const model = createMockModel({
      id: "minimax/minimax-model",
      providerID: "minimax",
      api: {
        id: "minimax-model",
        url: "https://api.minimax.com",
        npm: "@ai-sdk/openai-compatible",
      },
    })
    const result = ProviderTransform.variants(model)
    expect(result).toEqual({})
  })
  test("glm returns empty object", () => {
    const model = createMockModel({
      id: "glm/glm-4",
      providerID: "glm",
      api: {
        id: "glm-4",
        url: "https://api.glm.com",
        npm: "@ai-sdk/openai-compatible",
      },
    })
    const result = ProviderTransform.variants(model)
    expect(result).toEqual({})
  })
  test("mistral returns empty object", () => {
    const model = createMockModel({
      id: "mistral/mistral-large",
      providerID: "mistral",
      api: {
        id: "mistral-large-latest",
        url: "https://api.mistral.com",
        npm: "@ai-sdk/mistral",
      },
    })
    const result = ProviderTransform.variants(model)
    expect(result).toEqual({})
  })
  // OpenRouter: effort set depends on the underlying model family.
  describe("@openrouter/ai-sdk-provider", () => {
    test("returns empty object for non-qualifying models", () => {
      const model = createMockModel({
        id: "openrouter/test-model",
        providerID: "openrouter",
        api: {
          id: "test-model",
          url: "https://openrouter.ai",
          npm: "@openrouter/ai-sdk-provider",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(result).toEqual({})
    })
    test("gpt models return OPENAI_EFFORTS with reasoning", () => {
      const model = createMockModel({
        id: "openrouter/gpt-4",
        providerID: "openrouter",
        api: {
          id: "gpt-4",
          url: "https://openrouter.ai",
          npm: "@openrouter/ai-sdk-provider",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(Object.keys(result)).toEqual(["none", "minimal", "low", "medium", "high", "xhigh"])
      expect(result.low).toEqual({ reasoning: { effort: "low" } })
      expect(result.high).toEqual({ reasoning: { effort: "high" } })
    })
    test("gemini-3 returns OPENAI_EFFORTS with reasoning", () => {
      const model = createMockModel({
        id: "openrouter/gemini-3-5-pro",
        providerID: "openrouter",
        api: {
          id: "gemini-3-5-pro",
          url: "https://openrouter.ai",
          npm: "@openrouter/ai-sdk-provider",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(Object.keys(result)).toEqual(["none", "minimal", "low", "medium", "high", "xhigh"])
    })
    test("grok-4 returns empty object", () => {
      const model = createMockModel({
        id: "openrouter/grok-4",
        providerID: "openrouter",
        api: {
          id: "grok-4",
          url: "https://openrouter.ai",
          npm: "@openrouter/ai-sdk-provider",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(result).toEqual({})
    })
    test("grok-3-mini returns low and high with reasoning", () => {
      const model = createMockModel({
        id: "openrouter/grok-3-mini",
        providerID: "openrouter",
        api: {
          id: "grok-3-mini",
          url: "https://openrouter.ai",
          npm: "@openrouter/ai-sdk-provider",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(Object.keys(result)).toEqual(["low", "high"])
      expect(result.low).toEqual({ reasoning: { effort: "low" } })
      expect(result.high).toEqual({ reasoning: { effort: "high" } })
    })
  })
  describe("@ai-sdk/gateway", () => {
    test("returns OPENAI_EFFORTS with reasoningEffort", () => {
      const model = createMockModel({
        id: "gateway/gateway-model",
        providerID: "gateway",
        api: {
          id: "gateway-model",
          url: "https://gateway.ai",
          npm: "@ai-sdk/gateway",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(Object.keys(result)).toEqual(["none", "minimal", "low", "medium", "high", "xhigh"])
      expect(result.low).toEqual({ reasoningEffort: "low" })
      expect(result.high).toEqual({ reasoningEffort: "high" })
    })
  })
  // GitHub Copilot: "xhigh" availability is gated on specific model ids.
  describe("@ai-sdk/github-copilot", () => {
    test("standard models return low, medium, high", () => {
      const model = createMockModel({
        id: "gpt-4.5",
        providerID: "github-copilot",
        api: {
          id: "gpt-4.5",
          url: "https://api.githubcopilot.com",
          npm: "@ai-sdk/github-copilot",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(Object.keys(result)).toEqual(["low", "medium", "high"])
      expect(result.low).toEqual({
        reasoningEffort: "low",
        reasoningSummary: "auto",
        include: ["reasoning.encrypted_content"],
      })
    })
    test("gpt-5.1-codex-max includes xhigh", () => {
      const model = createMockModel({
        id: "gpt-5.1-codex-max",
        providerID: "github-copilot",
        api: {
          id: "gpt-5.1-codex-max",
          url: "https://api.githubcopilot.com",
          npm: "@ai-sdk/github-copilot",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(Object.keys(result)).toEqual(["low", "medium", "high", "xhigh"])
    })
    test("gpt-5.1-codex-mini does not include xhigh", () => {
      const model = createMockModel({
        id: "gpt-5.1-codex-mini",
        providerID: "github-copilot",
        api: {
          id: "gpt-5.1-codex-mini",
          url: "https://api.githubcopilot.com",
          npm: "@ai-sdk/github-copilot",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(Object.keys(result)).toEqual(["low", "medium", "high"])
    })
    test("gpt-5.1-codex does not include xhigh", () => {
      const model = createMockModel({
        id: "gpt-5.1-codex",
        providerID: "github-copilot",
        api: {
          id: "gpt-5.1-codex",
          url: "https://api.githubcopilot.com",
          npm: "@ai-sdk/github-copilot",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(Object.keys(result)).toEqual(["low", "medium", "high"])
    })
    test("gpt-5.2 includes xhigh", () => {
      const model = createMockModel({
        id: "gpt-5.2",
        providerID: "github-copilot",
        api: {
          id: "gpt-5.2",
          url: "https://api.githubcopilot.com",
          npm: "@ai-sdk/github-copilot",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(Object.keys(result)).toEqual(["low", "medium", "high", "xhigh"])
      expect(result.xhigh).toEqual({
        reasoningEffort: "xhigh",
        reasoningSummary: "auto",
        include: ["reasoning.encrypted_content"],
      })
    })
    test("gpt-5.2-codex includes xhigh", () => {
      const model = createMockModel({
        id: "gpt-5.2-codex",
        providerID: "github-copilot",
        api: {
          id: "gpt-5.2-codex",
          url: "https://api.githubcopilot.com",
          npm: "@ai-sdk/github-copilot",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(Object.keys(result)).toEqual(["low", "medium", "high", "xhigh"])
    })
  })
  describe("@ai-sdk/cerebras", () => {
    test("returns WIDELY_SUPPORTED_EFFORTS with reasoningEffort", () => {
      const model = createMockModel({
        id: "cerebras/llama-4",
        providerID: "cerebras",
        api: {
          id: "llama-4-sc",
          url: "https://api.cerebras.ai",
          npm: "@ai-sdk/cerebras",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(Object.keys(result)).toEqual(["low", "medium", "high"])
      expect(result.low).toEqual({ reasoningEffort: "low" })
      expect(result.high).toEqual({ reasoningEffort: "high" })
    })
  })
  describe("@ai-sdk/togetherai", () => {
    test("returns WIDELY_SUPPORTED_EFFORTS with reasoningEffort", () => {
      const model = createMockModel({
        id: "togetherai/llama-4",
        providerID: "togetherai",
        api: {
          id: "llama-4-sc",
          url: "https://api.togetherai.com",
          npm: "@ai-sdk/togetherai",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(Object.keys(result)).toEqual(["low", "medium", "high"])
      expect(result.low).toEqual({ reasoningEffort: "low" })
      expect(result.high).toEqual({ reasoningEffort: "high" })
    })
  })
  // xAI: only the mini reasoning models expose effort variants.
  describe("@ai-sdk/xai", () => {
    test("grok-3 returns empty object", () => {
      const model = createMockModel({
        id: "xai/grok-3",
        providerID: "xai",
        api: {
          id: "grok-3",
          url: "https://api.x.ai",
          npm: "@ai-sdk/xai",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(result).toEqual({})
    })
    test("grok-3-mini returns low and high with reasoningEffort", () => {
      const model = createMockModel({
        id: "xai/grok-3-mini",
        providerID: "xai",
        api: {
          id: "grok-3-mini",
          url: "https://api.x.ai",
          npm: "@ai-sdk/xai",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(Object.keys(result)).toEqual(["low", "high"])
      expect(result.low).toEqual({ reasoningEffort: "low" })
      expect(result.high).toEqual({ reasoningEffort: "high" })
    })
  })
  describe("@ai-sdk/deepinfra", () => {
    test("returns WIDELY_SUPPORTED_EFFORTS with reasoningEffort", () => {
      const model = createMockModel({
        id: "deepinfra/llama-4",
        providerID: "deepinfra",
        api: {
          id: "llama-4-sc",
          url: "https://api.deepinfra.com",
          npm: "@ai-sdk/deepinfra",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(Object.keys(result)).toEqual(["low", "medium", "high"])
      expect(result.low).toEqual({ reasoningEffort: "low" })
      expect(result.high).toEqual({ reasoningEffort: "high" })
    })
  })
  describe("@ai-sdk/openai-compatible", () => {
    test("returns WIDELY_SUPPORTED_EFFORTS with reasoningEffort", () => {
      const model = createMockModel({
        id: "custom-provider/custom-model",
        providerID: "custom-provider",
        api: {
          id: "custom-model",
          url: "https://api.custom.com",
          npm: "@ai-sdk/openai-compatible",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(Object.keys(result)).toEqual(["low", "medium", "high"])
      expect(result.low).toEqual({ reasoningEffort: "low" })
      expect(result.high).toEqual({ reasoningEffort: "high" })
    })
  })
  describe("@ai-sdk/azure", () => {
    test("o1-mini returns empty object", () => {
      const model = createMockModel({
        id: "o1-mini",
        providerID: "azure",
        api: {
          id: "o1-mini",
          url: "https://azure.com",
          npm: "@ai-sdk/azure",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(result).toEqual({})
    })
    test("standard azure models return custom efforts with reasoningSummary", () => {
      const model = createMockModel({
        id: "o1",
        providerID: "azure",
        api: {
          id: "o1",
          url: "https://azure.com",
          npm: "@ai-sdk/azure",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(Object.keys(result)).toEqual(["low", "medium", "high"])
      expect(result.low).toEqual({
        reasoningEffort: "low",
        reasoningSummary: "auto",
        include: ["reasoning.encrypted_content"],
      })
    })
    test("gpt-5 adds minimal effort", () => {
      const model = createMockModel({
        id: "gpt-5",
        providerID: "azure",
        api: {
          id: "gpt-5",
          url: "https://azure.com",
          npm: "@ai-sdk/azure",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(Object.keys(result)).toEqual(["minimal", "low", "medium", "high"])
    })
  })
  // OpenAI: effort set also depends on the model's release_date cutoffs.
  describe("@ai-sdk/openai", () => {
    test("gpt-5-pro returns empty object", () => {
      const model = createMockModel({
        id: "gpt-5-pro",
        providerID: "openai",
        api: {
          id: "gpt-5-pro",
          url: "https://api.openai.com",
          npm: "@ai-sdk/openai",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(result).toEqual({})
    })
    test("standard openai models return custom efforts with reasoningSummary", () => {
      const model = createMockModel({
        id: "gpt-5",
        providerID: "openai",
        api: {
          id: "gpt-5",
          url: "https://api.openai.com",
          npm: "@ai-sdk/openai",
        },
        release_date: "2024-06-01",
      })
      const result = ProviderTransform.variants(model)
      expect(Object.keys(result)).toEqual(["minimal", "low", "medium", "high"])
      expect(result.low).toEqual({
        reasoningEffort: "low",
        reasoningSummary: "auto",
        include: ["reasoning.encrypted_content"],
      })
    })
    test("models after 2025-11-13 include 'none' effort", () => {
      const model = createMockModel({
        id: "gpt-5-nano",
        providerID: "openai",
        api: {
          id: "gpt-5-nano",
          url: "https://api.openai.com",
          npm: "@ai-sdk/openai",
        },
        release_date: "2025-11-14",
      })
      const result = ProviderTransform.variants(model)
      expect(Object.keys(result)).toEqual(["none", "minimal", "low", "medium", "high"])
    })
    test("models after 2025-12-04 include 'xhigh' effort", () => {
      const model = createMockModel({
        id: "openai/gpt-5-chat",
        providerID: "openai",
        api: {
          id: "gpt-5-chat",
          url: "https://api.openai.com",
          npm: "@ai-sdk/openai",
        },
        release_date: "2025-12-05",
      })
      const result = ProviderTransform.variants(model)
      expect(Object.keys(result)).toEqual(["none", "minimal", "low", "medium", "high", "xhigh"])
    })
  })
  // Anthropic: variants use thinking budgets rather than effort strings.
  describe("@ai-sdk/anthropic", () => {
    test("returns high and max with thinking config", () => {
      const model = createMockModel({
        id: "anthropic/claude-4",
        providerID: "anthropic",
        api: {
          id: "claude-4",
          url: "https://api.anthropic.com",
          npm: "@ai-sdk/anthropic",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(Object.keys(result)).toEqual(["high", "max"])
      expect(result.high).toEqual({
        thinking: {
          type: "enabled",
          budgetTokens: 16000,
        },
      })
      expect(result.max).toEqual({
        thinking: {
          type: "enabled",
          budgetTokens: 31999,
        },
      })
    })
  })
  describe("@ai-sdk/amazon-bedrock", () => {
    test("returns WIDELY_SUPPORTED_EFFORTS with reasoningConfig", () => {
      const model = createMockModel({
        id: "bedrock/llama-4",
        providerID: "bedrock",
        api: {
          id: "llama-4-sc",
          url: "https://bedrock.amazonaws.com",
          npm: "@ai-sdk/amazon-bedrock",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(Object.keys(result)).toEqual(["low", "medium", "high"])
      expect(result.low).toEqual({
        reasoningConfig: {
          type: "enabled",
          maxReasoningEffort: "low",
        },
      })
    })
  })
  // Google: gemini-2.5 uses thinkingBudget, other gemini use thinkingLevel.
  describe("@ai-sdk/google", () => {
    test("gemini-2.5 returns high and max with thinkingConfig and thinkingBudget", () => {
      const model = createMockModel({
        id: "google/gemini-2.5-pro",
        providerID: "google",
        api: {
          id: "gemini-2.5-pro",
          url: "https://generativelanguage.googleapis.com",
          npm: "@ai-sdk/google",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(Object.keys(result)).toEqual(["high", "max"])
      expect(result.high).toEqual({
        thinkingConfig: {
          includeThoughts: true,
          thinkingBudget: 16000,
        },
      })
      expect(result.max).toEqual({
        thinkingConfig: {
          includeThoughts: true,
          thinkingBudget: 24576,
        },
      })
    })
    test("other gemini models return low and high with thinkingLevel", () => {
      const model = createMockModel({
        id: "google/gemini-2.0-pro",
        providerID: "google",
        api: {
          id: "gemini-2.0-pro",
          url: "https://generativelanguage.googleapis.com",
          npm: "@ai-sdk/google",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(Object.keys(result)).toEqual(["low", "high"])
      expect(result.low).toEqual({
        includeThoughts: true,
        thinkingLevel: "low",
      })
      expect(result.high).toEqual({
        includeThoughts: true,
        thinkingLevel: "high",
      })
    })
  })
  describe("@ai-sdk/google-vertex", () => {
    test("gemini-2.5 returns high and max with thinkingConfig and thinkingBudget", () => {
      const model = createMockModel({
        id: "google-vertex/gemini-2.5-pro",
        providerID: "google-vertex",
        api: {
          id: "gemini-2.5-pro",
          url: "https://vertexai.googleapis.com",
          npm: "@ai-sdk/google-vertex",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(Object.keys(result)).toEqual(["high", "max"])
    })
    test("other vertex models return low and high with thinkingLevel", () => {
      const model = createMockModel({
        id: "google-vertex/gemini-2.0-pro",
        providerID: "google-vertex",
        api: {
          id: "gemini-2.0-pro",
          url: "https://vertexai.googleapis.com",
          npm: "@ai-sdk/google-vertex",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(Object.keys(result)).toEqual(["low", "high"])
    })
  })
  describe("@ai-sdk/cohere", () => {
    test("returns empty object", () => {
      const model = createMockModel({
        id: "cohere/command-r",
        providerID: "cohere",
        api: {
          id: "command-r",
          url: "https://api.cohere.com",
          npm: "@ai-sdk/cohere",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(result).toEqual({})
    })
  })
  describe("@ai-sdk/groq", () => {
    test("returns none and WIDELY_SUPPORTED_EFFORTS with thinkingLevel", () => {
      const model = createMockModel({
        id: "groq/llama-4",
        providerID: "groq",
        api: {
          id: "llama-4-sc",
          url: "https://api.groq.com",
          npm: "@ai-sdk/groq",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(Object.keys(result)).toEqual(["none", "low", "medium", "high"])
      expect(result.none).toEqual({
        includeThoughts: true,
        thinkingLevel: "none",
      })
      expect(result.low).toEqual({
        includeThoughts: true,
        thinkingLevel: "low",
      })
    })
  })
  describe("@ai-sdk/perplexity", () => {
    test("returns empty object", () => {
      const model = createMockModel({
        id: "perplexity/sonar-plus",
        providerID: "perplexity",
        api: {
          id: "sonar-plus",
          url: "https://api.perplexity.ai",
          npm: "@ai-sdk/perplexity",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(result).toEqual({})
    })
  })
})