// transform.test.ts

import { describe, expect, test } from "bun:test"
import { ProviderTransform } from "../../src/provider/transform"

const OUTPUT_TOKEN_MAX = 32000
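
// ProviderTransform.options builds per-request options for a model/session pair.
// The tests below pin down the promptCacheKey behavior: it should equal the
// sessionID when providerOptions.setCacheKey is true, stay unset otherwise, and
// for the openai provider it should be set regardless of setCacheKey (along with
// store=false).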
describe("ProviderTransform.options - setCacheKey", () => {
  const sessionID = "test-session-123"
  const mockModel = {
    id: "anthropic/claude-3-5-sonnet",
    providerID: "anthropic",
    api: {
      id: "claude-3-5-sonnet-20241022",
      url: "https://api.anthropic.com",
      npm: "@ai-sdk/anthropic",
    },
    name: "Claude 3.5 Sonnet",
    capabilities: {
      temperature: true,
      reasoning: false,
      attachment: true,
      toolcall: true,
      input: { text: true, audio: false, image: true, video: false, pdf: true },
      output: { text: true, audio: false, image: false, video: false, pdf: false },
      interleaved: false,
    },
    cost: {
      input: 0.003,
      output: 0.015,
      cache: { read: 0.0003, write: 0.00375 },
    },
    limit: {
      context: 200000,
      output: 8192,
    },
    status: "active",
    options: {},
    headers: {},
  } as any
  test("should set promptCacheKey when providerOptions.setCacheKey is true", () => {
    const result = ProviderTransform.options({
      model: mockModel,
      sessionID,
      providerOptions: { setCacheKey: true },
    })
    expect(result.promptCacheKey).toBe(sessionID)
  })
  test("should not set promptCacheKey when providerOptions.setCacheKey is false", () => {
    const result = ProviderTransform.options({
      model: mockModel,
      sessionID,
      providerOptions: { setCacheKey: false },
    })
    expect(result.promptCacheKey).toBeUndefined()
  })
  test("should not set promptCacheKey when providerOptions is undefined", () => {
    const result = ProviderTransform.options({
      model: mockModel,
      sessionID,
      providerOptions: undefined,
    })
    expect(result.promptCacheKey).toBeUndefined()
  })
  test("should not set promptCacheKey when providerOptions does not have setCacheKey", () => {
    const result = ProviderTransform.options({ model: mockModel, sessionID, providerOptions: {} })
    expect(result.promptCacheKey).toBeUndefined()
  })
  test("should set promptCacheKey for openai provider regardless of setCacheKey", () => {
    const openaiModel = {
      ...mockModel,
      providerID: "openai",
      api: {
        id: "gpt-4",
        url: "https://api.openai.com",
        npm: "@ai-sdk/openai",
      },
    }
    const result = ProviderTransform.options({ model: openaiModel, sessionID, providerOptions: {} })
    expect(result.promptCacheKey).toBe(sessionID)
  })
  test("should set store=false for openai provider", () => {
    const openaiModel = {
      ...mockModel,
      providerID: "openai",
      api: {
        id: "gpt-4",
        url: "https://api.openai.com",
        npm: "@ai-sdk/openai",
      },
    }
    const result = ProviderTransform.options({
      model: openaiModel,
      sessionID,
      providerOptions: {},
    })
    expect(result.store).toBe(false)
  })
})
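
// textVerbosity handling for the gpt-5 family: "low" is expected for gpt-5.1/5.2,
// while the -chat, -chat-latest, and -codex variants should leave it unset.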
describe("ProviderTransform.options - gpt-5 textVerbosity", () => {
  const sessionID = "test-session-123"
  const createGpt5Model = (apiId: string) =>
    ({
      id: `openai/${apiId}`,
      providerID: "openai",
      api: {
        id: apiId,
        url: "https://api.openai.com",
        npm: "@ai-sdk/openai",
      },
      name: apiId,
      capabilities: {
        temperature: true,
        reasoning: true,
        attachment: true,
        toolcall: true,
        input: { text: true, audio: false, image: true, video: false, pdf: false },
        output: { text: true, audio: false, image: false, video: false, pdf: false },
        interleaved: false,
      },
      cost: { input: 0.03, output: 0.06, cache: { read: 0.001, write: 0.002 } },
      limit: { context: 128000, output: 4096 },
      status: "active",
      options: {},
      headers: {},
    }) as any
  test("gpt-5.2 should have textVerbosity set to low", () => {
    const model = createGpt5Model("gpt-5.2")
    const result = ProviderTransform.options({ model, sessionID, providerOptions: {} })
    expect(result.textVerbosity).toBe("low")
  })
  test("gpt-5.1 should have textVerbosity set to low", () => {
    const model = createGpt5Model("gpt-5.1")
    const result = ProviderTransform.options({ model, sessionID, providerOptions: {} })
    expect(result.textVerbosity).toBe("low")
  })
  test("gpt-5.2-chat-latest should NOT have textVerbosity set (only supports medium)", () => {
    const model = createGpt5Model("gpt-5.2-chat-latest")
    const result = ProviderTransform.options({ model, sessionID, providerOptions: {} })
    expect(result.textVerbosity).toBeUndefined()
  })
  test("gpt-5.1-chat-latest should NOT have textVerbosity set (only supports medium)", () => {
    const model = createGpt5Model("gpt-5.1-chat-latest")
    const result = ProviderTransform.options({ model, sessionID, providerOptions: {} })
    expect(result.textVerbosity).toBeUndefined()
  })
  test("gpt-5.2-chat should NOT have textVerbosity set", () => {
    const model = createGpt5Model("gpt-5.2-chat")
    const result = ProviderTransform.options({ model, sessionID, providerOptions: {} })
    expect(result.textVerbosity).toBeUndefined()
  })
  test("gpt-5-chat should NOT have textVerbosity set", () => {
    const model = createGpt5Model("gpt-5-chat")
    const result = ProviderTransform.options({ model, sessionID, providerOptions: {} })
    expect(result.textVerbosity).toBeUndefined()
  })
  test("gpt-5.2-codex should NOT have textVerbosity set (codex models excluded)", () => {
    const model = createGpt5Model("gpt-5.2-codex")
    const result = ProviderTransform.options({ model, sessionID, providerOptions: {} })
    expect(result.textVerbosity).toBeUndefined()
  })
})
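
// maxOutputTokens caps a model's output limit. Based on the assertions below, the
// expected shape is roughly min(OUTPUT_TOKEN_MAX, modelLimit), with an enabled
// thinking budget (budgetTokens / budget_tokens) additionally subtracted from the
// model limit before the cap is applied.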
describe("ProviderTransform.maxOutputTokens", () => {
  test("returns 32k when modelLimit > 32k", () => {
    const modelLimit = 100000
    const result = ProviderTransform.maxOutputTokens("@ai-sdk/openai", {}, modelLimit, OUTPUT_TOKEN_MAX)
    expect(result).toBe(OUTPUT_TOKEN_MAX)
  })
  test("returns modelLimit when modelLimit < 32k", () => {
    const modelLimit = 16000
    const result = ProviderTransform.maxOutputTokens("@ai-sdk/openai", {}, modelLimit, OUTPUT_TOKEN_MAX)
    expect(result).toBe(16000)
  })
  describe("azure", () => {
    test("returns 32k when modelLimit > 32k", () => {
      const modelLimit = 100000
      const result = ProviderTransform.maxOutputTokens("@ai-sdk/azure", {}, modelLimit, OUTPUT_TOKEN_MAX)
      expect(result).toBe(OUTPUT_TOKEN_MAX)
    })
    test("returns modelLimit when modelLimit < 32k", () => {
      const modelLimit = 16000
      const result = ProviderTransform.maxOutputTokens("@ai-sdk/azure", {}, modelLimit, OUTPUT_TOKEN_MAX)
      expect(result).toBe(16000)
    })
  })
  describe("bedrock", () => {
    test("returns 32k when modelLimit > 32k", () => {
      const modelLimit = 100000
      const result = ProviderTransform.maxOutputTokens("@ai-sdk/amazon-bedrock", {}, modelLimit, OUTPUT_TOKEN_MAX)
      expect(result).toBe(OUTPUT_TOKEN_MAX)
    })
    test("returns modelLimit when modelLimit < 32k", () => {
      const modelLimit = 16000
      const result = ProviderTransform.maxOutputTokens("@ai-sdk/amazon-bedrock", {}, modelLimit, OUTPUT_TOKEN_MAX)
      expect(result).toBe(16000)
    })
  })
  describe("anthropic without thinking options", () => {
    test("returns 32k when modelLimit > 32k", () => {
      const modelLimit = 100000
      const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", {}, modelLimit, OUTPUT_TOKEN_MAX)
      expect(result).toBe(OUTPUT_TOKEN_MAX)
    })
    test("returns modelLimit when modelLimit < 32k", () => {
      const modelLimit = 16000
      const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", {}, modelLimit, OUTPUT_TOKEN_MAX)
      expect(result).toBe(16000)
    })
  })
  describe("anthropic with thinking options", () => {
    test("returns 32k when budgetTokens + 32k <= modelLimit", () => {
      const modelLimit = 100000
      const options = {
        thinking: {
          type: "enabled",
          budgetTokens: 10000,
        },
      }
      const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", options, modelLimit, OUTPUT_TOKEN_MAX)
      expect(result).toBe(OUTPUT_TOKEN_MAX)
    })
    test("returns modelLimit - budgetTokens when budgetTokens + 32k > modelLimit", () => {
      const modelLimit = 50000
      const options = {
        thinking: {
          type: "enabled",
          budgetTokens: 30000,
        },
      }
      const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", options, modelLimit, OUTPUT_TOKEN_MAX)
      expect(result).toBe(20000)
    })
    test("returns 32k when thinking type is not enabled", () => {
      const modelLimit = 100000
      const options = {
        thinking: {
          type: "disabled",
          budgetTokens: 10000,
        },
      }
      const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", options, modelLimit, OUTPUT_TOKEN_MAX)
      expect(result).toBe(OUTPUT_TOKEN_MAX)
    })
  })
  describe("openai-compatible with thinking options (snake_case)", () => {
    test("returns 32k when budget_tokens + 32k <= modelLimit", () => {
      const modelLimit = 100000
      const options = {
        thinking: {
          type: "enabled",
          budget_tokens: 10000,
        },
      }
      const result = ProviderTransform.maxOutputTokens(
        "@ai-sdk/openai-compatible",
        options,
        modelLimit,
        OUTPUT_TOKEN_MAX,
      )
      expect(result).toBe(OUTPUT_TOKEN_MAX)
    })
    test("returns modelLimit - budget_tokens when budget_tokens + 32k > modelLimit", () => {
      const modelLimit = 50000
      const options = {
        thinking: {
          type: "enabled",
          budget_tokens: 30000,
        },
      }
      const result = ProviderTransform.maxOutputTokens(
        "@ai-sdk/openai-compatible",
        options,
        modelLimit,
        OUTPUT_TOKEN_MAX,
      )
      expect(result).toBe(20000)
    })
    test("returns 32k when thinking type is not enabled", () => {
      const modelLimit = 100000
      const options = {
        thinking: {
          type: "disabled",
          budget_tokens: 10000,
        },
      }
      const result = ProviderTransform.maxOutputTokens(
        "@ai-sdk/openai-compatible",
        options,
        modelLimit,
        OUTPUT_TOKEN_MAX,
      )
      expect(result).toBe(OUTPUT_TOKEN_MAX)
    })
    test("returns 32k when budget_tokens is 0", () => {
      const modelLimit = 100000
      const options = {
        thinking: {
          type: "enabled",
          budget_tokens: 0,
        },
      }
      const result = ProviderTransform.maxOutputTokens(
        "@ai-sdk/openai-compatible",
        options,
        modelLimit,
        OUTPUT_TOKEN_MAX,
      )
      expect(result).toBe(OUTPUT_TOKEN_MAX)
    })
  })
})
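
// ProviderTransform.schema is expected to patch JSON schemas for Gemini models,
// adding an items definition to array properties that lack one while leaving
// existing items definitions untouched.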
describe("ProviderTransform.schema - gemini array items", () => {
  test("adds missing items for array properties", () => {
    const geminiModel = {
      providerID: "google",
      api: {
        id: "gemini-3-pro",
      },
    } as any
    const schema = {
      type: "object",
      properties: {
        nodes: { type: "array" },
        edges: { type: "array", items: { type: "string" } },
      },
    } as any
    const result = ProviderTransform.schema(geminiModel, schema) as any
    expect(result.properties.nodes.items).toBeDefined()
    expect(result.properties.edges.items.type).toBe("string")
  })
})
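
// ProviderTransform.message rewrites message history per provider. For DeepSeek
// (interleaved reasoning via the reasoning_content field), a reasoning part that
// accompanies tool calls is expected to move into
// providerOptions.openaiCompatible.reasoning_content; other providers should pass
// reasoning parts through untouched.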
describe("ProviderTransform.message - DeepSeek reasoning content", () => {
  test("DeepSeek with tool calls includes reasoning_content in providerOptions", () => {
    const msgs = [
      {
        role: "assistant",
        content: [
          { type: "reasoning", text: "Let me think about this..." },
          {
            type: "tool-call",
            toolCallId: "test",
            toolName: "bash",
            input: { command: "echo hello" },
          },
        ],
      },
    ] as any[]
    const result = ProviderTransform.message(
      msgs,
      {
        id: "deepseek/deepseek-chat",
        providerID: "deepseek",
        api: {
          id: "deepseek-chat",
          url: "https://api.deepseek.com",
          npm: "@ai-sdk/openai-compatible",
        },
        name: "DeepSeek Chat",
        capabilities: {
          temperature: true,
          reasoning: true,
          attachment: false,
          toolcall: true,
          input: { text: true, audio: false, image: false, video: false, pdf: false },
          output: { text: true, audio: false, image: false, video: false, pdf: false },
          interleaved: {
            field: "reasoning_content",
          },
        },
        cost: {
          input: 0.001,
          output: 0.002,
          cache: { read: 0.0001, write: 0.0002 },
        },
        limit: {
          context: 128000,
          output: 8192,
        },
        status: "active",
        options: {},
        headers: {},
        release_date: "2023-04-01",
      },
      {},
    )
    expect(result).toHaveLength(1)
    expect(result[0].content).toEqual([
      {
        type: "tool-call",
        toolCallId: "test",
        toolName: "bash",
        input: { command: "echo hello" },
      },
    ])
    expect(result[0].providerOptions?.openaiCompatible?.reasoning_content).toBe("Let me think about this...")
  })
  test("Non-DeepSeek providers leave reasoning content unchanged", () => {
    const msgs = [
      {
        role: "assistant",
        content: [
          { type: "reasoning", text: "Should not be processed" },
          { type: "text", text: "Answer" },
        ],
      },
    ] as any[]
    const result = ProviderTransform.message(
      msgs,
      {
        id: "openai/gpt-4",
        providerID: "openai",
        api: {
          id: "gpt-4",
          url: "https://api.openai.com",
          npm: "@ai-sdk/openai",
        },
        name: "GPT-4",
        capabilities: {
          temperature: true,
          reasoning: false,
          attachment: true,
          toolcall: true,
          input: { text: true, audio: false, image: true, video: false, pdf: false },
          output: { text: true, audio: false, image: false, video: false, pdf: false },
          interleaved: false,
        },
        cost: {
          input: 0.03,
          output: 0.06,
          cache: { read: 0.001, write: 0.002 },
        },
        limit: {
          context: 128000,
          output: 4096,
        },
        status: "active",
        options: {},
        headers: {},
        release_date: "2023-04-01",
      },
      {},
    )
    expect(result[0].content).toEqual([
      { type: "reasoning", text: "Should not be processed" },
      { type: "text", text: "Answer" },
    ])
    expect(result[0].providerOptions?.openaiCompatible?.reasoning_content).toBeUndefined()
  })
})
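
// Empty base64 image parts (a bare data: prefix with no payload) are expected to
// be replaced with an explanatory error text part; valid images pass through
// unchanged.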
describe("ProviderTransform.message - empty image handling", () => {
  const mockModel = {
    id: "anthropic/claude-3-5-sonnet",
    providerID: "anthropic",
    api: {
      id: "claude-3-5-sonnet-20241022",
      url: "https://api.anthropic.com",
      npm: "@ai-sdk/anthropic",
    },
    name: "Claude 3.5 Sonnet",
    capabilities: {
      temperature: true,
      reasoning: false,
      attachment: true,
      toolcall: true,
      input: { text: true, audio: false, image: true, video: false, pdf: true },
      output: { text: true, audio: false, image: false, video: false, pdf: false },
      interleaved: false,
    },
    cost: {
      input: 0.003,
      output: 0.015,
      cache: { read: 0.0003, write: 0.00375 },
    },
    limit: {
      context: 200000,
      output: 8192,
    },
    status: "active",
    options: {},
    headers: {},
  } as any
  test("should replace empty base64 image with error text", () => {
    const msgs = [
      {
        role: "user",
        content: [
          { type: "text", text: "What is in this image?" },
          { type: "image", image: "data:image/png;base64," },
        ],
      },
    ] as any[]
    const result = ProviderTransform.message(msgs, mockModel, {})
    expect(result).toHaveLength(1)
    expect(result[0].content).toHaveLength(2)
    expect(result[0].content[0]).toEqual({ type: "text", text: "What is in this image?" })
    expect(result[0].content[1]).toEqual({
      type: "text",
      text: "ERROR: Image file is empty or corrupted. Please provide a valid image.",
    })
  })
  test("should keep valid base64 images unchanged", () => {
    const validBase64 =
      "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNk+M9QDwADhgGAWjR9awAAAABJRU5ErkJggg=="
    const msgs = [
      {
        role: "user",
        content: [
          { type: "text", text: "What is in this image?" },
          { type: "image", image: `data:image/png;base64,${validBase64}` },
        ],
      },
    ] as any[]
    const result = ProviderTransform.message(msgs, mockModel, {})
    expect(result).toHaveLength(1)
    expect(result[0].content).toHaveLength(2)
    expect(result[0].content[0]).toEqual({ type: "text", text: "What is in this image?" })
    expect(result[0].content[1]).toEqual({ type: "image", image: `data:image/png;base64,${validBase64}` })
  })
  test("should handle mixed valid and empty images", () => {
    const validBase64 =
      "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNk+M9QDwADhgGAWjR9awAAAABJRU5ErkJggg=="
    const msgs = [
      {
        role: "user",
        content: [
          { type: "text", text: "Compare these images" },
          { type: "image", image: `data:image/png;base64,${validBase64}` },
          { type: "image", image: "data:image/jpeg;base64," },
        ],
      },
    ] as any[]
    const result = ProviderTransform.message(msgs, mockModel, {})
    expect(result).toHaveLength(1)
    expect(result[0].content).toHaveLength(3)
    expect(result[0].content[0]).toEqual({ type: "text", text: "Compare these images" })
    expect(result[0].content[1]).toEqual({ type: "image", image: `data:image/png;base64,${validBase64}` })
    expect(result[0].content[2]).toEqual({
      type: "text",
      text: "ERROR: Image file is empty or corrupted. Please provide a valid image.",
    })
  })
})
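
// Anthropic rejects empty content blocks, so for anthropic models the transform is
// expected to drop empty text/reasoning parts and remove messages left with no
// content, while other providers keep such messages as-is.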
describe("ProviderTransform.message - anthropic empty content filtering", () => {
  const anthropicModel = {
    id: "anthropic/claude-3-5-sonnet",
    providerID: "anthropic",
    api: {
      id: "claude-3-5-sonnet-20241022",
      url: "https://api.anthropic.com",
      npm: "@ai-sdk/anthropic",
    },
    name: "Claude 3.5 Sonnet",
    capabilities: {
      temperature: true,
      reasoning: false,
      attachment: true,
      toolcall: true,
      input: { text: true, audio: false, image: true, video: false, pdf: true },
      output: { text: true, audio: false, image: false, video: false, pdf: false },
      interleaved: false,
    },
    cost: {
      input: 0.003,
      output: 0.015,
      cache: { read: 0.0003, write: 0.00375 },
    },
    limit: {
      context: 200000,
      output: 8192,
    },
    status: "active",
    options: {},
    headers: {},
  } as any
  test("filters out messages with empty string content", () => {
    const msgs = [
      { role: "user", content: "Hello" },
      { role: "assistant", content: "" },
      { role: "user", content: "World" },
    ] as any[]
    const result = ProviderTransform.message(msgs, anthropicModel, {})
    expect(result).toHaveLength(2)
    expect(result[0].content).toBe("Hello")
    expect(result[1].content).toBe("World")
  })
  test("filters out empty text parts from array content", () => {
    const msgs = [
      {
        role: "assistant",
        content: [
          { type: "text", text: "" },
          { type: "text", text: "Hello" },
          { type: "text", text: "" },
        ],
      },
    ] as any[]
    const result = ProviderTransform.message(msgs, anthropicModel, {})
    expect(result).toHaveLength(1)
    expect(result[0].content).toHaveLength(1)
    expect(result[0].content[0]).toEqual({ type: "text", text: "Hello" })
  })
  test("filters out empty reasoning parts from array content", () => {
    const msgs = [
      {
        role: "assistant",
        content: [
          { type: "reasoning", text: "" },
          { type: "text", text: "Answer" },
          { type: "reasoning", text: "" },
        ],
      },
    ] as any[]
    const result = ProviderTransform.message(msgs, anthropicModel, {})
    expect(result).toHaveLength(1)
    expect(result[0].content).toHaveLength(1)
    expect(result[0].content[0]).toEqual({ type: "text", text: "Answer" })
  })
  test("removes entire message when all parts are empty", () => {
    const msgs = [
      { role: "user", content: "Hello" },
      {
        role: "assistant",
        content: [
          { type: "text", text: "" },
          { type: "reasoning", text: "" },
        ],
      },
      { role: "user", content: "World" },
    ] as any[]
    const result = ProviderTransform.message(msgs, anthropicModel, {})
    expect(result).toHaveLength(2)
    expect(result[0].content).toBe("Hello")
    expect(result[1].content).toBe("World")
  })
  test("keeps non-text/reasoning parts even if text parts are empty", () => {
    const msgs = [
      {
        role: "assistant",
        content: [
          { type: "text", text: "" },
          { type: "tool-call", toolCallId: "123", toolName: "bash", input: { command: "ls" } },
        ],
      },
    ] as any[]
    const result = ProviderTransform.message(msgs, anthropicModel, {})
    expect(result).toHaveLength(1)
    expect(result[0].content).toHaveLength(1)
    expect(result[0].content[0]).toEqual({
      type: "tool-call",
      toolCallId: "123",
      toolName: "bash",
      input: { command: "ls" },
    })
  })
  test("keeps messages with valid text alongside empty parts", () => {
    const msgs = [
      {
        role: "assistant",
        content: [
          { type: "reasoning", text: "Thinking..." },
          { type: "text", text: "" },
          { type: "text", text: "Result" },
        ],
      },
    ] as any[]
    const result = ProviderTransform.message(msgs, anthropicModel, {})
    expect(result).toHaveLength(1)
    expect(result[0].content).toHaveLength(2)
    expect(result[0].content[0]).toEqual({ type: "reasoning", text: "Thinking..." })
    expect(result[0].content[1]).toEqual({ type: "text", text: "Result" })
  })
  test("does not filter for non-anthropic providers", () => {
    const openaiModel = {
      ...anthropicModel,
      providerID: "openai",
      api: {
        id: "gpt-4",
        url: "https://api.openai.com",
        npm: "@ai-sdk/openai",
      },
    }
    const msgs = [
      { role: "assistant", content: "" },
      {
        role: "assistant",
        content: [{ type: "text", text: "" }],
      },
    ] as any[]
    const result = ProviderTransform.message(msgs, openaiModel, {})
    expect(result).toHaveLength(2)
    expect(result[0].content).toBe("")
    expect(result[1].content).toHaveLength(1)
  })
})
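
// Response-item metadata stored under providerOptions (itemId,
// reasoningEncryptedContent): the tests below pin that it is preserved across
// providers and regardless of the store flag.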
describe("ProviderTransform.message - strip openai metadata when store=false", () => {
  const openaiModel = {
    id: "openai/gpt-5",
    providerID: "openai",
    api: {
      id: "gpt-5",
      url: "https://api.openai.com",
      npm: "@ai-sdk/openai",
    },
    name: "GPT-5",
    capabilities: {
      temperature: true,
      reasoning: true,
      attachment: true,
      toolcall: true,
      input: { text: true, audio: false, image: true, video: false, pdf: false },
      output: { text: true, audio: false, image: false, video: false, pdf: false },
      interleaved: false,
    },
    cost: { input: 0.03, output: 0.06, cache: { read: 0.001, write: 0.002 } },
    limit: { context: 128000, output: 4096 },
    status: "active",
    options: {},
    headers: {},
  } as any
  test("preserves itemId and reasoningEncryptedContent when store=false", () => {
    const msgs = [
      {
        role: "assistant",
        content: [
          {
            type: "reasoning",
            text: "thinking...",
            providerOptions: {
              openai: {
                itemId: "rs_123",
                reasoningEncryptedContent: "encrypted",
              },
            },
          },
          {
            type: "text",
            text: "Hello",
            providerOptions: {
              openai: {
                itemId: "msg_456",
              },
            },
          },
        ],
      },
    ] as any[]
    const result = ProviderTransform.message(msgs, openaiModel, { store: false }) as any[]
    expect(result).toHaveLength(1)
    expect(result[0].content[0].providerOptions?.openai?.itemId).toBe("rs_123")
    expect(result[0].content[1].providerOptions?.openai?.itemId).toBe("msg_456")
  })
  test("preserves itemId and reasoningEncryptedContent when store=false even when not openai", () => {
    const zenModel = {
      ...openaiModel,
      providerID: "zen",
    }
    const msgs = [
      {
        role: "assistant",
        content: [
          {
            type: "reasoning",
            text: "thinking...",
            providerOptions: {
              openai: {
                itemId: "rs_123",
                reasoningEncryptedContent: "encrypted",
              },
            },
          },
          {
            type: "text",
            text: "Hello",
            providerOptions: {
              openai: {
                itemId: "msg_456",
              },
            },
          },
        ],
      },
    ] as any[]
    const result = ProviderTransform.message(msgs, zenModel, { store: false }) as any[]
    expect(result).toHaveLength(1)
    expect(result[0].content[0].providerOptions?.openai?.itemId).toBe("rs_123")
    expect(result[0].content[1].providerOptions?.openai?.itemId).toBe("msg_456")
  })
  test("preserves other openai options including itemId", () => {
    const msgs = [
      {
        role: "assistant",
        content: [
          {
            type: "text",
            text: "Hello",
            providerOptions: {
              openai: {
                itemId: "msg_123",
                otherOption: "value",
              },
            },
          },
        ],
      },
    ] as any[]
    const result = ProviderTransform.message(msgs, openaiModel, { store: false }) as any[]
    expect(result[0].content[0].providerOptions?.openai?.itemId).toBe("msg_123")
    expect(result[0].content[0].providerOptions?.openai?.otherOption).toBe("value")
  })
  test("preserves metadata for openai package when store is true", () => {
    const msgs = [
      {
        role: "assistant",
        content: [
          {
            type: "text",
            text: "Hello",
            providerOptions: {
              openai: {
                itemId: "msg_123",
              },
            },
          },
        ],
      },
    ] as any[]
    // openai package preserves itemId regardless of store value
    const result = ProviderTransform.message(msgs, openaiModel, { store: true }) as any[]
    expect(result[0].content[0].providerOptions?.openai?.itemId).toBe("msg_123")
  })
  test("preserves metadata for non-openai packages when store is false", () => {
    const anthropicModel = {
      ...openaiModel,
      providerID: "anthropic",
      api: {
        id: "claude-3",
        url: "https://api.anthropic.com",
        npm: "@ai-sdk/anthropic",
      },
    }
    const msgs = [
      {
        role: "assistant",
        content: [
          {
            type: "text",
            text: "Hello",
            providerOptions: {
              openai: {
                itemId: "msg_123",
              },
            },
          },
        ],
      },
    ] as any[]
    // store=false preserves metadata for non-openai packages
    const result = ProviderTransform.message(msgs, anthropicModel, { store: false }) as any[]
    expect(result[0].content[0].providerOptions?.openai?.itemId).toBe("msg_123")
  })
  test("preserves metadata using providerID key when store is false", () => {
    const opencodeModel = {
      ...openaiModel,
      providerID: "opencode",
      api: {
        id: "opencode-test",
        url: "https://api.opencode.ai",
        npm: "@ai-sdk/openai-compatible",
      },
    }
    const msgs = [
      {
        role: "assistant",
        content: [
          {
            type: "text",
            text: "Hello",
            providerOptions: {
              opencode: {
                itemId: "msg_123",
                otherOption: "value",
              },
            },
          },
        ],
      },
    ] as any[]
    const result = ProviderTransform.message(msgs, opencodeModel, { store: false }) as any[]
    expect(result[0].content[0].providerOptions?.opencode?.itemId).toBe("msg_123")
    expect(result[0].content[0].providerOptions?.opencode?.otherOption).toBe("value")
  })
  test("preserves itemId across all providerOptions keys", () => {
    const opencodeModel = {
      ...openaiModel,
      providerID: "opencode",
      api: {
        id: "opencode-test",
        url: "https://api.opencode.ai",
        npm: "@ai-sdk/openai-compatible",
      },
    }
    const msgs = [
      {
        role: "assistant",
        providerOptions: {
          openai: { itemId: "msg_root" },
          opencode: { itemId: "msg_opencode" },
          extra: { itemId: "msg_extra" },
        },
        content: [
          {
            type: "text",
            text: "Hello",
            providerOptions: {
              openai: { itemId: "msg_openai_part" },
              opencode: { itemId: "msg_opencode_part" },
              extra: { itemId: "msg_extra_part" },
            },
          },
        ],
      },
    ] as any[]
    const result = ProviderTransform.message(msgs, opencodeModel, { store: false }) as any[]
    expect(result[0].providerOptions?.openai?.itemId).toBe("msg_root")
    expect(result[0].providerOptions?.opencode?.itemId).toBe("msg_opencode")
    expect(result[0].providerOptions?.extra?.itemId).toBe("msg_extra")
    expect(result[0].content[0].providerOptions?.openai?.itemId).toBe("msg_openai_part")
    expect(result[0].content[0].providerOptions?.opencode?.itemId).toBe("msg_opencode_part")
    expect(result[0].content[0].providerOptions?.extra?.itemId).toBe("msg_extra_part")
  })
  test("does not strip metadata for non-openai packages when store is not false", () => {
    const anthropicModel = {
      ...openaiModel,
      providerID: "anthropic",
      api: {
        id: "claude-3",
        url: "https://api.anthropic.com",
        npm: "@ai-sdk/anthropic",
      },
    }
    const msgs = [
      {
        role: "assistant",
        content: [
          {
            type: "text",
            text: "Hello",
            providerOptions: {
              openai: {
                itemId: "msg_123",
              },
            },
          },
        ],
      },
    ] as any[]
    const result = ProviderTransform.message(msgs, anthropicModel, {}) as any[]
    expect(result[0].content[0].providerOptions?.openai?.itemId).toBe("msg_123")
  })
})
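
// providerOptions keys are expected to be remapped from the configured providerID
// to the key the underlying AI SDK package expects (e.g. "github-copilot" ->
// "openai", any bedrock providerID -> "bedrock"), while azure keeps its own key.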
describe("ProviderTransform.message - providerOptions key remapping", () => {
  const createModel = (providerID: string, npm: string) =>
    ({
      id: `${providerID}/test-model`,
      providerID,
      api: {
        id: "test-model",
        url: "https://api.test.com",
        npm,
      },
      name: "Test Model",
      capabilities: {
        temperature: true,
        reasoning: false,
        attachment: true,
        toolcall: true,
        input: { text: true, audio: false, image: true, video: false, pdf: true },
        output: { text: true, audio: false, image: false, video: false, pdf: false },
        interleaved: false,
      },
      cost: { input: 0.001, output: 0.002, cache: { read: 0.0001, write: 0.0002 } },
      limit: { context: 128000, output: 8192 },
      status: "active",
      options: {},
      headers: {},
    }) as any
  test("azure keeps 'azure' key and does not remap to 'openai'", () => {
    const model = createModel("azure", "@ai-sdk/azure")
    const msgs = [
      {
        role: "user",
        content: "Hello",
        providerOptions: {
          azure: { someOption: "value" },
        },
      },
    ] as any[]
    const result = ProviderTransform.message(msgs, model, {})
    expect(result[0].providerOptions?.azure).toEqual({ someOption: "value" })
    expect(result[0].providerOptions?.openai).toBeUndefined()
  })
  test("github-copilot npm remaps providerID key to 'openai'", () => {
    const model = createModel("github-copilot", "@ai-sdk/github-copilot")
    const msgs = [
      {
        role: "user",
        content: "Hello",
        providerOptions: {
          "github-copilot": { someOption: "value" },
        },
      },
    ] as any[]
    const result = ProviderTransform.message(msgs, model, {})
    expect(result[0].providerOptions?.openai).toEqual({ someOption: "value" })
    expect(result[0].providerOptions?.["github-copilot"]).toBeUndefined()
  })
  test("bedrock remaps providerID to 'bedrock' key", () => {
    const model = createModel("my-bedrock", "@ai-sdk/amazon-bedrock")
    const msgs = [
      {
        role: "user",
        content: "Hello",
        providerOptions: {
          "my-bedrock": { someOption: "value" },
        },
      },
    ] as any[]
    const result = ProviderTransform.message(msgs, model, {})
    expect(result[0].providerOptions?.bedrock).toEqual({ someOption: "value" })
    expect(result[0].providerOptions?.["my-bedrock"]).toBeUndefined()
  })
})
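
// Claude behind a Bedrock application inference profile ARN should still get the
// bedrock cachePoint (ephemeral) provider option on messages.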
describe("ProviderTransform.message - claude w/bedrock custom inference profile", () => {
  test("adds cachePoint", () => {
    const model = {
      id: "amazon-bedrock/custom-claude-sonnet-4.5",
      providerID: "amazon-bedrock",
      api: {
        id: "arn:aws:bedrock:xxx:yyy:application-inference-profile/zzz",
        url: "https://api.test.com",
        npm: "@ai-sdk/amazon-bedrock",
      },
      name: "Custom inference profile",
      capabilities: {},
      options: {},
      headers: {},
    } as any
    const msgs = [
      {
        role: "user",
        content: "Hello",
      },
    ] as any[]
    const result = ProviderTransform.message(msgs, model, {})
    expect(result[0].providerOptions?.bedrock).toEqual(
      expect.objectContaining({
        cachePoint: {
          type: "ephemeral",
        },
      }),
    )
  })
})
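
// ProviderTransform.variants enumerates reasoning-effort presets per model. The
// nested describes below cover each AI SDK package; non-reasoning models and a few
// providers (deepseek, minimax, glm, mistral) are expected to return no variants.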
describe("ProviderTransform.variants", () => {
  const createMockModel = (overrides: Partial<any> = {}): any => ({
    id: "test/test-model",
    providerID: "test",
    api: {
      id: "test-model",
      url: "https://api.test.com",
      npm: "@ai-sdk/openai",
    },
    name: "Test Model",
    capabilities: {
      temperature: true,
      reasoning: true,
      attachment: true,
      toolcall: true,
      input: { text: true, audio: false, image: true, video: false, pdf: false },
      output: { text: true, audio: false, image: false, video: false, pdf: false },
      interleaved: false,
    },
    cost: {
      input: 0.001,
      output: 0.002,
      cache: { read: 0.0001, write: 0.0002 },
    },
    limit: {
      context: 200_000,
      output: 64_000,
    },
    status: "active",
    options: {},
    headers: {},
    release_date: "2024-01-01",
    ...overrides,
  })
  test("returns empty object when model has no reasoning capabilities", () => {
    const model = createMockModel({
      capabilities: { reasoning: false },
    })
    const result = ProviderTransform.variants(model)
    expect(result).toEqual({})
  })
  test("deepseek returns empty object", () => {
    const model = createMockModel({
      id: "deepseek/deepseek-chat",
      providerID: "deepseek",
      api: {
        id: "deepseek-chat",
        url: "https://api.deepseek.com",
        npm: "@ai-sdk/openai-compatible",
      },
    })
    const result = ProviderTransform.variants(model)
    expect(result).toEqual({})
  })
  test("minimax returns empty object", () => {
    const model = createMockModel({
      id: "minimax/minimax-model",
      providerID: "minimax",
      api: {
        id: "minimax-model",
        url: "https://api.minimax.com",
        npm: "@ai-sdk/openai-compatible",
      },
    })
    const result = ProviderTransform.variants(model)
    expect(result).toEqual({})
  })
  test("glm returns empty object", () => {
    const model = createMockModel({
      id: "glm/glm-4",
      providerID: "glm",
      api: {
        id: "glm-4",
        url: "https://api.glm.com",
        npm: "@ai-sdk/openai-compatible",
      },
    })
    const result = ProviderTransform.variants(model)
    expect(result).toEqual({})
  })
  test("mistral returns empty object", () => {
    const model = createMockModel({
      id: "mistral/mistral-large",
      providerID: "mistral",
      api: {
        id: "mistral-large-latest",
        url: "https://api.mistral.com",
        npm: "@ai-sdk/mistral",
      },
    })
    const result = ProviderTransform.variants(model)
    expect(result).toEqual({})
  })
  describe("@openrouter/ai-sdk-provider", () => {
    test("returns empty object for non-qualifying models", () => {
      const model = createMockModel({
        id: "openrouter/test-model",
        providerID: "openrouter",
        api: {
          id: "test-model",
          url: "https://openrouter.ai",
          npm: "@openrouter/ai-sdk-provider",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(result).toEqual({})
    })
    test("gpt models return OPENAI_EFFORTS with reasoning", () => {
      const model = createMockModel({
        id: "openrouter/gpt-4",
        providerID: "openrouter",
        api: {
          id: "gpt-4",
          url: "https://openrouter.ai",
          npm: "@openrouter/ai-sdk-provider",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(Object.keys(result)).toEqual(["none", "minimal", "low", "medium", "high", "xhigh"])
      expect(result.low).toEqual({ reasoning: { effort: "low" } })
      expect(result.high).toEqual({ reasoning: { effort: "high" } })
    })
    test("gemini-3 returns OPENAI_EFFORTS with reasoning", () => {
      const model = createMockModel({
        id: "openrouter/gemini-3-5-pro",
        providerID: "openrouter",
        api: {
          id: "gemini-3-5-pro",
          url: "https://openrouter.ai",
          npm: "@openrouter/ai-sdk-provider",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(Object.keys(result)).toEqual(["none", "minimal", "low", "medium", "high", "xhigh"])
    })
    test("grok-4 returns empty object", () => {
      const model = createMockModel({
        id: "openrouter/grok-4",
        providerID: "openrouter",
        api: {
          id: "grok-4",
          url: "https://openrouter.ai",
          npm: "@openrouter/ai-sdk-provider",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(result).toEqual({})
    })
    test("grok-3-mini returns low and high with reasoning", () => {
      const model = createMockModel({
        id: "openrouter/grok-3-mini",
        providerID: "openrouter",
        api: {
          id: "grok-3-mini",
          url: "https://openrouter.ai",
          npm: "@openrouter/ai-sdk-provider",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(Object.keys(result)).toEqual(["low", "high"])
      expect(result.low).toEqual({ reasoning: { effort: "low" } })
      expect(result.high).toEqual({ reasoning: { effort: "high" } })
    })
  })
  describe("@ai-sdk/gateway", () => {
    test("returns OPENAI_EFFORTS with reasoningEffort", () => {
      const model = createMockModel({
        id: "gateway/gateway-model",
        providerID: "gateway",
        api: {
          id: "gateway-model",
          url: "https://gateway.ai",
          npm: "@ai-sdk/gateway",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(Object.keys(result)).toEqual(["none", "minimal", "low", "medium", "high", "xhigh"])
      expect(result.low).toEqual({ reasoningEffort: "low" })
      expect(result.high).toEqual({ reasoningEffort: "high" })
    })
  })
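
  // github-copilot models get low/medium/high with reasoningSummary and encrypted
  // reasoning content; "xhigh" is expected only for gpt-5.1-codex-max and the
  // gpt-5.2 family.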
  describe("@ai-sdk/github-copilot", () => {
    test("standard models return low, medium, high", () => {
      const model = createMockModel({
        id: "gpt-4.5",
        providerID: "github-copilot",
        api: {
          id: "gpt-4.5",
          url: "https://api.githubcopilot.com",
          npm: "@ai-sdk/github-copilot",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(Object.keys(result)).toEqual(["low", "medium", "high"])
      expect(result.low).toEqual({
        reasoningEffort: "low",
        reasoningSummary: "auto",
        include: ["reasoning.encrypted_content"],
      })
    })
    test("gpt-5.1-codex-max includes xhigh", () => {
      const model = createMockModel({
        id: "gpt-5.1-codex-max",
        providerID: "github-copilot",
        api: {
          id: "gpt-5.1-codex-max",
          url: "https://api.githubcopilot.com",
          npm: "@ai-sdk/github-copilot",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(Object.keys(result)).toEqual(["low", "medium", "high", "xhigh"])
    })
    test("gpt-5.1-codex-mini does not include xhigh", () => {
      const model = createMockModel({
        id: "gpt-5.1-codex-mini",
        providerID: "github-copilot",
        api: {
          id: "gpt-5.1-codex-mini",
          url: "https://api.githubcopilot.com",
          npm: "@ai-sdk/github-copilot",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(Object.keys(result)).toEqual(["low", "medium", "high"])
    })
    test("gpt-5.1-codex does not include xhigh", () => {
      const model = createMockModel({
        id: "gpt-5.1-codex",
        providerID: "github-copilot",
        api: {
          id: "gpt-5.1-codex",
          url: "https://api.githubcopilot.com",
          npm: "@ai-sdk/github-copilot",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(Object.keys(result)).toEqual(["low", "medium", "high"])
    })
    test("gpt-5.2 includes xhigh", () => {
      const model = createMockModel({
        id: "gpt-5.2",
        providerID: "github-copilot",
        api: {
          id: "gpt-5.2",
          url: "https://api.githubcopilot.com",
          npm: "@ai-sdk/github-copilot",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(Object.keys(result)).toEqual(["low", "medium", "high", "xhigh"])
      expect(result.xhigh).toEqual({
        reasoningEffort: "xhigh",
        reasoningSummary: "auto",
        include: ["reasoning.encrypted_content"],
      })
    })
    test("gpt-5.2-codex includes xhigh", () => {
      const model = createMockModel({
        id: "gpt-5.2-codex",
        providerID: "github-copilot",
        api: {
          id: "gpt-5.2-codex",
          url: "https://api.githubcopilot.com",
          npm: "@ai-sdk/github-copilot",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(Object.keys(result)).toEqual(["low", "medium", "high", "xhigh"])
    })
  })
  describe("@ai-sdk/cerebras", () => {
    test("returns WIDELY_SUPPORTED_EFFORTS with reasoningEffort", () => {
      const model = createMockModel({
        id: "cerebras/llama-4",
        providerID: "cerebras",
        api: {
          id: "llama-4-sc",
          url: "https://api.cerebras.ai",
          npm: "@ai-sdk/cerebras",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(Object.keys(result)).toEqual(["low", "medium", "high"])
      expect(result.low).toEqual({ reasoningEffort: "low" })
      expect(result.high).toEqual({ reasoningEffort: "high" })
    })
  })
  describe("@ai-sdk/togetherai", () => {
    test("returns WIDELY_SUPPORTED_EFFORTS with reasoningEffort", () => {
      const model = createMockModel({
        id: "togetherai/llama-4",
        providerID: "togetherai",
        api: {
          id: "llama-4-sc",
          url: "https://api.togetherai.com",
          npm: "@ai-sdk/togetherai",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(Object.keys(result)).toEqual(["low", "medium", "high"])
      expect(result.low).toEqual({ reasoningEffort: "low" })
      expect(result.high).toEqual({ reasoningEffort: "high" })
    })
  })
  describe("@ai-sdk/xai", () => {
    test("grok-3 returns empty object", () => {
      const model = createMockModel({
        id: "xai/grok-3",
        providerID: "xai",
        api: {
          id: "grok-3",
          url: "https://api.x.ai",
          npm: "@ai-sdk/xai",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(result).toEqual({})
    })
    test("grok-3-mini returns low and high with reasoningEffort", () => {
      const model = createMockModel({
        id: "xai/grok-3-mini",
        providerID: "xai",
        api: {
          id: "grok-3-mini",
          url: "https://api.x.ai",
          npm: "@ai-sdk/xai",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(Object.keys(result)).toEqual(["low", "high"])
      expect(result.low).toEqual({ reasoningEffort: "low" })
      expect(result.high).toEqual({ reasoningEffort: "high" })
    })
  })
  describe("@ai-sdk/deepinfra", () => {
    test("returns WIDELY_SUPPORTED_EFFORTS with reasoningEffort", () => {
      const model = createMockModel({
        id: "deepinfra/llama-4",
        providerID: "deepinfra",
        api: {
          id: "llama-4-sc",
          url: "https://api.deepinfra.com",
          npm: "@ai-sdk/deepinfra",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(Object.keys(result)).toEqual(["low", "medium", "high"])
      expect(result.low).toEqual({ reasoningEffort: "low" })
      expect(result.high).toEqual({ reasoningEffort: "high" })
    })
  })
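
  // For openai-compatible endpoints, generic models get reasoningEffort presets,
  // while Claude/Anthropic models (detected via model.id or model.api.id) are
  // expected to get snake_case thinking budgets instead, e.g.
  // { thinking: { type: "enabled", budget_tokens: 16000 } } for "high".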
  describe("@ai-sdk/openai-compatible", () => {
    test("returns WIDELY_SUPPORTED_EFFORTS with reasoningEffort", () => {
      const model = createMockModel({
        id: "custom-provider/custom-model",
        providerID: "custom-provider",
        api: {
          id: "custom-model",
          url: "https://api.custom.com",
          npm: "@ai-sdk/openai-compatible",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(Object.keys(result)).toEqual(["low", "medium", "high"])
      expect(result.low).toEqual({ reasoningEffort: "low" })
      expect(result.high).toEqual({ reasoningEffort: "high" })
    })
    test("Claude via LiteLLM returns thinking with snake_case budget_tokens", () => {
      const model = createMockModel({
        id: "anthropic/claude-sonnet-4-5",
        providerID: "anthropic",
        api: {
          id: "claude-sonnet-4-5-20250929",
          url: "http://localhost:4000",
          npm: "@ai-sdk/openai-compatible",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(Object.keys(result)).toEqual(["high", "max"])
      expect(result.high).toEqual({
        thinking: {
          type: "enabled",
          budget_tokens: 16000,
        },
      })
      expect(result.max).toEqual({
        thinking: {
          type: "enabled",
          budget_tokens: 31999,
        },
      })
    })
    test("Claude model (by model.id) via openai-compatible uses snake_case", () => {
      const model = createMockModel({
        id: "litellm/claude-3-opus",
        providerID: "litellm",
        api: {
          id: "claude-3-opus-20240229",
          url: "http://localhost:4000",
          npm: "@ai-sdk/openai-compatible",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(Object.keys(result)).toEqual(["high", "max"])
      expect(result.high).toEqual({
        thinking: {
          type: "enabled",
          budget_tokens: 16000,
        },
      })
    })
    test("Anthropic model (by model.api.id) via openai-compatible uses snake_case", () => {
      const model = createMockModel({
        id: "custom/my-model",
        providerID: "custom",
        api: {
          id: "anthropic.claude-sonnet",
          url: "http://localhost:4000",
          npm: "@ai-sdk/openai-compatible",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(Object.keys(result)).toEqual(["high", "max"])
      expect(result.high.thinking.budget_tokens).toBe(16000)
    })
  })
  describe("@ai-sdk/azure", () => {
    test("o1-mini returns empty object", () => {
      const model = createMockModel({
        id: "o1-mini",
        providerID: "azure",
        api: {
          id: "o1-mini",
          url: "https://azure.com",
          npm: "@ai-sdk/azure",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(result).toEqual({})
    })
    test("standard azure models return custom efforts with reasoningSummary", () => {
      const model = createMockModel({
        id: "o1",
        providerID: "azure",
        api: {
          id: "o1",
          url: "https://azure.com",
          npm: "@ai-sdk/azure",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(Object.keys(result)).toEqual(["low", "medium", "high"])
      expect(result.low).toEqual({
        reasoningEffort: "low",
        reasoningSummary: "auto",
        include: ["reasoning.encrypted_content"],
      })
    })
    test("gpt-5 adds minimal effort", () => {
      const model = createMockModel({
        id: "gpt-5",
        providerID: "azure",
        api: {
          id: "gpt-5",
          url: "https://azure.com",
          npm: "@ai-sdk/azure",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(Object.keys(result)).toEqual(["minimal", "low", "medium", "high"])
    })
  })
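  // OpenAI: gpt-5-pro gets no variants; for other models the effort list grows
  // with release_date ("none" after 2025-11-13, "xhigh" after 2025-12-04).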
  describe("@ai-sdk/openai", () => {
    test("gpt-5-pro returns empty object", () => {
      const model = createMockModel({
        id: "gpt-5-pro",
        providerID: "openai",
        api: {
          id: "gpt-5-pro",
          url: "https://api.openai.com",
          npm: "@ai-sdk/openai",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(result).toEqual({})
    })
    test("standard openai models return custom efforts with reasoningSummary", () => {
      const model = createMockModel({
        id: "gpt-5",
        providerID: "openai",
        api: {
          id: "gpt-5",
          url: "https://api.openai.com",
          npm: "@ai-sdk/openai",
        },
        release_date: "2024-06-01",
      })
      const result = ProviderTransform.variants(model)
      expect(Object.keys(result)).toEqual(["minimal", "low", "medium", "high"])
      expect(result.low).toEqual({
        reasoningEffort: "low",
        reasoningSummary: "auto",
        include: ["reasoning.encrypted_content"],
      })
    })
    test("models after 2025-11-13 include 'none' effort", () => {
      const model = createMockModel({
        id: "gpt-5-nano",
        providerID: "openai",
        api: {
          id: "gpt-5-nano",
          url: "https://api.openai.com",
          npm: "@ai-sdk/openai",
        },
        release_date: "2025-11-14",
      })
      const result = ProviderTransform.variants(model)
      expect(Object.keys(result)).toEqual(["none", "minimal", "low", "medium", "high"])
    })
    test("models after 2025-12-04 include 'xhigh' effort", () => {
      const model = createMockModel({
        id: "openai/gpt-5-chat",
        providerID: "openai",
        api: {
          id: "gpt-5-chat",
          url: "https://api.openai.com",
          npm: "@ai-sdk/openai",
        },
        release_date: "2025-12-05",
      })
      const result = ProviderTransform.variants(model)
      expect(Object.keys(result)).toEqual(["none", "minimal", "low", "medium", "high", "xhigh"])
    })
  })
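  // Native @ai-sdk/anthropic uses camelCase `budgetTokens`, unlike the snake_case
  // shape sent through openai-compatible proxies above.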
  describe("@ai-sdk/anthropic", () => {
    test("returns high and max with thinking config", () => {
      const model = createMockModel({
        id: "anthropic/claude-4",
        providerID: "anthropic",
        api: {
          id: "claude-4",
          url: "https://api.anthropic.com",
          npm: "@ai-sdk/anthropic",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(Object.keys(result)).toEqual(["high", "max"])
      expect(result.high).toEqual({
        thinking: {
          type: "enabled",
          budgetTokens: 16000,
        },
      })
      expect(result.max).toEqual({
        thinking: {
          type: "enabled",
          budgetTokens: 31999,
        },
      })
    })
  })
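  // Bedrock wraps the effort level in a `reasoningConfig` object rather than a
  // bare reasoningEffort field.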
  describe("@ai-sdk/amazon-bedrock", () => {
    test("returns WIDELY_SUPPORTED_EFFORTS with reasoningConfig", () => {
      const model = createMockModel({
        id: "bedrock/llama-4",
        providerID: "bedrock",
        api: {
          id: "llama-4-sc",
          url: "https://bedrock.amazonaws.com",
          npm: "@ai-sdk/amazon-bedrock",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(Object.keys(result)).toEqual(["low", "medium", "high"])
      expect(result.low).toEqual({
        reasoningConfig: {
          type: "enabled",
          maxReasoningEffort: "low",
        },
      })
    })
  })
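  // Google and Vertex: gemini-2.5 uses a token-based thinkingBudget (24576 for the
  // "max" variant); other Gemini models use discrete thinkingLevel values.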
  describe("@ai-sdk/google", () => {
    test("gemini-2.5 returns high and max with thinkingConfig and thinkingBudget", () => {
      const model = createMockModel({
        id: "google/gemini-2.5-pro",
        providerID: "google",
        api: {
          id: "gemini-2.5-pro",
          url: "https://generativelanguage.googleapis.com",
          npm: "@ai-sdk/google",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(Object.keys(result)).toEqual(["high", "max"])
      expect(result.high).toEqual({
        thinkingConfig: {
          includeThoughts: true,
          thinkingBudget: 16000,
        },
      })
      expect(result.max).toEqual({
        thinkingConfig: {
          includeThoughts: true,
          thinkingBudget: 24576,
        },
      })
    })
    test("other gemini models return low and high with thinkingLevel", () => {
      const model = createMockModel({
        id: "google/gemini-2.0-pro",
        providerID: "google",
        api: {
          id: "gemini-2.0-pro",
          url: "https://generativelanguage.googleapis.com",
          npm: "@ai-sdk/google",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(Object.keys(result)).toEqual(["low", "high"])
      expect(result.low).toEqual({
        includeThoughts: true,
        thinkingLevel: "low",
      })
      expect(result.high).toEqual({
        includeThoughts: true,
        thinkingLevel: "high",
      })
    })
  })
  describe("@ai-sdk/google-vertex", () => {
    test("gemini-2.5 returns high and max with thinkingConfig and thinkingBudget", () => {
      const model = createMockModel({
        id: "google-vertex/gemini-2.5-pro",
        providerID: "google-vertex",
        api: {
          id: "gemini-2.5-pro",
          url: "https://vertexai.googleapis.com",
          npm: "@ai-sdk/google-vertex",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(Object.keys(result)).toEqual(["high", "max"])
    })
    test("other vertex models return low and high with thinkingLevel", () => {
      const model = createMockModel({
        id: "google-vertex/gemini-2.0-pro",
        providerID: "google-vertex",
        api: {
          id: "gemini-2.0-pro",
          url: "https://vertexai.googleapis.com",
          npm: "@ai-sdk/google-vertex",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(Object.keys(result)).toEqual(["low", "high"])
    })
  })
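  // cohere and perplexity expose no reasoning variants at all.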
  describe("@ai-sdk/cohere", () => {
    test("returns empty object", () => {
      const model = createMockModel({
        id: "cohere/command-r",
        providerID: "cohere",
        api: {
          id: "command-r",
          url: "https://api.cohere.com",
          npm: "@ai-sdk/cohere",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(result).toEqual({})
    })
  })
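  // Groq adds an explicit "none" level on top of the widely supported efforts.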
  describe("@ai-sdk/groq", () => {
    test("returns none and WIDELY_SUPPORTED_EFFORTS with thinkingLevel", () => {
      const model = createMockModel({
        id: "groq/llama-4",
        providerID: "groq",
        api: {
          id: "llama-4-sc",
          url: "https://api.groq.com",
          npm: "@ai-sdk/groq",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(Object.keys(result)).toEqual(["none", "low", "medium", "high"])
      expect(result.none).toEqual({
        includeThoughts: true,
        thinkingLevel: "none",
      })
      expect(result.low).toEqual({
        includeThoughts: true,
        thinkingLevel: "low",
      })
    })
  })
  describe("@ai-sdk/perplexity", () => {
    test("returns empty object", () => {
      const model = createMockModel({
        id: "perplexity/sonar-plus",
        providerID: "perplexity",
        api: {
          id: "sonar-plus",
          url: "https://api.perplexity.ai",
          npm: "@ai-sdk/perplexity",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(result).toEqual({})
    })
  })
})