import { describe, expect, test } from "bun:test"
import { ProviderTransform } from "../../src/provider/transform"

// NOTE(review): not referenced anywhere in this chunk — presumably used by tests
// further down the file; verify before removing.
const OUTPUT_TOKEN_MAX = 32000
  4. describe("ProviderTransform.options - setCacheKey", () => {
  5. const sessionID = "test-session-123"
  6. const mockModel = {
  7. id: "anthropic/claude-3-5-sonnet",
  8. providerID: "anthropic",
  9. api: {
  10. id: "claude-3-5-sonnet-20241022",
  11. url: "https://api.anthropic.com",
  12. npm: "@ai-sdk/anthropic",
  13. },
  14. name: "Claude 3.5 Sonnet",
  15. capabilities: {
  16. temperature: true,
  17. reasoning: false,
  18. attachment: true,
  19. toolcall: true,
  20. input: { text: true, audio: false, image: true, video: false, pdf: true },
  21. output: { text: true, audio: false, image: false, video: false, pdf: false },
  22. interleaved: false,
  23. },
  24. cost: {
  25. input: 0.003,
  26. output: 0.015,
  27. cache: { read: 0.0003, write: 0.00375 },
  28. },
  29. limit: {
  30. context: 200000,
  31. output: 8192,
  32. },
  33. status: "active",
  34. options: {},
  35. headers: {},
  36. } as any
  37. test("should set promptCacheKey when providerOptions.setCacheKey is true", () => {
  38. const result = ProviderTransform.options({
  39. model: mockModel,
  40. sessionID,
  41. providerOptions: { setCacheKey: true },
  42. })
  43. expect(result.promptCacheKey).toBe(sessionID)
  44. })
  45. test("should not set promptCacheKey when providerOptions.setCacheKey is false", () => {
  46. const result = ProviderTransform.options({
  47. model: mockModel,
  48. sessionID,
  49. providerOptions: { setCacheKey: false },
  50. })
  51. expect(result.promptCacheKey).toBeUndefined()
  52. })
  53. test("should not set promptCacheKey when providerOptions is undefined", () => {
  54. const result = ProviderTransform.options({
  55. model: mockModel,
  56. sessionID,
  57. providerOptions: undefined,
  58. })
  59. expect(result.promptCacheKey).toBeUndefined()
  60. })
  61. test("should not set promptCacheKey when providerOptions does not have setCacheKey", () => {
  62. const result = ProviderTransform.options({ model: mockModel, sessionID, providerOptions: {} })
  63. expect(result.promptCacheKey).toBeUndefined()
  64. })
  65. test("should set promptCacheKey for openai provider regardless of setCacheKey", () => {
  66. const openaiModel = {
  67. ...mockModel,
  68. providerID: "openai",
  69. api: {
  70. id: "gpt-4",
  71. url: "https://api.openai.com",
  72. npm: "@ai-sdk/openai",
  73. },
  74. }
  75. const result = ProviderTransform.options({ model: openaiModel, sessionID, providerOptions: {} })
  76. expect(result.promptCacheKey).toBe(sessionID)
  77. })
  78. test("should set store=false for openai provider", () => {
  79. const openaiModel = {
  80. ...mockModel,
  81. providerID: "openai",
  82. api: {
  83. id: "gpt-4",
  84. url: "https://api.openai.com",
  85. npm: "@ai-sdk/openai",
  86. },
  87. }
  88. const result = ProviderTransform.options({
  89. model: openaiModel,
  90. sessionID,
  91. providerOptions: {},
  92. })
  93. expect(result.store).toBe(false)
  94. })
  95. })
  96. describe("ProviderTransform.options - gpt-5 textVerbosity", () => {
  97. const sessionID = "test-session-123"
  98. const createGpt5Model = (apiId: string) =>
  99. ({
  100. id: `openai/${apiId}`,
  101. providerID: "openai",
  102. api: {
  103. id: apiId,
  104. url: "https://api.openai.com",
  105. npm: "@ai-sdk/openai",
  106. },
  107. name: apiId,
  108. capabilities: {
  109. temperature: true,
  110. reasoning: true,
  111. attachment: true,
  112. toolcall: true,
  113. input: { text: true, audio: false, image: true, video: false, pdf: false },
  114. output: { text: true, audio: false, image: false, video: false, pdf: false },
  115. interleaved: false,
  116. },
  117. cost: { input: 0.03, output: 0.06, cache: { read: 0.001, write: 0.002 } },
  118. limit: { context: 128000, output: 4096 },
  119. status: "active",
  120. options: {},
  121. headers: {},
  122. }) as any
  123. test("gpt-5.2 should have textVerbosity set to low", () => {
  124. const model = createGpt5Model("gpt-5.2")
  125. const result = ProviderTransform.options({ model, sessionID, providerOptions: {} })
  126. expect(result.textVerbosity).toBe("low")
  127. })
  128. test("gpt-5.1 should have textVerbosity set to low", () => {
  129. const model = createGpt5Model("gpt-5.1")
  130. const result = ProviderTransform.options({ model, sessionID, providerOptions: {} })
  131. expect(result.textVerbosity).toBe("low")
  132. })
  133. test("gpt-5.2-chat-latest should NOT have textVerbosity set (only supports medium)", () => {
  134. const model = createGpt5Model("gpt-5.2-chat-latest")
  135. const result = ProviderTransform.options({ model, sessionID, providerOptions: {} })
  136. expect(result.textVerbosity).toBeUndefined()
  137. })
  138. test("gpt-5.1-chat-latest should NOT have textVerbosity set (only supports medium)", () => {
  139. const model = createGpt5Model("gpt-5.1-chat-latest")
  140. const result = ProviderTransform.options({ model, sessionID, providerOptions: {} })
  141. expect(result.textVerbosity).toBeUndefined()
  142. })
  143. test("gpt-5.2-chat should NOT have textVerbosity set", () => {
  144. const model = createGpt5Model("gpt-5.2-chat")
  145. const result = ProviderTransform.options({ model, sessionID, providerOptions: {} })
  146. expect(result.textVerbosity).toBeUndefined()
  147. })
  148. test("gpt-5-chat should NOT have textVerbosity set", () => {
  149. const model = createGpt5Model("gpt-5-chat")
  150. const result = ProviderTransform.options({ model, sessionID, providerOptions: {} })
  151. expect(result.textVerbosity).toBeUndefined()
  152. })
  153. test("gpt-5.2-codex should NOT have textVerbosity set (codex models excluded)", () => {
  154. const model = createGpt5Model("gpt-5.2-codex")
  155. const result = ProviderTransform.options({ model, sessionID, providerOptions: {} })
  156. expect(result.textVerbosity).toBeUndefined()
  157. })
  158. })
  159. describe("ProviderTransform.options - gateway", () => {
  160. const sessionID = "test-session-123"
  161. const createModel = (id: string) =>
  162. ({
  163. id,
  164. providerID: "vercel",
  165. api: {
  166. id,
  167. url: "https://ai-gateway.vercel.sh/v3/ai",
  168. npm: "@ai-sdk/gateway",
  169. },
  170. name: id,
  171. capabilities: {
  172. temperature: true,
  173. reasoning: true,
  174. attachment: true,
  175. toolcall: true,
  176. input: { text: true, audio: false, image: true, video: false, pdf: true },
  177. output: { text: true, audio: false, image: false, video: false, pdf: false },
  178. interleaved: false,
  179. },
  180. cost: {
  181. input: 0.001,
  182. output: 0.002,
  183. cache: { read: 0.0001, write: 0.0002 },
  184. },
  185. limit: {
  186. context: 200_000,
  187. output: 8192,
  188. },
  189. status: "active",
  190. options: {},
  191. headers: {},
  192. release_date: "2024-01-01",
  193. }) as any
  194. test("puts gateway defaults under gateway key", () => {
  195. const model = createModel("anthropic/claude-sonnet-4")
  196. const result = ProviderTransform.options({ model, sessionID, providerOptions: {} })
  197. expect(result).toEqual({
  198. gateway: {
  199. caching: "auto",
  200. },
  201. })
  202. })
  203. })
  204. describe("ProviderTransform.providerOptions", () => {
  205. const createModel = (overrides: Partial<any> = {}) =>
  206. ({
  207. id: "test/test-model",
  208. providerID: "test",
  209. api: {
  210. id: "test-model",
  211. url: "https://api.test.com",
  212. npm: "@ai-sdk/openai",
  213. },
  214. name: "Test Model",
  215. capabilities: {
  216. temperature: true,
  217. reasoning: true,
  218. attachment: true,
  219. toolcall: true,
  220. input: { text: true, audio: false, image: true, video: false, pdf: false },
  221. output: { text: true, audio: false, image: false, video: false, pdf: false },
  222. interleaved: false,
  223. },
  224. cost: {
  225. input: 0.001,
  226. output: 0.002,
  227. cache: { read: 0.0001, write: 0.0002 },
  228. },
  229. limit: {
  230. context: 200_000,
  231. output: 64_000,
  232. },
  233. status: "active",
  234. options: {},
  235. headers: {},
  236. release_date: "2024-01-01",
  237. ...overrides,
  238. }) as any
  239. test("uses sdk key for non-gateway models", () => {
  240. const model = createModel({
  241. providerID: "my-bedrock",
  242. api: {
  243. id: "anthropic.claude-sonnet-4",
  244. url: "https://bedrock.aws",
  245. npm: "@ai-sdk/amazon-bedrock",
  246. },
  247. })
  248. expect(ProviderTransform.providerOptions(model, { cachePoint: { type: "default" } })).toEqual({
  249. bedrock: { cachePoint: { type: "default" } },
  250. })
  251. })
  252. test("uses gateway model provider slug for gateway models", () => {
  253. const model = createModel({
  254. providerID: "vercel",
  255. api: {
  256. id: "anthropic/claude-sonnet-4",
  257. url: "https://ai-gateway.vercel.sh/v3/ai",
  258. npm: "@ai-sdk/gateway",
  259. },
  260. })
  261. expect(ProviderTransform.providerOptions(model, { thinking: { type: "enabled", budgetTokens: 12_000 } })).toEqual({
  262. anthropic: { thinking: { type: "enabled", budgetTokens: 12_000 } },
  263. })
  264. })
  265. test("falls back to gateway key when gateway api id is unscoped", () => {
  266. const model = createModel({
  267. id: "anthropic/claude-sonnet-4",
  268. providerID: "vercel",
  269. api: {
  270. id: "claude-sonnet-4",
  271. url: "https://ai-gateway.vercel.sh/v3/ai",
  272. npm: "@ai-sdk/gateway",
  273. },
  274. })
  275. expect(ProviderTransform.providerOptions(model, { thinking: { type: "enabled", budgetTokens: 12_000 } })).toEqual({
  276. gateway: { thinking: { type: "enabled", budgetTokens: 12_000 } },
  277. })
  278. })
  279. test("splits gateway routing options from provider-specific options", () => {
  280. const model = createModel({
  281. providerID: "vercel",
  282. api: {
  283. id: "anthropic/claude-sonnet-4",
  284. url: "https://ai-gateway.vercel.sh/v3/ai",
  285. npm: "@ai-sdk/gateway",
  286. },
  287. })
  288. expect(
  289. ProviderTransform.providerOptions(model, {
  290. gateway: { order: ["vertex", "anthropic"] },
  291. thinking: { type: "enabled", budgetTokens: 12_000 },
  292. }),
  293. ).toEqual({
  294. gateway: { order: ["vertex", "anthropic"] },
  295. anthropic: { thinking: { type: "enabled", budgetTokens: 12_000 } },
  296. } as any)
  297. })
  298. test("falls back to gateway key when model id has no provider slug", () => {
  299. const model = createModel({
  300. id: "claude-sonnet-4",
  301. providerID: "vercel",
  302. api: {
  303. id: "claude-sonnet-4",
  304. url: "https://ai-gateway.vercel.sh/v3/ai",
  305. npm: "@ai-sdk/gateway",
  306. },
  307. })
  308. expect(ProviderTransform.providerOptions(model, { reasoningEffort: "high" })).toEqual({
  309. gateway: { reasoningEffort: "high" },
  310. })
  311. })
  312. })
  313. describe("ProviderTransform.schema - gemini array items", () => {
  314. test("adds missing items for array properties", () => {
  315. const geminiModel = {
  316. providerID: "google",
  317. api: {
  318. id: "gemini-3-pro",
  319. },
  320. } as any
  321. const schema = {
  322. type: "object",
  323. properties: {
  324. nodes: { type: "array" },
  325. edges: { type: "array", items: { type: "string" } },
  326. },
  327. } as any
  328. const result = ProviderTransform.schema(geminiModel, schema) as any
  329. expect(result.properties.nodes.items).toBeDefined()
  330. expect(result.properties.edges.items.type).toBe("string")
  331. })
  332. })
  333. describe("ProviderTransform.schema - gemini nested array items", () => {
  334. const geminiModel = {
  335. providerID: "google",
  336. api: {
  337. id: "gemini-3-pro",
  338. },
  339. } as any
  340. test("adds type to 2D array with empty inner items", () => {
  341. const schema = {
  342. type: "object",
  343. properties: {
  344. values: {
  345. type: "array",
  346. items: {
  347. type: "array",
  348. items: {}, // Empty items object
  349. },
  350. },
  351. },
  352. } as any
  353. const result = ProviderTransform.schema(geminiModel, schema) as any
  354. // Inner items should have a default type
  355. expect(result.properties.values.items.items.type).toBe("string")
  356. })
  357. test("adds items and type to 2D array with missing inner items", () => {
  358. const schema = {
  359. type: "object",
  360. properties: {
  361. data: {
  362. type: "array",
  363. items: { type: "array" }, // No items at all
  364. },
  365. },
  366. } as any
  367. const result = ProviderTransform.schema(geminiModel, schema) as any
  368. expect(result.properties.data.items.items).toBeDefined()
  369. expect(result.properties.data.items.items.type).toBe("string")
  370. })
  371. test("handles deeply nested arrays (3D)", () => {
  372. const schema = {
  373. type: "object",
  374. properties: {
  375. matrix: {
  376. type: "array",
  377. items: {
  378. type: "array",
  379. items: {
  380. type: "array",
  381. // No items
  382. },
  383. },
  384. },
  385. },
  386. } as any
  387. const result = ProviderTransform.schema(geminiModel, schema) as any
  388. expect(result.properties.matrix.items.items.items).toBeDefined()
  389. expect(result.properties.matrix.items.items.items.type).toBe("string")
  390. })
  391. test("preserves existing item types in nested arrays", () => {
  392. const schema = {
  393. type: "object",
  394. properties: {
  395. numbers: {
  396. type: "array",
  397. items: {
  398. type: "array",
  399. items: { type: "number" }, // Has explicit type
  400. },
  401. },
  402. },
  403. } as any
  404. const result = ProviderTransform.schema(geminiModel, schema) as any
  405. // Should preserve the explicit type
  406. expect(result.properties.numbers.items.items.type).toBe("number")
  407. })
  408. test("handles mixed nested structures with objects and arrays", () => {
  409. const schema = {
  410. type: "object",
  411. properties: {
  412. spreadsheetData: {
  413. type: "object",
  414. properties: {
  415. rows: {
  416. type: "array",
  417. items: {
  418. type: "array",
  419. items: {}, // Empty items
  420. },
  421. },
  422. },
  423. },
  424. },
  425. } as any
  426. const result = ProviderTransform.schema(geminiModel, schema) as any
  427. expect(result.properties.spreadsheetData.properties.rows.items.items.type).toBe("string")
  428. })
  429. })
  430. describe("ProviderTransform.schema - gemini non-object properties removal", () => {
  431. const geminiModel = {
  432. providerID: "google",
  433. api: {
  434. id: "gemini-3-pro",
  435. },
  436. } as any
  437. test("removes properties from non-object types", () => {
  438. const schema = {
  439. type: "object",
  440. properties: {
  441. data: {
  442. type: "string",
  443. properties: { invalid: { type: "string" } },
  444. },
  445. },
  446. } as any
  447. const result = ProviderTransform.schema(geminiModel, schema) as any
  448. expect(result.properties.data.type).toBe("string")
  449. expect(result.properties.data.properties).toBeUndefined()
  450. })
  451. test("removes required from non-object types", () => {
  452. const schema = {
  453. type: "object",
  454. properties: {
  455. data: {
  456. type: "array",
  457. items: { type: "string" },
  458. required: ["invalid"],
  459. },
  460. },
  461. } as any
  462. const result = ProviderTransform.schema(geminiModel, schema) as any
  463. expect(result.properties.data.type).toBe("array")
  464. expect(result.properties.data.required).toBeUndefined()
  465. })
  466. test("removes properties and required from nested non-object types", () => {
  467. const schema = {
  468. type: "object",
  469. properties: {
  470. outer: {
  471. type: "object",
  472. properties: {
  473. inner: {
  474. type: "number",
  475. properties: { bad: { type: "string" } },
  476. required: ["bad"],
  477. },
  478. },
  479. },
  480. },
  481. } as any
  482. const result = ProviderTransform.schema(geminiModel, schema) as any
  483. expect(result.properties.outer.properties.inner.type).toBe("number")
  484. expect(result.properties.outer.properties.inner.properties).toBeUndefined()
  485. expect(result.properties.outer.properties.inner.required).toBeUndefined()
  486. })
  487. test("keeps properties and required on object types", () => {
  488. const schema = {
  489. type: "object",
  490. properties: {
  491. data: {
  492. type: "object",
  493. properties: { name: { type: "string" } },
  494. required: ["name"],
  495. },
  496. },
  497. } as any
  498. const result = ProviderTransform.schema(geminiModel, schema) as any
  499. expect(result.properties.data.type).toBe("object")
  500. expect(result.properties.data.properties).toBeDefined()
  501. expect(result.properties.data.required).toEqual(["name"])
  502. })
  503. test("does not affect non-gemini providers", () => {
  504. const openaiModel = {
  505. providerID: "openai",
  506. api: {
  507. id: "gpt-4",
  508. },
  509. } as any
  510. const schema = {
  511. type: "object",
  512. properties: {
  513. data: {
  514. type: "string",
  515. properties: { invalid: { type: "string" } },
  516. },
  517. },
  518. } as any
  519. const result = ProviderTransform.schema(openaiModel, schema) as any
  520. expect(result.properties.data.properties).toBeDefined()
  521. })
  522. })
  523. describe("ProviderTransform.message - DeepSeek reasoning content", () => {
  524. test("DeepSeek with tool calls includes reasoning_content in providerOptions", () => {
  525. const msgs = [
  526. {
  527. role: "assistant",
  528. content: [
  529. { type: "reasoning", text: "Let me think about this..." },
  530. {
  531. type: "tool-call",
  532. toolCallId: "test",
  533. toolName: "bash",
  534. input: { command: "echo hello" },
  535. },
  536. ],
  537. },
  538. ] as any[]
  539. const result = ProviderTransform.message(
  540. msgs,
  541. {
  542. id: "deepseek/deepseek-chat",
  543. providerID: "deepseek",
  544. api: {
  545. id: "deepseek-chat",
  546. url: "https://api.deepseek.com",
  547. npm: "@ai-sdk/openai-compatible",
  548. },
  549. name: "DeepSeek Chat",
  550. capabilities: {
  551. temperature: true,
  552. reasoning: true,
  553. attachment: false,
  554. toolcall: true,
  555. input: { text: true, audio: false, image: false, video: false, pdf: false },
  556. output: { text: true, audio: false, image: false, video: false, pdf: false },
  557. interleaved: {
  558. field: "reasoning_content",
  559. },
  560. },
  561. cost: {
  562. input: 0.001,
  563. output: 0.002,
  564. cache: { read: 0.0001, write: 0.0002 },
  565. },
  566. limit: {
  567. context: 128000,
  568. output: 8192,
  569. },
  570. status: "active",
  571. options: {},
  572. headers: {},
  573. release_date: "2023-04-01",
  574. },
  575. {},
  576. )
  577. expect(result).toHaveLength(1)
  578. expect(result[0].content).toEqual([
  579. {
  580. type: "tool-call",
  581. toolCallId: "test",
  582. toolName: "bash",
  583. input: { command: "echo hello" },
  584. },
  585. ])
  586. expect(result[0].providerOptions?.openaiCompatible?.reasoning_content).toBe("Let me think about this...")
  587. })
  588. test("Non-DeepSeek providers leave reasoning content unchanged", () => {
  589. const msgs = [
  590. {
  591. role: "assistant",
  592. content: [
  593. { type: "reasoning", text: "Should not be processed" },
  594. { type: "text", text: "Answer" },
  595. ],
  596. },
  597. ] as any[]
  598. const result = ProviderTransform.message(
  599. msgs,
  600. {
  601. id: "openai/gpt-4",
  602. providerID: "openai",
  603. api: {
  604. id: "gpt-4",
  605. url: "https://api.openai.com",
  606. npm: "@ai-sdk/openai",
  607. },
  608. name: "GPT-4",
  609. capabilities: {
  610. temperature: true,
  611. reasoning: false,
  612. attachment: true,
  613. toolcall: true,
  614. input: { text: true, audio: false, image: true, video: false, pdf: false },
  615. output: { text: true, audio: false, image: false, video: false, pdf: false },
  616. interleaved: false,
  617. },
  618. cost: {
  619. input: 0.03,
  620. output: 0.06,
  621. cache: { read: 0.001, write: 0.002 },
  622. },
  623. limit: {
  624. context: 128000,
  625. output: 4096,
  626. },
  627. status: "active",
  628. options: {},
  629. headers: {},
  630. release_date: "2023-04-01",
  631. },
  632. {},
  633. )
  634. expect(result[0].content).toEqual([
  635. { type: "reasoning", text: "Should not be processed" },
  636. { type: "text", text: "Answer" },
  637. ])
  638. expect(result[0].providerOptions?.openaiCompatible?.reasoning_content).toBeUndefined()
  639. })
  640. })
  641. describe("ProviderTransform.message - empty image handling", () => {
  642. const mockModel = {
  643. id: "anthropic/claude-3-5-sonnet",
  644. providerID: "anthropic",
  645. api: {
  646. id: "claude-3-5-sonnet-20241022",
  647. url: "https://api.anthropic.com",
  648. npm: "@ai-sdk/anthropic",
  649. },
  650. name: "Claude 3.5 Sonnet",
  651. capabilities: {
  652. temperature: true,
  653. reasoning: false,
  654. attachment: true,
  655. toolcall: true,
  656. input: { text: true, audio: false, image: true, video: false, pdf: true },
  657. output: { text: true, audio: false, image: false, video: false, pdf: false },
  658. interleaved: false,
  659. },
  660. cost: {
  661. input: 0.003,
  662. output: 0.015,
  663. cache: { read: 0.0003, write: 0.00375 },
  664. },
  665. limit: {
  666. context: 200000,
  667. output: 8192,
  668. },
  669. status: "active",
  670. options: {},
  671. headers: {},
  672. } as any
  673. test("should replace empty base64 image with error text", () => {
  674. const msgs = [
  675. {
  676. role: "user",
  677. content: [
  678. { type: "text", text: "What is in this image?" },
  679. { type: "image", image: "data:image/png;base64," },
  680. ],
  681. },
  682. ] as any[]
  683. const result = ProviderTransform.message(msgs, mockModel, {})
  684. expect(result).toHaveLength(1)
  685. expect(result[0].content).toHaveLength(2)
  686. expect(result[0].content[0]).toEqual({ type: "text", text: "What is in this image?" })
  687. expect(result[0].content[1]).toEqual({
  688. type: "text",
  689. text: "ERROR: Image file is empty or corrupted. Please provide a valid image.",
  690. })
  691. })
  692. test("should keep valid base64 images unchanged", () => {
  693. const validBase64 =
  694. "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNk+M9QDwADhgGAWjR9awAAAABJRU5ErkJggg=="
  695. const msgs = [
  696. {
  697. role: "user",
  698. content: [
  699. { type: "text", text: "What is in this image?" },
  700. { type: "image", image: `data:image/png;base64,${validBase64}` },
  701. ],
  702. },
  703. ] as any[]
  704. const result = ProviderTransform.message(msgs, mockModel, {})
  705. expect(result).toHaveLength(1)
  706. expect(result[0].content).toHaveLength(2)
  707. expect(result[0].content[0]).toEqual({ type: "text", text: "What is in this image?" })
  708. expect(result[0].content[1]).toEqual({ type: "image", image: `data:image/png;base64,${validBase64}` })
  709. })
  710. test("should handle mixed valid and empty images", () => {
  711. const validBase64 =
  712. "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNk+M9QDwADhgGAWjR9awAAAABJRU5ErkJggg=="
  713. const msgs = [
  714. {
  715. role: "user",
  716. content: [
  717. { type: "text", text: "Compare these images" },
  718. { type: "image", image: `data:image/png;base64,${validBase64}` },
  719. { type: "image", image: "data:image/jpeg;base64," },
  720. ],
  721. },
  722. ] as any[]
  723. const result = ProviderTransform.message(msgs, mockModel, {})
  724. expect(result).toHaveLength(1)
  725. expect(result[0].content).toHaveLength(3)
  726. expect(result[0].content[0]).toEqual({ type: "text", text: "Compare these images" })
  727. expect(result[0].content[1]).toEqual({ type: "image", image: `data:image/png;base64,${validBase64}` })
  728. expect(result[0].content[2]).toEqual({
  729. type: "text",
  730. text: "ERROR: Image file is empty or corrupted. Please provide a valid image.",
  731. })
  732. })
  733. })
// For anthropic models the transform drops empty text/reasoning parts (and whole
// messages left with no content), since empty content blocks are not usable there.
describe("ProviderTransform.message - anthropic empty content filtering", () => {
  // Claude 3.5 Sonnet fixture; providerID/api.npm select the anthropic code path.
  const anthropicModel = {
    id: "anthropic/claude-3-5-sonnet",
    providerID: "anthropic",
    api: {
      id: "claude-3-5-sonnet-20241022",
      url: "https://api.anthropic.com",
      npm: "@ai-sdk/anthropic",
    },
    name: "Claude 3.5 Sonnet",
    capabilities: {
      temperature: true,
      reasoning: false,
      attachment: true,
      toolcall: true,
      input: { text: true, audio: false, image: true, video: false, pdf: true },
      output: { text: true, audio: false, image: false, video: false, pdf: false },
      interleaved: false,
    },
    cost: {
      input: 0.003,
      output: 0.015,
      cache: { read: 0.0003, write: 0.00375 },
    },
    limit: {
      context: 200000,
      output: 8192,
    },
    status: "active",
    options: {},
    headers: {},
  } as any
  // A message whose entire content is "" disappears from the output.
  test("filters out messages with empty string content", () => {
    const msgs = [
      { role: "user", content: "Hello" },
      { role: "assistant", content: "" },
      { role: "user", content: "World" },
    ] as any[]
    const result = ProviderTransform.message(msgs, anthropicModel, {})
    expect(result).toHaveLength(2)
    expect(result[0].content).toBe("Hello")
    expect(result[1].content).toBe("World")
  })
  // Empty text parts inside an array content are removed, non-empty ones kept.
  test("filters out empty text parts from array content", () => {
    const msgs = [
      {
        role: "assistant",
        content: [
          { type: "text", text: "" },
          { type: "text", text: "Hello" },
          { type: "text", text: "" },
        ],
      },
    ] as any[]
    const result = ProviderTransform.message(msgs, anthropicModel, {})
    expect(result).toHaveLength(1)
    expect(result[0].content).toHaveLength(1)
    expect(result[0].content[0]).toEqual({ type: "text", text: "Hello" })
  })
  // Empty reasoning parts are filtered the same way as empty text parts.
  test("filters out empty reasoning parts from array content", () => {
    const msgs = [
      {
        role: "assistant",
        content: [
          { type: "reasoning", text: "" },
          { type: "text", text: "Answer" },
          { type: "reasoning", text: "" },
        ],
      },
    ] as any[]
    const result = ProviderTransform.message(msgs, anthropicModel, {})
    expect(result).toHaveLength(1)
    expect(result[0].content).toHaveLength(1)
    expect(result[0].content[0]).toEqual({ type: "text", text: "Answer" })
  })
  // If every part of a message is empty, the whole message is dropped.
  test("removes entire message when all parts are empty", () => {
    const msgs = [
      { role: "user", content: "Hello" },
      {
        role: "assistant",
        content: [
          { type: "text", text: "" },
          { type: "reasoning", text: "" },
        ],
      },
      { role: "user", content: "World" },
    ] as any[]
    const result = ProviderTransform.message(msgs, anthropicModel, {})
    expect(result).toHaveLength(2)
    expect(result[0].content).toBe("Hello")
    expect(result[1].content).toBe("World")
  })
  // Only text/reasoning parts are subject to the emptiness check; tool calls survive.
  test("keeps non-text/reasoning parts even if text parts are empty", () => {
    const msgs = [
      {
        role: "assistant",
        content: [
          { type: "text", text: "" },
          { type: "tool-call", toolCallId: "123", toolName: "bash", input: { command: "ls" } },
        ],
      },
    ] as any[]
    const result = ProviderTransform.message(msgs, anthropicModel, {})
    expect(result).toHaveLength(1)
    expect(result[0].content).toHaveLength(1)
    expect(result[0].content[0]).toEqual({
      type: "tool-call",
      toolCallId: "123",
      toolName: "bash",
      input: { command: "ls" },
    })
  })
  // Mixed message keeps its non-empty parts and only loses the empty text part.
  test("keeps messages with valid text alongside empty parts", () => {
    const msgs = [
      {
        role: "assistant",
        content: [
          { type: "reasoning", text: "Thinking..." },
          { type: "text", text: "" },
          { type: "text", text: "Result" },
        ],
      },
    ] as any[]
    const result = ProviderTransform.message(msgs, anthropicModel, {})
    expect(result).toHaveLength(1)
    expect(result[0].content).toHaveLength(2)
    expect(result[0].content[0]).toEqual({ type: "reasoning", text: "Thinking..." })
    expect(result[0].content[1]).toEqual({ type: "text", text: "Result" })
  })
  // The filtering is anthropic-specific: openai models keep empty content untouched.
  test("does not filter for non-anthropic providers", () => {
    const openaiModel = {
      ...anthropicModel,
      providerID: "openai",
      api: {
        id: "gpt-4",
        url: "https://api.openai.com",
        npm: "@ai-sdk/openai",
      },
    }
    const msgs = [
      { role: "assistant", content: "" },
      {
        role: "assistant",
        content: [{ type: "text", text: "" }],
      },
    ] as any[]
    const result = ProviderTransform.message(msgs, openaiModel, {})
    expect(result).toHaveLength(2)
    expect(result[0].content).toBe("")
    expect(result[1].content).toHaveLength(1)
  })
})
// NOTE(review): the describe title says "strip ... when store=false", but every test
// below asserts that itemId/reasoningEncryptedContent are PRESERVED — confirm the
// title is stale (behavior appears to have changed to always keep the metadata).
describe("ProviderTransform.message - strip openai metadata when store=false", () => {
  // GPT-5 fixture on the @ai-sdk/openai package.
  const openaiModel = {
    id: "openai/gpt-5",
    providerID: "openai",
    api: {
      id: "gpt-5",
      url: "https://api.openai.com",
      npm: "@ai-sdk/openai",
    },
    name: "GPT-5",
    capabilities: {
      temperature: true,
      reasoning: true,
      attachment: true,
      toolcall: true,
      input: { text: true, audio: false, image: true, video: false, pdf: false },
      output: { text: true, audio: false, image: false, video: false, pdf: false },
      interleaved: false,
    },
    cost: { input: 0.03, output: 0.06, cache: { read: 0.001, write: 0.002 } },
    limit: { context: 128000, output: 4096 },
    status: "active",
    options: {},
    headers: {},
  } as any
  // Part-level openai metadata survives store=false on the openai provider.
  test("preserves itemId and reasoningEncryptedContent when store=false", () => {
    const msgs = [
      {
        role: "assistant",
        content: [
          {
            type: "reasoning",
            text: "thinking...",
            providerOptions: {
              openai: {
                itemId: "rs_123",
                reasoningEncryptedContent: "encrypted",
              },
            },
          },
          {
            type: "text",
            text: "Hello",
            providerOptions: {
              openai: {
                itemId: "msg_456",
              },
            },
          },
        ],
      },
    ] as any[]
    const result = ProviderTransform.message(msgs, openaiModel, { store: false }) as any[]
    expect(result).toHaveLength(1)
    expect(result[0].content[0].providerOptions?.openai?.itemId).toBe("rs_123")
    expect(result[0].content[1].providerOptions?.openai?.itemId).toBe("msg_456")
  })
  // Same preservation even when the providerID is something other than "openai".
  test("preserves itemId and reasoningEncryptedContent when store=false even when not openai", () => {
    const zenModel = {
      ...openaiModel,
      providerID: "zen",
    }
    const msgs = [
      {
        role: "assistant",
        content: [
          {
            type: "reasoning",
            text: "thinking...",
            providerOptions: {
              openai: {
                itemId: "rs_123",
                reasoningEncryptedContent: "encrypted",
              },
            },
          },
          {
            type: "text",
            text: "Hello",
            providerOptions: {
              openai: {
                itemId: "msg_456",
              },
            },
          },
        ],
      },
    ] as any[]
    const result = ProviderTransform.message(msgs, zenModel, { store: false }) as any[]
    expect(result).toHaveLength(1)
    expect(result[0].content[0].providerOptions?.openai?.itemId).toBe("rs_123")
    expect(result[0].content[1].providerOptions?.openai?.itemId).toBe("msg_456")
  })
  // Unrelated keys in the same openai options object are preserved alongside itemId.
  test("preserves other openai options including itemId", () => {
    const msgs = [
      {
        role: "assistant",
        content: [
          {
            type: "text",
            text: "Hello",
            providerOptions: {
              openai: {
                itemId: "msg_123",
                otherOption: "value",
              },
            },
          },
        ],
      },
    ] as any[]
    const result = ProviderTransform.message(msgs, openaiModel, { store: false }) as any[]
    expect(result[0].content[0].providerOptions?.openai?.itemId).toBe("msg_123")
    expect(result[0].content[0].providerOptions?.openai?.otherOption).toBe("value")
  })
  test("preserves metadata for openai package when store is true", () => {
    const msgs = [
      {
        role: "assistant",
        content: [
          {
            type: "text",
            text: "Hello",
            providerOptions: {
              openai: {
                itemId: "msg_123",
              },
            },
          },
        ],
      },
    ] as any[]
    // openai package preserves itemId regardless of store value
    const result = ProviderTransform.message(msgs, openaiModel, { store: true }) as any[]
    expect(result[0].content[0].providerOptions?.openai?.itemId).toBe("msg_123")
  })
  test("preserves metadata for non-openai packages when store is false", () => {
    // Same message shape routed through the anthropic package instead.
    const anthropicModel = {
      ...openaiModel,
      providerID: "anthropic",
      api: {
        id: "claude-3",
        url: "https://api.anthropic.com",
        npm: "@ai-sdk/anthropic",
      },
    }
    const msgs = [
      {
        role: "assistant",
        content: [
          {
            type: "text",
            text: "Hello",
            providerOptions: {
              openai: {
                itemId: "msg_123",
              },
            },
          },
        ],
      },
    ] as any[]
    // store=false preserves metadata for non-openai packages
    const result = ProviderTransform.message(msgs, anthropicModel, { store: false }) as any[]
    expect(result[0].content[0].providerOptions?.openai?.itemId).toBe("msg_123")
  })
  // Metadata stored under the provider's own key (here "opencode") is also kept.
  test("preserves metadata using providerID key when store is false", () => {
    const opencodeModel = {
      ...openaiModel,
      providerID: "opencode",
      api: {
        id: "opencode-test",
        url: "https://api.opencode.ai",
        npm: "@ai-sdk/openai-compatible",
      },
    }
    const msgs = [
      {
        role: "assistant",
        content: [
          {
            type: "text",
            text: "Hello",
            providerOptions: {
              opencode: {
                itemId: "msg_123",
                otherOption: "value",
              },
            },
          },
        ],
      },
    ] as any[]
    const result = ProviderTransform.message(msgs, opencodeModel, { store: false }) as any[]
    expect(result[0].content[0].providerOptions?.opencode?.itemId).toBe("msg_123")
    expect(result[0].content[0].providerOptions?.opencode?.otherOption).toBe("value")
  })
  // itemId is kept under every providerOptions namespace, at message and part level.
  test("preserves itemId across all providerOptions keys", () => {
    const opencodeModel = {
      ...openaiModel,
      providerID: "opencode",
      api: {
        id: "opencode-test",
        url: "https://api.opencode.ai",
        npm: "@ai-sdk/openai-compatible",
      },
    }
    const msgs = [
      {
        role: "assistant",
        providerOptions: {
          openai: { itemId: "msg_root" },
          opencode: { itemId: "msg_opencode" },
          extra: { itemId: "msg_extra" },
        },
        content: [
          {
            type: "text",
            text: "Hello",
            providerOptions: {
              openai: { itemId: "msg_openai_part" },
              opencode: { itemId: "msg_opencode_part" },
              extra: { itemId: "msg_extra_part" },
            },
          },
        ],
      },
    ] as any[]
    const result = ProviderTransform.message(msgs, opencodeModel, { store: false }) as any[]
    expect(result[0].providerOptions?.openai?.itemId).toBe("msg_root")
    expect(result[0].providerOptions?.opencode?.itemId).toBe("msg_opencode")
    expect(result[0].providerOptions?.extra?.itemId).toBe("msg_extra")
    expect(result[0].content[0].providerOptions?.openai?.itemId).toBe("msg_openai_part")
    expect(result[0].content[0].providerOptions?.opencode?.itemId).toBe("msg_opencode_part")
    expect(result[0].content[0].providerOptions?.extra?.itemId).toBe("msg_extra_part")
  })
  // With store unset (not false), non-openai packages also keep the metadata.
  test("does not strip metadata for non-openai packages when store is not false", () => {
    const anthropicModel = {
      ...openaiModel,
      providerID: "anthropic",
      api: {
        id: "claude-3",
        url: "https://api.anthropic.com",
        npm: "@ai-sdk/anthropic",
      },
    }
    const msgs = [
      {
        role: "assistant",
        content: [
          {
            type: "text",
            text: "Hello",
            providerOptions: {
              openai: {
                itemId: "msg_123",
              },
            },
          },
        ],
      },
    ] as any[]
    const result = ProviderTransform.message(msgs, anthropicModel, {}) as any[]
    expect(result[0].content[0].providerOptions?.openai?.itemId).toBe("msg_123")
  })
})
// The transform remaps providerOptions keyed by the (possibly custom) providerID
// onto the canonical key the underlying AI SDK package expects.
describe("ProviderTransform.message - providerOptions key remapping", () => {
  // Factory for a minimal model fixture with a configurable providerID/npm package.
  const createModel = (providerID: string, npm: string) =>
    ({
      id: `${providerID}/test-model`,
      providerID,
      api: {
        id: "test-model",
        url: "https://api.test.com",
        npm,
      },
      name: "Test Model",
      capabilities: {
        temperature: true,
        reasoning: false,
        attachment: true,
        toolcall: true,
        input: { text: true, audio: false, image: true, video: false, pdf: true },
        output: { text: true, audio: false, image: false, video: false, pdf: false },
        interleaved: false,
      },
      cost: { input: 0.001, output: 0.002, cache: { read: 0.0001, write: 0.0002 } },
      limit: { context: 128000, output: 8192 },
      status: "active",
      options: {},
      headers: {},
    }) as any
  // Azure's options stay under "azure"; they must not be folded into "openai".
  test("azure keeps 'azure' key and does not remap to 'openai'", () => {
    const model = createModel("azure", "@ai-sdk/azure")
    const msgs = [
      {
        role: "user",
        content: "Hello",
        providerOptions: {
          azure: { someOption: "value" },
        },
      },
    ] as any[]
    const result = ProviderTransform.message(msgs, model, {})
    expect(result[0].providerOptions?.azure).toEqual({ someOption: "value" })
    expect(result[0].providerOptions?.openai).toBeUndefined()
  })
  // providerID "github-copilot" maps onto the package's "copilot" options key.
  test("copilot remaps providerID to 'copilot' key", () => {
    const model = createModel("github-copilot", "@ai-sdk/github-copilot")
    const msgs = [
      {
        role: "user",
        content: "Hello",
        providerOptions: {
          copilot: { someOption: "value" },
        },
      },
    ] as any[]
    const result = ProviderTransform.message(msgs, model, {})
    expect(result[0].providerOptions?.copilot).toEqual({ someOption: "value" })
    expect(result[0].providerOptions?.["github-copilot"]).toBeUndefined()
  })
  // A custom providerID on the bedrock package is remapped to the "bedrock" key.
  test("bedrock remaps providerID to 'bedrock' key", () => {
    const model = createModel("my-bedrock", "@ai-sdk/amazon-bedrock")
    const msgs = [
      {
        role: "user",
        content: "Hello",
        providerOptions: {
          "my-bedrock": { someOption: "value" },
        },
      },
    ] as any[]
    const result = ProviderTransform.message(msgs, model, {})
    expect(result[0].providerOptions?.bedrock).toEqual({ someOption: "value" })
    expect(result[0].providerOptions?.["my-bedrock"]).toBeUndefined()
  })
})
  1224. describe("ProviderTransform.message - claude w/bedrock custom inference profile", () => {
  1225. test("adds cachePoint", () => {
  1226. const model = {
  1227. id: "amazon-bedrock/custom-claude-sonnet-4.5",
  1228. providerID: "amazon-bedrock",
  1229. api: {
  1230. id: "arn:aws:bedrock:xxx:yyy:application-inference-profile/zzz",
  1231. url: "https://api.test.com",
  1232. npm: "@ai-sdk/amazon-bedrock",
  1233. },
  1234. name: "Custom inference profile",
  1235. capabilities: {},
  1236. options: {},
  1237. headers: {},
  1238. } as any
  1239. const msgs = [
  1240. {
  1241. role: "user",
  1242. content: "Hello",
  1243. },
  1244. ] as any[]
  1245. const result = ProviderTransform.message(msgs, model, {})
  1246. expect(result[0].providerOptions?.bedrock).toEqual(
  1247. expect.objectContaining({
  1248. cachePoint: {
  1249. type: "default",
  1250. },
  1251. }),
  1252. )
  1253. })
  1254. })
// Cache-control markers are injected for direct anthropic access but must NOT be
// injected when the same model is reached through the Vercel AI gateway.
describe("ProviderTransform.message - cache control on gateway", () => {
  // Claude Sonnet 4 fixture, defaulting to the gateway package; overridable.
  const createModel = (overrides: Partial<any> = {}) =>
    ({
      id: "anthropic/claude-sonnet-4",
      providerID: "vercel",
      api: {
        id: "anthropic/claude-sonnet-4",
        url: "https://ai-gateway.vercel.sh/v3/ai",
        npm: "@ai-sdk/gateway",
      },
      name: "Claude Sonnet 4",
      capabilities: {
        temperature: true,
        reasoning: true,
        attachment: true,
        toolcall: true,
        input: { text: true, audio: false, image: true, video: false, pdf: true },
        output: { text: true, audio: false, image: false, video: false, pdf: false },
        interleaved: false,
      },
      cost: { input: 0.001, output: 0.002, cache: { read: 0.0001, write: 0.0002 } },
      limit: { context: 200_000, output: 8192 },
      status: "active",
      options: {},
      headers: {},
      ...overrides,
    }) as any
  // Gateway path: no providerOptions are added at all.
  test("gateway does not set cache control for anthropic models", () => {
    const model = createModel()
    const msgs = [
      {
        role: "system",
        content: [{ type: "text", text: "You are a helpful assistant" }],
      },
      {
        role: "user",
        content: "Hello",
      },
    ] as any[]
    const result = ProviderTransform.message(msgs, model, {}) as any[]
    expect(result[0].content[0].providerOptions).toBeUndefined()
    expect(result[0].providerOptions).toBeUndefined()
  })
  // Direct anthropic path: cache-control markers are emitted under every
  // package-specific key (anthropic/openrouter/bedrock/openaiCompatible/copilot).
  test("non-gateway anthropic keeps existing cache control behavior", () => {
    const model = createModel({
      providerID: "anthropic",
      api: {
        id: "claude-sonnet-4",
        url: "https://api.anthropic.com",
        npm: "@ai-sdk/anthropic",
      },
    })
    const msgs = [
      {
        role: "system",
        content: "You are a helpful assistant",
      },
      {
        role: "user",
        content: "Hello",
      },
    ] as any[]
    const result = ProviderTransform.message(msgs, model, {}) as any[]
    expect(result[0].providerOptions).toEqual({
      anthropic: {
        cacheControl: {
          type: "ephemeral",
        },
      },
      openrouter: {
        cacheControl: {
          type: "ephemeral",
        },
      },
      bedrock: {
        cachePoint: {
          type: "default",
        },
      },
      openaiCompatible: {
        cache_control: {
          type: "ephemeral",
        },
      },
      copilot: {
        copilot_cache_control: {
          type: "ephemeral",
        },
      },
    })
  })
})
  1347. describe("ProviderTransform.variants", () => {
// Baseline reasoning-capable model fixture for the variants() tests below;
// callers override id/providerID/api (and capabilities) per scenario.
const createMockModel = (overrides: Partial<any> = {}): any => ({
  id: "test/test-model",
  providerID: "test",
  api: {
    id: "test-model",
    url: "https://api.test.com",
    npm: "@ai-sdk/openai",
  },
  name: "Test Model",
  capabilities: {
    temperature: true,
    reasoning: true, // reasoning on by default — variants() returns {} without it
    attachment: true,
    toolcall: true,
    input: { text: true, audio: false, image: true, video: false, pdf: false },
    output: { text: true, audio: false, image: false, video: false, pdf: false },
    interleaved: false,
  },
  cost: {
    input: 0.001,
    output: 0.002,
    cache: { read: 0.0001, write: 0.0002 },
  },
  limit: {
    context: 200_000,
    output: 64_000,
  },
  status: "active",
  options: {},
  headers: {},
  release_date: "2024-01-01",
  ...overrides, // later keys win, letting tests swap any field
})
// Providers/models with no effort-variant support must yield an empty variant map.
test("returns empty object when model has no reasoning capabilities", () => {
  const model = createMockModel({
    capabilities: { reasoning: false },
  })
  const result = ProviderTransform.variants(model)
  expect(result).toEqual({})
})
test("deepseek returns empty object", () => {
  const model = createMockModel({
    id: "deepseek/deepseek-chat",
    providerID: "deepseek",
    api: {
      id: "deepseek-chat",
      url: "https://api.deepseek.com",
      npm: "@ai-sdk/openai-compatible",
    },
  })
  const result = ProviderTransform.variants(model)
  expect(result).toEqual({})
})
test("minimax returns empty object", () => {
  const model = createMockModel({
    id: "minimax/minimax-model",
    providerID: "minimax",
    api: {
      id: "minimax-model",
      url: "https://api.minimax.com",
      npm: "@ai-sdk/openai-compatible",
    },
  })
  const result = ProviderTransform.variants(model)
  expect(result).toEqual({})
})
test("glm returns empty object", () => {
  const model = createMockModel({
    id: "glm/glm-4",
    providerID: "glm",
    api: {
      id: "glm-4",
      url: "https://api.glm.com",
      npm: "@ai-sdk/openai-compatible",
    },
  })
  const result = ProviderTransform.variants(model)
  expect(result).toEqual({})
})
test("mistral returns empty object", () => {
  const model = createMockModel({
    id: "mistral/mistral-large",
    providerID: "mistral",
    api: {
      id: "mistral-large-latest",
      url: "https://api.mistral.com",
      npm: "@ai-sdk/mistral",
    },
  })
  const result = ProviderTransform.variants(model)
  expect(result).toEqual({})
})
// Variant maps for models routed through the OpenRouter SDK package; the set of
// efforts depends on the underlying model family.
describe("@openrouter/ai-sdk-provider", () => {
  test("returns empty object for non-qualifying models", () => {
    const model = createMockModel({
      id: "openrouter/test-model",
      providerID: "openrouter",
      api: {
        id: "test-model",
        url: "https://openrouter.ai",
        npm: "@openrouter/ai-sdk-provider",
      },
    })
    const result = ProviderTransform.variants(model)
    expect(result).toEqual({})
  })
  // GPT-family models get the full OpenAI effort ladder, nested under `reasoning`.
  test("gpt models return OPENAI_EFFORTS with reasoning", () => {
    const model = createMockModel({
      id: "openrouter/gpt-4",
      providerID: "openrouter",
      api: {
        id: "gpt-4",
        url: "https://openrouter.ai",
        npm: "@openrouter/ai-sdk-provider",
      },
    })
    const result = ProviderTransform.variants(model)
    expect(Object.keys(result)).toEqual(["none", "minimal", "low", "medium", "high", "xhigh"])
    expect(result.low).toEqual({ reasoning: { effort: "low" } })
    expect(result.high).toEqual({ reasoning: { effort: "high" } })
  })
  // Gemini 3 models get the same effort ladder as GPT models.
  test("gemini-3 returns OPENAI_EFFORTS with reasoning", () => {
    const model = createMockModel({
      id: "openrouter/gemini-3-5-pro",
      providerID: "openrouter",
      api: {
        id: "gemini-3-5-pro",
        url: "https://openrouter.ai",
        npm: "@openrouter/ai-sdk-provider",
      },
    })
    const result = ProviderTransform.variants(model)
    expect(Object.keys(result)).toEqual(["none", "minimal", "low", "medium", "high", "xhigh"])
  })
  test("grok-4 returns empty object", () => {
    const model = createMockModel({
      id: "openrouter/grok-4",
      providerID: "openrouter",
      api: {
        id: "grok-4",
        url: "https://openrouter.ai",
        npm: "@openrouter/ai-sdk-provider",
      },
    })
    const result = ProviderTransform.variants(model)
    expect(result).toEqual({})
  })
  // grok-3-mini supports exactly two efforts.
  test("grok-3-mini returns low and high with reasoning", () => {
    const model = createMockModel({
      id: "openrouter/grok-3-mini",
      providerID: "openrouter",
      api: {
        id: "grok-3-mini",
        url: "https://openrouter.ai",
        npm: "@openrouter/ai-sdk-provider",
      },
    })
    const result = ProviderTransform.variants(model)
    expect(Object.keys(result)).toEqual(["low", "high"])
    expect(result.low).toEqual({ reasoning: { effort: "low" } })
    expect(result.high).toEqual({ reasoning: { effort: "high" } })
  })
})
// Variant maps for models behind the Vercel AI gateway package.
describe("@ai-sdk/gateway", () => {
  // Anthropic models expose thinking budgets instead of effort strings.
  test("anthropic models return anthropic thinking options", () => {
    const model = createMockModel({
      id: "anthropic/claude-sonnet-4",
      providerID: "gateway",
      api: {
        id: "anthropic/claude-sonnet-4",
        url: "https://gateway.ai",
        npm: "@ai-sdk/gateway",
      },
    })
    const result = ProviderTransform.variants(model)
    expect(Object.keys(result)).toEqual(["high", "max"])
    expect(result.high).toEqual({
      thinking: {
        type: "enabled",
        budgetTokens: 16000,
      },
    })
    expect(result.max).toEqual({
      thinking: {
        type: "enabled",
        budgetTokens: 31999,
      },
    })
  })
  // Non-anthropic gateway models use the flat `reasoningEffort` ladder.
  test("returns OPENAI_EFFORTS with reasoningEffort", () => {
    const model = createMockModel({
      id: "gateway/gateway-model",
      providerID: "gateway",
      api: {
        id: "gateway-model",
        url: "https://gateway.ai",
        npm: "@ai-sdk/gateway",
      },
    })
    const result = ProviderTransform.variants(model)
    expect(Object.keys(result)).toEqual(["none", "minimal", "low", "medium", "high", "xhigh"])
    expect(result.low).toEqual({ reasoningEffort: "low" })
    expect(result.high).toEqual({ reasoningEffort: "high" })
  })
})
// Variant maps for GitHub Copilot models: low/medium/high with reasoning summary
// plus encrypted-content passthrough; "xhigh" only for specific newer model ids.
describe("@ai-sdk/github-copilot", () => {
  test("standard models return low, medium, high", () => {
    const model = createMockModel({
      id: "gpt-4.5",
      providerID: "github-copilot",
      api: {
        id: "gpt-4.5",
        url: "https://api.githubcopilot.com",
        npm: "@ai-sdk/github-copilot",
      },
    })
    const result = ProviderTransform.variants(model)
    expect(Object.keys(result)).toEqual(["low", "medium", "high"])
    expect(result.low).toEqual({
      reasoningEffort: "low",
      reasoningSummary: "auto",
      include: ["reasoning.encrypted_content"],
    })
  })
  test("gpt-5.1-codex-max includes xhigh", () => {
    const model = createMockModel({
      id: "gpt-5.1-codex-max",
      providerID: "github-copilot",
      api: {
        id: "gpt-5.1-codex-max",
        url: "https://api.githubcopilot.com",
        npm: "@ai-sdk/github-copilot",
      },
    })
    const result = ProviderTransform.variants(model)
    expect(Object.keys(result)).toEqual(["low", "medium", "high", "xhigh"])
  })
  // Plain "codex"/"codex-mini" 5.1 variants stop at "high".
  test("gpt-5.1-codex-mini does not include xhigh", () => {
    const model = createMockModel({
      id: "gpt-5.1-codex-mini",
      providerID: "github-copilot",
      api: {
        id: "gpt-5.1-codex-mini",
        url: "https://api.githubcopilot.com",
        npm: "@ai-sdk/github-copilot",
      },
    })
    const result = ProviderTransform.variants(model)
    expect(Object.keys(result)).toEqual(["low", "medium", "high"])
  })
  test("gpt-5.1-codex does not include xhigh", () => {
    const model = createMockModel({
      id: "gpt-5.1-codex",
      providerID: "github-copilot",
      api: {
        id: "gpt-5.1-codex",
        url: "https://api.githubcopilot.com",
        npm: "@ai-sdk/github-copilot",
      },
    })
    const result = ProviderTransform.variants(model)
    expect(Object.keys(result)).toEqual(["low", "medium", "high"])
  })
  // 5.2-generation models gain the "xhigh" effort.
  test("gpt-5.2 includes xhigh", () => {
    const model = createMockModel({
      id: "gpt-5.2",
      providerID: "github-copilot",
      api: {
        id: "gpt-5.2",
        url: "https://api.githubcopilot.com",
        npm: "@ai-sdk/github-copilot",
      },
    })
    const result = ProviderTransform.variants(model)
    expect(Object.keys(result)).toEqual(["low", "medium", "high", "xhigh"])
    expect(result.xhigh).toEqual({
      reasoningEffort: "xhigh",
      reasoningSummary: "auto",
      include: ["reasoning.encrypted_content"],
    })
  })
  test("gpt-5.2-codex includes xhigh", () => {
    const model = createMockModel({
      id: "gpt-5.2-codex",
      providerID: "github-copilot",
      api: {
        id: "gpt-5.2-codex",
        url: "https://api.githubcopilot.com",
        npm: "@ai-sdk/github-copilot",
      },
    })
    const result = ProviderTransform.variants(model)
    expect(Object.keys(result)).toEqual(["low", "medium", "high", "xhigh"])
  })
})
  1643. describe("@ai-sdk/cerebras", () => {
  1644. test("returns WIDELY_SUPPORTED_EFFORTS with reasoningEffort", () => {
  1645. const model = createMockModel({
  1646. id: "cerebras/llama-4",
  1647. providerID: "cerebras",
  1648. api: {
  1649. id: "llama-4-sc",
  1650. url: "https://api.cerebras.ai",
  1651. npm: "@ai-sdk/cerebras",
  1652. },
  1653. })
  1654. const result = ProviderTransform.variants(model)
  1655. expect(Object.keys(result)).toEqual(["low", "medium", "high"])
  1656. expect(result.low).toEqual({ reasoningEffort: "low" })
  1657. expect(result.high).toEqual({ reasoningEffort: "high" })
  1658. })
  1659. })
  1660. describe("@ai-sdk/togetherai", () => {
  1661. test("returns WIDELY_SUPPORTED_EFFORTS with reasoningEffort", () => {
  1662. const model = createMockModel({
  1663. id: "togetherai/llama-4",
  1664. providerID: "togetherai",
  1665. api: {
  1666. id: "llama-4-sc",
  1667. url: "https://api.togetherai.com",
  1668. npm: "@ai-sdk/togetherai",
  1669. },
  1670. })
  1671. const result = ProviderTransform.variants(model)
  1672. expect(Object.keys(result)).toEqual(["low", "medium", "high"])
  1673. expect(result.low).toEqual({ reasoningEffort: "low" })
  1674. expect(result.high).toEqual({ reasoningEffort: "high" })
  1675. })
  1676. })
// Variant maps for native xAI models: only grok-3-mini exposes effort levels.
describe("@ai-sdk/xai", () => {
  test("grok-3 returns empty object", () => {
    const model = createMockModel({
      id: "xai/grok-3",
      providerID: "xai",
      api: {
        id: "grok-3",
        url: "https://api.x.ai",
        npm: "@ai-sdk/xai",
      },
    })
    const result = ProviderTransform.variants(model)
    expect(result).toEqual({})
  })
  // grok-3-mini supports exactly low and high.
  test("grok-3-mini returns low and high with reasoningEffort", () => {
    const model = createMockModel({
      id: "xai/grok-3-mini",
      providerID: "xai",
      api: {
        id: "grok-3-mini",
        url: "https://api.x.ai",
        npm: "@ai-sdk/xai",
      },
    })
    const result = ProviderTransform.variants(model)
    expect(Object.keys(result)).toEqual(["low", "high"])
    expect(result.low).toEqual({ reasoningEffort: "low" })
    expect(result.high).toEqual({ reasoningEffort: "high" })
  })
})
  1707. describe("@ai-sdk/deepinfra", () => {
  1708. test("returns WIDELY_SUPPORTED_EFFORTS with reasoningEffort", () => {
  1709. const model = createMockModel({
  1710. id: "deepinfra/llama-4",
  1711. providerID: "deepinfra",
  1712. api: {
  1713. id: "llama-4-sc",
  1714. url: "https://api.deepinfra.com",
  1715. npm: "@ai-sdk/deepinfra",
  1716. },
  1717. })
  1718. const result = ProviderTransform.variants(model)
  1719. expect(Object.keys(result)).toEqual(["low", "medium", "high"])
  1720. expect(result.low).toEqual({ reasoningEffort: "low" })
  1721. expect(result.high).toEqual({ reasoningEffort: "high" })
  1722. })
  1723. })
  1724. describe("@ai-sdk/openai-compatible", () => {
  1725. test("returns WIDELY_SUPPORTED_EFFORTS with reasoningEffort", () => {
  1726. const model = createMockModel({
  1727. id: "custom-provider/custom-model",
  1728. providerID: "custom-provider",
  1729. api: {
  1730. id: "custom-model",
  1731. url: "https://api.custom.com",
  1732. npm: "@ai-sdk/openai-compatible",
  1733. },
  1734. })
  1735. const result = ProviderTransform.variants(model)
  1736. expect(Object.keys(result)).toEqual(["low", "medium", "high"])
  1737. expect(result.low).toEqual({ reasoningEffort: "low" })
  1738. expect(result.high).toEqual({ reasoningEffort: "high" })
  1739. })
  1740. })
  1741. describe("@ai-sdk/azure", () => {
  1742. test("o1-mini returns empty object", () => {
  1743. const model = createMockModel({
  1744. id: "o1-mini",
  1745. providerID: "azure",
  1746. api: {
  1747. id: "o1-mini",
  1748. url: "https://azure.com",
  1749. npm: "@ai-sdk/azure",
  1750. },
  1751. })
  1752. const result = ProviderTransform.variants(model)
  1753. expect(result).toEqual({})
  1754. })
  1755. test("standard azure models return custom efforts with reasoningSummary", () => {
  1756. const model = createMockModel({
  1757. id: "o1",
  1758. providerID: "azure",
  1759. api: {
  1760. id: "o1",
  1761. url: "https://azure.com",
  1762. npm: "@ai-sdk/azure",
  1763. },
  1764. })
  1765. const result = ProviderTransform.variants(model)
  1766. expect(Object.keys(result)).toEqual(["low", "medium", "high"])
  1767. expect(result.low).toEqual({
  1768. reasoningEffort: "low",
  1769. reasoningSummary: "auto",
  1770. include: ["reasoning.encrypted_content"],
  1771. })
  1772. })
  1773. test("gpt-5 adds minimal effort", () => {
  1774. const model = createMockModel({
  1775. id: "gpt-5",
  1776. providerID: "azure",
  1777. api: {
  1778. id: "gpt-5",
  1779. url: "https://azure.com",
  1780. npm: "@ai-sdk/azure",
  1781. },
  1782. })
  1783. const result = ProviderTransform.variants(model)
  1784. expect(Object.keys(result)).toEqual(["minimal", "low", "medium", "high"])
  1785. })
  1786. })
  1787. describe("@ai-sdk/openai", () => {
  1788. test("gpt-5-pro returns empty object", () => {
  1789. const model = createMockModel({
  1790. id: "gpt-5-pro",
  1791. providerID: "openai",
  1792. api: {
  1793. id: "gpt-5-pro",
  1794. url: "https://api.openai.com",
  1795. npm: "@ai-sdk/openai",
  1796. },
  1797. })
  1798. const result = ProviderTransform.variants(model)
  1799. expect(result).toEqual({})
  1800. })
  1801. test("standard openai models return custom efforts with reasoningSummary", () => {
  1802. const model = createMockModel({
  1803. id: "gpt-5",
  1804. providerID: "openai",
  1805. api: {
  1806. id: "gpt-5",
  1807. url: "https://api.openai.com",
  1808. npm: "@ai-sdk/openai",
  1809. },
  1810. release_date: "2024-06-01",
  1811. })
  1812. const result = ProviderTransform.variants(model)
  1813. expect(Object.keys(result)).toEqual(["minimal", "low", "medium", "high"])
  1814. expect(result.low).toEqual({
  1815. reasoningEffort: "low",
  1816. reasoningSummary: "auto",
  1817. include: ["reasoning.encrypted_content"],
  1818. })
  1819. })
  1820. test("models after 2025-11-13 include 'none' effort", () => {
  1821. const model = createMockModel({
  1822. id: "gpt-5-nano",
  1823. providerID: "openai",
  1824. api: {
  1825. id: "gpt-5-nano",
  1826. url: "https://api.openai.com",
  1827. npm: "@ai-sdk/openai",
  1828. },
  1829. release_date: "2025-11-14",
  1830. })
  1831. const result = ProviderTransform.variants(model)
  1832. expect(Object.keys(result)).toEqual(["none", "minimal", "low", "medium", "high"])
  1833. })
  1834. test("models after 2025-12-04 include 'xhigh' effort", () => {
  1835. const model = createMockModel({
  1836. id: "openai/gpt-5-chat",
  1837. providerID: "openai",
  1838. api: {
  1839. id: "gpt-5-chat",
  1840. url: "https://api.openai.com",
  1841. npm: "@ai-sdk/openai",
  1842. },
  1843. release_date: "2025-12-05",
  1844. })
  1845. const result = ProviderTransform.variants(model)
  1846. expect(Object.keys(result)).toEqual(["none", "minimal", "low", "medium", "high", "xhigh"])
  1847. })
  1848. })
  1849. describe("@ai-sdk/anthropic", () => {
  1850. test("returns high and max with thinking config", () => {
  1851. const model = createMockModel({
  1852. id: "anthropic/claude-4",
  1853. providerID: "anthropic",
  1854. api: {
  1855. id: "claude-4",
  1856. url: "https://api.anthropic.com",
  1857. npm: "@ai-sdk/anthropic",
  1858. },
  1859. })
  1860. const result = ProviderTransform.variants(model)
  1861. expect(Object.keys(result)).toEqual(["high", "max"])
  1862. expect(result.high).toEqual({
  1863. thinking: {
  1864. type: "enabled",
  1865. budgetTokens: 16000,
  1866. },
  1867. })
  1868. expect(result.max).toEqual({
  1869. thinking: {
  1870. type: "enabled",
  1871. budgetTokens: 31999,
  1872. },
  1873. })
  1874. })
  1875. })
  1876. describe("@ai-sdk/amazon-bedrock", () => {
  1877. test("returns WIDELY_SUPPORTED_EFFORTS with reasoningConfig", () => {
  1878. const model = createMockModel({
  1879. id: "bedrock/llama-4",
  1880. providerID: "bedrock",
  1881. api: {
  1882. id: "llama-4-sc",
  1883. url: "https://bedrock.amazonaws.com",
  1884. npm: "@ai-sdk/amazon-bedrock",
  1885. },
  1886. })
  1887. const result = ProviderTransform.variants(model)
  1888. expect(Object.keys(result)).toEqual(["low", "medium", "high"])
  1889. expect(result.low).toEqual({
  1890. reasoningConfig: {
  1891. type: "enabled",
  1892. maxReasoningEffort: "low",
  1893. },
  1894. })
  1895. })
  1896. })
  1897. describe("@ai-sdk/google", () => {
  1898. test("gemini-2.5 returns high and max with thinkingConfig and thinkingBudget", () => {
  1899. const model = createMockModel({
  1900. id: "google/gemini-2.5-pro",
  1901. providerID: "google",
  1902. api: {
  1903. id: "gemini-2.5-pro",
  1904. url: "https://generativelanguage.googleapis.com",
  1905. npm: "@ai-sdk/google",
  1906. },
  1907. })
  1908. const result = ProviderTransform.variants(model)
  1909. expect(Object.keys(result)).toEqual(["high", "max"])
  1910. expect(result.high).toEqual({
  1911. thinkingConfig: {
  1912. includeThoughts: true,
  1913. thinkingBudget: 16000,
  1914. },
  1915. })
  1916. expect(result.max).toEqual({
  1917. thinkingConfig: {
  1918. includeThoughts: true,
  1919. thinkingBudget: 24576,
  1920. },
  1921. })
  1922. })
  1923. test("other gemini models return low and high with thinkingLevel", () => {
  1924. const model = createMockModel({
  1925. id: "google/gemini-2.0-pro",
  1926. providerID: "google",
  1927. api: {
  1928. id: "gemini-2.0-pro",
  1929. url: "https://generativelanguage.googleapis.com",
  1930. npm: "@ai-sdk/google",
  1931. },
  1932. })
  1933. const result = ProviderTransform.variants(model)
  1934. expect(Object.keys(result)).toEqual(["low", "high"])
  1935. expect(result.low).toEqual({
  1936. includeThoughts: true,
  1937. thinkingLevel: "low",
  1938. })
  1939. expect(result.high).toEqual({
  1940. includeThoughts: true,
  1941. thinkingLevel: "high",
  1942. })
  1943. })
  1944. })
  1945. describe("@ai-sdk/google-vertex", () => {
  1946. test("gemini-2.5 returns high and max with thinkingConfig and thinkingBudget", () => {
  1947. const model = createMockModel({
  1948. id: "google-vertex/gemini-2.5-pro",
  1949. providerID: "google-vertex",
  1950. api: {
  1951. id: "gemini-2.5-pro",
  1952. url: "https://vertexai.googleapis.com",
  1953. npm: "@ai-sdk/google-vertex",
  1954. },
  1955. })
  1956. const result = ProviderTransform.variants(model)
  1957. expect(Object.keys(result)).toEqual(["high", "max"])
  1958. })
  1959. test("other vertex models return low and high with thinkingLevel", () => {
  1960. const model = createMockModel({
  1961. id: "google-vertex/gemini-2.0-pro",
  1962. providerID: "google-vertex",
  1963. api: {
  1964. id: "gemini-2.0-pro",
  1965. url: "https://vertexai.googleapis.com",
  1966. npm: "@ai-sdk/google-vertex",
  1967. },
  1968. })
  1969. const result = ProviderTransform.variants(model)
  1970. expect(Object.keys(result)).toEqual(["low", "high"])
  1971. })
  1972. })
  1973. describe("@ai-sdk/cohere", () => {
  1974. test("returns empty object", () => {
  1975. const model = createMockModel({
  1976. id: "cohere/command-r",
  1977. providerID: "cohere",
  1978. api: {
  1979. id: "command-r",
  1980. url: "https://api.cohere.com",
  1981. npm: "@ai-sdk/cohere",
  1982. },
  1983. })
  1984. const result = ProviderTransform.variants(model)
  1985. expect(result).toEqual({})
  1986. })
  1987. })
  1988. describe("@ai-sdk/groq", () => {
  1989. test("returns none and WIDELY_SUPPORTED_EFFORTS with thinkingLevel", () => {
  1990. const model = createMockModel({
  1991. id: "groq/llama-4",
  1992. providerID: "groq",
  1993. api: {
  1994. id: "llama-4-sc",
  1995. url: "https://api.groq.com",
  1996. npm: "@ai-sdk/groq",
  1997. },
  1998. })
  1999. const result = ProviderTransform.variants(model)
  2000. expect(Object.keys(result)).toEqual(["none", "low", "medium", "high"])
  2001. expect(result.none).toEqual({
  2002. includeThoughts: true,
  2003. thinkingLevel: "none",
  2004. })
  2005. expect(result.low).toEqual({
  2006. includeThoughts: true,
  2007. thinkingLevel: "low",
  2008. })
  2009. })
  2010. })
  2011. describe("@ai-sdk/perplexity", () => {
  2012. test("returns empty object", () => {
  2013. const model = createMockModel({
  2014. id: "perplexity/sonar-plus",
  2015. providerID: "perplexity",
  2016. api: {
  2017. id: "sonar-plus",
  2018. url: "https://api.perplexity.ai",
  2019. npm: "@ai-sdk/perplexity",
  2020. },
  2021. })
  2022. const result = ProviderTransform.variants(model)
  2023. expect(result).toEqual({})
  2024. })
  2025. })
  2026. })