// transform.test.ts
  1. import { describe, expect, test } from "bun:test"
  2. import { ProviderTransform } from "../../src/provider/transform"
// Shared output-token ceiling fixture. Not referenced in this portion of the
// file — presumably used by tests further down; TODO(review): confirm.
const OUTPUT_TOKEN_MAX = 32000
  4. describe("ProviderTransform.options - setCacheKey", () => {
  5. const sessionID = "test-session-123"
  6. const mockModel = {
  7. id: "anthropic/claude-3-5-sonnet",
  8. providerID: "anthropic",
  9. api: {
  10. id: "claude-3-5-sonnet-20241022",
  11. url: "https://api.anthropic.com",
  12. npm: "@ai-sdk/anthropic",
  13. },
  14. name: "Claude 3.5 Sonnet",
  15. capabilities: {
  16. temperature: true,
  17. reasoning: false,
  18. attachment: true,
  19. toolcall: true,
  20. input: { text: true, audio: false, image: true, video: false, pdf: true },
  21. output: { text: true, audio: false, image: false, video: false, pdf: false },
  22. interleaved: false,
  23. },
  24. cost: {
  25. input: 0.003,
  26. output: 0.015,
  27. cache: { read: 0.0003, write: 0.00375 },
  28. },
  29. limit: {
  30. context: 200000,
  31. output: 8192,
  32. },
  33. status: "active",
  34. options: {},
  35. headers: {},
  36. } as any
  37. test("should set promptCacheKey when providerOptions.setCacheKey is true", () => {
  38. const result = ProviderTransform.options({
  39. model: mockModel,
  40. sessionID,
  41. providerOptions: { setCacheKey: true },
  42. })
  43. expect(result.promptCacheKey).toBe(sessionID)
  44. })
  45. test("should not set promptCacheKey when providerOptions.setCacheKey is false", () => {
  46. const result = ProviderTransform.options({
  47. model: mockModel,
  48. sessionID,
  49. providerOptions: { setCacheKey: false },
  50. })
  51. expect(result.promptCacheKey).toBeUndefined()
  52. })
  53. test("should not set promptCacheKey when providerOptions is undefined", () => {
  54. const result = ProviderTransform.options({
  55. model: mockModel,
  56. sessionID,
  57. providerOptions: undefined,
  58. })
  59. expect(result.promptCacheKey).toBeUndefined()
  60. })
  61. test("should not set promptCacheKey when providerOptions does not have setCacheKey", () => {
  62. const result = ProviderTransform.options({ model: mockModel, sessionID, providerOptions: {} })
  63. expect(result.promptCacheKey).toBeUndefined()
  64. })
  65. test("should set promptCacheKey for openai provider regardless of setCacheKey", () => {
  66. const openaiModel = {
  67. ...mockModel,
  68. providerID: "openai",
  69. api: {
  70. id: "gpt-4",
  71. url: "https://api.openai.com",
  72. npm: "@ai-sdk/openai",
  73. },
  74. }
  75. const result = ProviderTransform.options({ model: openaiModel, sessionID, providerOptions: {} })
  76. expect(result.promptCacheKey).toBe(sessionID)
  77. })
  78. test("should set store=false for openai provider", () => {
  79. const openaiModel = {
  80. ...mockModel,
  81. providerID: "openai",
  82. api: {
  83. id: "gpt-4",
  84. url: "https://api.openai.com",
  85. npm: "@ai-sdk/openai",
  86. },
  87. }
  88. const result = ProviderTransform.options({
  89. model: openaiModel,
  90. sessionID,
  91. providerOptions: {},
  92. })
  93. expect(result.store).toBe(false)
  94. })
  95. })
  96. describe("ProviderTransform.options - gpt-5 textVerbosity", () => {
  97. const sessionID = "test-session-123"
  98. const createGpt5Model = (apiId: string) =>
  99. ({
  100. id: `openai/${apiId}`,
  101. providerID: "openai",
  102. api: {
  103. id: apiId,
  104. url: "https://api.openai.com",
  105. npm: "@ai-sdk/openai",
  106. },
  107. name: apiId,
  108. capabilities: {
  109. temperature: true,
  110. reasoning: true,
  111. attachment: true,
  112. toolcall: true,
  113. input: { text: true, audio: false, image: true, video: false, pdf: false },
  114. output: { text: true, audio: false, image: false, video: false, pdf: false },
  115. interleaved: false,
  116. },
  117. cost: { input: 0.03, output: 0.06, cache: { read: 0.001, write: 0.002 } },
  118. limit: { context: 128000, output: 4096 },
  119. status: "active",
  120. options: {},
  121. headers: {},
  122. }) as any
  123. test("gpt-5.2 should have textVerbosity set to low", () => {
  124. const model = createGpt5Model("gpt-5.2")
  125. const result = ProviderTransform.options({ model, sessionID, providerOptions: {} })
  126. expect(result.textVerbosity).toBe("low")
  127. })
  128. test("gpt-5.1 should have textVerbosity set to low", () => {
  129. const model = createGpt5Model("gpt-5.1")
  130. const result = ProviderTransform.options({ model, sessionID, providerOptions: {} })
  131. expect(result.textVerbosity).toBe("low")
  132. })
  133. test("gpt-5.2-chat-latest should NOT have textVerbosity set (only supports medium)", () => {
  134. const model = createGpt5Model("gpt-5.2-chat-latest")
  135. const result = ProviderTransform.options({ model, sessionID, providerOptions: {} })
  136. expect(result.textVerbosity).toBeUndefined()
  137. })
  138. test("gpt-5.1-chat-latest should NOT have textVerbosity set (only supports medium)", () => {
  139. const model = createGpt5Model("gpt-5.1-chat-latest")
  140. const result = ProviderTransform.options({ model, sessionID, providerOptions: {} })
  141. expect(result.textVerbosity).toBeUndefined()
  142. })
  143. test("gpt-5.2-chat should NOT have textVerbosity set", () => {
  144. const model = createGpt5Model("gpt-5.2-chat")
  145. const result = ProviderTransform.options({ model, sessionID, providerOptions: {} })
  146. expect(result.textVerbosity).toBeUndefined()
  147. })
  148. test("gpt-5-chat should NOT have textVerbosity set", () => {
  149. const model = createGpt5Model("gpt-5-chat")
  150. const result = ProviderTransform.options({ model, sessionID, providerOptions: {} })
  151. expect(result.textVerbosity).toBeUndefined()
  152. })
  153. test("gpt-5.2-codex should NOT have textVerbosity set (codex models excluded)", () => {
  154. const model = createGpt5Model("gpt-5.2-codex")
  155. const result = ProviderTransform.options({ model, sessionID, providerOptions: {} })
  156. expect(result.textVerbosity).toBeUndefined()
  157. })
  158. })
  159. describe("ProviderTransform.options - gateway", () => {
  160. const sessionID = "test-session-123"
  161. const createModel = (id: string) =>
  162. ({
  163. id,
  164. providerID: "vercel",
  165. api: {
  166. id,
  167. url: "https://ai-gateway.vercel.sh/v3/ai",
  168. npm: "@ai-sdk/gateway",
  169. },
  170. name: id,
  171. capabilities: {
  172. temperature: true,
  173. reasoning: true,
  174. attachment: true,
  175. toolcall: true,
  176. input: { text: true, audio: false, image: true, video: false, pdf: true },
  177. output: { text: true, audio: false, image: false, video: false, pdf: false },
  178. interleaved: false,
  179. },
  180. cost: {
  181. input: 0.001,
  182. output: 0.002,
  183. cache: { read: 0.0001, write: 0.0002 },
  184. },
  185. limit: {
  186. context: 200_000,
  187. output: 8192,
  188. },
  189. status: "active",
  190. options: {},
  191. headers: {},
  192. release_date: "2024-01-01",
  193. }) as any
  194. test("puts gateway defaults under gateway key", () => {
  195. const model = createModel("anthropic/claude-sonnet-4")
  196. const result = ProviderTransform.options({ model, sessionID, providerOptions: {} })
  197. expect(result).toEqual({
  198. gateway: {
  199. caching: "auto",
  200. },
  201. })
  202. })
  203. })
  204. describe("ProviderTransform.providerOptions", () => {
  205. const createModel = (overrides: Partial<any> = {}) =>
  206. ({
  207. id: "test/test-model",
  208. providerID: "test",
  209. api: {
  210. id: "test-model",
  211. url: "https://api.test.com",
  212. npm: "@ai-sdk/openai",
  213. },
  214. name: "Test Model",
  215. capabilities: {
  216. temperature: true,
  217. reasoning: true,
  218. attachment: true,
  219. toolcall: true,
  220. input: { text: true, audio: false, image: true, video: false, pdf: false },
  221. output: { text: true, audio: false, image: false, video: false, pdf: false },
  222. interleaved: false,
  223. },
  224. cost: {
  225. input: 0.001,
  226. output: 0.002,
  227. cache: { read: 0.0001, write: 0.0002 },
  228. },
  229. limit: {
  230. context: 200_000,
  231. output: 64_000,
  232. },
  233. status: "active",
  234. options: {},
  235. headers: {},
  236. release_date: "2024-01-01",
  237. ...overrides,
  238. }) as any
  239. test("uses sdk key for non-gateway models", () => {
  240. const model = createModel({
  241. providerID: "my-bedrock",
  242. api: {
  243. id: "anthropic.claude-sonnet-4",
  244. url: "https://bedrock.aws",
  245. npm: "@ai-sdk/amazon-bedrock",
  246. },
  247. })
  248. expect(ProviderTransform.providerOptions(model, { cachePoint: { type: "default" } })).toEqual({
  249. bedrock: { cachePoint: { type: "default" } },
  250. })
  251. })
  252. test("uses gateway model provider slug for gateway models", () => {
  253. const model = createModel({
  254. providerID: "vercel",
  255. api: {
  256. id: "anthropic/claude-sonnet-4",
  257. url: "https://ai-gateway.vercel.sh/v3/ai",
  258. npm: "@ai-sdk/gateway",
  259. },
  260. })
  261. expect(ProviderTransform.providerOptions(model, { thinking: { type: "enabled", budgetTokens: 12_000 } })).toEqual({
  262. anthropic: { thinking: { type: "enabled", budgetTokens: 12_000 } },
  263. })
  264. })
  265. test("falls back to gateway key when gateway api id is unscoped", () => {
  266. const model = createModel({
  267. id: "anthropic/claude-sonnet-4",
  268. providerID: "vercel",
  269. api: {
  270. id: "claude-sonnet-4",
  271. url: "https://ai-gateway.vercel.sh/v3/ai",
  272. npm: "@ai-sdk/gateway",
  273. },
  274. })
  275. expect(ProviderTransform.providerOptions(model, { thinking: { type: "enabled", budgetTokens: 12_000 } })).toEqual({
  276. gateway: { thinking: { type: "enabled", budgetTokens: 12_000 } },
  277. })
  278. })
  279. test("splits gateway routing options from provider-specific options", () => {
  280. const model = createModel({
  281. providerID: "vercel",
  282. api: {
  283. id: "anthropic/claude-sonnet-4",
  284. url: "https://ai-gateway.vercel.sh/v3/ai",
  285. npm: "@ai-sdk/gateway",
  286. },
  287. })
  288. expect(
  289. ProviderTransform.providerOptions(model, {
  290. gateway: { order: ["vertex", "anthropic"] },
  291. thinking: { type: "enabled", budgetTokens: 12_000 },
  292. }),
  293. ).toEqual({
  294. gateway: { order: ["vertex", "anthropic"] },
  295. anthropic: { thinking: { type: "enabled", budgetTokens: 12_000 } },
  296. } as any)
  297. })
  298. test("falls back to gateway key when model id has no provider slug", () => {
  299. const model = createModel({
  300. id: "claude-sonnet-4",
  301. providerID: "vercel",
  302. api: {
  303. id: "claude-sonnet-4",
  304. url: "https://ai-gateway.vercel.sh/v3/ai",
  305. npm: "@ai-sdk/gateway",
  306. },
  307. })
  308. expect(ProviderTransform.providerOptions(model, { reasoningEffort: "high" })).toEqual({
  309. gateway: { reasoningEffort: "high" },
  310. })
  311. })
  312. test("maps amazon slug to bedrock for provider options", () => {
  313. const model = createModel({
  314. providerID: "vercel",
  315. api: {
  316. id: "amazon/nova-2-lite",
  317. url: "https://ai-gateway.vercel.sh/v3/ai",
  318. npm: "@ai-sdk/gateway",
  319. },
  320. })
  321. expect(ProviderTransform.providerOptions(model, { reasoningConfig: { type: "enabled" } })).toEqual({
  322. bedrock: { reasoningConfig: { type: "enabled" } },
  323. })
  324. })
  325. test("uses groq slug for groq models", () => {
  326. const model = createModel({
  327. providerID: "vercel",
  328. api: {
  329. id: "groq/llama-3.3-70b-versatile",
  330. url: "https://ai-gateway.vercel.sh/v3/ai",
  331. npm: "@ai-sdk/gateway",
  332. },
  333. })
  334. expect(ProviderTransform.providerOptions(model, { reasoningFormat: "parsed" })).toEqual({
  335. groq: { reasoningFormat: "parsed" },
  336. })
  337. })
  338. })
  339. describe("ProviderTransform.schema - gemini array items", () => {
  340. test("adds missing items for array properties", () => {
  341. const geminiModel = {
  342. providerID: "google",
  343. api: {
  344. id: "gemini-3-pro",
  345. },
  346. } as any
  347. const schema = {
  348. type: "object",
  349. properties: {
  350. nodes: { type: "array" },
  351. edges: { type: "array", items: { type: "string" } },
  352. },
  353. } as any
  354. const result = ProviderTransform.schema(geminiModel, schema) as any
  355. expect(result.properties.nodes.items).toBeDefined()
  356. expect(result.properties.edges.items.type).toBe("string")
  357. })
  358. })
// Nested (2D/3D) array schemas: the transform recursively back-fills missing or
// empty `items`, defaulting the innermost type to "string" while preserving any
// explicitly declared types. Presumably required by Gemini's schema validation —
// TODO(review): confirm against the transform implementation.
describe("ProviderTransform.schema - gemini nested array items", () => {
  const geminiModel = {
    providerID: "google",
    api: {
      id: "gemini-3-pro",
    },
  } as any
  test("adds type to 2D array with empty inner items", () => {
    const schema = {
      type: "object",
      properties: {
        values: {
          type: "array",
          items: {
            type: "array",
            items: {}, // Empty items object
          },
        },
      },
    } as any
    const result = ProviderTransform.schema(geminiModel, schema) as any
    // Inner items should have a default type
    expect(result.properties.values.items.items.type).toBe("string")
  })
  test("adds items and type to 2D array with missing inner items", () => {
    const schema = {
      type: "object",
      properties: {
        data: {
          type: "array",
          items: { type: "array" }, // No items at all
        },
      },
    } as any
    const result = ProviderTransform.schema(geminiModel, schema) as any
    // A wholly-absent inner `items` is synthesized, defaulting to "string"
    expect(result.properties.data.items.items).toBeDefined()
    expect(result.properties.data.items.items.type).toBe("string")
  })
  test("handles deeply nested arrays (3D)", () => {
    const schema = {
      type: "object",
      properties: {
        matrix: {
          type: "array",
          items: {
            type: "array",
            items: {
              type: "array",
              // No items
            },
          },
        },
      },
    } as any
    const result = ProviderTransform.schema(geminiModel, schema) as any
    // The back-fill must recurse all the way to the innermost level
    expect(result.properties.matrix.items.items.items).toBeDefined()
    expect(result.properties.matrix.items.items.items.type).toBe("string")
  })
  test("preserves existing item types in nested arrays", () => {
    const schema = {
      type: "object",
      properties: {
        numbers: {
          type: "array",
          items: {
            type: "array",
            items: { type: "number" }, // Has explicit type
          },
        },
      },
    } as any
    const result = ProviderTransform.schema(geminiModel, schema) as any
    // Should preserve the explicit type
    expect(result.properties.numbers.items.items.type).toBe("number")
  })
  test("handles mixed nested structures with objects and arrays", () => {
    const schema = {
      type: "object",
      properties: {
        spreadsheetData: {
          type: "object",
          properties: {
            rows: {
              type: "array",
              items: {
                type: "array",
                items: {}, // Empty items
              },
            },
          },
        },
      },
    } as any
    const result = ProviderTransform.schema(geminiModel, schema) as any
    // Arrays nested inside object properties receive the same treatment
    expect(result.properties.spreadsheetData.properties.rows.items.items.type).toBe("string")
  })
})
// For Gemini models, the transform strips `properties`/`required` from schema
// nodes whose type is not "object" (invalid placements), while keeping them on
// genuine object nodes. Non-Gemini providers must be left untouched.
describe("ProviderTransform.schema - gemini non-object properties removal", () => {
  const geminiModel = {
    providerID: "google",
    api: {
      id: "gemini-3-pro",
    },
  } as any
  test("removes properties from non-object types", () => {
    const schema = {
      type: "object",
      properties: {
        data: {
          type: "string",
          properties: { invalid: { type: "string" } }, // misplaced on a string node
        },
      },
    } as any
    const result = ProviderTransform.schema(geminiModel, schema) as any
    expect(result.properties.data.type).toBe("string")
    expect(result.properties.data.properties).toBeUndefined()
  })
  test("removes required from non-object types", () => {
    const schema = {
      type: "object",
      properties: {
        data: {
          type: "array",
          items: { type: "string" },
          required: ["invalid"], // misplaced on an array node
        },
      },
    } as any
    const result = ProviderTransform.schema(geminiModel, schema) as any
    expect(result.properties.data.type).toBe("array")
    expect(result.properties.data.required).toBeUndefined()
  })
  test("removes properties and required from nested non-object types", () => {
    const schema = {
      type: "object",
      properties: {
        outer: {
          type: "object",
          properties: {
            inner: {
              type: "number",
              properties: { bad: { type: "string" } },
              required: ["bad"],
            },
          },
        },
      },
    } as any
    const result = ProviderTransform.schema(geminiModel, schema) as any
    // Stripping must recurse into nested object properties
    expect(result.properties.outer.properties.inner.type).toBe("number")
    expect(result.properties.outer.properties.inner.properties).toBeUndefined()
    expect(result.properties.outer.properties.inner.required).toBeUndefined()
  })
  test("keeps properties and required on object types", () => {
    const schema = {
      type: "object",
      properties: {
        data: {
          type: "object",
          properties: { name: { type: "string" } },
          required: ["name"],
        },
      },
    } as any
    const result = ProviderTransform.schema(geminiModel, schema) as any
    // Valid placements on object nodes must survive the transform
    expect(result.properties.data.type).toBe("object")
    expect(result.properties.data.properties).toBeDefined()
    expect(result.properties.data.required).toEqual(["name"])
  })
  test("does not affect non-gemini providers", () => {
    const openaiModel = {
      providerID: "openai",
      api: {
        id: "gpt-4",
      },
    } as any
    const schema = {
      type: "object",
      properties: {
        data: {
          type: "string",
          properties: { invalid: { type: "string" } },
        },
      },
    } as any
    const result = ProviderTransform.schema(openaiModel, schema) as any
    // Same invalid shape, but non-Gemini schemas pass through unchanged
    expect(result.properties.data.properties).toBeDefined()
  })
})
// When a model declares `capabilities.interleaved.field` (DeepSeek-style),
// assistant reasoning parts accompanying tool calls are moved out of `content`
// and into providerOptions under that field; other providers are untouched.
describe("ProviderTransform.message - DeepSeek reasoning content", () => {
  test("DeepSeek with tool calls includes reasoning_content in providerOptions", () => {
    const msgs = [
      {
        role: "assistant",
        content: [
          { type: "reasoning", text: "Let me think about this..." },
          {
            type: "tool-call",
            toolCallId: "test",
            toolName: "bash",
            input: { command: "echo hello" },
          },
        ],
      },
    ] as any[]
    const result = ProviderTransform.message(
      msgs,
      {
        id: "deepseek/deepseek-chat",
        providerID: "deepseek",
        api: {
          id: "deepseek-chat",
          url: "https://api.deepseek.com",
          npm: "@ai-sdk/openai-compatible",
        },
        name: "DeepSeek Chat",
        capabilities: {
          temperature: true,
          reasoning: true,
          attachment: false,
          toolcall: true,
          input: { text: true, audio: false, image: false, video: false, pdf: false },
          output: { text: true, audio: false, image: false, video: false, pdf: false },
          // Signals that reasoning should be relocated to this provider field
          interleaved: {
            field: "reasoning_content",
          },
        },
        cost: {
          input: 0.001,
          output: 0.002,
          cache: { read: 0.0001, write: 0.0002 },
        },
        limit: {
          context: 128000,
          output: 8192,
        },
        status: "active",
        options: {},
        headers: {},
        release_date: "2023-04-01",
      },
      {},
    )
    expect(result).toHaveLength(1)
    // The reasoning part is removed from content...
    expect(result[0].content).toEqual([
      {
        type: "tool-call",
        toolCallId: "test",
        toolName: "bash",
        input: { command: "echo hello" },
      },
    ])
    // ...and surfaced under providerOptions.openaiCompatible.reasoning_content
    expect(result[0].providerOptions?.openaiCompatible?.reasoning_content).toBe("Let me think about this...")
  })
  test("Non-DeepSeek providers leave reasoning content unchanged", () => {
    const msgs = [
      {
        role: "assistant",
        content: [
          { type: "reasoning", text: "Should not be processed" },
          { type: "text", text: "Answer" },
        ],
      },
    ] as any[]
    const result = ProviderTransform.message(
      msgs,
      {
        id: "openai/gpt-4",
        providerID: "openai",
        api: {
          id: "gpt-4",
          url: "https://api.openai.com",
          npm: "@ai-sdk/openai",
        },
        name: "GPT-4",
        capabilities: {
          temperature: true,
          reasoning: false,
          attachment: true,
          toolcall: true,
          input: { text: true, audio: false, image: true, video: false, pdf: false },
          output: { text: true, audio: false, image: false, video: false, pdf: false },
          // No interleaved field -> no reasoning relocation
          interleaved: false,
        },
        cost: {
          input: 0.03,
          output: 0.06,
          cache: { read: 0.001, write: 0.002 },
        },
        limit: {
          context: 128000,
          output: 4096,
        },
        status: "active",
        options: {},
        headers: {},
        release_date: "2023-04-01",
      },
      {},
    )
    expect(result[0].content).toEqual([
      { type: "reasoning", text: "Should not be processed" },
      { type: "text", text: "Answer" },
    ])
    expect(result[0].providerOptions?.openaiCompatible?.reasoning_content).toBeUndefined()
  })
})
  667. describe("ProviderTransform.message - empty image handling", () => {
  668. const mockModel = {
  669. id: "anthropic/claude-3-5-sonnet",
  670. providerID: "anthropic",
  671. api: {
  672. id: "claude-3-5-sonnet-20241022",
  673. url: "https://api.anthropic.com",
  674. npm: "@ai-sdk/anthropic",
  675. },
  676. name: "Claude 3.5 Sonnet",
  677. capabilities: {
  678. temperature: true,
  679. reasoning: false,
  680. attachment: true,
  681. toolcall: true,
  682. input: { text: true, audio: false, image: true, video: false, pdf: true },
  683. output: { text: true, audio: false, image: false, video: false, pdf: false },
  684. interleaved: false,
  685. },
  686. cost: {
  687. input: 0.003,
  688. output: 0.015,
  689. cache: { read: 0.0003, write: 0.00375 },
  690. },
  691. limit: {
  692. context: 200000,
  693. output: 8192,
  694. },
  695. status: "active",
  696. options: {},
  697. headers: {},
  698. } as any
  699. test("should replace empty base64 image with error text", () => {
  700. const msgs = [
  701. {
  702. role: "user",
  703. content: [
  704. { type: "text", text: "What is in this image?" },
  705. { type: "image", image: "data:image/png;base64," },
  706. ],
  707. },
  708. ] as any[]
  709. const result = ProviderTransform.message(msgs, mockModel, {})
  710. expect(result).toHaveLength(1)
  711. expect(result[0].content).toHaveLength(2)
  712. expect(result[0].content[0]).toEqual({ type: "text", text: "What is in this image?" })
  713. expect(result[0].content[1]).toEqual({
  714. type: "text",
  715. text: "ERROR: Image file is empty or corrupted. Please provide a valid image.",
  716. })
  717. })
  718. test("should keep valid base64 images unchanged", () => {
  719. const validBase64 =
  720. "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNk+M9QDwADhgGAWjR9awAAAABJRU5ErkJggg=="
  721. const msgs = [
  722. {
  723. role: "user",
  724. content: [
  725. { type: "text", text: "What is in this image?" },
  726. { type: "image", image: `data:image/png;base64,${validBase64}` },
  727. ],
  728. },
  729. ] as any[]
  730. const result = ProviderTransform.message(msgs, mockModel, {})
  731. expect(result).toHaveLength(1)
  732. expect(result[0].content).toHaveLength(2)
  733. expect(result[0].content[0]).toEqual({ type: "text", text: "What is in this image?" })
  734. expect(result[0].content[1]).toEqual({ type: "image", image: `data:image/png;base64,${validBase64}` })
  735. })
  736. test("should handle mixed valid and empty images", () => {
  737. const validBase64 =
  738. "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNk+M9QDwADhgGAWjR9awAAAABJRU5ErkJggg=="
  739. const msgs = [
  740. {
  741. role: "user",
  742. content: [
  743. { type: "text", text: "Compare these images" },
  744. { type: "image", image: `data:image/png;base64,${validBase64}` },
  745. { type: "image", image: "data:image/jpeg;base64," },
  746. ],
  747. },
  748. ] as any[]
  749. const result = ProviderTransform.message(msgs, mockModel, {})
  750. expect(result).toHaveLength(1)
  751. expect(result[0].content).toHaveLength(3)
  752. expect(result[0].content[0]).toEqual({ type: "text", text: "Compare these images" })
  753. expect(result[0].content[1]).toEqual({ type: "image", image: `data:image/png;base64,${validBase64}` })
  754. expect(result[0].content[2]).toEqual({
  755. type: "text",
  756. text: "ERROR: Image file is empty or corrupted. Please provide a valid image.",
  757. })
  758. })
  759. })
  760. describe("ProviderTransform.message - anthropic empty content filtering", () => {
  761. const anthropicModel = {
  762. id: "anthropic/claude-3-5-sonnet",
  763. providerID: "anthropic",
  764. api: {
  765. id: "claude-3-5-sonnet-20241022",
  766. url: "https://api.anthropic.com",
  767. npm: "@ai-sdk/anthropic",
  768. },
  769. name: "Claude 3.5 Sonnet",
  770. capabilities: {
  771. temperature: true,
  772. reasoning: false,
  773. attachment: true,
  774. toolcall: true,
  775. input: { text: true, audio: false, image: true, video: false, pdf: true },
  776. output: { text: true, audio: false, image: false, video: false, pdf: false },
  777. interleaved: false,
  778. },
  779. cost: {
  780. input: 0.003,
  781. output: 0.015,
  782. cache: { read: 0.0003, write: 0.00375 },
  783. },
  784. limit: {
  785. context: 200000,
  786. output: 8192,
  787. },
  788. status: "active",
  789. options: {},
  790. headers: {},
  791. } as any
  792. test("filters out messages with empty string content", () => {
  793. const msgs = [
  794. { role: "user", content: "Hello" },
  795. { role: "assistant", content: "" },
  796. { role: "user", content: "World" },
  797. ] as any[]
  798. const result = ProviderTransform.message(msgs, anthropicModel, {})
  799. expect(result).toHaveLength(2)
  800. expect(result[0].content).toBe("Hello")
  801. expect(result[1].content).toBe("World")
  802. })
  803. test("filters out empty text parts from array content", () => {
  804. const msgs = [
  805. {
  806. role: "assistant",
  807. content: [
  808. { type: "text", text: "" },
  809. { type: "text", text: "Hello" },
  810. { type: "text", text: "" },
  811. ],
  812. },
  813. ] as any[]
  814. const result = ProviderTransform.message(msgs, anthropicModel, {})
  815. expect(result).toHaveLength(1)
  816. expect(result[0].content).toHaveLength(1)
  817. expect(result[0].content[0]).toEqual({ type: "text", text: "Hello" })
  818. })
  819. test("filters out empty reasoning parts from array content", () => {
  820. const msgs = [
  821. {
  822. role: "assistant",
  823. content: [
  824. { type: "reasoning", text: "" },
  825. { type: "text", text: "Answer" },
  826. { type: "reasoning", text: "" },
  827. ],
  828. },
  829. ] as any[]
  830. const result = ProviderTransform.message(msgs, anthropicModel, {})
  831. expect(result).toHaveLength(1)
  832. expect(result[0].content).toHaveLength(1)
  833. expect(result[0].content[0]).toEqual({ type: "text", text: "Answer" })
  834. })
  835. test("removes entire message when all parts are empty", () => {
  836. const msgs = [
  837. { role: "user", content: "Hello" },
  838. {
  839. role: "assistant",
  840. content: [
  841. { type: "text", text: "" },
  842. { type: "reasoning", text: "" },
  843. ],
  844. },
  845. { role: "user", content: "World" },
  846. ] as any[]
  847. const result = ProviderTransform.message(msgs, anthropicModel, {})
  848. expect(result).toHaveLength(2)
  849. expect(result[0].content).toBe("Hello")
  850. expect(result[1].content).toBe("World")
  851. })
  852. test("keeps non-text/reasoning parts even if text parts are empty", () => {
  853. const msgs = [
  854. {
  855. role: "assistant",
  856. content: [
  857. { type: "text", text: "" },
  858. { type: "tool-call", toolCallId: "123", toolName: "bash", input: { command: "ls" } },
  859. ],
  860. },
  861. ] as any[]
  862. const result = ProviderTransform.message(msgs, anthropicModel, {})
  863. expect(result).toHaveLength(1)
  864. expect(result[0].content).toHaveLength(1)
  865. expect(result[0].content[0]).toEqual({
  866. type: "tool-call",
  867. toolCallId: "123",
  868. toolName: "bash",
  869. input: { command: "ls" },
  870. })
  871. })
  872. test("keeps messages with valid text alongside empty parts", () => {
  873. const msgs = [
  874. {
  875. role: "assistant",
  876. content: [
  877. { type: "reasoning", text: "Thinking..." },
  878. { type: "text", text: "" },
  879. { type: "text", text: "Result" },
  880. ],
  881. },
  882. ] as any[]
  883. const result = ProviderTransform.message(msgs, anthropicModel, {})
  884. expect(result).toHaveLength(1)
  885. expect(result[0].content).toHaveLength(2)
  886. expect(result[0].content[0]).toEqual({ type: "reasoning", text: "Thinking..." })
  887. expect(result[0].content[1]).toEqual({ type: "text", text: "Result" })
  888. })
  889. test("does not filter for non-anthropic providers", () => {
  890. const openaiModel = {
  891. ...anthropicModel,
  892. providerID: "openai",
  893. api: {
  894. id: "gpt-4",
  895. url: "https://api.openai.com",
  896. npm: "@ai-sdk/openai",
  897. },
  898. }
  899. const msgs = [
  900. { role: "assistant", content: "" },
  901. {
  902. role: "assistant",
  903. content: [{ type: "text", text: "" }],
  904. },
  905. ] as any[]
  906. const result = ProviderTransform.message(msgs, openaiModel, {})
  907. expect(result).toHaveLength(2)
  908. expect(result[0].content).toBe("")
  909. expect(result[1].content).toHaveLength(1)
  910. })
  911. })
  912. describe("ProviderTransform.message - strip openai metadata when store=false", () => {
  913. const openaiModel = {
  914. id: "openai/gpt-5",
  915. providerID: "openai",
  916. api: {
  917. id: "gpt-5",
  918. url: "https://api.openai.com",
  919. npm: "@ai-sdk/openai",
  920. },
  921. name: "GPT-5",
  922. capabilities: {
  923. temperature: true,
  924. reasoning: true,
  925. attachment: true,
  926. toolcall: true,
  927. input: { text: true, audio: false, image: true, video: false, pdf: false },
  928. output: { text: true, audio: false, image: false, video: false, pdf: false },
  929. interleaved: false,
  930. },
  931. cost: { input: 0.03, output: 0.06, cache: { read: 0.001, write: 0.002 } },
  932. limit: { context: 128000, output: 4096 },
  933. status: "active",
  934. options: {},
  935. headers: {},
  936. } as any
  937. test("preserves itemId and reasoningEncryptedContent when store=false", () => {
  938. const msgs = [
  939. {
  940. role: "assistant",
  941. content: [
  942. {
  943. type: "reasoning",
  944. text: "thinking...",
  945. providerOptions: {
  946. openai: {
  947. itemId: "rs_123",
  948. reasoningEncryptedContent: "encrypted",
  949. },
  950. },
  951. },
  952. {
  953. type: "text",
  954. text: "Hello",
  955. providerOptions: {
  956. openai: {
  957. itemId: "msg_456",
  958. },
  959. },
  960. },
  961. ],
  962. },
  963. ] as any[]
  964. const result = ProviderTransform.message(msgs, openaiModel, { store: false }) as any[]
  965. expect(result).toHaveLength(1)
  966. expect(result[0].content[0].providerOptions?.openai?.itemId).toBe("rs_123")
  967. expect(result[0].content[1].providerOptions?.openai?.itemId).toBe("msg_456")
  968. })
  969. test("preserves itemId and reasoningEncryptedContent when store=false even when not openai", () => {
  970. const zenModel = {
  971. ...openaiModel,
  972. providerID: "zen",
  973. }
  974. const msgs = [
  975. {
  976. role: "assistant",
  977. content: [
  978. {
  979. type: "reasoning",
  980. text: "thinking...",
  981. providerOptions: {
  982. openai: {
  983. itemId: "rs_123",
  984. reasoningEncryptedContent: "encrypted",
  985. },
  986. },
  987. },
  988. {
  989. type: "text",
  990. text: "Hello",
  991. providerOptions: {
  992. openai: {
  993. itemId: "msg_456",
  994. },
  995. },
  996. },
  997. ],
  998. },
  999. ] as any[]
  1000. const result = ProviderTransform.message(msgs, zenModel, { store: false }) as any[]
  1001. expect(result).toHaveLength(1)
  1002. expect(result[0].content[0].providerOptions?.openai?.itemId).toBe("rs_123")
  1003. expect(result[0].content[1].providerOptions?.openai?.itemId).toBe("msg_456")
  1004. })
  1005. test("preserves other openai options including itemId", () => {
  1006. const msgs = [
  1007. {
  1008. role: "assistant",
  1009. content: [
  1010. {
  1011. type: "text",
  1012. text: "Hello",
  1013. providerOptions: {
  1014. openai: {
  1015. itemId: "msg_123",
  1016. otherOption: "value",
  1017. },
  1018. },
  1019. },
  1020. ],
  1021. },
  1022. ] as any[]
  1023. const result = ProviderTransform.message(msgs, openaiModel, { store: false }) as any[]
  1024. expect(result[0].content[0].providerOptions?.openai?.itemId).toBe("msg_123")
  1025. expect(result[0].content[0].providerOptions?.openai?.otherOption).toBe("value")
  1026. })
  1027. test("preserves metadata for openai package when store is true", () => {
  1028. const msgs = [
  1029. {
  1030. role: "assistant",
  1031. content: [
  1032. {
  1033. type: "text",
  1034. text: "Hello",
  1035. providerOptions: {
  1036. openai: {
  1037. itemId: "msg_123",
  1038. },
  1039. },
  1040. },
  1041. ],
  1042. },
  1043. ] as any[]
  1044. // openai package preserves itemId regardless of store value
  1045. const result = ProviderTransform.message(msgs, openaiModel, { store: true }) as any[]
  1046. expect(result[0].content[0].providerOptions?.openai?.itemId).toBe("msg_123")
  1047. })
  1048. test("preserves metadata for non-openai packages when store is false", () => {
  1049. const anthropicModel = {
  1050. ...openaiModel,
  1051. providerID: "anthropic",
  1052. api: {
  1053. id: "claude-3",
  1054. url: "https://api.anthropic.com",
  1055. npm: "@ai-sdk/anthropic",
  1056. },
  1057. }
  1058. const msgs = [
  1059. {
  1060. role: "assistant",
  1061. content: [
  1062. {
  1063. type: "text",
  1064. text: "Hello",
  1065. providerOptions: {
  1066. openai: {
  1067. itemId: "msg_123",
  1068. },
  1069. },
  1070. },
  1071. ],
  1072. },
  1073. ] as any[]
  1074. // store=false preserves metadata for non-openai packages
  1075. const result = ProviderTransform.message(msgs, anthropicModel, { store: false }) as any[]
  1076. expect(result[0].content[0].providerOptions?.openai?.itemId).toBe("msg_123")
  1077. })
  1078. test("preserves metadata using providerID key when store is false", () => {
  1079. const opencodeModel = {
  1080. ...openaiModel,
  1081. providerID: "opencode",
  1082. api: {
  1083. id: "opencode-test",
  1084. url: "https://api.opencode.ai",
  1085. npm: "@ai-sdk/openai-compatible",
  1086. },
  1087. }
  1088. const msgs = [
  1089. {
  1090. role: "assistant",
  1091. content: [
  1092. {
  1093. type: "text",
  1094. text: "Hello",
  1095. providerOptions: {
  1096. opencode: {
  1097. itemId: "msg_123",
  1098. otherOption: "value",
  1099. },
  1100. },
  1101. },
  1102. ],
  1103. },
  1104. ] as any[]
  1105. const result = ProviderTransform.message(msgs, opencodeModel, { store: false }) as any[]
  1106. expect(result[0].content[0].providerOptions?.opencode?.itemId).toBe("msg_123")
  1107. expect(result[0].content[0].providerOptions?.opencode?.otherOption).toBe("value")
  1108. })
  1109. test("preserves itemId across all providerOptions keys", () => {
  1110. const opencodeModel = {
  1111. ...openaiModel,
  1112. providerID: "opencode",
  1113. api: {
  1114. id: "opencode-test",
  1115. url: "https://api.opencode.ai",
  1116. npm: "@ai-sdk/openai-compatible",
  1117. },
  1118. }
  1119. const msgs = [
  1120. {
  1121. role: "assistant",
  1122. providerOptions: {
  1123. openai: { itemId: "msg_root" },
  1124. opencode: { itemId: "msg_opencode" },
  1125. extra: { itemId: "msg_extra" },
  1126. },
  1127. content: [
  1128. {
  1129. type: "text",
  1130. text: "Hello",
  1131. providerOptions: {
  1132. openai: { itemId: "msg_openai_part" },
  1133. opencode: { itemId: "msg_opencode_part" },
  1134. extra: { itemId: "msg_extra_part" },
  1135. },
  1136. },
  1137. ],
  1138. },
  1139. ] as any[]
  1140. const result = ProviderTransform.message(msgs, opencodeModel, { store: false }) as any[]
  1141. expect(result[0].providerOptions?.openai?.itemId).toBe("msg_root")
  1142. expect(result[0].providerOptions?.opencode?.itemId).toBe("msg_opencode")
  1143. expect(result[0].providerOptions?.extra?.itemId).toBe("msg_extra")
  1144. expect(result[0].content[0].providerOptions?.openai?.itemId).toBe("msg_openai_part")
  1145. expect(result[0].content[0].providerOptions?.opencode?.itemId).toBe("msg_opencode_part")
  1146. expect(result[0].content[0].providerOptions?.extra?.itemId).toBe("msg_extra_part")
  1147. })
  1148. test("does not strip metadata for non-openai packages when store is not false", () => {
  1149. const anthropicModel = {
  1150. ...openaiModel,
  1151. providerID: "anthropic",
  1152. api: {
  1153. id: "claude-3",
  1154. url: "https://api.anthropic.com",
  1155. npm: "@ai-sdk/anthropic",
  1156. },
  1157. }
  1158. const msgs = [
  1159. {
  1160. role: "assistant",
  1161. content: [
  1162. {
  1163. type: "text",
  1164. text: "Hello",
  1165. providerOptions: {
  1166. openai: {
  1167. itemId: "msg_123",
  1168. },
  1169. },
  1170. },
  1171. ],
  1172. },
  1173. ] as any[]
  1174. const result = ProviderTransform.message(msgs, anthropicModel, {}) as any[]
  1175. expect(result[0].content[0].providerOptions?.openai?.itemId).toBe("msg_123")
  1176. })
  1177. })
  1178. describe("ProviderTransform.message - providerOptions key remapping", () => {
  1179. const createModel = (providerID: string, npm: string) =>
  1180. ({
  1181. id: `${providerID}/test-model`,
  1182. providerID,
  1183. api: {
  1184. id: "test-model",
  1185. url: "https://api.test.com",
  1186. npm,
  1187. },
  1188. name: "Test Model",
  1189. capabilities: {
  1190. temperature: true,
  1191. reasoning: false,
  1192. attachment: true,
  1193. toolcall: true,
  1194. input: { text: true, audio: false, image: true, video: false, pdf: true },
  1195. output: { text: true, audio: false, image: false, video: false, pdf: false },
  1196. interleaved: false,
  1197. },
  1198. cost: { input: 0.001, output: 0.002, cache: { read: 0.0001, write: 0.0002 } },
  1199. limit: { context: 128000, output: 8192 },
  1200. status: "active",
  1201. options: {},
  1202. headers: {},
  1203. }) as any
  1204. test("azure keeps 'azure' key and does not remap to 'openai'", () => {
  1205. const model = createModel("azure", "@ai-sdk/azure")
  1206. const msgs = [
  1207. {
  1208. role: "user",
  1209. content: "Hello",
  1210. providerOptions: {
  1211. azure: { someOption: "value" },
  1212. },
  1213. },
  1214. ] as any[]
  1215. const result = ProviderTransform.message(msgs, model, {})
  1216. expect(result[0].providerOptions?.azure).toEqual({ someOption: "value" })
  1217. expect(result[0].providerOptions?.openai).toBeUndefined()
  1218. })
  1219. test("copilot remaps providerID to 'copilot' key", () => {
  1220. const model = createModel("github-copilot", "@ai-sdk/github-copilot")
  1221. const msgs = [
  1222. {
  1223. role: "user",
  1224. content: "Hello",
  1225. providerOptions: {
  1226. copilot: { someOption: "value" },
  1227. },
  1228. },
  1229. ] as any[]
  1230. const result = ProviderTransform.message(msgs, model, {})
  1231. expect(result[0].providerOptions?.copilot).toEqual({ someOption: "value" })
  1232. expect(result[0].providerOptions?.["github-copilot"]).toBeUndefined()
  1233. })
  1234. test("bedrock remaps providerID to 'bedrock' key", () => {
  1235. const model = createModel("my-bedrock", "@ai-sdk/amazon-bedrock")
  1236. const msgs = [
  1237. {
  1238. role: "user",
  1239. content: "Hello",
  1240. providerOptions: {
  1241. "my-bedrock": { someOption: "value" },
  1242. },
  1243. },
  1244. ] as any[]
  1245. const result = ProviderTransform.message(msgs, model, {})
  1246. expect(result[0].providerOptions?.bedrock).toEqual({ someOption: "value" })
  1247. expect(result[0].providerOptions?.["my-bedrock"]).toBeUndefined()
  1248. })
  1249. })
  1250. describe("ProviderTransform.message - claude w/bedrock custom inference profile", () => {
  1251. test("adds cachePoint", () => {
  1252. const model = {
  1253. id: "amazon-bedrock/custom-claude-sonnet-4.5",
  1254. providerID: "amazon-bedrock",
  1255. api: {
  1256. id: "arn:aws:bedrock:xxx:yyy:application-inference-profile/zzz",
  1257. url: "https://api.test.com",
  1258. npm: "@ai-sdk/amazon-bedrock",
  1259. },
  1260. name: "Custom inference profile",
  1261. capabilities: {},
  1262. options: {},
  1263. headers: {},
  1264. } as any
  1265. const msgs = [
  1266. {
  1267. role: "user",
  1268. content: "Hello",
  1269. },
  1270. ] as any[]
  1271. const result = ProviderTransform.message(msgs, model, {})
  1272. expect(result[0].providerOptions?.bedrock).toEqual(
  1273. expect.objectContaining({
  1274. cachePoint: {
  1275. type: "default",
  1276. },
  1277. }),
  1278. )
  1279. })
  1280. })
  1281. describe("ProviderTransform.message - cache control on gateway", () => {
  1282. const createModel = (overrides: Partial<any> = {}) =>
  1283. ({
  1284. id: "anthropic/claude-sonnet-4",
  1285. providerID: "vercel",
  1286. api: {
  1287. id: "anthropic/claude-sonnet-4",
  1288. url: "https://ai-gateway.vercel.sh/v3/ai",
  1289. npm: "@ai-sdk/gateway",
  1290. },
  1291. name: "Claude Sonnet 4",
  1292. capabilities: {
  1293. temperature: true,
  1294. reasoning: true,
  1295. attachment: true,
  1296. toolcall: true,
  1297. input: { text: true, audio: false, image: true, video: false, pdf: true },
  1298. output: { text: true, audio: false, image: false, video: false, pdf: false },
  1299. interleaved: false,
  1300. },
  1301. cost: { input: 0.001, output: 0.002, cache: { read: 0.0001, write: 0.0002 } },
  1302. limit: { context: 200_000, output: 8192 },
  1303. status: "active",
  1304. options: {},
  1305. headers: {},
  1306. ...overrides,
  1307. }) as any
  1308. test("gateway does not set cache control for anthropic models", () => {
  1309. const model = createModel()
  1310. const msgs = [
  1311. {
  1312. role: "system",
  1313. content: [{ type: "text", text: "You are a helpful assistant" }],
  1314. },
  1315. {
  1316. role: "user",
  1317. content: "Hello",
  1318. },
  1319. ] as any[]
  1320. const result = ProviderTransform.message(msgs, model, {}) as any[]
  1321. expect(result[0].content[0].providerOptions).toBeUndefined()
  1322. expect(result[0].providerOptions).toBeUndefined()
  1323. })
  1324. test("non-gateway anthropic keeps existing cache control behavior", () => {
  1325. const model = createModel({
  1326. providerID: "anthropic",
  1327. api: {
  1328. id: "claude-sonnet-4",
  1329. url: "https://api.anthropic.com",
  1330. npm: "@ai-sdk/anthropic",
  1331. },
  1332. })
  1333. const msgs = [
  1334. {
  1335. role: "system",
  1336. content: "You are a helpful assistant",
  1337. },
  1338. {
  1339. role: "user",
  1340. content: "Hello",
  1341. },
  1342. ] as any[]
  1343. const result = ProviderTransform.message(msgs, model, {}) as any[]
  1344. expect(result[0].providerOptions).toEqual({
  1345. anthropic: {
  1346. cacheControl: {
  1347. type: "ephemeral",
  1348. },
  1349. },
  1350. openrouter: {
  1351. cacheControl: {
  1352. type: "ephemeral",
  1353. },
  1354. },
  1355. bedrock: {
  1356. cachePoint: {
  1357. type: "default",
  1358. },
  1359. },
  1360. openaiCompatible: {
  1361. cache_control: {
  1362. type: "ephemeral",
  1363. },
  1364. },
  1365. copilot: {
  1366. copilot_cache_control: {
  1367. type: "ephemeral",
  1368. },
  1369. },
  1370. })
  1371. })
  1372. })
  1373. describe("ProviderTransform.variants", () => {
  1374. const createMockModel = (overrides: Partial<any> = {}): any => ({
  1375. id: "test/test-model",
  1376. providerID: "test",
  1377. api: {
  1378. id: "test-model",
  1379. url: "https://api.test.com",
  1380. npm: "@ai-sdk/openai",
  1381. },
  1382. name: "Test Model",
  1383. capabilities: {
  1384. temperature: true,
  1385. reasoning: true,
  1386. attachment: true,
  1387. toolcall: true,
  1388. input: { text: true, audio: false, image: true, video: false, pdf: false },
  1389. output: { text: true, audio: false, image: false, video: false, pdf: false },
  1390. interleaved: false,
  1391. },
  1392. cost: {
  1393. input: 0.001,
  1394. output: 0.002,
  1395. cache: { read: 0.0001, write: 0.0002 },
  1396. },
  1397. limit: {
  1398. context: 200_000,
  1399. output: 64_000,
  1400. },
  1401. status: "active",
  1402. options: {},
  1403. headers: {},
  1404. release_date: "2024-01-01",
  1405. ...overrides,
  1406. })
  1407. test("returns empty object when model has no reasoning capabilities", () => {
  1408. const model = createMockModel({
  1409. capabilities: { reasoning: false },
  1410. })
  1411. const result = ProviderTransform.variants(model)
  1412. expect(result).toEqual({})
  1413. })
  1414. test("deepseek returns empty object", () => {
  1415. const model = createMockModel({
  1416. id: "deepseek/deepseek-chat",
  1417. providerID: "deepseek",
  1418. api: {
  1419. id: "deepseek-chat",
  1420. url: "https://api.deepseek.com",
  1421. npm: "@ai-sdk/openai-compatible",
  1422. },
  1423. })
  1424. const result = ProviderTransform.variants(model)
  1425. expect(result).toEqual({})
  1426. })
  1427. test("minimax returns empty object", () => {
  1428. const model = createMockModel({
  1429. id: "minimax/minimax-model",
  1430. providerID: "minimax",
  1431. api: {
  1432. id: "minimax-model",
  1433. url: "https://api.minimax.com",
  1434. npm: "@ai-sdk/openai-compatible",
  1435. },
  1436. })
  1437. const result = ProviderTransform.variants(model)
  1438. expect(result).toEqual({})
  1439. })
  1440. test("glm returns empty object", () => {
  1441. const model = createMockModel({
  1442. id: "glm/glm-4",
  1443. providerID: "glm",
  1444. api: {
  1445. id: "glm-4",
  1446. url: "https://api.glm.com",
  1447. npm: "@ai-sdk/openai-compatible",
  1448. },
  1449. })
  1450. const result = ProviderTransform.variants(model)
  1451. expect(result).toEqual({})
  1452. })
  1453. test("mistral returns empty object", () => {
  1454. const model = createMockModel({
  1455. id: "mistral/mistral-large",
  1456. providerID: "mistral",
  1457. api: {
  1458. id: "mistral-large-latest",
  1459. url: "https://api.mistral.com",
  1460. npm: "@ai-sdk/mistral",
  1461. },
  1462. })
  1463. const result = ProviderTransform.variants(model)
  1464. expect(result).toEqual({})
  1465. })
  1466. describe("@openrouter/ai-sdk-provider", () => {
  1467. test("returns empty object for non-qualifying models", () => {
  1468. const model = createMockModel({
  1469. id: "openrouter/test-model",
  1470. providerID: "openrouter",
  1471. api: {
  1472. id: "test-model",
  1473. url: "https://openrouter.ai",
  1474. npm: "@openrouter/ai-sdk-provider",
  1475. },
  1476. })
  1477. const result = ProviderTransform.variants(model)
  1478. expect(result).toEqual({})
  1479. })
  1480. test("gpt models return OPENAI_EFFORTS with reasoning", () => {
  1481. const model = createMockModel({
  1482. id: "openrouter/gpt-4",
  1483. providerID: "openrouter",
  1484. api: {
  1485. id: "gpt-4",
  1486. url: "https://openrouter.ai",
  1487. npm: "@openrouter/ai-sdk-provider",
  1488. },
  1489. })
  1490. const result = ProviderTransform.variants(model)
  1491. expect(Object.keys(result)).toEqual(["none", "minimal", "low", "medium", "high", "xhigh"])
  1492. expect(result.low).toEqual({ reasoning: { effort: "low" } })
  1493. expect(result.high).toEqual({ reasoning: { effort: "high" } })
  1494. })
  1495. test("gemini-3 returns OPENAI_EFFORTS with reasoning", () => {
  1496. const model = createMockModel({
  1497. id: "openrouter/gemini-3-5-pro",
  1498. providerID: "openrouter",
  1499. api: {
  1500. id: "gemini-3-5-pro",
  1501. url: "https://openrouter.ai",
  1502. npm: "@openrouter/ai-sdk-provider",
  1503. },
  1504. })
  1505. const result = ProviderTransform.variants(model)
  1506. expect(Object.keys(result)).toEqual(["none", "minimal", "low", "medium", "high", "xhigh"])
  1507. })
  1508. test("grok-4 returns empty object", () => {
  1509. const model = createMockModel({
  1510. id: "openrouter/grok-4",
  1511. providerID: "openrouter",
  1512. api: {
  1513. id: "grok-4",
  1514. url: "https://openrouter.ai",
  1515. npm: "@openrouter/ai-sdk-provider",
  1516. },
  1517. })
  1518. const result = ProviderTransform.variants(model)
  1519. expect(result).toEqual({})
  1520. })
  1521. test("grok-3-mini returns low and high with reasoning", () => {
  1522. const model = createMockModel({
  1523. id: "openrouter/grok-3-mini",
  1524. providerID: "openrouter",
  1525. api: {
  1526. id: "grok-3-mini",
  1527. url: "https://openrouter.ai",
  1528. npm: "@openrouter/ai-sdk-provider",
  1529. },
  1530. })
  1531. const result = ProviderTransform.variants(model)
  1532. expect(Object.keys(result)).toEqual(["low", "high"])
  1533. expect(result.low).toEqual({ reasoning: { effort: "low" } })
  1534. expect(result.high).toEqual({ reasoning: { effort: "high" } })
  1535. })
  1536. })
  1537. describe("@ai-sdk/gateway", () => {
  1538. test("anthropic sonnet 4.6 models return adaptive thinking options", () => {
  1539. const model = createMockModel({
  1540. id: "anthropic/claude-sonnet-4-6",
  1541. providerID: "gateway",
  1542. api: {
  1543. id: "anthropic/claude-sonnet-4-6",
  1544. url: "https://gateway.ai",
  1545. npm: "@ai-sdk/gateway",
  1546. },
  1547. })
  1548. const result = ProviderTransform.variants(model)
  1549. expect(Object.keys(result)).toEqual(["low", "medium", "high", "max"])
  1550. expect(result.medium).toEqual({
  1551. thinking: {
  1552. type: "adaptive",
  1553. },
  1554. effort: "medium",
  1555. })
  1556. })
  1557. test("anthropic sonnet 4.6 dot-format models return adaptive thinking options", () => {
  1558. const model = createMockModel({
  1559. id: "anthropic/claude-sonnet-4-6",
  1560. providerID: "gateway",
  1561. api: {
  1562. id: "anthropic/claude-sonnet-4.6",
  1563. url: "https://gateway.ai",
  1564. npm: "@ai-sdk/gateway",
  1565. },
  1566. })
  1567. const result = ProviderTransform.variants(model)
  1568. expect(Object.keys(result)).toEqual(["low", "medium", "high", "max"])
  1569. expect(result.medium).toEqual({
  1570. thinking: {
  1571. type: "adaptive",
  1572. },
  1573. effort: "medium",
  1574. })
  1575. })
  1576. test("anthropic opus 4.6 dot-format models return adaptive thinking options", () => {
  1577. const model = createMockModel({
  1578. id: "anthropic/claude-opus-4-6",
  1579. providerID: "gateway",
  1580. api: {
  1581. id: "anthropic/claude-opus-4.6",
  1582. url: "https://gateway.ai",
  1583. npm: "@ai-sdk/gateway",
  1584. },
  1585. })
  1586. const result = ProviderTransform.variants(model)
  1587. expect(Object.keys(result)).toEqual(["low", "medium", "high", "max"])
  1588. expect(result.high).toEqual({
  1589. thinking: {
  1590. type: "adaptive",
  1591. },
  1592. effort: "high",
  1593. })
  1594. })
  1595. test("anthropic models return anthropic thinking options", () => {
  1596. const model = createMockModel({
  1597. id: "anthropic/claude-sonnet-4",
  1598. providerID: "gateway",
  1599. api: {
  1600. id: "anthropic/claude-sonnet-4",
  1601. url: "https://gateway.ai",
  1602. npm: "@ai-sdk/gateway",
  1603. },
  1604. })
  1605. const result = ProviderTransform.variants(model)
  1606. expect(Object.keys(result)).toEqual(["high", "max"])
  1607. expect(result.high).toEqual({
  1608. thinking: {
  1609. type: "enabled",
  1610. budgetTokens: 16000,
  1611. },
  1612. })
  1613. expect(result.max).toEqual({
  1614. thinking: {
  1615. type: "enabled",
  1616. budgetTokens: 31999,
  1617. },
  1618. })
  1619. })
  1620. test("returns OPENAI_EFFORTS with reasoningEffort", () => {
  1621. const model = createMockModel({
  1622. id: "gateway/gateway-model",
  1623. providerID: "gateway",
  1624. api: {
  1625. id: "gateway-model",
  1626. url: "https://gateway.ai",
  1627. npm: "@ai-sdk/gateway",
  1628. },
  1629. })
  1630. const result = ProviderTransform.variants(model)
  1631. expect(Object.keys(result)).toEqual(["none", "minimal", "low", "medium", "high", "xhigh"])
  1632. expect(result.low).toEqual({ reasoningEffort: "low" })
  1633. expect(result.high).toEqual({ reasoningEffort: "high" })
  1634. })
  1635. })
  1636. describe("@ai-sdk/github-copilot", () => {
  1637. test("standard models return low, medium, high", () => {
  1638. const model = createMockModel({
  1639. id: "gpt-4.5",
  1640. providerID: "github-copilot",
  1641. api: {
  1642. id: "gpt-4.5",
  1643. url: "https://api.githubcopilot.com",
  1644. npm: "@ai-sdk/github-copilot",
  1645. },
  1646. })
  1647. const result = ProviderTransform.variants(model)
  1648. expect(Object.keys(result)).toEqual(["low", "medium", "high"])
  1649. expect(result.low).toEqual({
  1650. reasoningEffort: "low",
  1651. reasoningSummary: "auto",
  1652. include: ["reasoning.encrypted_content"],
  1653. })
  1654. })
  1655. test("gpt-5.1-codex-max includes xhigh", () => {
  1656. const model = createMockModel({
  1657. id: "gpt-5.1-codex-max",
  1658. providerID: "github-copilot",
  1659. api: {
  1660. id: "gpt-5.1-codex-max",
  1661. url: "https://api.githubcopilot.com",
  1662. npm: "@ai-sdk/github-copilot",
  1663. },
  1664. })
  1665. const result = ProviderTransform.variants(model)
  1666. expect(Object.keys(result)).toEqual(["low", "medium", "high", "xhigh"])
  1667. })
  1668. test("gpt-5.1-codex-mini does not include xhigh", () => {
  1669. const model = createMockModel({
  1670. id: "gpt-5.1-codex-mini",
  1671. providerID: "github-copilot",
  1672. api: {
  1673. id: "gpt-5.1-codex-mini",
  1674. url: "https://api.githubcopilot.com",
  1675. npm: "@ai-sdk/github-copilot",
  1676. },
  1677. })
  1678. const result = ProviderTransform.variants(model)
  1679. expect(Object.keys(result)).toEqual(["low", "medium", "high"])
  1680. })
  1681. test("gpt-5.1-codex does not include xhigh", () => {
  1682. const model = createMockModel({
  1683. id: "gpt-5.1-codex",
  1684. providerID: "github-copilot",
  1685. api: {
  1686. id: "gpt-5.1-codex",
  1687. url: "https://api.githubcopilot.com",
  1688. npm: "@ai-sdk/github-copilot",
  1689. },
  1690. })
  1691. const result = ProviderTransform.variants(model)
  1692. expect(Object.keys(result)).toEqual(["low", "medium", "high"])
  1693. })
  1694. test("gpt-5.2 includes xhigh", () => {
  1695. const model = createMockModel({
  1696. id: "gpt-5.2",
  1697. providerID: "github-copilot",
  1698. api: {
  1699. id: "gpt-5.2",
  1700. url: "https://api.githubcopilot.com",
  1701. npm: "@ai-sdk/github-copilot",
  1702. },
  1703. })
  1704. const result = ProviderTransform.variants(model)
  1705. expect(Object.keys(result)).toEqual(["low", "medium", "high", "xhigh"])
  1706. expect(result.xhigh).toEqual({
  1707. reasoningEffort: "xhigh",
  1708. reasoningSummary: "auto",
  1709. include: ["reasoning.encrypted_content"],
  1710. })
  1711. })
  1712. test("gpt-5.2-codex includes xhigh", () => {
  1713. const model = createMockModel({
  1714. id: "gpt-5.2-codex",
  1715. providerID: "github-copilot",
  1716. api: {
  1717. id: "gpt-5.2-codex",
  1718. url: "https://api.githubcopilot.com",
  1719. npm: "@ai-sdk/github-copilot",
  1720. },
  1721. })
  1722. const result = ProviderTransform.variants(model)
  1723. expect(Object.keys(result)).toEqual(["low", "medium", "high", "xhigh"])
  1724. })
  1725. })
  1726. describe("@ai-sdk/cerebras", () => {
  1727. test("returns WIDELY_SUPPORTED_EFFORTS with reasoningEffort", () => {
  1728. const model = createMockModel({
  1729. id: "cerebras/llama-4",
  1730. providerID: "cerebras",
  1731. api: {
  1732. id: "llama-4-sc",
  1733. url: "https://api.cerebras.ai",
  1734. npm: "@ai-sdk/cerebras",
  1735. },
  1736. })
  1737. const result = ProviderTransform.variants(model)
  1738. expect(Object.keys(result)).toEqual(["low", "medium", "high"])
  1739. expect(result.low).toEqual({ reasoningEffort: "low" })
  1740. expect(result.high).toEqual({ reasoningEffort: "high" })
  1741. })
  1742. })
  1743. describe("@ai-sdk/togetherai", () => {
  1744. test("returns WIDELY_SUPPORTED_EFFORTS with reasoningEffort", () => {
  1745. const model = createMockModel({
  1746. id: "togetherai/llama-4",
  1747. providerID: "togetherai",
  1748. api: {
  1749. id: "llama-4-sc",
  1750. url: "https://api.togetherai.com",
  1751. npm: "@ai-sdk/togetherai",
  1752. },
  1753. })
  1754. const result = ProviderTransform.variants(model)
  1755. expect(Object.keys(result)).toEqual(["low", "medium", "high"])
  1756. expect(result.low).toEqual({ reasoningEffort: "low" })
  1757. expect(result.high).toEqual({ reasoningEffort: "high" })
  1758. })
  1759. })
  1760. describe("@ai-sdk/xai", () => {
  1761. test("grok-3 returns empty object", () => {
  1762. const model = createMockModel({
  1763. id: "xai/grok-3",
  1764. providerID: "xai",
  1765. api: {
  1766. id: "grok-3",
  1767. url: "https://api.x.ai",
  1768. npm: "@ai-sdk/xai",
  1769. },
  1770. })
  1771. const result = ProviderTransform.variants(model)
  1772. expect(result).toEqual({})
  1773. })
  1774. test("grok-3-mini returns low and high with reasoningEffort", () => {
  1775. const model = createMockModel({
  1776. id: "xai/grok-3-mini",
  1777. providerID: "xai",
  1778. api: {
  1779. id: "grok-3-mini",
  1780. url: "https://api.x.ai",
  1781. npm: "@ai-sdk/xai",
  1782. },
  1783. })
  1784. const result = ProviderTransform.variants(model)
  1785. expect(Object.keys(result)).toEqual(["low", "high"])
  1786. expect(result.low).toEqual({ reasoningEffort: "low" })
  1787. expect(result.high).toEqual({ reasoningEffort: "high" })
  1788. })
  1789. })
  1790. describe("@ai-sdk/deepinfra", () => {
  1791. test("returns WIDELY_SUPPORTED_EFFORTS with reasoningEffort", () => {
  1792. const model = createMockModel({
  1793. id: "deepinfra/llama-4",
  1794. providerID: "deepinfra",
  1795. api: {
  1796. id: "llama-4-sc",
  1797. url: "https://api.deepinfra.com",
  1798. npm: "@ai-sdk/deepinfra",
  1799. },
  1800. })
  1801. const result = ProviderTransform.variants(model)
  1802. expect(Object.keys(result)).toEqual(["low", "medium", "high"])
  1803. expect(result.low).toEqual({ reasoningEffort: "low" })
  1804. expect(result.high).toEqual({ reasoningEffort: "high" })
  1805. })
  1806. })
  1807. describe("@ai-sdk/openai-compatible", () => {
  1808. test("returns WIDELY_SUPPORTED_EFFORTS with reasoningEffort", () => {
  1809. const model = createMockModel({
  1810. id: "custom-provider/custom-model",
  1811. providerID: "custom-provider",
  1812. api: {
  1813. id: "custom-model",
  1814. url: "https://api.custom.com",
  1815. npm: "@ai-sdk/openai-compatible",
  1816. },
  1817. })
  1818. const result = ProviderTransform.variants(model)
  1819. expect(Object.keys(result)).toEqual(["low", "medium", "high"])
  1820. expect(result.low).toEqual({ reasoningEffort: "low" })
  1821. expect(result.high).toEqual({ reasoningEffort: "high" })
  1822. })
  1823. })
  1824. describe("@ai-sdk/azure", () => {
  1825. test("o1-mini returns empty object", () => {
  1826. const model = createMockModel({
  1827. id: "o1-mini",
  1828. providerID: "azure",
  1829. api: {
  1830. id: "o1-mini",
  1831. url: "https://azure.com",
  1832. npm: "@ai-sdk/azure",
  1833. },
  1834. })
  1835. const result = ProviderTransform.variants(model)
  1836. expect(result).toEqual({})
  1837. })
  1838. test("standard azure models return custom efforts with reasoningSummary", () => {
  1839. const model = createMockModel({
  1840. id: "o1",
  1841. providerID: "azure",
  1842. api: {
  1843. id: "o1",
  1844. url: "https://azure.com",
  1845. npm: "@ai-sdk/azure",
  1846. },
  1847. })
  1848. const result = ProviderTransform.variants(model)
  1849. expect(Object.keys(result)).toEqual(["low", "medium", "high"])
  1850. expect(result.low).toEqual({
  1851. reasoningEffort: "low",
  1852. reasoningSummary: "auto",
  1853. include: ["reasoning.encrypted_content"],
  1854. })
  1855. })
  1856. test("gpt-5 adds minimal effort", () => {
  1857. const model = createMockModel({
  1858. id: "gpt-5",
  1859. providerID: "azure",
  1860. api: {
  1861. id: "gpt-5",
  1862. url: "https://azure.com",
  1863. npm: "@ai-sdk/azure",
  1864. },
  1865. })
  1866. const result = ProviderTransform.variants(model)
  1867. expect(Object.keys(result)).toEqual(["minimal", "low", "medium", "high"])
  1868. })
  1869. })
  1870. describe("@ai-sdk/openai", () => {
  1871. test("gpt-5-pro returns empty object", () => {
  1872. const model = createMockModel({
  1873. id: "gpt-5-pro",
  1874. providerID: "openai",
  1875. api: {
  1876. id: "gpt-5-pro",
  1877. url: "https://api.openai.com",
  1878. npm: "@ai-sdk/openai",
  1879. },
  1880. })
  1881. const result = ProviderTransform.variants(model)
  1882. expect(result).toEqual({})
  1883. })
  1884. test("standard openai models return custom efforts with reasoningSummary", () => {
  1885. const model = createMockModel({
  1886. id: "gpt-5",
  1887. providerID: "openai",
  1888. api: {
  1889. id: "gpt-5",
  1890. url: "https://api.openai.com",
  1891. npm: "@ai-sdk/openai",
  1892. },
  1893. release_date: "2024-06-01",
  1894. })
  1895. const result = ProviderTransform.variants(model)
  1896. expect(Object.keys(result)).toEqual(["minimal", "low", "medium", "high"])
  1897. expect(result.low).toEqual({
  1898. reasoningEffort: "low",
  1899. reasoningSummary: "auto",
  1900. include: ["reasoning.encrypted_content"],
  1901. })
  1902. })
  1903. test("models after 2025-11-13 include 'none' effort", () => {
  1904. const model = createMockModel({
  1905. id: "gpt-5-nano",
  1906. providerID: "openai",
  1907. api: {
  1908. id: "gpt-5-nano",
  1909. url: "https://api.openai.com",
  1910. npm: "@ai-sdk/openai",
  1911. },
  1912. release_date: "2025-11-14",
  1913. })
  1914. const result = ProviderTransform.variants(model)
  1915. expect(Object.keys(result)).toEqual(["none", "minimal", "low", "medium", "high"])
  1916. })
  1917. test("models after 2025-12-04 include 'xhigh' effort", () => {
  1918. const model = createMockModel({
  1919. id: "openai/gpt-5-chat",
  1920. providerID: "openai",
  1921. api: {
  1922. id: "gpt-5-chat",
  1923. url: "https://api.openai.com",
  1924. npm: "@ai-sdk/openai",
  1925. },
  1926. release_date: "2025-12-05",
  1927. })
  1928. const result = ProviderTransform.variants(model)
  1929. expect(Object.keys(result)).toEqual(["none", "minimal", "low", "medium", "high", "xhigh"])
  1930. })
  1931. })
  1932. describe("@ai-sdk/anthropic", () => {
  1933. test("sonnet 4.6 returns adaptive thinking options", () => {
  1934. const model = createMockModel({
  1935. id: "anthropic/claude-sonnet-4-6",
  1936. providerID: "anthropic",
  1937. api: {
  1938. id: "claude-sonnet-4-6",
  1939. url: "https://api.anthropic.com",
  1940. npm: "@ai-sdk/anthropic",
  1941. },
  1942. })
  1943. const result = ProviderTransform.variants(model)
  1944. expect(Object.keys(result)).toEqual(["low", "medium", "high", "max"])
  1945. expect(result.high).toEqual({
  1946. thinking: {
  1947. type: "adaptive",
  1948. },
  1949. effort: "high",
  1950. })
  1951. })
  1952. test("returns high and max with thinking config", () => {
  1953. const model = createMockModel({
  1954. id: "anthropic/claude-4",
  1955. providerID: "anthropic",
  1956. api: {
  1957. id: "claude-4",
  1958. url: "https://api.anthropic.com",
  1959. npm: "@ai-sdk/anthropic",
  1960. },
  1961. })
  1962. const result = ProviderTransform.variants(model)
  1963. expect(Object.keys(result)).toEqual(["high", "max"])
  1964. expect(result.high).toEqual({
  1965. thinking: {
  1966. type: "enabled",
  1967. budgetTokens: 16000,
  1968. },
  1969. })
  1970. expect(result.max).toEqual({
  1971. thinking: {
  1972. type: "enabled",
  1973. budgetTokens: 31999,
  1974. },
  1975. })
  1976. })
  1977. })
  1978. describe("@ai-sdk/amazon-bedrock", () => {
  1979. test("anthropic sonnet 4.6 returns adaptive reasoning options", () => {
  1980. const model = createMockModel({
  1981. id: "bedrock/anthropic-claude-sonnet-4-6",
  1982. providerID: "bedrock",
  1983. api: {
  1984. id: "anthropic.claude-sonnet-4-6",
  1985. url: "https://bedrock.amazonaws.com",
  1986. npm: "@ai-sdk/amazon-bedrock",
  1987. },
  1988. })
  1989. const result = ProviderTransform.variants(model)
  1990. expect(Object.keys(result)).toEqual(["low", "medium", "high", "max"])
  1991. expect(result.max).toEqual({
  1992. reasoningConfig: {
  1993. type: "adaptive",
  1994. maxReasoningEffort: "max",
  1995. },
  1996. })
  1997. })
  1998. test("returns WIDELY_SUPPORTED_EFFORTS with reasoningConfig", () => {
  1999. const model = createMockModel({
  2000. id: "bedrock/llama-4",
  2001. providerID: "bedrock",
  2002. api: {
  2003. id: "llama-4-sc",
  2004. url: "https://bedrock.amazonaws.com",
  2005. npm: "@ai-sdk/amazon-bedrock",
  2006. },
  2007. })
  2008. const result = ProviderTransform.variants(model)
  2009. expect(Object.keys(result)).toEqual(["low", "medium", "high"])
  2010. expect(result.low).toEqual({
  2011. reasoningConfig: {
  2012. type: "enabled",
  2013. maxReasoningEffort: "low",
  2014. },
  2015. })
  2016. })
  2017. })
  2018. describe("@ai-sdk/google", () => {
  2019. test("gemini-2.5 returns high and max with thinkingConfig and thinkingBudget", () => {
  2020. const model = createMockModel({
  2021. id: "google/gemini-2.5-pro",
  2022. providerID: "google",
  2023. api: {
  2024. id: "gemini-2.5-pro",
  2025. url: "https://generativelanguage.googleapis.com",
  2026. npm: "@ai-sdk/google",
  2027. },
  2028. })
  2029. const result = ProviderTransform.variants(model)
  2030. expect(Object.keys(result)).toEqual(["high", "max"])
  2031. expect(result.high).toEqual({
  2032. thinkingConfig: {
  2033. includeThoughts: true,
  2034. thinkingBudget: 16000,
  2035. },
  2036. })
  2037. expect(result.max).toEqual({
  2038. thinkingConfig: {
  2039. includeThoughts: true,
  2040. thinkingBudget: 24576,
  2041. },
  2042. })
  2043. })
  2044. test("other gemini models return low and high with thinkingLevel", () => {
  2045. const model = createMockModel({
  2046. id: "google/gemini-2.0-pro",
  2047. providerID: "google",
  2048. api: {
  2049. id: "gemini-2.0-pro",
  2050. url: "https://generativelanguage.googleapis.com",
  2051. npm: "@ai-sdk/google",
  2052. },
  2053. })
  2054. const result = ProviderTransform.variants(model)
  2055. expect(Object.keys(result)).toEqual(["low", "high"])
  2056. expect(result.low).toEqual({
  2057. thinkingConfig: {
  2058. includeThoughts: true,
  2059. thinkingLevel: "low",
  2060. },
  2061. })
  2062. expect(result.high).toEqual({
  2063. thinkingConfig: {
  2064. includeThoughts: true,
  2065. thinkingLevel: "high",
  2066. },
  2067. })
  2068. })
  2069. })
  2070. describe("@ai-sdk/google-vertex", () => {
  2071. test("gemini-2.5 returns high and max with thinkingConfig and thinkingBudget", () => {
  2072. const model = createMockModel({
  2073. id: "google-vertex/gemini-2.5-pro",
  2074. providerID: "google-vertex",
  2075. api: {
  2076. id: "gemini-2.5-pro",
  2077. url: "https://vertexai.googleapis.com",
  2078. npm: "@ai-sdk/google-vertex",
  2079. },
  2080. })
  2081. const result = ProviderTransform.variants(model)
  2082. expect(Object.keys(result)).toEqual(["high", "max"])
  2083. })
  2084. test("other vertex models return low and high with thinkingLevel", () => {
  2085. const model = createMockModel({
  2086. id: "google-vertex/gemini-2.0-pro",
  2087. providerID: "google-vertex",
  2088. api: {
  2089. id: "gemini-2.0-pro",
  2090. url: "https://vertexai.googleapis.com",
  2091. npm: "@ai-sdk/google-vertex",
  2092. },
  2093. })
  2094. const result = ProviderTransform.variants(model)
  2095. expect(Object.keys(result)).toEqual(["low", "high"])
  2096. })
  2097. })
  2098. describe("@ai-sdk/cohere", () => {
  2099. test("returns empty object", () => {
  2100. const model = createMockModel({
  2101. id: "cohere/command-r",
  2102. providerID: "cohere",
  2103. api: {
  2104. id: "command-r",
  2105. url: "https://api.cohere.com",
  2106. npm: "@ai-sdk/cohere",
  2107. },
  2108. })
  2109. const result = ProviderTransform.variants(model)
  2110. expect(result).toEqual({})
  2111. })
  2112. })
  2113. describe("@ai-sdk/groq", () => {
  2114. test("returns none and WIDELY_SUPPORTED_EFFORTS with thinkingLevel", () => {
  2115. const model = createMockModel({
  2116. id: "groq/llama-4",
  2117. providerID: "groq",
  2118. api: {
  2119. id: "llama-4-sc",
  2120. url: "https://api.groq.com",
  2121. npm: "@ai-sdk/groq",
  2122. },
  2123. })
  2124. const result = ProviderTransform.variants(model)
  2125. expect(Object.keys(result)).toEqual(["none", "low", "medium", "high"])
  2126. expect(result.none).toEqual({
  2127. reasoningEffort: "none",
  2128. })
  2129. expect(result.low).toEqual({
  2130. reasoningEffort: "low",
  2131. })
  2132. })
  2133. })
  2134. describe("@ai-sdk/perplexity", () => {
  2135. test("returns empty object", () => {
  2136. const model = createMockModel({
  2137. id: "perplexity/sonar-plus",
  2138. providerID: "perplexity",
  2139. api: {
  2140. id: "sonar-plus",
  2141. url: "https://api.perplexity.ai",
  2142. npm: "@ai-sdk/perplexity",
  2143. },
  2144. })
  2145. const result = ProviderTransform.variants(model)
  2146. expect(result).toEqual({})
  2147. })
  2148. })
  2149. })