// transform.test.ts — tests for ProviderTransform (provider option/schema/message transforms)
  1. import { describe, expect, test } from "bun:test"
  2. import { ProviderTransform } from "../../src/provider/transform"
  3. const OUTPUT_TOKEN_MAX = 32000
  4. describe("ProviderTransform.options - setCacheKey", () => {
  5. const sessionID = "test-session-123"
  6. const mockModel = {
  7. id: "anthropic/claude-3-5-sonnet",
  8. providerID: "anthropic",
  9. api: {
  10. id: "claude-3-5-sonnet-20241022",
  11. url: "https://api.anthropic.com",
  12. npm: "@ai-sdk/anthropic",
  13. },
  14. name: "Claude 3.5 Sonnet",
  15. capabilities: {
  16. temperature: true,
  17. reasoning: false,
  18. attachment: true,
  19. toolcall: true,
  20. input: { text: true, audio: false, image: true, video: false, pdf: true },
  21. output: { text: true, audio: false, image: false, video: false, pdf: false },
  22. interleaved: false,
  23. },
  24. cost: {
  25. input: 0.003,
  26. output: 0.015,
  27. cache: { read: 0.0003, write: 0.00375 },
  28. },
  29. limit: {
  30. context: 200000,
  31. output: 8192,
  32. },
  33. status: "active",
  34. options: {},
  35. headers: {},
  36. } as any
  37. test("should set promptCacheKey when providerOptions.setCacheKey is true", () => {
  38. const result = ProviderTransform.options({
  39. model: mockModel,
  40. sessionID,
  41. providerOptions: { setCacheKey: true },
  42. })
  43. expect(result.promptCacheKey).toBe(sessionID)
  44. })
  45. test("should not set promptCacheKey when providerOptions.setCacheKey is false", () => {
  46. const result = ProviderTransform.options({
  47. model: mockModel,
  48. sessionID,
  49. providerOptions: { setCacheKey: false },
  50. })
  51. expect(result.promptCacheKey).toBeUndefined()
  52. })
  53. test("should not set promptCacheKey when providerOptions is undefined", () => {
  54. const result = ProviderTransform.options({
  55. model: mockModel,
  56. sessionID,
  57. providerOptions: undefined,
  58. })
  59. expect(result.promptCacheKey).toBeUndefined()
  60. })
  61. test("should not set promptCacheKey when providerOptions does not have setCacheKey", () => {
  62. const result = ProviderTransform.options({ model: mockModel, sessionID, providerOptions: {} })
  63. expect(result.promptCacheKey).toBeUndefined()
  64. })
  65. test("should set promptCacheKey for openai provider regardless of setCacheKey", () => {
  66. const openaiModel = {
  67. ...mockModel,
  68. providerID: "openai",
  69. api: {
  70. id: "gpt-4",
  71. url: "https://api.openai.com",
  72. npm: "@ai-sdk/openai",
  73. },
  74. }
  75. const result = ProviderTransform.options({ model: openaiModel, sessionID, providerOptions: {} })
  76. expect(result.promptCacheKey).toBe(sessionID)
  77. })
  78. test("should set store=false for openai provider", () => {
  79. const openaiModel = {
  80. ...mockModel,
  81. providerID: "openai",
  82. api: {
  83. id: "gpt-4",
  84. url: "https://api.openai.com",
  85. npm: "@ai-sdk/openai",
  86. },
  87. }
  88. const result = ProviderTransform.options({
  89. model: openaiModel,
  90. sessionID,
  91. providerOptions: {},
  92. })
  93. expect(result.store).toBe(false)
  94. })
  95. })
  96. describe("ProviderTransform.options - gpt-5 textVerbosity", () => {
  97. const sessionID = "test-session-123"
  98. const createGpt5Model = (apiId: string) =>
  99. ({
  100. id: `openai/${apiId}`,
  101. providerID: "openai",
  102. api: {
  103. id: apiId,
  104. url: "https://api.openai.com",
  105. npm: "@ai-sdk/openai",
  106. },
  107. name: apiId,
  108. capabilities: {
  109. temperature: true,
  110. reasoning: true,
  111. attachment: true,
  112. toolcall: true,
  113. input: { text: true, audio: false, image: true, video: false, pdf: false },
  114. output: { text: true, audio: false, image: false, video: false, pdf: false },
  115. interleaved: false,
  116. },
  117. cost: { input: 0.03, output: 0.06, cache: { read: 0.001, write: 0.002 } },
  118. limit: { context: 128000, output: 4096 },
  119. status: "active",
  120. options: {},
  121. headers: {},
  122. }) as any
  123. test("gpt-5.2 should have textVerbosity set to low", () => {
  124. const model = createGpt5Model("gpt-5.2")
  125. const result = ProviderTransform.options({ model, sessionID, providerOptions: {} })
  126. expect(result.textVerbosity).toBe("low")
  127. })
  128. test("gpt-5.1 should have textVerbosity set to low", () => {
  129. const model = createGpt5Model("gpt-5.1")
  130. const result = ProviderTransform.options({ model, sessionID, providerOptions: {} })
  131. expect(result.textVerbosity).toBe("low")
  132. })
  133. test("gpt-5.2-chat-latest should NOT have textVerbosity set (only supports medium)", () => {
  134. const model = createGpt5Model("gpt-5.2-chat-latest")
  135. const result = ProviderTransform.options({ model, sessionID, providerOptions: {} })
  136. expect(result.textVerbosity).toBeUndefined()
  137. })
  138. test("gpt-5.1-chat-latest should NOT have textVerbosity set (only supports medium)", () => {
  139. const model = createGpt5Model("gpt-5.1-chat-latest")
  140. const result = ProviderTransform.options({ model, sessionID, providerOptions: {} })
  141. expect(result.textVerbosity).toBeUndefined()
  142. })
  143. test("gpt-5.2-chat should NOT have textVerbosity set", () => {
  144. const model = createGpt5Model("gpt-5.2-chat")
  145. const result = ProviderTransform.options({ model, sessionID, providerOptions: {} })
  146. expect(result.textVerbosity).toBeUndefined()
  147. })
  148. test("gpt-5-chat should NOT have textVerbosity set", () => {
  149. const model = createGpt5Model("gpt-5-chat")
  150. const result = ProviderTransform.options({ model, sessionID, providerOptions: {} })
  151. expect(result.textVerbosity).toBeUndefined()
  152. })
  153. test("gpt-5.2-codex should NOT have textVerbosity set (codex models excluded)", () => {
  154. const model = createGpt5Model("gpt-5.2-codex")
  155. const result = ProviderTransform.options({ model, sessionID, providerOptions: {} })
  156. expect(result.textVerbosity).toBeUndefined()
  157. })
  158. })
  159. describe("ProviderTransform.maxOutputTokens", () => {
  160. test("returns 32k when modelLimit > 32k", () => {
  161. const modelLimit = 100000
  162. const result = ProviderTransform.maxOutputTokens("@ai-sdk/openai", {}, modelLimit, OUTPUT_TOKEN_MAX)
  163. expect(result).toBe(OUTPUT_TOKEN_MAX)
  164. })
  165. test("returns modelLimit when modelLimit < 32k", () => {
  166. const modelLimit = 16000
  167. const result = ProviderTransform.maxOutputTokens("@ai-sdk/openai", {}, modelLimit, OUTPUT_TOKEN_MAX)
  168. expect(result).toBe(16000)
  169. })
  170. describe("azure", () => {
  171. test("returns 32k when modelLimit > 32k", () => {
  172. const modelLimit = 100000
  173. const result = ProviderTransform.maxOutputTokens("@ai-sdk/azure", {}, modelLimit, OUTPUT_TOKEN_MAX)
  174. expect(result).toBe(OUTPUT_TOKEN_MAX)
  175. })
  176. test("returns modelLimit when modelLimit < 32k", () => {
  177. const modelLimit = 16000
  178. const result = ProviderTransform.maxOutputTokens("@ai-sdk/azure", {}, modelLimit, OUTPUT_TOKEN_MAX)
  179. expect(result).toBe(16000)
  180. })
  181. })
  182. describe("bedrock", () => {
  183. test("returns 32k when modelLimit > 32k", () => {
  184. const modelLimit = 100000
  185. const result = ProviderTransform.maxOutputTokens("@ai-sdk/amazon-bedrock", {}, modelLimit, OUTPUT_TOKEN_MAX)
  186. expect(result).toBe(OUTPUT_TOKEN_MAX)
  187. })
  188. test("returns modelLimit when modelLimit < 32k", () => {
  189. const modelLimit = 16000
  190. const result = ProviderTransform.maxOutputTokens("@ai-sdk/amazon-bedrock", {}, modelLimit, OUTPUT_TOKEN_MAX)
  191. expect(result).toBe(16000)
  192. })
  193. })
  194. describe("anthropic without thinking options", () => {
  195. test("returns 32k when modelLimit > 32k", () => {
  196. const modelLimit = 100000
  197. const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", {}, modelLimit, OUTPUT_TOKEN_MAX)
  198. expect(result).toBe(OUTPUT_TOKEN_MAX)
  199. })
  200. test("returns modelLimit when modelLimit < 32k", () => {
  201. const modelLimit = 16000
  202. const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", {}, modelLimit, OUTPUT_TOKEN_MAX)
  203. expect(result).toBe(16000)
  204. })
  205. })
  206. describe("anthropic with thinking options", () => {
  207. test("returns 32k when budgetTokens + 32k <= modelLimit", () => {
  208. const modelLimit = 100000
  209. const options = {
  210. thinking: {
  211. type: "enabled",
  212. budgetTokens: 10000,
  213. },
  214. }
  215. const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", options, modelLimit, OUTPUT_TOKEN_MAX)
  216. expect(result).toBe(OUTPUT_TOKEN_MAX)
  217. })
  218. test("returns modelLimit - budgetTokens when budgetTokens + 32k > modelLimit", () => {
  219. const modelLimit = 50000
  220. const options = {
  221. thinking: {
  222. type: "enabled",
  223. budgetTokens: 30000,
  224. },
  225. }
  226. const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", options, modelLimit, OUTPUT_TOKEN_MAX)
  227. expect(result).toBe(20000)
  228. })
  229. test("returns 32k when thinking type is not enabled", () => {
  230. const modelLimit = 100000
  231. const options = {
  232. thinking: {
  233. type: "disabled",
  234. budgetTokens: 10000,
  235. },
  236. }
  237. const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", options, modelLimit, OUTPUT_TOKEN_MAX)
  238. expect(result).toBe(OUTPUT_TOKEN_MAX)
  239. })
  240. })
  241. })
  242. describe("ProviderTransform.schema - gemini array items", () => {
  243. test("adds missing items for array properties", () => {
  244. const geminiModel = {
  245. providerID: "google",
  246. api: {
  247. id: "gemini-3-pro",
  248. },
  249. } as any
  250. const schema = {
  251. type: "object",
  252. properties: {
  253. nodes: { type: "array" },
  254. edges: { type: "array", items: { type: "string" } },
  255. },
  256. } as any
  257. const result = ProviderTransform.schema(geminiModel, schema) as any
  258. expect(result.properties.nodes.items).toBeDefined()
  259. expect(result.properties.edges.items.type).toBe("string")
  260. })
  261. })
  262. describe("ProviderTransform.schema - gemini nested array items", () => {
  263. const geminiModel = {
  264. providerID: "google",
  265. api: {
  266. id: "gemini-3-pro",
  267. },
  268. } as any
  269. test("adds type to 2D array with empty inner items", () => {
  270. const schema = {
  271. type: "object",
  272. properties: {
  273. values: {
  274. type: "array",
  275. items: {
  276. type: "array",
  277. items: {}, // Empty items object
  278. },
  279. },
  280. },
  281. } as any
  282. const result = ProviderTransform.schema(geminiModel, schema) as any
  283. // Inner items should have a default type
  284. expect(result.properties.values.items.items.type).toBe("string")
  285. })
  286. test("adds items and type to 2D array with missing inner items", () => {
  287. const schema = {
  288. type: "object",
  289. properties: {
  290. data: {
  291. type: "array",
  292. items: { type: "array" }, // No items at all
  293. },
  294. },
  295. } as any
  296. const result = ProviderTransform.schema(geminiModel, schema) as any
  297. expect(result.properties.data.items.items).toBeDefined()
  298. expect(result.properties.data.items.items.type).toBe("string")
  299. })
  300. test("handles deeply nested arrays (3D)", () => {
  301. const schema = {
  302. type: "object",
  303. properties: {
  304. matrix: {
  305. type: "array",
  306. items: {
  307. type: "array",
  308. items: {
  309. type: "array",
  310. // No items
  311. },
  312. },
  313. },
  314. },
  315. } as any
  316. const result = ProviderTransform.schema(geminiModel, schema) as any
  317. expect(result.properties.matrix.items.items.items).toBeDefined()
  318. expect(result.properties.matrix.items.items.items.type).toBe("string")
  319. })
  320. test("preserves existing item types in nested arrays", () => {
  321. const schema = {
  322. type: "object",
  323. properties: {
  324. numbers: {
  325. type: "array",
  326. items: {
  327. type: "array",
  328. items: { type: "number" }, // Has explicit type
  329. },
  330. },
  331. },
  332. } as any
  333. const result = ProviderTransform.schema(geminiModel, schema) as any
  334. // Should preserve the explicit type
  335. expect(result.properties.numbers.items.items.type).toBe("number")
  336. })
  337. test("handles mixed nested structures with objects and arrays", () => {
  338. const schema = {
  339. type: "object",
  340. properties: {
  341. spreadsheetData: {
  342. type: "object",
  343. properties: {
  344. rows: {
  345. type: "array",
  346. items: {
  347. type: "array",
  348. items: {}, // Empty items
  349. },
  350. },
  351. },
  352. },
  353. },
  354. } as any
  355. const result = ProviderTransform.schema(geminiModel, schema) as any
  356. expect(result.properties.spreadsheetData.properties.rows.items.items.type).toBe("string")
  357. })
  358. })
  359. describe("ProviderTransform.schema - gemini non-object properties removal", () => {
  360. const geminiModel = {
  361. providerID: "google",
  362. api: {
  363. id: "gemini-3-pro",
  364. },
  365. } as any
  366. test("removes properties from non-object types", () => {
  367. const schema = {
  368. type: "object",
  369. properties: {
  370. data: {
  371. type: "string",
  372. properties: { invalid: { type: "string" } },
  373. },
  374. },
  375. } as any
  376. const result = ProviderTransform.schema(geminiModel, schema) as any
  377. expect(result.properties.data.type).toBe("string")
  378. expect(result.properties.data.properties).toBeUndefined()
  379. })
  380. test("removes required from non-object types", () => {
  381. const schema = {
  382. type: "object",
  383. properties: {
  384. data: {
  385. type: "array",
  386. items: { type: "string" },
  387. required: ["invalid"],
  388. },
  389. },
  390. } as any
  391. const result = ProviderTransform.schema(geminiModel, schema) as any
  392. expect(result.properties.data.type).toBe("array")
  393. expect(result.properties.data.required).toBeUndefined()
  394. })
  395. test("removes properties and required from nested non-object types", () => {
  396. const schema = {
  397. type: "object",
  398. properties: {
  399. outer: {
  400. type: "object",
  401. properties: {
  402. inner: {
  403. type: "number",
  404. properties: { bad: { type: "string" } },
  405. required: ["bad"],
  406. },
  407. },
  408. },
  409. },
  410. } as any
  411. const result = ProviderTransform.schema(geminiModel, schema) as any
  412. expect(result.properties.outer.properties.inner.type).toBe("number")
  413. expect(result.properties.outer.properties.inner.properties).toBeUndefined()
  414. expect(result.properties.outer.properties.inner.required).toBeUndefined()
  415. })
  416. test("keeps properties and required on object types", () => {
  417. const schema = {
  418. type: "object",
  419. properties: {
  420. data: {
  421. type: "object",
  422. properties: { name: { type: "string" } },
  423. required: ["name"],
  424. },
  425. },
  426. } as any
  427. const result = ProviderTransform.schema(geminiModel, schema) as any
  428. expect(result.properties.data.type).toBe("object")
  429. expect(result.properties.data.properties).toBeDefined()
  430. expect(result.properties.data.required).toEqual(["name"])
  431. })
  432. test("does not affect non-gemini providers", () => {
  433. const openaiModel = {
  434. providerID: "openai",
  435. api: {
  436. id: "gpt-4",
  437. },
  438. } as any
  439. const schema = {
  440. type: "object",
  441. properties: {
  442. data: {
  443. type: "string",
  444. properties: { invalid: { type: "string" } },
  445. },
  446. },
  447. } as any
  448. const result = ProviderTransform.schema(openaiModel, schema) as any
  449. expect(result.properties.data.properties).toBeDefined()
  450. })
  451. })
  452. describe("ProviderTransform.message - DeepSeek reasoning content", () => {
  453. test("DeepSeek with tool calls includes reasoning_content in providerOptions", () => {
  454. const msgs = [
  455. {
  456. role: "assistant",
  457. content: [
  458. { type: "reasoning", text: "Let me think about this..." },
  459. {
  460. type: "tool-call",
  461. toolCallId: "test",
  462. toolName: "bash",
  463. input: { command: "echo hello" },
  464. },
  465. ],
  466. },
  467. ] as any[]
  468. const result = ProviderTransform.message(
  469. msgs,
  470. {
  471. id: "deepseek/deepseek-chat",
  472. providerID: "deepseek",
  473. api: {
  474. id: "deepseek-chat",
  475. url: "https://api.deepseek.com",
  476. npm: "@ai-sdk/openai-compatible",
  477. },
  478. name: "DeepSeek Chat",
  479. capabilities: {
  480. temperature: true,
  481. reasoning: true,
  482. attachment: false,
  483. toolcall: true,
  484. input: { text: true, audio: false, image: false, video: false, pdf: false },
  485. output: { text: true, audio: false, image: false, video: false, pdf: false },
  486. interleaved: {
  487. field: "reasoning_content",
  488. },
  489. },
  490. cost: {
  491. input: 0.001,
  492. output: 0.002,
  493. cache: { read: 0.0001, write: 0.0002 },
  494. },
  495. limit: {
  496. context: 128000,
  497. output: 8192,
  498. },
  499. status: "active",
  500. options: {},
  501. headers: {},
  502. release_date: "2023-04-01",
  503. },
  504. {},
  505. )
  506. expect(result).toHaveLength(1)
  507. expect(result[0].content).toEqual([
  508. {
  509. type: "tool-call",
  510. toolCallId: "test",
  511. toolName: "bash",
  512. input: { command: "echo hello" },
  513. },
  514. ])
  515. expect(result[0].providerOptions?.openaiCompatible?.reasoning_content).toBe("Let me think about this...")
  516. })
  517. test("Non-DeepSeek providers leave reasoning content unchanged", () => {
  518. const msgs = [
  519. {
  520. role: "assistant",
  521. content: [
  522. { type: "reasoning", text: "Should not be processed" },
  523. { type: "text", text: "Answer" },
  524. ],
  525. },
  526. ] as any[]
  527. const result = ProviderTransform.message(
  528. msgs,
  529. {
  530. id: "openai/gpt-4",
  531. providerID: "openai",
  532. api: {
  533. id: "gpt-4",
  534. url: "https://api.openai.com",
  535. npm: "@ai-sdk/openai",
  536. },
  537. name: "GPT-4",
  538. capabilities: {
  539. temperature: true,
  540. reasoning: false,
  541. attachment: true,
  542. toolcall: true,
  543. input: { text: true, audio: false, image: true, video: false, pdf: false },
  544. output: { text: true, audio: false, image: false, video: false, pdf: false },
  545. interleaved: false,
  546. },
  547. cost: {
  548. input: 0.03,
  549. output: 0.06,
  550. cache: { read: 0.001, write: 0.002 },
  551. },
  552. limit: {
  553. context: 128000,
  554. output: 4096,
  555. },
  556. status: "active",
  557. options: {},
  558. headers: {},
  559. release_date: "2023-04-01",
  560. },
  561. {},
  562. )
  563. expect(result[0].content).toEqual([
  564. { type: "reasoning", text: "Should not be processed" },
  565. { type: "text", text: "Answer" },
  566. ])
  567. expect(result[0].providerOptions?.openaiCompatible?.reasoning_content).toBeUndefined()
  568. })
  569. })
  570. describe("ProviderTransform.message - empty image handling", () => {
  571. const mockModel = {
  572. id: "anthropic/claude-3-5-sonnet",
  573. providerID: "anthropic",
  574. api: {
  575. id: "claude-3-5-sonnet-20241022",
  576. url: "https://api.anthropic.com",
  577. npm: "@ai-sdk/anthropic",
  578. },
  579. name: "Claude 3.5 Sonnet",
  580. capabilities: {
  581. temperature: true,
  582. reasoning: false,
  583. attachment: true,
  584. toolcall: true,
  585. input: { text: true, audio: false, image: true, video: false, pdf: true },
  586. output: { text: true, audio: false, image: false, video: false, pdf: false },
  587. interleaved: false,
  588. },
  589. cost: {
  590. input: 0.003,
  591. output: 0.015,
  592. cache: { read: 0.0003, write: 0.00375 },
  593. },
  594. limit: {
  595. context: 200000,
  596. output: 8192,
  597. },
  598. status: "active",
  599. options: {},
  600. headers: {},
  601. } as any
  602. test("should replace empty base64 image with error text", () => {
  603. const msgs = [
  604. {
  605. role: "user",
  606. content: [
  607. { type: "text", text: "What is in this image?" },
  608. { type: "image", image: "data:image/png;base64," },
  609. ],
  610. },
  611. ] as any[]
  612. const result = ProviderTransform.message(msgs, mockModel, {})
  613. expect(result).toHaveLength(1)
  614. expect(result[0].content).toHaveLength(2)
  615. expect(result[0].content[0]).toEqual({ type: "text", text: "What is in this image?" })
  616. expect(result[0].content[1]).toEqual({
  617. type: "text",
  618. text: "ERROR: Image file is empty or corrupted. Please provide a valid image.",
  619. })
  620. })
  621. test("should keep valid base64 images unchanged", () => {
  622. const validBase64 =
  623. "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNk+M9QDwADhgGAWjR9awAAAABJRU5ErkJggg=="
  624. const msgs = [
  625. {
  626. role: "user",
  627. content: [
  628. { type: "text", text: "What is in this image?" },
  629. { type: "image", image: `data:image/png;base64,${validBase64}` },
  630. ],
  631. },
  632. ] as any[]
  633. const result = ProviderTransform.message(msgs, mockModel, {})
  634. expect(result).toHaveLength(1)
  635. expect(result[0].content).toHaveLength(2)
  636. expect(result[0].content[0]).toEqual({ type: "text", text: "What is in this image?" })
  637. expect(result[0].content[1]).toEqual({ type: "image", image: `data:image/png;base64,${validBase64}` })
  638. })
  639. test("should handle mixed valid and empty images", () => {
  640. const validBase64 =
  641. "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNk+M9QDwADhgGAWjR9awAAAABJRU5ErkJggg=="
  642. const msgs = [
  643. {
  644. role: "user",
  645. content: [
  646. { type: "text", text: "Compare these images" },
  647. { type: "image", image: `data:image/png;base64,${validBase64}` },
  648. { type: "image", image: "data:image/jpeg;base64," },
  649. ],
  650. },
  651. ] as any[]
  652. const result = ProviderTransform.message(msgs, mockModel, {})
  653. expect(result).toHaveLength(1)
  654. expect(result[0].content).toHaveLength(3)
  655. expect(result[0].content[0]).toEqual({ type: "text", text: "Compare these images" })
  656. expect(result[0].content[1]).toEqual({ type: "image", image: `data:image/png;base64,${validBase64}` })
  657. expect(result[0].content[2]).toEqual({
  658. type: "text",
  659. text: "ERROR: Image file is empty or corrupted. Please provide a valid image.",
  660. })
  661. })
  662. })
  663. describe("ProviderTransform.message - anthropic empty content filtering", () => {
  664. const anthropicModel = {
  665. id: "anthropic/claude-3-5-sonnet",
  666. providerID: "anthropic",
  667. api: {
  668. id: "claude-3-5-sonnet-20241022",
  669. url: "https://api.anthropic.com",
  670. npm: "@ai-sdk/anthropic",
  671. },
  672. name: "Claude 3.5 Sonnet",
  673. capabilities: {
  674. temperature: true,
  675. reasoning: false,
  676. attachment: true,
  677. toolcall: true,
  678. input: { text: true, audio: false, image: true, video: false, pdf: true },
  679. output: { text: true, audio: false, image: false, video: false, pdf: false },
  680. interleaved: false,
  681. },
  682. cost: {
  683. input: 0.003,
  684. output: 0.015,
  685. cache: { read: 0.0003, write: 0.00375 },
  686. },
  687. limit: {
  688. context: 200000,
  689. output: 8192,
  690. },
  691. status: "active",
  692. options: {},
  693. headers: {},
  694. } as any
  695. test("filters out messages with empty string content", () => {
  696. const msgs = [
  697. { role: "user", content: "Hello" },
  698. { role: "assistant", content: "" },
  699. { role: "user", content: "World" },
  700. ] as any[]
  701. const result = ProviderTransform.message(msgs, anthropicModel, {})
  702. expect(result).toHaveLength(2)
  703. expect(result[0].content).toBe("Hello")
  704. expect(result[1].content).toBe("World")
  705. })
  706. test("filters out empty text parts from array content", () => {
  707. const msgs = [
  708. {
  709. role: "assistant",
  710. content: [
  711. { type: "text", text: "" },
  712. { type: "text", text: "Hello" },
  713. { type: "text", text: "" },
  714. ],
  715. },
  716. ] as any[]
  717. const result = ProviderTransform.message(msgs, anthropicModel, {})
  718. expect(result).toHaveLength(1)
  719. expect(result[0].content).toHaveLength(1)
  720. expect(result[0].content[0]).toEqual({ type: "text", text: "Hello" })
  721. })
  722. test("filters out empty reasoning parts from array content", () => {
  723. const msgs = [
  724. {
  725. role: "assistant",
  726. content: [
  727. { type: "reasoning", text: "" },
  728. { type: "text", text: "Answer" },
  729. { type: "reasoning", text: "" },
  730. ],
  731. },
  732. ] as any[]
  733. const result = ProviderTransform.message(msgs, anthropicModel, {})
  734. expect(result).toHaveLength(1)
  735. expect(result[0].content).toHaveLength(1)
  736. expect(result[0].content[0]).toEqual({ type: "text", text: "Answer" })
  737. })
  738. test("removes entire message when all parts are empty", () => {
  739. const msgs = [
  740. { role: "user", content: "Hello" },
  741. {
  742. role: "assistant",
  743. content: [
  744. { type: "text", text: "" },
  745. { type: "reasoning", text: "" },
  746. ],
  747. },
  748. { role: "user", content: "World" },
  749. ] as any[]
  750. const result = ProviderTransform.message(msgs, anthropicModel, {})
  751. expect(result).toHaveLength(2)
  752. expect(result[0].content).toBe("Hello")
  753. expect(result[1].content).toBe("World")
  754. })
  755. test("keeps non-text/reasoning parts even if text parts are empty", () => {
  756. const msgs = [
  757. {
  758. role: "assistant",
  759. content: [
  760. { type: "text", text: "" },
  761. { type: "tool-call", toolCallId: "123", toolName: "bash", input: { command: "ls" } },
  762. ],
  763. },
  764. ] as any[]
  765. const result = ProviderTransform.message(msgs, anthropicModel, {})
  766. expect(result).toHaveLength(1)
  767. expect(result[0].content).toHaveLength(1)
  768. expect(result[0].content[0]).toEqual({
  769. type: "tool-call",
  770. toolCallId: "123",
  771. toolName: "bash",
  772. input: { command: "ls" },
  773. })
  774. })
  775. test("keeps messages with valid text alongside empty parts", () => {
  776. const msgs = [
  777. {
  778. role: "assistant",
  779. content: [
  780. { type: "reasoning", text: "Thinking..." },
  781. { type: "text", text: "" },
  782. { type: "text", text: "Result" },
  783. ],
  784. },
  785. ] as any[]
  786. const result = ProviderTransform.message(msgs, anthropicModel, {})
  787. expect(result).toHaveLength(1)
  788. expect(result[0].content).toHaveLength(2)
  789. expect(result[0].content[0]).toEqual({ type: "reasoning", text: "Thinking..." })
  790. expect(result[0].content[1]).toEqual({ type: "text", text: "Result" })
  791. })
  792. test("does not filter for non-anthropic providers", () => {
  793. const openaiModel = {
  794. ...anthropicModel,
  795. providerID: "openai",
  796. api: {
  797. id: "gpt-4",
  798. url: "https://api.openai.com",
  799. npm: "@ai-sdk/openai",
  800. },
  801. }
  802. const msgs = [
  803. { role: "assistant", content: "" },
  804. {
  805. role: "assistant",
  806. content: [{ type: "text", text: "" }],
  807. },
  808. ] as any[]
  809. const result = ProviderTransform.message(msgs, openaiModel, {})
  810. expect(result).toHaveLength(2)
  811. expect(result[0].content).toBe("")
  812. expect(result[1].content).toHaveLength(1)
  813. })
  814. })
  815. describe("ProviderTransform.message - strip openai metadata when store=false", () => {
  816. const openaiModel = {
  817. id: "openai/gpt-5",
  818. providerID: "openai",
  819. api: {
  820. id: "gpt-5",
  821. url: "https://api.openai.com",
  822. npm: "@ai-sdk/openai",
  823. },
  824. name: "GPT-5",
  825. capabilities: {
  826. temperature: true,
  827. reasoning: true,
  828. attachment: true,
  829. toolcall: true,
  830. input: { text: true, audio: false, image: true, video: false, pdf: false },
  831. output: { text: true, audio: false, image: false, video: false, pdf: false },
  832. interleaved: false,
  833. },
  834. cost: { input: 0.03, output: 0.06, cache: { read: 0.001, write: 0.002 } },
  835. limit: { context: 128000, output: 4096 },
  836. status: "active",
  837. options: {},
  838. headers: {},
  839. } as any
  840. test("preserves itemId and reasoningEncryptedContent when store=false", () => {
  841. const msgs = [
  842. {
  843. role: "assistant",
  844. content: [
  845. {
  846. type: "reasoning",
  847. text: "thinking...",
  848. providerOptions: {
  849. openai: {
  850. itemId: "rs_123",
  851. reasoningEncryptedContent: "encrypted",
  852. },
  853. },
  854. },
  855. {
  856. type: "text",
  857. text: "Hello",
  858. providerOptions: {
  859. openai: {
  860. itemId: "msg_456",
  861. },
  862. },
  863. },
  864. ],
  865. },
  866. ] as any[]
  867. const result = ProviderTransform.message(msgs, openaiModel, { store: false }) as any[]
  868. expect(result).toHaveLength(1)
  869. expect(result[0].content[0].providerOptions?.openai?.itemId).toBe("rs_123")
  870. expect(result[0].content[1].providerOptions?.openai?.itemId).toBe("msg_456")
  871. })
  872. test("preserves itemId and reasoningEncryptedContent when store=false even when not openai", () => {
  873. const zenModel = {
  874. ...openaiModel,
  875. providerID: "zen",
  876. }
  877. const msgs = [
  878. {
  879. role: "assistant",
  880. content: [
  881. {
  882. type: "reasoning",
  883. text: "thinking...",
  884. providerOptions: {
  885. openai: {
  886. itemId: "rs_123",
  887. reasoningEncryptedContent: "encrypted",
  888. },
  889. },
  890. },
  891. {
  892. type: "text",
  893. text: "Hello",
  894. providerOptions: {
  895. openai: {
  896. itemId: "msg_456",
  897. },
  898. },
  899. },
  900. ],
  901. },
  902. ] as any[]
  903. const result = ProviderTransform.message(msgs, zenModel, { store: false }) as any[]
  904. expect(result).toHaveLength(1)
  905. expect(result[0].content[0].providerOptions?.openai?.itemId).toBe("rs_123")
  906. expect(result[0].content[1].providerOptions?.openai?.itemId).toBe("msg_456")
  907. })
  908. test("preserves other openai options including itemId", () => {
  909. const msgs = [
  910. {
  911. role: "assistant",
  912. content: [
  913. {
  914. type: "text",
  915. text: "Hello",
  916. providerOptions: {
  917. openai: {
  918. itemId: "msg_123",
  919. otherOption: "value",
  920. },
  921. },
  922. },
  923. ],
  924. },
  925. ] as any[]
  926. const result = ProviderTransform.message(msgs, openaiModel, { store: false }) as any[]
  927. expect(result[0].content[0].providerOptions?.openai?.itemId).toBe("msg_123")
  928. expect(result[0].content[0].providerOptions?.openai?.otherOption).toBe("value")
  929. })
  930. test("preserves metadata for openai package when store is true", () => {
  931. const msgs = [
  932. {
  933. role: "assistant",
  934. content: [
  935. {
  936. type: "text",
  937. text: "Hello",
  938. providerOptions: {
  939. openai: {
  940. itemId: "msg_123",
  941. },
  942. },
  943. },
  944. ],
  945. },
  946. ] as any[]
  947. // openai package preserves itemId regardless of store value
  948. const result = ProviderTransform.message(msgs, openaiModel, { store: true }) as any[]
  949. expect(result[0].content[0].providerOptions?.openai?.itemId).toBe("msg_123")
  950. })
  951. test("preserves metadata for non-openai packages when store is false", () => {
  952. const anthropicModel = {
  953. ...openaiModel,
  954. providerID: "anthropic",
  955. api: {
  956. id: "claude-3",
  957. url: "https://api.anthropic.com",
  958. npm: "@ai-sdk/anthropic",
  959. },
  960. }
  961. const msgs = [
  962. {
  963. role: "assistant",
  964. content: [
  965. {
  966. type: "text",
  967. text: "Hello",
  968. providerOptions: {
  969. openai: {
  970. itemId: "msg_123",
  971. },
  972. },
  973. },
  974. ],
  975. },
  976. ] as any[]
  977. // store=false preserves metadata for non-openai packages
  978. const result = ProviderTransform.message(msgs, anthropicModel, { store: false }) as any[]
  979. expect(result[0].content[0].providerOptions?.openai?.itemId).toBe("msg_123")
  980. })
  981. test("preserves metadata using providerID key when store is false", () => {
  982. const opencodeModel = {
  983. ...openaiModel,
  984. providerID: "opencode",
  985. api: {
  986. id: "opencode-test",
  987. url: "https://api.opencode.ai",
  988. npm: "@ai-sdk/openai-compatible",
  989. },
  990. }
  991. const msgs = [
  992. {
  993. role: "assistant",
  994. content: [
  995. {
  996. type: "text",
  997. text: "Hello",
  998. providerOptions: {
  999. opencode: {
  1000. itemId: "msg_123",
  1001. otherOption: "value",
  1002. },
  1003. },
  1004. },
  1005. ],
  1006. },
  1007. ] as any[]
  1008. const result = ProviderTransform.message(msgs, opencodeModel, { store: false }) as any[]
  1009. expect(result[0].content[0].providerOptions?.opencode?.itemId).toBe("msg_123")
  1010. expect(result[0].content[0].providerOptions?.opencode?.otherOption).toBe("value")
  1011. })
  1012. test("preserves itemId across all providerOptions keys", () => {
  1013. const opencodeModel = {
  1014. ...openaiModel,
  1015. providerID: "opencode",
  1016. api: {
  1017. id: "opencode-test",
  1018. url: "https://api.opencode.ai",
  1019. npm: "@ai-sdk/openai-compatible",
  1020. },
  1021. }
  1022. const msgs = [
  1023. {
  1024. role: "assistant",
  1025. providerOptions: {
  1026. openai: { itemId: "msg_root" },
  1027. opencode: { itemId: "msg_opencode" },
  1028. extra: { itemId: "msg_extra" },
  1029. },
  1030. content: [
  1031. {
  1032. type: "text",
  1033. text: "Hello",
  1034. providerOptions: {
  1035. openai: { itemId: "msg_openai_part" },
  1036. opencode: { itemId: "msg_opencode_part" },
  1037. extra: { itemId: "msg_extra_part" },
  1038. },
  1039. },
  1040. ],
  1041. },
  1042. ] as any[]
  1043. const result = ProviderTransform.message(msgs, opencodeModel, { store: false }) as any[]
  1044. expect(result[0].providerOptions?.openai?.itemId).toBe("msg_root")
  1045. expect(result[0].providerOptions?.opencode?.itemId).toBe("msg_opencode")
  1046. expect(result[0].providerOptions?.extra?.itemId).toBe("msg_extra")
  1047. expect(result[0].content[0].providerOptions?.openai?.itemId).toBe("msg_openai_part")
  1048. expect(result[0].content[0].providerOptions?.opencode?.itemId).toBe("msg_opencode_part")
  1049. expect(result[0].content[0].providerOptions?.extra?.itemId).toBe("msg_extra_part")
  1050. })
  1051. test("does not strip metadata for non-openai packages when store is not false", () => {
  1052. const anthropicModel = {
  1053. ...openaiModel,
  1054. providerID: "anthropic",
  1055. api: {
  1056. id: "claude-3",
  1057. url: "https://api.anthropic.com",
  1058. npm: "@ai-sdk/anthropic",
  1059. },
  1060. }
  1061. const msgs = [
  1062. {
  1063. role: "assistant",
  1064. content: [
  1065. {
  1066. type: "text",
  1067. text: "Hello",
  1068. providerOptions: {
  1069. openai: {
  1070. itemId: "msg_123",
  1071. },
  1072. },
  1073. },
  1074. ],
  1075. },
  1076. ] as any[]
  1077. const result = ProviderTransform.message(msgs, anthropicModel, {}) as any[]
  1078. expect(result[0].content[0].providerOptions?.openai?.itemId).toBe("msg_123")
  1079. })
  1080. })
  1081. describe("ProviderTransform.message - providerOptions key remapping", () => {
  1082. const createModel = (providerID: string, npm: string) =>
  1083. ({
  1084. id: `${providerID}/test-model`,
  1085. providerID,
  1086. api: {
  1087. id: "test-model",
  1088. url: "https://api.test.com",
  1089. npm,
  1090. },
  1091. name: "Test Model",
  1092. capabilities: {
  1093. temperature: true,
  1094. reasoning: false,
  1095. attachment: true,
  1096. toolcall: true,
  1097. input: { text: true, audio: false, image: true, video: false, pdf: true },
  1098. output: { text: true, audio: false, image: false, video: false, pdf: false },
  1099. interleaved: false,
  1100. },
  1101. cost: { input: 0.001, output: 0.002, cache: { read: 0.0001, write: 0.0002 } },
  1102. limit: { context: 128000, output: 8192 },
  1103. status: "active",
  1104. options: {},
  1105. headers: {},
  1106. }) as any
  1107. test("azure keeps 'azure' key and does not remap to 'openai'", () => {
  1108. const model = createModel("azure", "@ai-sdk/azure")
  1109. const msgs = [
  1110. {
  1111. role: "user",
  1112. content: "Hello",
  1113. providerOptions: {
  1114. azure: { someOption: "value" },
  1115. },
  1116. },
  1117. ] as any[]
  1118. const result = ProviderTransform.message(msgs, model, {})
  1119. expect(result[0].providerOptions?.azure).toEqual({ someOption: "value" })
  1120. expect(result[0].providerOptions?.openai).toBeUndefined()
  1121. })
  1122. test("copilot remaps providerID to 'copilot' key", () => {
  1123. const model = createModel("github-copilot", "@ai-sdk/github-copilot")
  1124. const msgs = [
  1125. {
  1126. role: "user",
  1127. content: "Hello",
  1128. providerOptions: {
  1129. copilot: { someOption: "value" },
  1130. },
  1131. },
  1132. ] as any[]
  1133. const result = ProviderTransform.message(msgs, model, {})
  1134. expect(result[0].providerOptions?.copilot).toEqual({ someOption: "value" })
  1135. expect(result[0].providerOptions?.["github-copilot"]).toBeUndefined()
  1136. })
  1137. test("bedrock remaps providerID to 'bedrock' key", () => {
  1138. const model = createModel("my-bedrock", "@ai-sdk/amazon-bedrock")
  1139. const msgs = [
  1140. {
  1141. role: "user",
  1142. content: "Hello",
  1143. providerOptions: {
  1144. "my-bedrock": { someOption: "value" },
  1145. },
  1146. },
  1147. ] as any[]
  1148. const result = ProviderTransform.message(msgs, model, {})
  1149. expect(result[0].providerOptions?.bedrock).toEqual({ someOption: "value" })
  1150. expect(result[0].providerOptions?.["my-bedrock"]).toBeUndefined()
  1151. })
  1152. })
  1153. describe("ProviderTransform.message - claude w/bedrock custom inference profile", () => {
  1154. test("adds cachePoint", () => {
  1155. const model = {
  1156. id: "amazon-bedrock/custom-claude-sonnet-4.5",
  1157. providerID: "amazon-bedrock",
  1158. api: {
  1159. id: "arn:aws:bedrock:xxx:yyy:application-inference-profile/zzz",
  1160. url: "https://api.test.com",
  1161. npm: "@ai-sdk/amazon-bedrock",
  1162. },
  1163. name: "Custom inference profile",
  1164. capabilities: {},
  1165. options: {},
  1166. headers: {},
  1167. } as any
  1168. const msgs = [
  1169. {
  1170. role: "user",
  1171. content: "Hello",
  1172. },
  1173. ] as any[]
  1174. const result = ProviderTransform.message(msgs, model, {})
  1175. expect(result[0].providerOptions?.bedrock).toEqual(
  1176. expect.objectContaining({
  1177. cachePoint: {
  1178. type: "default",
  1179. },
  1180. }),
  1181. )
  1182. })
  1183. })
  1184. describe("ProviderTransform.variants", () => {
  1185. const createMockModel = (overrides: Partial<any> = {}): any => ({
  1186. id: "test/test-model",
  1187. providerID: "test",
  1188. api: {
  1189. id: "test-model",
  1190. url: "https://api.test.com",
  1191. npm: "@ai-sdk/openai",
  1192. },
  1193. name: "Test Model",
  1194. capabilities: {
  1195. temperature: true,
  1196. reasoning: true,
  1197. attachment: true,
  1198. toolcall: true,
  1199. input: { text: true, audio: false, image: true, video: false, pdf: false },
  1200. output: { text: true, audio: false, image: false, video: false, pdf: false },
  1201. interleaved: false,
  1202. },
  1203. cost: {
  1204. input: 0.001,
  1205. output: 0.002,
  1206. cache: { read: 0.0001, write: 0.0002 },
  1207. },
  1208. limit: {
  1209. context: 200_000,
  1210. output: 64_000,
  1211. },
  1212. status: "active",
  1213. options: {},
  1214. headers: {},
  1215. release_date: "2024-01-01",
  1216. ...overrides,
  1217. })
  1218. test("returns empty object when model has no reasoning capabilities", () => {
  1219. const model = createMockModel({
  1220. capabilities: { reasoning: false },
  1221. })
  1222. const result = ProviderTransform.variants(model)
  1223. expect(result).toEqual({})
  1224. })
  1225. test("deepseek returns empty object", () => {
  1226. const model = createMockModel({
  1227. id: "deepseek/deepseek-chat",
  1228. providerID: "deepseek",
  1229. api: {
  1230. id: "deepseek-chat",
  1231. url: "https://api.deepseek.com",
  1232. npm: "@ai-sdk/openai-compatible",
  1233. },
  1234. })
  1235. const result = ProviderTransform.variants(model)
  1236. expect(result).toEqual({})
  1237. })
  1238. test("minimax returns empty object", () => {
  1239. const model = createMockModel({
  1240. id: "minimax/minimax-model",
  1241. providerID: "minimax",
  1242. api: {
  1243. id: "minimax-model",
  1244. url: "https://api.minimax.com",
  1245. npm: "@ai-sdk/openai-compatible",
  1246. },
  1247. })
  1248. const result = ProviderTransform.variants(model)
  1249. expect(result).toEqual({})
  1250. })
  1251. test("glm returns empty object", () => {
  1252. const model = createMockModel({
  1253. id: "glm/glm-4",
  1254. providerID: "glm",
  1255. api: {
  1256. id: "glm-4",
  1257. url: "https://api.glm.com",
  1258. npm: "@ai-sdk/openai-compatible",
  1259. },
  1260. })
  1261. const result = ProviderTransform.variants(model)
  1262. expect(result).toEqual({})
  1263. })
  1264. test("mistral returns empty object", () => {
  1265. const model = createMockModel({
  1266. id: "mistral/mistral-large",
  1267. providerID: "mistral",
  1268. api: {
  1269. id: "mistral-large-latest",
  1270. url: "https://api.mistral.com",
  1271. npm: "@ai-sdk/mistral",
  1272. },
  1273. })
  1274. const result = ProviderTransform.variants(model)
  1275. expect(result).toEqual({})
  1276. })
  1277. describe("@openrouter/ai-sdk-provider", () => {
  1278. test("returns empty object for non-qualifying models", () => {
  1279. const model = createMockModel({
  1280. id: "openrouter/test-model",
  1281. providerID: "openrouter",
  1282. api: {
  1283. id: "test-model",
  1284. url: "https://openrouter.ai",
  1285. npm: "@openrouter/ai-sdk-provider",
  1286. },
  1287. })
  1288. const result = ProviderTransform.variants(model)
  1289. expect(result).toEqual({})
  1290. })
  1291. test("gpt models return OPENAI_EFFORTS with reasoning", () => {
  1292. const model = createMockModel({
  1293. id: "openrouter/gpt-4",
  1294. providerID: "openrouter",
  1295. api: {
  1296. id: "gpt-4",
  1297. url: "https://openrouter.ai",
  1298. npm: "@openrouter/ai-sdk-provider",
  1299. },
  1300. })
  1301. const result = ProviderTransform.variants(model)
  1302. expect(Object.keys(result)).toEqual(["none", "minimal", "low", "medium", "high", "xhigh"])
  1303. expect(result.low).toEqual({ reasoning: { effort: "low" } })
  1304. expect(result.high).toEqual({ reasoning: { effort: "high" } })
  1305. })
  1306. test("gemini-3 returns OPENAI_EFFORTS with reasoning", () => {
  1307. const model = createMockModel({
  1308. id: "openrouter/gemini-3-5-pro",
  1309. providerID: "openrouter",
  1310. api: {
  1311. id: "gemini-3-5-pro",
  1312. url: "https://openrouter.ai",
  1313. npm: "@openrouter/ai-sdk-provider",
  1314. },
  1315. })
  1316. const result = ProviderTransform.variants(model)
  1317. expect(Object.keys(result)).toEqual(["none", "minimal", "low", "medium", "high", "xhigh"])
  1318. })
  1319. test("grok-4 returns empty object", () => {
  1320. const model = createMockModel({
  1321. id: "openrouter/grok-4",
  1322. providerID: "openrouter",
  1323. api: {
  1324. id: "grok-4",
  1325. url: "https://openrouter.ai",
  1326. npm: "@openrouter/ai-sdk-provider",
  1327. },
  1328. })
  1329. const result = ProviderTransform.variants(model)
  1330. expect(result).toEqual({})
  1331. })
  1332. test("grok-3-mini returns low and high with reasoning", () => {
  1333. const model = createMockModel({
  1334. id: "openrouter/grok-3-mini",
  1335. providerID: "openrouter",
  1336. api: {
  1337. id: "grok-3-mini",
  1338. url: "https://openrouter.ai",
  1339. npm: "@openrouter/ai-sdk-provider",
  1340. },
  1341. })
  1342. const result = ProviderTransform.variants(model)
  1343. expect(Object.keys(result)).toEqual(["low", "high"])
  1344. expect(result.low).toEqual({ reasoning: { effort: "low" } })
  1345. expect(result.high).toEqual({ reasoning: { effort: "high" } })
  1346. })
  1347. })
  1348. describe("@ai-sdk/gateway", () => {
  1349. test("returns OPENAI_EFFORTS with reasoningEffort", () => {
  1350. const model = createMockModel({
  1351. id: "gateway/gateway-model",
  1352. providerID: "gateway",
  1353. api: {
  1354. id: "gateway-model",
  1355. url: "https://gateway.ai",
  1356. npm: "@ai-sdk/gateway",
  1357. },
  1358. })
  1359. const result = ProviderTransform.variants(model)
  1360. expect(Object.keys(result)).toEqual(["none", "minimal", "low", "medium", "high", "xhigh"])
  1361. expect(result.low).toEqual({ reasoningEffort: "low" })
  1362. expect(result.high).toEqual({ reasoningEffort: "high" })
  1363. })
  1364. })
  1365. describe("@ai-sdk/github-copilot", () => {
  1366. test("standard models return low, medium, high", () => {
  1367. const model = createMockModel({
  1368. id: "gpt-4.5",
  1369. providerID: "github-copilot",
  1370. api: {
  1371. id: "gpt-4.5",
  1372. url: "https://api.githubcopilot.com",
  1373. npm: "@ai-sdk/github-copilot",
  1374. },
  1375. })
  1376. const result = ProviderTransform.variants(model)
  1377. expect(Object.keys(result)).toEqual(["low", "medium", "high"])
  1378. expect(result.low).toEqual({
  1379. reasoningEffort: "low",
  1380. reasoningSummary: "auto",
  1381. include: ["reasoning.encrypted_content"],
  1382. })
  1383. })
  1384. test("gpt-5.1-codex-max includes xhigh", () => {
  1385. const model = createMockModel({
  1386. id: "gpt-5.1-codex-max",
  1387. providerID: "github-copilot",
  1388. api: {
  1389. id: "gpt-5.1-codex-max",
  1390. url: "https://api.githubcopilot.com",
  1391. npm: "@ai-sdk/github-copilot",
  1392. },
  1393. })
  1394. const result = ProviderTransform.variants(model)
  1395. expect(Object.keys(result)).toEqual(["low", "medium", "high", "xhigh"])
  1396. })
  1397. test("gpt-5.1-codex-mini does not include xhigh", () => {
  1398. const model = createMockModel({
  1399. id: "gpt-5.1-codex-mini",
  1400. providerID: "github-copilot",
  1401. api: {
  1402. id: "gpt-5.1-codex-mini",
  1403. url: "https://api.githubcopilot.com",
  1404. npm: "@ai-sdk/github-copilot",
  1405. },
  1406. })
  1407. const result = ProviderTransform.variants(model)
  1408. expect(Object.keys(result)).toEqual(["low", "medium", "high"])
  1409. })
  1410. test("gpt-5.1-codex does not include xhigh", () => {
  1411. const model = createMockModel({
  1412. id: "gpt-5.1-codex",
  1413. providerID: "github-copilot",
  1414. api: {
  1415. id: "gpt-5.1-codex",
  1416. url: "https://api.githubcopilot.com",
  1417. npm: "@ai-sdk/github-copilot",
  1418. },
  1419. })
  1420. const result = ProviderTransform.variants(model)
  1421. expect(Object.keys(result)).toEqual(["low", "medium", "high"])
  1422. })
  1423. test("gpt-5.2 includes xhigh", () => {
  1424. const model = createMockModel({
  1425. id: "gpt-5.2",
  1426. providerID: "github-copilot",
  1427. api: {
  1428. id: "gpt-5.2",
  1429. url: "https://api.githubcopilot.com",
  1430. npm: "@ai-sdk/github-copilot",
  1431. },
  1432. })
  1433. const result = ProviderTransform.variants(model)
  1434. expect(Object.keys(result)).toEqual(["low", "medium", "high", "xhigh"])
  1435. expect(result.xhigh).toEqual({
  1436. reasoningEffort: "xhigh",
  1437. reasoningSummary: "auto",
  1438. include: ["reasoning.encrypted_content"],
  1439. })
  1440. })
  1441. test("gpt-5.2-codex includes xhigh", () => {
  1442. const model = createMockModel({
  1443. id: "gpt-5.2-codex",
  1444. providerID: "github-copilot",
  1445. api: {
  1446. id: "gpt-5.2-codex",
  1447. url: "https://api.githubcopilot.com",
  1448. npm: "@ai-sdk/github-copilot",
  1449. },
  1450. })
  1451. const result = ProviderTransform.variants(model)
  1452. expect(Object.keys(result)).toEqual(["low", "medium", "high", "xhigh"])
  1453. })
  1454. })
  1455. describe("@ai-sdk/cerebras", () => {
  1456. test("returns WIDELY_SUPPORTED_EFFORTS with reasoningEffort", () => {
  1457. const model = createMockModel({
  1458. id: "cerebras/llama-4",
  1459. providerID: "cerebras",
  1460. api: {
  1461. id: "llama-4-sc",
  1462. url: "https://api.cerebras.ai",
  1463. npm: "@ai-sdk/cerebras",
  1464. },
  1465. })
  1466. const result = ProviderTransform.variants(model)
  1467. expect(Object.keys(result)).toEqual(["low", "medium", "high"])
  1468. expect(result.low).toEqual({ reasoningEffort: "low" })
  1469. expect(result.high).toEqual({ reasoningEffort: "high" })
  1470. })
  1471. })
  1472. describe("@ai-sdk/togetherai", () => {
  1473. test("returns WIDELY_SUPPORTED_EFFORTS with reasoningEffort", () => {
  1474. const model = createMockModel({
  1475. id: "togetherai/llama-4",
  1476. providerID: "togetherai",
  1477. api: {
  1478. id: "llama-4-sc",
  1479. url: "https://api.togetherai.com",
  1480. npm: "@ai-sdk/togetherai",
  1481. },
  1482. })
  1483. const result = ProviderTransform.variants(model)
  1484. expect(Object.keys(result)).toEqual(["low", "medium", "high"])
  1485. expect(result.low).toEqual({ reasoningEffort: "low" })
  1486. expect(result.high).toEqual({ reasoningEffort: "high" })
  1487. })
  1488. })
  1489. describe("@ai-sdk/xai", () => {
  1490. test("grok-3 returns empty object", () => {
  1491. const model = createMockModel({
  1492. id: "xai/grok-3",
  1493. providerID: "xai",
  1494. api: {
  1495. id: "grok-3",
  1496. url: "https://api.x.ai",
  1497. npm: "@ai-sdk/xai",
  1498. },
  1499. })
  1500. const result = ProviderTransform.variants(model)
  1501. expect(result).toEqual({})
  1502. })
  1503. test("grok-3-mini returns low and high with reasoningEffort", () => {
  1504. const model = createMockModel({
  1505. id: "xai/grok-3-mini",
  1506. providerID: "xai",
  1507. api: {
  1508. id: "grok-3-mini",
  1509. url: "https://api.x.ai",
  1510. npm: "@ai-sdk/xai",
  1511. },
  1512. })
  1513. const result = ProviderTransform.variants(model)
  1514. expect(Object.keys(result)).toEqual(["low", "high"])
  1515. expect(result.low).toEqual({ reasoningEffort: "low" })
  1516. expect(result.high).toEqual({ reasoningEffort: "high" })
  1517. })
  1518. })
  1519. describe("@ai-sdk/deepinfra", () => {
  1520. test("returns WIDELY_SUPPORTED_EFFORTS with reasoningEffort", () => {
  1521. const model = createMockModel({
  1522. id: "deepinfra/llama-4",
  1523. providerID: "deepinfra",
  1524. api: {
  1525. id: "llama-4-sc",
  1526. url: "https://api.deepinfra.com",
  1527. npm: "@ai-sdk/deepinfra",
  1528. },
  1529. })
  1530. const result = ProviderTransform.variants(model)
  1531. expect(Object.keys(result)).toEqual(["low", "medium", "high"])
  1532. expect(result.low).toEqual({ reasoningEffort: "low" })
  1533. expect(result.high).toEqual({ reasoningEffort: "high" })
  1534. })
  1535. })
  1536. describe("@ai-sdk/openai-compatible", () => {
  1537. test("returns WIDELY_SUPPORTED_EFFORTS with reasoningEffort", () => {
  1538. const model = createMockModel({
  1539. id: "custom-provider/custom-model",
  1540. providerID: "custom-provider",
  1541. api: {
  1542. id: "custom-model",
  1543. url: "https://api.custom.com",
  1544. npm: "@ai-sdk/openai-compatible",
  1545. },
  1546. })
  1547. const result = ProviderTransform.variants(model)
  1548. expect(Object.keys(result)).toEqual(["low", "medium", "high"])
  1549. expect(result.low).toEqual({ reasoningEffort: "low" })
  1550. expect(result.high).toEqual({ reasoningEffort: "high" })
  1551. })
  1552. })
  1553. describe("@ai-sdk/azure", () => {
  1554. test("o1-mini returns empty object", () => {
  1555. const model = createMockModel({
  1556. id: "o1-mini",
  1557. providerID: "azure",
  1558. api: {
  1559. id: "o1-mini",
  1560. url: "https://azure.com",
  1561. npm: "@ai-sdk/azure",
  1562. },
  1563. })
  1564. const result = ProviderTransform.variants(model)
  1565. expect(result).toEqual({})
  1566. })
  1567. test("standard azure models return custom efforts with reasoningSummary", () => {
  1568. const model = createMockModel({
  1569. id: "o1",
  1570. providerID: "azure",
  1571. api: {
  1572. id: "o1",
  1573. url: "https://azure.com",
  1574. npm: "@ai-sdk/azure",
  1575. },
  1576. })
  1577. const result = ProviderTransform.variants(model)
  1578. expect(Object.keys(result)).toEqual(["low", "medium", "high"])
  1579. expect(result.low).toEqual({
  1580. reasoningEffort: "low",
  1581. reasoningSummary: "auto",
  1582. include: ["reasoning.encrypted_content"],
  1583. })
  1584. })
  1585. test("gpt-5 adds minimal effort", () => {
  1586. const model = createMockModel({
  1587. id: "gpt-5",
  1588. providerID: "azure",
  1589. api: {
  1590. id: "gpt-5",
  1591. url: "https://azure.com",
  1592. npm: "@ai-sdk/azure",
  1593. },
  1594. })
  1595. const result = ProviderTransform.variants(model)
  1596. expect(Object.keys(result)).toEqual(["minimal", "low", "medium", "high"])
  1597. })
  1598. })
  1599. describe("@ai-sdk/openai", () => {
  1600. test("gpt-5-pro returns empty object", () => {
  1601. const model = createMockModel({
  1602. id: "gpt-5-pro",
  1603. providerID: "openai",
  1604. api: {
  1605. id: "gpt-5-pro",
  1606. url: "https://api.openai.com",
  1607. npm: "@ai-sdk/openai",
  1608. },
  1609. })
  1610. const result = ProviderTransform.variants(model)
  1611. expect(result).toEqual({})
  1612. })
  1613. test("standard openai models return custom efforts with reasoningSummary", () => {
  1614. const model = createMockModel({
  1615. id: "gpt-5",
  1616. providerID: "openai",
  1617. api: {
  1618. id: "gpt-5",
  1619. url: "https://api.openai.com",
  1620. npm: "@ai-sdk/openai",
  1621. },
  1622. release_date: "2024-06-01",
  1623. })
  1624. const result = ProviderTransform.variants(model)
  1625. expect(Object.keys(result)).toEqual(["minimal", "low", "medium", "high"])
  1626. expect(result.low).toEqual({
  1627. reasoningEffort: "low",
  1628. reasoningSummary: "auto",
  1629. include: ["reasoning.encrypted_content"],
  1630. })
  1631. })
  1632. test("models after 2025-11-13 include 'none' effort", () => {
  1633. const model = createMockModel({
  1634. id: "gpt-5-nano",
  1635. providerID: "openai",
  1636. api: {
  1637. id: "gpt-5-nano",
  1638. url: "https://api.openai.com",
  1639. npm: "@ai-sdk/openai",
  1640. },
  1641. release_date: "2025-11-14",
  1642. })
  1643. const result = ProviderTransform.variants(model)
  1644. expect(Object.keys(result)).toEqual(["none", "minimal", "low", "medium", "high"])
  1645. })
  1646. test("models after 2025-12-04 include 'xhigh' effort", () => {
  1647. const model = createMockModel({
  1648. id: "openai/gpt-5-chat",
  1649. providerID: "openai",
  1650. api: {
  1651. id: "gpt-5-chat",
  1652. url: "https://api.openai.com",
  1653. npm: "@ai-sdk/openai",
  1654. },
  1655. release_date: "2025-12-05",
  1656. })
  1657. const result = ProviderTransform.variants(model)
  1658. expect(Object.keys(result)).toEqual(["none", "minimal", "low", "medium", "high", "xhigh"])
  1659. })
  1660. })
  1661. describe("@ai-sdk/anthropic", () => {
  1662. test("returns high and max with thinking config", () => {
  1663. const model = createMockModel({
  1664. id: "anthropic/claude-4",
  1665. providerID: "anthropic",
  1666. api: {
  1667. id: "claude-4",
  1668. url: "https://api.anthropic.com",
  1669. npm: "@ai-sdk/anthropic",
  1670. },
  1671. })
  1672. const result = ProviderTransform.variants(model)
  1673. expect(Object.keys(result)).toEqual(["high", "max"])
  1674. expect(result.high).toEqual({
  1675. thinking: {
  1676. type: "enabled",
  1677. budgetTokens: 16000,
  1678. },
  1679. })
  1680. expect(result.max).toEqual({
  1681. thinking: {
  1682. type: "enabled",
  1683. budgetTokens: 31999,
  1684. },
  1685. })
  1686. })
  1687. })
  1688. describe("@ai-sdk/amazon-bedrock", () => {
  1689. test("returns WIDELY_SUPPORTED_EFFORTS with reasoningConfig", () => {
  1690. const model = createMockModel({
  1691. id: "bedrock/llama-4",
  1692. providerID: "bedrock",
  1693. api: {
  1694. id: "llama-4-sc",
  1695. url: "https://bedrock.amazonaws.com",
  1696. npm: "@ai-sdk/amazon-bedrock",
  1697. },
  1698. })
  1699. const result = ProviderTransform.variants(model)
  1700. expect(Object.keys(result)).toEqual(["low", "medium", "high"])
  1701. expect(result.low).toEqual({
  1702. reasoningConfig: {
  1703. type: "enabled",
  1704. maxReasoningEffort: "low",
  1705. },
  1706. })
  1707. })
  1708. })
  1709. describe("@ai-sdk/google", () => {
  1710. test("gemini-2.5 returns high and max with thinkingConfig and thinkingBudget", () => {
  1711. const model = createMockModel({
  1712. id: "google/gemini-2.5-pro",
  1713. providerID: "google",
  1714. api: {
  1715. id: "gemini-2.5-pro",
  1716. url: "https://generativelanguage.googleapis.com",
  1717. npm: "@ai-sdk/google",
  1718. },
  1719. })
  1720. const result = ProviderTransform.variants(model)
  1721. expect(Object.keys(result)).toEqual(["high", "max"])
  1722. expect(result.high).toEqual({
  1723. thinkingConfig: {
  1724. includeThoughts: true,
  1725. thinkingBudget: 16000,
  1726. },
  1727. })
  1728. expect(result.max).toEqual({
  1729. thinkingConfig: {
  1730. includeThoughts: true,
  1731. thinkingBudget: 24576,
  1732. },
  1733. })
  1734. })
  1735. test("other gemini models return low and high with thinkingLevel", () => {
  1736. const model = createMockModel({
  1737. id: "google/gemini-2.0-pro",
  1738. providerID: "google",
  1739. api: {
  1740. id: "gemini-2.0-pro",
  1741. url: "https://generativelanguage.googleapis.com",
  1742. npm: "@ai-sdk/google",
  1743. },
  1744. })
  1745. const result = ProviderTransform.variants(model)
  1746. expect(Object.keys(result)).toEqual(["low", "high"])
  1747. expect(result.low).toEqual({
  1748. includeThoughts: true,
  1749. thinkingLevel: "low",
  1750. })
  1751. expect(result.high).toEqual({
  1752. includeThoughts: true,
  1753. thinkingLevel: "high",
  1754. })
  1755. })
  1756. })
  1757. describe("@ai-sdk/google-vertex", () => {
  1758. test("gemini-2.5 returns high and max with thinkingConfig and thinkingBudget", () => {
  1759. const model = createMockModel({
  1760. id: "google-vertex/gemini-2.5-pro",
  1761. providerID: "google-vertex",
  1762. api: {
  1763. id: "gemini-2.5-pro",
  1764. url: "https://vertexai.googleapis.com",
  1765. npm: "@ai-sdk/google-vertex",
  1766. },
  1767. })
  1768. const result = ProviderTransform.variants(model)
  1769. expect(Object.keys(result)).toEqual(["high", "max"])
  1770. })
  1771. test("other vertex models return low and high with thinkingLevel", () => {
  1772. const model = createMockModel({
  1773. id: "google-vertex/gemini-2.0-pro",
  1774. providerID: "google-vertex",
  1775. api: {
  1776. id: "gemini-2.0-pro",
  1777. url: "https://vertexai.googleapis.com",
  1778. npm: "@ai-sdk/google-vertex",
  1779. },
  1780. })
  1781. const result = ProviderTransform.variants(model)
  1782. expect(Object.keys(result)).toEqual(["low", "high"])
  1783. })
  1784. })
  1785. describe("@ai-sdk/cohere", () => {
  1786. test("returns empty object", () => {
  1787. const model = createMockModel({
  1788. id: "cohere/command-r",
  1789. providerID: "cohere",
  1790. api: {
  1791. id: "command-r",
  1792. url: "https://api.cohere.com",
  1793. npm: "@ai-sdk/cohere",
  1794. },
  1795. })
  1796. const result = ProviderTransform.variants(model)
  1797. expect(result).toEqual({})
  1798. })
  1799. })
  1800. describe("@ai-sdk/groq", () => {
  1801. test("returns none and WIDELY_SUPPORTED_EFFORTS with thinkingLevel", () => {
  1802. const model = createMockModel({
  1803. id: "groq/llama-4",
  1804. providerID: "groq",
  1805. api: {
  1806. id: "llama-4-sc",
  1807. url: "https://api.groq.com",
  1808. npm: "@ai-sdk/groq",
  1809. },
  1810. })
  1811. const result = ProviderTransform.variants(model)
  1812. expect(Object.keys(result)).toEqual(["none", "low", "medium", "high"])
  1813. expect(result.none).toEqual({
  1814. includeThoughts: true,
  1815. thinkingLevel: "none",
  1816. })
  1817. expect(result.low).toEqual({
  1818. includeThoughts: true,
  1819. thinkingLevel: "low",
  1820. })
  1821. })
  1822. })
  1823. describe("@ai-sdk/perplexity", () => {
  1824. test("returns empty object", () => {
  1825. const model = createMockModel({
  1826. id: "perplexity/sonar-plus",
  1827. providerID: "perplexity",
  1828. api: {
  1829. id: "sonar-plus",
  1830. url: "https://api.perplexity.ai",
  1831. npm: "@ai-sdk/perplexity",
  1832. },
  1833. })
  1834. const result = ProviderTransform.variants(model)
  1835. expect(result).toEqual({})
  1836. })
  1837. })
  1838. })