// transform.test.ts

import { describe, expect, test } from "bun:test"
import { ProviderTransform } from "../../src/provider/transform"

const OUTPUT_TOKEN_MAX = 32000
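
// ProviderTransform.options: promptCacheKey is set to the session ID only when the
// provider opts in via providerOptions.setCacheKey; OpenAI models always get it and
// are additionally sent with store=false.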
describe("ProviderTransform.options - setCacheKey", () => {
  const sessionID = "test-session-123"
  const mockModel = {
    id: "anthropic/claude-3-5-sonnet",
    providerID: "anthropic",
    api: {
      id: "claude-3-5-sonnet-20241022",
      url: "https://api.anthropic.com",
      npm: "@ai-sdk/anthropic",
    },
    name: "Claude 3.5 Sonnet",
    capabilities: {
      temperature: true,
      reasoning: false,
      attachment: true,
      toolcall: true,
      input: { text: true, audio: false, image: true, video: false, pdf: true },
      output: { text: true, audio: false, image: false, video: false, pdf: false },
      interleaved: false,
    },
    cost: {
      input: 0.003,
      output: 0.015,
      cache: { read: 0.0003, write: 0.00375 },
    },
    limit: {
      context: 200000,
      output: 8192,
    },
    status: "active",
    options: {},
    headers: {},
  } as any
  test("should set promptCacheKey when providerOptions.setCacheKey is true", () => {
    const result = ProviderTransform.options({
      model: mockModel,
      sessionID,
      providerOptions: { setCacheKey: true },
    })
    expect(result.promptCacheKey).toBe(sessionID)
  })
  test("should not set promptCacheKey when providerOptions.setCacheKey is false", () => {
    const result = ProviderTransform.options({
      model: mockModel,
      sessionID,
      providerOptions: { setCacheKey: false },
    })
    expect(result.promptCacheKey).toBeUndefined()
  })
  test("should not set promptCacheKey when providerOptions is undefined", () => {
    const result = ProviderTransform.options({
      model: mockModel,
      sessionID,
      providerOptions: undefined,
    })
    expect(result.promptCacheKey).toBeUndefined()
  })
  test("should not set promptCacheKey when providerOptions does not have setCacheKey", () => {
    const result = ProviderTransform.options({ model: mockModel, sessionID, providerOptions: {} })
    expect(result.promptCacheKey).toBeUndefined()
  })
  test("should set promptCacheKey for openai provider regardless of setCacheKey", () => {
    const openaiModel = {
      ...mockModel,
      providerID: "openai",
      api: {
        id: "gpt-4",
        url: "https://api.openai.com",
        npm: "@ai-sdk/openai",
      },
    }
    const result = ProviderTransform.options({ model: openaiModel, sessionID, providerOptions: {} })
    expect(result.promptCacheKey).toBe(sessionID)
  })
  test("should set store=false for openai provider", () => {
    const openaiModel = {
      ...mockModel,
      providerID: "openai",
      api: {
        id: "gpt-4",
        url: "https://api.openai.com",
        npm: "@ai-sdk/openai",
      },
    }
    const result = ProviderTransform.options({
      model: openaiModel,
      sessionID,
      providerOptions: {},
    })
    expect(result.store).toBe(false)
  })
})
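
// ProviderTransform.maxOutputTokens: capped at OUTPUT_TOKEN_MAX (32k) or the model's
// output limit, whichever is smaller; for Anthropic with thinking enabled the cap also
// leaves room for the configured thinking budget.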
describe("ProviderTransform.maxOutputTokens", () => {
  test("returns 32k when modelLimit > 32k", () => {
    const modelLimit = 100000
    const result = ProviderTransform.maxOutputTokens("@ai-sdk/openai", {}, modelLimit, OUTPUT_TOKEN_MAX)
    expect(result).toBe(OUTPUT_TOKEN_MAX)
  })
  test("returns modelLimit when modelLimit < 32k", () => {
    const modelLimit = 16000
    const result = ProviderTransform.maxOutputTokens("@ai-sdk/openai", {}, modelLimit, OUTPUT_TOKEN_MAX)
    expect(result).toBe(16000)
  })
  describe("azure", () => {
    test("returns 32k when modelLimit > 32k", () => {
      const modelLimit = 100000
      const result = ProviderTransform.maxOutputTokens("@ai-sdk/azure", {}, modelLimit, OUTPUT_TOKEN_MAX)
      expect(result).toBe(OUTPUT_TOKEN_MAX)
    })
    test("returns modelLimit when modelLimit < 32k", () => {
      const modelLimit = 16000
      const result = ProviderTransform.maxOutputTokens("@ai-sdk/azure", {}, modelLimit, OUTPUT_TOKEN_MAX)
      expect(result).toBe(16000)
    })
  })
  describe("bedrock", () => {
    test("returns 32k when modelLimit > 32k", () => {
      const modelLimit = 100000
      const result = ProviderTransform.maxOutputTokens("@ai-sdk/amazon-bedrock", {}, modelLimit, OUTPUT_TOKEN_MAX)
      expect(result).toBe(OUTPUT_TOKEN_MAX)
    })
    test("returns modelLimit when modelLimit < 32k", () => {
      const modelLimit = 16000
      const result = ProviderTransform.maxOutputTokens("@ai-sdk/amazon-bedrock", {}, modelLimit, OUTPUT_TOKEN_MAX)
      expect(result).toBe(16000)
    })
  })
  describe("anthropic without thinking options", () => {
    test("returns 32k when modelLimit > 32k", () => {
      const modelLimit = 100000
      const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", {}, modelLimit, OUTPUT_TOKEN_MAX)
      expect(result).toBe(OUTPUT_TOKEN_MAX)
    })
    test("returns modelLimit when modelLimit < 32k", () => {
      const modelLimit = 16000
      const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", {}, modelLimit, OUTPUT_TOKEN_MAX)
      expect(result).toBe(16000)
    })
  })
  describe("anthropic with thinking options", () => {
    test("returns 32k when budgetTokens + 32k <= modelLimit", () => {
      const modelLimit = 100000
      const options = {
        thinking: {
          type: "enabled",
          budgetTokens: 10000,
        },
      }
      const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", options, modelLimit, OUTPUT_TOKEN_MAX)
      expect(result).toBe(OUTPUT_TOKEN_MAX)
    })
    test("returns modelLimit - budgetTokens when budgetTokens + 32k > modelLimit", () => {
      const modelLimit = 50000
      const options = {
        thinking: {
          type: "enabled",
          budgetTokens: 30000,
        },
      }
      const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", options, modelLimit, OUTPUT_TOKEN_MAX)
      expect(result).toBe(20000)
    })
    test("returns 32k when thinking type is not enabled", () => {
      const modelLimit = 100000
      const options = {
        thinking: {
          type: "disabled",
          budgetTokens: 10000,
        },
      }
      const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", options, modelLimit, OUTPUT_TOKEN_MAX)
      expect(result).toBe(OUTPUT_TOKEN_MAX)
    })
  })
})
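
// ProviderTransform.schema: for Gemini models, array properties that omit `items` get a
// default `items` injected; arrays that already declare `items` are left as-is.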
describe("ProviderTransform.schema - gemini array items", () => {
  test("adds missing items for array properties", () => {
    const geminiModel = {
      providerID: "google",
      api: {
        id: "gemini-3-pro",
      },
    } as any
    const schema = {
      type: "object",
      properties: {
        nodes: { type: "array" },
        edges: { type: "array", items: { type: "string" } },
      },
    } as any
    const result = ProviderTransform.schema(geminiModel, schema) as any
    expect(result.properties.nodes.items).toBeDefined()
    expect(result.properties.edges.items.type).toBe("string")
  })
})
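
// ProviderTransform.message, DeepSeek: when an assistant turn mixes reasoning parts with
// tool calls, the reasoning text is moved out of the content array and into
// providerOptions.openaiCompatible.reasoning_content; other providers pass reasoning
// parts through unchanged.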
describe("ProviderTransform.message - DeepSeek reasoning content", () => {
  test("DeepSeek with tool calls includes reasoning_content in providerOptions", () => {
    const msgs = [
      {
        role: "assistant",
        content: [
          { type: "reasoning", text: "Let me think about this..." },
          {
            type: "tool-call",
            toolCallId: "test",
            toolName: "bash",
            input: { command: "echo hello" },
          },
        ],
      },
    ] as any[]
    const result = ProviderTransform.message(
      msgs,
      {
        id: "deepseek/deepseek-chat",
        providerID: "deepseek",
        api: {
          id: "deepseek-chat",
          url: "https://api.deepseek.com",
          npm: "@ai-sdk/openai-compatible",
        },
        name: "DeepSeek Chat",
        capabilities: {
          temperature: true,
          reasoning: true,
          attachment: false,
          toolcall: true,
          input: { text: true, audio: false, image: false, video: false, pdf: false },
          output: { text: true, audio: false, image: false, video: false, pdf: false },
          interleaved: {
            field: "reasoning_content",
          },
        },
        cost: {
          input: 0.001,
          output: 0.002,
          cache: { read: 0.0001, write: 0.0002 },
        },
        limit: {
          context: 128000,
          output: 8192,
        },
        status: "active",
        options: {},
        headers: {},
        release_date: "2023-04-01",
      },
      {},
    )
    expect(result).toHaveLength(1)
    expect(result[0].content).toEqual([
      {
        type: "tool-call",
        toolCallId: "test",
        toolName: "bash",
        input: { command: "echo hello" },
      },
    ])
    expect(result[0].providerOptions?.openaiCompatible?.reasoning_content).toBe("Let me think about this...")
  })
  test("Non-DeepSeek providers leave reasoning content unchanged", () => {
    const msgs = [
      {
        role: "assistant",
        content: [
          { type: "reasoning", text: "Should not be processed" },
          { type: "text", text: "Answer" },
        ],
      },
    ] as any[]
    const result = ProviderTransform.message(
      msgs,
      {
        id: "openai/gpt-4",
        providerID: "openai",
        api: {
          id: "gpt-4",
          url: "https://api.openai.com",
          npm: "@ai-sdk/openai",
        },
        name: "GPT-4",
        capabilities: {
          temperature: true,
          reasoning: false,
          attachment: true,
          toolcall: true,
          input: { text: true, audio: false, image: true, video: false, pdf: false },
          output: { text: true, audio: false, image: false, video: false, pdf: false },
          interleaved: false,
        },
        cost: {
          input: 0.03,
          output: 0.06,
          cache: { read: 0.001, write: 0.002 },
        },
        limit: {
          context: 128000,
          output: 4096,
        },
        status: "active",
        options: {},
        headers: {},
        release_date: "2023-04-01",
      },
      {},
    )
    expect(result[0].content).toEqual([
      { type: "reasoning", text: "Should not be processed" },
      { type: "text", text: "Answer" },
    ])
    expect(result[0].providerOptions?.openaiCompatible?.reasoning_content).toBeUndefined()
  })
})
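
// ProviderTransform.message: image parts whose base64 payload is empty are replaced with
// an explanatory error text part; valid images are passed through untouched.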
describe("ProviderTransform.message - empty image handling", () => {
  const mockModel = {
    id: "anthropic/claude-3-5-sonnet",
    providerID: "anthropic",
    api: {
      id: "claude-3-5-sonnet-20241022",
      url: "https://api.anthropic.com",
      npm: "@ai-sdk/anthropic",
    },
    name: "Claude 3.5 Sonnet",
    capabilities: {
      temperature: true,
      reasoning: false,
      attachment: true,
      toolcall: true,
      input: { text: true, audio: false, image: true, video: false, pdf: true },
      output: { text: true, audio: false, image: false, video: false, pdf: false },
      interleaved: false,
    },
    cost: {
      input: 0.003,
      output: 0.015,
      cache: { read: 0.0003, write: 0.00375 },
    },
    limit: {
      context: 200000,
      output: 8192,
    },
    status: "active",
    options: {},
    headers: {},
  } as any
  test("should replace empty base64 image with error text", () => {
    const msgs = [
      {
        role: "user",
        content: [
          { type: "text", text: "What is in this image?" },
          { type: "image", image: "data:image/png;base64," },
        ],
      },
    ] as any[]
    const result = ProviderTransform.message(msgs, mockModel, {})
    expect(result).toHaveLength(1)
    expect(result[0].content).toHaveLength(2)
    expect(result[0].content[0]).toEqual({ type: "text", text: "What is in this image?" })
    expect(result[0].content[1]).toEqual({
      type: "text",
      text: "ERROR: Image file is empty or corrupted. Please provide a valid image.",
    })
  })
  test("should keep valid base64 images unchanged", () => {
    const validBase64 =
      "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNk+M9QDwADhgGAWjR9awAAAABJRU5ErkJggg=="
    const msgs = [
      {
        role: "user",
        content: [
          { type: "text", text: "What is in this image?" },
          { type: "image", image: `data:image/png;base64,${validBase64}` },
        ],
      },
    ] as any[]
    const result = ProviderTransform.message(msgs, mockModel, {})
    expect(result).toHaveLength(1)
    expect(result[0].content).toHaveLength(2)
    expect(result[0].content[0]).toEqual({ type: "text", text: "What is in this image?" })
    expect(result[0].content[1]).toEqual({ type: "image", image: `data:image/png;base64,${validBase64}` })
  })
  test("should handle mixed valid and empty images", () => {
    const validBase64 =
      "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNk+M9QDwADhgGAWjR9awAAAABJRU5ErkJggg=="
    const msgs = [
      {
        role: "user",
        content: [
          { type: "text", text: "Compare these images" },
          { type: "image", image: `data:image/png;base64,${validBase64}` },
          { type: "image", image: "data:image/jpeg;base64," },
        ],
      },
    ] as any[]
    const result = ProviderTransform.message(msgs, mockModel, {})
    expect(result).toHaveLength(1)
    expect(result[0].content).toHaveLength(3)
    expect(result[0].content[0]).toEqual({ type: "text", text: "Compare these images" })
    expect(result[0].content[1]).toEqual({ type: "image", image: `data:image/png;base64,${validBase64}` })
    expect(result[0].content[2]).toEqual({
      type: "text",
      text: "ERROR: Image file is empty or corrupted. Please provide a valid image.",
    })
  })
})
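
// ProviderTransform.message, Anthropic: empty string messages and empty text/reasoning
// parts are filtered out (a message is dropped entirely when nothing remains);
// other providers are not filtered.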
describe("ProviderTransform.message - anthropic empty content filtering", () => {
  const anthropicModel = {
    id: "anthropic/claude-3-5-sonnet",
    providerID: "anthropic",
    api: {
      id: "claude-3-5-sonnet-20241022",
      url: "https://api.anthropic.com",
      npm: "@ai-sdk/anthropic",
    },
    name: "Claude 3.5 Sonnet",
    capabilities: {
      temperature: true,
      reasoning: false,
      attachment: true,
      toolcall: true,
      input: { text: true, audio: false, image: true, video: false, pdf: true },
      output: { text: true, audio: false, image: false, video: false, pdf: false },
      interleaved: false,
    },
    cost: {
      input: 0.003,
      output: 0.015,
      cache: { read: 0.0003, write: 0.00375 },
    },
    limit: {
      context: 200000,
      output: 8192,
    },
    status: "active",
    options: {},
    headers: {},
  } as any
  test("filters out messages with empty string content", () => {
    const msgs = [
      { role: "user", content: "Hello" },
      { role: "assistant", content: "" },
      { role: "user", content: "World" },
    ] as any[]
    const result = ProviderTransform.message(msgs, anthropicModel, {})
    expect(result).toHaveLength(2)
    expect(result[0].content).toBe("Hello")
    expect(result[1].content).toBe("World")
  })
  test("filters out empty text parts from array content", () => {
    const msgs = [
      {
        role: "assistant",
        content: [
          { type: "text", text: "" },
          { type: "text", text: "Hello" },
          { type: "text", text: "" },
        ],
      },
    ] as any[]
    const result = ProviderTransform.message(msgs, anthropicModel, {})
    expect(result).toHaveLength(1)
    expect(result[0].content).toHaveLength(1)
    expect(result[0].content[0]).toEqual({ type: "text", text: "Hello" })
  })
  test("filters out empty reasoning parts from array content", () => {
    const msgs = [
      {
        role: "assistant",
        content: [
          { type: "reasoning", text: "" },
          { type: "text", text: "Answer" },
          { type: "reasoning", text: "" },
        ],
      },
    ] as any[]
    const result = ProviderTransform.message(msgs, anthropicModel, {})
    expect(result).toHaveLength(1)
    expect(result[0].content).toHaveLength(1)
    expect(result[0].content[0]).toEqual({ type: "text", text: "Answer" })
  })
  test("removes entire message when all parts are empty", () => {
    const msgs = [
      { role: "user", content: "Hello" },
      {
        role: "assistant",
        content: [
          { type: "text", text: "" },
          { type: "reasoning", text: "" },
        ],
      },
      { role: "user", content: "World" },
    ] as any[]
    const result = ProviderTransform.message(msgs, anthropicModel, {})
    expect(result).toHaveLength(2)
    expect(result[0].content).toBe("Hello")
    expect(result[1].content).toBe("World")
  })
  test("keeps non-text/reasoning parts even if text parts are empty", () => {
    const msgs = [
      {
        role: "assistant",
        content: [
          { type: "text", text: "" },
          { type: "tool-call", toolCallId: "123", toolName: "bash", input: { command: "ls" } },
        ],
      },
    ] as any[]
    const result = ProviderTransform.message(msgs, anthropicModel, {})
    expect(result).toHaveLength(1)
    expect(result[0].content).toHaveLength(1)
    expect(result[0].content[0]).toEqual({
      type: "tool-call",
      toolCallId: "123",
      toolName: "bash",
      input: { command: "ls" },
    })
  })
  test("keeps messages with valid text alongside empty parts", () => {
    const msgs = [
      {
        role: "assistant",
        content: [
          { type: "reasoning", text: "Thinking..." },
          { type: "text", text: "" },
          { type: "text", text: "Result" },
        ],
      },
    ] as any[]
    const result = ProviderTransform.message(msgs, anthropicModel, {})
    expect(result).toHaveLength(1)
    expect(result[0].content).toHaveLength(2)
    expect(result[0].content[0]).toEqual({ type: "reasoning", text: "Thinking..." })
    expect(result[0].content[1]).toEqual({ type: "text", text: "Result" })
  })
  test("does not filter for non-anthropic providers", () => {
    const openaiModel = {
      ...anthropicModel,
      providerID: "openai",
      api: {
        id: "gpt-4",
        url: "https://api.openai.com",
        npm: "@ai-sdk/openai",
      },
    }
    const msgs = [
      { role: "assistant", content: "" },
      {
        role: "assistant",
        content: [{ type: "text", text: "" }],
      },
    ] as any[]
    const result = ProviderTransform.message(msgs, openaiModel, {})
    expect(result).toHaveLength(2)
    expect(result[0].content).toBe("")
    expect(result[1].content).toHaveLength(1)
  })
})
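
// ProviderTransform.message: OpenAI response metadata (itemId, reasoningEncryptedContent)
// is stripped from providerOptions.openai: always for the @ai-sdk/openai package, and for
// other packages only when store === false. Other openai options are preserved.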
describe("ProviderTransform.message - strip openai metadata when store=false", () => {
  const openaiModel = {
    id: "openai/gpt-5",
    providerID: "openai",
    api: {
      id: "gpt-5",
      url: "https://api.openai.com",
      npm: "@ai-sdk/openai",
    },
    name: "GPT-5",
    capabilities: {
      temperature: true,
      reasoning: true,
      attachment: true,
      toolcall: true,
      input: { text: true, audio: false, image: true, video: false, pdf: false },
      output: { text: true, audio: false, image: false, video: false, pdf: false },
      interleaved: false,
    },
    cost: { input: 0.03, output: 0.06, cache: { read: 0.001, write: 0.002 } },
    limit: { context: 128000, output: 4096 },
    status: "active",
    options: {},
    headers: {},
  } as any
  test("strips itemId and reasoningEncryptedContent when store=false", () => {
    const msgs = [
      {
        role: "assistant",
        content: [
          {
            type: "reasoning",
            text: "thinking...",
            providerOptions: {
              openai: {
                itemId: "rs_123",
                reasoningEncryptedContent: "encrypted",
              },
            },
          },
          {
            type: "text",
            text: "Hello",
            providerOptions: {
              openai: {
                itemId: "msg_456",
              },
            },
          },
        ],
      },
    ] as any[]
    const result = ProviderTransform.message(msgs, openaiModel, { store: false }) as any[]
    expect(result).toHaveLength(1)
    expect(result[0].content[0].providerOptions?.openai?.itemId).toBeUndefined()
    expect(result[0].content[1].providerOptions?.openai?.itemId).toBeUndefined()
  })
  test("strips itemId and reasoningEncryptedContent when store=false even when not openai", () => {
    const zenModel = {
      ...openaiModel,
      providerID: "zen",
    }
    const msgs = [
      {
        role: "assistant",
        content: [
          {
            type: "reasoning",
            text: "thinking...",
            providerOptions: {
              openai: {
                itemId: "rs_123",
                reasoningEncryptedContent: "encrypted",
              },
            },
          },
          {
            type: "text",
            text: "Hello",
            providerOptions: {
              openai: {
                itemId: "msg_456",
              },
            },
          },
        ],
      },
    ] as any[]
    const result = ProviderTransform.message(msgs, zenModel, { store: false }) as any[]
    expect(result).toHaveLength(1)
    expect(result[0].content[0].providerOptions?.openai?.itemId).toBeUndefined()
    expect(result[0].content[1].providerOptions?.openai?.itemId).toBeUndefined()
  })
  test("preserves other openai options when stripping itemId", () => {
    const msgs = [
      {
        role: "assistant",
        content: [
          {
            type: "text",
            text: "Hello",
            providerOptions: {
              openai: {
                itemId: "msg_123",
                otherOption: "value",
              },
            },
          },
        ],
      },
    ] as any[]
    const result = ProviderTransform.message(msgs, openaiModel, { store: false }) as any[]
    expect(result[0].content[0].providerOptions?.openai?.itemId).toBeUndefined()
    expect(result[0].content[0].providerOptions?.openai?.otherOption).toBe("value")
  })
  test("strips metadata for openai package even when store is true", () => {
    const msgs = [
      {
        role: "assistant",
        content: [
          {
            type: "text",
            text: "Hello",
            providerOptions: {
              openai: {
                itemId: "msg_123",
              },
            },
          },
        ],
      },
    ] as any[]
    // openai package always strips itemId regardless of store value
    const result = ProviderTransform.message(msgs, openaiModel, { store: true }) as any[]
    expect(result[0].content[0].providerOptions?.openai?.itemId).toBeUndefined()
  })
  test("strips metadata for non-openai packages when store is false", () => {
    const anthropicModel = {
      ...openaiModel,
      providerID: "anthropic",
      api: {
        id: "claude-3",
        url: "https://api.anthropic.com",
        npm: "@ai-sdk/anthropic",
      },
    }
    const msgs = [
      {
        role: "assistant",
        content: [
          {
            type: "text",
            text: "Hello",
            providerOptions: {
              openai: {
                itemId: "msg_123",
              },
            },
          },
        ],
      },
    ] as any[]
    // store=false triggers stripping even for non-openai packages
    const result = ProviderTransform.message(msgs, anthropicModel, { store: false }) as any[]
    expect(result[0].content[0].providerOptions?.openai?.itemId).toBeUndefined()
  })
  test("does not strip metadata for non-openai packages when store is not false", () => {
    const anthropicModel = {
      ...openaiModel,
      providerID: "anthropic",
      api: {
        id: "claude-3",
        url: "https://api.anthropic.com",
        npm: "@ai-sdk/anthropic",
      },
    }
    const msgs = [
      {
        role: "assistant",
        content: [
          {
            type: "text",
            text: "Hello",
            providerOptions: {
              openai: {
                itemId: "msg_123",
              },
            },
          },
        ],
      },
    ] as any[]
    const result = ProviderTransform.message(msgs, anthropicModel, {}) as any[]
    expect(result[0].content[0].providerOptions?.openai?.itemId).toBe("msg_123")
  })
})
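
// ProviderTransform.variants: maps each provider SDK to its supported reasoning-effort
// presets (reasoningEffort levels, Anthropic thinking budgets, Gemini thinkingConfig /
// thinkingLevel, Bedrock reasoningConfig); models without reasoning capability and
// certain providers return an empty object.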
describe("ProviderTransform.variants", () => {
  const createMockModel = (overrides: Partial<any> = {}): any => ({
    id: "test/test-model",
    providerID: "test",
    api: {
      id: "test-model",
      url: "https://api.test.com",
      npm: "@ai-sdk/openai",
    },
    name: "Test Model",
    capabilities: {
      temperature: true,
      reasoning: true,
      attachment: true,
      toolcall: true,
      input: { text: true, audio: false, image: true, video: false, pdf: false },
      output: { text: true, audio: false, image: false, video: false, pdf: false },
      interleaved: false,
    },
    cost: {
      input: 0.001,
      output: 0.002,
      cache: { read: 0.0001, write: 0.0002 },
    },
    limit: {
      context: 128000,
      output: 8192,
    },
    status: "active",
    options: {},
    headers: {},
    release_date: "2024-01-01",
    ...overrides,
  })
  test("returns empty object when model has no reasoning capabilities", () => {
    const model = createMockModel({
      capabilities: { reasoning: false },
    })
    const result = ProviderTransform.variants(model)
    expect(result).toEqual({})
  })
  test("deepseek returns empty object", () => {
    const model = createMockModel({
      id: "deepseek/deepseek-chat",
      providerID: "deepseek",
      api: {
        id: "deepseek-chat",
        url: "https://api.deepseek.com",
        npm: "@ai-sdk/openai-compatible",
      },
    })
    const result = ProviderTransform.variants(model)
    expect(result).toEqual({})
  })
  test("minimax returns empty object", () => {
    const model = createMockModel({
      id: "minimax/minimax-model",
      providerID: "minimax",
      api: {
        id: "minimax-model",
        url: "https://api.minimax.com",
        npm: "@ai-sdk/openai-compatible",
      },
    })
    const result = ProviderTransform.variants(model)
    expect(result).toEqual({})
  })
  test("glm returns empty object", () => {
    const model = createMockModel({
      id: "glm/glm-4",
      providerID: "glm",
      api: {
        id: "glm-4",
        url: "https://api.glm.com",
        npm: "@ai-sdk/openai-compatible",
      },
    })
    const result = ProviderTransform.variants(model)
    expect(result).toEqual({})
  })
  test("mistral returns empty object", () => {
    const model = createMockModel({
      id: "mistral/mistral-large",
      providerID: "mistral",
      api: {
        id: "mistral-large-latest",
        url: "https://api.mistral.com",
        npm: "@ai-sdk/mistral",
      },
    })
    const result = ProviderTransform.variants(model)
    expect(result).toEqual({})
  })
  describe("@openrouter/ai-sdk-provider", () => {
    test("returns empty object for non-qualifying models", () => {
      const model = createMockModel({
        id: "openrouter/test-model",
        providerID: "openrouter",
        api: {
          id: "test-model",
          url: "https://openrouter.ai",
          npm: "@openrouter/ai-sdk-provider",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(result).toEqual({})
    })
    test("gpt models return OPENAI_EFFORTS with reasoning", () => {
      const model = createMockModel({
        id: "openrouter/gpt-4",
        providerID: "openrouter",
        api: {
          id: "gpt-4",
          url: "https://openrouter.ai",
          npm: "@openrouter/ai-sdk-provider",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(Object.keys(result)).toEqual(["none", "minimal", "low", "medium", "high", "xhigh"])
      expect(result.low).toEqual({ reasoning: { effort: "low" } })
      expect(result.high).toEqual({ reasoning: { effort: "high" } })
    })
    test("gemini-3 returns OPENAI_EFFORTS with reasoning", () => {
      const model = createMockModel({
        id: "openrouter/gemini-3-5-pro",
        providerID: "openrouter",
        api: {
          id: "gemini-3-5-pro",
          url: "https://openrouter.ai",
          npm: "@openrouter/ai-sdk-provider",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(Object.keys(result)).toEqual(["none", "minimal", "low", "medium", "high", "xhigh"])
    })
    test("grok-4 returns OPENAI_EFFORTS with reasoning", () => {
      const model = createMockModel({
        id: "openrouter/grok-4",
        providerID: "openrouter",
        api: {
          id: "grok-4",
          url: "https://openrouter.ai",
          npm: "@openrouter/ai-sdk-provider",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(Object.keys(result)).toEqual(["none", "minimal", "low", "medium", "high", "xhigh"])
    })
  })
  describe("@ai-sdk/gateway", () => {
    test("returns OPENAI_EFFORTS with reasoningEffort", () => {
      const model = createMockModel({
        id: "gateway/gateway-model",
        providerID: "gateway",
        api: {
          id: "gateway-model",
          url: "https://gateway.ai",
          npm: "@ai-sdk/gateway",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(Object.keys(result)).toEqual(["none", "minimal", "low", "medium", "high", "xhigh"])
      expect(result.low).toEqual({ reasoningEffort: "low" })
      expect(result.high).toEqual({ reasoningEffort: "high" })
    })
  })
  describe("@ai-sdk/cerebras", () => {
    test("returns WIDELY_SUPPORTED_EFFORTS with reasoningEffort", () => {
      const model = createMockModel({
        id: "cerebras/llama-4",
        providerID: "cerebras",
        api: {
          id: "llama-4-sc",
          url: "https://api.cerebras.ai",
          npm: "@ai-sdk/cerebras",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(Object.keys(result)).toEqual(["low", "medium", "high"])
      expect(result.low).toEqual({ reasoningEffort: "low" })
      expect(result.high).toEqual({ reasoningEffort: "high" })
    })
  })
  describe("@ai-sdk/togetherai", () => {
    test("returns WIDELY_SUPPORTED_EFFORTS with reasoningEffort", () => {
      const model = createMockModel({
        id: "togetherai/llama-4",
        providerID: "togetherai",
        api: {
          id: "llama-4-sc",
          url: "https://api.togetherai.com",
          npm: "@ai-sdk/togetherai",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(Object.keys(result)).toEqual(["low", "medium", "high"])
      expect(result.low).toEqual({ reasoningEffort: "low" })
      expect(result.high).toEqual({ reasoningEffort: "high" })
    })
  })
  describe("@ai-sdk/xai", () => {
    test("returns WIDELY_SUPPORTED_EFFORTS with reasoningEffort", () => {
      const model = createMockModel({
        id: "xai/grok-3",
        providerID: "xai",
        api: {
          id: "grok-3",
          url: "https://api.x.ai",
          npm: "@ai-sdk/xai",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(Object.keys(result)).toEqual(["low", "medium", "high"])
      expect(result.low).toEqual({ reasoningEffort: "low" })
      expect(result.high).toEqual({ reasoningEffort: "high" })
    })
  })
  describe("@ai-sdk/deepinfra", () => {
    test("returns WIDELY_SUPPORTED_EFFORTS with reasoningEffort", () => {
      const model = createMockModel({
        id: "deepinfra/llama-4",
        providerID: "deepinfra",
        api: {
          id: "llama-4-sc",
          url: "https://api.deepinfra.com",
          npm: "@ai-sdk/deepinfra",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(Object.keys(result)).toEqual(["low", "medium", "high"])
      expect(result.low).toEqual({ reasoningEffort: "low" })
      expect(result.high).toEqual({ reasoningEffort: "high" })
    })
  })
  describe("@ai-sdk/openai-compatible", () => {
    test("returns WIDELY_SUPPORTED_EFFORTS with reasoningEffort", () => {
      const model = createMockModel({
        id: "custom-provider/custom-model",
        providerID: "custom-provider",
        api: {
          id: "custom-model",
          url: "https://api.custom.com",
          npm: "@ai-sdk/openai-compatible",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(Object.keys(result)).toEqual(["low", "medium", "high"])
      expect(result.low).toEqual({ reasoningEffort: "low" })
      expect(result.high).toEqual({ reasoningEffort: "high" })
    })
  })
  describe("@ai-sdk/azure", () => {
    test("o1-mini returns empty object", () => {
      const model = createMockModel({
        id: "o1-mini",
        providerID: "azure",
        api: {
          id: "o1-mini",
          url: "https://azure.com",
          npm: "@ai-sdk/azure",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(result).toEqual({})
    })
    test("standard azure models return custom efforts with reasoningSummary", () => {
      const model = createMockModel({
        id: "o1",
        providerID: "azure",
        api: {
          id: "o1",
          url: "https://azure.com",
          npm: "@ai-sdk/azure",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(Object.keys(result)).toEqual(["low", "medium", "high"])
      expect(result.low).toEqual({
        reasoningEffort: "low",
        reasoningSummary: "auto",
        include: ["reasoning.encrypted_content"],
      })
    })
    test("gpt-5 adds minimal effort", () => {
      const model = createMockModel({
        id: "gpt-5",
        providerID: "azure",
        api: {
          id: "gpt-5",
          url: "https://azure.com",
          npm: "@ai-sdk/azure",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(Object.keys(result)).toEqual(["minimal", "low", "medium", "high"])
    })
  })
  describe("@ai-sdk/openai", () => {
    test("gpt-5-pro returns empty object", () => {
      const model = createMockModel({
        id: "gpt-5-pro",
        providerID: "openai",
        api: {
          id: "gpt-5-pro",
          url: "https://api.openai.com",
          npm: "@ai-sdk/openai",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(result).toEqual({})
    })
    test("standard openai models return custom efforts with reasoningSummary", () => {
      const model = createMockModel({
        id: "gpt-5",
        providerID: "openai",
        api: {
          id: "gpt-5",
          url: "https://api.openai.com",
          npm: "@ai-sdk/openai",
        },
        release_date: "2024-06-01",
      })
      const result = ProviderTransform.variants(model)
      expect(Object.keys(result)).toEqual(["minimal", "low", "medium", "high"])
      expect(result.low).toEqual({
        reasoningEffort: "low",
        reasoningSummary: "auto",
        include: ["reasoning.encrypted_content"],
      })
    })
    test("models after 2025-11-13 include 'none' effort", () => {
      const model = createMockModel({
        id: "gpt-5-nano",
        providerID: "openai",
        api: {
          id: "gpt-5-nano",
          url: "https://api.openai.com",
          npm: "@ai-sdk/openai",
        },
        release_date: "2025-11-14",
      })
      const result = ProviderTransform.variants(model)
      expect(Object.keys(result)).toEqual(["none", "minimal", "low", "medium", "high"])
    })
    test("models after 2025-12-04 include 'xhigh' effort", () => {
      const model = createMockModel({
        id: "openai/gpt-5-chat",
        providerID: "openai",
        api: {
          id: "gpt-5-chat",
          url: "https://api.openai.com",
          npm: "@ai-sdk/openai",
        },
        release_date: "2025-12-05",
      })
      const result = ProviderTransform.variants(model)
      expect(Object.keys(result)).toEqual(["none", "minimal", "low", "medium", "high", "xhigh"])
    })
  })
  describe("@ai-sdk/anthropic", () => {
    test("returns high and max with thinking config", () => {
      const model = createMockModel({
        id: "anthropic/claude-4",
        providerID: "anthropic",
        api: {
          id: "claude-4",
          url: "https://api.anthropic.com",
          npm: "@ai-sdk/anthropic",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(Object.keys(result)).toEqual(["high", "max"])
      expect(result.high).toEqual({
        thinking: {
          type: "enabled",
          budgetTokens: 16000,
        },
      })
      expect(result.max).toEqual({
        thinking: {
          type: "enabled",
          budgetTokens: 31999,
        },
      })
    })
  })
  describe("@ai-sdk/amazon-bedrock", () => {
    test("returns WIDELY_SUPPORTED_EFFORTS with reasoningConfig", () => {
      const model = createMockModel({
        id: "bedrock/llama-4",
        providerID: "bedrock",
        api: {
          id: "llama-4-sc",
          url: "https://bedrock.amazonaws.com",
          npm: "@ai-sdk/amazon-bedrock",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(Object.keys(result)).toEqual(["low", "medium", "high"])
      expect(result.low).toEqual({
        reasoningConfig: {
          type: "enabled",
          maxReasoningEffort: "low",
        },
      })
    })
  })
  describe("@ai-sdk/google", () => {
    test("gemini-2.5 returns high and max with thinkingConfig and thinkingBudget", () => {
      const model = createMockModel({
        id: "google/gemini-2.5-pro",
        providerID: "google",
        api: {
          id: "gemini-2.5-pro",
          url: "https://generativelanguage.googleapis.com",
          npm: "@ai-sdk/google",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(Object.keys(result)).toEqual(["high", "max"])
      expect(result.high).toEqual({
        thinkingConfig: {
          includeThoughts: true,
          thinkingBudget: 16000,
        },
      })
      expect(result.max).toEqual({
        thinkingConfig: {
          includeThoughts: true,
          thinkingBudget: 24576,
        },
      })
    })
    test("other gemini models return low and high with thinkingLevel", () => {
      const model = createMockModel({
        id: "google/gemini-2.0-pro",
        providerID: "google",
        api: {
          id: "gemini-2.0-pro",
          url: "https://generativelanguage.googleapis.com",
          npm: "@ai-sdk/google",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(Object.keys(result)).toEqual(["low", "high"])
      expect(result.low).toEqual({
        includeThoughts: true,
        thinkingLevel: "low",
      })
      expect(result.high).toEqual({
        includeThoughts: true,
        thinkingLevel: "high",
      })
    })
  })
  describe("@ai-sdk/google-vertex", () => {
    test("gemini-2.5 returns high and max with thinkingConfig and thinkingBudget", () => {
      const model = createMockModel({
        id: "google-vertex/gemini-2.5-pro",
        providerID: "google-vertex",
        api: {
          id: "gemini-2.5-pro",
          url: "https://vertexai.googleapis.com",
          npm: "@ai-sdk/google-vertex",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(Object.keys(result)).toEqual(["high", "max"])
    })
    test("other vertex models return low and high with thinkingLevel", () => {
      const model = createMockModel({
        id: "google-vertex/gemini-2.0-pro",
        providerID: "google-vertex",
        api: {
          id: "gemini-2.0-pro",
          url: "https://vertexai.googleapis.com",
          npm: "@ai-sdk/google-vertex",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(Object.keys(result)).toEqual(["low", "high"])
    })
  })
  describe("@ai-sdk/cohere", () => {
    test("returns empty object", () => {
      const model = createMockModel({
        id: "cohere/command-r",
        providerID: "cohere",
        api: {
          id: "command-r",
          url: "https://api.cohere.com",
          npm: "@ai-sdk/cohere",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(result).toEqual({})
    })
  })
  describe("@ai-sdk/groq", () => {
    test("returns none and WIDELY_SUPPORTED_EFFORTS with thinkingLevel", () => {
      const model = createMockModel({
        id: "groq/llama-4",
        providerID: "groq",
        api: {
          id: "llama-4-sc",
          url: "https://api.groq.com",
          npm: "@ai-sdk/groq",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(Object.keys(result)).toEqual(["none", "low", "medium", "high"])
      expect(result.none).toEqual({
        includeThoughts: true,
        thinkingLevel: "none",
      })
      expect(result.low).toEqual({
        includeThoughts: true,
        thinkingLevel: "low",
      })
    })
  })
  describe("@ai-sdk/perplexity", () => {
    test("returns empty object", () => {
      const model = createMockModel({
        id: "perplexity/sonar-plus",
        providerID: "perplexity",
        api: {
          id: "sonar-plus",
          url: "https://api.perplexity.ai",
          npm: "@ai-sdk/perplexity",
        },
      })
      const result = ProviderTransform.variants(model)
      expect(result).toEqual({})
    })
  })
})