// transform.test.ts
import { describe, expect, test } from "bun:test"
import { ProviderTransform } from "../../src/provider/transform"

const OUTPUT_TOKEN_MAX = 32000

describe("ProviderTransform.options - setCacheKey", () => {
  const sessionID = "test-session-123"
  const mockModel = {
    id: "anthropic/claude-3-5-sonnet",
    providerID: "anthropic",
    api: {
      id: "claude-3-5-sonnet-20241022",
      url: "https://api.anthropic.com",
      npm: "@ai-sdk/anthropic",
    },
    name: "Claude 3.5 Sonnet",
    capabilities: {
      temperature: true,
      reasoning: false,
      attachment: true,
      toolcall: true,
      input: { text: true, audio: false, image: true, video: false, pdf: true },
      output: { text: true, audio: false, image: false, video: false, pdf: false },
      interleaved: false,
    },
    cost: {
      input: 0.003,
      output: 0.015,
      cache: { read: 0.0003, write: 0.00375 },
    },
    limit: {
      context: 200000,
      output: 8192,
    },
    status: "active",
    options: {},
    headers: {},
  } as any

  test("should set promptCacheKey when providerOptions.setCacheKey is true", () => {
    const result = ProviderTransform.options(mockModel, sessionID, { setCacheKey: true })
    expect(result.promptCacheKey).toBe(sessionID)
  })

  test("should not set promptCacheKey when providerOptions.setCacheKey is false", () => {
    const result = ProviderTransform.options(mockModel, sessionID, { setCacheKey: false })
    expect(result.promptCacheKey).toBeUndefined()
  })

  test("should not set promptCacheKey when providerOptions is undefined", () => {
    const result = ProviderTransform.options(mockModel, sessionID, undefined)
    expect(result.promptCacheKey).toBeUndefined()
  })

  test("should not set promptCacheKey when providerOptions does not have setCacheKey", () => {
    const result = ProviderTransform.options(mockModel, sessionID, {})
    expect(result.promptCacheKey).toBeUndefined()
  })

  test("should set promptCacheKey for openai provider regardless of setCacheKey", () => {
    const openaiModel = {
      ...mockModel,
      providerID: "openai",
      api: {
        id: "gpt-4",
        url: "https://api.openai.com",
        npm: "@ai-sdk/openai",
      },
    }
    const result = ProviderTransform.options(openaiModel, sessionID, {})
    expect(result.promptCacheKey).toBe(sessionID)
  })
})
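// Hedged sketch of the cache-key contract the tests above pin down: OpenAI
// models always get a prompt cache key, other providers opt in via
// setCacheKey. This is an illustration derived from the assertions, not the
// actual ProviderTransform.options implementation; the name is hypothetical.
function sketchCacheKeyOptions(model: any, sessionID: string, providerOptions?: { setCacheKey?: boolean }) {
  const result: Record<string, unknown> = {}
  if (model.providerID === "openai" || providerOptions?.setCacheKey) {
    result.promptCacheKey = sessionID
  }
  return result
}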
  67. describe("ProviderTransform.maxOutputTokens", () => {
  68. test("returns 32k when modelLimit > 32k", () => {
  69. const modelLimit = 100000
  70. const result = ProviderTransform.maxOutputTokens("@ai-sdk/openai", {}, modelLimit, OUTPUT_TOKEN_MAX)
  71. expect(result).toBe(OUTPUT_TOKEN_MAX)
  72. })
  73. test("returns modelLimit when modelLimit < 32k", () => {
  74. const modelLimit = 16000
  75. const result = ProviderTransform.maxOutputTokens("@ai-sdk/openai", {}, modelLimit, OUTPUT_TOKEN_MAX)
  76. expect(result).toBe(16000)
  77. })
  78. describe("azure", () => {
  79. test("returns 32k when modelLimit > 32k", () => {
  80. const modelLimit = 100000
  81. const result = ProviderTransform.maxOutputTokens("@ai-sdk/azure", {}, modelLimit, OUTPUT_TOKEN_MAX)
  82. expect(result).toBe(OUTPUT_TOKEN_MAX)
  83. })
  84. test("returns modelLimit when modelLimit < 32k", () => {
  85. const modelLimit = 16000
  86. const result = ProviderTransform.maxOutputTokens("@ai-sdk/azure", {}, modelLimit, OUTPUT_TOKEN_MAX)
  87. expect(result).toBe(16000)
  88. })
  89. })
  90. describe("bedrock", () => {
  91. test("returns 32k when modelLimit > 32k", () => {
  92. const modelLimit = 100000
  93. const result = ProviderTransform.maxOutputTokens("@ai-sdk/amazon-bedrock", {}, modelLimit, OUTPUT_TOKEN_MAX)
  94. expect(result).toBe(OUTPUT_TOKEN_MAX)
  95. })
  96. test("returns modelLimit when modelLimit < 32k", () => {
  97. const modelLimit = 16000
  98. const result = ProviderTransform.maxOutputTokens("@ai-sdk/amazon-bedrock", {}, modelLimit, OUTPUT_TOKEN_MAX)
  99. expect(result).toBe(16000)
  100. })
  101. })
  102. describe("anthropic without thinking options", () => {
  103. test("returns 32k when modelLimit > 32k", () => {
  104. const modelLimit = 100000
  105. const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", {}, modelLimit, OUTPUT_TOKEN_MAX)
  106. expect(result).toBe(OUTPUT_TOKEN_MAX)
  107. })
  108. test("returns modelLimit when modelLimit < 32k", () => {
  109. const modelLimit = 16000
  110. const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", {}, modelLimit, OUTPUT_TOKEN_MAX)
  111. expect(result).toBe(16000)
  112. })
  113. })
  114. describe("anthropic with thinking options", () => {
  115. test("returns 32k when budgetTokens + 32k <= modelLimit", () => {
  116. const modelLimit = 100000
  117. const options = {
  118. thinking: {
  119. type: "enabled",
  120. budgetTokens: 10000,
  121. },
  122. }
  123. const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", options, modelLimit, OUTPUT_TOKEN_MAX)
  124. expect(result).toBe(OUTPUT_TOKEN_MAX)
  125. })
  126. test("returns modelLimit - budgetTokens when budgetTokens + 32k > modelLimit", () => {
  127. const modelLimit = 50000
  128. const options = {
  129. thinking: {
  130. type: "enabled",
  131. budgetTokens: 30000,
  132. },
  133. }
  134. const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", options, modelLimit, OUTPUT_TOKEN_MAX)
  135. expect(result).toBe(20000)
  136. })
  137. test("returns 32k when thinking type is not enabled", () => {
  138. const modelLimit = 100000
  139. const options = {
  140. thinking: {
  141. type: "disabled",
  142. budgetTokens: 10000,
  143. },
  144. }
  145. const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", options, modelLimit, OUTPUT_TOKEN_MAX)
  146. expect(result).toBe(OUTPUT_TOKEN_MAX)
  147. })
  148. })
  149. })
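// Hedged sketch of the clamping rule the cases above encode: output tokens
// are capped at OUTPUT_TOKEN_MAX, and when Anthropic extended thinking is
// enabled the budget must also fit inside the model's output limit
// (e.g. 50000 - 30000 = 20000 in the test above). Illustration only; the
// function name and structure are assumptions, not the real implementation.
function sketchMaxOutputTokens(npm: string, options: any, modelLimit: number, max: number) {
  if (npm === "@ai-sdk/anthropic" && options?.thinking?.type === "enabled") {
    const budget = options.thinking.budgetTokens ?? 0
    return Math.min(max, modelLimit - budget)
  }
  return Math.min(max, modelLimit)
}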
  150. describe("ProviderTransform.schema - gemini array items", () => {
  151. test("adds missing items for array properties", () => {
  152. const geminiModel = {
  153. providerID: "google",
  154. api: {
  155. id: "gemini-3-pro",
  156. },
  157. } as any
  158. const schema = {
  159. type: "object",
  160. properties: {
  161. nodes: { type: "array" },
  162. edges: { type: "array", items: { type: "string" } },
  163. },
  164. } as any
  165. const result = ProviderTransform.schema(geminiModel, schema) as any
  166. expect(result.properties.nodes.items).toBeDefined()
  167. expect(result.properties.edges.items.type).toBe("string")
  168. })
  169. })
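// Hedged sketch of the schema fix-up exercised above: Gemini rejects array
// properties without an "items" schema, so one is filled in when missing.
// The test only requires items to be defined, so the default item type here
// is a guess; treat this as an illustration, not the real code.
function sketchAddArrayItems(schema: any) {
  for (const prop of Object.values<any>(schema.properties ?? {})) {
    if (prop.type === "array" && !prop.items) prop.items = { type: "string" }
  }
  return schema
}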
  170. describe("ProviderTransform.message - DeepSeek reasoning content", () => {
  171. test("DeepSeek with tool calls includes reasoning_content in providerOptions", () => {
  172. const msgs = [
  173. {
  174. role: "assistant",
  175. content: [
  176. { type: "reasoning", text: "Let me think about this..." },
  177. {
  178. type: "tool-call",
  179. toolCallId: "test",
  180. toolName: "bash",
  181. input: { command: "echo hello" },
  182. },
  183. ],
  184. },
  185. ] as any[]
  186. const result = ProviderTransform.message(msgs, {
  187. id: "deepseek/deepseek-chat",
  188. providerID: "deepseek",
  189. api: {
  190. id: "deepseek-chat",
  191. url: "https://api.deepseek.com",
  192. npm: "@ai-sdk/openai-compatible",
  193. },
  194. name: "DeepSeek Chat",
  195. capabilities: {
  196. temperature: true,
  197. reasoning: true,
  198. attachment: false,
  199. toolcall: true,
  200. input: { text: true, audio: false, image: false, video: false, pdf: false },
  201. output: { text: true, audio: false, image: false, video: false, pdf: false },
  202. interleaved: {
  203. field: "reasoning_content",
  204. },
  205. },
  206. cost: {
  207. input: 0.001,
  208. output: 0.002,
  209. cache: { read: 0.0001, write: 0.0002 },
  210. },
  211. limit: {
  212. context: 128000,
  213. output: 8192,
  214. },
  215. status: "active",
  216. options: {},
  217. headers: {},
  218. release_date: "2023-04-01",
  219. })
  220. expect(result).toHaveLength(1)
  221. expect(result[0].content).toEqual([
  222. {
  223. type: "tool-call",
  224. toolCallId: "test",
  225. toolName: "bash",
  226. input: { command: "echo hello" },
  227. },
  228. ])
  229. expect(result[0].providerOptions?.openaiCompatible?.reasoning_content).toBe("Let me think about this...")
  230. })
  231. test("Non-DeepSeek providers leave reasoning content unchanged", () => {
  232. const msgs = [
  233. {
  234. role: "assistant",
  235. content: [
  236. { type: "reasoning", text: "Should not be processed" },
  237. { type: "text", text: "Answer" },
  238. ],
  239. },
  240. ] as any[]
  241. const result = ProviderTransform.message(msgs, {
  242. id: "openai/gpt-4",
  243. providerID: "openai",
  244. api: {
  245. id: "gpt-4",
  246. url: "https://api.openai.com",
  247. npm: "@ai-sdk/openai",
  248. },
  249. name: "GPT-4",
  250. capabilities: {
  251. temperature: true,
  252. reasoning: false,
  253. attachment: true,
  254. toolcall: true,
  255. input: { text: true, audio: false, image: true, video: false, pdf: false },
  256. output: { text: true, audio: false, image: false, video: false, pdf: false },
  257. interleaved: false,
  258. },
  259. cost: {
  260. input: 0.03,
  261. output: 0.06,
  262. cache: { read: 0.001, write: 0.002 },
  263. },
  264. limit: {
  265. context: 128000,
  266. output: 4096,
  267. },
  268. status: "active",
  269. options: {},
  270. headers: {},
  271. release_date: "2023-04-01",
  272. })
  273. expect(result[0].content).toEqual([
  274. { type: "reasoning", text: "Should not be processed" },
  275. { type: "text", text: "Answer" },
  276. ])
  277. expect(result[0].providerOptions?.openaiCompatible?.reasoning_content).toBeUndefined()
  278. })
  279. })
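// Hedged sketch of the transform asserted above: when a model declares an
// interleaved reasoning field, assistant reasoning parts are stripped from
// content and surfaced via providerOptions for the openai-compatible SDK.
// Derived from the expectations for a single reasoning part; the real code
// presumably handles more cases, so this is an assumption-laden illustration.
function sketchMoveReasoningContent(msg: any, model: any) {
  const field = model.capabilities?.interleaved?.field
  if (!field || msg.role !== "assistant" || !Array.isArray(msg.content)) return msg
  const reasoning = msg.content.find((part: any) => part.type === "reasoning")
  if (!reasoning) return msg
  return {
    ...msg,
    content: msg.content.filter((part: any) => part.type !== "reasoning"),
    providerOptions: { openaiCompatible: { [field]: reasoning.text } },
  }
}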
  280. describe("ProviderTransform.message - empty image handling", () => {
  281. const mockModel = {
  282. id: "anthropic/claude-3-5-sonnet",
  283. providerID: "anthropic",
  284. api: {
  285. id: "claude-3-5-sonnet-20241022",
  286. url: "https://api.anthropic.com",
  287. npm: "@ai-sdk/anthropic",
  288. },
  289. name: "Claude 3.5 Sonnet",
  290. capabilities: {
  291. temperature: true,
  292. reasoning: false,
  293. attachment: true,
  294. toolcall: true,
  295. input: { text: true, audio: false, image: true, video: false, pdf: true },
  296. output: { text: true, audio: false, image: false, video: false, pdf: false },
  297. interleaved: false,
  298. },
  299. cost: {
  300. input: 0.003,
  301. output: 0.015,
  302. cache: { read: 0.0003, write: 0.00375 },
  303. },
  304. limit: {
  305. context: 200000,
  306. output: 8192,
  307. },
  308. status: "active",
  309. options: {},
  310. headers: {},
  311. } as any
  312. test("should replace empty base64 image with error text", () => {
  313. const msgs = [
  314. {
  315. role: "user",
  316. content: [
  317. { type: "text", text: "What is in this image?" },
  318. { type: "image", image: "data:image/png;base64," },
  319. ],
  320. },
  321. ] as any[]
  322. const result = ProviderTransform.message(msgs, mockModel)
  323. expect(result).toHaveLength(1)
  324. expect(result[0].content).toHaveLength(2)
  325. expect(result[0].content[0]).toEqual({ type: "text", text: "What is in this image?" })
  326. expect(result[0].content[1]).toEqual({
  327. type: "text",
  328. text: "ERROR: Image file is empty or corrupted. Please provide a valid image.",
  329. })
  330. })
  331. test("should keep valid base64 images unchanged", () => {
  332. const validBase64 =
  333. "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNk+M9QDwADhgGAWjR9awAAAABJRU5ErkJggg=="
  334. const msgs = [
  335. {
  336. role: "user",
  337. content: [
  338. { type: "text", text: "What is in this image?" },
  339. { type: "image", image: `data:image/png;base64,${validBase64}` },
  340. ],
  341. },
  342. ] as any[]
  343. const result = ProviderTransform.message(msgs, mockModel)
  344. expect(result).toHaveLength(1)
  345. expect(result[0].content).toHaveLength(2)
  346. expect(result[0].content[0]).toEqual({ type: "text", text: "What is in this image?" })
  347. expect(result[0].content[1]).toEqual({ type: "image", image: `data:image/png;base64,${validBase64}` })
  348. })
  349. test("should handle mixed valid and empty images", () => {
  350. const validBase64 =
  351. "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNk+M9QDwADhgGAWjR9awAAAABJRU5ErkJggg=="
  352. const msgs = [
  353. {
  354. role: "user",
  355. content: [
  356. { type: "text", text: "Compare these images" },
  357. { type: "image", image: `data:image/png;base64,${validBase64}` },
  358. { type: "image", image: "data:image/jpeg;base64," },
  359. ],
  360. },
  361. ] as any[]
  362. const result = ProviderTransform.message(msgs, mockModel)
  363. expect(result).toHaveLength(1)
  364. expect(result[0].content).toHaveLength(3)
  365. expect(result[0].content[0]).toEqual({ type: "text", text: "Compare these images" })
  366. expect(result[0].content[1]).toEqual({ type: "image", image: `data:image/png;base64,${validBase64}` })
  367. expect(result[0].content[2]).toEqual({
  368. type: "text",
  369. text: "ERROR: Image file is empty or corrupted. Please provide a valid image.",
  370. })
  371. })
  372. })
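// Hedged sketch of the empty-image guard tested above: a data URL whose
// base64 payload is empty is swapped for an explanatory text part so the
// provider never receives a zero-byte image. Assumed shape, per-part.
function sketchReplaceEmptyImage(part: any) {
  if (part.type === "image" && /^data:[^;]+;base64,$/.test(part.image)) {
    return {
      type: "text",
      text: "ERROR: Image file is empty or corrupted. Please provide a valid image.",
    }
  }
  return part
}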
  373. describe("ProviderTransform.message - anthropic empty content filtering", () => {
  374. const anthropicModel = {
  375. id: "anthropic/claude-3-5-sonnet",
  376. providerID: "anthropic",
  377. api: {
  378. id: "claude-3-5-sonnet-20241022",
  379. url: "https://api.anthropic.com",
  380. npm: "@ai-sdk/anthropic",
  381. },
  382. name: "Claude 3.5 Sonnet",
  383. capabilities: {
  384. temperature: true,
  385. reasoning: false,
  386. attachment: true,
  387. toolcall: true,
  388. input: { text: true, audio: false, image: true, video: false, pdf: true },
  389. output: { text: true, audio: false, image: false, video: false, pdf: false },
  390. interleaved: false,
  391. },
  392. cost: {
  393. input: 0.003,
  394. output: 0.015,
  395. cache: { read: 0.0003, write: 0.00375 },
  396. },
  397. limit: {
  398. context: 200000,
  399. output: 8192,
  400. },
  401. status: "active",
  402. options: {},
  403. headers: {},
  404. } as any
  405. test("filters out messages with empty string content", () => {
  406. const msgs = [
  407. { role: "user", content: "Hello" },
  408. { role: "assistant", content: "" },
  409. { role: "user", content: "World" },
  410. ] as any[]
  411. const result = ProviderTransform.message(msgs, anthropicModel)
  412. expect(result).toHaveLength(2)
  413. expect(result[0].content).toBe("Hello")
  414. expect(result[1].content).toBe("World")
  415. })
  416. test("filters out empty text parts from array content", () => {
  417. const msgs = [
  418. {
  419. role: "assistant",
  420. content: [
  421. { type: "text", text: "" },
  422. { type: "text", text: "Hello" },
  423. { type: "text", text: "" },
  424. ],
  425. },
  426. ] as any[]
  427. const result = ProviderTransform.message(msgs, anthropicModel)
  428. expect(result).toHaveLength(1)
  429. expect(result[0].content).toHaveLength(1)
  430. expect(result[0].content[0]).toEqual({ type: "text", text: "Hello" })
  431. })
  432. test("filters out empty reasoning parts from array content", () => {
  433. const msgs = [
  434. {
  435. role: "assistant",
  436. content: [
  437. { type: "reasoning", text: "" },
  438. { type: "text", text: "Answer" },
  439. { type: "reasoning", text: "" },
  440. ],
  441. },
  442. ] as any[]
  443. const result = ProviderTransform.message(msgs, anthropicModel)
  444. expect(result).toHaveLength(1)
  445. expect(result[0].content).toHaveLength(1)
  446. expect(result[0].content[0]).toEqual({ type: "text", text: "Answer" })
  447. })
  448. test("removes entire message when all parts are empty", () => {
  449. const msgs = [
  450. { role: "user", content: "Hello" },
  451. {
  452. role: "assistant",
  453. content: [
  454. { type: "text", text: "" },
  455. { type: "reasoning", text: "" },
  456. ],
  457. },
  458. { role: "user", content: "World" },
  459. ] as any[]
  460. const result = ProviderTransform.message(msgs, anthropicModel)
  461. expect(result).toHaveLength(2)
  462. expect(result[0].content).toBe("Hello")
  463. expect(result[1].content).toBe("World")
  464. })
  465. test("keeps non-text/reasoning parts even if text parts are empty", () => {
  466. const msgs = [
  467. {
  468. role: "assistant",
  469. content: [
  470. { type: "text", text: "" },
  471. { type: "tool-call", toolCallId: "123", toolName: "bash", input: { command: "ls" } },
  472. ],
  473. },
  474. ] as any[]
  475. const result = ProviderTransform.message(msgs, anthropicModel)
  476. expect(result).toHaveLength(1)
  477. expect(result[0].content).toHaveLength(1)
  478. expect(result[0].content[0]).toEqual({
  479. type: "tool-call",
  480. toolCallId: "123",
  481. toolName: "bash",
  482. input: { command: "ls" },
  483. })
  484. })
  485. test("keeps messages with valid text alongside empty parts", () => {
  486. const msgs = [
  487. {
  488. role: "assistant",
  489. content: [
  490. { type: "reasoning", text: "Thinking..." },
  491. { type: "text", text: "" },
  492. { type: "text", text: "Result" },
  493. ],
  494. },
  495. ] as any[]
  496. const result = ProviderTransform.message(msgs, anthropicModel)
  497. expect(result).toHaveLength(1)
  498. expect(result[0].content).toHaveLength(2)
  499. expect(result[0].content[0]).toEqual({ type: "reasoning", text: "Thinking..." })
  500. expect(result[0].content[1]).toEqual({ type: "text", text: "Result" })
  501. })
  502. test("does not filter for non-anthropic providers", () => {
  503. const openaiModel = {
  504. ...anthropicModel,
  505. providerID: "openai",
  506. api: {
  507. id: "gpt-4",
  508. url: "https://api.openai.com",
  509. npm: "@ai-sdk/openai",
  510. },
  511. }
  512. const msgs = [
  513. { role: "assistant", content: "" },
  514. {
  515. role: "assistant",
  516. content: [{ type: "text", text: "" }],
  517. },
  518. ] as any[]
  519. const result = ProviderTransform.message(msgs, openaiModel)
  520. expect(result).toHaveLength(2)
  521. expect(result[0].content).toBe("")
  522. expect(result[1].content).toHaveLength(1)
  523. })
  524. })
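// Hedged sketch of the Anthropic-only filter covered above: empty text and
// reasoning parts are dropped, and a message is removed entirely once nothing
// remains (the Anthropic API rejects empty content blocks). Illustration
// derived from the assertions, not the actual implementation.
function sketchFilterEmptyContent(msgs: any[]) {
  return msgs.flatMap((msg) => {
    if (typeof msg.content === "string") return msg.content === "" ? [] : [msg]
    const content = msg.content.filter(
      (part: any) => !((part.type === "text" || part.type === "reasoning") && part.text === ""),
    )
    return content.length === 0 ? [] : [{ ...msg, content }]
  })
}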
  525. describe("ProviderTransform.variants", () => {
  526. const createMockModel = (overrides: Partial<any> = {}): any => ({
  527. id: "test/test-model",
  528. providerID: "test",
  529. api: {
  530. id: "test-model",
  531. url: "https://api.test.com",
  532. npm: "@ai-sdk/openai",
  533. },
  534. name: "Test Model",
  535. capabilities: {
  536. temperature: true,
  537. reasoning: true,
  538. attachment: true,
  539. toolcall: true,
  540. input: { text: true, audio: false, image: true, video: false, pdf: false },
  541. output: { text: true, audio: false, image: false, video: false, pdf: false },
  542. interleaved: false,
  543. },
  544. cost: {
  545. input: 0.001,
  546. output: 0.002,
  547. cache: { read: 0.0001, write: 0.0002 },
  548. },
  549. limit: {
  550. context: 128000,
  551. output: 8192,
  552. },
  553. status: "active",
  554. options: {},
  555. headers: {},
  556. release_date: "2024-01-01",
  557. ...overrides,
  558. })
  559. test("returns empty object when model has no reasoning capabilities", () => {
  560. const model = createMockModel({
  561. capabilities: { reasoning: false },
  562. })
  563. const result = ProviderTransform.variants(model)
  564. expect(result).toEqual({})
  565. })
  566. test("deepseek returns empty object", () => {
  567. const model = createMockModel({
  568. id: "deepseek/deepseek-chat",
  569. providerID: "deepseek",
  570. api: {
  571. id: "deepseek-chat",
  572. url: "https://api.deepseek.com",
  573. npm: "@ai-sdk/openai-compatible",
  574. },
  575. })
  576. const result = ProviderTransform.variants(model)
  577. expect(result).toEqual({})
  578. })
  579. test("minimax returns empty object", () => {
  580. const model = createMockModel({
  581. id: "minimax/minimax-model",
  582. providerID: "minimax",
  583. api: {
  584. id: "minimax-model",
  585. url: "https://api.minimax.com",
  586. npm: "@ai-sdk/openai-compatible",
  587. },
  588. })
  589. const result = ProviderTransform.variants(model)
  590. expect(result).toEqual({})
  591. })
  592. test("glm returns empty object", () => {
  593. const model = createMockModel({
  594. id: "glm/glm-4",
  595. providerID: "glm",
  596. api: {
  597. id: "glm-4",
  598. url: "https://api.glm.com",
  599. npm: "@ai-sdk/openai-compatible",
  600. },
  601. })
  602. const result = ProviderTransform.variants(model)
  603. expect(result).toEqual({})
  604. })
  605. test("mistral returns empty object", () => {
  606. const model = createMockModel({
  607. id: "mistral/mistral-large",
  608. providerID: "mistral",
  609. api: {
  610. id: "mistral-large-latest",
  611. url: "https://api.mistral.com",
  612. npm: "@ai-sdk/mistral",
  613. },
  614. })
  615. const result = ProviderTransform.variants(model)
  616. expect(result).toEqual({})
  617. })
  618. describe("@openrouter/ai-sdk-provider", () => {
  619. test("returns empty object for non-qualifying models", () => {
  620. const model = createMockModel({
  621. id: "openrouter/test-model",
  622. providerID: "openrouter",
  623. api: {
  624. id: "test-model",
  625. url: "https://openrouter.ai",
  626. npm: "@openrouter/ai-sdk-provider",
  627. },
  628. })
  629. const result = ProviderTransform.variants(model)
  630. expect(result).toEqual({})
  631. })
  632. test("gpt models return OPENAI_EFFORTS with reasoning", () => {
  633. const model = createMockModel({
  634. id: "openrouter/gpt-4",
  635. providerID: "openrouter",
  636. api: {
  637. id: "gpt-4",
  638. url: "https://openrouter.ai",
  639. npm: "@openrouter/ai-sdk-provider",
  640. },
  641. })
  642. const result = ProviderTransform.variants(model)
  643. expect(Object.keys(result)).toEqual(["none", "minimal", "low", "medium", "high", "xhigh"])
  644. expect(result.low).toEqual({ reasoning: { effort: "low" } })
  645. expect(result.high).toEqual({ reasoning: { effort: "high" } })
  646. })
  647. test("gemini-3 returns OPENAI_EFFORTS with reasoning", () => {
  648. const model = createMockModel({
  649. id: "openrouter/gemini-3-5-pro",
  650. providerID: "openrouter",
  651. api: {
  652. id: "gemini-3-5-pro",
  653. url: "https://openrouter.ai",
  654. npm: "@openrouter/ai-sdk-provider",
  655. },
  656. })
  657. const result = ProviderTransform.variants(model)
  658. expect(Object.keys(result)).toEqual(["none", "minimal", "low", "medium", "high", "xhigh"])
  659. })
  660. test("grok-4 returns OPENAI_EFFORTS with reasoning", () => {
  661. const model = createMockModel({
  662. id: "openrouter/grok-4",
  663. providerID: "openrouter",
  664. api: {
  665. id: "grok-4",
  666. url: "https://openrouter.ai",
  667. npm: "@openrouter/ai-sdk-provider",
  668. },
  669. })
  670. const result = ProviderTransform.variants(model)
  671. expect(Object.keys(result)).toEqual(["none", "minimal", "low", "medium", "high", "xhigh"])
  672. })
  673. })
  674. describe("@ai-sdk/gateway", () => {
  675. test("returns OPENAI_EFFORTS with reasoningEffort", () => {
  676. const model = createMockModel({
  677. id: "gateway/gateway-model",
  678. providerID: "gateway",
  679. api: {
  680. id: "gateway-model",
  681. url: "https://gateway.ai",
  682. npm: "@ai-sdk/gateway",
  683. },
  684. })
  685. const result = ProviderTransform.variants(model)
  686. expect(Object.keys(result)).toEqual(["none", "minimal", "low", "medium", "high", "xhigh"])
  687. expect(result.low).toEqual({ reasoningEffort: "low" })
  688. expect(result.high).toEqual({ reasoningEffort: "high" })
  689. })
  690. })
  691. describe("@ai-sdk/cerebras", () => {
  692. test("returns WIDELY_SUPPORTED_EFFORTS with reasoningEffort", () => {
  693. const model = createMockModel({
  694. id: "cerebras/llama-4",
  695. providerID: "cerebras",
  696. api: {
  697. id: "llama-4-sc",
  698. url: "https://api.cerebras.ai",
  699. npm: "@ai-sdk/cerebras",
  700. },
  701. })
  702. const result = ProviderTransform.variants(model)
  703. expect(Object.keys(result)).toEqual(["low", "medium", "high"])
  704. expect(result.low).toEqual({ reasoningEffort: "low" })
  705. expect(result.high).toEqual({ reasoningEffort: "high" })
  706. })
  707. })
  708. describe("@ai-sdk/togetherai", () => {
  709. test("returns WIDELY_SUPPORTED_EFFORTS with reasoningEffort", () => {
  710. const model = createMockModel({
  711. id: "togetherai/llama-4",
  712. providerID: "togetherai",
  713. api: {
  714. id: "llama-4-sc",
  715. url: "https://api.togetherai.com",
  716. npm: "@ai-sdk/togetherai",
  717. },
  718. })
  719. const result = ProviderTransform.variants(model)
  720. expect(Object.keys(result)).toEqual(["low", "medium", "high"])
  721. expect(result.low).toEqual({ reasoningEffort: "low" })
  722. expect(result.high).toEqual({ reasoningEffort: "high" })
  723. })
  724. })
  725. describe("@ai-sdk/xai", () => {
  726. test("returns WIDELY_SUPPORTED_EFFORTS with reasoningEffort", () => {
  727. const model = createMockModel({
  728. id: "xai/grok-3",
  729. providerID: "xai",
  730. api: {
  731. id: "grok-3",
  732. url: "https://api.x.ai",
  733. npm: "@ai-sdk/xai",
  734. },
  735. })
  736. const result = ProviderTransform.variants(model)
  737. expect(Object.keys(result)).toEqual(["low", "medium", "high"])
  738. expect(result.low).toEqual({ reasoningEffort: "low" })
  739. expect(result.high).toEqual({ reasoningEffort: "high" })
  740. })
  741. })
  742. describe("@ai-sdk/deepinfra", () => {
  743. test("returns WIDELY_SUPPORTED_EFFORTS with reasoningEffort", () => {
  744. const model = createMockModel({
  745. id: "deepinfra/llama-4",
  746. providerID: "deepinfra",
  747. api: {
  748. id: "llama-4-sc",
  749. url: "https://api.deepinfra.com",
  750. npm: "@ai-sdk/deepinfra",
  751. },
  752. })
  753. const result = ProviderTransform.variants(model)
  754. expect(Object.keys(result)).toEqual(["low", "medium", "high"])
  755. expect(result.low).toEqual({ reasoningEffort: "low" })
  756. expect(result.high).toEqual({ reasoningEffort: "high" })
  757. })
  758. })
  759. describe("@ai-sdk/openai-compatible", () => {
  760. test("returns WIDELY_SUPPORTED_EFFORTS with reasoningEffort", () => {
  761. const model = createMockModel({
  762. id: "custom-provider/custom-model",
  763. providerID: "custom-provider",
  764. api: {
  765. id: "custom-model",
  766. url: "https://api.custom.com",
  767. npm: "@ai-sdk/openai-compatible",
  768. },
  769. })
  770. const result = ProviderTransform.variants(model)
  771. expect(Object.keys(result)).toEqual(["low", "medium", "high"])
  772. expect(result.low).toEqual({ reasoningEffort: "low" })
  773. expect(result.high).toEqual({ reasoningEffort: "high" })
  774. })
  775. })
  776. describe("@ai-sdk/azure", () => {
  777. test("o1-mini returns empty object", () => {
  778. const model = createMockModel({
  779. id: "o1-mini",
  780. providerID: "azure",
  781. api: {
  782. id: "o1-mini",
  783. url: "https://azure.com",
  784. npm: "@ai-sdk/azure",
  785. },
  786. })
  787. const result = ProviderTransform.variants(model)
  788. expect(result).toEqual({})
  789. })
  790. test("standard azure models return custom efforts with reasoningSummary", () => {
  791. const model = createMockModel({
  792. id: "o1",
  793. providerID: "azure",
  794. api: {
  795. id: "o1",
  796. url: "https://azure.com",
  797. npm: "@ai-sdk/azure",
  798. },
  799. })
  800. const result = ProviderTransform.variants(model)
  801. expect(Object.keys(result)).toEqual(["low", "medium", "high"])
  802. expect(result.low).toEqual({
  803. reasoningEffort: "low",
  804. reasoningSummary: "auto",
  805. include: ["reasoning.encrypted_content"],
  806. })
  807. })
  808. test("gpt-5 adds minimal effort", () => {
  809. const model = createMockModel({
  810. id: "gpt-5",
  811. providerID: "azure",
  812. api: {
  813. id: "gpt-5",
  814. url: "https://azure.com",
  815. npm: "@ai-sdk/azure",
  816. },
  817. })
  818. const result = ProviderTransform.variants(model)
  819. expect(Object.keys(result)).toEqual(["minimal", "low", "medium", "high"])
  820. })
  821. })
  822. describe("@ai-sdk/openai", () => {
  823. test("gpt-5-pro returns empty object", () => {
  824. const model = createMockModel({
  825. id: "gpt-5-pro",
  826. providerID: "openai",
  827. api: {
  828. id: "gpt-5-pro",
  829. url: "https://api.openai.com",
  830. npm: "@ai-sdk/openai",
  831. },
  832. })
  833. const result = ProviderTransform.variants(model)
  834. expect(result).toEqual({})
  835. })
  836. test("standard openai models return custom efforts with reasoningSummary", () => {
  837. const model = createMockModel({
  838. id: "gpt-5",
  839. providerID: "openai",
  840. api: {
  841. id: "gpt-5",
  842. url: "https://api.openai.com",
  843. npm: "@ai-sdk/openai",
  844. },
  845. release_date: "2024-06-01",
  846. })
  847. const result = ProviderTransform.variants(model)
  848. expect(Object.keys(result)).toEqual(["minimal", "low", "medium", "high"])
  849. expect(result.low).toEqual({
  850. reasoningEffort: "low",
  851. reasoningSummary: "auto",
  852. include: ["reasoning.encrypted_content"],
  853. })
  854. })
  855. test("models after 2025-11-13 include 'none' effort", () => {
  856. const model = createMockModel({
  857. id: "gpt-5-nano",
  858. providerID: "openai",
  859. api: {
  860. id: "gpt-5-nano",
  861. url: "https://api.openai.com",
  862. npm: "@ai-sdk/openai",
  863. },
  864. release_date: "2025-11-14",
  865. })
  866. const result = ProviderTransform.variants(model)
  867. expect(Object.keys(result)).toEqual(["none", "minimal", "low", "medium", "high"])
  868. })
  869. test("models after 2025-12-04 include 'xhigh' effort", () => {
  870. const model = createMockModel({
  871. id: "openai/gpt-5-chat",
  872. providerID: "openai",
  873. api: {
  874. id: "gpt-5-chat",
  875. url: "https://api.openai.com",
  876. npm: "@ai-sdk/openai",
  877. },
  878. release_date: "2025-12-05",
  879. })
  880. const result = ProviderTransform.variants(model)
  881. expect(Object.keys(result)).toEqual(["none", "minimal", "low", "medium", "high", "xhigh"])
  882. })
  883. })
  884. describe("@ai-sdk/anthropic", () => {
  885. test("returns high and max with thinking config", () => {
  886. const model = createMockModel({
  887. id: "anthropic/claude-4",
  888. providerID: "anthropic",
  889. api: {
  890. id: "claude-4",
  891. url: "https://api.anthropic.com",
  892. npm: "@ai-sdk/anthropic",
  893. },
  894. })
  895. const result = ProviderTransform.variants(model)
  896. expect(Object.keys(result)).toEqual(["high", "max"])
  897. expect(result.high).toEqual({
  898. thinking: {
  899. type: "enabled",
  900. budgetTokens: 16000,
  901. },
  902. })
  903. expect(result.max).toEqual({
  904. thinking: {
  905. type: "enabled",
  906. budgetTokens: 31999,
  907. },
  908. })
  909. })
  910. })
  911. describe("@ai-sdk/amazon-bedrock", () => {
  912. test("returns WIDELY_SUPPORTED_EFFORTS with reasoningConfig", () => {
  913. const model = createMockModel({
  914. id: "bedrock/llama-4",
  915. providerID: "bedrock",
  916. api: {
  917. id: "llama-4-sc",
  918. url: "https://bedrock.amazonaws.com",
  919. npm: "@ai-sdk/amazon-bedrock",
  920. },
  921. })
  922. const result = ProviderTransform.variants(model)
  923. expect(Object.keys(result)).toEqual(["low", "medium", "high"])
  924. expect(result.low).toEqual({
  925. reasoningConfig: {
  926. type: "enabled",
  927. maxReasoningEffort: "low",
  928. },
  929. })
  930. })
  931. })
  932. describe("@ai-sdk/google", () => {
  933. test("gemini-2.5 returns high and max with thinkingConfig and thinkingBudget", () => {
  934. const model = createMockModel({
  935. id: "google/gemini-2.5-pro",
  936. providerID: "google",
  937. api: {
  938. id: "gemini-2.5-pro",
  939. url: "https://generativelanguage.googleapis.com",
  940. npm: "@ai-sdk/google",
  941. },
  942. })
  943. const result = ProviderTransform.variants(model)
  944. expect(Object.keys(result)).toEqual(["high", "max"])
  945. expect(result.high).toEqual({
  946. thinkingConfig: {
  947. includeThoughts: true,
  948. thinkingBudget: 16000,
  949. },
  950. })
  951. expect(result.max).toEqual({
  952. thinkingConfig: {
  953. includeThoughts: true,
  954. thinkingBudget: 24576,
  955. },
  956. })
  957. })
  958. test("other gemini models return low and high with thinkingLevel", () => {
  959. const model = createMockModel({
  960. id: "google/gemini-2.0-pro",
  961. providerID: "google",
  962. api: {
  963. id: "gemini-2.0-pro",
  964. url: "https://generativelanguage.googleapis.com",
  965. npm: "@ai-sdk/google",
  966. },
  967. })
  968. const result = ProviderTransform.variants(model)
  969. expect(Object.keys(result)).toEqual(["low", "high"])
  970. expect(result.low).toEqual({
  971. includeThoughts: true,
  972. thinkingLevel: "low",
  973. })
  974. expect(result.high).toEqual({
  975. includeThoughts: true,
  976. thinkingLevel: "high",
  977. })
  978. })
  979. })
  980. describe("@ai-sdk/google-vertex", () => {
  981. test("gemini-2.5 returns high and max with thinkingConfig and thinkingBudget", () => {
  982. const model = createMockModel({
  983. id: "google-vertex/gemini-2.5-pro",
  984. providerID: "google-vertex",
  985. api: {
  986. id: "gemini-2.5-pro",
  987. url: "https://vertexai.googleapis.com",
  988. npm: "@ai-sdk/google-vertex",
  989. },
  990. })
  991. const result = ProviderTransform.variants(model)
  992. expect(Object.keys(result)).toEqual(["high", "max"])
  993. })
  994. test("other vertex models return low and high with thinkingLevel", () => {
  995. const model = createMockModel({
  996. id: "google-vertex/gemini-2.0-pro",
  997. providerID: "google-vertex",
  998. api: {
  999. id: "gemini-2.0-pro",
  1000. url: "https://vertexai.googleapis.com",
  1001. npm: "@ai-sdk/google-vertex",
  1002. },
  1003. })
  1004. const result = ProviderTransform.variants(model)
  1005. expect(Object.keys(result)).toEqual(["low", "high"])
  1006. })
  1007. })
  1008. describe("@ai-sdk/cohere", () => {
  1009. test("returns empty object", () => {
  1010. const model = createMockModel({
  1011. id: "cohere/command-r",
  1012. providerID: "cohere",
  1013. api: {
  1014. id: "command-r",
  1015. url: "https://api.cohere.com",
  1016. npm: "@ai-sdk/cohere",
  1017. },
  1018. })
  1019. const result = ProviderTransform.variants(model)
  1020. expect(result).toEqual({})
  1021. })
  1022. })
  1023. describe("@ai-sdk/groq", () => {
  1024. test("returns none and WIDELY_SUPPORTED_EFFORTS with thinkingLevel", () => {
  1025. const model = createMockModel({
  1026. id: "groq/llama-4",
  1027. providerID: "groq",
  1028. api: {
  1029. id: "llama-4-sc",
  1030. url: "https://api.groq.com",
  1031. npm: "@ai-sdk/groq",
  1032. },
  1033. })
  1034. const result = ProviderTransform.variants(model)
  1035. expect(Object.keys(result)).toEqual(["none", "low", "medium", "high"])
  1036. expect(result.none).toEqual({
  1037. includeThoughts: true,
  1038. thinkingLevel: "none",
  1039. })
  1040. expect(result.low).toEqual({
  1041. includeThoughts: true,
  1042. thinkingLevel: "low",
  1043. })
  1044. })
  1045. })
  1046. describe("@ai-sdk/perplexity", () => {
  1047. test("returns empty object", () => {
  1048. const model = createMockModel({
  1049. id: "perplexity/sonar-plus",
  1050. providerID: "perplexity",
  1051. api: {
  1052. id: "sonar-plus",
  1053. url: "https://api.perplexity.ai",
  1054. npm: "@ai-sdk/perplexity",
  1055. },
  1056. })
  1057. const result = ProviderTransform.variants(model)
  1058. expect(result).toEqual({})
  1059. })
  1060. })
  1061. })
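// Hedged summary of the dispatch the variant tests above outline:
// reasoning-effort presets appear to be keyed off the SDK package in
// model.api.npm. This table is a reading of the assertions, not the
// implementation; the constant is hypothetical.
const sketchVariantFamilies: Record<string, string[]> = {
  "@ai-sdk/gateway": ["none", "minimal", "low", "medium", "high", "xhigh"],
  "@ai-sdk/cerebras": ["low", "medium", "high"],
  "@ai-sdk/togetherai": ["low", "medium", "high"],
  "@ai-sdk/xai": ["low", "medium", "high"],
  "@ai-sdk/deepinfra": ["low", "medium", "high"],
  "@ai-sdk/openai-compatible": ["low", "medium", "high"],
  "@ai-sdk/amazon-bedrock": ["low", "medium", "high"],
  "@ai-sdk/anthropic": ["high", "max"], // thinking budgets rather than efforts
  "@ai-sdk/groq": ["none", "low", "medium", "high"],
}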