// ai-sdk.spec.ts
import { describe, expect, it, vitest } from "vitest"
import { Anthropic } from "@anthropic-ai/sdk"
import OpenAI from "openai"

import {
	convertToAiSdkMessages,
	convertToolsForAiSdk,
	processAiSdkStreamPart,
	mapToolChoice,
	extractAiSdkErrorMessage,
	handleAiSdkError,
	flattenAiSdkMessagesToStringContent,
} from "../ai-sdk"
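
// Mock the "ai" package so `tool` and `jsonSchema` behave as identity functions,
// letting the tests inspect converted tool definitions directly.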
vitest.mock("ai", () => ({
	tool: vitest.fn((t) => t),
	jsonSchema: vitest.fn((s) => s),
}))

describe("AI SDK conversion utilities", () => {
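	// convertToAiSdkMessages maps Anthropic MessageParam arrays onto AI SDK model
	// messages: text/image blocks become content parts, tool_use blocks become
	// "tool-call" parts, and tool_result blocks are emitted as separate
	// role: "tool" messages.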
	describe("convertToAiSdkMessages", () => {
		it("converts simple string messages", () => {
			const messages: Anthropic.Messages.MessageParam[] = [
				{ role: "user", content: "Hello" },
				{ role: "assistant", content: "Hi there" },
			]
			const result = convertToAiSdkMessages(messages)
			expect(result).toHaveLength(2)
			expect(result[0]).toEqual({ role: "user", content: "Hello" })
			expect(result[1]).toEqual({ role: "assistant", content: "Hi there" })
		})
		it("converts user messages with text content blocks", () => {
			const messages: Anthropic.Messages.MessageParam[] = [
				{
					role: "user",
					content: [{ type: "text", text: "Hello world" }],
				},
			]
			const result = convertToAiSdkMessages(messages)
			expect(result).toHaveLength(1)
			expect(result[0]).toEqual({
				role: "user",
				content: [{ type: "text", text: "Hello world" }],
			})
		})
		it("converts user messages with image content", () => {
			const messages: Anthropic.Messages.MessageParam[] = [
				{
					role: "user",
					content: [
						{ type: "text", text: "What is in this image?" },
						{
							type: "image",
							source: {
								type: "base64",
								media_type: "image/png",
								data: "base64encodeddata",
							},
						},
					],
				},
			]
			const result = convertToAiSdkMessages(messages)
			expect(result).toHaveLength(1)
			expect(result[0]).toEqual({
				role: "user",
				content: [
					{ type: "text", text: "What is in this image?" },
					{
						type: "image",
						image: "data:image/png;base64,base64encodeddata",
						mimeType: "image/png",
					},
				],
			})
		})
		it("converts user messages with URL image content", () => {
			const messages: Anthropic.Messages.MessageParam[] = [
				{
					role: "user",
					content: [
						{ type: "text", text: "What is in this image?" },
						{
							type: "image",
							source: {
								type: "url",
								url: "https://example.com/image.png",
							},
						} as any,
					],
				},
			]
			const result = convertToAiSdkMessages(messages)
			expect(result).toHaveLength(1)
			expect(result[0]).toEqual({
				role: "user",
				content: [
					{ type: "text", text: "What is in this image?" },
					{
						type: "image",
						image: "https://example.com/image.png",
					},
				],
			})
		})
		it("converts tool results into separate tool role messages with resolved tool names", () => {
			const messages: Anthropic.Messages.MessageParam[] = [
				{
					role: "assistant",
					content: [
						{
							type: "tool_use",
							id: "call_123",
							name: "read_file",
							input: { path: "test.ts" },
						},
					],
				},
				{
					role: "user",
					content: [
						{
							type: "tool_result",
							tool_use_id: "call_123",
							content: "Tool result content",
						},
					],
				},
			]
			const result = convertToAiSdkMessages(messages)
			expect(result).toHaveLength(2)
			expect(result[0]).toEqual({
				role: "assistant",
				content: [
					{
						type: "tool-call",
						toolCallId: "call_123",
						toolName: "read_file",
						input: { path: "test.ts" },
					},
				],
			})
			// Tool results now go to role: "tool" messages per AI SDK v6 schema
			expect(result[1]).toEqual({
				role: "tool",
				content: [
					{
						type: "tool-result",
						toolCallId: "call_123",
						toolName: "read_file",
						output: { type: "text", value: "Tool result content" },
					},
				],
			})
		})
		it("uses unknown_tool for tool results without matching tool call", () => {
			const messages: Anthropic.Messages.MessageParam[] = [
				{
					role: "user",
					content: [
						{
							type: "tool_result",
							tool_use_id: "call_orphan",
							content: "Orphan result",
						},
					],
				},
			]
			const result = convertToAiSdkMessages(messages)
			expect(result).toHaveLength(1)
			// Tool results go to role: "tool" messages
			expect(result[0]).toEqual({
				role: "tool",
				content: [
					{
						type: "tool-result",
						toolCallId: "call_orphan",
						toolName: "unknown_tool",
						output: { type: "text", value: "Orphan result" },
					},
				],
			})
		})
		it("separates tool results and text content into different messages", () => {
			const messages: Anthropic.Messages.MessageParam[] = [
				{
					role: "assistant",
					content: [
						{
							type: "tool_use",
							id: "call_123",
							name: "read_file",
							input: { path: "test.ts" },
						},
					],
				},
				{
					role: "user",
					content: [
						{
							type: "tool_result",
							tool_use_id: "call_123",
							content: "File contents here",
						},
						{
							type: "text",
							text: "Please analyze this file",
						},
					],
				},
			]
			const result = convertToAiSdkMessages(messages)
			expect(result).toHaveLength(3)
			expect(result[0]).toEqual({
				role: "assistant",
				content: [
					{
						type: "tool-call",
						toolCallId: "call_123",
						toolName: "read_file",
						input: { path: "test.ts" },
					},
				],
			})
			// Tool results go first in a "tool" message
			expect(result[1]).toEqual({
				role: "tool",
				content: [
					{
						type: "tool-result",
						toolCallId: "call_123",
						toolName: "read_file",
						output: { type: "text", value: "File contents here" },
					},
				],
			})
			// Text content goes in a separate "user" message
			expect(result[2]).toEqual({
				role: "user",
				content: [{ type: "text", text: "Please analyze this file" }],
			})
		})
		it("converts assistant messages with tool use", () => {
			const messages: Anthropic.Messages.MessageParam[] = [
				{
					role: "assistant",
					content: [
						{ type: "text", text: "Let me read that file" },
						{
							type: "tool_use",
							id: "call_456",
							name: "read_file",
							input: { path: "test.ts" },
						},
					],
				},
			]
			const result = convertToAiSdkMessages(messages)
			expect(result).toHaveLength(1)
			expect(result[0]).toEqual({
				role: "assistant",
				content: [
					{ type: "text", text: "Let me read that file" },
					{
						type: "tool-call",
						toolCallId: "call_456",
						toolName: "read_file",
						input: { path: "test.ts" },
					},
				],
			})
		})
		it("handles empty assistant content", () => {
			const messages: Anthropic.Messages.MessageParam[] = [
				{
					role: "assistant",
					content: [],
				},
			]
			const result = convertToAiSdkMessages(messages)
			expect(result).toHaveLength(1)
			expect(result[0]).toEqual({
				role: "assistant",
				content: [{ type: "text", text: "" }],
			})
		})
		it("converts assistant reasoning blocks", () => {
			const messages: Anthropic.Messages.MessageParam[] = [
				{
					role: "assistant",
					content: [
						{ type: "reasoning" as any, text: "Thinking..." },
						{ type: "text", text: "Answer" },
					],
				},
			]
			const result = convertToAiSdkMessages(messages)
			expect(result).toHaveLength(1)
			expect(result[0]).toEqual({
				role: "assistant",
				content: [
					{ type: "reasoning", text: "Thinking..." },
					{ type: "text", text: "Answer" },
				],
			})
		})
		it("converts assistant thinking blocks to reasoning", () => {
			const messages: Anthropic.Messages.MessageParam[] = [
				{
					role: "assistant",
					content: [
						{ type: "thinking" as any, thinking: "Deep thought", signature: "sig" },
						{ type: "text", text: "OK" },
					],
				},
			]
			const result = convertToAiSdkMessages(messages)
			expect(result).toHaveLength(1)
			expect(result[0]).toEqual({
				role: "assistant",
				content: [
					{
						type: "reasoning",
						text: "Deep thought",
						providerOptions: {
							bedrock: { signature: "sig" },
							anthropic: { signature: "sig" },
						},
					},
					{ type: "text", text: "OK" },
				],
			})
		})
		it("converts assistant message-level reasoning_content to reasoning part", () => {
			const messages: Anthropic.Messages.MessageParam[] = [
				{
					role: "assistant",
					content: [{ type: "text", text: "Answer" }],
					reasoning_content: "Thinking...",
				} as any,
			]
			const result = convertToAiSdkMessages(messages)
			expect(result).toHaveLength(1)
			expect(result[0]).toEqual({
				role: "assistant",
				content: [
					{ type: "reasoning", text: "Thinking..." },
					{ type: "text", text: "Answer" },
				],
			})
		})
		it("prefers message-level reasoning_content over reasoning blocks", () => {
			const messages: Anthropic.Messages.MessageParam[] = [
				{
					role: "assistant",
					content: [
						{ type: "reasoning" as any, text: "BLOCK" },
						{ type: "text", text: "Answer" },
					],
					reasoning_content: "MSG",
				} as any,
			]
			const result = convertToAiSdkMessages(messages)
			expect(result).toHaveLength(1)
			expect(result[0]).toEqual({
				role: "assistant",
				content: [
					{ type: "reasoning", text: "MSG" },
					{ type: "text", text: "Answer" },
				],
			})
		})
		it("attaches thoughtSignature to first tool-call part for Gemini 3 round-tripping", () => {
			const messages: Anthropic.Messages.MessageParam[] = [
				{
					role: "assistant",
					content: [
						{ type: "text", text: "Let me check that." },
						{
							type: "tool_use",
							id: "tool-1",
							name: "read_file",
							input: { path: "test.txt" },
						},
						{ type: "thoughtSignature", thoughtSignature: "encrypted-sig-abc" } as any,
					],
				},
			]
			const result = convertToAiSdkMessages(messages)
			expect(result).toHaveLength(1)
			const assistantMsg = result[0]
			expect(assistantMsg.role).toBe("assistant")
			const content = assistantMsg.content as any[]
			expect(content).toHaveLength(2) // text + tool-call (thoughtSignature block is consumed, not passed through)
			const toolCallPart = content.find((p: any) => p.type === "tool-call")
			expect(toolCallPart).toBeDefined()
			expect(toolCallPart.providerOptions).toEqual({
				google: { thoughtSignature: "encrypted-sig-abc" },
				vertex: { thoughtSignature: "encrypted-sig-abc" },
			})
		})
		it("attaches thoughtSignature only to the first tool-call in parallel calls", () => {
			const messages: Anthropic.Messages.MessageParam[] = [
				{
					role: "assistant",
					content: [
						{
							type: "tool_use",
							id: "tool-1",
							name: "get_weather",
							input: { city: "Paris" },
						},
						{
							type: "tool_use",
							id: "tool-2",
							name: "get_weather",
							input: { city: "London" },
						},
						{ type: "thoughtSignature", thoughtSignature: "sig-parallel" } as any,
					],
				},
			]
			const result = convertToAiSdkMessages(messages)
			const content = (result[0] as any).content as any[]
			const toolCalls = content.filter((p: any) => p.type === "tool-call")
			expect(toolCalls).toHaveLength(2)
			// Only the first tool call should have the signature
			expect(toolCalls[0].providerOptions).toEqual({
				google: { thoughtSignature: "sig-parallel" },
				vertex: { thoughtSignature: "sig-parallel" },
			})
			// Second tool call should NOT have the signature
			expect(toolCalls[1].providerOptions).toBeUndefined()
		})
		it("does not attach providerOptions when no thoughtSignature block is present", () => {
			const messages: Anthropic.Messages.MessageParam[] = [
				{
					role: "assistant",
					content: [
						{ type: "text", text: "Using tool" },
						{
							type: "tool_use",
							id: "tool-1",
							name: "read_file",
							input: { path: "test.txt" },
						},
					],
				},
			]
			const result = convertToAiSdkMessages(messages)
			const content = (result[0] as any).content as any[]
			const toolCallPart = content.find((p: any) => p.type === "tool-call")
			expect(toolCallPart).toBeDefined()
			expect(toolCallPart.providerOptions).toBeUndefined()
		})
	})
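	// convertToolsForAiSdk turns OpenAI-style function tool definitions into the
	// AI SDK's keyed tool map, returning undefined when no tools are provided.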
	describe("convertToolsForAiSdk", () => {
		it("returns undefined for empty tools", () => {
			expect(convertToolsForAiSdk(undefined)).toBeUndefined()
			expect(convertToolsForAiSdk([])).toBeUndefined()
		})
		it("converts function tools to AI SDK format", () => {
			const tools: OpenAI.Chat.ChatCompletionTool[] = [
				{
					type: "function",
					function: {
						name: "read_file",
						description: "Read a file from disk",
						parameters: {
							type: "object",
							properties: {
								path: { type: "string", description: "File path" },
							},
							required: ["path"],
						},
					},
				},
			]
			const result = convertToolsForAiSdk(tools)
			expect(result).toBeDefined()
			expect(result!.read_file).toBeDefined()
			expect(result!.read_file.description).toBe("Read a file from disk")
		})
		it("converts multiple tools", () => {
			const tools: OpenAI.Chat.ChatCompletionTool[] = [
				{
					type: "function",
					function: {
						name: "read_file",
						description: "Read a file",
						parameters: {},
					},
				},
				{
					type: "function",
					function: {
						name: "write_file",
						description: "Write a file",
						parameters: {},
					},
				},
			]
			const result = convertToolsForAiSdk(tools)
			expect(result).toBeDefined()
			expect(Object.keys(result!)).toHaveLength(2)
			expect(result!.read_file).toBeDefined()
			expect(result!.write_file).toBeDefined()
		})
	})
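	// processAiSdkStreamPart is a generator that translates AI SDK stream parts
	// (text/reasoning deltas, tool input events, sources, errors) into the
	// provider-agnostic chunk objects asserted below, ignoring lifecycle events.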
	describe("processAiSdkStreamPart", () => {
		it("processes text-delta chunks", () => {
			const part = { type: "text-delta" as const, id: "1", text: "Hello" }
			const chunks = [...processAiSdkStreamPart(part)]
			expect(chunks).toHaveLength(1)
			expect(chunks[0]).toEqual({ type: "text", text: "Hello" })
		})
		it("processes text chunks (fullStream format)", () => {
			const part = { type: "text" as const, text: "Hello from fullStream" }
			const chunks = [...processAiSdkStreamPart(part as any)]
			expect(chunks).toHaveLength(1)
			expect(chunks[0]).toEqual({ type: "text", text: "Hello from fullStream" })
		})
		it("processes reasoning-delta chunks", () => {
			const part = { type: "reasoning-delta" as const, id: "1", text: "thinking..." }
			const chunks = [...processAiSdkStreamPart(part)]
			expect(chunks).toHaveLength(1)
			expect(chunks[0]).toEqual({ type: "reasoning", text: "thinking..." })
		})
		it("processes reasoning chunks (fullStream format)", () => {
			const part = { type: "reasoning" as const, text: "reasoning from fullStream" }
			const chunks = [...processAiSdkStreamPart(part as any)]
			expect(chunks).toHaveLength(1)
			expect(chunks[0]).toEqual({ type: "reasoning", text: "reasoning from fullStream" })
		})
		it("processes tool-input-start chunks", () => {
			const part = { type: "tool-input-start" as const, id: "call_1", toolName: "read_file" }
			const chunks = [...processAiSdkStreamPart(part)]
			expect(chunks).toHaveLength(1)
			expect(chunks[0]).toEqual({ type: "tool_call_start", id: "call_1", name: "read_file" })
		})
		it("processes tool-input-delta chunks", () => {
			const part = { type: "tool-input-delta" as const, id: "call_1", delta: '{"path":' }
			const chunks = [...processAiSdkStreamPart(part)]
			expect(chunks).toHaveLength(1)
			expect(chunks[0]).toEqual({ type: "tool_call_delta", id: "call_1", delta: '{"path":' })
		})
		it("processes tool-input-end chunks", () => {
			const part = { type: "tool-input-end" as const, id: "call_1" }
			const chunks = [...processAiSdkStreamPart(part)]
			expect(chunks).toHaveLength(1)
			expect(chunks[0]).toEqual({ type: "tool_call_end", id: "call_1" })
		})
		it("ignores tool-call chunks to prevent duplicate tools in UI", () => {
			// tool-call is intentionally ignored because tool-input-start/delta/end already
			// provide complete tool call information. Emitting tool-call would cause duplicate
			// tools in the UI for AI SDK providers (e.g., DeepSeek, Moonshot).
			const part = {
				type: "tool-call" as const,
				toolCallId: "call_1",
				toolName: "read_file",
				input: { path: "test.ts" },
			}
			const chunks = [...processAiSdkStreamPart(part)]
			expect(chunks).toHaveLength(0)
		})
		it("processes source chunks with URL", () => {
			const part = {
				type: "source" as const,
				url: "https://example.com",
				title: "Example Source",
			}
			const chunks = [...processAiSdkStreamPart(part as any)]
			expect(chunks).toHaveLength(1)
			expect(chunks[0]).toEqual({
				type: "grounding",
				sources: [
					{
						title: "Example Source",
						url: "https://example.com",
						snippet: undefined,
					},
				],
			})
		})
		it("processes error chunks", () => {
			const part = { type: "error" as const, error: new Error("Test error") }
			const chunks = [...processAiSdkStreamPart(part)]
			expect(chunks).toHaveLength(1)
			expect(chunks[0]).toEqual({
				type: "error",
				error: "StreamError",
				message: "Test error",
			})
		})
		it("ignores lifecycle events", () => {
			const lifecycleEvents = [
				{ type: "text-start" as const },
				{ type: "text-end" as const },
				{ type: "reasoning-start" as const },
				{ type: "reasoning-end" as const },
				{ type: "start-step" as const },
				{ type: "finish-step" as const },
				{ type: "start" as const },
				{ type: "finish" as const },
				{ type: "abort" as const },
			]
			for (const event of lifecycleEvents) {
				const chunks = [...processAiSdkStreamPart(event as any)]
				expect(chunks).toHaveLength(0)
			}
		})
	})
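	// mapToolChoice normalizes OpenAI-style tool_choice values (strings or
	// { type: "function" } objects) into the AI SDK tool choice shape.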
	describe("mapToolChoice", () => {
		it("should return undefined for null or undefined", () => {
			expect(mapToolChoice(null)).toBeUndefined()
			expect(mapToolChoice(undefined)).toBeUndefined()
		})
		it("should handle string tool choices", () => {
			expect(mapToolChoice("auto")).toBe("auto")
			expect(mapToolChoice("none")).toBe("none")
			expect(mapToolChoice("required")).toBe("required")
		})
		it("should return auto for unknown string values", () => {
			expect(mapToolChoice("unknown")).toBe("auto")
			expect(mapToolChoice("invalid")).toBe("auto")
		})
		it("should handle object tool choice with function name", () => {
			const result = mapToolChoice({
				type: "function",
				function: { name: "my_tool" },
			})
			expect(result).toEqual({ type: "tool", toolName: "my_tool" })
		})
		it("should return undefined for object without function name", () => {
			const result = mapToolChoice({
				type: "function",
				function: {},
			})
			expect(result).toBeUndefined()
		})
		it("should return undefined for object with non-function type", () => {
			const result = mapToolChoice({
				type: "other",
				function: { name: "my_tool" },
			})
			expect(result).toBeUndefined()
		})
	})
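	// extractAiSdkErrorMessage builds a readable message from AI SDK errors,
	// including retry and API-call errors that carry an HTTP status code.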
	describe("extractAiSdkErrorMessage", () => {
		it("should return 'Unknown error' for null/undefined", () => {
			expect(extractAiSdkErrorMessage(null)).toBe("Unknown error")
			expect(extractAiSdkErrorMessage(undefined)).toBe("Unknown error")
		})
		it("should extract message from AI_RetryError", () => {
			const retryError = {
				name: "AI_RetryError",
				message: "Failed after 3 attempts",
				errors: [new Error("Error 1"), new Error("Error 2"), new Error("Too Many Requests")],
				lastError: { message: "Too Many Requests", status: 429 },
			}
			const result = extractAiSdkErrorMessage(retryError)
			expect(result).toBe("Failed after 3 attempts (429): Too Many Requests")
		})
		it("should handle AI_RetryError without status", () => {
			const retryError = {
				name: "AI_RetryError",
				message: "Failed after 2 attempts",
				errors: [new Error("Error 1"), new Error("Connection failed")],
				lastError: { message: "Connection failed" },
			}
			const result = extractAiSdkErrorMessage(retryError)
			expect(result).toBe("Failed after 2 attempts: Connection failed")
		})
		it("should extract message from AI_APICallError", () => {
			const apiError = {
				name: "AI_APICallError",
				message: "Rate limit exceeded",
				status: 429,
			}
			const result = extractAiSdkErrorMessage(apiError)
			expect(result).toBe("API Error (429): Rate limit exceeded")
		})
		it("should handle AI_APICallError without status", () => {
			const apiError = {
				name: "AI_APICallError",
				message: "Connection timeout",
			}
			const result = extractAiSdkErrorMessage(apiError)
			expect(result).toBe("Connection timeout")
		})
		it("should extract message from standard Error", () => {
			const error = new Error("Something went wrong")
			expect(extractAiSdkErrorMessage(error)).toBe("Something went wrong")
		})
		it("should convert non-Error to string", () => {
			expect(extractAiSdkErrorMessage("string error")).toBe("string error")
			expect(extractAiSdkErrorMessage({ custom: "object" })).toBe("[object Object]")
		})
	})
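	// handleAiSdkError wraps an error with the provider name while preserving
	// the HTTP status and the original error as `cause`.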
	describe("handleAiSdkError", () => {
		it("should wrap error with provider name", () => {
			const error = new Error("API Error")
			const result = handleAiSdkError(error, "Fireworks")
			expect(result.message).toBe("Fireworks: API Error")
		})
		it("should preserve status code from AI_RetryError", () => {
			const retryError = {
				name: "AI_RetryError",
				errors: [new Error("Too Many Requests")],
				lastError: { message: "Too Many Requests", status: 429 },
			}
			const result = handleAiSdkError(retryError, "Groq")
			expect(result.message).toContain("Groq:")
			expect(result.message).toContain("429")
			expect((result as any).status).toBe(429)
		})
		it("should preserve status code from AI_APICallError", () => {
			const apiError = {
				name: "AI_APICallError",
				message: "Unauthorized",
				status: 401,
			}
			const result = handleAiSdkError(apiError, "DeepSeek")
			expect(result.message).toContain("DeepSeek:")
			expect(result.message).toContain("401")
			expect((result as any).status).toBe(401)
		})
		it("should preserve original error as cause", () => {
			const originalError = new Error("Original error")
			const result = handleAiSdkError(originalError, "Cerebras")
			expect((result as any).cause).toBe(originalError)
		})
	})
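	// flattenAiSdkMessagesToStringContent collapses text-only content arrays into
	// plain strings (dropping reasoning parts) for providers that expect string
	// content; messages with images, tool calls, or tool results are left as-is.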
	describe("flattenAiSdkMessagesToStringContent", () => {
		it("should return messages unchanged if content is already a string", () => {
			const messages = [
				{ role: "user" as const, content: "Hello" },
				{ role: "assistant" as const, content: "Hi there" },
			]
			const result = flattenAiSdkMessagesToStringContent(messages)
			expect(result).toEqual(messages)
		})
		it("should flatten user messages with only text parts to string", () => {
			const messages = [
				{
					role: "user" as const,
					content: [
						{ type: "text" as const, text: "Hello" },
						{ type: "text" as const, text: "World" },
					],
				},
			]
			const result = flattenAiSdkMessagesToStringContent(messages)
			expect(result).toHaveLength(1)
			expect(result[0].role).toBe("user")
			expect(result[0].content).toBe("Hello\nWorld")
		})
		it("should flatten assistant messages with only text parts to string", () => {
			const messages = [
				{
					role: "assistant" as const,
					content: [{ type: "text" as const, text: "I am an assistant" }],
				},
			]
			const result = flattenAiSdkMessagesToStringContent(messages)
			expect(result).toHaveLength(1)
			expect(result[0].role).toBe("assistant")
			expect(result[0].content).toBe("I am an assistant")
		})
		it("should not flatten user messages with image parts", () => {
			const messages = [
				{
					role: "user" as const,
					content: [
						{ type: "text" as const, text: "Look at this" },
						{ type: "image" as const, image: "data:image/png;base64,abc123" },
					],
				},
			]
			const result = flattenAiSdkMessagesToStringContent(messages)
			expect(result).toEqual(messages)
		})
		it("should not flatten assistant messages with tool calls", () => {
			const messages = [
				{
					role: "assistant" as const,
					content: [
						{ type: "text" as const, text: "Let me use a tool" },
						{
							type: "tool-call" as const,
							toolCallId: "123",
							toolName: "read_file",
							input: { path: "test.txt" },
						},
					],
				},
			]
			const result = flattenAiSdkMessagesToStringContent(messages)
			expect(result).toEqual(messages)
		})
		it("should not flatten tool role messages", () => {
			const messages = [
				{
					role: "tool" as const,
					content: [
						{
							type: "tool-result" as const,
							toolCallId: "123",
							toolName: "test",
							output: { type: "text" as const, value: "result" },
						},
					],
				},
			] as any
			const result = flattenAiSdkMessagesToStringContent(messages)
			expect(result).toEqual(messages)
		})
		it("should respect flattenUserMessages option", () => {
			const messages = [
				{
					role: "user" as const,
					content: [{ type: "text" as const, text: "Hello" }],
				},
			]
			const result = flattenAiSdkMessagesToStringContent(messages, { flattenUserMessages: false })
			expect(result).toEqual(messages)
		})
		it("should respect flattenAssistantMessages option", () => {
			const messages = [
				{
					role: "assistant" as const,
					content: [{ type: "text" as const, text: "Hi" }],
				},
			]
			const result = flattenAiSdkMessagesToStringContent(messages, { flattenAssistantMessages: false })
			expect(result).toEqual(messages)
		})
		it("should handle mixed message types correctly", () => {
			const messages = [
				{ role: "user" as const, content: "Simple string" },
				{
					role: "user" as const,
					content: [{ type: "text" as const, text: "Text parts" }],
				},
				{
					role: "assistant" as const,
					content: [{ type: "text" as const, text: "Assistant text" }],
				},
				{
					role: "assistant" as const,
					content: [
						{ type: "text" as const, text: "With tool" },
						{ type: "tool-call" as const, toolCallId: "456", toolName: "test", input: {} },
					],
				},
			]
			const result = flattenAiSdkMessagesToStringContent(messages)
			expect(result[0].content).toBe("Simple string") // unchanged
			expect(result[1].content).toBe("Text parts") // flattened
			expect(result[2].content).toBe("Assistant text") // flattened
			expect(result[3]).toEqual(messages[3]) // unchanged (has tool call)
		})
		it("should handle empty text parts", () => {
			const messages = [
				{
					role: "user" as const,
					content: [
						{ type: "text" as const, text: "" },
						{ type: "text" as const, text: "Hello" },
					],
				},
			]
			const result = flattenAiSdkMessagesToStringContent(messages)
			expect(result[0].content).toBe("\nHello")
		})
		it("should strip reasoning parts and flatten text for string-only models", () => {
			const messages = [
				{
					role: "assistant" as const,
					content: [
						{ type: "reasoning" as const, text: "I am thinking about this..." },
						{ type: "text" as const, text: "Here is my answer" },
					],
				},
			]
			const result = flattenAiSdkMessagesToStringContent(messages)
			// Reasoning should be stripped, only text should remain
			expect(result[0].content).toBe("Here is my answer")
		})
		it("should handle messages with only reasoning parts", () => {
			const messages = [
				{
					role: "assistant" as const,
					content: [{ type: "reasoning" as const, text: "Only reasoning, no text" }],
				},
			]
			const result = flattenAiSdkMessagesToStringContent(messages)
			// Should flatten to empty string when only reasoning is present
			expect(result[0].content).toBe("")
		})
		it("should not flatten if tool calls are present with reasoning", () => {
			const messages = [
				{
					role: "assistant" as const,
					content: [
						{ type: "reasoning" as const, text: "Thinking..." },
						{ type: "text" as const, text: "Using tool" },
						{ type: "tool-call" as const, toolCallId: "abc", toolName: "test", input: {} },
					],
				},
			]
			const result = flattenAiSdkMessagesToStringContent(messages)
			// Should not flatten because there's a tool call
			expect(result[0]).toEqual(messages[0])
		})
	})
})