import { Anthropic } from "@anthropic-ai/sdk"
import OpenAI from "openai"
// `describe`/`it`/`expect`/`vitest` may also be provided as globals by the Vitest config.
import { describe, expect, it, vitest } from "vitest"

import {
	convertToAiSdkMessages,
	convertToolsForAiSdk,
	processAiSdkStreamPart,
	mapToolChoice,
	extractAiSdkErrorMessage,
	handleAiSdkError,
	flattenAiSdkMessagesToStringContent,
} from "../ai-sdk"

vitest.mock("ai", () => ({
	tool: vitest.fn((t) => t),
	jsonSchema: vitest.fn((s) => s),
}))
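// The "ai" module is mocked so that `tool()` and `jsonSchema()` pass their arguments through
// unchanged. This keeps the tests below independent of the real AI SDK runtime and lets them
// assert directly on converted tool definitions (e.g. `result!.read_file.description`).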
- describe("AI SDK conversion utilities", () => {
- describe("convertToAiSdkMessages", () => {
- it("converts simple string messages", () => {
- const messages: Anthropic.Messages.MessageParam[] = [
- { role: "user", content: "Hello" },
- { role: "assistant", content: "Hi there" },
- ]
- const result = convertToAiSdkMessages(messages)
- expect(result).toHaveLength(2)
- expect(result[0]).toEqual({ role: "user", content: "Hello" })
- expect(result[1]).toEqual({ role: "assistant", content: "Hi there" })
- })
- it("converts user messages with text content blocks", () => {
- const messages: Anthropic.Messages.MessageParam[] = [
- {
- role: "user",
- content: [{ type: "text", text: "Hello world" }],
- },
- ]
- const result = convertToAiSdkMessages(messages)
- expect(result).toHaveLength(1)
- expect(result[0]).toEqual({
- role: "user",
- content: [{ type: "text", text: "Hello world" }],
- })
- })
- it("converts user messages with image content", () => {
- const messages: Anthropic.Messages.MessageParam[] = [
- {
- role: "user",
- content: [
- { type: "text", text: "What is in this image?" },
- {
- type: "image",
- source: {
- type: "base64",
- media_type: "image/png",
- data: "base64encodeddata",
- },
- },
- ],
- },
- ]
- const result = convertToAiSdkMessages(messages)
- expect(result).toHaveLength(1)
- expect(result[0]).toEqual({
- role: "user",
- content: [
- { type: "text", text: "What is in this image?" },
- {
- type: "image",
- image: "data:image/png;base64,base64encodeddata",
- mimeType: "image/png",
- },
- ],
- })
- })
- it("converts user messages with URL image content", () => {
- const messages: Anthropic.Messages.MessageParam[] = [
- {
- role: "user",
- content: [
- { type: "text", text: "What is in this image?" },
- {
- type: "image",
- source: {
- type: "url",
- url: "https://example.com/image.png",
- },
- } as any,
- ],
- },
- ]
- const result = convertToAiSdkMessages(messages)
- expect(result).toHaveLength(1)
- expect(result[0]).toEqual({
- role: "user",
- content: [
- { type: "text", text: "What is in this image?" },
- {
- type: "image",
- image: "https://example.com/image.png",
- },
- ],
- })
- })
- it("converts tool results into separate tool role messages with resolved tool names", () => {
- const messages: Anthropic.Messages.MessageParam[] = [
- {
- role: "assistant",
- content: [
- {
- type: "tool_use",
- id: "call_123",
- name: "read_file",
- input: { path: "test.ts" },
- },
- ],
- },
- {
- role: "user",
- content: [
- {
- type: "tool_result",
- tool_use_id: "call_123",
- content: "Tool result content",
- },
- ],
- },
- ]
- const result = convertToAiSdkMessages(messages)
- expect(result).toHaveLength(2)
- expect(result[0]).toEqual({
- role: "assistant",
- content: [
- {
- type: "tool-call",
- toolCallId: "call_123",
- toolName: "read_file",
- input: { path: "test.ts" },
- },
- ],
- })
- // Tool results now go to role: "tool" messages per AI SDK v6 schema
- expect(result[1]).toEqual({
- role: "tool",
- content: [
- {
- type: "tool-result",
- toolCallId: "call_123",
- toolName: "read_file",
- output: { type: "text", value: "Tool result content" },
- },
- ],
- })
- })
- it("uses unknown_tool for tool results without matching tool call", () => {
- const messages: Anthropic.Messages.MessageParam[] = [
- {
- role: "user",
- content: [
- {
- type: "tool_result",
- tool_use_id: "call_orphan",
- content: "Orphan result",
- },
- ],
- },
- ]
- const result = convertToAiSdkMessages(messages)
- expect(result).toHaveLength(1)
- // Tool results go to role: "tool" messages
- expect(result[0]).toEqual({
- role: "tool",
- content: [
- {
- type: "tool-result",
- toolCallId: "call_orphan",
- toolName: "unknown_tool",
- output: { type: "text", value: "Orphan result" },
- },
- ],
- })
- })
- it("separates tool results and text content into different messages", () => {
- const messages: Anthropic.Messages.MessageParam[] = [
- {
- role: "assistant",
- content: [
- {
- type: "tool_use",
- id: "call_123",
- name: "read_file",
- input: { path: "test.ts" },
- },
- ],
- },
- {
- role: "user",
- content: [
- {
- type: "tool_result",
- tool_use_id: "call_123",
- content: "File contents here",
- },
- {
- type: "text",
- text: "Please analyze this file",
- },
- ],
- },
- ]
- const result = convertToAiSdkMessages(messages)
- expect(result).toHaveLength(3)
- expect(result[0]).toEqual({
- role: "assistant",
- content: [
- {
- type: "tool-call",
- toolCallId: "call_123",
- toolName: "read_file",
- input: { path: "test.ts" },
- },
- ],
- })
- // Tool results go first in a "tool" message
- expect(result[1]).toEqual({
- role: "tool",
- content: [
- {
- type: "tool-result",
- toolCallId: "call_123",
- toolName: "read_file",
- output: { type: "text", value: "File contents here" },
- },
- ],
- })
- // Text content goes in a separate "user" message
- expect(result[2]).toEqual({
- role: "user",
- content: [{ type: "text", text: "Please analyze this file" }],
- })
- })
- it("converts assistant messages with tool use", () => {
- const messages: Anthropic.Messages.MessageParam[] = [
- {
- role: "assistant",
- content: [
- { type: "text", text: "Let me read that file" },
- {
- type: "tool_use",
- id: "call_456",
- name: "read_file",
- input: { path: "test.ts" },
- },
- ],
- },
- ]
- const result = convertToAiSdkMessages(messages)
- expect(result).toHaveLength(1)
- expect(result[0]).toEqual({
- role: "assistant",
- content: [
- { type: "text", text: "Let me read that file" },
- {
- type: "tool-call",
- toolCallId: "call_456",
- toolName: "read_file",
- input: { path: "test.ts" },
- },
- ],
- })
- })
- it("handles empty assistant content", () => {
- const messages: Anthropic.Messages.MessageParam[] = [
- {
- role: "assistant",
- content: [],
- },
- ]
- const result = convertToAiSdkMessages(messages)
- expect(result).toHaveLength(1)
- expect(result[0]).toEqual({
- role: "assistant",
- content: [{ type: "text", text: "" }],
- })
- })
- it("converts assistant reasoning blocks", () => {
- const messages: Anthropic.Messages.MessageParam[] = [
- {
- role: "assistant",
- content: [
- { type: "reasoning" as any, text: "Thinking..." },
- { type: "text", text: "Answer" },
- ],
- },
- ]
- const result = convertToAiSdkMessages(messages)
- expect(result).toHaveLength(1)
- expect(result[0]).toEqual({
- role: "assistant",
- content: [
- { type: "reasoning", text: "Thinking..." },
- { type: "text", text: "Answer" },
- ],
- })
- })
- it("converts assistant thinking blocks to reasoning", () => {
- const messages: Anthropic.Messages.MessageParam[] = [
- {
- role: "assistant",
- content: [
- { type: "thinking" as any, thinking: "Deep thought", signature: "sig" },
- { type: "text", text: "OK" },
- ],
- },
- ]
- const result = convertToAiSdkMessages(messages)
- expect(result).toHaveLength(1)
- expect(result[0]).toEqual({
- role: "assistant",
- content: [
- {
- type: "reasoning",
- text: "Deep thought",
- providerOptions: {
- bedrock: { signature: "sig" },
- anthropic: { signature: "sig" },
- },
- },
- { type: "text", text: "OK" },
- ],
- })
- })
- it("converts assistant message-level reasoning_content to reasoning part", () => {
- const messages: Anthropic.Messages.MessageParam[] = [
- {
- role: "assistant",
- content: [{ type: "text", text: "Answer" }],
- reasoning_content: "Thinking...",
- } as any,
- ]
- const result = convertToAiSdkMessages(messages)
- expect(result).toHaveLength(1)
- expect(result[0]).toEqual({
- role: "assistant",
- content: [
- { type: "reasoning", text: "Thinking..." },
- { type: "text", text: "Answer" },
- ],
- })
- })
- it("prefers message-level reasoning_content over reasoning blocks", () => {
- const messages: Anthropic.Messages.MessageParam[] = [
- {
- role: "assistant",
- content: [
- { type: "reasoning" as any, text: "BLOCK" },
- { type: "text", text: "Answer" },
- ],
- reasoning_content: "MSG",
- } as any,
- ]
- const result = convertToAiSdkMessages(messages)
- expect(result).toHaveLength(1)
- expect(result[0]).toEqual({
- role: "assistant",
- content: [
- { type: "reasoning", text: "MSG" },
- { type: "text", text: "Answer" },
- ],
- })
- })
- it("attaches thoughtSignature to first tool-call part for Gemini 3 round-tripping", () => {
- const messages: Anthropic.Messages.MessageParam[] = [
- {
- role: "assistant",
- content: [
- { type: "text", text: "Let me check that." },
- {
- type: "tool_use",
- id: "tool-1",
- name: "read_file",
- input: { path: "test.txt" },
- },
- { type: "thoughtSignature", thoughtSignature: "encrypted-sig-abc" } as any,
- ],
- },
- ]
- const result = convertToAiSdkMessages(messages)
- expect(result).toHaveLength(1)
- const assistantMsg = result[0]
- expect(assistantMsg.role).toBe("assistant")
- const content = assistantMsg.content as any[]
- expect(content).toHaveLength(2) // text + tool-call (thoughtSignature block is consumed, not passed through)
- const toolCallPart = content.find((p: any) => p.type === "tool-call")
- expect(toolCallPart).toBeDefined()
- expect(toolCallPart.providerOptions).toEqual({
- google: { thoughtSignature: "encrypted-sig-abc" },
- vertex: { thoughtSignature: "encrypted-sig-abc" },
- })
- })
- it("attaches thoughtSignature only to the first tool-call in parallel calls", () => {
- const messages: Anthropic.Messages.MessageParam[] = [
- {
- role: "assistant",
- content: [
- {
- type: "tool_use",
- id: "tool-1",
- name: "get_weather",
- input: { city: "Paris" },
- },
- {
- type: "tool_use",
- id: "tool-2",
- name: "get_weather",
- input: { city: "London" },
- },
- { type: "thoughtSignature", thoughtSignature: "sig-parallel" } as any,
- ],
- },
- ]
- const result = convertToAiSdkMessages(messages)
- const content = (result[0] as any).content as any[]
- const toolCalls = content.filter((p: any) => p.type === "tool-call")
- expect(toolCalls).toHaveLength(2)
- // Only the first tool call should have the signature
- expect(toolCalls[0].providerOptions).toEqual({
- google: { thoughtSignature: "sig-parallel" },
- vertex: { thoughtSignature: "sig-parallel" },
- })
- // Second tool call should NOT have the signature
- expect(toolCalls[1].providerOptions).toBeUndefined()
- })
- it("does not attach providerOptions when no thoughtSignature block is present", () => {
- const messages: Anthropic.Messages.MessageParam[] = [
- {
- role: "assistant",
- content: [
- { type: "text", text: "Using tool" },
- {
- type: "tool_use",
- id: "tool-1",
- name: "read_file",
- input: { path: "test.txt" },
- },
- ],
- },
- ]
- const result = convertToAiSdkMessages(messages)
- const content = (result[0] as any).content as any[]
- const toolCallPart = content.find((p: any) => p.type === "tool-call")
- expect(toolCallPart).toBeDefined()
- expect(toolCallPart.providerOptions).toBeUndefined()
- })
- })
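	// Taken together, the cases above pin down the conversion rules these tests assume:
	// Anthropic `tool_use` blocks become `tool-call` parts, `tool_result` blocks are moved into
	// separate `role: "tool"` messages (falling back to "unknown_tool" when no matching call is
	// found), `thinking`/`reasoning` blocks and message-level `reasoning_content` become
	// `reasoning` parts, and a trailing `thoughtSignature` block is folded into providerOptions
	// on the first tool-call part.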
- describe("convertToolsForAiSdk", () => {
- it("returns undefined for empty tools", () => {
- expect(convertToolsForAiSdk(undefined)).toBeUndefined()
- expect(convertToolsForAiSdk([])).toBeUndefined()
- })
- it("converts function tools to AI SDK format", () => {
- const tools: OpenAI.Chat.ChatCompletionTool[] = [
- {
- type: "function",
- function: {
- name: "read_file",
- description: "Read a file from disk",
- parameters: {
- type: "object",
- properties: {
- path: { type: "string", description: "File path" },
- },
- required: ["path"],
- },
- },
- },
- ]
- const result = convertToolsForAiSdk(tools)
- expect(result).toBeDefined()
- expect(result!.read_file).toBeDefined()
- expect(result!.read_file.description).toBe("Read a file from disk")
- })
- it("converts multiple tools", () => {
- const tools: OpenAI.Chat.ChatCompletionTool[] = [
- {
- type: "function",
- function: {
- name: "read_file",
- description: "Read a file",
- parameters: {},
- },
- },
- {
- type: "function",
- function: {
- name: "write_file",
- description: "Write a file",
- parameters: {},
- },
- },
- ]
- const result = convertToolsForAiSdk(tools)
- expect(result).toBeDefined()
- expect(Object.keys(result!)).toHaveLength(2)
- expect(result!.read_file).toBeDefined()
- expect(result!.write_file).toBeDefined()
- })
- })
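	// For orientation only: a minimal sketch (not the actual implementation in ../ai-sdk) of a
	// converter that would satisfy the expectations above, assuming the AI SDK `tool()` /
	// `jsonSchema()` helpers that are mocked as pass-throughs at the top of this file:
	//
	//   const convertToolsSketch = (tools?: OpenAI.Chat.ChatCompletionTool[]) => {
	//   	if (!tools || tools.length === 0) return undefined
	//   	const entries = tools
	//   		.filter((t) => t.type === "function")
	//   		.map((t) => [
	//   			t.function.name,
	//   			tool({ description: t.function.description, inputSchema: jsonSchema(t.function.parameters ?? {}) }),
	//   		])
	//   	return Object.fromEntries(entries)
	//   }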
- describe("processAiSdkStreamPart", () => {
- it("processes text-delta chunks", () => {
- const part = { type: "text-delta" as const, id: "1", text: "Hello" }
- const chunks = [...processAiSdkStreamPart(part)]
- expect(chunks).toHaveLength(1)
- expect(chunks[0]).toEqual({ type: "text", text: "Hello" })
- })
- it("processes text chunks (fullStream format)", () => {
- const part = { type: "text" as const, text: "Hello from fullStream" }
- const chunks = [...processAiSdkStreamPart(part as any)]
- expect(chunks).toHaveLength(1)
- expect(chunks[0]).toEqual({ type: "text", text: "Hello from fullStream" })
- })
- it("processes reasoning-delta chunks", () => {
- const part = { type: "reasoning-delta" as const, id: "1", text: "thinking..." }
- const chunks = [...processAiSdkStreamPart(part)]
- expect(chunks).toHaveLength(1)
- expect(chunks[0]).toEqual({ type: "reasoning", text: "thinking..." })
- })
- it("processes reasoning chunks (fullStream format)", () => {
- const part = { type: "reasoning" as const, text: "reasoning from fullStream" }
- const chunks = [...processAiSdkStreamPart(part as any)]
- expect(chunks).toHaveLength(1)
- expect(chunks[0]).toEqual({ type: "reasoning", text: "reasoning from fullStream" })
- })
- it("processes tool-input-start chunks", () => {
- const part = { type: "tool-input-start" as const, id: "call_1", toolName: "read_file" }
- const chunks = [...processAiSdkStreamPart(part)]
- expect(chunks).toHaveLength(1)
- expect(chunks[0]).toEqual({ type: "tool_call_start", id: "call_1", name: "read_file" })
- })
- it("processes tool-input-delta chunks", () => {
- const part = { type: "tool-input-delta" as const, id: "call_1", delta: '{"path":' }
- const chunks = [...processAiSdkStreamPart(part)]
- expect(chunks).toHaveLength(1)
- expect(chunks[0]).toEqual({ type: "tool_call_delta", id: "call_1", delta: '{"path":' })
- })
- it("processes tool-input-end chunks", () => {
- const part = { type: "tool-input-end" as const, id: "call_1" }
- const chunks = [...processAiSdkStreamPart(part)]
- expect(chunks).toHaveLength(1)
- expect(chunks[0]).toEqual({ type: "tool_call_end", id: "call_1" })
- })
- it("ignores tool-call chunks to prevent duplicate tools in UI", () => {
- // tool-call is intentionally ignored because tool-input-start/delta/end already
- // provide complete tool call information. Emitting tool-call would cause duplicate
- // tools in the UI for AI SDK providers (e.g., DeepSeek, Moonshot).
- const part = {
- type: "tool-call" as const,
- toolCallId: "call_1",
- toolName: "read_file",
- input: { path: "test.ts" },
- }
- const chunks = [...processAiSdkStreamPart(part)]
- expect(chunks).toHaveLength(0)
- })
- it("processes source chunks with URL", () => {
- const part = {
- type: "source" as const,
- url: "https://example.com",
- title: "Example Source",
- }
- const chunks = [...processAiSdkStreamPart(part as any)]
- expect(chunks).toHaveLength(1)
- expect(chunks[0]).toEqual({
- type: "grounding",
- sources: [
- {
- title: "Example Source",
- url: "https://example.com",
- snippet: undefined,
- },
- ],
- })
- })
- it("processes error chunks", () => {
- const part = { type: "error" as const, error: new Error("Test error") }
- const chunks = [...processAiSdkStreamPart(part)]
- expect(chunks).toHaveLength(1)
- expect(chunks[0]).toEqual({
- type: "error",
- error: "StreamError",
- message: "Test error",
- })
- })
- it("ignores lifecycle events", () => {
- const lifecycleEvents = [
- { type: "text-start" as const },
- { type: "text-end" as const },
- { type: "reasoning-start" as const },
- { type: "reasoning-end" as const },
- { type: "start-step" as const },
- { type: "finish-step" as const },
- { type: "start" as const },
- { type: "finish" as const },
- { type: "abort" as const },
- ]
- for (const event of lifecycleEvents) {
- const chunks = [...processAiSdkStreamPart(event as any)]
- expect(chunks).toHaveLength(0)
- }
- })
- })
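	// The mapping these tests assume: `text`/`text-delta` parts become `text` chunks,
	// `reasoning`/`reasoning-delta` become `reasoning`, `tool-input-start/-delta/-end` become
	// `tool_call_start`/`tool_call_delta`/`tool_call_end`, `source` becomes a `grounding` chunk,
	// `error` becomes an `error` chunk, and complete `tool-call` parts plus lifecycle events are
	// dropped.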
- describe("mapToolChoice", () => {
- it("should return undefined for null or undefined", () => {
- expect(mapToolChoice(null)).toBeUndefined()
- expect(mapToolChoice(undefined)).toBeUndefined()
- })
- it("should handle string tool choices", () => {
- expect(mapToolChoice("auto")).toBe("auto")
- expect(mapToolChoice("none")).toBe("none")
- expect(mapToolChoice("required")).toBe("required")
- })
- it("should return auto for unknown string values", () => {
- expect(mapToolChoice("unknown")).toBe("auto")
- expect(mapToolChoice("invalid")).toBe("auto")
- })
- it("should handle object tool choice with function name", () => {
- const result = mapToolChoice({
- type: "function",
- function: { name: "my_tool" },
- })
- expect(result).toEqual({ type: "tool", toolName: "my_tool" })
- })
- it("should return undefined for object without function name", () => {
- const result = mapToolChoice({
- type: "function",
- function: {},
- })
- expect(result).toBeUndefined()
- })
- it("should return undefined for object with non-function type", () => {
- const result = mapToolChoice({
- type: "other",
- function: { name: "my_tool" },
- })
- expect(result).toBeUndefined()
- })
- })
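	// A hedched-to-the-tests sketch of the mapping exercised above (not the real implementation):
	//
	//   const mapToolChoiceSketch = (choice: any) => {
	//   	if (choice == null) return undefined
	//   	if (typeof choice === "string") {
	//   		return choice === "none" || choice === "required" ? choice : "auto"
	//   	}
	//   	if (choice.type === "function" && choice.function?.name) {
	//   		return { type: "tool", toolName: choice.function.name }
	//   	}
	//   	return undefined
	//   }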
- describe("extractAiSdkErrorMessage", () => {
- it("should return 'Unknown error' for null/undefined", () => {
- expect(extractAiSdkErrorMessage(null)).toBe("Unknown error")
- expect(extractAiSdkErrorMessage(undefined)).toBe("Unknown error")
- })
- it("should extract message from AI_RetryError", () => {
- const retryError = {
- name: "AI_RetryError",
- message: "Failed after 3 attempts",
- errors: [new Error("Error 1"), new Error("Error 2"), new Error("Too Many Requests")],
- lastError: { message: "Too Many Requests", status: 429 },
- }
- const result = extractAiSdkErrorMessage(retryError)
- expect(result).toBe("Failed after 3 attempts (429): Too Many Requests")
- })
- it("should handle AI_RetryError without status", () => {
- const retryError = {
- name: "AI_RetryError",
- message: "Failed after 2 attempts",
- errors: [new Error("Error 1"), new Error("Connection failed")],
- lastError: { message: "Connection failed" },
- }
- const result = extractAiSdkErrorMessage(retryError)
- expect(result).toBe("Failed after 2 attempts: Connection failed")
- })
- it("should extract message from AI_APICallError", () => {
- const apiError = {
- name: "AI_APICallError",
- message: "Rate limit exceeded",
- status: 429,
- }
- const result = extractAiSdkErrorMessage(apiError)
- expect(result).toBe("API Error (429): Rate limit exceeded")
- })
- it("should handle AI_APICallError without status", () => {
- const apiError = {
- name: "AI_APICallError",
- message: "Connection timeout",
- }
- const result = extractAiSdkErrorMessage(apiError)
- expect(result).toBe("Connection timeout")
- })
- it("should extract message from standard Error", () => {
- const error = new Error("Something went wrong")
- expect(extractAiSdkErrorMessage(error)).toBe("Something went wrong")
- })
- it("should convert non-Error to string", () => {
- expect(extractAiSdkErrorMessage("string error")).toBe("string error")
- expect(extractAiSdkErrorMessage({ custom: "object" })).toBe("[object Object]")
- })
- })
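	// The message formats asserted above amount to (a summary, not the real implementation):
	//
	//   AI_RetryError:    "<message> (<lastError.status>): <lastError.message>"  (status omitted when absent)
	//   AI_APICallError:  "API Error (<status>): <message>"                      (just <message> when no status)
	//   Error instances:  error.message
	//   anything else:    String(value), or "Unknown error" for null/undefined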
- describe("handleAiSdkError", () => {
- it("should wrap error with provider name", () => {
- const error = new Error("API Error")
- const result = handleAiSdkError(error, "Fireworks")
- expect(result.message).toBe("Fireworks: API Error")
- })
- it("should preserve status code from AI_RetryError", () => {
- const retryError = {
- name: "AI_RetryError",
- errors: [new Error("Too Many Requests")],
- lastError: { message: "Too Many Requests", status: 429 },
- }
- const result = handleAiSdkError(retryError, "Groq")
- expect(result.message).toContain("Groq:")
- expect(result.message).toContain("429")
- expect((result as any).status).toBe(429)
- })
- it("should preserve status code from AI_APICallError", () => {
- const apiError = {
- name: "AI_APICallError",
- message: "Unauthorized",
- status: 401,
- }
- const result = handleAiSdkError(apiError, "DeepSeek")
- expect(result.message).toContain("DeepSeek:")
- expect(result.message).toContain("401")
- expect((result as any).status).toBe(401)
- })
- it("should preserve original error as cause", () => {
- const originalError = new Error("Original error")
- const result = handleAiSdkError(originalError, "Cerebras")
- expect((result as any).cause).toBe(originalError)
- })
- })
- describe("flattenAiSdkMessagesToStringContent", () => {
- it("should return messages unchanged if content is already a string", () => {
- const messages = [
- { role: "user" as const, content: "Hello" },
- { role: "assistant" as const, content: "Hi there" },
- ]
- const result = flattenAiSdkMessagesToStringContent(messages)
- expect(result).toEqual(messages)
- })
- it("should flatten user messages with only text parts to string", () => {
- const messages = [
- {
- role: "user" as const,
- content: [
- { type: "text" as const, text: "Hello" },
- { type: "text" as const, text: "World" },
- ],
- },
- ]
- const result = flattenAiSdkMessagesToStringContent(messages)
- expect(result).toHaveLength(1)
- expect(result[0].role).toBe("user")
- expect(result[0].content).toBe("Hello\nWorld")
- })
- it("should flatten assistant messages with only text parts to string", () => {
- const messages = [
- {
- role: "assistant" as const,
- content: [{ type: "text" as const, text: "I am an assistant" }],
- },
- ]
- const result = flattenAiSdkMessagesToStringContent(messages)
- expect(result).toHaveLength(1)
- expect(result[0].role).toBe("assistant")
- expect(result[0].content).toBe("I am an assistant")
- })
- it("should not flatten user messages with image parts", () => {
- const messages = [
- {
- role: "user" as const,
- content: [
- { type: "text" as const, text: "Look at this" },
- { type: "image" as const, image: "data:image/png;base64,abc123" },
- ],
- },
- ]
- const result = flattenAiSdkMessagesToStringContent(messages)
- expect(result).toEqual(messages)
- })
- it("should not flatten assistant messages with tool calls", () => {
- const messages = [
- {
- role: "assistant" as const,
- content: [
- { type: "text" as const, text: "Let me use a tool" },
- {
- type: "tool-call" as const,
- toolCallId: "123",
- toolName: "read_file",
- input: { path: "test.txt" },
- },
- ],
- },
- ]
- const result = flattenAiSdkMessagesToStringContent(messages)
- expect(result).toEqual(messages)
- })
- it("should not flatten tool role messages", () => {
- const messages = [
- {
- role: "tool" as const,
- content: [
- {
- type: "tool-result" as const,
- toolCallId: "123",
- toolName: "test",
- output: { type: "text" as const, value: "result" },
- },
- ],
- },
- ] as any
- const result = flattenAiSdkMessagesToStringContent(messages)
- expect(result).toEqual(messages)
- })
- it("should respect flattenUserMessages option", () => {
- const messages = [
- {
- role: "user" as const,
- content: [{ type: "text" as const, text: "Hello" }],
- },
- ]
- const result = flattenAiSdkMessagesToStringContent(messages, { flattenUserMessages: false })
- expect(result).toEqual(messages)
- })
- it("should respect flattenAssistantMessages option", () => {
- const messages = [
- {
- role: "assistant" as const,
- content: [{ type: "text" as const, text: "Hi" }],
- },
- ]
- const result = flattenAiSdkMessagesToStringContent(messages, { flattenAssistantMessages: false })
- expect(result).toEqual(messages)
- })
- it("should handle mixed message types correctly", () => {
- const messages = [
- { role: "user" as const, content: "Simple string" },
- {
- role: "user" as const,
- content: [{ type: "text" as const, text: "Text parts" }],
- },
- {
- role: "assistant" as const,
- content: [{ type: "text" as const, text: "Assistant text" }],
- },
- {
- role: "assistant" as const,
- content: [
- { type: "text" as const, text: "With tool" },
- { type: "tool-call" as const, toolCallId: "456", toolName: "test", input: {} },
- ],
- },
- ]
- const result = flattenAiSdkMessagesToStringContent(messages)
- expect(result[0].content).toBe("Simple string") // unchanged
- expect(result[1].content).toBe("Text parts") // flattened
- expect(result[2].content).toBe("Assistant text") // flattened
- expect(result[3]).toEqual(messages[3]) // unchanged (has tool call)
- })
- it("should handle empty text parts", () => {
- const messages = [
- {
- role: "user" as const,
- content: [
- { type: "text" as const, text: "" },
- { type: "text" as const, text: "Hello" },
- ],
- },
- ]
- const result = flattenAiSdkMessagesToStringContent(messages)
- expect(result[0].content).toBe("\nHello")
- })
- it("should strip reasoning parts and flatten text for string-only models", () => {
- const messages = [
- {
- role: "assistant" as const,
- content: [
- { type: "reasoning" as const, text: "I am thinking about this..." },
- { type: "text" as const, text: "Here is my answer" },
- ],
- },
- ]
- const result = flattenAiSdkMessagesToStringContent(messages)
- // Reasoning should be stripped, only text should remain
- expect(result[0].content).toBe("Here is my answer")
- })
- it("should handle messages with only reasoning parts", () => {
- const messages = [
- {
- role: "assistant" as const,
- content: [{ type: "reasoning" as const, text: "Only reasoning, no text" }],
- },
- ]
- const result = flattenAiSdkMessagesToStringContent(messages)
- // Should flatten to empty string when only reasoning is present
- expect(result[0].content).toBe("")
- })
- it("should not flatten if tool calls are present with reasoning", () => {
- const messages = [
- {
- role: "assistant" as const,
- content: [
- { type: "reasoning" as const, text: "Thinking..." },
- { type: "text" as const, text: "Using tool" },
- { type: "tool-call" as const, toolCallId: "abc", toolName: "test", input: {} },
- ],
- },
- ]
- const result = flattenAiSdkMessagesToStringContent(messages)
- // Should not flatten because there's a tool call
- expect(result[0]).toEqual(messages[0])
- })
- })
- })