
feat: implement ModelMessage storage layer with AI SDK response messages (#11409)

Co-authored-by: Claude Opus 4.6 (1M context) <[email protected]>
Daniel 2 days ago
parent
commit
e6f0e79c38
100 changed files with 2561 additions and 3165 deletions
  1. src/api/index.ts (+3 -5)
  2. src/api/providers/__tests__/anthropic-vertex.spec.ts (+10 -98)
  3. src/api/providers/__tests__/anthropic.spec.ts (+1 -89)
  4. src/api/providers/__tests__/azure.spec.ts (+3 -2)
  5. src/api/providers/__tests__/base-provider.spec.ts (+2 -1)
  6. src/api/providers/__tests__/baseten.spec.ts (+4 -3)
  7. src/api/providers/__tests__/bedrock-reasoning.spec.ts (+0 -85)
  8. src/api/providers/__tests__/bedrock.spec.ts (+20 -28)
  9. src/api/providers/__tests__/deepseek.spec.ts (+4 -3)
  10. src/api/providers/__tests__/fireworks.spec.ts (+3 -2)
  11. src/api/providers/__tests__/gemini.spec.ts (+3 -2)
  12. src/api/providers/__tests__/lite-llm.spec.ts (+2 -1)
  13. src/api/providers/__tests__/lmstudio.spec.ts (+2 -1)
  14. src/api/providers/__tests__/minimax.spec.ts (+5 -37)
  15. src/api/providers/__tests__/mistral.spec.ts (+3 -2)
  16. src/api/providers/__tests__/moonshot.spec.ts (+3 -2)
  17. src/api/providers/__tests__/openai-native-reasoning.spec.ts (+32 -41)
  18. src/api/providers/__tests__/openai-native-usage.spec.ts (+2 -1)
  19. src/api/providers/__tests__/openai-native.spec.ts (+2 -1)
  20. src/api/providers/__tests__/openai-usage-tracking.spec.ts (+2 -1)
  21. src/api/providers/__tests__/openai.spec.ts (+8 -7)
  22. src/api/providers/__tests__/openrouter.spec.ts (+2 -112)
  23. src/api/providers/__tests__/qwen-code-native-tools.spec.ts (+4 -3)
  24. src/api/providers/__tests__/requesty.spec.ts (+3 -4)
  25. src/api/providers/__tests__/roo.spec.ts (+3 -73)
  26. src/api/providers/__tests__/sambanova.spec.ts (+4 -3)
  27. src/api/providers/__tests__/vercel-ai-gateway.spec.ts (+6 -5)
  28. src/api/providers/__tests__/vertex.spec.ts (+2 -1)
  29. src/api/providers/__tests__/vscode-lm.spec.ts (+6 -5)
  30. src/api/providers/__tests__/xai.spec.ts (+3 -2)
  31. src/api/providers/__tests__/zai.spec.ts (+2 -1)
  32. src/api/providers/anthropic-vertex.ts (+15 -91)
  33. src/api/providers/anthropic.ts (+15 -92)
  34. src/api/providers/azure.ts (+4 -3)
  35. src/api/providers/base-provider.ts (+2 -1)
  36. src/api/providers/baseten.ts (+4 -3)
  37. src/api/providers/bedrock.ts (+16 -118)
  38. src/api/providers/deepseek.ts (+4 -3)
  39. src/api/providers/fake-ai.ts (+3 -6)
  40. src/api/providers/fireworks.ts (+4 -3)
  41. src/api/providers/gemini.ts (+8 -26)
  42. src/api/providers/lite-llm.ts (+2 -1)
  43. src/api/providers/lm-studio.ts (+12 -3)
  44. src/api/providers/minimax.ts (+16 -89)
  45. src/api/providers/mistral.ts (+4 -3)
  46. src/api/providers/native-ollama.ts (+7 -3)
  47. src/api/providers/openai-codex.ts (+8 -4)
  48. src/api/providers/openai-compatible.ts (+4 -3)
  49. src/api/providers/openai-native.ts (+15 -18)
  50. src/api/providers/openai.ts (+9 -5)
  51. src/api/providers/openrouter.ts (+12 -32)
  52. src/api/providers/qwen-code.ts (+2 -1)
  53. src/api/providers/requesty.ts (+4 -3)
  54. src/api/providers/roo.ts (+9 -33)
  55. src/api/providers/sambanova.ts (+5 -6)
  56. src/api/providers/vercel-ai-gateway.ts (+7 -3)
  57. src/api/providers/vertex.ts (+8 -29)
  58. src/api/providers/vscode-lm.ts (+5 -4)
  59. src/api/providers/xai.ts (+3 -2)
  60. src/api/providers/zai.ts (+4 -3)
  61. src/api/transform/__tests__/image-cleaning.spec.ts (+8 -9)
  62. src/api/transform/__tests__/openai-format.spec.ts (+68 -24)
  63. src/api/transform/ai-sdk.ts (+27 -0)
  64. src/api/transform/image-cleaning.ts (+21 -21)
  65. src/api/transform/openai-format.ts (+231 -185)
  66. src/api/transform/stream.ts (+12 -0)
  67. src/core/assistant-message/__tests__/presentAssistantMessage-images.spec.ts (+37 -35)
  68. src/core/assistant-message/__tests__/presentAssistantMessage-unknown-tool.spec.ts (+20 -19)
  69. src/core/assistant-message/presentAssistantMessage.ts (+36 -30)
  70. src/core/condense/__tests__/condense.spec.ts (+31 -33)
  71. src/core/condense/__tests__/foldedFileContext.spec.ts (+2 -2)
  72. src/core/condense/__tests__/index.spec.ts (+113 -105)
  73. src/core/condense/__tests__/nested-condense.spec.ts (+11 -11)
  74. src/core/condense/__tests__/rewind-after-condense.spec.ts (+49 -49)
  75. src/core/condense/index.ts (+138 -86)
  76. src/core/context-management/__tests__/context-management.spec.ts (+17 -17)
  77. src/core/context-management/__tests__/truncation.spec.ts (+26 -27)
  78. src/core/context-management/index.ts (+20 -21)
  79. src/core/mentions/__tests__/processUserContentMentions.spec.ts (+6 -184)
  80. src/core/mentions/processUserContentMentions.ts (+12 -117)
  81. src/core/message-manager/index.ts (+1 -1)
  82. src/core/prompts/responses.ts (+10 -12)
  83. src/core/task-persistence/__tests__/rooMessage.spec.ts (+33 -0)
  84. src/core/task-persistence/converters/anthropicToRoo.ts (+13 -0)
  85. src/core/task-persistence/index.ts (+30 -2)
  86. src/core/task-persistence/rooMessage.ts (+166 -0)
  87. src/core/task/Task.ts (+478 -380)
  88. src/core/task/__tests__/Task.persistence.spec.ts (+52 -25)
  89. src/core/task/__tests__/Task.spec.ts (+68 -72)
  90. src/core/task/__tests__/flushPendingToolResultsToHistory.spec.ts (+57 -49)
  91. src/core/task/__tests__/grounding-sources.test.ts (+2 -2)
  92. src/core/task/__tests__/mergeConsecutiveApiMessages.spec.ts (+4 -4)
  93. src/core/task/__tests__/reasoning-preservation.test.ts (+78 -159)
  94. src/core/task/__tests__/task-tool-history.spec.ts (+9 -4)
  95. src/core/task/__tests__/validateToolResultIds.spec.ts (+134 -136)
  96. src/core/task/mergeConsecutiveApiMessages.ts (+26 -11)
  97. src/core/task/validateToolResultIds.ts (+111 -102)
  98. src/core/webview/ClineProvider.ts (+13 -14)
  99. src/core/webview/__tests__/ClineProvider.spec.ts (+30 -2)
  100. src/core/webview/webviewMessageHandler.ts (+1 -3)

+ 3 - 5
src/api/index.ts

@@ -3,6 +3,8 @@ import OpenAI from "openai"
 
 import { isRetiredProvider, type ProviderSettings, type ModelInfo } from "@roo-code/types"
 
+import type { RooMessage } from "../core/task-persistence/rooMessage"
+
 import { ApiStream } from "./transform/stream"
 
 import {
@@ -89,11 +91,7 @@ export interface ApiHandlerCreateMessageMetadata {
 }
 
 export interface ApiHandler {
-	createMessage(
-		systemPrompt: string,
-		messages: Anthropic.Messages.MessageParam[],
-		metadata?: ApiHandlerCreateMessageMetadata,
-	): ApiStream
+	createMessage(systemPrompt: string, messages: RooMessage[], metadata?: ApiHandlerCreateMessageMetadata): ApiStream
 
 	getModel(): { id: string; info: ModelInfo }
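
In practice the new contract means a provider receives the persisted RooMessage history directly. A minimal sketch of a conforming handler, assuming RooMessage is assignable to the AI SDK's ModelMessage (the provider specs below pass messages straight to streamText on that basis); the class and model wiring here are illustrative, not the repo's actual base provider:

	import { streamText, type LanguageModel } from "ai"
	import type { RooMessage } from "../core/task-persistence/rooMessage"
	import type { ApiStream } from "./transform/stream"

	class SketchHandler {
		constructor(private model: LanguageModel) {}

		async *createMessage(systemPrompt: string, messages: RooMessage[]): ApiStream {
			// No Anthropic -> ModelMessage conversion step: history is stored in
			// the AI SDK's shape, so it goes to streamText as-is.
			const result = streamText({ model: this.model, system: systemPrompt, messages })
			for await (const part of result.fullStream) {
				if (part.type === "text-delta") {
					yield { type: "text", text: part.text }
				}
			}
		}
	}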
 

+ 10 - 98
src/api/providers/__tests__/anthropic-vertex.spec.ts

@@ -1,3 +1,4 @@
+import type { RooMessage } from "../../../core/task-persistence/rooMessage"
 // npx vitest run src/api/providers/__tests__/anthropic-vertex.spec.ts
 
 import { AnthropicVertexHandler } from "../anthropic-vertex"
@@ -54,6 +55,7 @@ vitest.mock("../../transform/ai-sdk", () => ({
 	}),
 	mapToolChoice: vitest.fn().mockReturnValue(undefined),
 	handleAiSdkError: vitest.fn().mockImplementation((error: any) => error),
+	yieldResponseMessage: vitest.fn().mockImplementation(function* () {}),
 }))
 
 // Import mocked modules
@@ -184,7 +186,7 @@ describe("AnthropicVertexHandler", () => {
 	})
 
 	describe("createMessage", () => {
-		const mockMessages: Anthropic.Messages.MessageParam[] = [
+		const mockMessages: RooMessage[] = [
 			{
 				role: "user",
 				content: "Hello",
@@ -244,7 +246,7 @@ describe("AnthropicVertexHandler", () => {
 			)
 		})
 
-		it("should call convertToAiSdkMessages with the messages", async () => {
+		it("should pass messages directly to streamText as ModelMessage[]", async () => {
 			mockStreamText.mockReturnValue(createMockStreamResult([]))
 
 			const stream = handler.createMessage(systemPrompt, mockMessages)
@@ -252,7 +254,12 @@ describe("AnthropicVertexHandler", () => {
 				// consume
 			}
 
-			expect(convertToAiSdkMessages).toHaveBeenCalledWith(mockMessages)
+			// Messages are now already in ModelMessage format, passed directly to streamText
+			expect(mockStreamText).toHaveBeenCalledWith(
+				expect.objectContaining({
+					messages: mockMessages,
+				}),
+			)
 		})
 
 		it("should pass tools through AI SDK conversion pipeline", async () => {
@@ -363,55 +370,6 @@ describe("AnthropicVertexHandler", () => {
 			expect(textChunks[0].text).toBe("Here's my answer:")
 		})
 
-		it("should capture thought signature from stream events", async () => {
-			const streamParts = [
-				{
-					type: "reasoning-delta",
-					text: "thinking...",
-					providerMetadata: {
-						anthropic: { signature: "test-signature-abc123" },
-					},
-				},
-				{ type: "text-delta", text: "answer" },
-			]
-
-			mockStreamText.mockReturnValue(createMockStreamResult(streamParts))
-
-			const stream = handler.createMessage(systemPrompt, mockMessages)
-			for await (const _chunk of stream) {
-				// consume
-			}
-
-			expect(handler.getThoughtSignature()).toBe("test-signature-abc123")
-		})
-
-		it("should capture redacted thinking blocks from stream events", async () => {
-			const streamParts = [
-				{
-					type: "reasoning-delta",
-					text: "",
-					providerMetadata: {
-						anthropic: { redactedData: "encrypted-redacted-data" },
-					},
-				},
-				{ type: "text-delta", text: "answer" },
-			]
-
-			mockStreamText.mockReturnValue(createMockStreamResult(streamParts))
-
-			const stream = handler.createMessage(systemPrompt, mockMessages)
-			for await (const _chunk of stream) {
-				// consume
-			}
-
-			const redactedBlocks = handler.getRedactedThinkingBlocks()
-			expect(redactedBlocks).toHaveLength(1)
-			expect(redactedBlocks![0]).toEqual({
-				type: "redacted_thinking",
-				data: "encrypted-redacted-data",
-			})
-		})
-
 		it("should configure thinking providerOptions for thinking models", async () => {
 			const thinkingHandler = new AnthropicVertexHandler({
 				apiModelId: "claude-3-7-sonnet@20250219:thinking",
@@ -674,50 +632,4 @@ describe("AnthropicVertexHandler", () => {
 			expect(handler.isAiSdkProvider()).toBe(true)
 		})
 	})
-
-	describe("thought signature and redacted thinking", () => {
-		beforeEach(() => {
-			handler = new AnthropicVertexHandler({
-				apiModelId: "claude-3-5-sonnet-v2@20241022",
-				vertexProjectId: "test-project",
-				vertexRegion: "us-central1",
-			})
-		})
-
-		it("should return undefined for thought signature before any request", () => {
-			expect(handler.getThoughtSignature()).toBeUndefined()
-		})
-
-		it("should return undefined for redacted thinking blocks before any request", () => {
-			expect(handler.getRedactedThinkingBlocks()).toBeUndefined()
-		})
-
-		it("should reset thought signature on each createMessage call", async () => {
-			// First call with signature
-			mockStreamText.mockReturnValue(
-				createMockStreamResult([
-					{
-						type: "reasoning-delta",
-						text: "thinking",
-						providerMetadata: { anthropic: { signature: "sig-1" } },
-					},
-				]),
-			)
-
-			const stream1 = handler.createMessage("test", [{ role: "user", content: "Hello" }])
-			for await (const _chunk of stream1) {
-				// consume
-			}
-			expect(handler.getThoughtSignature()).toBe("sig-1")
-
-			// Second call without signature
-			mockStreamText.mockReturnValue(createMockStreamResult([{ type: "text-delta", text: "just text" }]))
-
-			const stream2 = handler.createMessage("test", [{ role: "user", content: "Hello again" }])
-			for await (const _chunk of stream2) {
-				// consume
-			}
-			expect(handler.getThoughtSignature()).toBeUndefined()
-		})
-	})
 })
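
Two changes run through this spec. Messages now reach streamText untouched (the old convertToAiSdkMessages assertion becomes a direct pass-through check), and the thought-signature/redacted-thinking accessors are gone, replaced by the yieldResponseMessage helper mocked at the top. Its real body lives in the new src/api/transform/ai-sdk.ts additions below; a hypothetical sketch of the idea, with the chunk shape and signature guessed from the mock rather than copied from the source:

	import type { ModelMessage } from "ai"

	// Hypothetical: emit the provider's assembled response messages, whose parts
	// carry reasoning signatures and redacted-thinking data in provider metadata,
	// as stream chunks. The Task layer can then persist them verbatim instead of
	// reading state back through per-handler accessors.
	function* yieldResponseMessage(messages: ModelMessage[]) {
		for (const message of messages) {
			yield { type: "response-message" as const, message }
		}
	}

	// A handler would presumably finish its stream with something like:
	//   yield* yieldResponseMessage((await result.response).messages)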

+ 1 - 89
src/api/providers/__tests__/anthropic.spec.ts

@@ -50,6 +50,7 @@ vitest.mock("../../transform/ai-sdk", () => ({
 	}),
 	mapToolChoice: vitest.fn().mockReturnValue(undefined),
 	handleAiSdkError: vitest.fn().mockImplementation((error: any) => error),
+	yieldResponseMessage: vitest.fn().mockImplementation(function* () {}),
 }))
 
 // Import mocked modules
@@ -398,85 +399,6 @@ describe("AnthropicHandler", () => {
 			expect(endChunk).toBeDefined()
 		})
 
-		it("should capture thinking signature from stream events", async () => {
-			const testSignature = "test-thinking-signature"
-			setupStreamTextMock([
-				{
-					type: "reasoning-delta",
-					text: "thinking...",
-					providerMetadata: { anthropic: { signature: testSignature } },
-				},
-				{ type: "text-delta", text: "Answer" },
-			])
-
-			const stream = handler.createMessage(systemPrompt, [
-				{ role: "user", content: [{ type: "text" as const, text: "test" }] },
-			])
-
-			for await (const _chunk of stream) {
-				// Consume stream
-			}
-
-			expect(handler.getThoughtSignature()).toBe(testSignature)
-		})
-
-		it("should capture redacted thinking blocks from stream events", async () => {
-			setupStreamTextMock([
-				{
-					type: "reasoning-delta",
-					text: "",
-					providerMetadata: { anthropic: { redactedData: "redacted-data-base64" } },
-				},
-				{ type: "text-delta", text: "Answer" },
-			])
-
-			const stream = handler.createMessage(systemPrompt, [
-				{ role: "user", content: [{ type: "text" as const, text: "test" }] },
-			])
-
-			for await (const _chunk of stream) {
-				// Consume stream
-			}
-
-			const redactedBlocks = handler.getRedactedThinkingBlocks()
-			expect(redactedBlocks).toBeDefined()
-			expect(redactedBlocks).toHaveLength(1)
-			expect(redactedBlocks![0]).toEqual({
-				type: "redacted_thinking",
-				data: "redacted-data-base64",
-			})
-		})
-
-		it("should reset thinking state between requests", async () => {
-			// First request with signature
-			setupStreamTextMock([
-				{
-					type: "reasoning-delta",
-					text: "thinking...",
-					providerMetadata: { anthropic: { signature: "sig-1" } },
-				},
-			])
-
-			const stream1 = handler.createMessage(systemPrompt, [
-				{ role: "user", content: [{ type: "text" as const, text: "test 1" }] },
-			])
-			for await (const _chunk of stream1) {
-				// Consume
-			}
-			expect(handler.getThoughtSignature()).toBe("sig-1")
-
-			// Second request without signature
-			setupStreamTextMock([{ type: "text-delta", text: "plain answer" }])
-
-			const stream2 = handler.createMessage(systemPrompt, [
-				{ role: "user", content: [{ type: "text" as const, text: "test 2" }] },
-			])
-			for await (const _chunk of stream2) {
-				// Consume
-			}
-			expect(handler.getThoughtSignature()).toBeUndefined()
-		})
-
 		it("should pass system prompt via system param with systemProviderOptions for cache control", async () => {
 			setupStreamTextMock([{ type: "text-delta", text: "test" }])
 
@@ -610,14 +532,4 @@ describe("AnthropicHandler", () => {
 			expect(handler.isAiSdkProvider()).toBe(true)
 		})
 	})
-
-	describe("thinking signature", () => {
-		it("should return undefined when no signature captured", () => {
-			expect(handler.getThoughtSignature()).toBeUndefined()
-		})
-
-		it("should return undefined for redacted blocks when none captured", () => {
-			expect(handler.getRedactedThinkingBlocks()).toBeUndefined()
-		})
-	})
 })

+ 3 - 2
src/api/providers/__tests__/azure.spec.ts

@@ -1,3 +1,4 @@
+import type { RooMessage } from "../../../core/task-persistence/rooMessage"
 // Use vi.hoisted to define mock functions that can be referenced in hoisted vi.mock() calls
 const { mockStreamText, mockGenerateText, mockCreateAzure } = vi.hoisted(() => ({
 	mockStreamText: vi.fn(),
@@ -132,7 +133,7 @@ describe("AzureHandler", () => {
 
 	describe("createMessage", () => {
 		const systemPrompt = "You are a helpful assistant."
-		const messages: Anthropic.Messages.MessageParam[] = [
+		const messages: RooMessage[] = [
 			{
 				role: "user",
 				content: [
@@ -376,7 +377,7 @@ describe("AzureHandler", () => {
 
 	describe("tools", () => {
 		const systemPrompt = "You are a helpful assistant."
-		const messages: Anthropic.Messages.MessageParam[] = [
+		const messages: RooMessage[] = [
 			{
 				role: "user",
 				content: [{ type: "text" as const, text: "Use a tool" }],

+ 2 - 1
src/api/providers/__tests__/base-provider.spec.ts

@@ -1,3 +1,4 @@
+import type { RooMessage } from "../../../core/task-persistence/rooMessage"
 import { Anthropic } from "@anthropic-ai/sdk"
 
 import type { ModelInfo } from "@roo-code/types"
@@ -7,7 +8,7 @@ import type { ApiStream } from "../../transform/stream"
 
 // Create a concrete implementation for testing
 class TestProvider extends BaseProvider {
-	createMessage(_systemPrompt: string, _messages: Anthropic.Messages.MessageParam[]): ApiStream {
+	createMessage(_systemPrompt: string, _messages: RooMessage[]): ApiStream {
 		throw new Error("Not implemented")
 	}
 

+ 4 - 3
src/api/providers/__tests__/baseten.spec.ts

@@ -1,3 +1,4 @@
+import type { RooMessage } from "../../../core/task-persistence/rooMessage"
 // npx vitest run src/api/providers/__tests__/baseten.spec.ts
 
 // Use vi.hoisted to define mock functions that can be referenced in hoisted vi.mock() calls
@@ -101,7 +102,7 @@ describe("BasetenHandler", () => {
 
 	describe("createMessage", () => {
 		const systemPrompt = "You are a helpful assistant."
-		const messages: Anthropic.Messages.MessageParam[] = [
+		const messages: RooMessage[] = [
 			{
 				role: "user",
 				content: [
@@ -281,7 +282,7 @@ describe("BasetenHandler", () => {
 
 	describe("tool handling", () => {
 		const systemPrompt = "You are a helpful assistant."
-		const messages: Anthropic.Messages.MessageParam[] = [
+		const messages: RooMessage[] = [
 			{
 				role: "user",
 				content: [{ type: "text" as const, text: "Hello!" }],
@@ -389,7 +390,7 @@ describe("BasetenHandler", () => {
 
 	describe("error handling", () => {
 		const systemPrompt = "You are a helpful assistant."
-		const messages: Anthropic.Messages.MessageParam[] = [
+		const messages: RooMessage[] = [
 			{
 				role: "user",
 				content: [{ type: "text" as const, text: "Hello!" }],

+ 0 - 85
src/api/providers/__tests__/bedrock-reasoning.spec.ts

@@ -131,91 +131,6 @@ describe("AwsBedrockHandler - Extended Thinking", () => {
 			expect(bedrockOpts?.reasoningConfig).toBeUndefined()
 		})
 
-		it("should capture thinking signature from stream providerMetadata", async () => {
-			const handler = new AwsBedrockHandler({
-				apiProvider: "bedrock",
-				apiModelId: "anthropic.claude-sonnet-4-20250514-v1:0",
-				awsRegion: "us-east-1",
-				enableReasoningEffort: true,
-				modelMaxThinkingTokens: 4096,
-			})
-
-			const testSignature = "test-thinking-signature-abc123"
-
-			// Mock stream with reasoning content that includes a signature in providerMetadata
-			async function* mockFullStream() {
-				yield { type: "reasoning", text: "Let me think..." }
-				// The SDK emits signature as a reasoning-delta with providerMetadata.bedrock.signature
-				yield {
-					type: "reasoning",
-					text: "",
-					providerMetadata: { bedrock: { signature: testSignature } },
-				}
-				yield { type: "text-delta", text: "Answer" }
-			}
-
-			mockStreamText.mockReturnValue({
-				fullStream: mockFullStream(),
-				usage: Promise.resolve({ inputTokens: 100, outputTokens: 50 }),
-				providerMetadata: Promise.resolve({}),
-			})
-
-			const messages = [{ role: "user" as const, content: "Test message" }]
-			const stream = handler.createMessage("System prompt", messages)
-
-			for await (const _chunk of stream) {
-				// consume stream
-			}
-
-			// Verify thinking signature was captured
-			expect(handler.getThoughtSignature()).toBe(testSignature)
-		})
-
-		it("should capture redacted thinking blocks from stream providerMetadata", async () => {
-			const handler = new AwsBedrockHandler({
-				apiProvider: "bedrock",
-				apiModelId: "anthropic.claude-sonnet-4-20250514-v1:0",
-				awsRegion: "us-east-1",
-				enableReasoningEffort: true,
-				modelMaxThinkingTokens: 4096,
-			})
-
-			const redactedData = "base64-encoded-redacted-data"
-
-			// Mock stream with redacted reasoning content
-			async function* mockFullStream() {
-				yield { type: "reasoning", text: "Some thinking..." }
-				yield {
-					type: "reasoning",
-					text: "",
-					providerMetadata: { bedrock: { redactedData } },
-				}
-				yield { type: "text-delta", text: "Answer" }
-			}
-
-			mockStreamText.mockReturnValue({
-				fullStream: mockFullStream(),
-				usage: Promise.resolve({ inputTokens: 100, outputTokens: 50 }),
-				providerMetadata: Promise.resolve({}),
-			})
-
-			const messages = [{ role: "user" as const, content: "Test message" }]
-			const stream = handler.createMessage("System prompt", messages)
-
-			for await (const _chunk of stream) {
-				// consume stream
-			}
-
-			// Verify redacted thinking blocks were captured
-			const redactedBlocks = handler.getRedactedThinkingBlocks()
-			expect(redactedBlocks).toBeDefined()
-			expect(redactedBlocks).toHaveLength(1)
-			expect(redactedBlocks![0]).toEqual({
-				type: "redacted_thinking",
-				data: redactedData,
-			})
-		})
-
 		it("should enable reasoning when enableReasoningEffort is true in settings", async () => {
 			const handler = new AwsBedrockHandler({
 				apiProvider: "bedrock",

+ 20 - 28
src/api/providers/__tests__/bedrock.spec.ts

@@ -1,3 +1,4 @@
+import type { RooMessage } from "../../../core/task-persistence/rooMessage"
 // Mock TelemetryService before other imports
 const mockCaptureException = vi.fn()
 
@@ -490,17 +491,14 @@ describe("AwsBedrockHandler", () => {
 		it("should properly pass image content through to streamText via AI SDK messages", async () => {
 			setupMockStreamText()
 
-			const messages: Anthropic.Messages.MessageParam[] = [
+			const messages: any[] = [
 				{
 					role: "user",
 					content: [
 						{
 							type: "image",
-							source: {
-								type: "base64",
-								data: mockImageData,
-								media_type: "image/jpeg",
-							},
+							image: `data:image/jpeg;base64,${mockImageData}`,
+							mimeType: "image/jpeg",
 						},
 						{
 							type: "text",
@@ -530,7 +528,7 @@ describe("AwsBedrockHandler", () => {
 			expect(userMsg).toBeDefined()
 			expect(Array.isArray(userMsg.content)).toBe(true)
 
-			// The AI SDK convertToAiSdkMessages converts images to { type: "image", image: "data:...", mimeType: "..." }
+			// Messages are already in AI SDK ImagePart format
 			const imagePart = userMsg.content.find((p: { type: string }) => p.type === "image")
 			expect(imagePart).toBeDefined()
 			expect(imagePart.image).toContain("data:image/jpeg;base64,")
@@ -544,17 +542,14 @@ describe("AwsBedrockHandler", () => {
 		it("should handle multiple images in a single message", async () => {
 			setupMockStreamText()
 
-			const messages: Anthropic.Messages.MessageParam[] = [
+			const messages: any[] = [
 				{
 					role: "user",
 					content: [
 						{
 							type: "image",
-							source: {
-								type: "base64",
-								data: mockImageData,
-								media_type: "image/jpeg",
-							},
+							image: `data:image/jpeg;base64,${mockImageData}`,
+							mimeType: "image/jpeg",
 						},
 						{
 							type: "text",
@@ -562,11 +557,8 @@ describe("AwsBedrockHandler", () => {
 						},
 						{
 							type: "image",
-							source: {
-								type: "base64",
-								data: mockImageData,
-								media_type: "image/png",
-							},
+							image: `data:image/png;base64,${mockImageData}`,
+							mimeType: "image/png",
 						},
 						{
 							type: "text",
@@ -761,7 +753,7 @@ describe("AwsBedrockHandler", () => {
 				awsBedrock1MContext: true,
 			})
 
-			const messages: Anthropic.Messages.MessageParam[] = [
+			const messages: RooMessage[] = [
 				{
 					role: "user",
 					content: "Test message",
@@ -794,7 +786,7 @@ describe("AwsBedrockHandler", () => {
 				awsBedrock1MContext: false,
 			})
 
-			const messages: Anthropic.Messages.MessageParam[] = [
+			const messages: RooMessage[] = [
 				{
 					role: "user",
 					content: "Test message",
@@ -828,7 +820,7 @@ describe("AwsBedrockHandler", () => {
 				awsBedrock1MContext: true,
 			})
 
-			const messages: Anthropic.Messages.MessageParam[] = [
+			const messages: RooMessage[] = [
 				{
 					role: "user",
 					content: "Test message",
@@ -881,7 +873,7 @@ describe("AwsBedrockHandler", () => {
 				awsBedrock1MContext: true,
 			})
 
-			const messages: Anthropic.Messages.MessageParam[] = [
+			const messages: RooMessage[] = [
 				{
 					role: "user",
 					content: "Test message",
@@ -1013,7 +1005,7 @@ describe("AwsBedrockHandler", () => {
 					awsBedrockServiceTier: "PRIORITY",
 				})
 
-				const messages: Anthropic.Messages.MessageParam[] = [
+				const messages: RooMessage[] = [
 					{
 						role: "user",
 						content: "Test message",
@@ -1050,7 +1042,7 @@ describe("AwsBedrockHandler", () => {
 					awsBedrockServiceTier: "FLEX",
 				})
 
-				const messages: Anthropic.Messages.MessageParam[] = [
+				const messages: RooMessage[] = [
 					{
 						role: "user",
 						content: "Test message",
@@ -1087,7 +1079,7 @@ describe("AwsBedrockHandler", () => {
 					awsBedrockServiceTier: "PRIORITY", // Try to apply PRIORITY tier
 				})
 
-				const messages: Anthropic.Messages.MessageParam[] = [
+				const messages: RooMessage[] = [
 					{
 						role: "user",
 						content: "Test message",
@@ -1122,7 +1114,7 @@ describe("AwsBedrockHandler", () => {
 					// No awsBedrockServiceTier specified
 				})
 
-				const messages: Anthropic.Messages.MessageParam[] = [
+				const messages: RooMessage[] = [
 					{
 						role: "user",
 						content: "Test message",
@@ -1192,7 +1184,7 @@ describe("AwsBedrockHandler", () => {
 				awsRegion: "us-east-1",
 			})
 
-			const messages: Anthropic.Messages.MessageParam[] = [
+			const messages: RooMessage[] = [
 				{
 					role: "user",
 					content: "Hello",
@@ -1267,7 +1259,7 @@ describe("AwsBedrockHandler", () => {
 				awsRegion: "us-east-1",
 			})
 
-			const messages: Anthropic.Messages.MessageParam[] = [
+			const messages: RooMessage[] = [
 				{
 					role: "user",
 					content: "Hello",
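
Beyond the RooMessage type swap, the image hunks in this spec show the storage-format change itself: Anthropic's nested base64 source block gives way to the AI SDK ImagePart that RooMessage content stores directly. Side by side, with both shapes copied from the hunks above (mockImageData is the spec's base64 fixture):

	// Old: Anthropic.Messages.MessageParam image block
	const before = {
		type: "image",
		source: { type: "base64", data: mockImageData, media_type: "image/jpeg" },
	}

	// New: AI SDK ImagePart, persisted as-is inside RooMessage content
	const after = {
		type: "image",
		image: `data:image/jpeg;base64,${mockImageData}`,
		mimeType: "image/jpeg",
	}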

+ 4 - 3
src/api/providers/__tests__/deepseek.spec.ts

@@ -1,3 +1,4 @@
+import type { RooMessage } from "../../../core/task-persistence/rooMessage"
 // Use vi.hoisted to define mock functions that can be referenced in hoisted vi.mock() calls
 const { mockStreamText, mockGenerateText } = vi.hoisted(() => ({
 	mockStreamText: vi.fn(),
@@ -173,7 +174,7 @@ describe("DeepSeekHandler", () => {
 
 	describe("createMessage", () => {
 		const systemPrompt = "You are a helpful assistant."
-		const messages: Anthropic.Messages.MessageParam[] = [
+		const messages: RooMessage[] = [
 			{
 				role: "user",
 				content: [
@@ -400,7 +401,7 @@ describe("DeepSeekHandler", () => {
 
 	describe("reasoning content with deepseek-reasoner", () => {
 		const systemPrompt = "You are a helpful assistant."
-		const messages: Anthropic.Messages.MessageParam[] = [
+		const messages: RooMessage[] = [
 			{
 				role: "user",
 				content: [
@@ -570,7 +571,7 @@ describe("DeepSeekHandler", () => {
 
 	describe("tool handling", () => {
 		const systemPrompt = "You are a helpful assistant."
-		const messages: Anthropic.Messages.MessageParam[] = [
+		const messages: RooMessage[] = [
 			{
 				role: "user",
 				content: [{ type: "text" as const, text: "Hello!" }],

+ 3 - 2
src/api/providers/__tests__/fireworks.spec.ts

@@ -1,3 +1,4 @@
+import type { RooMessage } from "../../../core/task-persistence/rooMessage"
 // npx vitest run src/api/providers/__tests__/fireworks.spec.ts
 
 // Use vi.hoisted to define mock functions that can be referenced in hoisted vi.mock() calls
@@ -363,7 +364,7 @@ describe("FireworksHandler", () => {
 
 	describe("createMessage", () => {
 		const systemPrompt = "You are a helpful assistant."
-		const messages: Anthropic.Messages.MessageParam[] = [
+		const messages: RooMessage[] = [
 			{
 				role: "user",
 				content: [
@@ -730,7 +731,7 @@ describe("FireworksHandler", () => {
 
 	describe("tool handling", () => {
 		const systemPrompt = "You are a helpful assistant."
-		const messages: Anthropic.Messages.MessageParam[] = [
+		const messages: RooMessage[] = [
 			{
 				role: "user",
 				content: [{ type: "text" as const, text: "Hello!" }],

+ 3 - 2
src/api/providers/__tests__/gemini.spec.ts

@@ -1,3 +1,4 @@
+import type { RooMessage } from "../../../core/task-persistence/rooMessage"
 // npx vitest run src/api/providers/__tests__/gemini.spec.ts
 
 import { NoOutputGeneratedError } from "ai"
@@ -102,7 +103,7 @@ describe("GeminiHandler", () => {
 	})
 
 	describe("createMessage", () => {
-		const mockMessages: Anthropic.Messages.MessageParam[] = [
+		const mockMessages: RooMessage[] = [
 			{
 				role: "user",
 				content: "Hello",
@@ -377,7 +378,7 @@ describe("GeminiHandler", () => {
 	})
 
 	describe("error telemetry", () => {
-		const mockMessages: Anthropic.Messages.MessageParam[] = [
+		const mockMessages: RooMessage[] = [
 			{
 				role: "user",
 				content: "Hello",

+ 2 - 1
src/api/providers/__tests__/lite-llm.spec.ts

@@ -1,3 +1,4 @@
+import type { RooMessage } from "../../../core/task-persistence/rooMessage"
 const { mockStreamText, mockGenerateText } = vi.hoisted(() => ({
 	mockStreamText: vi.fn(),
 	mockGenerateText: vi.fn(),
@@ -257,7 +258,7 @@ describe("LiteLLMHandler", () => {
 			})
 
 			const systemPrompt = "You are a helpful assistant"
-			const messages: Anthropic.Messages.MessageParam[] = [{ role: "user", content: "Hello" }]
+			const messages: RooMessage[] = [{ role: "user", content: "Hello" }]
 
 			const generator = handler.createMessage(systemPrompt, messages)
 			for await (const _chunk of generator) {

+ 2 - 1
src/api/providers/__tests__/lmstudio.spec.ts

@@ -1,3 +1,4 @@
+import type { RooMessage } from "../../../core/task-persistence/rooMessage"
 // Use vi.hoisted to define mock functions that can be referenced in hoisted vi.mock() calls
 const { mockStreamText, mockGenerateText, mockWrapLanguageModel } = vi.hoisted(() => ({
 	mockStreamText: vi.fn(),
@@ -60,7 +61,7 @@ describe("LmStudioHandler", () => {
 
 	describe("createMessage", () => {
 		const systemPrompt = "You are a helpful assistant."
-		const messages: Anthropic.Messages.MessageParam[] = [
+		const messages: RooMessage[] = [
 			{
 				role: "user",
 				content: "Hello!",

+ 5 - 37
src/api/providers/__tests__/minimax.spec.ts

@@ -1,3 +1,4 @@
+import type { RooMessage } from "../../../core/task-persistence/rooMessage"
 import { describe, it, expect, beforeEach } from "vitest"
 
 import type { Anthropic } from "@anthropic-ai/sdk"
@@ -22,7 +23,7 @@ const {
 		mockGenerateText: vi.fn(),
 		mockCreateAnthropic: vi.fn().mockReturnValue(mockModel),
 		mockModel,
-		mockMergeEnvironmentDetailsForMiniMax: vi.fn((messages: Anthropic.Messages.MessageParam[]) => messages),
+		mockMergeEnvironmentDetailsForMiniMax: vi.fn((messages: RooMessage[]) => messages),
 		mockHandleAiSdkError: vi.fn((error: unknown, providerName: string) => {
 			const message = error instanceof Error ? error.message : String(error)
 			return new Error(`${providerName}: ${message}`)
@@ -96,7 +97,7 @@ async function collectChunks(stream: ApiStream): Promise<ApiStreamChunk[]> {
 
 describe("MiniMaxHandler", () => {
 	const systemPrompt = "You are a helpful assistant."
-	const messages: Anthropic.Messages.MessageParam[] = [
+	const messages: RooMessage[] = [
 		{
 			role: "user",
 			content: [{ type: "text", text: "Hello" }],
@@ -106,9 +107,7 @@ describe("MiniMaxHandler", () => {
 	beforeEach(() => {
 		vi.clearAllMocks()
 		mockCreateAnthropic.mockReturnValue(mockModel)
-		mockMergeEnvironmentDetailsForMiniMax.mockImplementation(
-			(inputMessages: Anthropic.Messages.MessageParam[]) => inputMessages,
-		)
+		mockMergeEnvironmentDetailsForMiniMax.mockImplementation((inputMessages: RooMessage[]) => inputMessages)
 		mockHandleAiSdkError.mockImplementation((error: unknown, providerName: string) => {
 			const message = error instanceof Error ? error.message : String(error)
 			return new Error(`${providerName}: ${message}`)
@@ -325,7 +324,7 @@ describe("MiniMaxHandler", () => {
 		})
 
 		it("calls mergeEnvironmentDetailsForMiniMax before conversion", async () => {
-			const mergedMessages: Anthropic.Messages.MessageParam[] = [
+			const mergedMessages: RooMessage[] = [
 				{
 					role: "user",
 					content: [{ type: "text", text: "Merged message" }],
@@ -369,37 +368,6 @@ describe("MiniMaxHandler", () => {
 		})
 	})
 
-	describe("thinking signature", () => {
-		it("returns undefined thought signature before any request", () => {
-			const handler = createHandler()
-			expect(handler.getThoughtSignature()).toBeUndefined()
-		})
-
-		it("captures thought signature from stream providerMetadata", async () => {
-			const signature = "test-thinking-signature"
-			mockStreamText.mockReturnValue(
-				createMockStream([
-					{
-						type: "reasoning-delta",
-						text: "thinking...",
-						providerMetadata: { anthropic: { signature } },
-					},
-					{ type: "text-delta", text: "Answer" },
-				]),
-			)
-
-			const handler = createHandler()
-			await collectChunks(handler.createMessage(systemPrompt, messages))
-
-			expect(handler.getThoughtSignature()).toBe(signature)
-		})
-
-		it("returns undefined redacted thinking blocks before any request", () => {
-			const handler = createHandler()
-			expect(handler.getRedactedThinkingBlocks()).toBeUndefined()
-		})
-	})
-
 	describe("completePrompt", () => {
 		it("calls generateText with model and prompt and returns text", async () => {
 			mockGenerateText.mockResolvedValue({ text: "response" })

+ 3 - 2
src/api/providers/__tests__/mistral.spec.ts

@@ -1,3 +1,4 @@
+import type { RooMessage } from "../../../core/task-persistence/rooMessage"
 // Use vi.hoisted to define mock functions that can be referenced in hoisted vi.mock() calls
 const { mockStreamText, mockGenerateText, mockCreateMistral } = vi.hoisted(() => ({
 	mockStreamText: vi.fn(),
@@ -102,7 +103,7 @@ describe("MistralHandler", () => {
 
 	describe("createMessage", () => {
 		const systemPrompt = "You are a helpful assistant."
-		const messages: Anthropic.Messages.MessageParam[] = [
+		const messages: RooMessage[] = [
 			{
 				role: "user",
 				content: [
@@ -329,7 +330,7 @@ describe("MistralHandler", () => {
 
 	describe("tool handling", () => {
 		const systemPrompt = "You are a helpful assistant."
-		const messages: Anthropic.Messages.MessageParam[] = [
+		const messages: RooMessage[] = [
 			{
 				role: "user",
 				content: [{ type: "text" as const, text: "Hello!" }],

+ 3 - 2
src/api/providers/__tests__/moonshot.spec.ts

@@ -1,3 +1,4 @@
+import type { RooMessage } from "../../../core/task-persistence/rooMessage"
 // Use vi.hoisted to define mock functions that can be referenced in hoisted vi.mock() calls
 const { mockStreamText, mockGenerateText } = vi.hoisted(() => ({
 	mockStreamText: vi.fn(),
@@ -121,7 +122,7 @@ describe("MoonshotHandler", () => {
 
 	describe("createMessage", () => {
 		const systemPrompt = "You are a helpful assistant."
-		const messages: Anthropic.Messages.MessageParam[] = [
+		const messages: RooMessage[] = [
 			{
 				role: "user",
 				content: [
@@ -344,7 +345,7 @@ describe("MoonshotHandler", () => {
 
 	describe("tool handling", () => {
 		const systemPrompt = "You are a helpful assistant."
-		const messages: Anthropic.Messages.MessageParam[] = [
+		const messages: RooMessage[] = [
 			{
 				role: "user",
 				content: [{ type: "text" as const, text: "Hello!" }],

+ 32 - 41
src/api/providers/__tests__/openai-native-reasoning.spec.ts

@@ -1,3 +1,4 @@
+import type { RooMessage } from "../../../core/task-persistence/rooMessage"
 // npx vitest run api/providers/__tests__/openai-native-reasoning.spec.ts
 
 import type { Anthropic } from "@anthropic-ai/sdk"
@@ -16,54 +17,50 @@ describe("OpenAI Native reasoning helpers", () => {
 	// ───────────────────────────────────────────────────────────
 	describe("stripPlainTextReasoningBlocks", () => {
 		it("passes through user messages unchanged", () => {
-			const messages: Anthropic.Messages.MessageParam[] = [
-				{ role: "user", content: [{ type: "text", text: "Hello" }] },
-			]
+			const messages: RooMessage[] = [{ role: "user", content: [{ type: "text", text: "Hello" }] }]
 			const result = stripPlainTextReasoningBlocks(messages)
 			expect(result).toEqual(messages)
 		})
 
 		it("passes through assistant messages with only text blocks", () => {
-			const messages: Anthropic.Messages.MessageParam[] = [
-				{ role: "assistant", content: [{ type: "text", text: "Hi there" }] },
-			]
+			const messages: RooMessage[] = [{ role: "assistant", content: [{ type: "text", text: "Hi there" }] }]
 			const result = stripPlainTextReasoningBlocks(messages)
 			expect(result).toEqual(messages)
 		})
 
 		it("passes through string-content assistant messages", () => {
-			const messages: Anthropic.Messages.MessageParam[] = [{ role: "assistant", content: "Hello" }]
+			const messages: RooMessage[] = [{ role: "assistant", content: "Hello" }]
 			const result = stripPlainTextReasoningBlocks(messages)
 			expect(result).toEqual(messages)
 		})
 
 		it("strips plain-text reasoning blocks from assistant content", () => {
-			const messages: Anthropic.Messages.MessageParam[] = [
+			const messages: RooMessage[] = [
 				{
 					role: "assistant",
 					content: [
 						{
 							type: "reasoning",
 							text: "Let me think...",
-						} as unknown as Anthropic.Messages.ContentBlockParam,
+						} as any,
 						{ type: "text", text: "The answer is 42" },
 					],
 				},
 			]
 			const result = stripPlainTextReasoningBlocks(messages)
 			expect(result).toHaveLength(1)
-			expect(result[0].content).toEqual([{ type: "text", text: "The answer is 42" }])
+			expect((result[0] as any).content).toEqual([{ type: "text", text: "The answer is 42" }])
 		})
 
 		it("removes assistant messages whose content becomes empty after filtering", () => {
-			const messages: Anthropic.Messages.MessageParam[] = [
+			const messages: RooMessage[] = [
 				{
 					role: "assistant",
 					content: [
 						{
 							type: "reasoning",
 							text: "Thinking only...",
-						} as unknown as Anthropic.Messages.ContentBlockParam,
+						} as any,
 					],
 				},
 			]
@@ -72,24 +69,24 @@ describe("OpenAI Native reasoning helpers", () => {
 		})
 
 		it("preserves tool_use blocks alongside stripped reasoning", () => {
-			const messages: Anthropic.Messages.MessageParam[] = [
+			const messages: RooMessage[] = [
 				{
 					role: "assistant",
 					content: [
-						{ type: "reasoning", text: "Thinking..." } as unknown as Anthropic.Messages.ContentBlockParam,
+						{ type: "reasoning", text: "Thinking..." } as any,
 						{ type: "tool_use", id: "call_1", name: "read_file", input: { path: "a.ts" } },
 					],
 				},
 			]
 			const result = stripPlainTextReasoningBlocks(messages)
 			expect(result).toHaveLength(1)
-			expect(result[0].content).toEqual([
+			expect((result[0] as any).content).toEqual([
 				{ type: "tool_use", id: "call_1", name: "read_file", input: { path: "a.ts" } },
 			])
 		})
 
 		it("does NOT strip blocks that have encrypted_content (those are not plain-text reasoning)", () => {
-			const messages: Anthropic.Messages.MessageParam[] = [
+			const messages: RooMessage[] = [
 				{
 					role: "assistant",
 					content: [
@@ -97,7 +94,7 @@ describe("OpenAI Native reasoning helpers", () => {
 							type: "reasoning",
 							text: "summary",
 							encrypted_content: "abc123",
-						} as unknown as Anthropic.Messages.ContentBlockParam,
+						} as any,
 						{ type: "text", text: "Response" },
 					],
 				},
@@ -105,32 +102,26 @@ describe("OpenAI Native reasoning helpers", () => {
 			const result = stripPlainTextReasoningBlocks(messages)
 			expect(result).toHaveLength(1)
 			// Both blocks should remain
-			expect(result[0].content).toHaveLength(2)
+			expect((result[0] as any).content).toHaveLength(2)
 		})
 
 		it("handles multiple messages correctly", () => {
-			const messages: Anthropic.Messages.MessageParam[] = [
+			const messages: RooMessage[] = [
 				{ role: "user", content: [{ type: "text", text: "Q1" }] },
 				{
 					role: "assistant",
-					content: [
-						{ type: "reasoning", text: "Think1" } as unknown as Anthropic.Messages.ContentBlockParam,
-						{ type: "text", text: "A1" },
-					],
+					content: [{ type: "reasoning", text: "Think1" } as any, { type: "text", text: "A1" }],
 				},
 				{ role: "user", content: [{ type: "text", text: "Q2" }] },
 				{
 					role: "assistant",
-					content: [
-						{ type: "reasoning", text: "Think2" } as unknown as Anthropic.Messages.ContentBlockParam,
-						{ type: "text", text: "A2" },
-					],
+					content: [{ type: "reasoning", text: "Think2" } as any, { type: "text", text: "A2" }],
 				},
 			]
 			const result = stripPlainTextReasoningBlocks(messages)
 			expect(result).toHaveLength(4)
-			expect(result[1].content).toEqual([{ type: "text", text: "A1" }])
-			expect(result[3].content).toEqual([{ type: "text", text: "A2" }])
+			expect((result[1] as any).content).toEqual([{ type: "text", text: "A1" }])
+			expect((result[3] as any).content).toEqual([{ type: "text", text: "A2" }])
 		})
 	})
 
@@ -139,7 +130,7 @@ describe("OpenAI Native reasoning helpers", () => {
 	// ───────────────────────────────────────────────────────────
 	describe("collectEncryptedReasoningItems", () => {
 		it("returns empty array when no encrypted reasoning items exist", () => {
-			const messages: Anthropic.Messages.MessageParam[] = [
+			const messages: RooMessage[] = [
 				{ role: "user", content: [{ type: "text", text: "Hello" }] },
 				{ role: "assistant", content: [{ type: "text", text: "Hi" }] },
 			]
@@ -157,7 +148,7 @@ describe("OpenAI Native reasoning helpers", () => {
 					summary: [{ type: "summary_text", text: "I thought about it" }],
 				},
 				{ role: "assistant", content: [{ type: "text", text: "Hi" }] },
-			] as unknown as Anthropic.Messages.MessageParam[]
+			] as unknown as RooMessage[]
 
 			const result = collectEncryptedReasoningItems(messages)
 			expect(result).toHaveLength(1)
@@ -187,7 +178,7 @@ describe("OpenAI Native reasoning helpers", () => {
 					summary: [{ type: "summary_text", text: "Summary 2" }],
 				},
 				{ role: "assistant", content: [{ type: "text", text: "A2" }] },
-			] as unknown as Anthropic.Messages.MessageParam[]
+			] as unknown as RooMessage[]
 
 			const result = collectEncryptedReasoningItems(messages)
 			expect(result).toHaveLength(2)
@@ -201,7 +192,7 @@ describe("OpenAI Native reasoning helpers", () => {
 			const messages = [
 				{ type: "reasoning", id: "rs_x", text: "plain reasoning" },
 				{ role: "user", content: [{ type: "text", text: "Hello" }] },
-			] as unknown as Anthropic.Messages.MessageParam[]
+			] as unknown as RooMessage[]
 
 			const result = collectEncryptedReasoningItems(messages)
 			expect(result).toEqual([])
@@ -215,7 +206,7 @@ describe("OpenAI Native reasoning helpers", () => {
 					encrypted_content: "enc_data",
 				},
 				{ role: "assistant", content: [{ type: "text", text: "Hi" }] },
-			] as unknown as Anthropic.Messages.MessageParam[]
+			] as unknown as RooMessage[]
 
 			const result = collectEncryptedReasoningItems(messages)
 			expect(result).toHaveLength(1)
@@ -248,7 +239,7 @@ describe("OpenAI Native reasoning helpers", () => {
 					summary: [{ type: "summary_text", text: "I considered the question" }],
 				},
 				{ role: "assistant", content: [{ type: "text", text: "Hi there" }] },
-			] as unknown as Anthropic.Messages.MessageParam[]
+			] as unknown as RooMessage[]
 
 			// AI SDK messages (after filtering encrypted items + converting)
 			const aiSdkMessages: ModelMessage[] = [
@@ -304,7 +295,7 @@ describe("OpenAI Native reasoning helpers", () => {
 					summary: [{ type: "summary_text", text: "Thought 2" }],
 				},
 				{ role: "assistant", content: [{ type: "text", text: "A2" }] },
-			] as unknown as Anthropic.Messages.MessageParam[]
+			] as unknown as RooMessage[]
 
 			const aiSdkMessages: ModelMessage[] = [
 				{ role: "user", content: "Q1" },
@@ -362,7 +353,7 @@ describe("OpenAI Native reasoning helpers", () => {
 					],
 				},
 				{ role: "assistant", content: [{ type: "text", text: "Response" }] },
-			] as unknown as Anthropic.Messages.MessageParam[]
+			] as unknown as RooMessage[]
 
 			const aiSdkMessages: ModelMessage[] = [
 				{ role: "user", content: "Hi" },
@@ -397,7 +388,7 @@ describe("OpenAI Native reasoning helpers", () => {
 					encrypted_content: "enc_nosummary",
 				},
 				{ role: "assistant", content: [{ type: "text", text: "Response" }] },
-			] as unknown as Anthropic.Messages.MessageParam[]
+			] as unknown as RooMessage[]
 
 			const aiSdkMessages: ModelMessage[] = [
 				{ role: "user", content: "Hi" },
@@ -437,7 +428,7 @@ describe("OpenAI Native reasoning helpers", () => {
 					summary: [{ type: "summary_text", text: "Step B" }],
 				},
 				{ role: "assistant", content: [{ type: "text", text: "Done" }] },
-			] as unknown as Anthropic.Messages.MessageParam[]
+			] as unknown as RooMessage[]
 
 			const aiSdkMessages: ModelMessage[] = [
 				{ role: "user", content: "Hi" },
@@ -496,7 +487,7 @@ describe("OpenAI Native reasoning helpers", () => {
 					summary: [{ type: "summary_text", text: "Thought after tool" }],
 				},
 				{ role: "assistant", content: [{ type: "text", text: "OK" }] },
-			] as unknown as Anthropic.Messages.MessageParam[]
+			] as unknown as RooMessage[]
 
 			// AI SDK messages after conversion (tool_result splits into tool + user)
 			const aiSdkMessages: ModelMessage[] = [
@@ -539,7 +530,7 @@ describe("OpenAI Native reasoning helpers", () => {
 					id: "rs_orphan",
 					encrypted_content: "enc_orphan",
 				},
-			] as unknown as Anthropic.Messages.MessageParam[]
+			] as unknown as RooMessage[]
 
 			const aiSdkMessages: ModelMessage[] = [{ role: "user", content: "Hi" }]
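
These tests pin stripPlainTextReasoningBlocks down tightly enough to sketch it. A minimal version consistent with the assertions above, with the field checks inferred from the tests rather than copied from openai-native.ts:

	import type { RooMessage } from "../../../core/task-persistence/rooMessage"

	function stripPlainTextReasoningBlocks(messages: RooMessage[]): RooMessage[] {
		return messages.flatMap((message): RooMessage[] => {
			// Only assistant messages with array content can hold reasoning blocks.
			if (message.role !== "assistant" || !Array.isArray(message.content)) {
				return [message]
			}
			// Strip plain-text reasoning; keep text and tool_use blocks, and keep
			// reasoning blocks with encrypted_content, which must survive round-trips.
			const content = (message.content as any[]).filter(
				(block) => block.type !== "reasoning" || block.encrypted_content !== undefined,
			)
			// Assistant messages emptied by the filter are dropped entirely.
			return content.length > 0 ? [{ ...message, content } as RooMessage] : []
		})
	}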
 

+ 2 - 1
src/api/providers/__tests__/openai-native-usage.spec.ts

@@ -1,3 +1,4 @@
+import type { RooMessage } from "../../../core/task-persistence/rooMessage"
 // npx vitest run api/providers/__tests__/openai-native-usage.spec.ts
 
 const { mockStreamText, mockGenerateText } = vi.hoisted(() => ({
@@ -38,7 +39,7 @@ import type { ApiHandlerOptions } from "../../../shared/api"
 describe("OpenAiNativeHandler - usage metrics", () => {
 	let handler: OpenAiNativeHandler
 	const systemPrompt = "You are a helpful assistant."
-	const messages: Anthropic.Messages.MessageParam[] = [{ role: "user", content: "Hello!" }]
+	const messages: RooMessage[] = [{ role: "user", content: "Hello!" }]
 
 	beforeEach(() => {
 		handler = new OpenAiNativeHandler({

+ 2 - 1
src/api/providers/__tests__/openai-native.spec.ts

@@ -1,3 +1,4 @@
+import type { RooMessage } from "../../../core/task-persistence/rooMessage"
 // npx vitest run api/providers/__tests__/openai-native.spec.ts
 
 // Use vi.hoisted to define mock functions that can be referenced in hoisted vi.mock() calls
@@ -41,7 +42,7 @@ describe("OpenAiNativeHandler", () => {
 	let handler: OpenAiNativeHandler
 	let mockOptions: ApiHandlerOptions
 	const systemPrompt = "You are a helpful assistant."
-	const messages: Anthropic.Messages.MessageParam[] = [
+	const messages: RooMessage[] = [
 		{
 			role: "user",
 			content: [

+ 2 - 1
src/api/providers/__tests__/openai-usage-tracking.spec.ts

@@ -1,3 +1,4 @@
+import type { RooMessage } from "../../../core/task-persistence/rooMessage"
 // npx vitest run api/providers/__tests__/openai-usage-tracking.spec.ts
 
 import { Anthropic } from "@anthropic-ai/sdk"
@@ -53,7 +54,7 @@ describe("OpenAiHandler with usage tracking fix", () => {
 
 	describe("usage metrics with streaming", () => {
 		const systemPrompt = "You are a helpful assistant."
-		const messages: Anthropic.Messages.MessageParam[] = [
+		const messages: RooMessage[] = [
 			{
 				role: "user",
 				content: [

+ 8 - 7
src/api/providers/__tests__/openai.spec.ts

@@ -1,3 +1,4 @@
+import type { RooMessage } from "../../../core/task-persistence/rooMessage"
 // npx vitest run api/providers/__tests__/openai.spec.ts
 
 const { mockStreamText, mockGenerateText } = vi.hoisted(() => ({
@@ -154,7 +155,7 @@ describe("OpenAiHandler", () => {
 
 	describe("createMessage", () => {
 		const systemPrompt = "You are a helpful assistant."
-		const messages: Anthropic.Messages.MessageParam[] = [
+		const messages: RooMessage[] = [
 			{
 				role: "user",
 				content: [
@@ -445,7 +446,7 @@ describe("OpenAiHandler", () => {
 	})
 
 	describe("error handling", () => {
-		const testMessages: Anthropic.Messages.MessageParam[] = [
+		const testMessages: RooMessage[] = [
 			{
 				role: "user",
 				content: [
@@ -578,7 +579,7 @@ describe("OpenAiHandler", () => {
 
 			const azureHandler = new OpenAiHandler(makeAzureOptions())
 			const systemPrompt = "You are a helpful assistant."
-			const messages: Anthropic.Messages.MessageParam[] = [
+			const messages: RooMessage[] = [
 				{
 					role: "user",
 					content: "Hello!",
@@ -609,7 +610,7 @@ describe("OpenAiHandler", () => {
 				openAiStreamingEnabled: false,
 			})
 			const systemPrompt = "You are a helpful assistant."
-			const messages: Anthropic.Messages.MessageParam[] = [
+			const messages: RooMessage[] = [
 				{
 					role: "user",
 					content: "Hello!",
@@ -684,7 +685,7 @@ describe("OpenAiHandler", () => {
 				modelMaxTokens: 32000,
 			})
 			const systemPrompt = "You are a helpful assistant."
-			const messages: Anthropic.Messages.MessageParam[] = [
+			const messages: RooMessage[] = [
 				{
 					role: "user",
 					content: "Hello!",
@@ -720,7 +721,7 @@ describe("OpenAiHandler", () => {
 				includeMaxTokens: false,
 			})
 			const systemPrompt = "You are a helpful assistant."
-			const messages: Anthropic.Messages.MessageParam[] = [
+			const messages: RooMessage[] = [
 				{
 					role: "user",
 					content: "Hello!",
@@ -750,7 +751,7 @@ describe("OpenAiHandler", () => {
 				includeMaxTokens: true,
 			})
 			const systemPrompt = "You are a helpful assistant."
-			const messages: Anthropic.Messages.MessageParam[] = [
+			const messages: RooMessage[] = [
 				{
 					role: "user",
 					content: "Hello!",

+ 2 - 112
src/api/providers/__tests__/openrouter.spec.ts

@@ -1,3 +1,4 @@
+import type { RooMessage } from "../../../core/task-persistence/rooMessage"
 // pnpm --filter roo-cline test api/providers/__tests__/openrouter.spec.ts
 
 vitest.mock("vscode", () => ({}))
@@ -268,7 +269,7 @@ describe("OpenRouterHandler", () => {
 			})
 
 			const systemPrompt = "test system prompt"
-			const messages: Anthropic.Messages.MessageParam[] = [{ role: "user" as const, content: "test message" }]
+			const messages: RooMessage[] = [{ role: "user" as const, content: "test message" }]
 
 			const generator = handler.createMessage(systemPrompt, messages)
 			const chunks = []
@@ -475,36 +476,6 @@ describe("OpenRouterHandler", () => {
 			expect(chunks[1]).toEqual({ type: "text", text: "result" })
 		})
 
-		it("accumulates reasoning details for getReasoningDetails()", async () => {
-			const handler = new OpenRouterHandler(mockOptions)
-
-			const mockFullStream = (async function* () {
-				yield { type: "reasoning-delta", text: "step 1...", id: "1" }
-				yield { type: "reasoning-delta", text: "step 2...", id: "2" }
-				yield { type: "text-delta", text: "result", id: "3" }
-			})()
-
-			mockStreamText.mockReturnValue({
-				fullStream: mockFullStream,
-				usage: Promise.resolve({ inputTokens: 10, outputTokens: 20, totalTokens: 30 }),
-				totalUsage: Promise.resolve({ inputTokens: 10, outputTokens: 20, totalTokens: 30 }),
-			})
-
-			const generator = handler.createMessage("test", [{ role: "user", content: "test" }])
-
-			for await (const _ of generator) {
-				// consume all chunks
-			}
-
-			// After streaming, getReasoningDetails should return accumulated reasoning
-			const reasoningDetails = handler.getReasoningDetails()
-			expect(reasoningDetails).toBeDefined()
-			expect(reasoningDetails).toHaveLength(1)
-			expect(reasoningDetails![0].type).toBe("reasoning.text")
-			expect(reasoningDetails![0].text).toBe("step 1...step 2...")
-			expect(reasoningDetails![0].index).toBe(0)
-		})
-
 		it("handles tool call streaming", async () => {
 			const handler = new OpenRouterHandler(mockOptions)
 
@@ -906,87 +877,6 @@ describe("OpenRouterHandler", () => {
 		})
 	})
 
-	describe("getReasoningDetails", () => {
-		it("returns undefined when no reasoning was captured", async () => {
-			const handler = new OpenRouterHandler(mockOptions)
-
-			// Stream with no reasoning
-			const mockFullStream = (async function* () {
-				yield { type: "text-delta", text: "just text", id: "1" }
-			})()
-
-			mockStreamText.mockReturnValue({
-				fullStream: mockFullStream,
-				usage: Promise.resolve({ inputTokens: 10, outputTokens: 20, totalTokens: 30 }),
-				totalUsage: Promise.resolve({ inputTokens: 10, outputTokens: 20, totalTokens: 30 }),
-			})
-
-			const generator = handler.createMessage("test", [{ role: "user", content: "test" }])
-
-			for await (const _ of generator) {
-				// consume all chunks
-			}
-
-			// No reasoning was captured, should return undefined
-			const reasoningDetails = handler.getReasoningDetails()
-			expect(reasoningDetails).toBeUndefined()
-		})
-
-		it("resets reasoning details between requests", async () => {
-			const handler = new OpenRouterHandler(mockOptions)
-
-			// First request with reasoning
-			const mockFullStream1 = (async function* () {
-				yield { type: "reasoning-delta", text: "first request reasoning", id: "1" }
-				yield { type: "text-delta", text: "result 1", id: "2" }
-			})()
-
-			mockStreamText.mockReturnValue({
-				fullStream: mockFullStream1,
-				usage: Promise.resolve({ inputTokens: 10, outputTokens: 20, totalTokens: 30 }),
-				totalUsage: Promise.resolve({ inputTokens: 10, outputTokens: 20, totalTokens: 30 }),
-			})
-
-			const generator1 = handler.createMessage("test", [{ role: "user", content: "test" }])
-			for await (const _ of generator1) {
-				// consume
-			}
-
-			// Verify first request captured reasoning
-			let reasoningDetails = handler.getReasoningDetails()
-			expect(reasoningDetails).toBeDefined()
-			expect(reasoningDetails![0].text).toBe("first request reasoning")
-
-			// Second request without reasoning
-			const mockFullStream2 = (async function* () {
-				yield { type: "text-delta", text: "result 2", id: "1" }
-			})()
-
-			mockStreamText.mockReturnValue({
-				fullStream: mockFullStream2,
-				usage: Promise.resolve({ inputTokens: 10, outputTokens: 20, totalTokens: 30 }),
-				totalUsage: Promise.resolve({ inputTokens: 10, outputTokens: 20, totalTokens: 30 }),
-			})
-
-			const generator2 = handler.createMessage("test", [{ role: "user", content: "test" }])
-			for await (const _ of generator2) {
-				// consume
-			}
-
-			// Reasoning details should be reset (undefined since second request had no reasoning)
-			reasoningDetails = handler.getReasoningDetails()
-			expect(reasoningDetails).toBeUndefined()
-		})
-
-		it("returns undefined before any streaming occurs", () => {
-			const handler = new OpenRouterHandler(mockOptions)
-
-			// getReasoningDetails before any createMessage call
-			const reasoningDetails = handler.getReasoningDetails()
-			expect(reasoningDetails).toBeUndefined()
-		})
-	})
-
 	describe("model-specific handling", () => {
 		const mockStreamResult = () => {
 			const mockFullStream = (async function* () {
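
Note what this spec keeps: streaming consumers are untouched, since reasoning still arrives as ordinary chunks; only the post-stream getReasoningDetails accessor disappears, its data presumably carried by the persisted response messages instead. A consumer loop using the chunk shapes the remaining tests assert:

	const stream = handler.createMessage("test system prompt", [{ role: "user", content: "test message" }])
	for await (const chunk of stream) {
		if (chunk.type === "reasoning" || chunk.type === "text") {
			process.stdout.write(chunk.text)
		}
	}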

+ 4 - 3
src/api/providers/__tests__/qwen-code-native-tools.spec.ts

@@ -1,3 +1,4 @@
+import type { RooMessage } from "../../../core/task-persistence/rooMessage"
 // npx vitest run api/providers/__tests__/qwen-code-native-tools.spec.ts
 
 const {
@@ -261,7 +262,7 @@ describe("QwenCodeHandler (AI SDK)", () => {
 			})
 
 		const handler = new QwenCodeHandler({ apiModelId: "qwen3-coder-plus", qwenCodeOauthPath: oauthPath })
-		const messages: Anthropic.Messages.MessageParam[] = [{ role: "user", content: "Hi" }]
+		const messages: RooMessage[] = [{ role: "user", content: "Hi" }]
 
 		const chunks = await collectStreamChunks(handler.createMessage("System", messages))
 
@@ -289,7 +290,7 @@ describe("QwenCodeHandler (AI SDK)", () => {
 		})
 
 		const handler = new QwenCodeHandler({ apiModelId: "qwen3-coder-plus" })
-		const messages: Anthropic.Messages.MessageParam[] = [{ role: "user", content: "Hello" }]
+		const messages: RooMessage[] = [{ role: "user", content: "Hello" }]
 
 		const chunks = await collectStreamChunks(handler.createMessage("System", messages))
 
@@ -365,7 +366,7 @@ describe("QwenCodeHandler (AI SDK)", () => {
 		})
 
 		const handler = new QwenCodeHandler({ apiModelId: "qwen3-coder-plus" })
-		const messages: Anthropic.Messages.MessageParam[] = [{ role: "user", content: "Hello" }]
+		const messages: RooMessage[] = [{ role: "user", content: "Hello" }]
 
 		await collectStreamChunks(handler.createMessage("System", messages))
 

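Note on the recurring type swap in these specs: the `RooMessage` definition itself sits outside this diff. A plausible shape, inferred from how the provider changes further down cast the array straight to the AI SDK's `ModelMessage[]` and filter out `{ type: "reasoning" }` meta entries. A sketch only, not the verbatim source:

	import type { ModelMessage } from "ai"

	// Sketch only; the real definition lives in src/core/task-persistence/rooMessage.ts.
	// Stored messages appear to be AI SDK ModelMessage values plus provider-specific
	// meta entries (persisted reasoning records) that providers filter out before a request.
	type ReasoningMeta = { type: "reasoning"; text?: string } // hypothetical field names
	type RooMessage = ModelMessage | ReasoningMeta
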
+ 3 - 4
src/api/providers/__tests__/requesty.spec.ts

@@ -1,3 +1,4 @@
+import type { RooMessage } from "../../../core/task-persistence/rooMessage"
 // npx vitest run api/providers/__tests__/requesty.spec.ts
 
 // Use vi.hoisted to define mock functions that can be referenced in hoisted vi.mock() calls
@@ -134,7 +135,7 @@ describe("RequestyHandler", () => {
 
 	describe("createMessage", () => {
 		const systemPrompt = "test system prompt"
-		const messages: Anthropic.Messages.MessageParam[] = [{ role: "user" as const, content: "test message" }]
+		const messages: RooMessage[] = [{ role: "user" as const, content: "test message" }]
 
 		it("generates correct stream chunks", async () => {
 			async function* mockFullStream() {
@@ -265,9 +266,7 @@ describe("RequestyHandler", () => {
 		})
 
 		describe("native tool support", () => {
-			const toolMessages: Anthropic.Messages.MessageParam[] = [
-				{ role: "user" as const, content: "What's the weather?" },
-			]
+			const toolMessages: RooMessage[] = [{ role: "user" as const, content: "What's the weather?" }]
 
 			it("should include tools in request when tools are provided", async () => {
 				const mockTools = [

+ 3 - 73
src/api/providers/__tests__/roo.spec.ts

@@ -4,6 +4,7 @@ import { Anthropic } from "@anthropic-ai/sdk"
 import { rooDefaultModelId } from "@roo-code/types"
 
 import { ApiHandlerOptions } from "../../../shared/api"
+import type { RooMessage } from "../../../core/task-persistence/rooMessage"
 
 // Mock the AI SDK
 const mockStreamText = vitest.fn()
@@ -138,7 +139,7 @@ describe("RooHandler", () => {
 	let handler: RooHandler
 	let mockOptions: ApiHandlerOptions
 	const systemPrompt = "You are a helpful assistant."
-	const messages: Anthropic.Messages.MessageParam[] = [
+	const messages: RooMessage[] = [
 		{
 			role: "user",
 			content: "Hello!",
@@ -297,7 +298,7 @@ describe("RooHandler", () => {
 		it("should handle multiple messages in conversation", async () => {
 			mockStreamText.mockReturnValue(createMockStreamResult())
 
-			const multipleMessages: Anthropic.Messages.MessageParam[] = [
+			const multipleMessages: RooMessage[] = [
 				{ role: "user", content: "First message" },
 				{ role: "assistant", content: "First response" },
 				{ role: "user", content: "Second message" },
@@ -688,77 +689,6 @@ describe("RooHandler", () => {
 		})
 	})
 
-	describe("reasoning details accumulation", () => {
-		beforeEach(() => {
-			handler = new RooHandler(mockOptions)
-		})
-
-		it("should accumulate reasoning text from reasoning-delta parts", async () => {
-			mockStreamText.mockReturnValue(
-				createMockStreamResult({
-					reasoningChunks: ["thinking ", "about ", "this"],
-					textChunks: ["answer"],
-				}),
-			)
-
-			const stream = handler.createMessage(systemPrompt, messages)
-			const chunks: any[] = []
-			for await (const chunk of stream) {
-				chunks.push(chunk)
-			}
-
-			const reasoningChunks = chunks.filter((c) => c.type === "reasoning")
-			expect(reasoningChunks).toHaveLength(3)
-			expect(reasoningChunks[0].text).toBe("thinking ")
-			expect(reasoningChunks[1].text).toBe("about ")
-			expect(reasoningChunks[2].text).toBe("this")
-
-			const details = handler.getReasoningDetails()
-			expect(details).toBeDefined()
-			expect(details![0].type).toBe("reasoning.text")
-			expect(details![0].text).toBe("thinking about this")
-		})
-
-		it("should override reasoning details from providerMetadata", async () => {
-			const providerReasoningDetails = [{ type: "reasoning.summary", summary: "Server summary", index: 0 }]
-
-			mockStreamText.mockReturnValue(
-				createMockStreamResult({
-					reasoningChunks: ["local thinking"],
-					textChunks: ["answer"],
-					providerMetadata: {
-						roo: { reasoning_details: providerReasoningDetails },
-					},
-				}),
-			)
-
-			const stream = handler.createMessage(systemPrompt, messages)
-			for await (const _chunk of stream) {
-				// consume
-			}
-
-			const details = handler.getReasoningDetails()
-			expect(details).toBeDefined()
-			expect(details).toEqual(providerReasoningDetails)
-		})
-
-		it("should return undefined when no reasoning details", async () => {
-			mockStreamText.mockReturnValue(
-				createMockStreamResult({
-					reasoningChunks: [],
-					textChunks: ["just text"],
-				}),
-			)
-
-			const stream = handler.createMessage(systemPrompt, messages)
-			for await (const _chunk of stream) {
-				// consume
-			}
-
-			expect(handler.getReasoningDetails()).toBeUndefined()
-		})
-	})
-
 	describe("usage and cost processing", () => {
 		beforeEach(() => {
 			handler = new RooHandler(mockOptions)

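The deleted reasoning-details suite has no one-for-one replacement in this file. If the handler now closes its stream with the AI SDK response messages (via the `yieldResponseMessage` helper the providers below yield from), an equivalent assertion might look like this sketch; the chunk name and shape are assumptions:

	it("yields response messages once the stream completes", async () => {
		mockStreamText.mockReturnValue(createMockStreamResult({ textChunks: ["answer"] }))

		const chunks: any[] = []
		for await (const chunk of handler.createMessage(systemPrompt, messages)) {
			chunks.push(chunk)
		}

		// "response-messages" is a hypothetical chunk type: reasoning would now be
		// read from the persisted assistant message rather than via an accessor.
		expect(chunks[chunks.length - 1].type).toBe("response-messages")
	})
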
+ 4 - 3
src/api/providers/__tests__/sambanova.spec.ts

@@ -1,3 +1,4 @@
+import type { RooMessage } from "../../../core/task-persistence/rooMessage"
 // npx vitest run src/api/providers/__tests__/sambanova.spec.ts
 
 // Use vi.hoisted to define mock functions that can be referenced in hoisted vi.mock() calls
@@ -116,7 +117,7 @@ describe("SambaNovaHandler", () => {
 
 	describe("createMessage", () => {
 		const systemPrompt = "You are a helpful assistant."
-		const messages: Anthropic.Messages.MessageParam[] = [
+		const messages: RooMessage[] = [
 			{
 				role: "user",
 				content: [
@@ -454,7 +455,7 @@ describe("SambaNovaHandler", () => {
 
 	describe("tool handling", () => {
 		const systemPrompt = "You are a helpful assistant."
-		const messages: Anthropic.Messages.MessageParam[] = [
+		const messages: RooMessage[] = [
 			{
 				role: "user",
 				content: [{ type: "text" as const, text: "Hello!" }],
@@ -569,7 +570,7 @@ describe("SambaNovaHandler", () => {
 
 	describe("error handling", () => {
 		const systemPrompt = "You are a helpful assistant."
-		const messages: Anthropic.Messages.MessageParam[] = [
+		const messages: RooMessage[] = [
 			{
 				role: "user",
 				content: [{ type: "text" as const, text: "Hello!" }],

+ 6 - 5
src/api/providers/__tests__/vercel-ai-gateway.spec.ts

@@ -1,3 +1,4 @@
+import type { RooMessage } from "../../../core/task-persistence/rooMessage"
 // npx vitest run src/api/providers/__tests__/vercel-ai-gateway.spec.ts
 
 // Use vi.hoisted to define mock functions that can be referenced in hoisted vi.mock() calls
@@ -170,7 +171,7 @@ describe("VercelAiGatewayHandler", () => {
 
 			const handler = new VercelAiGatewayHandler(mockOptions)
 			const systemPrompt = "You are a helpful assistant."
-			const messages: Anthropic.Messages.MessageParam[] = [{ role: "user", content: "Hello" }]
+			const messages: RooMessage[] = [{ role: "user", content: "Hello" }]
 
 			const stream = handler.createMessage(systemPrompt, messages)
 			const chunks = []
@@ -203,7 +204,7 @@ describe("VercelAiGatewayHandler", () => {
 			})
 
 			const systemPrompt = "You are a helpful assistant."
-			const messages: Anthropic.Messages.MessageParam[] = [{ role: "user", content: "Hello" }]
+			const messages: RooMessage[] = [{ role: "user", content: "Hello" }]
 
 			await handler.createMessage(systemPrompt, messages).next()
 
@@ -220,7 +221,7 @@ describe("VercelAiGatewayHandler", () => {
 			const handler = new VercelAiGatewayHandler(mockOptions)
 
 			const systemPrompt = "You are a helpful assistant."
-			const messages: Anthropic.Messages.MessageParam[] = [{ role: "user", content: "Hello" }]
+			const messages: RooMessage[] = [{ role: "user", content: "Hello" }]
 
 			await handler.createMessage(systemPrompt, messages).next()
 
@@ -237,7 +238,7 @@ describe("VercelAiGatewayHandler", () => {
 			const handler = new VercelAiGatewayHandler(mockOptions)
 
 			const systemPrompt = "You are a helpful assistant."
-			const messages: Anthropic.Messages.MessageParam[] = [{ role: "user", content: "Hello" }]
+			const messages: RooMessage[] = [{ role: "user", content: "Hello" }]
 
 			await handler.createMessage(systemPrompt, messages).next()
 
@@ -264,7 +265,7 @@ describe("VercelAiGatewayHandler", () => {
 
 			const handler = new VercelAiGatewayHandler(mockOptions)
 			const systemPrompt = "You are a helpful assistant."
-			const messages: Anthropic.Messages.MessageParam[] = [{ role: "user", content: "Hello" }]
+			const messages: RooMessage[] = [{ role: "user", content: "Hello" }]
 
 			const stream = handler.createMessage(systemPrompt, messages)
 			const chunks = []

+ 2 - 1
src/api/providers/__tests__/vertex.spec.ts

@@ -1,3 +1,4 @@
+import type { RooMessage } from "../../../core/task-persistence/rooMessage"
 // npx vitest run src/api/providers/__tests__/vertex.spec.ts
 
 // Mock vscode first to avoid import errors
@@ -140,7 +141,7 @@ describe("VertexHandler", () => {
 	})
 
 	describe("createMessage", () => {
-		const mockMessages: Anthropic.Messages.MessageParam[] = [
+		const mockMessages: RooMessage[] = [
 			{ role: "user", content: "Hello" },
 			{ role: "assistant", content: "Hi there!" },
 		]

+ 6 - 5
src/api/providers/__tests__/vscode-lm.spec.ts

@@ -1,3 +1,4 @@
+import type { RooMessage } from "../../../core/task-persistence/rooMessage"
 import type { Mock } from "vitest"
 
 // Mocks must come first, before imports
@@ -143,7 +144,7 @@ describe("VsCodeLmHandler", () => {
 
 		it("should stream text responses", async () => {
 			const systemPrompt = "You are a helpful assistant"
-			const messages: Anthropic.Messages.MessageParam[] = [
+			const messages: RooMessage[] = [
 				{
 					role: "user" as const,
 					content: "Hello",
@@ -182,7 +183,7 @@ describe("VsCodeLmHandler", () => {
 
 		it("should emit tool_call chunks when tools are provided", async () => {
 			const systemPrompt = "You are a helpful assistant"
-			const messages: Anthropic.Messages.MessageParam[] = [
+			const messages: RooMessage[] = [
 				{
 					role: "user" as const,
 					content: "Calculate 2+2",
@@ -247,7 +248,7 @@ describe("VsCodeLmHandler", () => {
 
 		it("should handle native tool calls when tools are provided", async () => {
 			const systemPrompt = "You are a helpful assistant"
-			const messages: Anthropic.Messages.MessageParam[] = [
+			const messages: RooMessage[] = [
 				{
 					role: "user" as const,
 					content: "Calculate 2+2",
@@ -312,7 +313,7 @@ describe("VsCodeLmHandler", () => {
 
 		it("should pass tools to request options when tools are provided", async () => {
 			const systemPrompt = "You are a helpful assistant"
-			const messages: Anthropic.Messages.MessageParam[] = [
+			const messages: RooMessage[] = [
 				{
 					role: "user" as const,
 					content: "Calculate 2+2",
@@ -380,7 +381,7 @@ describe("VsCodeLmHandler", () => {
 
 		it("should handle errors", async () => {
 			const systemPrompt = "You are a helpful assistant"
-			const messages: Anthropic.Messages.MessageParam[] = [
+			const messages: RooMessage[] = [
 				{
 					role: "user" as const,
 					content: "Hello",

+ 3 - 2
src/api/providers/__tests__/xai.spec.ts

@@ -1,3 +1,4 @@
+import type { RooMessage } from "../../../core/task-persistence/rooMessage"
 // npx vitest run api/providers/__tests__/xai.spec.ts
 
 // Use vi.hoisted to define mock functions that can be referenced in hoisted vi.mock() calls
@@ -141,7 +142,7 @@ describe("XAIHandler", () => {
 
 	describe("createMessage", () => {
 		const systemPrompt = "You are a helpful assistant."
-		const messages: Anthropic.Messages.MessageParam[] = [
+		const messages: RooMessage[] = [
 			{
 				role: "user",
 				content: [
@@ -538,7 +539,7 @@ describe("XAIHandler", () => {
 
 	describe("tool handling", () => {
 		const systemPrompt = "You are a helpful assistant."
-		const messages: Anthropic.Messages.MessageParam[] = [
+		const messages: RooMessage[] = [
 			{
 				role: "user",
 				content: [{ type: "text" as const, text: "Hello!" }],

+ 2 - 1
src/api/providers/__tests__/zai.spec.ts

@@ -1,3 +1,4 @@
+import type { RooMessage } from "../../../core/task-persistence/rooMessage"
 // npx vitest run src/api/providers/__tests__/zai.spec.ts
 
 // Use vi.hoisted to define mock functions that can be referenced in hoisted vi.mock() calls
@@ -262,7 +263,7 @@ describe("ZAiHandler", () => {
 
 	describe("createMessage", () => {
 		const systemPrompt = "You are a helpful assistant."
-		const messages: Anthropic.Messages.MessageParam[] = [
+		const messages: RooMessage[] = [
 			{
 				role: "user",
 				content: [{ type: "text" as const, text: "Hello!" }],

+ 15 - 91
src/api/providers/anthropic-vertex.ts

@@ -1,6 +1,6 @@
 import type { Anthropic } from "@anthropic-ai/sdk"
 import { createVertexAnthropic } from "@ai-sdk/google-vertex/anthropic"
-import { streamText, generateText, ToolSet } from "ai"
+import { streamText, generateText, ToolSet, ModelMessage } from "ai"
 
 import {
 	type ModelInfo,
@@ -24,20 +24,20 @@ import {
 	processAiSdkStreamPart,
 	mapToolChoice,
 	handleAiSdkError,
+	yieldResponseMessage,
 } from "../transform/ai-sdk"
 import { calculateApiCostAnthropic } from "../../shared/cost"
 
 import { DEFAULT_HEADERS } from "./constants"
 import { BaseProvider } from "./base-provider"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
+import type { RooMessage } from "../../core/task-persistence/rooMessage"
 
 // https://docs.anthropic.com/en/api/claude-on-vertex-ai
 export class AnthropicVertexHandler extends BaseProvider implements SingleCompletionHandler {
 	protected options: ApiHandlerOptions
 	private provider: ReturnType<typeof createVertexAnthropic>
 	private readonly providerName = "Vertex (Anthropic)"
-	private lastThoughtSignature: string | undefined
-	private lastRedactedThinkingBlocks: Array<{ type: "redacted_thinking"; data: string }> = []
 
 	constructor(options: ApiHandlerOptions) {
 		super()
@@ -85,17 +85,13 @@ export class AnthropicVertexHandler extends BaseProvider implements SingleComple
 
 	override async *createMessage(
 		systemPrompt: string,
-		messages: Anthropic.Messages.MessageParam[],
+		messages: RooMessage[],
 		metadata?: ApiHandlerCreateMessageMetadata,
 	): ApiStream {
 		const modelConfig = this.getModel()
 
-		// Reset thinking state for this request
-		this.lastThoughtSignature = undefined
-		this.lastRedactedThinkingBlocks = []
-
 		// Convert messages to AI SDK format
-		const aiSdkMessages = convertToAiSdkMessages(messages)
+		const aiSdkMessages = messages as ModelMessage[]
 
 		// Convert tools to AI SDK format
 		const openAiTools = this.convertToolsForOpenAI(metadata?.tools)
@@ -139,7 +135,7 @@ export class AnthropicVertexHandler extends BaseProvider implements SingleComple
 		const cacheProviderOption = { anthropic: { cacheControl: { type: "ephemeral" as const } } }
 
 		const userMsgIndices = messages.reduce(
-			(acc, msg, index) => (msg.role === "user" ? [...acc, index] : acc),
+			(acc, msg, index) => ("role" in msg && msg.role === "user" ? [...acc, index] : acc),
 			[] as number[],
 		)
 
@@ -151,7 +147,7 @@ export class AnthropicVertexHandler extends BaseProvider implements SingleComple
 		if (secondLastUserMsgIndex >= 0) targetIndices.add(secondLastUserMsgIndex)
 
 		if (targetIndices.size > 0) {
-			this.applyCacheControlToAiSdkMessages(messages, aiSdkMessages, targetIndices, cacheProviderOption)
+			this.applyCacheControlToAiSdkMessages(messages as ModelMessage[], targetIndices, cacheProviderOption)
 		}
 
 		// Build streamText request
@@ -177,22 +173,6 @@ export class AnthropicVertexHandler extends BaseProvider implements SingleComple
 
 			let lastStreamError: string | undefined
 			for await (const part of result.fullStream) {
-				// Capture thinking signature from stream events
-				// The AI SDK's @ai-sdk/anthropic emits the signature as a reasoning-delta
-				// event with providerMetadata.anthropic.signature
-				const partAny = part as any
-				if (partAny.providerMetadata?.anthropic?.signature) {
-					this.lastThoughtSignature = partAny.providerMetadata.anthropic.signature
-				}
-
-				// Capture redacted thinking blocks from stream events
-				if (partAny.providerMetadata?.anthropic?.redactedData) {
-					this.lastRedactedThinkingBlocks.push({
-						type: "redacted_thinking",
-						data: partAny.providerMetadata.anthropic.redactedData,
-					})
-				}
-
 				for (const chunk of processAiSdkStreamPart(part)) {
 					if (chunk.type === "error") {
 						lastStreamError = chunk.message
@@ -214,6 +194,8 @@ export class AnthropicVertexHandler extends BaseProvider implements SingleComple
 				}
 				throw usageError
 			}
+
+			yield* yieldResponseMessage(result)
 		} catch (error) {
 			const errorMessage = error instanceof Error ? error.message : String(error)
 			TelemetryService.instance.captureException(
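
`yieldResponseMessage` is imported from ../transform/ai-sdk but defined outside this diff. A minimal sketch of what such a helper could look like, assuming the ApiStream gained a chunk that carries the AI SDK response messages (the chunk name is an assumption):

	import type { StreamTextResult, ToolSet } from "ai"

	// Sketch, not the shipped implementation. streamText resolves result.response
	// with the fully assembled assistant/tool messages once streaming has finished;
	// awaiting it here is safe because callers drain fullStream first.
	async function* yieldResponseMessage(result: StreamTextResult<ToolSet, unknown>) {
		const { messages } = await result.response
		if (messages.length > 0) {
			yield { type: "response-messages" as const, messages }
		}
	}
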
@@ -268,57 +250,16 @@ export class AnthropicVertexHandler extends BaseProvider implements SingleComple
 	 * accounts for that split so cache control lands on the right message.
 	 */
 	private applyCacheControlToAiSdkMessages(
-		originalMessages: Anthropic.Messages.MessageParam[],
 		aiSdkMessages: { role: string; providerOptions?: Record<string, Record<string, unknown>> }[],
-		targetOriginalIndices: Set<number>,
+		targetIndices: Set<number>,
 		cacheProviderOption: Record<string, Record<string, unknown>>,
 	): void {
-		let aiSdkIdx = 0
-		for (let origIdx = 0; origIdx < originalMessages.length; origIdx++) {
-			const origMsg = originalMessages[origIdx]
-
-			if (typeof origMsg.content === "string") {
-				if (targetOriginalIndices.has(origIdx) && aiSdkIdx < aiSdkMessages.length) {
-					aiSdkMessages[aiSdkIdx].providerOptions = {
-						...aiSdkMessages[aiSdkIdx].providerOptions,
-						...cacheProviderOption,
-					}
+		for (const idx of targetIndices) {
+			if (idx >= 0 && idx < aiSdkMessages.length) {
+				aiSdkMessages[idx].providerOptions = {
+					...aiSdkMessages[idx].providerOptions,
+					...cacheProviderOption,
 				}
-				aiSdkIdx++
-			} else if (origMsg.role === "user") {
-				const hasToolResults = origMsg.content.some((part) => (part as { type: string }).type === "tool_result")
-				const hasNonToolContent = origMsg.content.some(
-					(part) => (part as { type: string }).type === "text" || (part as { type: string }).type === "image",
-				)
-
-				if (hasToolResults && hasNonToolContent) {
-					const userMsgIdx = aiSdkIdx + 1
-					if (targetOriginalIndices.has(origIdx) && userMsgIdx < aiSdkMessages.length) {
-						aiSdkMessages[userMsgIdx].providerOptions = {
-							...aiSdkMessages[userMsgIdx].providerOptions,
-							...cacheProviderOption,
-						}
-					}
-					aiSdkIdx += 2
-				} else if (hasToolResults) {
-					if (targetOriginalIndices.has(origIdx) && aiSdkIdx < aiSdkMessages.length) {
-						aiSdkMessages[aiSdkIdx].providerOptions = {
-							...aiSdkMessages[aiSdkIdx].providerOptions,
-							...cacheProviderOption,
-						}
-					}
-					aiSdkIdx++
-				} else {
-					if (targetOriginalIndices.has(origIdx) && aiSdkIdx < aiSdkMessages.length) {
-						aiSdkMessages[aiSdkIdx].providerOptions = {
-							...aiSdkMessages[aiSdkIdx].providerOptions,
-							...cacheProviderOption,
-						}
-					}
-					aiSdkIdx++
-				}
-			} else {
-				aiSdkIdx++
 			}
 		}
 	}
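
The simpler loop above is sound because the index bookkeeping it replaces existed only to track how convertToAiSdkMessages used to split a user message containing tool results into two AI SDK messages. With RooMessage[] passed through unconverted, index i in `messages` refers to the same object as index i in `aiSdkMessages`, so a target index can be applied directly. A small illustration of the same pattern:

	// Illustration only: both names point at the same array objects.
	const aiSdkMessages = messages as ModelMessage[]
	const lastUserIdx = messages.reduce((last, m, i) => ("role" in m && m.role === "user" ? i : last), -1)
	if (lastUserIdx >= 0) {
		aiSdkMessages[lastUserIdx].providerOptions = {
			...aiSdkMessages[lastUserIdx].providerOptions,
			anthropic: { cacheControl: { type: "ephemeral" as const } },
		}
	}
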
@@ -401,23 +342,6 @@ export class AnthropicVertexHandler extends BaseProvider implements SingleComple
 		}
 	}
 
-	/**
-	 * Returns the thinking signature captured from the last Anthropic response.
-	 * Claude models with extended thinking return a cryptographic signature
-	 * which must be round-tripped back for multi-turn conversations with tool use.
-	 */
-	getThoughtSignature(): string | undefined {
-		return this.lastThoughtSignature
-	}
-
-	/**
-	 * Returns any redacted thinking blocks captured from the last Anthropic response.
-	 * Anthropic returns these when safety filters trigger on reasoning content.
-	 */
-	getRedactedThinkingBlocks(): Array<{ type: "redacted_thinking"; data: string }> | undefined {
-		return this.lastRedactedThinkingBlocks.length > 0 ? this.lastRedactedThinkingBlocks : undefined
-	}
-
 	override isAiSdkProvider(): boolean {
 		return true
 	}

+ 15 - 92
src/api/providers/anthropic.ts

@@ -1,6 +1,5 @@
-import type { Anthropic } from "@anthropic-ai/sdk"
 import { createAnthropic } from "@ai-sdk/anthropic"
-import { streamText, generateText, ToolSet } from "ai"
+import { streamText, generateText, ToolSet, ModelMessage } from "ai"
 
 import {
 	type ModelInfo,
@@ -23,19 +22,19 @@ import {
 	processAiSdkStreamPart,
 	mapToolChoice,
 	handleAiSdkError,
+	yieldResponseMessage,
 } from "../transform/ai-sdk"
 import { calculateApiCostAnthropic } from "../../shared/cost"
 
 import { DEFAULT_HEADERS } from "./constants"
 import { BaseProvider } from "./base-provider"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
+import type { RooMessage } from "../../core/task-persistence/rooMessage"
 
 export class AnthropicHandler extends BaseProvider implements SingleCompletionHandler {
 	private options: ApiHandlerOptions
 	private provider: ReturnType<typeof createAnthropic>
 	private readonly providerName = "Anthropic"
-	private lastThoughtSignature: string | undefined
-	private lastRedactedThinkingBlocks: Array<{ type: "redacted_thinking"; data: string }> = []
 
 	constructor(options: ApiHandlerOptions) {
 		super()
@@ -72,17 +71,13 @@ export class AnthropicHandler extends BaseProvider implements SingleCompletionHa
 
 	override async *createMessage(
 		systemPrompt: string,
-		messages: Anthropic.Messages.MessageParam[],
+		messages: RooMessage[],
 		metadata?: ApiHandlerCreateMessageMetadata,
 	): ApiStream {
 		const modelConfig = this.getModel()
 
-		// Reset thinking state for this request
-		this.lastThoughtSignature = undefined
-		this.lastRedactedThinkingBlocks = []
-
 		// Convert messages to AI SDK format
-		const aiSdkMessages = convertToAiSdkMessages(messages)
+		const aiSdkMessages = messages as ModelMessage[]
 
 		// Convert tools to AI SDK format
 		const openAiTools = this.convertToolsForOpenAI(metadata?.tools)
@@ -115,7 +110,7 @@ export class AnthropicHandler extends BaseProvider implements SingleCompletionHa
 		const cacheProviderOption = { anthropic: { cacheControl: { type: "ephemeral" as const } } }
 
 		const userMsgIndices = messages.reduce(
-			(acc, msg, index) => (msg.role === "user" ? [...acc, index] : acc),
+			(acc, msg, index) => ("role" in msg && msg.role === "user" ? [...acc, index] : acc),
 			[] as number[],
 		)
 
@@ -127,7 +122,7 @@ export class AnthropicHandler extends BaseProvider implements SingleCompletionHa
 		if (secondLastUserMsgIndex >= 0) targetIndices.add(secondLastUserMsgIndex)
 
 		if (targetIndices.size > 0) {
-			this.applyCacheControlToAiSdkMessages(messages, aiSdkMessages, targetIndices, cacheProviderOption)
+			this.applyCacheControlToAiSdkMessages(messages as ModelMessage[], targetIndices, cacheProviderOption)
 		}
 
 		// Build streamText request
@@ -153,22 +148,6 @@ export class AnthropicHandler extends BaseProvider implements SingleCompletionHa
 
 			let lastStreamError: string | undefined
 			for await (const part of result.fullStream) {
-				// Capture thinking signature from stream events
-				// The AI SDK's @ai-sdk/anthropic emits the signature as a reasoning-delta
-				// event with providerMetadata.anthropic.signature
-				const partAny = part as any
-				if (partAny.providerMetadata?.anthropic?.signature) {
-					this.lastThoughtSignature = partAny.providerMetadata.anthropic.signature
-				}
-
-				// Capture redacted thinking blocks from stream events
-				if (partAny.providerMetadata?.anthropic?.redactedData) {
-					this.lastRedactedThinkingBlocks.push({
-						type: "redacted_thinking",
-						data: partAny.providerMetadata.anthropic.redactedData,
-					})
-				}
-
 				for (const chunk of processAiSdkStreamPart(part)) {
 					if (chunk.type === "error") {
 						lastStreamError = chunk.message
@@ -190,6 +169,8 @@ export class AnthropicHandler extends BaseProvider implements SingleCompletionHa
 				}
 				throw usageError
 			}
+
+			yield* yieldResponseMessage(result)
 		} catch (error) {
 			const errorMessage = error instanceof Error ? error.message : String(error)
 			TelemetryService.instance.captureException(
@@ -244,57 +225,16 @@ export class AnthropicHandler extends BaseProvider implements SingleCompletionHa
 	 * accounts for that split so cache control lands on the right message.
 	 */
 	private applyCacheControlToAiSdkMessages(
-		originalMessages: Anthropic.Messages.MessageParam[],
 		aiSdkMessages: { role: string; providerOptions?: Record<string, Record<string, unknown>> }[],
-		targetOriginalIndices: Set<number>,
+		targetIndices: Set<number>,
 		cacheProviderOption: Record<string, Record<string, unknown>>,
 	): void {
-		let aiSdkIdx = 0
-		for (let origIdx = 0; origIdx < originalMessages.length; origIdx++) {
-			const origMsg = originalMessages[origIdx]
-
-			if (typeof origMsg.content === "string") {
-				if (targetOriginalIndices.has(origIdx) && aiSdkIdx < aiSdkMessages.length) {
-					aiSdkMessages[aiSdkIdx].providerOptions = {
-						...aiSdkMessages[aiSdkIdx].providerOptions,
-						...cacheProviderOption,
-					}
+		for (const idx of targetIndices) {
+			if (idx >= 0 && idx < aiSdkMessages.length) {
+				aiSdkMessages[idx].providerOptions = {
+					...aiSdkMessages[idx].providerOptions,
+					...cacheProviderOption,
 				}
-				aiSdkIdx++
-			} else if (origMsg.role === "user") {
-				const hasToolResults = origMsg.content.some((part) => (part as { type: string }).type === "tool_result")
-				const hasNonToolContent = origMsg.content.some(
-					(part) => (part as { type: string }).type === "text" || (part as { type: string }).type === "image",
-				)
-
-				if (hasToolResults && hasNonToolContent) {
-					const userMsgIdx = aiSdkIdx + 1
-					if (targetOriginalIndices.has(origIdx) && userMsgIdx < aiSdkMessages.length) {
-						aiSdkMessages[userMsgIdx].providerOptions = {
-							...aiSdkMessages[userMsgIdx].providerOptions,
-							...cacheProviderOption,
-						}
-					}
-					aiSdkIdx += 2
-				} else if (hasToolResults) {
-					if (targetOriginalIndices.has(origIdx) && aiSdkIdx < aiSdkMessages.length) {
-						aiSdkMessages[aiSdkIdx].providerOptions = {
-							...aiSdkMessages[aiSdkIdx].providerOptions,
-							...cacheProviderOption,
-						}
-					}
-					aiSdkIdx++
-				} else {
-					if (targetOriginalIndices.has(origIdx) && aiSdkIdx < aiSdkMessages.length) {
-						aiSdkMessages[aiSdkIdx].providerOptions = {
-							...aiSdkMessages[aiSdkIdx].providerOptions,
-							...cacheProviderOption,
-						}
-					}
-					aiSdkIdx++
-				}
-			} else {
-				aiSdkIdx++
 			}
 		}
 	}
@@ -366,23 +306,6 @@ export class AnthropicHandler extends BaseProvider implements SingleCompletionHa
 		}
 	}
 
-	/**
-	 * Returns the thinking signature captured from the last Anthropic response.
-	 * Claude models with extended thinking return a cryptographic signature
-	 * which must be round-tripped back for multi-turn conversations with tool use.
-	 */
-	getThoughtSignature(): string | undefined {
-		return this.lastThoughtSignature
-	}
-
-	/**
-	 * Returns any redacted thinking blocks captured from the last Anthropic response.
-	 * Anthropic returns these when safety filters trigger on reasoning content.
-	 */
-	getRedactedThinkingBlocks(): Array<{ type: "redacted_thinking"; data: string }> | undefined {
-		return this.lastRedactedThinkingBlocks.length > 0 ? this.lastRedactedThinkingBlocks : undefined
-	}
-
 	override isAiSdkProvider(): boolean {
 		return true
 	}

+ 4 - 3
src/api/providers/azure.ts

@@ -1,6 +1,6 @@
 import { Anthropic } from "@anthropic-ai/sdk"
 import { createAzure } from "@ai-sdk/azure"
-import { streamText, generateText, ToolSet } from "ai"
+import { streamText, generateText, ToolSet, ModelMessage } from "ai"
 
 import { azureModels, azureDefaultModelInfo, type ModelInfo } from "@roo-code/types"
 
@@ -19,6 +19,7 @@ import { getModelParams } from "../transform/model-params"
 import { DEFAULT_HEADERS } from "./constants"
 import { BaseProvider } from "./base-provider"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
+import type { RooMessage } from "../../core/task-persistence/rooMessage"
 
 const AZURE_DEFAULT_TEMPERATURE = 0
 
@@ -131,14 +132,14 @@ export class AzureHandler extends BaseProvider implements SingleCompletionHandle
 	 */
 	override async *createMessage(
 		systemPrompt: string,
-		messages: Anthropic.Messages.MessageParam[],
+		messages: RooMessage[],
 		metadata?: ApiHandlerCreateMessageMetadata,
 	): ApiStream {
 		const { temperature } = this.getModel()
 		const languageModel = this.getLanguageModel()
 
 		// Convert messages to AI SDK format
-		const aiSdkMessages = convertToAiSdkMessages(messages)
+		const aiSdkMessages = messages as ModelMessage[]
 
 		// Convert tools to OpenAI format first, then to AI SDK format
 		const openAiTools = this.convertToolsForOpenAI(metadata?.tools)

+ 2 - 1
src/api/providers/base-provider.ts

@@ -1,4 +1,5 @@
 import { Anthropic } from "@anthropic-ai/sdk"
+import type { RooMessage } from "../../core/task-persistence/rooMessage"
 
 import type { ModelInfo } from "@roo-code/types"
 
@@ -13,7 +14,7 @@ import { isMcpTool } from "../../utils/mcp-name"
 export abstract class BaseProvider implements ApiHandler {
 	abstract createMessage(
 		systemPrompt: string,
-		messages: Anthropic.Messages.MessageParam[],
+		messages: RooMessage[],
 		metadata?: ApiHandlerCreateMessageMetadata,
 	): ApiStream
 

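Every concrete provider now receives the persisted RooMessage[] history directly through the abstract contract above. A minimal conforming subclass, purely for illustration (the handler name and model info are hypothetical):

	// Hypothetical subclass demonstrating the updated signature only.
	class EchoHandler extends BaseProvider {
		override getModel() {
			return { id: "echo", info: {} as ModelInfo }
		}

		override async *createMessage(systemPrompt: string, messages: RooMessage[]): ApiStream {
			// No conversion step: the stored messages are already model-ready.
			yield { type: "text", text: `echo of ${messages.length} message(s)` }
		}
	}
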
+ 4 - 3
src/api/providers/baseten.ts

@@ -1,6 +1,6 @@
 import { Anthropic } from "@anthropic-ai/sdk"
 import { createBaseten } from "@ai-sdk/baseten"
-import { streamText, generateText, ToolSet } from "ai"
+import { streamText, generateText, ToolSet, ModelMessage } from "ai"
 
 import { basetenModels, basetenDefaultModelId, type ModelInfo } from "@roo-code/types"
 
@@ -19,6 +19,7 @@ import { getModelParams } from "../transform/model-params"
 import { DEFAULT_HEADERS } from "./constants"
 import { BaseProvider } from "./base-provider"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
+import type { RooMessage } from "../../core/task-persistence/rooMessage"
 
 const BASETEN_DEFAULT_TEMPERATURE = 0.5
 
@@ -94,13 +95,13 @@ export class BasetenHandler extends BaseProvider implements SingleCompletionHand
 	 */
 	override async *createMessage(
 		systemPrompt: string,
-		messages: Anthropic.Messages.MessageParam[],
+		messages: RooMessage[],
 		metadata?: ApiHandlerCreateMessageMetadata,
 	): ApiStream {
 		const { temperature } = this.getModel()
 		const languageModel = this.getLanguageModel()
 
-		const aiSdkMessages = convertToAiSdkMessages(messages)
+		const aiSdkMessages = messages as ModelMessage[]
 
 		const openAiTools = this.convertToolsForOpenAI(metadata?.tools)
 		const aiSdkTools = convertToolsForAiSdk(openAiTools) as ToolSet | undefined

+ 16 - 118
src/api/providers/bedrock.ts

@@ -1,6 +1,6 @@
 import type { Anthropic } from "@anthropic-ai/sdk"
 import { createAmazonBedrock, type AmazonBedrockProvider } from "@ai-sdk/amazon-bedrock"
-import { streamText, generateText, ToolSet } from "ai"
+import { streamText, generateText, ToolSet, ModelMessage } from "ai"
 import { fromIni } from "@aws-sdk/credential-providers"
 import OpenAI from "openai"
 
@@ -30,6 +30,7 @@ import {
 	processAiSdkStreamPart,
 	mapToolChoice,
 	handleAiSdkError,
+	yieldResponseMessage,
 } from "../transform/ai-sdk"
 import { getModelParams } from "../transform/model-params"
 import { shouldUseReasoningBudget } from "../../shared/api"
@@ -38,6 +39,7 @@ import { DEFAULT_HEADERS } from "./constants"
 import { logger } from "../../utils/logging"
 import { Package } from "../../shared/package"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
+import type { RooMessage } from "../../core/task-persistence/rooMessage"
 
 /************************************************************************************
  *
@@ -50,8 +52,6 @@ export class AwsBedrockHandler extends BaseProvider implements SingleCompletionH
 	protected provider: AmazonBedrockProvider
 	private arnInfo: any
 	private readonly providerName = "Bedrock"
-	private lastThoughtSignature: string | undefined
-	private lastRedactedThinkingBlocks: Array<{ type: "redacted_thinking"; data: string }> = []
 
 	constructor(options: ProviderSettings) {
 		super()
@@ -188,19 +188,15 @@ export class AwsBedrockHandler extends BaseProvider implements SingleCompletionH
 
 	override async *createMessage(
 		systemPrompt: string,
-		messages: Anthropic.Messages.MessageParam[],
+		messages: RooMessage[],
 		metadata?: ApiHandlerCreateMessageMetadata,
 	): ApiStream {
 		const modelConfig = this.getModel()
 
-		// Reset thinking state for this request
-		this.lastThoughtSignature = undefined
-		this.lastRedactedThinkingBlocks = []
-
 		// Filter out provider-specific meta entries (e.g., { type: "reasoning" })
-		// that are not valid Anthropic MessageParam values
+		// that should not be forwarded to the model
 		type ReasoningMetaLike = { type?: string }
-		const filteredMessages = messages.filter((message): message is Anthropic.Messages.MessageParam => {
+		const filteredMessages = messages.filter((message) => {
 			const meta = message as ReasoningMetaLike
 			if (meta.type === "reasoning") {
 				return false
@@ -209,7 +205,7 @@ export class AwsBedrockHandler extends BaseProvider implements SingleCompletionH
 		})
 
 		// Convert messages to AI SDK format
-		const aiSdkMessages = convertToAiSdkMessages(filteredMessages)
+		const aiSdkMessages = filteredMessages as ModelMessage[]
 
 		// Convert tools to AI SDK format
 		let openAiTools = this.convertToolsForOpenAI(metadata?.tools)
@@ -278,7 +274,7 @@ export class AwsBedrockHandler extends BaseProvider implements SingleCompletionH
 
 			// Find all user message indices in the original (pre-conversion) message array.
 			const originalUserIndices = filteredMessages.reduce<number[]>(
-				(acc, msg, idx) => (msg.role === "user" ? [...acc, idx] : acc),
+				(acc, msg, idx) => ("role" in msg && msg.role === "user" ? [...acc, idx] : acc),
 				[],
 			)
 
@@ -313,12 +309,7 @@ export class AwsBedrockHandler extends BaseProvider implements SingleCompletionH
 			// A single original user message with tool_results becomes [tool-role msg, user-role msg]
 			// in the AI SDK array, while a plain user message becomes [user-role msg].
 			if (targetOriginalIndices.size > 0) {
-				this.applyCachePointsToAiSdkMessages(
-					filteredMessages,
-					aiSdkMessages,
-					targetOriginalIndices,
-					cachePointOption,
-				)
+				this.applyCachePointsToAiSdkMessages(aiSdkMessages, targetOriginalIndices, cachePointOption)
 			}
 		}
 
@@ -347,31 +338,6 @@ export class AwsBedrockHandler extends BaseProvider implements SingleCompletionH
 
 			// Process the full stream
 			for await (const part of result.fullStream) {
-				// Capture thinking signature from stream events.
-				// The AI SDK's @ai-sdk/amazon-bedrock emits the signature as a reasoning-delta
-				// event with providerMetadata.bedrock.signature (empty delta text, signature in metadata).
-				// Also check tool-call events for thoughtSignature (Gemini pattern).
-				const partAny = part as any
-				if (partAny.providerMetadata?.bedrock?.signature) {
-					this.lastThoughtSignature = partAny.providerMetadata.bedrock.signature
-					logger.info("Captured thinking signature from stream", {
-						ctx: "bedrock",
-						signatureLength: this.lastThoughtSignature?.length,
-					})
-				} else if (partAny.providerMetadata?.bedrock?.thoughtSignature) {
-					this.lastThoughtSignature = partAny.providerMetadata.bedrock.thoughtSignature
-				} else if (partAny.providerMetadata?.anthropic?.thoughtSignature) {
-					this.lastThoughtSignature = partAny.providerMetadata.anthropic.thoughtSignature
-				}
-
-				// Capture redacted reasoning data from stream events
-				if (partAny.providerMetadata?.bedrock?.redactedData) {
-					this.lastRedactedThinkingBlocks.push({
-						type: "redacted_thinking",
-						data: partAny.providerMetadata.bedrock.redactedData,
-					})
-				}
-
 				for (const chunk of processAiSdkStreamPart(part)) {
 					if (chunk.type === "error") {
 						lastStreamError = chunk.message
@@ -393,6 +359,8 @@ export class AwsBedrockHandler extends BaseProvider implements SingleCompletionH
 				}
 				throw usageError
 			}
+
+			yield* yieldResponseMessage(result)
 		} catch (error) {
 			const errorMessage = error instanceof Error ? error.message : String(error)
 			const apiError = new ApiProviderError(errorMessage, this.providerName, modelConfig.id, "createMessage")
@@ -747,63 +715,16 @@ export class AwsBedrockHandler extends BaseProvider implements SingleCompletionH
 	 * accounts for that split so cache points land on the right message.
 	 */
 	private applyCachePointsToAiSdkMessages(
-		originalMessages: Anthropic.Messages.MessageParam[],
 		aiSdkMessages: { role: string; providerOptions?: Record<string, Record<string, unknown>> }[],
-		targetOriginalIndices: Set<number>,
+		targetIndices: Set<number>,
 		cachePointOption: Record<string, Record<string, unknown>>,
 	): void {
-		let aiSdkIdx = 0
-		for (let origIdx = 0; origIdx < originalMessages.length; origIdx++) {
-			const origMsg = originalMessages[origIdx]
-
-			if (typeof origMsg.content === "string") {
-				// Simple string content → 1 AI SDK message
-				if (targetOriginalIndices.has(origIdx) && aiSdkIdx < aiSdkMessages.length) {
-					aiSdkMessages[aiSdkIdx].providerOptions = {
-						...aiSdkMessages[aiSdkIdx].providerOptions,
-						...cachePointOption,
-					}
-				}
-				aiSdkIdx++
-			} else if (origMsg.role === "user") {
-				// User message with array content may split into tool + user messages.
-				const hasToolResults = origMsg.content.some((part) => (part as { type: string }).type === "tool_result")
-				const hasNonToolContent = origMsg.content.some(
-					(part) => (part as { type: string }).type === "text" || (part as { type: string }).type === "image",
-				)
-
-				if (hasToolResults && hasNonToolContent) {
-					// Split into tool msg + user msg — cache the user msg (the second one)
-					const userMsgIdx = aiSdkIdx + 1
-					if (targetOriginalIndices.has(origIdx) && userMsgIdx < aiSdkMessages.length) {
-						aiSdkMessages[userMsgIdx].providerOptions = {
-							...aiSdkMessages[userMsgIdx].providerOptions,
-							...cachePointOption,
-						}
-					}
-					aiSdkIdx += 2
-				} else if (hasToolResults) {
-					// Only tool results → 1 tool msg
-					if (targetOriginalIndices.has(origIdx) && aiSdkIdx < aiSdkMessages.length) {
-						aiSdkMessages[aiSdkIdx].providerOptions = {
-							...aiSdkMessages[aiSdkIdx].providerOptions,
-							...cachePointOption,
-						}
-					}
-					aiSdkIdx++
-				} else {
-					// Only text/image content → 1 user msg
-					if (targetOriginalIndices.has(origIdx) && aiSdkIdx < aiSdkMessages.length) {
-						aiSdkMessages[aiSdkIdx].providerOptions = {
-							...aiSdkMessages[aiSdkIdx].providerOptions,
-							...cachePointOption,
-						}
-					}
-					aiSdkIdx++
+		for (const idx of targetIndices) {
+			if (idx >= 0 && idx < aiSdkMessages.length) {
+				aiSdkMessages[idx].providerOptions = {
+					...aiSdkMessages[idx].providerOptions,
+					...cachePointOption,
 				}
-			} else {
-				// Assistant message → 1 AI SDK message
-				aiSdkIdx++
 			}
 		}
 	}
@@ -869,29 +790,6 @@ export class AwsBedrockHandler extends BaseProvider implements SingleCompletionH
 		return inputTokensCost + outputTokensCost + cacheWriteCost + cacheReadCost
 	}
 
-	/************************************************************************************
-	 *
-	 *     THINKING SIGNATURE ROUND-TRIP
-	 *
-	 *************************************************************************************/
-
-	/**
-	 * Returns the thinking signature captured from the last Bedrock response.
-	 * Claude models with extended thinking return a cryptographic signature
-	 * which must be round-tripped back for multi-turn conversations with tool use.
-	 */
-	getThoughtSignature(): string | undefined {
-		return this.lastThoughtSignature
-	}
-
-	/**
-	 * Returns any redacted thinking blocks captured from the last Bedrock response.
-	 * Anthropic returns these when safety filters trigger on reasoning content.
-	 */
-	getRedactedThinkingBlocks(): Array<{ type: "redacted_thinking"; data: string }> | undefined {
-		return this.lastRedactedThinkingBlocks.length > 0 ? this.lastRedactedThinkingBlocks : undefined
-	}
-
 	override isAiSdkProvider(): boolean {
 		return true
 	}

+ 4 - 3
src/api/providers/deepseek.ts

@@ -1,6 +1,6 @@
 import { Anthropic } from "@anthropic-ai/sdk"
 import { createDeepSeek } from "@ai-sdk/deepseek"
-import { streamText, generateText, ToolSet } from "ai"
+import { streamText, generateText, ToolSet, ModelMessage } from "ai"
 
 import { deepSeekModels, deepSeekDefaultModelId, DEEP_SEEK_DEFAULT_TEMPERATURE, type ModelInfo } from "@roo-code/types"
 
@@ -19,6 +19,7 @@ import { getModelParams } from "../transform/model-params"
 import { DEFAULT_HEADERS } from "./constants"
 import { BaseProvider } from "./base-provider"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
+import type { RooMessage } from "../../core/task-persistence/rooMessage"
 
 /**
  * DeepSeek provider using the dedicated @ai-sdk/deepseek package.
@@ -109,14 +110,14 @@ export class DeepSeekHandler extends BaseProvider implements SingleCompletionHan
 	 */
 	override async *createMessage(
 		systemPrompt: string,
-		messages: Anthropic.Messages.MessageParam[],
+		messages: RooMessage[],
 		metadata?: ApiHandlerCreateMessageMetadata,
 	): ApiStream {
 		const { temperature } = this.getModel()
 		const languageModel = this.getLanguageModel()
 
 		// Convert messages to AI SDK format
-		const aiSdkMessages = convertToAiSdkMessages(messages)
+		const aiSdkMessages = messages as ModelMessage[]
 
 		// Convert tools to OpenAI format first, then to AI SDK format
 		const openAiTools = this.convertToolsForOpenAI(metadata?.tools)

+ 3 - 6
src/api/providers/fake-ai.ts

@@ -5,6 +5,7 @@ import type { ModelInfo } from "@roo-code/types"
 import type { ApiHandler, SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
 import type { ApiHandlerOptions } from "../../shared/api"
 import { ApiStream } from "../transform/stream"
+import type { RooMessage } from "../../core/task-persistence/rooMessage"
 
 interface FakeAI {
 	/**
@@ -21,11 +22,7 @@ interface FakeAI {
 	 */
 	removeFromCache?: () => void
 
-	createMessage(
-		systemPrompt: string,
-		messages: Anthropic.Messages.MessageParam[],
-		metadata?: ApiHandlerCreateMessageMetadata,
-	): ApiStream
+	createMessage(systemPrompt: string, messages: RooMessage[], metadata?: ApiHandlerCreateMessageMetadata): ApiStream
 	getModel(): { id: string; info: ModelInfo }
 	countTokens(content: Array<Anthropic.Messages.ContentBlockParam>): Promise<number>
 	completePrompt(prompt: string): Promise<string>
@@ -61,7 +58,7 @@ export class FakeAIHandler implements ApiHandler, SingleCompletionHandler {
 
 	async *createMessage(
 		systemPrompt: string,
-		messages: Anthropic.Messages.MessageParam[],
+		messages: RooMessage[],
 		metadata?: ApiHandlerCreateMessageMetadata,
 	): ApiStream {
 		yield* this.ai.createMessage(systemPrompt, messages, metadata)

+ 4 - 3
src/api/providers/fireworks.ts

@@ -1,6 +1,6 @@
 import { Anthropic } from "@anthropic-ai/sdk"
 import { createFireworks } from "@ai-sdk/fireworks"
-import { streamText, generateText, ToolSet } from "ai"
+import { streamText, generateText, ToolSet, ModelMessage } from "ai"
 
 import { fireworksModels, fireworksDefaultModelId, type ModelInfo } from "@roo-code/types"
 
@@ -19,6 +19,7 @@ import { getModelParams } from "../transform/model-params"
 import { DEFAULT_HEADERS } from "./constants"
 import { BaseProvider } from "./base-provider"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
+import type { RooMessage } from "../../core/task-persistence/rooMessage"
 
 const FIREWORKS_DEFAULT_TEMPERATURE = 0.5
 
@@ -109,14 +110,14 @@ export class FireworksHandler extends BaseProvider implements SingleCompletionHa
 	 */
 	override async *createMessage(
 		systemPrompt: string,
-		messages: Anthropic.Messages.MessageParam[],
+		messages: RooMessage[],
 		metadata?: ApiHandlerCreateMessageMetadata,
 	): ApiStream {
 		const { temperature } = this.getModel()
 		const languageModel = this.getLanguageModel()
 
 		// Convert messages to AI SDK format
-		const aiSdkMessages = convertToAiSdkMessages(messages)
+		const aiSdkMessages = messages as ModelMessage[]
 
 		// Convert tools to OpenAI format first, then to AI SDK format
 		const openAiTools = this.convertToolsForOpenAI(metadata?.tools)

+ 8 - 26
src/api/providers/gemini.ts

@@ -1,6 +1,6 @@
 import type { Anthropic } from "@anthropic-ai/sdk"
 import { createGoogleGenerativeAI, type GoogleGenerativeAIProvider } from "@ai-sdk/google"
-import { streamText, generateText, NoOutputGeneratedError, ToolSet } from "ai"
+import { streamText, generateText, NoOutputGeneratedError, ToolSet, ModelMessage } from "ai"
 
 import {
 	type ModelInfo,
@@ -19,6 +19,7 @@ import {
 	processAiSdkStreamPart,
 	mapToolChoice,
 	handleAiSdkError,
+	yieldResponseMessage,
 } from "../transform/ai-sdk"
 import { t } from "i18next"
 import type { ApiStream, ApiStreamUsageChunk, GroundingSource } from "../transform/stream"
@@ -27,12 +28,12 @@ import { getModelParams } from "../transform/model-params"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
 import { BaseProvider } from "./base-provider"
 import { DEFAULT_HEADERS } from "./constants"
+import type { RooMessage } from "../../core/task-persistence/rooMessage"
 
 export class GeminiHandler extends BaseProvider implements SingleCompletionHandler {
 	protected options: ApiHandlerOptions
 	protected provider: GoogleGenerativeAIProvider
 	private readonly providerName = "Gemini"
-	private lastThoughtSignature: string | undefined
 
 	constructor(options: ApiHandlerOptions) {
 		super()
@@ -51,7 +52,7 @@ export class GeminiHandler extends BaseProvider implements SingleCompletionHandl
 
 	async *createMessage(
 		systemInstruction: string,
-		messages: Anthropic.Messages.MessageParam[],
+		messages: RooMessage[],
 		metadata?: ApiHandlerCreateMessageMetadata,
 	): ApiStream {
 		const { id: modelId, info, reasoning: thinkingConfig, maxTokens } = this.getModel()
@@ -81,7 +82,7 @@ export class GeminiHandler extends BaseProvider implements SingleCompletionHandl
-		// Anthropic.MessageParam values and will cause failures.
+		// model request messages and will cause failures.
 		type ReasoningMetaLike = { type?: string }
 
-		const filteredMessages = messages.filter((message): message is Anthropic.Messages.MessageParam => {
+		const filteredMessages = messages.filter((message) => {
 			const meta = message as ReasoningMetaLike
 			if (meta.type === "reasoning") {
 				return false
@@ -90,7 +91,7 @@ export class GeminiHandler extends BaseProvider implements SingleCompletionHandl
 		})
 
 		// Convert messages to AI SDK format
-		const aiSdkMessages = convertToAiSdkMessages(filteredMessages)
+		const aiSdkMessages = filteredMessages as ModelMessage[]
 
 		// Convert tools to OpenAI format first, then to AI SDK format
 		let openAiTools = this.convertToolsForOpenAI(metadata?.tools)
@@ -126,9 +127,6 @@ export class GeminiHandler extends BaseProvider implements SingleCompletionHandl
 		}
 
 		try {
-			// Reset thought signature for this request
-			this.lastThoughtSignature = undefined
-
 			// Use streamText for streaming responses
 			const result = streamText(requestOptions)
 
@@ -138,15 +136,6 @@ export class GeminiHandler extends BaseProvider implements SingleCompletionHandl
 
 			// Process the full stream to get all events including reasoning
 			for await (const part of result.fullStream) {
-				// Capture thoughtSignature from tool-call events (Gemini 3 thought signatures)
-				// The AI SDK's tool-call event includes providerMetadata with the signature
-				if (part.type === "tool-call") {
-					const googleMeta = (part as any).providerMetadata?.google
-					if (googleMeta?.thoughtSignature) {
-						this.lastThoughtSignature = googleMeta.thoughtSignature
-					}
-				}
-
 				for (const chunk of processAiSdkStreamPart(part)) {
 					if (chunk.type === "error") {
 						lastStreamError = chunk.message
@@ -216,6 +205,8 @@ export class GeminiHandler extends BaseProvider implements SingleCompletionHandl
 					throw usageError
 				}
 			}
+
+			yield* yieldResponseMessage(result)
 		} catch (error) {
 			throw handleAiSdkError(error, this.providerName, {
 				onError: (msg) => {
@@ -442,13 +433,4 @@ export class GeminiHandler extends BaseProvider implements SingleCompletionHandl
 	override isAiSdkProvider(): boolean {
 		return true
 	}
-
-	/**
-	 * Returns the thought signature captured from the last Gemini response.
-	 * Gemini 3 models return thoughtSignature on function call parts,
-	 * which must be round-tripped back for tool use continuations.
-	 */
-	getThoughtSignature(): string | undefined {
-		return this.lastThoughtSignature
-	}
 }
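
The accessor deletions here mirror those in the Anthropic, Bedrock, and MiniMax handlers above: once the AI SDK response messages are persisted, provider round-trip data (Anthropic thinking signatures, Gemini thought signatures, redacted thinking blocks) stays attached to the stored message parts, leaving nothing to capture out-of-band. A sketch of the stored shape, with illustrative values:

	// Illustrative assistant message as the AI SDK assembles it; providerOptions
	// on the tool-call part carries what getThoughtSignature() used to surface.
	const stored = {
		role: "assistant" as const,
		content: [
			{ type: "text" as const, text: "Let me check that file." },
			{
				type: "tool-call" as const,
				toolCallId: "call_1",
				toolName: "read_file",
				input: { path: "src/index.ts" },
				providerOptions: { google: { thoughtSignature: "sig-123" } }, // illustrative
			},
		],
	}
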

+ 2 - 1
src/api/providers/lite-llm.ts

@@ -18,6 +18,7 @@ import { ApiStream, ApiStreamUsageChunk } from "../transform/stream"
 import { OpenAICompatibleHandler } from "./openai-compatible"
 import { getModels, getModelsFromCache } from "./fetchers/modelCache"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
+import type { RooMessage } from "../../core/task-persistence/rooMessage"
 
 export class LiteLLMHandler extends OpenAICompatibleHandler implements SingleCompletionHandler {
 	private models: ModelRecord = {}
@@ -80,7 +81,7 @@ export class LiteLLMHandler extends OpenAICompatibleHandler implements SingleCom
 
 	override async *createMessage(
 		systemPrompt: string,
-		messages: Anthropic.Messages.MessageParam[],
+		messages: RooMessage[],
 		metadata?: ApiHandlerCreateMessageMetadata,
 	): ApiStream {
 		await this.fetchModel()

+ 12 - 3
src/api/providers/lm-studio.ts

@@ -1,5 +1,13 @@
 import { Anthropic } from "@anthropic-ai/sdk"
-import { streamText, generateText, ToolSet, wrapLanguageModel, extractReasoningMiddleware, LanguageModel } from "ai"
+import {
+	streamText,
+	generateText,
+	ToolSet,
+	wrapLanguageModel,
+	extractReasoningMiddleware,
+	LanguageModel,
+	ModelMessage,
+} from "ai"
 
 import { type ModelInfo, openAiModelInfoSaneDefaults, LMSTUDIO_DEFAULT_TEMPERATURE } from "@roo-code/types"
 
@@ -17,6 +25,7 @@ import { ApiStream } from "../transform/stream"
 import { OpenAICompatibleHandler, OpenAICompatibleConfig } from "./openai-compatible"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
 import { getModelsFromCache } from "./fetchers/modelCache"
+import type { RooMessage } from "../../core/task-persistence/rooMessage"
 
 export class LmStudioHandler extends OpenAICompatibleHandler implements SingleCompletionHandler {
 	constructor(options: ApiHandlerOptions) {
@@ -49,13 +58,13 @@ export class LmStudioHandler extends OpenAICompatibleHandler implements SingleCo
 
 	override async *createMessage(
 		systemPrompt: string,
-		messages: Anthropic.Messages.MessageParam[],
+		messages: RooMessage[],
 		metadata?: ApiHandlerCreateMessageMetadata,
 	): ApiStream {
 		const model = this.getModel()
 		const languageModel = this.getLanguageModel()
 
-		const aiSdkMessages = convertToAiSdkMessages(messages)
+		const aiSdkMessages = messages as ModelMessage[]
 
 		const openAiTools = this.convertToolsForOpenAI(metadata?.tools)
 		const aiSdkTools = convertToolsForAiSdk(openAiTools) as ToolSet | undefined

+ 16 - 89
src/api/providers/minimax.ts

@@ -1,6 +1,6 @@
 import type { Anthropic } from "@anthropic-ai/sdk"
 import { createAnthropic } from "@ai-sdk/anthropic"
-import { streamText, generateText, ToolSet } from "ai"
+import { streamText, generateText, ToolSet, ModelMessage } from "ai"
 
 import { type ModelInfo, minimaxDefaultModelId, minimaxModels } from "@roo-code/types"
 
@@ -14,19 +14,19 @@ import {
 	processAiSdkStreamPart,
 	mapToolChoice,
 	handleAiSdkError,
+	yieldResponseMessage,
 } from "../transform/ai-sdk"
 import { calculateApiCostAnthropic } from "../../shared/cost"
 
 import { DEFAULT_HEADERS } from "./constants"
 import { BaseProvider } from "./base-provider"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
+import type { RooMessage } from "../../core/task-persistence/rooMessage"
 
 export class MiniMaxHandler extends BaseProvider implements SingleCompletionHandler {
 	private client: ReturnType<typeof createAnthropic>
 	private options: ApiHandlerOptions
 	private readonly providerName = "MiniMax"
-	private lastThoughtSignature: string | undefined
-	private lastRedactedThinkingBlocks: Array<{ type: "redacted_thinking"; data: string }> = []
 
 	constructor(options: ApiHandlerOptions) {
 		super()
@@ -58,15 +58,11 @@ export class MiniMaxHandler extends BaseProvider implements SingleCompletionHand
 
 	override async *createMessage(
 		systemPrompt: string,
-		messages: Anthropic.Messages.MessageParam[],
+		messages: RooMessage[],
 		metadata?: ApiHandlerCreateMessageMetadata,
 	): ApiStream {
 		const modelConfig = this.getModel()
 
-		// Reset thinking state for this request
-		this.lastThoughtSignature = undefined
-		this.lastRedactedThinkingBlocks = []
-
 		const modelParams = getModelParams({
 			format: "anthropic",
 			modelId: modelConfig.id,
@@ -75,8 +71,8 @@ export class MiniMaxHandler extends BaseProvider implements SingleCompletionHand
 			defaultTemperature: 1.0,
 		})
 
-		const mergedMessages = mergeEnvironmentDetailsForMiniMax(messages)
-		const aiSdkMessages = convertToAiSdkMessages(mergedMessages)
+		const mergedMessages = mergeEnvironmentDetailsForMiniMax(messages as any)
+		const aiSdkMessages = mergedMessages as ModelMessage[]
 		const openAiTools = this.convertToolsForOpenAI(metadata?.tools)
 		const aiSdkTools = convertToolsForAiSdk(openAiTools) as ToolSet | undefined
 
@@ -107,7 +103,7 @@ export class MiniMaxHandler extends BaseProvider implements SingleCompletionHand
 		if (secondLastUserMsgIndex >= 0) targetIndices.add(secondLastUserMsgIndex)
 
 		if (targetIndices.size > 0) {
-			this.applyCacheControlToAiSdkMessages(mergedMessages, aiSdkMessages, targetIndices, cacheProviderOption)
+			this.applyCacheControlToAiSdkMessages(aiSdkMessages, targetIndices, cacheProviderOption)
 		}
 
 		const requestOptions = {
@@ -128,32 +124,10 @@ export class MiniMaxHandler extends BaseProvider implements SingleCompletionHand
 
 		try {
 			const result = streamText(requestOptions as Parameters<typeof streamText>[0])
-	
+
 			let lastStreamError: string | undefined
 
 			for await (const part of result.fullStream) {
-				const anthropicMetadata = (
-					part as {
-						providerMetadata?: {
-							anthropic?: {
-								signature?: string
-								redactedData?: string
-							}
-						}
-					}
-				).providerMetadata?.anthropic
-
-				if (anthropicMetadata?.signature) {
-					this.lastThoughtSignature = anthropicMetadata.signature
-				}
-
-				if (anthropicMetadata?.redactedData) {
-					this.lastRedactedThinkingBlocks.push({
-						type: "redacted_thinking",
-						data: anthropicMetadata.redactedData,
-					})
-				}
-
 				for (const chunk of processAiSdkStreamPart(part)) {
 					if (chunk.type === "error") {
 						lastStreamError = chunk.message
@@ -174,6 +148,8 @@ export class MiniMaxHandler extends BaseProvider implements SingleCompletionHand
 				}
 				throw usageError
 			}
+
+			yield* yieldResponseMessage(result)
 		} catch (error) {
 			throw handleAiSdkError(error, this.providerName)
 		}
@@ -212,57 +188,16 @@ export class MiniMaxHandler extends BaseProvider implements SingleCompletionHand
 	}
 
 	private applyCacheControlToAiSdkMessages(
-		originalMessages: Anthropic.Messages.MessageParam[],
 		aiSdkMessages: { role: string; providerOptions?: Record<string, Record<string, unknown>> }[],
-		targetOriginalIndices: Set<number>,
+		targetIndices: Set<number>,
 		cacheProviderOption: Record<string, Record<string, unknown>>,
 	): void {
-		let aiSdkIdx = 0
-		for (let origIdx = 0; origIdx < originalMessages.length; origIdx++) {
-			const origMsg = originalMessages[origIdx]
-
-			if (typeof origMsg.content === "string") {
-				if (targetOriginalIndices.has(origIdx) && aiSdkIdx < aiSdkMessages.length) {
-					aiSdkMessages[aiSdkIdx].providerOptions = {
-						...aiSdkMessages[aiSdkIdx].providerOptions,
-						...cacheProviderOption,
-					}
+		for (const idx of targetIndices) {
+			if (idx >= 0 && idx < aiSdkMessages.length) {
+				aiSdkMessages[idx].providerOptions = {
+					...aiSdkMessages[idx].providerOptions,
+					...cacheProviderOption,
 				}
-				aiSdkIdx++
-			} else if (origMsg.role === "user") {
-				const hasToolResults = origMsg.content.some((part) => (part as { type: string }).type === "tool_result")
-				const hasNonToolContent = origMsg.content.some(
-					(part) => (part as { type: string }).type === "text" || (part as { type: string }).type === "image",
-				)
-
-				if (hasToolResults && hasNonToolContent) {
-					const userMsgIdx = aiSdkIdx + 1
-					if (targetOriginalIndices.has(origIdx) && userMsgIdx < aiSdkMessages.length) {
-						aiSdkMessages[userMsgIdx].providerOptions = {
-							...aiSdkMessages[userMsgIdx].providerOptions,
-							...cacheProviderOption,
-						}
-					}
-					aiSdkIdx += 2
-				} else if (hasToolResults) {
-					if (targetOriginalIndices.has(origIdx) && aiSdkIdx < aiSdkMessages.length) {
-						aiSdkMessages[aiSdkIdx].providerOptions = {
-							...aiSdkMessages[aiSdkIdx].providerOptions,
-							...cacheProviderOption,
-						}
-					}
-					aiSdkIdx++
-				} else {
-					if (targetOriginalIndices.has(origIdx) && aiSdkIdx < aiSdkMessages.length) {
-						aiSdkMessages[aiSdkIdx].providerOptions = {
-							...aiSdkMessages[aiSdkIdx].providerOptions,
-							...cacheProviderOption,
-						}
-					}
-					aiSdkIdx++
-				}
-			} else {
-				aiSdkIdx++
 			}
 		}
 	}
@@ -305,14 +240,6 @@ export class MiniMaxHandler extends BaseProvider implements SingleCompletionHand
 		}
 	}
 
-	getThoughtSignature(): string | undefined {
-		return this.lastThoughtSignature
-	}
-
-	getRedactedThinkingBlocks(): Array<{ type: "redacted_thinking"; data: string }> | undefined {
-		return this.lastRedactedThinkingBlocks.length > 0 ? this.lastRedactedThinkingBlocks : undefined
-	}
-
 	override isAiSdkProvider(): boolean {
 		return true
 	}
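
With RooMessage[] now aligned one-to-one with the AI SDK message array, the simplified cache-control helper can index it directly instead of re-walking the original Anthropic history. A minimal standalone sketch of the new behavior; the anthropic cacheControl option shape is assumed from the cacheProviderOption built earlier in this handler:

import type { ModelMessage } from "ai"

// Standalone sketch of the simplified helper, not the class method itself.
function applyCacheControl(
	messages: ModelMessage[],
	targetIndices: Set<number>,
	cacheOption: Record<string, Record<string, any>>,
): void {
	for (const idx of targetIndices) {
		if (idx >= 0 && idx < messages.length) {
			messages[idx].providerOptions = { ...messages[idx].providerOptions, ...cacheOption }
		}
	}
}

const msgs: ModelMessage[] = [
	{ role: "user", content: "hello" },
	{ role: "assistant", content: "hi" },
	{ role: "user", content: "do the thing" },
]

// Mark the last user message as a cache breakpoint (option shape assumed).
applyCacheControl(msgs, new Set([2]), { anthropic: { cacheControl: { type: "ephemeral" } } })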

+ 4 - 3
src/api/providers/mistral.ts

@@ -1,6 +1,6 @@
 import { Anthropic } from "@anthropic-ai/sdk"
 import { createMistral } from "@ai-sdk/mistral"
-import { streamText, generateText, ToolSet, LanguageModel } from "ai"
+import { streamText, generateText, ToolSet, LanguageModel, ModelMessage } from "ai"
 
 import {
 	mistralModels,
@@ -19,6 +19,7 @@ import { getModelParams } from "../transform/model-params"
 import { DEFAULT_HEADERS } from "./constants"
 import { BaseProvider } from "./base-provider"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
+import type { RooMessage } from "../../core/task-persistence/rooMessage"
 
 /**
  * Mistral provider using the dedicated @ai-sdk/mistral package.
@@ -137,13 +138,13 @@ export class MistralHandler extends BaseProvider implements SingleCompletionHand
 	 */
 	override async *createMessage(
 		systemPrompt: string,
-		messages: Anthropic.Messages.MessageParam[],
+		messages: RooMessage[],
 		metadata?: ApiHandlerCreateMessageMetadata,
 	): ApiStream {
 		const languageModel = this.getLanguageModel()
 
-		// Convert messages to AI SDK format
-		const aiSdkMessages = convertToAiSdkMessages(messages)
+		// RooMessage[] is already AI SDK-compatible, cast directly
+		const aiSdkMessages = messages as ModelMessage[]
 
 		// Convert tools to OpenAI format first, then to AI SDK format
 		const openAiTools = this.convertToolsForOpenAI(metadata?.tools)

+ 7 - 3
src/api/providers/native-ollama.ts

@@ -1,6 +1,6 @@
 import { Anthropic } from "@anthropic-ai/sdk"
 import { createOllama } from "ollama-ai-provider-v2"
-import { streamText, generateText, ToolSet } from "ai"
+import { streamText, generateText, ToolSet, ModelMessage } from "ai"
 
 import { ModelInfo, openAiModelInfoSaneDefaults, DEEP_SEEK_DEFAULT_TEMPERATURE } from "@roo-code/types"
 
@@ -12,12 +12,14 @@ import {
 	processAiSdkStreamPart,
 	mapToolChoice,
 	handleAiSdkError,
+	yieldResponseMessage,
 } from "../transform/ai-sdk"
 import { ApiStream } from "../transform/stream"
 
 import { BaseProvider } from "./base-provider"
 import { getOllamaModels } from "./fetchers/ollama"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
+import type { RooMessage } from "../../core/task-persistence/rooMessage"
 
 /**
  * NativeOllamaHandler using the ollama-ai-provider-v2 AI SDK community provider.
@@ -83,7 +85,7 @@ export class NativeOllamaHandler extends BaseProvider implements SingleCompletio
 
 	override async *createMessage(
 		systemPrompt: string,
-		messages: Anthropic.Messages.MessageParam[],
+		messages: RooMessage[],
 		metadata?: ApiHandlerCreateMessageMetadata,
 	): ApiStream {
 		await this.fetchModel()
@@ -93,7 +95,7 @@ export class NativeOllamaHandler extends BaseProvider implements SingleCompletio
 
 		const languageModel = this.getLanguageModel()
 
-		const aiSdkMessages = convertToAiSdkMessages(messages)
+		const aiSdkMessages = messages as ModelMessage[]
 
 		const openAiTools = this.convertToolsForOpenAI(metadata?.tools)
 		const aiSdkTools = convertToolsForAiSdk(openAiTools) as ToolSet | undefined
@@ -127,6 +129,8 @@ export class NativeOllamaHandler extends BaseProvider implements SingleCompletio
 					outputTokens: usage.outputTokens || 0,
 				}
 			}
+
+			yield* yieldResponseMessage(result)
 		} catch (error) {
 			this.handleOllamaError(error, modelId)
 		}

+ 8 - 4
src/api/providers/openai-codex.ts

@@ -2,7 +2,7 @@ import * as os from "os"
 import { v7 as uuidv7 } from "uuid"
 import { Anthropic } from "@anthropic-ai/sdk"
 import { createOpenAI } from "@ai-sdk/openai"
-import { streamText, generateText, ToolSet } from "ai"
+import { streamText, generateText, ToolSet, ModelMessage } from "ai"
 
 import { Package } from "../../shared/package"
 import {
@@ -21,6 +21,7 @@ import {
 	processAiSdkStreamPart,
 	mapToolChoice,
 	handleAiSdkError,
+	yieldResponseMessage,
 } from "../transform/ai-sdk"
 import { ApiStream } from "../transform/stream"
 import { getModelParams } from "../transform/model-params"
@@ -28,6 +29,7 @@ import { getModelParams } from "../transform/model-params"
 import { BaseProvider } from "./base-provider"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
 import { openAiCodexOAuthManager } from "../../integrations/openai-codex/oauth"
+import type { RooMessage } from "../../core/task-persistence/rooMessage"
 import {
 	stripPlainTextReasoningBlocks,
 	collectEncryptedReasoningItems,
@@ -143,7 +145,7 @@ export class OpenAiCodexHandler extends BaseProvider implements SingleCompletion
 	 */
 	override async *createMessage(
 		systemPrompt: string,
-		messages: Anthropic.Messages.MessageParam[],
+		messages: RooMessage[],
 		metadata?: ApiHandlerCreateMessageMetadata,
 	): ApiStream {
 		const model = this.getModel()
@@ -177,11 +179,11 @@ export class OpenAiCodexHandler extends BaseProvider implements SingleCompletion
 				const cleanedMessages = stripPlainTextReasoningBlocks(standardMessages)
 
-				// Step 4: Convert to AI SDK messages.
-				const aiSdkMessages = convertToAiSdkMessages(cleanedMessages)
+				// Step 4: RooMessage[] is already AI SDK-compatible, so cast directly.
+				const aiSdkMessages = cleanedMessages as ModelMessage[]
 
 				// Step 5: Re-inject encrypted reasoning as properly-formed AI SDK reasoning parts.
 				if (encryptedReasoningItems.length > 0) {
-					injectEncryptedReasoning(aiSdkMessages, encryptedReasoningItems, messages)
+					injectEncryptedReasoning(aiSdkMessages, encryptedReasoningItems, messages as RooMessage[])
 				}
 
 				// Convert tools to OpenAI format first, then to AI SDK format
@@ -276,6 +278,8 @@ export class OpenAiCodexHandler extends BaseProvider implements SingleCompletion
 					throw usageError
 				}
 
+				yield* yieldResponseMessage(result)
+
 				// Success — exit the retry loop
 				return
 			} catch (error) {

+ 4 - 3
src/api/providers/openai-compatible.ts

@@ -6,7 +6,7 @@
 import { Anthropic } from "@anthropic-ai/sdk"
 import OpenAI from "openai"
 import { createOpenAICompatible } from "@ai-sdk/openai-compatible"
-import { streamText, generateText, LanguageModel, ToolSet } from "ai"
+import { streamText, generateText, LanguageModel, ToolSet, ModelMessage } from "ai"
 
 import type { ModelInfo } from "@roo-code/types"
 
@@ -24,6 +24,7 @@ import { ApiStream, ApiStreamUsageChunk } from "../transform/stream"
 import { DEFAULT_HEADERS } from "./constants"
 import { BaseProvider } from "./base-provider"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
+import type { RooMessage } from "../../core/task-persistence/rooMessage"
 
 /**
  * Configuration options for creating an OpenAI-compatible provider.
@@ -124,14 +125,14 @@ export abstract class OpenAICompatibleHandler extends BaseProvider implements Si
 	 */
 	override async *createMessage(
 		systemPrompt: string,
-		messages: Anthropic.Messages.MessageParam[],
+		messages: RooMessage[],
 		metadata?: ApiHandlerCreateMessageMetadata,
 	): ApiStream {
 		const model = this.getModel()
 		const languageModel = this.getLanguageModel()
 
-		// Convert messages to AI SDK format
-		const aiSdkMessages = convertToAiSdkMessages(messages)
+		// RooMessage[] is already AI SDK-compatible, cast directly
+		const aiSdkMessages = messages as ModelMessage[]
 
 		// Convert tools to OpenAI format first, then to AI SDK format
 		const openAiTools = this.convertToolsForOpenAI(metadata?.tools)

+ 15 - 18
src/api/providers/openai-native.ts

@@ -31,6 +31,7 @@ import { getModelParams } from "../transform/model-params"
 
 import { BaseProvider } from "./base-provider"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
+import type { RooMessage } from "../../core/task-persistence/rooMessage"
 
 export type OpenAiNativeModel = ReturnType<OpenAiNativeHandler["getModel"]>
 
@@ -57,16 +58,14 @@ export interface EncryptedReasoningItem {
  * This function removes them BEFORE conversion. If an assistant message's
  * content becomes empty after filtering, the message is removed entirely.
  */
-export function stripPlainTextReasoningBlocks(
-	messages: Anthropic.Messages.MessageParam[],
-): Anthropic.Messages.MessageParam[] {
-	return messages.reduce<Anthropic.Messages.MessageParam[]>((acc, msg) => {
-		if (msg.role !== "assistant" || typeof msg.content === "string") {
+export function stripPlainTextReasoningBlocks(messages: RooMessage[]): RooMessage[] {
+	return messages.reduce<RooMessage[]>((acc, msg) => {
+		if (!("role" in msg) || msg.role !== "assistant" || typeof msg.content === "string") {
 			acc.push(msg)
 			return acc
 		}
 
-		const filteredContent = msg.content.filter((block) => {
+		const filteredContent = (msg.content as any[]).filter((block: any) => {
 			const b = block as unknown as Record<string, unknown>
 			// Remove blocks that are plain-text reasoning:
 			// type === "reasoning" AND has "text" AND does NOT have "encrypted_content"
@@ -78,7 +77,7 @@ export function stripPlainTextReasoningBlocks(
 
 		// Only include the message if it still has content
 		if (filteredContent.length > 0) {
-			acc.push({ ...msg, content: filteredContent })
+			acc.push({ ...msg, content: filteredContent } as RooMessage)
 		}
 
 		return acc
@@ -92,10 +91,10 @@ export function stripPlainTextReasoningBlocks(
  * injected by `buildCleanConversationHistory` for OpenAI Responses API
  * reasoning continuity.
  */
-export function collectEncryptedReasoningItems(messages: Anthropic.Messages.MessageParam[]): EncryptedReasoningItem[] {
+export function collectEncryptedReasoningItems(messages: RooMessage[]): EncryptedReasoningItem[] {
 	const items: EncryptedReasoningItem[] = []
 	messages.forEach((msg, index) => {
-		const m = msg as unknown as Record<string, unknown>
+		const m = msg as any
 		if (m.type === "reasoning" && m.encrypted_content) {
 			items.push({
 				id: m.id as string,
@@ -124,7 +123,7 @@ export function collectEncryptedReasoningItems(messages: Anthropic.Messages.Mess
 export function injectEncryptedReasoning(
 	aiSdkMessages: ModelMessage[],
 	encryptedItems: EncryptedReasoningItem[],
-	originalMessages: Anthropic.Messages.MessageParam[],
+	originalMessages: RooMessage[],
 ): void {
 	if (encryptedItems.length === 0) return
 
@@ -135,7 +134,7 @@ export function injectEncryptedReasoning(
 		// Walk forward from the encrypted item to find its corresponding assistant message,
 		// skipping over any other encrypted reasoning items.
 		for (let i = item.originalIndex + 1; i < originalMessages.length; i++) {
-			const msg = originalMessages[i] as unknown as Record<string, unknown>
+			const msg = originalMessages[i] as any
 			if (msg.type === "reasoning" && msg.encrypted_content) continue
 			if ((msg as { role?: string }).role === "assistant") {
 				const existing = itemsByAssistantOrigIdx.get(i) || []
@@ -153,7 +152,7 @@ export function injectEncryptedReasoning(
 	// encrypted reasoning items have been filtered out (order preserved).
 	const standardAssistantOriginalIndices: number[] = []
 	for (let i = 0; i < originalMessages.length; i++) {
-		const msg = originalMessages[i] as unknown as Record<string, unknown>
+		const msg = originalMessages[i] as any
 		if (msg.type === "reasoning" && msg.encrypted_content) continue
 		if ((msg as { role?: string }).role === "assistant") {
 			standardAssistantOriginalIndices.push(i)
@@ -398,7 +397,7 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletio
 	 */
 	override async *createMessage(
 		systemPrompt: string,
-		messages: Anthropic.Messages.MessageParam[],
+		messages: RooMessage[],
 		metadata?: ApiHandlerCreateMessageMetadata,
 	): ApiStream {
 		const model = this.getModel()
@@ -416,9 +415,7 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletio
-		// Step 2: Filter out standalone encrypted reasoning items (they lack role
-		// and would break convertToAiSdkMessages which expects user/assistant/tool).
+		// Step 2: Filter out standalone encrypted reasoning items (they lack a role
+		// and are not valid ModelMessage entries).
 		const standardMessages = messages.filter(
-			(msg) =>
-				(msg as unknown as Record<string, unknown>).type !== "reasoning" ||
-				!(msg as unknown as Record<string, unknown>).encrypted_content,
+			(msg) => (msg as any).type !== "reasoning" || !(msg as any).encrypted_content,
 		)
 
 		// Step 3: Strip plain-text reasoning blocks from assistant content arrays.
@@ -427,12 +424,12 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletio
 		const cleanedMessages = stripPlainTextReasoningBlocks(standardMessages)
 
-		// Step 4: Convert to AI SDK messages.
-		const aiSdkMessages = convertToAiSdkMessages(cleanedMessages)
+		// Step 4: RooMessage[] is already AI SDK-compatible, so cast directly.
+		const aiSdkMessages = cleanedMessages as ModelMessage[]
 
 		// Step 5: Re-inject encrypted reasoning as properly-formed AI SDK reasoning
 		// parts with providerOptions.openai.itemId and reasoningEncryptedContent.
 		if (encryptedReasoningItems.length > 0) {
-			injectEncryptedReasoning(aiSdkMessages, encryptedReasoningItems, messages)
+			injectEncryptedReasoning(aiSdkMessages, encryptedReasoningItems, messages as RooMessage[])
 		}
 
 		const openAiTools = this.convertToolsForOpenAI(metadata?.tools)
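
A condensed sketch of the round trip the three reasoning helpers implement, with invented values: a standalone { type: "reasoning", encrypted_content } item persisted between messages is collected, removed before the cast to ModelMessage[], and then re-attached to its assistant message as described in the step 5 comment:

// Hypothetical persisted history (ids and ciphertext invented for illustration).
const history: any[] = [
	{ role: "user", content: "What is 2 + 2?" },
	{ type: "reasoning", id: "rs_123", encrypted_content: "opaque-ciphertext" },
	{ role: "assistant", content: [{ type: "text", text: "4" }] },
]

// Collect, then drop, the standalone encrypted items (steps 1-3 above).
const encrypted = history.filter((m) => m.type === "reasoning" && m.encrypted_content)
const standard = history.filter((m) => !(m.type === "reasoning" && m.encrypted_content))

// injectEncryptedReasoning then re-attaches each item to its matching assistant
// message as a reasoning part, carrying itemId and reasoningEncryptedContent in
// providerOptions.openai (per the step 5 comment above).
console.log(encrypted.length, standard.length) // 1 2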

+ 9 - 5
src/api/providers/openai.ts

@@ -2,7 +2,7 @@ import { Anthropic } from "@anthropic-ai/sdk"
 import { createOpenAI } from "@ai-sdk/openai"
 import { createOpenAICompatible } from "@ai-sdk/openai-compatible"
 import { createAzure } from "@ai-sdk/azure"
-import { streamText, generateText, ToolSet, LanguageModel } from "ai"
+import { streamText, generateText, ToolSet, LanguageModel, ModelMessage } from "ai"
 import axios from "axios"
 
 import {
@@ -22,6 +22,7 @@ import {
 	processAiSdkStreamPart,
 	mapToolChoice,
 	handleAiSdkError,
+	yieldResponseMessage,
 } from "../transform/ai-sdk"
 import { ApiStream, ApiStreamUsageChunk } from "../transform/stream"
 import { getModelParams } from "../transform/model-params"
@@ -29,6 +30,7 @@ import { getModelParams } from "../transform/model-params"
 import { DEFAULT_HEADERS } from "./constants"
 import { BaseProvider } from "./base-provider"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
+import type { RooMessage } from "../../core/task-persistence/rooMessage"
 
 // TODO: Rename this to OpenAICompatibleHandler. Also, I think the
 // `OpenAINativeHandler` can subclass from this, since it's obviously
@@ -93,7 +95,7 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
 
 	override async *createMessage(
 		systemPrompt: string,
-		messages: Anthropic.Messages.MessageParam[],
+		messages: RooMessage[],
 		metadata?: ApiHandlerCreateMessageMetadata,
 	): ApiStream {
 		const { info: modelInfo, temperature, reasoning } = this.getModel()
@@ -104,7 +106,7 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
 
 		const languageModel = this.getLanguageModel()
 
-		const aiSdkMessages = convertToAiSdkMessages(messages)
+		const aiSdkMessages = messages as ModelMessage[]
 
 		const openAiTools = this.convertToolsForOpenAI(metadata?.tools)
 		const aiSdkTools = convertToolsForAiSdk(openAiTools) as ToolSet | undefined
@@ -170,7 +172,7 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
 	private async *handleStreaming(
 		languageModel: LanguageModel,
 		systemPrompt: string | undefined,
-		messages: ReturnType<typeof convertToAiSdkMessages>,
+		messages: ModelMessage[],
 		temperature: number | undefined,
 		tools: ToolSet | undefined,
 		metadata: ApiHandlerCreateMessageMetadata | undefined,
@@ -231,6 +233,8 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
 				}
 				throw usageError
 			}
+
+			yield* yieldResponseMessage(result)
 		} catch (error) {
 			throw handleAiSdkError(error, this.providerName)
 		}
@@ -239,7 +243,7 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
 	private async *handleNonStreaming(
 		languageModel: LanguageModel,
 		systemPrompt: string | undefined,
-		messages: ReturnType<typeof convertToAiSdkMessages>,
+		messages: ModelMessage[],
 		temperature: number | undefined,
 		tools: ToolSet | undefined,
 		metadata: ApiHandlerCreateMessageMetadata | undefined,

+ 12 - 32
src/api/providers/openrouter.ts

@@ -1,6 +1,6 @@
 import { Anthropic } from "@anthropic-ai/sdk"
 import { createOpenRouter } from "@openrouter/ai-sdk-provider"
-import { streamText, generateText } from "ai"
+import { streamText, generateText, ModelMessage } from "ai"
 
 import {
 	type ModelRecord,
@@ -16,9 +16,13 @@ import { TelemetryService } from "@roo-code/telemetry"
 import type { ApiHandlerOptions } from "../../shared/api"
 import { calculateApiCostOpenAI } from "../../shared/cost"
 
-import { type ReasoningDetail } from "../transform/openai-format"
 import { getModelParams } from "../transform/model-params"
-import { convertToAiSdkMessages, convertToolsForAiSdk, processAiSdkStreamPart } from "../transform/ai-sdk"
+import {
+	convertToAiSdkMessages,
+	convertToolsForAiSdk,
+	processAiSdkStreamPart,
+	yieldResponseMessage,
+} from "../transform/ai-sdk"
 
 import { BaseProvider } from "./base-provider"
 import { getModels, getModelsFromCache } from "./fetchers/modelCache"
@@ -28,13 +32,13 @@ import { generateImageWithProvider, ImageGenerationResult } from "./utils/image-
 
 import type { ApiHandlerCreateMessageMetadata, SingleCompletionHandler } from "../index"
 import type { ApiStreamChunk, ApiStreamUsageChunk } from "../transform/stream"
+import type { RooMessage } from "../../core/task-persistence/rooMessage"
 
 export class OpenRouterHandler extends BaseProvider implements SingleCompletionHandler {
 	protected options: ApiHandlerOptions
 	protected models: ModelRecord = {}
 	protected endpoints: ModelRecord = {}
 	private readonly providerName = "OpenRouter"
-	private currentReasoningDetails: ReasoningDetail[] = []
 
 	constructor(options: ApiHandlerOptions) {
 		super()
@@ -82,10 +86,6 @@ export class OpenRouterHandler extends BaseProvider implements SingleCompletionH
 		})
 	}
 
-	getReasoningDetails(): ReasoningDetail[] | undefined {
-		return this.currentReasoningDetails.length > 0 ? this.currentReasoningDetails : undefined
-	}
-
 	private normalizeUsage(
 		usage: { inputTokens: number; outputTokens: number },
 		providerMetadata: Record<string, any> | undefined,
@@ -130,10 +130,9 @@ export class OpenRouterHandler extends BaseProvider implements SingleCompletionH
 
 	override async *createMessage(
 		systemPrompt: string,
-		messages: Anthropic.Messages.MessageParam[],
+		messages: RooMessage[],
 		metadata?: ApiHandlerCreateMessageMetadata,
 	): AsyncGenerator<ApiStreamChunk> {
-		this.currentReasoningDetails = []
 		const model = await this.fetchModel()
 		let { id: modelId, maxTokens, temperature, topP, reasoning } = model
 
@@ -149,7 +148,7 @@ export class OpenRouterHandler extends BaseProvider implements SingleCompletionH
 			? { "x-anthropic-beta": "fine-grained-tool-streaming-2025-05-14" }
 			: undefined
 
-		const aiSdkMessages = convertToAiSdkMessages(messages)
+		const aiSdkMessages = messages as ModelMessage[]
 
 		const openrouter = this.createOpenRouterProvider({ reasoning, headers })
 
@@ -175,8 +174,6 @@ export class OpenRouterHandler extends BaseProvider implements SingleCompletionH
 					}
 				: undefined
 
-		let accumulatedReasoningText = ""
-
 		try {
 			const result = streamText({
 				model: openrouter.chat(modelId),
@@ -191,31 +188,12 @@ export class OpenRouterHandler extends BaseProvider implements SingleCompletionH
 			})
 
 			for await (const part of result.fullStream) {
-				if (part.type === "reasoning-delta" && part.text !== "[REDACTED]") {
-					accumulatedReasoningText += part.text
-				}
 				yield* processAiSdkStreamPart(part)
 			}
 
-			if (accumulatedReasoningText) {
-				this.currentReasoningDetails.push({
-					type: "reasoning.text",
-					text: accumulatedReasoningText,
-					index: 0,
-				})
-			}
-
 			const providerMetadata =
 				(await result.providerMetadata) ?? (await (result as any).experimental_providerMetadata)
 
-			const providerReasoningDetails = providerMetadata?.openrouter?.reasoning_details as
-				| ReasoningDetail[]
-				| undefined
-
-			if (providerReasoningDetails && providerReasoningDetails.length > 0) {
-				this.currentReasoningDetails = providerReasoningDetails
-			}
-
 			const usage = await result.usage
 			const totalUsage = await result.totalUsage
 			const usageChunk = this.normalizeUsage(
@@ -227,6 +205,8 @@ export class OpenRouterHandler extends BaseProvider implements SingleCompletionH
 				model.info,
 			)
 			yield usageChunk
+
+			yield* yieldResponseMessage(result)
 		} catch (error: any) {
 			const errorMessage = error instanceof Error ? error.message : String(error)
 			const apiError = new ApiProviderError(errorMessage, this.providerName, modelId, "createMessage")
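
The removed reasoning_details accumulator is superseded by the stored response message: reasoning now persists as reasoning parts on the assistant message that yieldResponseMessage emits, rather than as a side channel on the handler. A hypothetical example of what gets stored, using AI SDK v5 part shapes:

import type { AssistantModelMessage } from "ai"

// Illustrative only: shape of the assistant message yielded by
// yieldResponseMessage and written to task storage.
const persisted: AssistantModelMessage = {
	role: "assistant",
	content: [
		{ type: "reasoning", text: "Compare the two options first." },
		{ type: "text", text: "Option A is cheaper." },
	],
}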

+ 2 - 1
src/api/providers/qwen-code.ts

@@ -16,6 +16,7 @@ import { ApiStream } from "../transform/stream"
 import { DEFAULT_HEADERS } from "./constants"
 import { OpenAICompatibleHandler, OpenAICompatibleConfig } from "./openai-compatible"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
+import type { RooMessage } from "../../core/task-persistence/rooMessage"
 
 const QWEN_OAUTH_BASE_URL = "https://chat.qwen.ai"
 const QWEN_OAUTH_TOKEN_ENDPOINT = `${QWEN_OAUTH_BASE_URL}/api/v1/oauth2/token`
@@ -274,7 +275,7 @@ export class QwenCodeHandler extends OpenAICompatibleHandler implements SingleCo
 
 	override async *createMessage(
 		systemPrompt: string,
-		messages: Anthropic.Messages.MessageParam[],
+		messages: RooMessage[],
 		metadata?: ApiHandlerCreateMessageMetadata,
 	): ApiStream {
 		await this.ensureAuthenticated()

+ 4 - 3
src/api/providers/requesty.ts

@@ -1,6 +1,6 @@
 import { Anthropic } from "@anthropic-ai/sdk"
 import { createRequesty, type RequestyProviderMetadata } from "@requesty/ai-sdk"
-import { streamText, generateText, ToolSet } from "ai"
+import { streamText, generateText, ToolSet, ModelMessage } from "ai"
 
 import { type ModelInfo, type ModelRecord, requestyDefaultModelId, requestyDefaultModelInfo } from "@roo-code/types"
 
@@ -23,6 +23,7 @@ import { BaseProvider } from "./base-provider"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
 import { toRequestyServiceUrl } from "../../shared/utils/requesty"
 import { applyRouterToolPreferences } from "./utils/router-tool-preferences"
+import type { RooMessage } from "../../core/task-persistence/rooMessage"
 
 /**
  * Requesty provider using the dedicated @requesty/ai-sdk package.
@@ -172,13 +173,13 @@ export class RequestyHandler extends BaseProvider implements SingleCompletionHan
 	 */
 	override async *createMessage(
 		systemPrompt: string,
-		messages: Anthropic.Messages.MessageParam[],
+		messages: RooMessage[],
 		metadata?: ApiHandlerCreateMessageMetadata,
 	): ApiStream {
 		const { info, temperature } = await this.fetchModel()
 		const languageModel = this.getLanguageModel()
 
-		const aiSdkMessages = convertToAiSdkMessages(messages)
+		const aiSdkMessages = messages as ModelMessage[]
 
 		const openAiTools = this.convertToolsForOpenAI(metadata?.tools)
 		const aiSdkTools = convertToolsForAiSdk(openAiTools) as ToolSet | undefined

+ 9 - 33
src/api/providers/roo.ts

@@ -1,6 +1,6 @@
 import { Anthropic } from "@anthropic-ai/sdk"
 import { createOpenAICompatible } from "@ai-sdk/openai-compatible"
-import { streamText, generateText } from "ai"
+import { streamText, generateText, type ModelMessage } from "ai"
 
 import { rooDefaultModelId, getApiProtocol, type ImageGenerationApiMethod } from "@roo-code/types"
 import { CloudService } from "@roo-code/cloud"
@@ -11,13 +11,12 @@ import { calculateApiCostOpenAI } from "../../shared/cost"
 import { ApiStream } from "../transform/stream"
 import { getModelParams } from "../transform/model-params"
 import {
-	convertToAiSdkMessages,
 	convertToolsForAiSdk,
 	processAiSdkStreamPart,
 	handleAiSdkError,
 	mapToolChoice,
+	yieldResponseMessage,
 } from "../transform/ai-sdk"
-import { type ReasoningDetail } from "../transform/openai-format"
 import type { RooReasoningParams } from "../transform/reasoning"
 import { getRooReasoning } from "../transform/reasoning"
 
@@ -26,6 +25,7 @@ import { BaseProvider } from "./base-provider"
 import { getModels, getModelsFromCache } from "./fetchers/modelCache"
 import { generateImageWithProvider, generateImageWithImagesApi, ImageGenerationResult } from "./utils/image-generation"
 import { t } from "../../i18n"
+import type { RooMessage } from "../../core/task-persistence/rooMessage"
 
 function getSessionToken(): string {
 	const token = CloudService.hasInstance() ? CloudService.instance.authService?.getSessionToken() : undefined
@@ -35,7 +35,6 @@ function getSessionToken(): string {
 export class RooHandler extends BaseProvider implements SingleCompletionHandler {
 	protected options: ApiHandlerOptions
 	private fetcherBaseURL: string
-	private currentReasoningDetails: ReasoningDetail[] = []
 
 	constructor(options: ApiHandlerOptions) {
 		super()
@@ -89,18 +88,11 @@ export class RooHandler extends BaseProvider implements SingleCompletionHandler
 		return true as const
 	}
 
-	getReasoningDetails(): ReasoningDetail[] | undefined {
-		return this.currentReasoningDetails.length > 0 ? this.currentReasoningDetails : undefined
-	}
-
 	override async *createMessage(
 		systemPrompt: string,
-		messages: Anthropic.Messages.MessageParam[],
+		messages: RooMessage[],
 		metadata?: ApiHandlerCreateMessageMetadata,
 	): ApiStream {
-		// Reset reasoning_details accumulator for this request
-		this.currentReasoningDetails = []
-
 		const model = this.getModel()
 		const { id: modelId, info } = model
 
@@ -127,11 +119,10 @@ export class RooHandler extends BaseProvider implements SingleCompletionHandler
 		// Create per-request provider with fresh session token
 		const provider = this.createRooProvider({ reasoning, taskId: metadata?.taskId })
 
-		// Convert messages and tools to AI SDK format
-		const aiSdkMessages = convertToAiSdkMessages(messages)
+		// RooMessage[] is already AI SDK-compatible, cast directly
+		const aiSdkMessages = messages as ModelMessage[]
 		const tools = convertToolsForAiSdk(this.convertToolsForOpenAI(metadata?.tools))
 
-		let accumulatedReasoningText = ""
 		let lastStreamError: string | undefined
 
 		try {
@@ -146,9 +137,6 @@ export class RooHandler extends BaseProvider implements SingleCompletionHandler
 			})
 
 			for await (const part of result.fullStream) {
-				if (part.type === "reasoning-delta" && part.text !== "[REDACTED]") {
-					accumulatedReasoningText += part.text
-				}
 				for (const chunk of processAiSdkStreamPart(part)) {
 					if (chunk.type === "error") {
 						lastStreamError = chunk.message
@@ -157,25 +145,11 @@ export class RooHandler extends BaseProvider implements SingleCompletionHandler
 				}
 			}
 
-			// Build reasoning details from accumulated text
-			if (accumulatedReasoningText) {
-				this.currentReasoningDetails.push({
-					type: "reasoning.text",
-					text: accumulatedReasoningText,
-					index: 0,
-				})
-			}
-
-			// Check provider metadata for reasoning_details (override if present)
+			// Check provider metadata for usage details
 			const providerMetadata =
 				(await result.providerMetadata) ?? (await (result as any).experimental_providerMetadata)
 			const rooMeta = providerMetadata?.roo as Record<string, any> | undefined
 
-			const providerReasoningDetails = rooMeta?.reasoning_details as ReasoningDetail[] | undefined
-			if (providerReasoningDetails && providerReasoningDetails.length > 0) {
-				this.currentReasoningDetails = providerReasoningDetails
-			}
-
 			// Process usage with protocol-aware normalization
 			const usage = await result.usage
 			const promptTokens = usage.inputTokens ?? 0
@@ -212,6 +186,8 @@ export class RooHandler extends BaseProvider implements SingleCompletionHandler
 				cacheReadTokens: cacheRead,
 				totalCost,
 			}
+
+			yield* yieldResponseMessage(result)
 		} catch (error) {
 			if (lastStreamError) {
 				throw new Error(lastStreamError)

+ 5 - 6
src/api/providers/sambanova.ts

@@ -1,6 +1,6 @@
 import { Anthropic } from "@anthropic-ai/sdk"
 import { createSambaNova } from "sambanova-ai-provider"
-import { streamText, generateText, ToolSet } from "ai"
+import { streamText, generateText, ToolSet, ModelMessage } from "ai"
 
 import { sambaNovaModels, sambaNovaDefaultModelId, type ModelInfo } from "@roo-code/types"
 
@@ -20,6 +20,7 @@ import { getModelParams } from "../transform/model-params"
 import { DEFAULT_HEADERS } from "./constants"
 import { BaseProvider } from "./base-provider"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
+import type { RooMessage } from "../../core/task-persistence/rooMessage"
 
 const SAMBANOVA_DEFAULT_TEMPERATURE = 0.7
 
@@ -110,18 +111,16 @@ export class SambaNovaHandler extends BaseProvider implements SingleCompletionHa
 	 */
 	override async *createMessage(
 		systemPrompt: string,
-		messages: Anthropic.Messages.MessageParam[],
+		messages: RooMessage[],
 		metadata?: ApiHandlerCreateMessageMetadata,
 	): ApiStream {
 		const { temperature, info } = this.getModel()
 		const languageModel = this.getLanguageModel()
 
-		// Convert messages to AI SDK format
 		// For models that don't support multi-part content (like DeepSeek), flatten messages to string content
 		// SambaNova's DeepSeek models expect string content, not array content
-		const aiSdkMessages = convertToAiSdkMessages(messages, {
-			transform: info.supportsImages ? undefined : flattenAiSdkMessagesToStringContent,
-		})
+		const castMessages = messages as ModelMessage[]
+		const aiSdkMessages = info.supportsImages ? castMessages : flattenAiSdkMessagesToStringContent(castMessages)
 
 		// Convert tools to OpenAI format first, then to AI SDK format
 		const openAiTools = this.convertToolsForOpenAI(metadata?.tools)

+ 7 - 3
src/api/providers/vercel-ai-gateway.ts

@@ -1,5 +1,5 @@
 import { Anthropic } from "@anthropic-ai/sdk"
-import { createGateway, streamText, generateText, ToolSet } from "ai"
+import { createGateway, streamText, generateText, ToolSet, ModelMessage } from "ai"
 
 import {
 	vercelAiGatewayDefaultModelId,
@@ -17,6 +17,7 @@ import {
 	processAiSdkStreamPart,
 	mapToolChoice,
 	handleAiSdkError,
+	yieldResponseMessage,
 } from "../transform/ai-sdk"
 import { ApiStream, ApiStreamUsageChunk } from "../transform/stream"
 
@@ -24,6 +25,7 @@ import { DEFAULT_HEADERS } from "./constants"
 import { BaseProvider } from "./base-provider"
 import { getModels, getModelsFromCache } from "./fetchers/modelCache"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
+import type { RooMessage } from "../../core/task-persistence/rooMessage"
 
 /**
  * Vercel AI Gateway provider using the built-in AI SDK gateway support.
@@ -108,13 +110,13 @@ export class VercelAiGatewayHandler extends BaseProvider implements SingleComple
 
 	override async *createMessage(
 		systemPrompt: string,
-		messages: Anthropic.Messages.MessageParam[],
+		messages: RooMessage[],
 		metadata?: ApiHandlerCreateMessageMetadata,
 	): ApiStream {
 		const { id: modelId, info } = await this.fetchModel()
 		const languageModel = this.getLanguageModel(modelId)
 
-		const aiSdkMessages = convertToAiSdkMessages(messages)
+		const aiSdkMessages = messages as ModelMessage[]
 
 		const openAiTools = this.convertToolsForOpenAI(metadata?.tools)
 		const aiSdkTools = convertToolsForAiSdk(openAiTools) as ToolSet | undefined
@@ -157,6 +159,8 @@ export class VercelAiGatewayHandler extends BaseProvider implements SingleComple
 				}
 				throw usageError
 			}
+
+			yield* yieldResponseMessage(result)
 		} catch (error) {
 			throw handleAiSdkError(error, "Vercel AI Gateway")
 		}

+ 8 - 29
src/api/providers/vertex.ts

@@ -1,6 +1,6 @@
 import type { Anthropic } from "@anthropic-ai/sdk"
 import { createVertex, type GoogleVertexProvider } from "@ai-sdk/google-vertex"
-import { streamText, generateText, ToolSet } from "ai"
+import { streamText, generateText, ToolSet, ModelMessage } from "ai"
 
 import {
 	type ModelInfo,
@@ -19,6 +19,7 @@ import {
 	processAiSdkStreamPart,
 	mapToolChoice,
 	handleAiSdkError,
+	yieldResponseMessage,
 } from "../transform/ai-sdk"
 import { t } from "i18next"
 import type { ApiStream, ApiStreamUsageChunk, GroundingSource } from "../transform/stream"
@@ -27,6 +28,7 @@ import { getModelParams } from "../transform/model-params"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
 import { BaseProvider } from "./base-provider"
 import { DEFAULT_HEADERS } from "./constants"
+import type { RooMessage } from "../../core/task-persistence/rooMessage"
 
 /**
  * Vertex AI provider using the dedicated @ai-sdk/google-vertex package.
@@ -36,7 +38,6 @@ export class VertexHandler extends BaseProvider implements SingleCompletionHandl
 	protected options: ApiHandlerOptions
 	protected provider: GoogleVertexProvider
 	private readonly providerName = "Vertex"
-	private lastThoughtSignature: string | undefined
 
 	constructor(options: ApiHandlerOptions) {
 		super()
@@ -65,7 +66,7 @@ export class VertexHandler extends BaseProvider implements SingleCompletionHandl
 
 	async *createMessage(
 		systemInstruction: string,
-		messages: Anthropic.Messages.MessageParam[],
+		messages: RooMessage[],
 		metadata?: ApiHandlerCreateMessageMetadata,
 	): ApiStream {
 		const { id: modelId, info, reasoning: thinkingConfig, maxTokens } = this.getModel()
@@ -95,7 +96,7 @@ export class VertexHandler extends BaseProvider implements SingleCompletionHandl
 		// Anthropic.MessageParam values and will cause failures.
 		type ReasoningMetaLike = { type?: string }
 
-		const filteredMessages = messages.filter((message): message is Anthropic.Messages.MessageParam => {
+		const filteredMessages = messages.filter((message) => {
 			const meta = message as ReasoningMetaLike
 			if (meta.type === "reasoning") {
 				return false
@@ -104,7 +105,7 @@ export class VertexHandler extends BaseProvider implements SingleCompletionHandl
 		})
 
-		// Convert messages to AI SDK format
-		const aiSdkMessages = convertToAiSdkMessages(filteredMessages)
+		// RooMessage[] is already AI SDK-compatible, cast directly
+		const aiSdkMessages = filteredMessages as ModelMessage[]
 
 		// Convert tools to OpenAI format first, then to AI SDK format
 		let openAiTools = this.convertToolsForOpenAI(metadata?.tools)
@@ -140,27 +141,12 @@ export class VertexHandler extends BaseProvider implements SingleCompletionHandl
 		}
 
 		try {
-			// Reset thought signature for this request
-			this.lastThoughtSignature = undefined
-
 			// Use streamText for streaming responses
 			const result = streamText(requestOptions)
 
 			// Process the full stream to get all events including reasoning
 			let lastStreamError: string | undefined
 			for await (const part of result.fullStream) {
-				// Capture thoughtSignature from tool-call events (Gemini 3 thought signatures)
-				// The AI SDK's tool-call event includes providerMetadata with the signature
-				// Vertex AI stores it under the "vertex" key in providerMetadata
-				if (part.type === "tool-call") {
-					const vertexMeta = (part as any).providerMetadata?.vertex
-					const googleMeta = (part as any).providerMetadata?.google
-					const sig = vertexMeta?.thoughtSignature ?? googleMeta?.thoughtSignature
-					if (sig) {
-						this.lastThoughtSignature = sig
-					}
-				}
-
 				for (const chunk of processAiSdkStreamPart(part)) {
 					if (chunk.type === "error") {
 						lastStreamError = chunk.message
@@ -200,6 +186,8 @@ export class VertexHandler extends BaseProvider implements SingleCompletionHandl
 				}
 				throw usageError
 			}
+
+			yield* yieldResponseMessage(result)
 		} catch (error) {
 			throw handleAiSdkError(error, this.providerName, {
 				onError: (msg) => {
@@ -417,13 +405,4 @@ export class VertexHandler extends BaseProvider implements SingleCompletionHandl
 	override isAiSdkProvider(): boolean {
 		return true
 	}
-
-	/**
-	 * Returns the thought signature captured from the last Vertex AI response.
-	 * Gemini 3 models return thoughtSignature on function call parts,
-	 * which must be round-tripped back for tool use continuations.
-	 */
-	getThoughtSignature(): string | undefined {
-		return this.lastThoughtSignature
-	}
 }
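
With getThoughtSignature() removed, signature continuity presumably travels on the stored response message instead: the AI SDK attaches provider metadata to the parts it returns, and yieldResponseMessage persists the whole message. A hypothetical shape of such a stored tool call; the providerOptions key names are an assumption, not confirmed by this diff:

import type { AssistantModelMessage } from "ai"

// Illustrative only: exact metadata keys emitted by @ai-sdk/google-vertex
// are assumed here.
const stored: AssistantModelMessage = {
	role: "assistant",
	content: [
		{
			type: "tool-call",
			toolCallId: "call_1",
			toolName: "read_file",
			input: { path: "src/index.ts" },
			providerOptions: { google: { thoughtSignature: "opaque-signature" } },
		},
	],
}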

+ 5 - 4
src/api/providers/vscode-lm.ts

@@ -13,6 +13,7 @@ import { convertToVsCodeLmMessages, extractTextCountFromMessage } from "../trans
 
 import { BaseProvider } from "./base-provider"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
+import type { RooMessage } from "../../core/task-persistence/rooMessage"
 
 /**
  * Converts OpenAI-format tools to VSCode Language Model tools.
@@ -364,7 +365,7 @@ export class VsCodeLmHandler extends BaseProvider implements SingleCompletionHan
 
 	override async *createMessage(
 		systemPrompt: string,
-		messages: Anthropic.Messages.MessageParam[],
+		messages: RooMessage[],
 		metadata?: ApiHandlerCreateMessageMetadata,
 	): ApiStream {
 		// Ensure clean state before starting a new request
@@ -374,13 +375,13 @@ export class VsCodeLmHandler extends BaseProvider implements SingleCompletionHan
 		// Process messages
 		const cleanedMessages = messages.map((msg) => ({
 			...msg,
-			content: this.cleanMessageContent(msg.content),
+			...("content" in msg ? { content: this.cleanMessageContent((msg as any).content) } : {}),
 		}))
 
-		// Convert Anthropic messages to VS Code LM messages
+		// Convert messages to VS Code LM messages
 		const vsCodeLmMessages: vscode.LanguageModelChatMessage[] = [
 			vscode.LanguageModelChatMessage.Assistant(systemPrompt),
-			...convertToVsCodeLmMessages(cleanedMessages),
+			...convertToVsCodeLmMessages(cleanedMessages as any),
 		]
 
 		// Initialize cancellation token for the request

+ 3 - 2
src/api/providers/xai.ts

@@ -19,6 +19,7 @@ import { getModelParams } from "../transform/model-params"
 import { DEFAULT_HEADERS } from "./constants"
 import { BaseProvider } from "./base-provider"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
+import type { RooMessage } from "../../core/task-persistence/rooMessage"
 
 const XAI_DEFAULT_TEMPERATURE = 0
 
@@ -118,14 +119,14 @@ export class XAIHandler extends BaseProvider implements SingleCompletionHandler
 	 */
 	override async *createMessage(
 		systemPrompt: string,
-		messages: Anthropic.Messages.MessageParam[],
+		messages: RooMessage[],
 		metadata?: ApiHandlerCreateMessageMetadata,
 	): ApiStream {
 		const { temperature, reasoning } = this.getModel()
 		const languageModel = this.getLanguageModel()
 
-		// Convert messages to AI SDK format
-		const aiSdkMessages = convertToAiSdkMessages(messages)
+		// RooMessage[] is already AI SDK-compatible
+		const aiSdkMessages = messages
 
 		// Convert tools to OpenAI format first, then to AI SDK format
 		const openAiTools = this.convertToolsForOpenAI(metadata?.tools)

+ 4 - 3
src/api/providers/zai.ts

@@ -1,6 +1,6 @@
 import { Anthropic } from "@anthropic-ai/sdk"
 import { createZhipu } from "zhipu-ai-provider"
-import { streamText, generateText, ToolSet } from "ai"
+import { streamText, generateText, ToolSet, ModelMessage } from "ai"
 
 import {
 	internationalZAiModels,
@@ -27,6 +27,7 @@ import { getModelParams } from "../transform/model-params"
 import { DEFAULT_HEADERS } from "./constants"
 import { BaseProvider } from "./base-provider"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
+import type { RooMessage } from "../../core/task-persistence/rooMessage"
 
 /**
  * Z.ai provider using the dedicated zhipu-ai-provider package.
@@ -91,13 +92,13 @@ export class ZAiHandler extends BaseProvider implements SingleCompletionHandler
 	 */
 	override async *createMessage(
 		systemPrompt: string,
-		messages: Anthropic.Messages.MessageParam[],
+		messages: RooMessage[],
 		metadata?: ApiHandlerCreateMessageMetadata,
 	): ApiStream {
 		const { id: modelId, info, temperature } = this.getModel()
 		const languageModel = this.getLanguageModel()
 
-		const aiSdkMessages = convertToAiSdkMessages(messages)
+		const aiSdkMessages = messages as ModelMessage[]
 
 		const openAiTools = this.convertToolsForOpenAI(metadata?.tools)
 		const aiSdkTools = convertToolsForAiSdk(openAiTools) as ToolSet | undefined

+ 8 - 9
src/api/transform/__tests__/image-cleaning.spec.ts

@@ -3,7 +3,6 @@
 import type { ModelInfo } from "@roo-code/types"
 
 import { ApiHandler } from "../../index"
-import { ApiMessage } from "../../../core/task-persistence/apiMessages"
 import { maybeRemoveImageBlocks } from "../image-cleaning"
 
 describe("maybeRemoveImageBlocks", () => {
@@ -24,7 +23,7 @@ describe("maybeRemoveImageBlocks", () => {
 
 	it("should handle empty messages array", () => {
 		const apiHandler = createMockApiHandler(true)
-		const messages: ApiMessage[] = []
+		const messages: any[] = []
 
 		const result = maybeRemoveImageBlocks(messages, apiHandler)
 
@@ -34,7 +33,7 @@ describe("maybeRemoveImageBlocks", () => {
 
 	it("should not modify messages with no image blocks", () => {
 		const apiHandler = createMockApiHandler(true)
-		const messages: ApiMessage[] = [
+		const messages: any[] = [
 			{
 				role: "user",
 				content: "Hello, world!",
@@ -53,7 +52,7 @@ describe("maybeRemoveImageBlocks", () => {
 
 	it("should not modify messages with array content but no image blocks", () => {
 		const apiHandler = createMockApiHandler(true)
-		const messages: ApiMessage[] = [
+		const messages: any[] = [
 			{
 				role: "user",
 				content: [
@@ -77,7 +76,7 @@ describe("maybeRemoveImageBlocks", () => {
 
 	it("should not modify image blocks when API handler supports images", () => {
 		const apiHandler = createMockApiHandler(true)
-		const messages: ApiMessage[] = [
+		const messages: any[] = [
 			{
 				role: "user",
 				content: [
@@ -106,7 +105,7 @@ describe("maybeRemoveImageBlocks", () => {
 
 	it("should convert image blocks to text descriptions when API handler doesn't support images", () => {
 		const apiHandler = createMockApiHandler(false)
-		const messages: ApiMessage[] = [
+		const messages: any[] = [
 			{
 				role: "user",
 				content: [
@@ -149,7 +148,7 @@ describe("maybeRemoveImageBlocks", () => {
 
 	it("should handle mixed content messages with multiple text and image blocks", () => {
 		const apiHandler = createMockApiHandler(false)
-		const messages: ApiMessage[] = [
+		const messages: any[] = [
 			{
 				role: "user",
 				content: [
@@ -212,7 +211,7 @@ describe("maybeRemoveImageBlocks", () => {
 
 	it("should handle multiple messages with image blocks", () => {
 		const apiHandler = createMockApiHandler(false)
-		const messages: ApiMessage[] = [
+		const messages: any[] = [
 			{
 				role: "user",
 				content: [
@@ -293,7 +292,7 @@ describe("maybeRemoveImageBlocks", () => {
 
 	it("should preserve additional message properties", () => {
 		const apiHandler = createMockApiHandler(false)
-		const messages: ApiMessage[] = [
+		const messages: any[] = [
 			{
 				role: "user",
 				content: [

+ 68 - 24
src/api/transform/__tests__/openai-format.spec.ts

@@ -1,7 +1,7 @@
 // npx vitest run api/transform/__tests__/openai-format.spec.ts
 
-import { Anthropic } from "@anthropic-ai/sdk"
 import OpenAI from "openai"
+import type { RooMessage } from "../../../core/task-persistence/rooMessage"
 
 import {
 	convertToOpenAiMessages,
@@ -13,7 +13,7 @@ import { normalizeMistralToolCallId } from "../mistral-format"
 
 describe("convertToOpenAiMessages", () => {
 	it("should convert simple text messages", () => {
-		const anthropicMessages: Anthropic.Messages.MessageParam[] = [
+		const anthropicMessages: any[] = [
 			{
 				role: "user",
 				content: "Hello",
@@ -37,7 +37,7 @@ describe("convertToOpenAiMessages", () => {
 	})
 
 	it("should handle messages with image content", () => {
-		const anthropicMessages: Anthropic.Messages.MessageParam[] = [
+		const anthropicMessages: any[] = [
 			{
 				role: "user",
 				content: [
@@ -76,8 +76,52 @@ describe("convertToOpenAiMessages", () => {
 		})
 	})
 
+	it("should preserve AI SDK image data URLs without double-prefixing", () => {
+		const messages: any[] = [
+			{
+				role: "user",
+				content: [
+					{
+						type: "image",
+						image: "data:image/png;base64,already_encoded",
+						mediaType: "image/png",
+					},
+				],
+			},
+		]
+
+		const openAiMessages = convertToOpenAiMessages(messages)
+		const content = openAiMessages[0].content as Array<{ type: string; image_url?: { url: string } }>
+		expect(content[0]).toEqual({
+			type: "image_url",
+			image_url: { url: "data:image/png;base64,already_encoded" },
+		})
+	})
+
+	it("should preserve AI SDK image http URLs without converting to data URLs", () => {
+		const messages: any[] = [
+			{
+				role: "user",
+				content: [
+					{
+						type: "image",
+						image: "https://example.com/image.png",
+						mediaType: "image/png",
+					},
+				],
+			},
+		]
+
+		const openAiMessages = convertToOpenAiMessages(messages)
+		const content = openAiMessages[0].content as Array<{ type: string; image_url?: { url: string } }>
+		expect(content[0]).toEqual({
+			type: "image_url",
+			image_url: { url: "https://example.com/image.png" },
+		})
+	})
+
 	it("should handle assistant messages with tool use (no normalization without normalizeToolCallId)", () => {
-		const anthropicMessages: Anthropic.Messages.MessageParam[] = [
+		const anthropicMessages: any[] = [
 			{
 				role: "assistant",
 				content: [
@@ -113,7 +157,7 @@ describe("convertToOpenAiMessages", () => {
 	})
 
 	it("should handle user messages with tool results (no normalization without normalizeToolCallId)", () => {
-		const anthropicMessages: Anthropic.Messages.MessageParam[] = [
+		const anthropicMessages: any[] = [
 			{
 				role: "user",
 				content: [
@@ -136,7 +180,7 @@ describe("convertToOpenAiMessages", () => {
 	})
 
 	it("should normalize tool call IDs when normalizeToolCallId function is provided", () => {
-		const anthropicMessages: Anthropic.Messages.MessageParam[] = [
+		const anthropicMessages: any[] = [
 			{
 				role: "assistant",
 				content: [
@@ -173,7 +217,7 @@ describe("convertToOpenAiMessages", () => {
 	})
 
 	it("should not normalize tool call IDs when normalizeToolCallId function is not provided", () => {
-		const anthropicMessages: Anthropic.Messages.MessageParam[] = [
+		const anthropicMessages: any[] = [
 			{
 				role: "assistant",
 				content: [
@@ -208,7 +252,7 @@ describe("convertToOpenAiMessages", () => {
 	})
 
 	it("should use custom normalization function when provided", () => {
-		const anthropicMessages: Anthropic.Messages.MessageParam[] = [
+		const anthropicMessages: any[] = [
 			{
 				role: "assistant",
 				content: [
@@ -235,7 +279,7 @@ describe("convertToOpenAiMessages", () => {
 		// have content set to "" instead of undefined. Gemini (via OpenRouter) requires
 		// every message to have at least one "parts" field, which fails if content is undefined.
 		// See: ROO-425
-		const anthropicMessages: Anthropic.Messages.MessageParam[] = [
+		const anthropicMessages: any[] = [
 			{
 				role: "assistant",
 				content: [
@@ -265,7 +309,7 @@ describe("convertToOpenAiMessages", () => {
 		// of an empty string. Gemini (via OpenRouter) requires function responses to have
 		// non-empty content in the "parts" field, and an empty string causes validation failure
 		// with error: "Unable to submit request because it must include at least one parts field"
-		const anthropicMessages: Anthropic.Messages.MessageParam[] = [
+		const anthropicMessages: any[] = [
 			{
 				role: "user",
 				content: [
@@ -289,7 +333,7 @@ describe("convertToOpenAiMessages", () => {
 	})
 
 	it('should use "(empty)" placeholder for tool result with undefined content (Gemini compatibility)', () => {
-		const anthropicMessages: Anthropic.Messages.MessageParam[] = [
+		const anthropicMessages: any[] = [
 			{
 				role: "user",
 				content: [
@@ -297,7 +341,7 @@ describe("convertToOpenAiMessages", () => {
 						type: "tool_result",
 						tool_use_id: "tool-456",
 						// content is undefined/not provided
-					} as Anthropic.ToolResultBlockParam,
+					},
 				],
 			},
 		]
@@ -311,7 +355,7 @@ describe("convertToOpenAiMessages", () => {
 	})
 
 	it('should use "(empty)" placeholder for tool result with empty array content (Gemini compatibility)', () => {
-		const anthropicMessages: Anthropic.Messages.MessageParam[] = [
+		const anthropicMessages: any[] = [
 			{
 				role: "user",
 				content: [
@@ -319,7 +363,7 @@ describe("convertToOpenAiMessages", () => {
 						type: "tool_result",
 						tool_use_id: "tool-789",
 						content: [], // Empty array
-					} as Anthropic.ToolResultBlockParam,
+					},
 				],
 			},
 		]
@@ -337,7 +381,7 @@ describe("convertToOpenAiMessages", () => {
 			// This test ensures that user messages with empty text blocks are filtered out
 			// to prevent "must include at least one parts field" error from Gemini (via OpenRouter).
 			// Empty text blocks can occur in edge cases during message construction.
-			const anthropicMessages: Anthropic.Messages.MessageParam[] = [
+			const anthropicMessages: any[] = [
 				{
 					role: "user",
 					content: [
@@ -365,7 +409,7 @@ describe("convertToOpenAiMessages", () => {
 
 		it("should not create user message when all text blocks are empty (Gemini compatibility)", () => {
 			// If all text blocks are empty, no user message should be created
-			const anthropicMessages: Anthropic.Messages.MessageParam[] = [
+			const anthropicMessages: any[] = [
 				{
 					role: "user",
 					content: [
@@ -387,7 +431,7 @@ describe("convertToOpenAiMessages", () => {
 		})
 
 		it("should preserve image blocks when filtering empty text blocks", () => {
-			const anthropicMessages: Anthropic.Messages.MessageParam[] = [
+			const anthropicMessages: any[] = [
 				{
 					role: "user",
 					content: [
@@ -426,7 +470,7 @@ describe("convertToOpenAiMessages", () => {
 
 	describe("mergeToolResultText option", () => {
 		it("should merge text content into last tool message when mergeToolResultText is true", () => {
-			const anthropicMessages: Anthropic.Messages.MessageParam[] = [
+			const anthropicMessages: any[] = [
 				{
 					role: "user",
 					content: [
@@ -456,7 +500,7 @@ describe("convertToOpenAiMessages", () => {
 		})
 
 		it("should merge text into last tool message when multiple tool results exist", () => {
-			const anthropicMessages: Anthropic.Messages.MessageParam[] = [
+			const anthropicMessages: any[] = [
 				{
 					role: "user",
 					content: [
@@ -489,7 +533,7 @@ describe("convertToOpenAiMessages", () => {
 		})
 
 		it("should NOT merge text when images are present (fall back to user message)", () => {
-			const anthropicMessages: Anthropic.Messages.MessageParam[] = [
+			const anthropicMessages: any[] = [
 				{
 					role: "user",
 					content: [
@@ -519,7 +563,7 @@ describe("convertToOpenAiMessages", () => {
 		})
 
 		it("should create separate user message when mergeToolResultText is false", () => {
-			const anthropicMessages: Anthropic.Messages.MessageParam[] = [
+			const anthropicMessages: any[] = [
 				{
 					role: "user",
 					content: [
@@ -548,7 +592,7 @@ describe("convertToOpenAiMessages", () => {
 		})
 
 		it("should work with normalizeToolCallId when mergeToolResultText is true", () => {
-			const anthropicMessages: Anthropic.Messages.MessageParam[] = [
+			const anthropicMessages: any[] = [
 				{
 					role: "user",
 					content: [
@@ -581,7 +625,7 @@ describe("convertToOpenAiMessages", () => {
 		})
 
 		it("should handle user messages with only text content (no tool results)", () => {
-			const anthropicMessages: Anthropic.Messages.MessageParam[] = [
+			const anthropicMessages: any[] = [
 				{
 					role: "user",
 					content: [
@@ -906,7 +950,7 @@ describe("convertToOpenAiMessages", () => {
 		})
 
 		it("should handle messages without reasoning_details", () => {
-			const anthropicMessages: Anthropic.Messages.MessageParam[] = [
+			const anthropicMessages: any[] = [
 				{
 					role: "assistant",
 					content: [{ type: "text", text: "Simple response" }],

+ 27 - 0
src/api/transform/ai-sdk.ts

@@ -6,6 +6,7 @@
 import { Anthropic } from "@anthropic-ai/sdk"
 import OpenAI from "openai"
 import { tool as createTool, jsonSchema, type ModelMessage, type TextStreamPart } from "ai"
+import type { AssistantModelMessage } from "ai"
 import type { ApiStreamChunk, ApiStream } from "./stream"
 
 /**
@@ -512,6 +513,7 @@ export async function* consumeAiSdkStream(
 	result: {
 		fullStream: AsyncIterable<ExtendedStreamPart>
 		usage: PromiseLike<{ inputTokens?: number; outputTokens?: number }>
+		response?: PromiseLike<{ messages?: Array<{ role: string; content: unknown; providerOptions?: unknown }> }>
 	},
 	usageHandler?: () => AsyncGenerator<ApiStreamChunk>,
 ): ApiStream {
@@ -545,6 +547,31 @@ export async function* consumeAiSdkStream(
 		}
 		throw usageError
 	}
+
+	// Yield the AI SDK's fully-formed assistant message for direct storage
+	yield* yieldResponseMessage(result)
+}
+
+/**
+ * Await `result.response` and yield the assistant message from `response.messages`.
+ * Used by both `consumeAiSdkStream` and providers with manual `fullStream` iteration.
+ */
+export async function* yieldResponseMessage(result: {
+	response?: PromiseLike<{ messages?: Array<{ role: string; content: unknown; providerOptions?: unknown }> }>
+}): ApiStream {
+	if (!result.response) return
+	try {
+		const response = await result.response
+		if (response.messages && response.messages.length > 0) {
+			const assistantMsg = response.messages.find((m) => m.role === "assistant")
+			if (assistantMsg) {
+				yield { type: "response_message", message: assistantMsg as AssistantModelMessage }
+			}
+		}
+	} catch {
+		// response resolution can fail if the stream errored — ignore silently
+		// since the stream error is already surfaced via lastStreamError
+	}
 }
 
 /**
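
For providers that iterate fullStream by hand instead of calling consumeAiSdkStream, the intended pattern is: drain the stream, emit usage, then delegate to the helper. A condensed sketch in which the surrounding generator and the import path are illustrative:

import { streamText } from "ai"
import { processAiSdkStreamPart, yieldResponseMessage } from "./ai-sdk"

async function* example(requestOptions: Parameters<typeof streamText>[0]) {
	const result = streamText(requestOptions)

	for await (const part of result.fullStream) {
		yield* processAiSdkStreamPart(part)
	}

	const usage = await result.usage
	yield { type: "usage", inputTokens: usage.inputTokens ?? 0, outputTokens: usage.outputTokens ?? 0 }

	// Emits { type: "response_message", message } when response.messages
	// contains an assistant message; otherwise yields nothing.
	yield* yieldResponseMessage(result)
}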

+ 21 - 21
src/api/transform/image-cleaning.ts

@@ -1,32 +1,32 @@
-import { ApiMessage } from "../../core/task-persistence/apiMessages"
+import { type RooMessage } from "../../core/task-persistence/rooMessage"
 
 import { ApiHandler } from "../index"
 
 /* Removes image blocks from messages if they are not supported by the Api Handler */
-export function maybeRemoveImageBlocks(messages: ApiMessage[], apiHandler: ApiHandler): ApiMessage[] {
+export function maybeRemoveImageBlocks(messages: RooMessage[], apiHandler: ApiHandler): RooMessage[] {
 	// Check model capability ONCE instead of for every message
 	const supportsImages = apiHandler.getModel().info.supportsImages
 
+	if (supportsImages) {
+		return messages
+	}
+
 	return messages.map((message) => {
-		// Handle array content (could contain image blocks).
-		let { content } = message
-		if (Array.isArray(content)) {
-			if (!supportsImages) {
-				// Convert image blocks to text descriptions.
-				content = content.map((block) => {
-					if (block.type === "image") {
-						// Convert image blocks to text descriptions.
-						// Note: We can't access the actual image content/url due to API limitations,
-						// but we can indicate that an image was present in the conversation.
-						return {
-							type: "text",
-							text: "[Referenced image in conversation]",
-						}
-					}
-					return block
-				})
-			}
+		// Only process messages with a role and array content
+		if (!("role" in message) || !Array.isArray(message.content)) {
+			return message
 		}
-		return { ...message, content }
+
+		const content = message.content.map((block: any) => {
+			if (block.type === "image") {
+				return {
+					type: "text" as const,
+					text: "[Referenced image in conversation]",
+				}
+			}
+			return block
+		})
+
+		return { ...message, content } as typeof message
 	})
 }
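
A usage sketch of the rewritten helper; the handler object is a hypothetical stub, since only `getModel().info.supportsImages` is consulted:

	const textOnlyHandler = { getModel: () => ({ info: { supportsImages: false } }) } as unknown as ApiHandler

	const cleaned = maybeRemoveImageBlocks(
		[
			{ role: "user", content: "string content passes through untouched" },
			{
				role: "user",
				content: [
					{ type: "text", text: "see attached" },
					{ type: "image", image: "iVBORw0...", mediaType: "image/png" },
				],
			},
		] as RooMessage[],
		textOnlyHandler,
	)
	// cleaned[1].content[1] is now { type: "text", text: "[Referenced image in conversation]" }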

+ 231 - 185
src/api/transform/openai-format.ts

@@ -1,5 +1,18 @@
-import { Anthropic } from "@anthropic-ai/sdk"
 import OpenAI from "openai"
+import {
+	type RooMessage,
+	type RooRoleMessage,
+	type AnyToolCallBlock,
+	type AnyToolResultBlock,
+	isRooRoleMessage,
+	isAnyToolCallBlock,
+	isAnyToolResultBlock,
+	getToolCallId,
+	getToolCallName,
+	getToolCallInput,
+	getToolResultCallId,
+	getToolResultContent,
+} from "../../core/task-persistence/rooMessage"
 
 /**
  * Type for OpenRouter's reasoning detail elements.
@@ -145,6 +158,12 @@ export function consolidateReasoningDetails(reasoningDetails: ReasoningDetail[])
 	return consolidated
 }
 
+/**
+ * A RooRoleMessage that may carry `reasoning_details` from OpenAI/OpenRouter providers.
+ * Lets callers narrow the type instead of falling back to `as any` when accessing reasoning metadata.
+ */
+type MessageWithReasoningDetails = RooRoleMessage & { reasoning_details?: ReasoningDetail[] }
+
 /**
  * Sanitizes OpenAI messages for Gemini models by filtering reasoning_details
  * to only include entries that match the tool call IDs.
@@ -254,17 +273,17 @@ export function sanitizeGeminiMessages(
 }
 
 /**
- * Options for converting Anthropic messages to OpenAI format.
+ * Options for converting messages to OpenAI format.
  */
 export interface ConvertToOpenAiMessagesOptions {
 	/**
 	 * Optional function to normalize tool call IDs for providers with strict ID requirements.
-	 * When provided, this function will be applied to all tool_use IDs and tool_result tool_use_ids.
+	 * When provided, this function will be applied to all tool call IDs.
 	 * This allows callers to declare provider-specific ID format requirements.
 	 */
 	normalizeToolCallId?: (id: string) => string
 	/**
-	 * If true, merge text content after tool_results into the last tool message
+	 * If true, merge text content after tool results into the last tool message
 	 * instead of creating a separate user message. This is critical for providers
 	 * with reasoning/thinking models (like DeepSeek-reasoner, GLM-4.7, etc.) where
 	 * a user message after tool results causes the model to drop all previous
@@ -273,8 +292,13 @@ export interface ConvertToOpenAiMessagesOptions {
 	mergeToolResultText?: boolean
 }
 
+/**
+ * Converts RooMessage[] to OpenAI chat completion messages.
+ * Handles both AI SDK format (tool-call/tool-result) and legacy Anthropic format
+ * (tool_use/tool_result) for backward compatibility with persisted data.
+ */
 export function convertToOpenAiMessages(
-	anthropicMessages: Anthropic.Messages.MessageParam[],
+	messages: RooMessage[],
 	options?: ConvertToOpenAiMessagesOptions,
 ): OpenAI.Chat.ChatCompletionMessageParam[] {
 	const openAiMessages: OpenAI.Chat.ChatCompletionMessageParam[] = []
@@ -300,208 +324,230 @@ export function convertToOpenAiMessages(
 	// Use provided normalization function or identity function
 	const normalizeId = options?.normalizeToolCallId ?? ((id: string) => id)
 
-	for (const anthropicMessage of anthropicMessages) {
-		if (typeof anthropicMessage.content === "string") {
-			// Some upstream transforms (e.g. [`Task.buildCleanConversationHistory()`](src/core/task/Task.ts:4048))
-			// will convert a single text block into a string for compactness.
-			// If a message also contains reasoning_details (Gemini 3 / xAI / o-series, etc.),
-			// we must preserve it here as well.
-			const messageWithDetails = anthropicMessage as any
+	/** Resolve an image part to a data or http(s) URL, handling both AI SDK and legacy Anthropic shapes. */
+	const getImageDataUrl = (part: {
+		type: string
+		image?: string
+		mediaType?: string
+		source?: { media_type?: string; data?: string }
+	}): string => {
+		// AI SDK format:
+		// - raw base64 + mediaType: construct data URL
+		// - existing data/http(s) URL in image: pass through unchanged
+		if (part.image) {
+			const image = part.image.trim()
+			if (image.startsWith("data:") || /^https?:\/\//i.test(image)) {
+				return image
+			}
+			if (part.mediaType) {
+				return `data:${part.mediaType};base64,${image}`
+			}
+		}
+		// Legacy Anthropic format: { type: "image", source: { media_type, data } }
+		if (part.source?.media_type && part.source?.data) {
+			return `data:${part.source.media_type};base64,${part.source.data}`
+		}
+		return ""
+	}
+
+	for (const message of messages) {
+		// Skip RooReasoningMessage (no role property)
+		if (!("role" in message)) {
+			continue
+		}
+
+		if (typeof message.content === "string") {
+			// String content: simple text message
+			const messageWithDetails = message as MessageWithReasoningDetails
 			const baseMessage: OpenAI.Chat.ChatCompletionMessageParam & { reasoning_details?: any[] } = {
-				role: anthropicMessage.role,
-				content: anthropicMessage.content,
+				role: message.role as "user" | "assistant",
+				content: message.content,
 			}
 
-			if (anthropicMessage.role === "assistant") {
+			if (message.role === "assistant") {
 				const mapped = mapReasoningDetails(messageWithDetails.reasoning_details)
 				if (mapped) {
-					;(baseMessage as any).reasoning_details = mapped
+					baseMessage.reasoning_details = mapped
 				}
 			}
 
 			openAiMessages.push(baseMessage)
-		} else {
-			// image_url.url is base64 encoded image data
-			// ensure it contains the content-type of the image: data:image/png;base64,
-			/*
-        { role: "user", content: "" | { type: "text", text: string } | { type: "image_url", image_url: { url: string } } },
-         // content required unless tool_calls is present
-        { role: "assistant", content?: "" | null, tool_calls?: [{ id: "", function: { name: "", arguments: "" }, type: "function" }] },
-        { role: "tool", tool_call_id: "", content: ""}
-         */
-			if (anthropicMessage.role === "user") {
-				const { nonToolMessages, toolMessages } = anthropicMessage.content.reduce<{
-					nonToolMessages: (Anthropic.TextBlockParam | Anthropic.ImageBlockParam)[]
-					toolMessages: Anthropic.ToolResultBlockParam[]
-				}>(
-					(acc, part) => {
-						if (part.type === "tool_result") {
-							acc.toolMessages.push(part)
-						} else if (part.type === "text" || part.type === "image") {
-							acc.nonToolMessages.push(part)
-						} // user cannot send tool_use messages
-						return acc
-					},
-					{ nonToolMessages: [], toolMessages: [] },
-				)
-
-				// Process tool result messages FIRST since they must follow the tool use messages
-				let toolResultImages: Anthropic.Messages.ImageBlockParam[] = []
-				toolMessages.forEach((toolMessage) => {
-					// The Anthropic SDK allows tool results to be a string or an array of text and image blocks, enabling rich and structured content. In contrast, the OpenAI SDK only supports tool results as a single string, so we map the Anthropic tool result parts into one concatenated string to maintain compatibility.
-					let content: string
-
-					if (typeof toolMessage.content === "string") {
-						content = toolMessage.content
-					} else {
-						content =
-							toolMessage.content
-								?.map((part) => {
-									if (part.type === "image") {
-										toolResultImages.push(part)
-										return "(see following user message for image)"
-									}
-									return part.text
-								})
-								.join("\n") ?? ""
-					}
-					openAiMessages.push({
-						role: "tool",
-						tool_call_id: normalizeId(toolMessage.tool_use_id),
-						// Use "(empty)" placeholder for empty content to satisfy providers like Gemini (via OpenRouter)
-						content: content || "(empty)",
-					})
-				})
-
-				// If tool results contain images, send as a separate user message
-				// I ran into an issue where if I gave feedback for one of many tool uses, the request would fail.
-				// "Messages following `tool_use` blocks must begin with a matching number of `tool_result` blocks."
-				// Therefore we need to send these images after the tool result messages
-				// NOTE: it's actually okay to have multiple user messages in a row, the model will treat them as a continuation of the same input (this way works better than combining them into one message, since the tool result specifically mentions (see following user message for image)
-				// UPDATE v2.0: we don't use tools anymore, but if we did it's important to note that the openrouter prompt caching mechanism requires one user message at a time, so we would need to add these images to the user content array instead.
-				// if (toolResultImages.length > 0) {
-				// 	openAiMessages.push({
-				// 		role: "user",
-				// 		content: toolResultImages.map((part) => ({
-				// 			type: "image_url",
-				// 			image_url: { url: `data:${part.source.media_type};base64,${part.source.data}` },
-				// 		})),
-				// 	})
-				// }
-
-				// Process non-tool messages
-				// Filter out empty text blocks to prevent "must include at least one parts field" error
-				// from Gemini (via OpenRouter). Images always have content (base64 data).
-				const filteredNonToolMessages = nonToolMessages.filter(
-					(part) => part.type === "image" || (part.type === "text" && part.text),
-				)
-
-				if (filteredNonToolMessages.length > 0) {
-					// Check if we should merge text into the last tool message
-					// This is critical for reasoning/thinking models where a user message
-					// after tool results causes the model to drop all previous reasoning_content
-					const hasOnlyTextContent = filteredNonToolMessages.every((part) => part.type === "text")
-					const hasToolMessages = toolMessages.length > 0
-					const shouldMergeIntoToolMessage =
-						options?.mergeToolResultText && hasToolMessages && hasOnlyTextContent
-
-					if (shouldMergeIntoToolMessage) {
-						// Merge text content into the last tool message
-						const lastToolMessage = openAiMessages[
-							openAiMessages.length - 1
-						] as OpenAI.Chat.ChatCompletionToolMessageParam
-						if (lastToolMessage?.role === "tool") {
-							const additionalText = filteredNonToolMessages
-								.map((part) => (part as Anthropic.TextBlockParam).text)
-								.join("\n")
-							lastToolMessage.content = `${lastToolMessage.content}\n\n${additionalText}`
+		} else if (message.role === "tool") {
+			// RooToolMessage: each tool-result → OpenAI tool message
+			if (Array.isArray(message.content)) {
+				for (const part of message.content) {
+					if (isAnyToolResultBlock(part as { type: string })) {
+						const resultBlock = part as AnyToolResultBlock
+						const rawContent = getToolResultContent(resultBlock)
+						let content: string
+						if (typeof rawContent === "string") {
+							content = rawContent
+						} else if (rawContent && typeof rawContent === "object" && "value" in rawContent) {
+							content = String((rawContent as { value: unknown }).value)
+						} else {
+							content = rawContent ? JSON.stringify(rawContent) : ""
 						}
-					} else {
-						// Standard behavior: add user message with text/image content
 						openAiMessages.push({
-							role: "user",
-							content: filteredNonToolMessages.map((part) => {
-								if (part.type === "image") {
-									return {
-										type: "image_url",
-										image_url: { url: `data:${part.source.media_type};base64,${part.source.data}` },
-									}
-								}
-								return { type: "text", text: part.text }
-							}),
+							role: "tool",
+							tool_call_id: normalizeId(getToolResultCallId(resultBlock)),
+							content: content || "(empty)",
 						})
 					}
 				}
-			} else if (anthropicMessage.role === "assistant") {
-				const { nonToolMessages, toolMessages } = anthropicMessage.content.reduce<{
-					nonToolMessages: (Anthropic.TextBlockParam | Anthropic.ImageBlockParam)[]
-					toolMessages: Anthropic.ToolUseBlockParam[]
-				}>(
-					(acc, part) => {
-						if (part.type === "tool_use") {
-							acc.toolMessages.push(part)
-						} else if (part.type === "text" || part.type === "image") {
-							acc.nonToolMessages.push(part)
-						} // assistant cannot send tool_result messages
-						return acc
-					},
-					{ nonToolMessages: [], toolMessages: [] },
-				)
-
-				// Process non-tool messages
-				let content: string | undefined
-				if (nonToolMessages.length > 0) {
-					content = nonToolMessages
-						.map((part) => {
-							if (part.type === "image") {
-								return "" // impossible as the assistant cannot send images
-							}
-							return part.text
-						})
-						.join("\n")
+			}
+		} else if (message.role === "user") {
+			// User message: separate tool results from text/image content
+			// Persisted data may contain legacy Anthropic tool_result blocks alongside AI SDK parts,
+			// so we widen the element type to handle all possible block shapes.
+			const contentArray: Array<{ type: string }> = Array.isArray(message.content)
+				? (message.content as unknown as Array<{ type: string }>)
+				: []
+
+			const nonToolMessages: Array<{ type: string; text?: unknown; [k: string]: unknown }> = []
+			const toolMessages: AnyToolResultBlock[] = []
+
+			for (const part of contentArray) {
+				if (isAnyToolResultBlock(part)) {
+					toolMessages.push(part)
+				} else if (part.type === "text" || part.type === "image") {
+					nonToolMessages.push(part as { type: string; text?: unknown; [k: string]: unknown })
 				}
+			}
 
-				// Process tool use messages
-				let tool_calls: OpenAI.Chat.ChatCompletionMessageToolCall[] = toolMessages.map((toolMessage) => ({
-					id: normalizeId(toolMessage.id),
-					type: "function",
-					function: {
-						name: toolMessage.name,
-						// json string
-						arguments: JSON.stringify(toolMessage.input),
-					},
-				}))
-
-				// Check if the message has reasoning_details (used by Gemini 3, xAI, etc.)
-				const messageWithDetails = anthropicMessage as any
-
-				// Build message with reasoning_details BEFORE tool_calls to preserve
-				// the order expected by providers like Roo. Property order matters
-				// when sending messages back to some APIs.
-				const baseMessage: OpenAI.Chat.ChatCompletionAssistantMessageParam & {
-					reasoning_details?: any[]
-				} = {
-					role: "assistant",
-					// Use empty string instead of undefined for providers like Gemini (via OpenRouter)
-					// that require every message to have content in the "parts" field
-					content: content ?? "",
+			// Process tool result messages FIRST, since they must directly follow the matching tool calls
+			toolMessages.forEach((toolMessage) => {
+				const rawContent = getToolResultContent(toolMessage)
+				let content: string
+
+				if (typeof rawContent === "string") {
+					content = rawContent
+				} else if (Array.isArray(rawContent)) {
+					content =
+						rawContent
+							.map((part: { type: string; text?: string }) => {
+								if (part.type === "image") {
+									return "(see following user message for image)"
+								}
+								return part.text
+							})
+							.join("\n") ?? ""
+				} else if (rawContent && typeof rawContent === "object" && "value" in rawContent) {
+					content = String((rawContent as { value: unknown }).value)
+				} else {
+					content = rawContent ? JSON.stringify(rawContent) : ""
 				}
 
-				// Pass through reasoning_details to preserve the original shape from the API.
-				// The `id` field is stripped from openai-responses-v1 blocks (see mapReasoningDetails).
-				const mapped = mapReasoningDetails(messageWithDetails.reasoning_details)
-				if (mapped) {
-					baseMessage.reasoning_details = mapped
+				openAiMessages.push({
+					role: "tool",
+					tool_call_id: normalizeId(getToolResultCallId(toolMessage)),
+					content: content || "(empty)",
+				})
+			})
+
+			// Process non-tool messages
+			// Filter out empty text blocks to prevent "must include at least one parts field" error
+			const filteredNonToolMessages = nonToolMessages.filter(
+				(part) => part.type === "image" || (part.type === "text" && part.text),
+			)
+
+			if (filteredNonToolMessages.length > 0) {
+				const hasOnlyTextContent = filteredNonToolMessages.every((part) => part.type === "text")
+				const hasToolMessages = toolMessages.length > 0
+				const shouldMergeIntoToolMessage = options?.mergeToolResultText && hasToolMessages && hasOnlyTextContent
+
+				if (shouldMergeIntoToolMessage) {
+					const lastToolMessage = openAiMessages[
+						openAiMessages.length - 1
+					] as OpenAI.Chat.ChatCompletionToolMessageParam
+					if (lastToolMessage?.role === "tool") {
+						const additionalText = filteredNonToolMessages.map((part) => String(part.text ?? "")).join("\n")
+						lastToolMessage.content = `${lastToolMessage.content}\n\n${additionalText}`
+					}
+				} else {
+					openAiMessages.push({
+						role: "user",
+						content: filteredNonToolMessages.map((part) => {
+							if (part.type === "image") {
+								return {
+									type: "image_url",
+									image_url: {
+										url: getImageDataUrl(
+											part as {
+												type: string
+												image?: string
+												mediaType?: string
+												source?: { media_type?: string; data?: string }
+											},
+										),
+									},
+								}
+							}
+							return { type: "text", text: String(part.text ?? "") }
+						}),
+					})
 				}
-
-				// Add tool_calls after reasoning_details
-				// Cannot be an empty array. API expects an array with minimum length 1, and will respond with an error if it's empty
-				if (tool_calls.length > 0) {
-					baseMessage.tool_calls = tool_calls
+			}
+		} else if (message.role === "assistant") {
+			// Assistant message: separate tool calls from text content
+			// Persisted data may contain legacy Anthropic tool_use blocks, so we widen
+			// the element type to accommodate both AI SDK and legacy block shapes.
+			const contentArray: Array<{ type: string }> = Array.isArray(message.content)
+				? (message.content as unknown as Array<{ type: string }>)
+				: []
+
+			const nonToolMessages: Array<{ type: string; text?: unknown }> = []
+			const toolCallMessages: AnyToolCallBlock[] = []
+
+			for (const part of contentArray) {
+				if (isAnyToolCallBlock(part)) {
+					toolCallMessages.push(part)
+				} else if (part.type === "text" || part.type === "image") {
+					nonToolMessages.push(part as { type: string; text?: unknown })
 				}
+			}
+
+			// Process non-tool messages
+			let content: string | undefined
+			if (nonToolMessages.length > 0) {
+				content = nonToolMessages
+					.map((part) => {
+						if (part.type === "image") {
+							return "" // unreachable in practice: assistants cannot emit image parts
+						}
+						return part.text as string
+					})
+					.join("\n")
+			}
 
-				openAiMessages.push(baseMessage)
+			// Process tool call messages
+			let tool_calls: OpenAI.Chat.ChatCompletionMessageToolCall[] = toolCallMessages.map((tc) => ({
+				id: normalizeId(getToolCallId(tc)),
+				type: "function" as const,
+				function: {
+					name: getToolCallName(tc),
+					arguments: JSON.stringify(getToolCallInput(tc)),
+				},
+			}))
+
+			const messageWithDetails = message as MessageWithReasoningDetails
+
+			const baseMessage: OpenAI.Chat.ChatCompletionAssistantMessageParam & {
+				reasoning_details?: any[]
+			} = {
+				role: "assistant",
+				content: content ?? "",
 			}
+
+			const mapped = mapReasoningDetails(messageWithDetails.reasoning_details)
+			if (mapped) {
+				baseMessage.reasoning_details = mapped
+			}
+
+			if (tool_calls.length > 0) {
+				baseMessage.tool_calls = tool_calls
+			}
+
+			openAiMessages.push(baseMessage)
 		}
 	}
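
To make the new mapping concrete, a sketch of an AI SDK tool exchange run through the converter above (the tool call id and file contents are illustrative):

	const history = [
		{
			role: "assistant",
			content: [{ type: "tool-call", toolCallId: "call_1", toolName: "read_file", input: { path: "a.ts" } }],
		},
		{
			role: "tool",
			content: [
				{
					type: "tool-result",
					toolCallId: "call_1",
					toolName: "read_file",
					output: { type: "text", value: "file contents" },
				},
			],
		},
	] as RooMessage[]

	convertToOpenAiMessages(history)
	// => [
	//   { role: "assistant", content: "", tool_calls: [{ id: "call_1", type: "function",
	//       function: { name: "read_file", arguments: '{"path":"a.ts"}' } }] },
	//   { role: "tool", tool_call_id: "call_1", content: "file contents" },
	// ]

Legacy tool_use/tool_result blocks in persisted history take the same branches via the isAnyToolCallBlock/isAnyToolResultBlock guards, so both formats converge on this output.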
 

+ 12 - 0
src/api/transform/stream.ts

@@ -1,3 +1,5 @@
+import type { AssistantModelMessage } from "ai"
+
 export type ApiStream = AsyncGenerator<ApiStreamChunk>
 
 export type ApiStreamChunk =
@@ -11,6 +13,7 @@ export type ApiStreamChunk =
 	| ApiStreamToolCallDeltaChunk
 	| ApiStreamToolCallEndChunk
 	| ApiStreamToolCallPartialChunk
+	| ApiStreamResponseMessageChunk
 	| ApiStreamError
 
 export interface ApiStreamError {
@@ -107,6 +110,15 @@ export interface ApiStreamToolCallPartialChunk {
 	arguments?: string
 }
 
+/**
+ * Carries the fully-formed assistant message from the AI SDK's `result.response.messages`.
+ * Yielded after streaming completes so Task.ts can store it directly without manual reconstruction.
+ */
+export interface ApiStreamResponseMessageChunk {
+	type: "response_message"
+	message: AssistantModelMessage
+}
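
Downstream consumers narrow on the new discriminant like any other chunk; a sketch, where `persistAssistantMessage` is a hypothetical stand-in for the storage call wired up in Task.ts:

	for await (const chunk of stream) {
		if (chunk.type === "response_message") {
			// chunk is narrowed to ApiStreamResponseMessageChunk here.
			await persistAssistantMessage(chunk.message)
		}
		// ...other chunk types handled as before...
	}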
+
 export interface GroundingSource {
 	title: string
 	url: string

+ 37 - 35
src/core/assistant-message/__tests__/presentAssistantMessage-images.spec.ts

@@ -38,6 +38,7 @@ describe("presentAssistantMessage - Image Handling in Native Tool Calling", () =
 			currentStreamingContentIndex: 0,
 			assistantMessageContent: [],
 			userMessageContent: [],
+			pendingToolResults: [],
 			didCompleteReadingStream: false,
 			didRejectTool: false,
 			didAlreadyUseTool: false,
@@ -66,13 +67,13 @@ describe("presentAssistantMessage - Image Handling in Native Tool Calling", () =
 
 		// Add pushToolResultToUserContent method after mockTask is created so it can reference mockTask
 		mockTask.pushToolResultToUserContent = vi.fn().mockImplementation((toolResult: any) => {
-			const existingResult = mockTask.userMessageContent.find(
-				(block: any) => block.type === "tool_result" && block.tool_use_id === toolResult.tool_use_id,
+			const existingResult = mockTask.pendingToolResults.find(
+				(block: any) => block.type === "tool-result" && block.toolCallId === toolResult.toolCallId,
 			)
 			if (existingResult) {
 				return false
 			}
-			mockTask.userMessageContent.push(toolResult)
+			mockTask.pendingToolResults.push(toolResult)
 			return true
 		})
 	})
@@ -109,25 +110,25 @@ describe("presentAssistantMessage - Image Handling in Native Tool Calling", () =
 		// Execute presentAssistantMessage
 		await presentAssistantMessage(mockTask)
 
-		// Verify that userMessageContent was populated
-		expect(mockTask.userMessageContent.length).toBeGreaterThan(0)
+		// Verify that pendingToolResults was populated
+		expect(mockTask.pendingToolResults.length).toBeGreaterThan(0)
 
-		// Find the tool_result block
-		const toolResult = mockTask.userMessageContent.find(
-			(item: any) => item.type === "tool_result" && item.tool_use_id === toolCallId,
+		// Find the tool-result block in pendingToolResults
+		const toolResult = mockTask.pendingToolResults.find(
+			(item: any) => item.type === "tool-result" && item.toolCallId === toolCallId,
 		)
 
 		expect(toolResult).toBeDefined()
-		expect(toolResult.tool_use_id).toBe(toolCallId)
+		expect(toolResult.toolCallId).toBe(toolCallId)
 
-		// For native tool calling, tool_result content should be a string (text only)
-		expect(typeof toolResult.content).toBe("string")
-		expect(toolResult.content).toContain("I see a cat")
+		// For native tool calling, output should be a text value
+		expect(toolResult.output).toBeDefined()
+		expect(toolResult.output.value).toContain("I see a cat")
 
-		// Images should be added as separate blocks AFTER the tool_result
+		// Images should be added as separate ImagePart blocks in userMessageContent
 		const imageBlocks = mockTask.userMessageContent.filter((item: any) => item.type === "image")
 		expect(imageBlocks.length).toBeGreaterThan(0)
-		expect(imageBlocks[0].source.data).toBe("base64ImageData")
+		expect(imageBlocks[0].image).toBe("base64ImageData")
 	})
 
 	it("should convert to string when no images are present (native tool calling)", async () => {
@@ -152,14 +153,15 @@ describe("presentAssistantMessage - Image Handling in Native Tool Calling", () =
 
 		await presentAssistantMessage(mockTask)
 
-		const toolResult = mockTask.userMessageContent.find(
-			(item: any) => item.type === "tool_result" && item.tool_use_id === toolCallId,
+		const toolResult = mockTask.pendingToolResults.find(
+			(item: any) => item.type === "tool-result" && item.toolCallId === toolCallId,
 		)
 
 		expect(toolResult).toBeDefined()
 
-		// When no images, content should be a string
-		expect(typeof toolResult.content).toBe("string")
+		// When no images, output should be a text value
+		expect(toolResult.output.type).toBe("text")
+		expect(typeof toolResult.output.value).toBe("string")
 	})
 
 	it("should fail fast when tool_use is missing id (legacy/XML-style tool call)", async () => {
@@ -209,13 +211,13 @@ describe("presentAssistantMessage - Image Handling in Native Tool Calling", () =
 
 		await presentAssistantMessage(mockTask)
 
-		const toolResult = mockTask.userMessageContent.find(
-			(item: any) => item.type === "tool_result" && item.tool_use_id === toolCallId,
+		const toolResult = mockTask.pendingToolResults.find(
+			(item: any) => item.type === "tool-result" && item.toolCallId === toolCallId,
 		)
 
 		expect(toolResult).toBeDefined()
 		// Should have fallback text
-		expect(toolResult.content).toBeTruthy()
+		expect(toolResult.output).toBeTruthy()
 	})
 
 	describe("Multiple tool calls handling", () => {
@@ -246,20 +248,20 @@ describe("presentAssistantMessage - Image Handling in Native Tool Calling", () =
 			mockTask.currentStreamingContentIndex = 1
 			await presentAssistantMessage(mockTask)
 
-			// Find the tool_result for the second tool
-			const toolResult = mockTask.userMessageContent.find(
-				(item: any) => item.type === "tool_result" && item.tool_use_id === toolCallId2,
+			// Find the tool-result for the second tool in pendingToolResults
+			const toolResult = mockTask.pendingToolResults.find(
+				(item: any) => item.type === "tool-result" && item.toolCallId === toolCallId2,
 			)
 
-			// Verify that a tool_result block was created (not a text block)
+			// Verify that a tool-result block was created (not a text block)
 			expect(toolResult).toBeDefined()
-			expect(toolResult.tool_use_id).toBe(toolCallId2)
-			expect(toolResult.is_error).toBe(true)
-			expect(toolResult.content).toContain("due to user rejecting a previous tool")
+			expect(toolResult.toolCallId).toBe(toolCallId2)
+			expect(toolResult.output.value).toContain("[ERROR]")
+			expect(toolResult.output.value).toContain("due to user rejecting a previous tool")
 
 			// Ensure no text blocks were added for this rejection
 			const textBlocks = mockTask.userMessageContent.filter(
-				(item: any) => item.type === "text" && item.text.includes("due to user rejecting"),
+				(item: any) => item.type === "text" && item.text?.includes("due to user rejecting"),
 			)
 			expect(textBlocks.length).toBe(0)
 		})
@@ -310,15 +312,15 @@ describe("presentAssistantMessage - Image Handling in Native Tool Calling", () =
 
 			await presentAssistantMessage(mockTask)
 
-			// Find the tool_result
-			const toolResult = mockTask.userMessageContent.find(
-				(item: any) => item.type === "tool_result" && item.tool_use_id === toolCallId,
+			// Find the tool-result in pendingToolResults
+			const toolResult = mockTask.pendingToolResults.find(
+				(item: any) => item.type === "tool-result" && item.toolCallId === toolCallId,
 			)
 
-			// Verify tool_result was created for partial block
+			// Verify tool-result was created for partial block
 			expect(toolResult).toBeDefined()
-			expect(toolResult.is_error).toBe(true)
-			expect(toolResult.content).toContain("was interrupted and not executed")
+			expect(toolResult.output.value).toContain("[ERROR]")
+			expect(toolResult.output.value).toContain("was interrupted and not executed")
 		})
 	})
 })
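
The assertions above pin down the new split: tool text lands in `pendingToolResults` as a `tool-result` part, while images become `ImagePart` entries in `userMessageContent`. In vitest terms, roughly:

	expect(mockTask.pendingToolResults[0]).toMatchObject({
		type: "tool-result",
		output: { type: "text", value: expect.stringContaining("I see a cat") },
	})
	expect(mockTask.userMessageContent).toContainEqual(
		expect.objectContaining({ type: "image", image: "base64ImageData" }),
	)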

+ 20 - 19
src/core/assistant-message/__tests__/presentAssistantMessage-unknown-tool.spec.ts

@@ -32,6 +32,7 @@ describe("presentAssistantMessage - Unknown Tool Handling", () => {
 			currentStreamingContentIndex: 0,
 			assistantMessageContent: [],
 			userMessageContent: [],
+			pendingToolResults: [],
 			didCompleteReadingStream: false,
 			didRejectTool: false,
 			didAlreadyUseTool: false,
@@ -62,13 +63,13 @@ describe("presentAssistantMessage - Unknown Tool Handling", () => {
 
 		// Add pushToolResultToUserContent method after mockTask is created so 'this' binds correctly
 		mockTask.pushToolResultToUserContent = vi.fn().mockImplementation((toolResult: any) => {
-			const existingResult = mockTask.userMessageContent.find(
-				(block: any) => block.type === "tool_result" && block.tool_use_id === toolResult.tool_use_id,
+			const existingResult = mockTask.pendingToolResults.find(
+				(block: any) => block.type === "tool-result" && block.toolCallId === toolResult.toolCallId,
 			)
 			if (existingResult) {
 				return false
 			}
-			mockTask.userMessageContent.push(toolResult)
+			mockTask.pendingToolResults.push(toolResult)
 			return true
 		})
 	})
@@ -89,17 +90,17 @@ describe("presentAssistantMessage - Unknown Tool Handling", () => {
 		// Execute presentAssistantMessage
 		await presentAssistantMessage(mockTask)
 
-		// Verify that a tool_result with error was pushed
-		const toolResult = mockTask.userMessageContent.find(
-			(item: any) => item.type === "tool_result" && item.tool_use_id === toolCallId,
+		// Verify that a tool-result with error was pushed to pendingToolResults
+		const toolResult = mockTask.pendingToolResults.find(
+			(item: any) => item.type === "tool-result" && item.toolCallId === toolCallId,
 		)
 
 		expect(toolResult).toBeDefined()
-		expect(toolResult.tool_use_id).toBe(toolCallId)
-		// The error is wrapped in JSON by formatResponse.toolError
-		expect(toolResult.content).toContain("nonexistent_tool")
-		expect(toolResult.content).toContain("does not exist")
-		expect(toolResult.content).toContain("error")
+		expect(toolResult.toolCallId).toBe(toolCallId)
+		// The error is wrapped in output.value by formatResponse.toolError
+		expect(toolResult.output.value).toContain("nonexistent_tool")
+		expect(toolResult.output.value).toContain("does not exist")
+		expect(toolResult.output.value).toContain("error")
 
 		// Verify consecutiveMistakeCount was incremented
 		expect(mockTask.consecutiveMistakeCount).toBe(1)
@@ -169,9 +170,9 @@ describe("presentAssistantMessage - Unknown Tool Handling", () => {
 		const completed = await Promise.race([resultPromise, timeoutPromise])
 		expect(completed).toBe(true)
 
-		// Verify a tool_result was pushed (critical for API not to freeze)
-		const toolResult = mockTask.userMessageContent.find(
-			(item: any) => item.type === "tool_result" && item.tool_use_id === toolCallId,
+		// Verify a tool-result was pushed (critical for API not to freeze)
+		const toolResult = mockTask.pendingToolResults.find(
+			(item: any) => item.type === "tool-result" && item.toolCallId === toolCallId,
 		)
 		expect(toolResult).toBeDefined()
 	})
@@ -233,13 +234,13 @@ describe("presentAssistantMessage - Unknown Tool Handling", () => {
 
 		await presentAssistantMessage(mockTask)
 
-		// When didRejectTool is true, should send error tool_result
-		const toolResult = mockTask.userMessageContent.find(
-			(item: any) => item.type === "tool_result" && item.tool_use_id === toolCallId,
+		// When didRejectTool is true, should send error tool-result
+		const toolResult = mockTask.pendingToolResults.find(
+			(item: any) => item.type === "tool-result" && item.toolCallId === toolCallId,
 		)
 
 		expect(toolResult).toBeDefined()
-		expect(toolResult.is_error).toBe(true)
-		expect(toolResult.content).toContain("due to user rejecting a previous tool")
+		expect(toolResult.output.value).toContain("[ERROR]")
+		expect(toolResult.output.value).toContain("due to user rejecting a previous tool")
 	})
 })

+ 36 - 30
src/core/assistant-message/presentAssistantMessage.ts

@@ -1,6 +1,7 @@
 import { serializeError } from "serialize-error"
 import { Anthropic } from "@anthropic-ai/sdk"
 
+import type { ImagePart, ToolResultPart } from "../task-persistence"
 import type { ToolName, ClineAsk, ToolProgressStatus } from "@roo-code/types"
 import { ConsecutiveMistakeError, TelemetryEventName } from "@roo-code/types"
 import { TelemetryService } from "@roo-code/telemetry"
@@ -118,10 +119,10 @@ export async function presentAssistantMessage(cline: Task) {
 
 				if (toolCallId) {
 					cline.pushToolResultToUserContent({
-						type: "tool_result",
-						tool_use_id: sanitizeToolUseId(toolCallId),
-						content: errorMessage,
-						is_error: true,
+						type: "tool-result",
+						toolCallId: sanitizeToolUseId(toolCallId),
+						toolName: mcpBlock.name,
+						output: { type: "text", value: `[ERROR] ${errorMessage}` },
 					})
 				}
 				break
@@ -143,13 +144,13 @@ export async function presentAssistantMessage(cline: Task) {
 				}
 
 				let resultContent: string
-				let imageBlocks: Anthropic.ImageBlockParam[] = []
+				let imageBlocks: ImagePart[] = []
 
 				if (typeof content === "string") {
 					resultContent = content || "(tool did not return anything)"
 				} else {
 					const textBlocks = content.filter((item) => item.type === "text")
-					imageBlocks = content.filter((item) => item.type === "image") as Anthropic.ImageBlockParam[]
+					imageBlocks = content.filter((item) => item.type === "image") as ImagePart[]
 					resultContent =
 						textBlocks.map((item) => (item as Anthropic.TextBlockParam).text).join("\n") ||
 						"(tool did not return anything)"
@@ -169,9 +170,10 @@ export async function presentAssistantMessage(cline: Task) {
 
 				if (toolCallId) {
 					cline.pushToolResultToUserContent({
-						type: "tool_result",
-						tool_use_id: sanitizeToolUseId(toolCallId),
-						content: resultContent,
+						type: "tool-result",
+						toolCallId: sanitizeToolUseId(toolCallId),
+						toolName: mcpBlock.name,
+						output: { type: "text", value: resultContent },
 					})
 
 					if (imageBlocks.length > 0) {
@@ -399,10 +401,10 @@ export async function presentAssistantMessage(cline: Task) {
 					: `Tool ${toolDescription()} was interrupted and not executed due to user rejecting a previous tool.`
 
 				cline.pushToolResultToUserContent({
-					type: "tool_result",
-					tool_use_id: sanitizeToolUseId(toolCallId),
-					content: errorMessage,
-					is_error: true,
+					type: "tool-result",
+					toolCallId: sanitizeToolUseId(toolCallId),
+					toolName: block.name,
+					output: { type: "text", value: `[ERROR] ${errorMessage}` },
 				})
 
 				break
@@ -436,10 +438,10 @@ export async function presentAssistantMessage(cline: Task) {
 					// Push tool_result directly without setting didAlreadyUseTool so streaming can
 					// continue gracefully.
 					cline.pushToolResultToUserContent({
-						type: "tool_result",
-						tool_use_id: sanitizeToolUseId(toolCallId),
-						content: formatResponse.toolError(errorMessage),
-						is_error: true,
+						type: "tool-result",
+						toolCallId: sanitizeToolUseId(toolCallId),
+						toolName: block.name,
+						output: { type: "text", value: `[ERROR] ${formatResponse.toolError(errorMessage)}` },
 					})
 
 					break
@@ -459,13 +461,13 @@ export async function presentAssistantMessage(cline: Task) {
 				}
 
 				let resultContent: string
-				let imageBlocks: Anthropic.ImageBlockParam[] = []
+				let imageBlocks: ImagePart[] = []
 
 				if (typeof content === "string") {
 					resultContent = content || "(tool did not return anything)"
 				} else {
 					const textBlocks = content.filter((item) => item.type === "text")
-					imageBlocks = content.filter((item) => item.type === "image") as Anthropic.ImageBlockParam[]
+					imageBlocks = content.filter((item) => item.type === "image") as ImagePart[]
 					resultContent =
 						textBlocks.map((item) => (item as Anthropic.TextBlockParam).text).join("\n") ||
 						"(tool did not return anything)"
@@ -482,9 +484,10 @@ export async function presentAssistantMessage(cline: Task) {
 				}
 
 				cline.pushToolResultToUserContent({
-					type: "tool_result",
-					tool_use_id: sanitizeToolUseId(toolCallId),
-					content: resultContent,
+					type: "tool-result",
+					toolCallId: sanitizeToolUseId(toolCallId),
+					toolName: block.name,
+					output: { type: "text", value: resultContent },
 				})
 
 				if (imageBlocks.length > 0) {
@@ -644,10 +647,13 @@ export async function presentAssistantMessage(cline: Task) {
 					const errorContent = formatResponse.toolError(error.message)
 					// Push tool_result directly without setting didAlreadyUseTool
 					cline.pushToolResultToUserContent({
-						type: "tool_result",
-						tool_use_id: sanitizeToolUseId(toolCallId),
-						content: typeof errorContent === "string" ? errorContent : "(validation error)",
-						is_error: true,
+						type: "tool-result",
+						toolCallId: sanitizeToolUseId(toolCallId),
+						toolName: block.name,
+						output: {
+							type: "text",
+							value: `[ERROR] ${typeof errorContent === "string" ? errorContent : "(validation error)"}`,
+						},
 					})
 
 					break
@@ -948,10 +954,10 @@ export async function presentAssistantMessage(cline: Task) {
 					// Push tool_result directly WITHOUT setting didAlreadyUseTool
 					// This prevents the stream from being interrupted with "Response interrupted by tool use result"
 					cline.pushToolResultToUserContent({
-						type: "tool_result",
-						tool_use_id: sanitizeToolUseId(toolCallId),
-						content: formatResponse.toolError(errorMessage),
-						is_error: true,
+						type: "tool-result",
+						toolCallId: sanitizeToolUseId(toolCallId),
+						toolName: block.name,
+						output: { type: "text", value: `[ERROR] ${formatResponse.toolError(errorMessage)}` },
 					})
 					break
 				}
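
Each call site above pushes the same AI SDK `ToolResultPart` shape, with the old `is_error: true` flag folded into the text value as an `[ERROR]` prefix. Schematically (id and value illustrative):

	cline.pushToolResultToUserContent({
		type: "tool-result",
		toolCallId: sanitizeToolUseId("call_abc123"),
		toolName: "read_file",
		// Success: plain text. Failure: the same field with an "[ERROR] ..." prefix
		// instead of a separate is_error flag.
		output: { type: "text", value: "[ERROR] Tool execution failed" },
	})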

+ 31 - 33
src/core/condense/__tests__/condense.spec.ts

@@ -1,11 +1,9 @@
 // npx vitest src/core/condense/__tests__/condense.spec.ts
 
-import { Anthropic } from "@anthropic-ai/sdk"
 import type { ModelInfo } from "@roo-code/types"
 import { TelemetryService } from "@roo-code/telemetry"
 
 import { BaseProvider } from "../../../api/providers/base-provider"
-import { ApiMessage } from "../../task-persistence/apiMessages"
 import {
 	summarizeConversation,
 	getMessagesSinceLastSummary,
@@ -41,7 +39,7 @@ class MockApiHandler extends BaseProvider {
 		}
 	}
 
-	override async countTokens(content: Array<Anthropic.Messages.ContentBlockParam>): Promise<number> {
+	override async countTokens(content: Array<any>): Promise<number> {
 		// Simple token counting for testing
 		let tokens = 0
 		for (const block of content) {
@@ -65,7 +63,7 @@ describe("Condense", () => {
 
 	describe("extractCommandBlocks", () => {
 		it("should extract command blocks from string content", () => {
-			const message: ApiMessage = {
+			const message: any = {
 				role: "user",
 				content: 'Some text <command name="prr">/prr #123</command> more text',
 			}
@@ -75,7 +73,7 @@ describe("Condense", () => {
 		})
 
 		it("should extract multiple command blocks", () => {
-			const message: ApiMessage = {
+			const message: any = {
 				role: "user",
 				content: '<command name="prr">/prr #123</command> text <command name="mode">/mode code</command>',
 			}
@@ -85,7 +83,7 @@ describe("Condense", () => {
 		})
 
 		it("should extract command blocks from array content", () => {
-			const message: ApiMessage = {
+			const message: any = {
 				role: "user",
 				content: [
 					{ type: "text", text: "Some user text" },
@@ -98,7 +96,7 @@ describe("Condense", () => {
 		})
 
 		it("should return empty string when no command blocks found", () => {
-			const message: ApiMessage = {
+			const message: any = {
 				role: "user",
 				content: "Just regular text without commands",
 			}
@@ -108,7 +106,7 @@ describe("Condense", () => {
 		})
 
 		it("should handle multiline command blocks", () => {
-			const message: ApiMessage = {
+			const message: any = {
 				role: "user",
 				content: `<command name="prr">
 Line 1
@@ -124,7 +122,7 @@ Line 2
 
 	describe("summarizeConversation", () => {
 		it("should create a summary message with role user (fresh start model)", async () => {
-			const messages: ApiMessage[] = [
+			const messages: any[] = [
 				{ role: "user", content: "First message with /prr command content" },
 				{ role: "assistant", content: "Second message" },
 				{ role: "user", content: "Third message" },
@@ -147,22 +145,22 @@ Line 2
 			// Verify we have a summary message with role "user" (fresh start model)
 			const summaryMessage = result.messages.find((msg) => msg.isSummary)
 			expect(summaryMessage).toBeTruthy()
-			expect(summaryMessage!.role).toBe("user")
-			expect(Array.isArray(summaryMessage!.content)).toBe(true)
-			const contentArray = summaryMessage!.content as any[]
+			expect((summaryMessage as any).role).toBe("user")
+			expect(Array.isArray((summaryMessage as any).content)).toBe(true)
+			const contentArray = (summaryMessage as any).content as any[]
 			expect(contentArray.some((b) => b.type === "text")).toBe(true)
 			// Should NOT have reasoning blocks (no longer needed for user messages)
 			expect(contentArray.some((b) => b.type === "reasoning")).toBe(false)
 
 			// Fresh start model: effective history should only contain the summary
-			const effectiveHistory = getEffectiveApiHistory(result.messages)
+			const effectiveHistory = getEffectiveApiHistory(result.messages as any)
 			expect(effectiveHistory.length).toBe(1)
 			expect(effectiveHistory[0].isSummary).toBe(true)
-			expect(effectiveHistory[0].role).toBe("user")
+			expect((effectiveHistory[0] as any).role).toBe("user")
 		})
 
 		it("should tag ALL messages with condenseParent", async () => {
-			const messages: ApiMessage[] = [
+			const messages: any[] = [
 				{ role: "user", content: "First message with /prr command content" },
 				{ role: "assistant", content: "Second message" },
 				{ role: "user", content: "Third message" },
@@ -187,7 +185,7 @@ Line 2
 		})
 
 		it("should preserve <command> blocks in the summary", async () => {
-			const messages: ApiMessage[] = [
+			const messages: any[] = [
 				{
 					role: "user",
 					content: [
@@ -216,7 +214,7 @@ Line 2
 			const summaryMessage = result.messages.find((msg) => msg.isSummary)
 			expect(summaryMessage).toBeTruthy()
 
-			const contentArray = summaryMessage!.content as any[]
+			const contentArray = (summaryMessage as any).content as any[]
 			// Summary content is split into separate text blocks:
 			// - First block: "## Conversation Summary\n..."
 			// - Second block: "<system-reminder>..." with command blocks
@@ -228,12 +226,12 @@ Line 2
 		})
 
 		it("should handle complex first message content", async () => {
-			const complexContent: Anthropic.Messages.ContentBlockParam[] = [
+			const complexContent: any[] = [
 				{ type: "text", text: "/mode code" },
 				{ type: "text", text: "Additional context from the user" },
 			]
 
-			const messages: ApiMessage[] = [
+			const messages: any[] = [
 				{ role: "user", content: complexContent },
 				{ role: "assistant", content: "Switching to code mode" },
 				{ role: "user", content: "Write a function" },
@@ -254,14 +252,14 @@ Line 2
 			})
 
 			// Effective history should contain only the summary (fresh start)
-			const effectiveHistory = getEffectiveApiHistory(result.messages)
+			const effectiveHistory = getEffectiveApiHistory(result.messages as any)
 			expect(effectiveHistory).toHaveLength(1)
 			expect(effectiveHistory[0].isSummary).toBe(true)
-			expect(effectiveHistory[0].role).toBe("user")
+			expect((effectiveHistory[0] as any).role).toBe("user")
 		})
 
 		it("should return error when not enough messages to summarize", async () => {
-			const messages: ApiMessage[] = [{ role: "user", content: "Only one message" }]
+			const messages: any[] = [{ role: "user", content: "Only one message" }]
 
 			const result = await summarizeConversation({
 				messages,
@@ -278,7 +276,7 @@ Line 2
 		})
 
 		it("should not summarize messages that already contain a recent summary with no new messages", async () => {
-			const messages: ApiMessage[] = [
+			const messages: any[] = [
 				{ role: "user", content: "First message with /command" },
 				{ role: "user", content: "Previous summary", isSummary: true },
 			]
@@ -312,7 +310,7 @@ Line 2
 			}
 
 			const emptyHandler = new EmptyMockApiHandler()
-			const messages: ApiMessage[] = [
+			const messages: any[] = [
 				{ role: "user", content: "First message" },
 				{ role: "assistant", content: "Second" },
 				{ role: "user", content: "Third" },
@@ -339,7 +337,7 @@ Line 2
 	describe("getEffectiveApiHistory", () => {
 		it("should return only summary when summary exists (fresh start)", () => {
 			const condenseId = "test-condense-id"
-			const messages: ApiMessage[] = [
+			const messages: any[] = [
 				{ role: "user", content: "First", condenseParent: condenseId },
 				{ role: "assistant", content: "Second", condenseParent: condenseId },
 				{ role: "user", content: "Third", condenseParent: condenseId },
@@ -359,7 +357,7 @@ Line 2
 
 		it("should include messages after summary in fresh start model", () => {
 			const condenseId = "test-condense-id"
-			const messages: ApiMessage[] = [
+			const messages: any[] = [
 				{ role: "user", content: "First", condenseParent: condenseId },
 				{ role: "assistant", content: "Second", condenseParent: condenseId },
 				{
@@ -376,12 +374,12 @@ Line 2
 
 			expect(result).toHaveLength(3)
 			expect(result[0].isSummary).toBe(true)
-			expect(result[1].content).toBe("New response after summary")
-			expect(result[2].content).toBe("New user message")
+			expect((result[1] as any).content).toBe("New response after summary")
+			expect((result[2] as any).content).toBe("New user message")
 		})
 
 		it("should return all messages when no summary exists", () => {
-			const messages: ApiMessage[] = [
+			const messages: any[] = [
 				{ role: "user", content: "First" },
 				{ role: "assistant", content: "Second" },
 				{ role: "user", content: "Third" },
@@ -397,7 +395,7 @@ Line 2
 			// The cleanupAfterTruncation function would normally clear these,
 			// but even without cleanup, getEffectiveApiHistory should handle orphaned tags
 			const orphanedCondenseId = "deleted-summary-id"
-			const messages: ApiMessage[] = [
+			const messages: any[] = [
 				{ role: "user", content: "First", condenseParent: orphanedCondenseId },
 				{ role: "assistant", content: "Second", condenseParent: orphanedCondenseId },
 				{ role: "user", content: "Third", condenseParent: orphanedCondenseId },
@@ -413,7 +411,7 @@ Line 2
 
 	describe("getMessagesSinceLastSummary", () => {
 		it("should return all messages when no summary exists", () => {
-			const messages: ApiMessage[] = [
+			const messages: any[] = [
 				{ role: "user", content: "First message" },
 				{ role: "assistant", content: "Second message" },
 				{ role: "user", content: "Third message" },
@@ -424,7 +422,7 @@ Line 2
 		})
 
 		it("should return messages since last summary including the summary", () => {
-			const messages: ApiMessage[] = [
+			const messages: any[] = [
 				{ role: "user", content: "First message" },
 				{ role: "assistant", content: "Second message" },
 				{ role: "user", content: "Summary content", isSummary: true },
@@ -440,7 +438,7 @@ Line 2
 		})
 
 		it("should handle multiple summaries and return from the last one", () => {
-			const messages: ApiMessage[] = [
+			const messages: any[] = [
 				{ role: "user", content: "First message" },
 				{ role: "user", content: "First summary", isSummary: true },
 				{ role: "assistant", content: "Middle message" },

+ 2 - 2
src/core/condense/__tests__/foldedFileContext.spec.ts

@@ -323,7 +323,7 @@ describe("foldedFileContext", () => {
 			expect(summaryMessage).toBeDefined()
 
 			// Each file should have its own content block
-			const contentArray = summaryMessage!.content as any[]
+			const contentArray = (summaryMessage as any).content as any[]
 
 			// Find the content blocks containing file contexts
 			const userFileBlock = contentArray.find(
@@ -381,7 +381,7 @@ describe("foldedFileContext", () => {
 			expect(summaryMessage).toBeDefined()
 
 			// The summary content should NOT contain any file context blocks
-			const contentArray = summaryMessage!.content as any[]
+			const contentArray = (summaryMessage as any).content as any[]
 			const fileContextBlock = contentArray.find(
 				(block: any) => block.type === "text" && block.text?.includes("## File Context"),
 			)

+ 113 - 105
src/core/condense/__tests__/index.spec.ts

@@ -2,11 +2,10 @@
 
 import type { Mock } from "vitest"
 
-import { Anthropic } from "@anthropic-ai/sdk"
 import { TelemetryService } from "@roo-code/telemetry"
 
 import { ApiHandler } from "../../../api"
-import { ApiMessage } from "../../task-persistence/apiMessages"
+import { RooMessage } from "../../task-persistence/rooMessage"
 import { maybeRemoveImageBlocks } from "../../../api/transform/image-cleaning"
 import {
 	summarizeConversation,
@@ -22,7 +21,7 @@ import {
 } from "../index"
 
 vi.mock("../../../api/transform/image-cleaning", () => ({
-	maybeRemoveImageBlocks: vi.fn((messages: ApiMessage[], _apiHandler: ApiHandler) => [...messages]),
+	maybeRemoveImageBlocks: vi.fn((messages: RooMessage[], _apiHandler: ApiHandler) => [...messages]),
 }))
 
 vi.mock("@roo-code/telemetry", () => ({
@@ -37,7 +36,7 @@ const taskId = "test-task-id"
 
 describe("extractCommandBlocks", () => {
 	it("should extract command blocks from string content", () => {
-		const message: ApiMessage = {
+		const message: any = {
 			role: "user",
 			content: 'Some text <command name="prr">/prr #123</command> more text',
 		}
@@ -47,7 +46,7 @@ describe("extractCommandBlocks", () => {
 	})
 
 	it("should extract multiple command blocks", () => {
-		const message: ApiMessage = {
+		const message: any = {
 			role: "user",
 			content: '<command name="prr">/prr #123</command> text <command name="mode">/mode code</command>',
 		}
@@ -57,7 +56,7 @@ describe("extractCommandBlocks", () => {
 	})
 
 	it("should extract command blocks from array content", () => {
-		const message: ApiMessage = {
+		const message: any = {
 			role: "user",
 			content: [
 				{ type: "text", text: "Some user text" },
@@ -70,7 +69,7 @@ describe("extractCommandBlocks", () => {
 	})
 
 	it("should return empty string when no command blocks found", () => {
-		const message: ApiMessage = {
+		const message: any = {
 			role: "user",
 			content: "Just regular text without commands",
 		}
@@ -80,7 +79,7 @@ describe("extractCommandBlocks", () => {
 	})
 
 	it("should handle multiline command blocks", () => {
-		const message: ApiMessage = {
+		const message: any = {
 			role: "user",
 			content: `<command name="prr">
 Line 1
@@ -94,7 +93,7 @@ Line 2
 	})
 
 	it("should handle command blocks with attributes", () => {
-		const message: ApiMessage = {
+		const message: any = {
 			role: "user",
 			content: '<command name="test" attr1="value1" attr2="value2">content</command>',
 		}
@@ -107,7 +106,7 @@ Line 2
 
 describe("injectSyntheticToolResults", () => {
 	it("should return messages unchanged when no orphan tool_calls exist", () => {
-		const messages: ApiMessage[] = [
+		const messages: any[] = [
 			{ role: "user", content: "Hello", ts: 1 },
 			{
 				role: "assistant",
@@ -126,7 +125,7 @@ describe("injectSyntheticToolResults", () => {
 	})
 
 	it("should inject synthetic tool_result for orphan tool_call", () => {
-		const messages: ApiMessage[] = [
+		const messages: any[] = [
 			{ role: "user", content: "Hello", ts: 1 },
 			{
 				role: "assistant",
@@ -141,17 +140,17 @@ describe("injectSyntheticToolResults", () => {
 		const result = injectSyntheticToolResults(messages)
 
 		expect(result.length).toBe(3)
-		expect(result[2].role).toBe("user")
+		expect((result[2] as any).role).toBe("tool")
 
-		const content = result[2].content as any[]
+		const content = (result[2] as any).content as any[]
 		expect(content.length).toBe(1)
-		expect(content[0].type).toBe("tool_result")
-		expect(content[0].tool_use_id).toBe("tool-orphan")
-		expect(content[0].content).toBe("Context condensation triggered. Tool execution deferred.")
+		expect(content[0].type).toBe("tool-result")
+		expect(content[0].toolCallId).toBe("tool-orphan")
+		expect(content[0].output.value).toBe("Context condensation triggered. Tool execution deferred.")
 	})
 
 	it("should inject synthetic tool_results for multiple orphan tool_calls", () => {
-		const messages: ApiMessage[] = [
+		const messages: any[] = [
 			{ role: "user", content: "Hello", ts: 1 },
 			{
 				role: "assistant",
@@ -167,14 +166,14 @@ describe("injectSyntheticToolResults", () => {
 		const result = injectSyntheticToolResults(messages)
 
 		expect(result.length).toBe(3)
-		const content = result[2].content as any[]
+		const content = (result[2] as any).content as any[]
 		expect(content.length).toBe(2)
-		expect(content[0].tool_use_id).toBe("tool-1")
-		expect(content[1].tool_use_id).toBe("tool-2")
+		expect(content[0].toolCallId).toBe("tool-1")
+		expect(content[1].toolCallId).toBe("tool-2")
 	})
 
 	it("should only inject for orphan tool_calls, not matched ones", () => {
-		const messages: ApiMessage[] = [
+		const messages: any[] = [
 			{ role: "user", content: "Hello", ts: 1 },
 			{
 				role: "assistant",
@@ -195,13 +194,13 @@ describe("injectSyntheticToolResults", () => {
 		const result = injectSyntheticToolResults(messages)
 
 		expect(result.length).toBe(4)
-		const syntheticContent = result[3].content as any[]
+		const syntheticContent = (result[3] as any).content as any[]
 		expect(syntheticContent.length).toBe(1)
-		expect(syntheticContent[0].tool_use_id).toBe("orphan-tool")
+		expect(syntheticContent[0].toolCallId).toBe("orphan-tool")
 	})
 
 	it("should handle messages with string content (no tool_use/tool_result)", () => {
-		const messages: ApiMessage[] = [
+		const messages: any[] = [
 			{ role: "user", content: "Hello", ts: 1 },
 			{ role: "assistant", content: "Hi there!", ts: 2 },
 		]
@@ -216,7 +215,7 @@ describe("injectSyntheticToolResults", () => {
 	})
 
 	it("should handle tool_results spread across multiple user messages", () => {
-		const messages: ApiMessage[] = [
+		const messages: any[] = [
 			{ role: "user", content: "Hello", ts: 1 },
 			{
 				role: "assistant",
@@ -246,7 +245,7 @@ describe("injectSyntheticToolResults", () => {
 
 describe("getMessagesSinceLastSummary", () => {
 	it("should return all messages when there is no summary", () => {
-		const messages: ApiMessage[] = [
+		const messages: any[] = [
 			{ role: "user", content: "Hello", ts: 1 },
 			{ role: "assistant", content: "Hi there", ts: 2 },
 			{ role: "user", content: "How are you?", ts: 3 },
@@ -257,7 +256,7 @@ describe("getMessagesSinceLastSummary", () => {
 	})
 
 	it("should return messages since the last summary", () => {
-		const messages: ApiMessage[] = [
+		const messages: any[] = [
 			{ role: "user", content: "Hello", ts: 1 },
 			{ role: "assistant", content: "Hi there", ts: 2 },
 			{ role: "user", content: "Summary of conversation", ts: 3, isSummary: true },
@@ -274,7 +273,7 @@ describe("getMessagesSinceLastSummary", () => {
 	})
 
 	it("should handle multiple summary messages and return since the last one", () => {
-		const messages: ApiMessage[] = [
+		const messages: any[] = [
 			{ role: "user", content: "Hello", ts: 1 },
 			{ role: "user", content: "First summary", ts: 2, isSummary: true },
 			{ role: "assistant", content: "How are you?", ts: 3 },
@@ -295,7 +294,7 @@ describe("getMessagesSinceLastSummary", () => {
 	})
 
 	it("should return messages from user summary (fresh start model)", () => {
-		const messages: ApiMessage[] = [
+		const messages: any[] = [
 			{ role: "user", content: "Hello", ts: 1, condenseParent: "cond-1" },
 			{ role: "assistant", content: "Hi there", ts: 2, condenseParent: "cond-1" },
 			{ role: "user", content: "Summary content", ts: 3, isSummary: true, condenseId: "cond-1" },
@@ -304,14 +303,14 @@ describe("getMessagesSinceLastSummary", () => {
 
 		const result = getMessagesSinceLastSummary(messages)
 		expect(result[0].isSummary).toBe(true)
-		expect(result[0].role).toBe("user")
+		expect((result[0] as any).role).toBe("user")
 	})
 })
 
 describe("getEffectiveApiHistory", () => {
 	it("should return only summary when summary exists (fresh start model)", () => {
 		const condenseId = "test-condense-id"
-		const messages: ApiMessage[] = [
+		const messages: any[] = [
 			{ role: "user", content: "First", condenseParent: condenseId },
 			{ role: "assistant", content: "Second", condenseParent: condenseId },
 			{ role: "user", content: "Third", condenseParent: condenseId },
@@ -331,7 +330,7 @@ describe("getEffectiveApiHistory", () => {
 
 	it("should include messages after summary in fresh start model", () => {
 		const condenseId = "test-condense-id"
-		const messages: ApiMessage[] = [
+		const messages: any[] = [
 			{ role: "user", content: "First", condenseParent: condenseId },
 			{ role: "assistant", content: "Second", condenseParent: condenseId },
 			{
@@ -348,12 +347,12 @@ describe("getEffectiveApiHistory", () => {
 
 		expect(result).toHaveLength(3)
 		expect(result[0].isSummary).toBe(true)
-		expect(result[1].content).toBe("New response after summary")
-		expect(result[2].content).toBe("New user message")
+		expect((result[1] as any).content).toBe("New response after summary")
+		expect((result[2] as any).content).toBe("New user message")
 	})
 
 	it("should return all messages when no summary exists", () => {
-		const messages: ApiMessage[] = [
+		const messages: any[] = [
 			{ role: "user", content: "First" },
 			{ role: "assistant", content: "Second" },
 			{ role: "user", content: "Third" },
@@ -366,7 +365,7 @@ describe("getEffectiveApiHistory", () => {
 
 	it("should restore messages when summary is deleted (rewind - orphaned condenseParent)", () => {
 		const orphanedCondenseId = "deleted-summary-id"
-		const messages: ApiMessage[] = [
+		const messages: any[] = [
 			{ role: "user", content: "First", condenseParent: orphanedCondenseId },
 			{ role: "assistant", content: "Second", condenseParent: orphanedCondenseId },
 			{ role: "user", content: "Third", condenseParent: orphanedCondenseId },
@@ -382,7 +381,7 @@ describe("getEffectiveApiHistory", () => {
 	it("should filter out truncated messages within summary range", () => {
 		const condenseId = "cond-1"
 		const truncationId = "trunc-1"
-		const messages: ApiMessage[] = [
+		const messages: any[] = [
 			{ role: "user", content: "First", condenseParent: condenseId },
 			{
 				role: "user",
@@ -406,12 +405,12 @@ describe("getEffectiveApiHistory", () => {
 		expect(result).toHaveLength(3)
 		expect(result[0].isSummary).toBe(true)
 		expect(result[1].isTruncationMarker).toBe(true)
-		expect(result[2].content).toBe("After truncation")
+		expect((result[2] as any).content).toBe("After truncation")
 	})
 
 	it("should filter out orphan tool_result blocks after fresh start condensation", () => {
 		const condenseId = "cond-1"
-		const messages: ApiMessage[] = [
+		const messages: any[] = [
 			{ role: "user", content: "Hello", condenseParent: condenseId },
 			{
 				role: "assistant",
@@ -443,7 +442,7 @@ describe("getEffectiveApiHistory", () => {
 
 	it("should keep tool_result blocks that have matching tool_use in fresh start", () => {
 		const condenseId = "cond-1"
-		const messages: ApiMessage[] = [
+		const messages: any[] = [
 			{ role: "user", content: "Hello", condenseParent: condenseId },
 			{
 				role: "user",
@@ -451,12 +450,14 @@ describe("getEffectiveApiHistory", () => {
 				isSummary: true,
 				condenseId,
 			},
-			// This tool_use is AFTER the summary, so it's not condensed away
+			// This tool-call is AFTER the summary, so it's not condensed away
 			{
 				role: "assistant",
-				content: [{ type: "tool_use", id: "tool-valid", name: "read_file", input: { path: "test.ts" } }],
+				content: [
+					{ type: "tool-call", toolCallId: "tool-valid", toolName: "read_file", input: { path: "test.ts" } },
+				],
 			},
-			// This tool_result has a matching tool_use, so it should be kept
+			// This tool_result has a matching tool-call, so it should be kept (legacy user message format)
 			{
 				role: "user",
 				content: [{ type: "tool_result", tool_use_id: "tool-valid", content: "file contents" }],
@@ -468,18 +469,23 @@ describe("getEffectiveApiHistory", () => {
 		// All messages after summary should be included
 		expect(result).toHaveLength(3)
 		expect(result[0].isSummary).toBe(true)
-		expect((result[1].content as any[])[0].id).toBe("tool-valid")
-		expect((result[2].content as any[])[0].tool_use_id).toBe("tool-valid")
+		expect(((result[1] as any).content as any[])[0].toolCallId).toBe("tool-valid")
+		expect(((result[2] as any).content as any[])[0].tool_use_id).toBe("tool-valid")
 	})
 
 	it("should filter orphan tool_results but keep other content in mixed user message", () => {
 		const condenseId = "cond-1"
-		const messages: ApiMessage[] = [
+		const messages: any[] = [
 			{ role: "user", content: "Hello", condenseParent: condenseId },
 			{
 				role: "assistant",
 				content: [
-					{ type: "tool_use", id: "tool-orphan", name: "attempt_completion", input: { result: "Done" } },
+					{
+						type: "tool-call",
+						toolCallId: "tool-orphan",
+						toolName: "attempt_completion",
+						input: { result: "Done" },
+					},
 				],
 				condenseParent: condenseId,
 			},
@@ -489,12 +495,14 @@ describe("getEffectiveApiHistory", () => {
 				isSummary: true,
 				condenseId,
 			},
-			// This tool_use is AFTER the summary
+			// This tool-call is AFTER the summary
 			{
 				role: "assistant",
-				content: [{ type: "tool_use", id: "tool-valid", name: "read_file", input: { path: "test.ts" } }],
+				content: [
+					{ type: "tool-call", toolCallId: "tool-valid", toolName: "read_file", input: { path: "test.ts" } },
+				],
 			},
-			// Mixed content: one orphan tool_result and one valid tool_result
+			// Mixed content: one orphan tool_result and one valid tool_result (legacy user message format)
 			{
 				role: "user",
 				content: [
@@ -506,18 +514,18 @@ describe("getEffectiveApiHistory", () => {
 
 		const result = getEffectiveApiHistory(messages)
 
-		// Summary + assistant with tool_use + filtered user message
+		// Summary + assistant with tool-call + filtered user message
 		expect(result).toHaveLength(3)
 		expect(result[0].isSummary).toBe(true)
 		// The user message should only contain the valid tool_result
-		const userContent = result[2].content as any[]
+		const userContent = (result[2] as any).content as any[]
 		expect(userContent).toHaveLength(1)
 		expect(userContent[0].tool_use_id).toBe("tool-valid")
 	})
 
 	it("should handle multiple orphan tool_results in a single message", () => {
 		const condenseId = "cond-1"
-		const messages: ApiMessage[] = [
+		const messages: any[] = [
 			{
 				role: "assistant",
 				content: [
@@ -551,7 +559,7 @@ describe("getEffectiveApiHistory", () => {
 
 	it("should preserve non-tool_result content in user messages", () => {
 		const condenseId = "cond-1"
-		const messages: ApiMessage[] = [
+		const messages: any[] = [
 			{
 				role: "assistant",
 				content: [
@@ -580,7 +588,7 @@ describe("getEffectiveApiHistory", () => {
 		// Summary + user message with only text (orphan tool_result filtered)
 		expect(result).toHaveLength(2)
 		expect(result[0].isSummary).toBe(true)
-		const userContent = result[1].content as any[]
+		const userContent = (result[1] as any).content as any[]
 		expect(userContent).toHaveLength(1)
 		expect(userContent[0].type).toBe("text")
 		expect(userContent[0].text).toBe("User added some text")
@@ -590,7 +598,7 @@ describe("getEffectiveApiHistory", () => {
 describe("cleanupAfterTruncation", () => {
 	it("should clear orphaned condenseParent references", () => {
 		const orphanedCondenseId = "deleted-summary"
-		const messages: ApiMessage[] = [
+		const messages: any[] = [
 			{ role: "user", content: "First", condenseParent: orphanedCondenseId },
 			{ role: "assistant", content: "Second", condenseParent: orphanedCondenseId },
 			{ role: "user", content: "Third" },
@@ -605,7 +613,7 @@ describe("cleanupAfterTruncation", () => {
 
 	it("should keep condenseParent when summary still exists", () => {
 		const condenseId = "existing-summary"
-		const messages: ApiMessage[] = [
+		const messages: any[] = [
 			{ role: "user", content: "First", condenseParent: condenseId },
 			{ role: "assistant", content: "Second", condenseParent: condenseId },
 			{
@@ -624,7 +632,7 @@ describe("cleanupAfterTruncation", () => {
 
 	it("should clear orphaned truncationParent references", () => {
 		const orphanedTruncationId = "deleted-truncation"
-		const messages: ApiMessage[] = [
+		const messages: any[] = [
 			{ role: "user", content: "First", truncationParent: orphanedTruncationId },
 			{ role: "assistant", content: "Second" },
 		]
@@ -636,7 +644,7 @@ describe("cleanupAfterTruncation", () => {
 
 	it("should keep truncationParent when marker still exists", () => {
 		const truncationId = "existing-truncation"
-		const messages: ApiMessage[] = [
+		const messages: any[] = [
 			{ role: "user", content: "First", truncationParent: truncationId },
 			{
 				role: "assistant",
@@ -654,7 +662,7 @@ describe("cleanupAfterTruncation", () => {
 	it("should handle mixed orphaned and valid references", () => {
 		const validCondenseId = "valid-cond"
 		const orphanedCondenseId = "orphaned-cond"
-		const messages: ApiMessage[] = [
+		const messages: any[] = [
 			{ role: "user", content: "First", condenseParent: orphanedCondenseId },
 			{ role: "assistant", content: "Second", condenseParent: validCondenseId },
 			{
@@ -712,7 +720,7 @@ describe("summarizeConversation", () => {
 	const defaultSystemPrompt = "You are a helpful assistant."
 
 	it("should not summarize when there are not enough messages", async () => {
-		const messages: ApiMessage[] = [{ role: "user", content: "Hello", ts: 1 }]
+		const messages: any[] = [{ role: "user", content: "Hello", ts: 1 }]
 
 		const result = await summarizeConversation({
 			messages,
@@ -729,7 +737,7 @@ describe("summarizeConversation", () => {
 	})
 
 	it("should create summary with user role (fresh start model)", async () => {
-		const messages: ApiMessage[] = [
+		const messages: any[] = [
 			{ role: "user", content: "Hello", ts: 1 },
 			{ role: "assistant", content: "Hi there", ts: 2 },
 			{ role: "user", content: "How are you?", ts: 3 },
@@ -763,19 +771,19 @@ describe("summarizeConversation", () => {
 		}
 
 		// Summary message is a user message with just text (fresh start model)
-		expect(summaryMessage!.role).toBe("user")
-		expect(Array.isArray(summaryMessage!.content)).toBe(true)
-		const content = summaryMessage!.content as any[]
+		expect((summaryMessage as any).role).toBe("user")
+		expect(Array.isArray((summaryMessage as any).content)).toBe(true)
+		const content = (summaryMessage as any).content as any[]
 		expect(content).toHaveLength(1)
 		expect(content[0].type).toBe("text")
 		expect(content[0].text).toContain("## Conversation Summary")
 		expect(content[0].text).toContain("This is a summary")
 
 		// Fresh start: effective API history should contain only the summary
-		const effectiveHistory = getEffectiveApiHistory(result.messages)
+		const effectiveHistory = getEffectiveApiHistory(result.messages as any)
 		expect(effectiveHistory).toHaveLength(1)
 		expect(effectiveHistory[0].isSummary).toBe(true)
-		expect(effectiveHistory[0].role).toBe("user")
+		expect((effectiveHistory[0] as any).role).toBe("user")
 
 		// Check the cost and token counts
 		expect(result.cost).toBe(0.05)
@@ -786,7 +794,7 @@ describe("summarizeConversation", () => {
 	})
 
 	it("should preserve command blocks from first message in summary", async () => {
-		const messages: ApiMessage[] = [
+		const messages: any[] = [
 			{
 				role: "user",
 				content: 'Hello <command name="prr">/prr #123</command>',
@@ -808,7 +816,7 @@ describe("summarizeConversation", () => {
 		const summaryMessage = result.messages.find((m) => m.isSummary)
 		expect(summaryMessage).toBeDefined()
 
-		const content = summaryMessage!.content as any[]
+		const content = (summaryMessage as any).content as any[]
 		// Summary content is now split into separate text blocks
 		expect(content).toHaveLength(2)
 		expect(content[0].text).toContain("## Conversation Summary")
@@ -818,7 +826,7 @@ describe("summarizeConversation", () => {
 	})
 
 	it("should not include command blocks wrapper when no commands in first message", async () => {
-		const messages: ApiMessage[] = [
+		const messages: any[] = [
 			{ role: "user", content: "Hello", ts: 1 },
 			{ role: "assistant", content: "Hi there", ts: 2 },
 			{ role: "user", content: "How are you?", ts: 3 },
@@ -836,14 +844,14 @@ describe("summarizeConversation", () => {
 		const summaryMessage = result.messages.find((m) => m.isSummary)
 		expect(summaryMessage).toBeDefined()
 
-		const content = summaryMessage!.content as any[]
+		const content = (summaryMessage as any).content as any[]
 		expect(content[0].text).not.toContain("<system-reminder>")
 		expect(content[0].text).not.toContain("Active Workflows")
 	})
 
 	it("should handle empty summary response and return error", async () => {
 		// We need enough messages to trigger summarization
-		const messages: ApiMessage[] = [
+		const messages: any[] = [
 			{ role: "user", content: "Hello", ts: 1 },
 			{ role: "assistant", content: "Hi there", ts: 2 },
 			{ role: "user", content: "How are you?", ts: 3 },
@@ -884,7 +892,7 @@ describe("summarizeConversation", () => {
 	})
 
 	it("should correctly format the request to the API", async () => {
-		const messages: ApiMessage[] = [
+		const messages: any[] = [
 			{ role: "user", content: "Hello", ts: 1 },
 			{ role: "assistant", content: "Hi there", ts: 2 },
 			{ role: "user", content: "How are you?", ts: 3 },
@@ -921,7 +929,7 @@ describe("summarizeConversation", () => {
 	})
 
 	it("should include the original first user message in summarization input", async () => {
-		const messages: ApiMessage[] = [
+		const messages: any[] = [
 			{ role: "user", content: "Initial ask", ts: 1 },
 			{ role: "assistant", content: "Ack", ts: 2 },
 			{ role: "user", content: "Follow-up", ts: 3 },
@@ -953,7 +961,7 @@ describe("summarizeConversation", () => {
 	})
 
 	it("should calculate newContextTokens correctly with systemPrompt", async () => {
-		const messages: ApiMessage[] = [
+		const messages: any[] = [
 			{ role: "user", content: "Hello", ts: 1 },
 			{ role: "assistant", content: "Hi there", ts: 2 },
 			{ role: "user", content: "How are you?", ts: 3 },
@@ -992,7 +1000,7 @@ describe("summarizeConversation", () => {
 	})
 
 	it("should successfully summarize conversation", async () => {
-		const messages: ApiMessage[] = [
+		const messages: any[] = [
 			{ role: "user", content: "Hello", ts: 1 },
 			{ role: "assistant", content: "Hi there", ts: 2 },
 			{ role: "user", content: "How are you?", ts: 3 },
@@ -1025,7 +1033,7 @@ describe("summarizeConversation", () => {
 		expect(result.messages.length).toBe(messages.length + 1)
 
 		// Fresh start: effective history should contain only the summary
-		const effectiveHistory = getEffectiveApiHistory(result.messages)
+		const effectiveHistory = getEffectiveApiHistory(result.messages as any)
 		expect(effectiveHistory.length).toBe(1)
 		expect(effectiveHistory[0].isSummary).toBe(true)
 
@@ -1037,7 +1045,7 @@ describe("summarizeConversation", () => {
 	})
 
 	it("should return error when API handler is invalid", async () => {
-		const messages: ApiMessage[] = [
+		const messages: any[] = [
 			{ role: "user", content: "Hello", ts: 1 },
 			{ role: "assistant", content: "Hi there", ts: 2 },
 			{ role: "user", content: "How are you?", ts: 3 },
@@ -1081,7 +1089,7 @@ describe("summarizeConversation", () => {
 	})
 
 	it("should tag all messages with condenseParent (fresh start model)", async () => {
-		const messages: ApiMessage[] = [
+		const messages: any[] = [
 			{ role: "user", content: "Hello", ts: 1 },
 			{ role: "assistant", content: "Hi there", ts: 2 },
 			{ role: "user", content: "How are you?", ts: 3 },
@@ -1107,7 +1115,7 @@ describe("summarizeConversation", () => {
 	})
 
 	it("should place summary message at end of messages array", async () => {
-		const messages: ApiMessage[] = [
+		const messages: any[] = [
 			{ role: "user", content: "Hello", ts: 1 },
 			{ role: "assistant", content: "Hi there", ts: 2 },
 			{ role: "user", content: "How are you?", ts: 3 },
@@ -1125,7 +1133,7 @@ describe("summarizeConversation", () => {
 		// Summary should be the last message
 		const lastMessage = result.messages[result.messages.length - 1]
 		expect(lastMessage.isSummary).toBe(true)
-		expect(lastMessage.role).toBe("user")
+		expect((lastMessage as any).role).toBe("user")
 	})
 })
 
@@ -1136,7 +1144,7 @@ describe("summarizeConversation with custom settings", () => {
 	const localTaskId = "test-task"
 
 	// Sample messages for testing
-	const sampleMessages: ApiMessage[] = [
+	const sampleMessages: any[] = [
 		{ role: "user", content: "Hello", ts: 1 },
 		{ role: "assistant", content: "Hi there", ts: 2 },
 		{ role: "user", content: "How are you?", ts: 3 },
@@ -1289,7 +1297,7 @@ describe("summarizeConversation with custom settings", () => {
 
 describe("toolUseToText", () => {
 	it("should convert tool_use block with object input to text", () => {
-		const block: Anthropic.Messages.ToolUseBlockParam = {
+		const block: any = {
 			type: "tool_use",
 			id: "tool-123",
 			name: "read_file",
@@ -1302,7 +1310,7 @@ describe("toolUseToText", () => {
 	})
 
 	it("should convert tool_use block with nested object input to text", () => {
-		const block: Anthropic.Messages.ToolUseBlockParam = {
+		const block: any = {
 			type: "tool_use",
 			id: "tool-456",
 			name: "write_file",
@@ -1322,7 +1330,7 @@ describe("toolUseToText", () => {
 	})
 
 	it("should convert tool_use block with string input to text", () => {
-		const block: Anthropic.Messages.ToolUseBlockParam = {
+		const block: any = {
 			type: "tool_use",
 			id: "tool-789",
 			name: "execute_command",
@@ -1335,7 +1343,7 @@ describe("toolUseToText", () => {
 	})
 
 	it("should handle empty object input", () => {
-		const block: Anthropic.Messages.ToolUseBlockParam = {
+		const block: any = {
 			type: "tool_use",
 			id: "tool-empty",
 			name: "some_tool",
@@ -1350,7 +1358,7 @@ describe("toolUseToText", () => {
 
 describe("toolResultToText", () => {
 	it("should convert tool_result with string content to text", () => {
-		const block: Anthropic.Messages.ToolResultBlockParam = {
+		const block: any = {
 			type: "tool_result",
 			tool_use_id: "tool-123",
 			content: "File contents here",
@@ -1362,7 +1370,7 @@ describe("toolResultToText", () => {
 	})
 
 	it("should convert tool_result with error flag to text", () => {
-		const block: Anthropic.Messages.ToolResultBlockParam = {
+		const block: any = {
 			type: "tool_result",
 			tool_use_id: "tool-456",
 			content: "File not found",
@@ -1375,7 +1383,7 @@ describe("toolResultToText", () => {
 	})
 
 	it("should convert tool_result with array content to text", () => {
-		const block: Anthropic.Messages.ToolResultBlockParam = {
+		const block: any = {
 			type: "tool_result",
 			tool_use_id: "tool-789",
 			content: [
@@ -1390,7 +1398,7 @@ describe("toolResultToText", () => {
 	})
 
 	it("should handle tool_result with image in array content", () => {
-		const block: Anthropic.Messages.ToolResultBlockParam = {
+		const block: any = {
 			type: "tool_result",
 			tool_use_id: "tool-img",
 			content: [
@@ -1405,7 +1413,7 @@ describe("toolResultToText", () => {
 	})
 
 	it("should handle tool_result with no content", () => {
-		const block: Anthropic.Messages.ToolResultBlockParam = {
+		const block: any = {
 			type: "tool_result",
 			tool_use_id: "tool-empty",
 		}
@@ -1426,7 +1434,7 @@ describe("convertToolBlocksToText", () => {
 	})
 
 	it("should convert tool_use blocks to text blocks", () => {
-		const content: Anthropic.Messages.ContentBlockParam[] = [
+		const content: any[] = [
 			{
 				type: "tool_use",
 				id: "tool-123",
@@ -1438,12 +1446,12 @@ describe("convertToolBlocksToText", () => {
 		const result = convertToolBlocksToText(content)
 
 		expect(Array.isArray(result)).toBe(true)
-		expect((result as Anthropic.Messages.ContentBlockParam[])[0].type).toBe("text")
-		expect((result as Anthropic.Messages.TextBlockParam[])[0].text).toContain("[Tool Use: read_file]")
+		expect((result as any[])[0].type).toBe("text")
+		expect((result as any[])[0].text).toContain("[Tool Use: read_file]")
 	})
 
 	it("should convert tool_result blocks to text blocks", () => {
-		const content: Anthropic.Messages.ContentBlockParam[] = [
+		const content: any[] = [
 			{
 				type: "tool_result",
 				tool_use_id: "tool-123",
@@ -1454,12 +1462,12 @@ describe("convertToolBlocksToText", () => {
 		const result = convertToolBlocksToText(content)
 
 		expect(Array.isArray(result)).toBe(true)
-		expect((result as Anthropic.Messages.ContentBlockParam[])[0].type).toBe("text")
-		expect((result as Anthropic.Messages.TextBlockParam[])[0].text).toContain("[Tool Result]")
+		expect((result as any[])[0].type).toBe("text")
+		expect((result as any[])[0].text).toContain("[Tool Result]")
 	})
 
 	it("should preserve non-tool blocks unchanged", () => {
-		const content: Anthropic.Messages.ContentBlockParam[] = [
+		const content: any[] = [
 			{ type: "text", text: "Hello" },
 			{
 				type: "tool_use",
@@ -1473,16 +1481,16 @@ describe("convertToolBlocksToText", () => {
 		const result = convertToolBlocksToText(content)
 
 		expect(Array.isArray(result)).toBe(true)
-		const resultArray = result as Anthropic.Messages.ContentBlockParam[]
+		const resultArray = result as any[]
 		expect(resultArray).toHaveLength(3)
 		expect(resultArray[0]).toEqual({ type: "text", text: "Hello" })
 		expect(resultArray[1].type).toBe("text")
-		expect((resultArray[1] as Anthropic.Messages.TextBlockParam).text).toContain("[Tool Use: read_file]")
+		expect((resultArray[1] as any).text).toContain("[Tool Use: read_file]")
 		expect(resultArray[2]).toEqual({ type: "text", text: "World" })
 	})
 
 	it("should handle mixed content with multiple tool blocks", () => {
-		const content: Anthropic.Messages.ContentBlockParam[] = [
+		const content: any[] = [
 			{
 				type: "tool_use",
 				id: "tool-1",
@@ -1499,11 +1507,11 @@ describe("convertToolBlocksToText", () => {
 		const result = convertToolBlocksToText(content)
 
 		expect(Array.isArray(result)).toBe(true)
-		const resultArray = result as Anthropic.Messages.ContentBlockParam[]
+		const resultArray = result as any[]
 		expect(resultArray).toHaveLength(2)
-		expect((resultArray[0] as Anthropic.Messages.TextBlockParam).text).toContain("[Tool Use: read_file]")
-		expect((resultArray[1] as Anthropic.Messages.TextBlockParam).text).toContain("[Tool Result]")
-		expect((resultArray[1] as Anthropic.Messages.TextBlockParam).text).toContain("contents of a.ts")
+		expect((resultArray[0] as any).text).toContain("[Tool Use: read_file]")
+		expect((resultArray[1] as any).text).toContain("[Tool Result]")
+		expect((resultArray[1] as any).text).toContain("contents of a.ts")
 	})
 })
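Reviewer note: a minimal sketch of the dual-format behavior these updated tests rely on. `toolUseToText` and `convertToolBlocksToText` accept both the legacy Anthropic `tool_use` shape and the AI SDK `tool-call` part, and both should render to the same text. The values and the import path below are illustrative, not taken from the suite.

import { toolUseToText, convertToolBlocksToText } from "../index" // hypothetical relative path

// The same logical call expressed in both shapes:
const legacyBlock = { type: "tool_use", id: "tool-1", name: "read_file", input: { path: "a.ts" } }
const aiSdkBlock = { type: "tool-call", toolCallId: "tool-1", toolName: "read_file", input: { path: "a.ts" } }

// Both should produce "[Tool Use: read_file]\npath: a.ts"
toolUseToText(legacyBlock as any)
toolUseToText(aiSdkBlock as any)

// convertToolBlocksToText rewrites tool blocks to text blocks and leaves other blocks alone:
convertToolBlocksToText([{ type: "text", text: "Hello" }, legacyBlock] as any)
// => [{ type: "text", text: "Hello" }, { type: "text", text: "[Tool Use: read_file]\npath: a.ts" }]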
 

+ 11 - 11
src/core/condense/__tests__/nested-condense.spec.ts

@@ -9,7 +9,7 @@ describe("nested condensing scenarios", () => {
 			const condenseId2 = "condense-2"
 
 			// Simulate history after two nested condenses with user-role summaries
-			const history: ApiMessage[] = [
+			const history: any[] = [
 				// Original task - condensed in first condense
 				{ role: "user", content: "Build an app", ts: 100, condenseParent: condenseId1 },
 				// Messages from first condense
@@ -47,8 +47,8 @@ describe("nested condensing scenarios", () => {
 			expect(effectiveHistory.length).toBe(3)
 			expect(effectiveHistory[0].isSummary).toBe(true)
 			expect(effectiveHistory[0].condenseId).toBe(condenseId2) // Latest summary
-			expect(effectiveHistory[1].content).toBe("Database added")
-			expect(effectiveHistory[2].content).toBe("Now test it")
+			expect((effectiveHistory[1] as any).content).toBe("Database added")
+			expect((effectiveHistory[2] as any).content).toBe("Now test it")
 
 			// Verify NO condensed messages are included
 			const hasCondensedMessages = effectiveHistory.some(
@@ -68,7 +68,7 @@ describe("nested condensing scenarios", () => {
 			const hasSummary1 = messagesSinceLastSummary.some((m) => m.condenseId === condenseId1)
 			expect(hasSummary1).toBe(false)
 
-			const hasOriginalTask = messagesSinceLastSummary.some((m) => m.content === "Build an app")
+			const hasOriginalTask = messagesSinceLastSummary.some((m) => (m as any).content === "Build an app")
 			expect(hasOriginalTask).toBe(false)
 		})
 
@@ -77,7 +77,7 @@ describe("nested condensing scenarios", () => {
 			const condenseId2 = "condense-2"
 			const condenseId3 = "condense-3"
 
-			const history: ApiMessage[] = [
+			const history: any[] = [
 				// First condense content
 				{ role: "user", content: "Task", ts: 100, condenseParent: condenseId1 },
 				{
@@ -116,7 +116,7 @@ describe("nested condensing scenarios", () => {
 			// Should only contain Summary3 and current work
 			expect(effectiveHistory.length).toBe(2)
 			expect(effectiveHistory[0].condenseId).toBe(condenseId3)
-			expect(effectiveHistory[1].content).toBe("Current work")
+			expect((effectiveHistory[1] as any).content).toBe("Current work")
 
 			const messagesSinceLastSummary = getMessagesSinceLastSummary(effectiveHistory)
 			expect(messagesSinceLastSummary.length).toBe(2)
@@ -133,7 +133,7 @@ describe("nested condensing scenarios", () => {
 		it("should return consistent results when called with full history vs effective history", () => {
 			const condenseId = "condense-1"
 
-			const fullHistory: ApiMessage[] = [
+			const fullHistory: any[] = [
 				{ role: "user", content: "Original task", ts: 100, condenseParent: condenseId },
 				{ role: "assistant", content: "Response", ts: 200, condenseParent: condenseId },
 				{
@@ -166,7 +166,7 @@ describe("nested condensing scenarios", () => {
 			const condenseId2 = "condense-2"
 
 			// Scenario: Two nested condenses with user-role summaries
-			const fullHistory: ApiMessage[] = [
+			const fullHistory: any[] = [
 				{ role: "user", content: "Original task - should NOT appear", ts: 100, condenseParent: condenseId1 },
 				{ role: "assistant", content: "Old response", ts: 200, condenseParent: condenseId1 },
 				// First summary (user role, fresh-start model), then condensed again
@@ -197,9 +197,9 @@ describe("nested condensing scenarios", () => {
 
 			// The original task should NOT be included
 			const hasOriginalTask = messagesSinceLastSummary.some((m) =>
-				typeof m.content === "string"
-					? m.content.includes("Original task")
-					: JSON.stringify(m.content).includes("Original task"),
+				typeof (m as any).content === "string"
+					? (m as any).content.includes("Original task")
+					: JSON.stringify((m as any).content).includes("Original task"),
 			)
 			expect(hasOriginalTask).toBe(false)
 

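In short (hand-built data, not a fixture from these tests): only the newest summary and the messages after it survive, because any earlier summary is itself tagged with a `condenseParent` pointing at the condense that superseded it.

const history: any[] = [
	{ role: "user", content: "Task", ts: 1, condenseParent: "c1" },
	{ role: "user", content: "Summary 1", ts: 2, isSummary: true, condenseId: "c1", condenseParent: "c2" },
	{ role: "assistant", content: "More work", ts: 3, condenseParent: "c2" },
	{ role: "user", content: "Summary 2", ts: 4, isSummary: true, condenseId: "c2" },
	{ role: "assistant", content: "Current work", ts: 5 },
]

// Fresh-start model: effective history is ["Summary 2", "Current work"].
const effective = getEffectiveApiHistory(history)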
+ 49 - 49
src/core/condense/__tests__/rewind-after-condense.spec.ts

@@ -24,7 +24,7 @@ describe("Rewind After Condense - Issue #8295", () => {
 	describe("getEffectiveApiHistory", () => {
 		it("should return summary and messages after summary (fresh start model)", () => {
 			const condenseId = "summary-123"
-			const messages: ApiMessage[] = [
+			const messages: any[] = [
 				{ role: "user", content: "First message", ts: 1, condenseParent: condenseId },
 				{ role: "assistant", content: "First response", ts: 2, condenseParent: condenseId },
 				{ role: "user", content: "Second message", ts: 3, condenseParent: condenseId },
@@ -39,12 +39,12 @@ describe("Rewind After Condense - Issue #8295", () => {
 			// Fresh start model: summary + all messages after it
 			expect(effective.length).toBe(3)
 			expect(effective[0].isSummary).toBe(true)
-			expect(effective[1].content).toBe("Third message")
-			expect(effective[2].content).toBe("Third response")
+			expect((effective[1] as any).content).toBe("Third message")
+			expect((effective[2] as any).content).toBe("Third response")
 		})
 
 		it("should include messages without condenseParent", () => {
-			const messages: ApiMessage[] = [
+			const messages: any[] = [
 				{ role: "user", content: "Hello", ts: 1 },
 				{ role: "assistant", content: "Hi", ts: 2 },
 			]
@@ -64,7 +64,7 @@ describe("Rewind After Condense - Issue #8295", () => {
 	describe("cleanupAfterTruncation", () => {
 		it("should clear condenseParent when summary message is deleted", () => {
 			const condenseId = "summary-123"
-			const messages: ApiMessage[] = [
+			const messages: any[] = [
 				{ role: "user", content: "First message", ts: 1 },
 				{ role: "assistant", content: "First response", ts: 2, condenseParent: condenseId },
 				{ role: "user", content: "Second message", ts: 3, condenseParent: condenseId },
@@ -80,7 +80,7 @@ describe("Rewind After Condense - Issue #8295", () => {
 
 		it("should preserve condenseParent when summary message still exists", () => {
 			const condenseId = "summary-123"
-			const messages: ApiMessage[] = [
+			const messages: any[] = [
 				{ role: "user", content: "First message", ts: 1 },
 				{ role: "assistant", content: "First response", ts: 2, condenseParent: condenseId },
 				{ role: "user", content: "Summary", ts: 3, isSummary: true, condenseId },
@@ -95,7 +95,7 @@ describe("Rewind After Condense - Issue #8295", () => {
 		it("should handle multiple condense operations with different IDs", () => {
 			const condenseId1 = "summary-1"
 			const condenseId2 = "summary-2"
-			const messages: ApiMessage[] = [
+			const messages: any[] = [
 				{ role: "user", content: "Message 1", ts: 1, condenseParent: condenseId1 },
 				{ role: "user", content: "Summary 1", ts: 2, isSummary: true, condenseId: condenseId1 },
 				{ role: "user", content: "Message 2", ts: 3, condenseParent: condenseId2 },
@@ -111,7 +111,7 @@ describe("Rewind After Condense - Issue #8295", () => {
 		})
 
 		it("should not modify messages without condenseParent", () => {
-			const messages: ApiMessage[] = [
+			const messages: any[] = [
 				{ role: "user", content: "Hello", ts: 1 },
 				{ role: "assistant", content: "Hi", ts: 2 },
 			]
@@ -132,7 +132,7 @@ describe("Rewind After Condense - Issue #8295", () => {
 			const condenseId = "summary-abc"
 
 			// Simulate a conversation after condensing (all prior messages tagged)
-			const fullHistory: ApiMessage[] = [
+			const fullHistory: any[] = [
 				{ role: "user", content: "Initial task", ts: 1, condenseParent: condenseId },
 				{ role: "assistant", content: "Working on it", ts: 2, condenseParent: condenseId },
 				{ role: "user", content: "Continue", ts: 3, condenseParent: condenseId },
@@ -152,11 +152,11 @@ describe("Rewind After Condense - Issue #8295", () => {
 			}
 
 			// Verify effective history after cleanup: all messages should be visible now
-			const effectiveAfterCleanup = getEffectiveApiHistory(cleanedAfterDeletingSummary)
+			const effectiveAfterCleanup = getEffectiveApiHistory(cleanedAfterDeletingSummary as any)
 			expect(effectiveAfterCleanup.length).toBe(3)
-			expect(effectiveAfterCleanup[0].content).toBe("Initial task")
-			expect(effectiveAfterCleanup[1].content).toBe("Working on it")
-			expect(effectiveAfterCleanup[2].content).toBe("Continue")
+			expect((effectiveAfterCleanup[0] as any).content).toBe("Initial task")
+			expect((effectiveAfterCleanup[1] as any).content).toBe("Working on it")
+			expect((effectiveAfterCleanup[2] as any).content).toBe("Continue")
 		})
 
 		it("should properly restore context after rewind when summary was deleted", () => {
@@ -165,7 +165,7 @@ describe("Rewind After Condense - Issue #8295", () => {
 			// Scenario: Most of the conversation was condensed, but the summary was deleted.
 			// getEffectiveApiHistory already correctly handles orphaned messages (includes them
 			// when their summary doesn't exist). cleanupAfterTruncation cleans up the tags.
-			const messages: ApiMessage[] = [
+			const messages: any[] = [
 				{ role: "user", content: "Start", ts: 1 },
 				{ role: "assistant", content: "Response 1", ts: 2, condenseParent: condenseId },
 				{ role: "user", content: "More", ts: 3, condenseParent: condenseId },
@@ -177,8 +177,8 @@ describe("Rewind After Condense - Issue #8295", () => {
 			// getEffectiveApiHistory already includes orphaned messages (summary doesn't exist)
 			const effectiveBefore = getEffectiveApiHistory(messages)
 			expect(effectiveBefore.length).toBe(5) // All messages visible since summary was deleted
-			expect(effectiveBefore[0].content).toBe("Start")
-			expect(effectiveBefore[1].content).toBe("Response 1")
+			expect((effectiveBefore[0] as any).content).toBe("Start")
+			expect((effectiveBefore[1] as any).content).toBe("Response 1")
 
 			// cleanupAfterTruncation clears the orphaned condenseParent tags for data hygiene
 			const cleaned = cleanupAfterTruncation(messages)
@@ -190,7 +190,7 @@ describe("Rewind After Condense - Issue #8295", () => {
 			expect(cleaned[4].condenseParent).toBeUndefined()
 
 			// After cleanup, effective history is the same (all visible)
-			const effectiveAfter = getEffectiveApiHistory(cleaned)
+			const effectiveAfter = getEffectiveApiHistory(cleaned as any)
 			expect(effectiveAfter.length).toBe(5) // All messages visible
 		})
 
@@ -199,7 +199,7 @@ describe("Rewind After Condense - Issue #8295", () => {
 
 			// Scenario: Messages were condensed and summary exists - fresh start model returns
 			// only the summary and messages after it, NOT messages before the summary
-			const messages: ApiMessage[] = [
+			const messages: any[] = [
 				{ role: "user", content: "Start", ts: 1 },
 				{ role: "assistant", content: "Response 1", ts: 2, condenseParent: condenseId },
 				{ role: "user", content: "More", ts: 3, condenseParent: condenseId },
@@ -211,9 +211,9 @@ describe("Rewind After Condense - Issue #8295", () => {
 			// "Start" is NOT included because it's before the summary
 			const effective = getEffectiveApiHistory(messages)
 			expect(effective.length).toBe(2) // Summary, After summary (NOT Start)
-			expect(effective[0].content).toBe("Summary")
+			expect((effective[0] as any).content).toBe("Summary")
 			expect(effective[0].isSummary).toBe(true)
-			expect(effective[1].content).toBe("After summary")
+			expect((effective[1] as any).content).toBe("After summary")
 
 			// cleanupAfterTruncation should NOT clear condenseParent since summary exists
 			const cleaned = cleanupAfterTruncation(messages)
@@ -241,7 +241,7 @@ describe("Rewind After Condense - Issue #8295", () => {
 				// Simulate post-condense state where summary has unique timestamp (firstKeptTs - 1)
 				// In real usage, condensed messages have timestamps like 100, 200, 300...
 				// and firstKeptTs is much larger, so firstKeptTs - 1 = 999 is unique
-				const messagesAfterCondense: ApiMessage[] = [
+				const messagesAfterCondense: any[] = [
 					{ role: "user", content: "Initial task", ts: 100 },
 					{ role: "assistant", content: "Response 1", ts: 200, condenseParent: condenseId },
 					{ role: "user", content: "Continue", ts: 300, condenseParent: condenseId },
@@ -281,7 +281,7 @@ describe("Rewind After Condense - Issue #8295", () => {
 				const condenseId = "summary-lookup-test"
 				const firstKeptTs = 8
 
-				const messages: ApiMessage[] = [
+				const messages: any[] = [
 					{ role: "user", content: "Initial", ts: 1 },
 					{ role: "user", content: "Summary", ts: firstKeptTs - 1, isSummary: true, condenseId },
 					{ role: "assistant", content: "First kept message", ts: firstKeptTs },
@@ -320,7 +320,7 @@ describe("Rewind After Condense - Issue #8295", () => {
 				// - msg2-msg7 tagged with condenseParent
 				// - summary inserted with ts = msg8.ts - 1
 				// - msg8, msg9, msg10 kept
-				const storageAfterCondense: ApiMessage[] = [
+				const storageAfterCondense: any[] = [
 					{ role: "user", content: "Task: Build a feature", ts: 100, condenseParent: condenseId },
 					{ role: "assistant", content: "I'll help with that", ts: 200, condenseParent: condenseId },
 					{ role: "user", content: "Start with the API", ts: 300, condenseParent: condenseId },
@@ -350,23 +350,23 @@ describe("Rewind After Condense - Issue #8295", () => {
 				expect(effective.length).toBe(4)
 
 				// Verify exact order and content
-				expect(effective[0].role).toBe("user")
+				expect((effective[0] as any).role).toBe("user")
 				expect(effective[0].isSummary).toBe(true)
-				expect(effective[0].content).toBe("Summary: Built API with validation, working on tests")
+				expect((effective[0] as any).content).toBe("Summary: Built API with validation, working on tests")
 
-				expect(effective[1].role).toBe("assistant")
-				expect(effective[1].content).toBe("Writing unit tests now")
+				expect((effective[1] as any).role).toBe("assistant")
+				expect((effective[1] as any).content).toBe("Writing unit tests now")
 
-				expect(effective[2].role).toBe("user")
-				expect(effective[2].content).toBe("Include edge cases")
+				expect((effective[2] as any).role).toBe("user")
+				expect((effective[2] as any).content).toBe("Include edge cases")
 
-				expect(effective[3].role).toBe("assistant")
-				expect(effective[3].content).toBe("Added edge case tests")
+				expect((effective[3] as any).role).toBe("assistant")
+				expect((effective[3] as any).content).toBe("Added edge case tests")
 
 				// Verify condensed messages are NOT in effective history
 				const condensedContents = ["I'll help with that", "Start with the API", "Creating API endpoints"]
 				for (const content of condensedContents) {
-					expect(effective.find((m) => m.content === content)).toBeUndefined()
+					expect(effective.find((m) => (m as any).content === content)).toBeUndefined()
 				}
 			})
 
@@ -380,7 +380,7 @@ describe("Rewind After Condense - Issue #8295", () => {
 				// Second condense: summary1 + msg8-msg17 condensed, summary2 created
 				//
 				// Storage after double condense:
-				const storageAfterDoubleCondense: ApiMessage[] = [
+				const storageAfterDoubleCondense: any[] = [
 					// First message - condensed during the first condense
 					{ role: "user", content: "Initial task: Build a full app", ts: 100, condenseParent: condenseId1 },
 
@@ -437,22 +437,22 @@ describe("Rewind After Condense - Issue #8295", () => {
 				expect(effective.length).toBe(4)
 
 				// Verify exact order and content
-				expect(effective[0].role).toBe("user")
+				expect((effective[0] as any).role).toBe("user")
 				expect(effective[0].isSummary).toBe(true)
 				expect(effective[0].condenseId).toBe(condenseId2) // Must be the SECOND summary
-				expect(effective[0].content).toContain("Summary2")
+				expect((effective[0] as any).content).toContain("Summary2")
 
-				expect(effective[1].role).toBe("assistant")
-				expect(effective[1].content).toBe("Writing integration tests")
+				expect((effective[1] as any).role).toBe("assistant")
+				expect((effective[1] as any).content).toBe("Writing integration tests")
 
-				expect(effective[2].role).toBe("user")
-				expect(effective[2].content).toBe("Test the auth flow")
+				expect((effective[2] as any).role).toBe("user")
+				expect((effective[2] as any).content).toBe("Test the auth flow")
 
-				expect(effective[3].role).toBe("assistant")
-				expect(effective[3].content).toBe("Auth tests passing")
+				expect((effective[3] as any).role).toBe("assistant")
+				expect((effective[3] as any).content).toBe("Auth tests passing")
 
 				// Verify Summary1 is NOT in effective history (it's tagged with condenseParent)
-				const summary1 = effective.find((m) => m.content?.toString().includes("Summary1"))
+				const summary1 = effective.find((m) => (m as any).content?.toString().includes("Summary1"))
 				expect(summary1).toBeUndefined()
 
 				// Verify all condensed messages are NOT in effective history
@@ -464,7 +464,7 @@ describe("Rewind After Condense - Issue #8295", () => {
 					"Implemented error handlers",
 				]
 				for (const content of condensedContents) {
-					expect(effective.find((m) => m.content === content)).toBeUndefined()
+					expect(effective.find((m) => (m as any).content === content)).toBeUndefined()
 				}
 			})
 
@@ -473,7 +473,7 @@ describe("Rewind After Condense - Issue #8295", () => {
 
 				// Verify that after condense, the effective history maintains proper
 				// user/assistant message alternation (important for API compatibility)
-				const storage: ApiMessage[] = [
+				const storage: any[] = [
 					{ role: "user", content: "Start task", ts: 100, condenseParent: condenseId },
 					{ role: "assistant", content: "Response 1", ts: 200, condenseParent: condenseId },
 					{ role: "user", content: "Continue", ts: 300, condenseParent: condenseId },
@@ -488,17 +488,17 @@ describe("Rewind After Condense - Issue #8295", () => {
 
 				// Verify the sequence: user(summary), assistant, user, assistant
 				// This is the fresh-start model with user-role summaries
-				expect(effective[0].role).toBe("user")
+				expect((effective[0] as any).role).toBe("user")
 				expect(effective[0].isSummary).toBe(true)
-				expect(effective[1].role).toBe("assistant")
-				expect(effective[2].role).toBe("user")
-				expect(effective[3].role).toBe("assistant")
+				expect((effective[1] as any).role).toBe("assistant")
+				expect((effective[2] as any).role).toBe("user")
+				expect((effective[3] as any).role).toBe("assistant")
 			})
 
 			it("should preserve timestamps in chronological order in effective history", () => {
 				const condenseId = "summary-timestamps"
 
-				const storage: ApiMessage[] = [
+				const storage: any[] = [
 					{ role: "user", content: "First", ts: 100, condenseParent: condenseId },
 					{ role: "assistant", content: "Condensed", ts: 200, condenseParent: condenseId },
 					{ role: "user", content: "Summary", ts: 299, isSummary: true, condenseId },

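The rewind contract exercised above, condensed into a sketch (illustrative data): when the summary is deleted, the tagged messages become visible again, and `cleanupAfterTruncation` clears the dangling tags.

// A condensed history whose summary was rewound away:
const messages: any[] = [
	{ role: "user", content: "Start", ts: 1, condenseParent: "gone" },
	{ role: "assistant", content: "Reply", ts: 2, condenseParent: "gone" },
]

// Orphaned messages are already visible to the API (no summary with condenseId "gone" exists)...
getEffectiveApiHistory(messages) // => both messages

// ...and cleanup strips the now-meaningless tags for data hygiene.
const cleaned = cleanupAfterTruncation(messages)
cleaned.every((m) => m.condenseParent === undefined) // => true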
+ 138 - 86
src/core/condense/index.ts

@@ -1,11 +1,32 @@
-import Anthropic from "@anthropic-ai/sdk"
 import crypto from "crypto"
 
 import { TelemetryService } from "@roo-code/telemetry"
 
 import { t } from "../../i18n"
 import { ApiHandler, ApiHandlerCreateMessageMetadata } from "../../api"
-import { ApiMessage } from "../task-persistence/apiMessages"
+import {
+	type RooMessage,
+	type RooUserMessage,
+	type RooToolMessage,
+	type RooRoleMessage,
+	isRooAssistantMessage,
+	isRooToolMessage,
+	isRooUserMessage,
+	isRooRoleMessage,
+	type ToolCallPart,
+	type ToolResultPart,
+	type TextPart,
+	type AnyToolCallBlock,
+	type AnyToolResultBlock,
+	isAnyToolCallBlock,
+	isAnyToolResultBlock,
+	getToolCallId,
+	getToolCallName,
+	getToolCallInput,
+	getToolResultCallId,
+	getToolResultContent,
+	getToolResultIsError,
+} from "../task-persistence/rooMessage"
 import { maybeRemoveImageBlocks } from "../../api/transform/image-cleaning"
 import { findLast } from "../../shared/array"
 import { supportPrompt } from "../../shared/support-prompt"
@@ -15,13 +36,15 @@ import { generateFoldedFileContext } from "./foldedFileContext"
 export type { FoldedFileContextResult, FoldedFileContextOptions } from "./foldedFileContext"
 
 /**
- * Converts a tool_use block to a text representation.
- * This allows the conversation to be summarized without requiring the tools parameter.
+ * Converts a tool-call / tool_use block to a text representation.
+ * Accepts both AI SDK ToolCallPart (toolName, input) and legacy Anthropic format (name, input).
  */
-export function toolUseToText(block: Anthropic.Messages.ToolUseBlockParam): string {
+export function toolUseToText(block: AnyToolCallBlock): string {
+	const name = getToolCallName(block)
+	const rawInput = getToolCallInput(block)
 	let input: string
-	if (typeof block.input === "object" && block.input !== null) {
-		input = Object.entries(block.input)
+	if (typeof rawInput === "object" && rawInput !== null) {
+		input = Object.entries(rawInput)
 			.map(([key, value]) => {
 				const formattedValue =
 					typeof value === "object" && value !== null ? JSON.stringify(value, null, 2) : String(value)
@@ -29,33 +52,38 @@ export function toolUseToText(block: Anthropic.Messages.ToolUseBlockParam): stri
 			})
 			.join("\n")
 	} else {
-		input = String(block.input)
+		input = String(rawInput)
 	}
-	return `[Tool Use: ${block.name}]\n${input}`
+	return `[Tool Use: ${name}]\n${input}`
 }
 
 /**
- * Converts a tool_result block to a text representation.
- * This allows the conversation to be summarized without requiring the tools parameter.
+ * Converts a tool-result / tool_result block to a text representation.
+ * Accepts both AI SDK ToolResultPart and legacy Anthropic format.
  */
-export function toolResultToText(block: Anthropic.Messages.ToolResultBlockParam): string {
-	const errorSuffix = block.is_error ? " (Error)" : ""
-	if (typeof block.content === "string") {
-		return `[Tool Result${errorSuffix}]\n${block.content}`
-	} else if (Array.isArray(block.content)) {
-		const contentText = block.content
-			.map((contentBlock) => {
+export function toolResultToText(block: AnyToolResultBlock): string {
+	const isError = getToolResultIsError(block)
+	const errorSuffix = isError ? " (Error)" : ""
+	// AI SDK uses `output`, legacy uses `content`
+	const rawContent = getToolResultContent(block)
+	if (typeof rawContent === "string") {
+		return `[Tool Result${errorSuffix}]\n${rawContent}`
+	} else if (Array.isArray(rawContent)) {
+		const contentText = rawContent
+			.map((contentBlock: { type: string; text?: string }) => {
 				if (contentBlock.type === "text") {
 					return contentBlock.text
 				}
 				if (contentBlock.type === "image") {
 					return "[Image]"
 				}
-				// Handle any other content block types
-				return `[${(contentBlock as { type: string }).type}]`
+				return `[${contentBlock.type}]`
 			})
 			.join("\n")
 		return `[Tool Result${errorSuffix}]\n${contentText}`
+	} else if (rawContent && typeof rawContent === "object" && "value" in rawContent) {
+		// AI SDK ToolResultPart.output: { type: "text", value: string } or { type: "json", value: JSONValue }
+		const { value } = rawContent as { value: unknown }
+		return `[Tool Result${errorSuffix}]\n${typeof value === "string" ? value : JSON.stringify(value)}`
 	}
 	return `[Tool Result${errorSuffix}]`
 }
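For clarity, the three result shapes this function now accepts, worked through with illustrative inputs:

// Legacy string content:
toolResultToText({ type: "tool_result", tool_use_id: "t1", content: "File contents" } as any)
// => "[Tool Result]\nFile contents"

// Legacy array content, with an error flag:
toolResultToText({
	type: "tool_result",
	tool_use_id: "t2",
	content: [{ type: "text", text: "ok" }, { type: "image", source: {} }],
	is_error: true,
} as any)
// => "[Tool Result (Error)]\nok\n[Image]"

// AI SDK ToolResultPart, where the payload lives under `output`:
toolResultToText({
	type: "tool-result",
	toolCallId: "t3",
	toolName: "read_file",
	output: { type: "text", value: "File contents" },
} as any)
// => "[Tool Result]\nFile contents"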
@@ -68,21 +96,21 @@ export function toolResultToText(block: Anthropic.Messages.ToolResultBlockParam)
  * @param content - The message content (string or array of content blocks)
  * @returns The transformed content with tool blocks converted to text blocks
  */
-export function convertToolBlocksToText(
-	content: string | Anthropic.Messages.ContentBlockParam[],
-): string | Anthropic.Messages.ContentBlockParam[] {
+export function convertToolBlocksToText(content: string | Array<{ type: string }>): string | Array<{ type: string }> {
 	if (typeof content === "string") {
 		return content
 	}
 
 	return content.map((block) => {
-		if (block.type === "tool_use") {
+		// Check both AI SDK (`tool-call`) and legacy (`tool_use`) discriminators
+		if (isAnyToolCallBlock(block)) {
 			return {
 				type: "text" as const,
 				text: toolUseToText(block),
 			}
 		}
-		if (block.type === "tool_result") {
+		// Check both AI SDK (`tool-result`) and legacy (`tool_result`) discriminators
+		if (isAnyToolResultBlock(block)) {
 			return {
 				type: "text" as const,
 				text: toolResultToText(block),
@@ -99,9 +127,9 @@ export function convertToolBlocksToText(
  * @param messages - The messages to transform
  * @returns The transformed messages with tool blocks converted to text
  */
-export function transformMessagesForCondensing<
-	T extends { role: string; content: string | Anthropic.Messages.ContentBlockParam[] },
->(messages: T[]): T[] {
+export function transformMessagesForCondensing<T extends { role: string; content: string | Array<{ type: string }> }>(
+	messages: T[],
+): T[] {
 	return messages.map((msg) => ({
 		...msg,
 		content: convertToolBlocksToText(msg.content),
@@ -131,24 +159,33 @@ The goal is for work to continue seamlessly after condensation - as if it never
  * @param messages - The conversation messages to process
  * @returns The messages with synthetic tool_results appended if needed
  */
-export function injectSyntheticToolResults(messages: ApiMessage[]): ApiMessage[] {
-	// Find all tool_call IDs in assistant messages
+export function injectSyntheticToolResults(messages: RooMessage[]): RooMessage[] {
+	// Find all tool-call IDs in assistant messages
 	const toolCallIds = new Set<string>()
-	// Find all tool_result IDs in user messages
+	// Find all tool-result IDs in user/tool messages
 	const toolResultIds = new Set<string>()
 
 	for (const msg of messages) {
-		if (msg.role === "assistant" && Array.isArray(msg.content)) {
+		if (isRooAssistantMessage(msg) && Array.isArray(msg.content)) {
 			for (const block of msg.content) {
-				if (block.type === "tool_use") {
-					toolCallIds.add(block.id)
+				if (isAnyToolCallBlock(block as { type: string })) {
+					toolCallIds.add(getToolCallId(block as AnyToolCallBlock))
 				}
 			}
 		}
-		if (msg.role === "user" && Array.isArray(msg.content)) {
+		if (isRooToolMessage(msg) && Array.isArray(msg.content)) {
 			for (const block of msg.content) {
-				if (block.type === "tool_result") {
-					toolResultIds.add(block.tool_use_id)
+				if (isAnyToolResultBlock(block as { type: string })) {
+					toolResultIds.add(getToolResultCallId(block as AnyToolResultBlock))
+				}
+			}
+		}
+		// Also check legacy user messages with tool_result blocks
+		if (isRooUserMessage(msg) && Array.isArray(msg.content)) {
+			for (const block of msg.content) {
+				const typedBlock = block as unknown as { type: string }
+				if (isAnyToolResultBlock(typedBlock)) {
+					toolResultIds.add(getToolResultCallId(typedBlock))
 				}
 			}
 		}
@@ -161,15 +198,16 @@ export function injectSyntheticToolResults(messages: ApiMessage[]): ApiMessage[]
 		return messages
 	}
 
-	// Inject synthetic tool_results as a new user message
-	const syntheticResults: Anthropic.Messages.ToolResultBlockParam[] = orphanIds.map((id) => ({
-		type: "tool_result" as const,
-		tool_use_id: id,
-		content: "Context condensation triggered. Tool execution deferred.",
+	// Inject synthetic tool_results as a new RooToolMessage
+	const syntheticResults: ToolResultPart[] = orphanIds.map((id) => ({
+		type: "tool-result" as const,
+		toolCallId: id,
+		toolName: "unknown",
+		output: { type: "text" as const, value: "Context condensation triggered. Tool execution deferred." },
 	}))
 
-	const syntheticMessage: ApiMessage = {
-		role: "user",
+	const syntheticMessage: RooToolMessage = {
+		role: "tool",
 		content: syntheticResults,
 		ts: Date.now(),
 	}
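Worked through by hand (illustrative values): an orphan `tool-call` in the history causes a new `role: "tool"` message to be appended, carrying one synthetic AI SDK `tool-result` per orphan.

const messages: any[] = [
	{ role: "user", content: "Read the file", ts: 1 },
	{
		role: "assistant",
		content: [{ type: "tool-call", toolCallId: "orphan-1", toolName: "read_file", input: { path: "a.ts" } }],
		ts: 2,
	},
]

const result = injectSyntheticToolResults(messages)
// result[2] is a new RooToolMessage:
// {
//   role: "tool",
//   content: [{
//     type: "tool-result",
//     toolCallId: "orphan-1",
//     toolName: "unknown",
//     output: { type: "text", value: "Context condensation triggered. Tool execution deferred." },
//   }],
//   ts: <Date.now()>,
// }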
@@ -184,7 +222,10 @@ export function injectSyntheticToolResults(messages: ApiMessage[]): ApiMessage[]
  * @param message - The message to extract command blocks from
  * @returns A string containing all command blocks found, or empty string if none
  */
-export function extractCommandBlocks(message: ApiMessage): string {
+export function extractCommandBlocks(message: RooMessage): string {
+	if (!isRooRoleMessage(message)) {
+		return ""
+	}
 	const content = message.content
 	let text: string
 
@@ -193,7 +234,7 @@ export function extractCommandBlocks(message: ApiMessage): string {
 	} else if (Array.isArray(content)) {
 		// Concatenate all text blocks
 		text = content
-			.filter((block): block is Anthropic.Messages.TextBlockParam => block.type === "text")
+			.filter((block): block is TextPart => (block as { type: string }).type === "text")
 			.map((block) => block.text)
 			.join("\n")
 	} else {
@@ -212,7 +253,7 @@ export function extractCommandBlocks(message: ApiMessage): string {
 }
 
 export type SummarizeResponse = {
-	messages: ApiMessage[] // The messages after summarization
+	messages: RooMessage[] // The messages after summarization
 	summary: string // The summary text; empty string for no summary
 	cost: number // The cost of the summarization operation
 	newContextTokens?: number // The number of tokens in the context for the next API request
@@ -222,7 +263,7 @@ export type SummarizeResponse = {
 }
 
 export type SummarizeConversationOptions = {
-	messages: ApiMessage[]
+	messages: RooMessage[]
 	apiHandler: ApiHandler
 	systemPrompt: string
 	taskId: string
@@ -287,7 +328,7 @@ export async function summarizeConversation(options: SummarizeConversationOption
 	}
 
 	// Check if there's a recent summary in the messages (edge case)
-	const recentSummaryExists = messagesToSummarize.some((message: ApiMessage) => message.isSummary)
+	const recentSummaryExists = messagesToSummarize.some((message) => message.isSummary)
 
 	if (recentSummaryExists && messagesToSummarize.length <= 2) {
 		const error = t("common:errors.condensed_recently")
@@ -298,7 +339,7 @@ export async function summarizeConversation(options: SummarizeConversationOption
 	// This respects user's custom condensing prompt setting
 	const condenseInstructions = customCondensingPrompt?.trim() || supportPrompt.default.CONDENSE
 
-	const finalRequestMessage: Anthropic.MessageParam = {
+	const finalRequestMessage: RooUserMessage = {
 		role: "user",
 		content: condenseInstructions,
 	}
@@ -311,8 +352,15 @@ export async function summarizeConversation(options: SummarizeConversationOption
 	// This is necessary because some providers (like Bedrock via LiteLLM) require the `tools` parameter
 	// when tool blocks are present. By converting them to text, we can send the conversation for
 	// summarization without needing to pass the tools parameter.
+	// Filter out reasoning messages (no role/content) before transforming for the API
+	const messagesForApi = [...messagesWithToolResults, finalRequestMessage].filter(
+		(msg): msg is Exclude<RooMessage, { type: "reasoning" }> => "role" in msg,
+	)
 	const messagesWithTextToolBlocks = transformMessagesForCondensing(
-		maybeRemoveImageBlocks([...messagesWithToolResults, finalRequestMessage], apiHandler),
+		maybeRemoveImageBlocks(messagesForApi, apiHandler) as Array<{
+			role: string
+			content: string | Array<{ type: string }>
+		}>,
 	)
 
 	const requestMessages = messagesWithTextToolBlocks.map(({ role, content }) => ({ role, content }))
@@ -332,7 +380,7 @@ export async function summarizeConversation(options: SummarizeConversationOption
 	let outputTokens = 0
 
 	try {
-		const stream = apiHandler.createMessage(promptToUse, requestMessages, metadata)
+		const stream = apiHandler.createMessage(promptToUse, requestMessages as RooMessage[], metadata)
 
 		for await (const chunk of stream) {
 			if (chunk.type === "text") {
@@ -398,9 +446,7 @@ export async function summarizeConversation(options: SummarizeConversationOption
 	const commandBlocks = firstMessage ? extractCommandBlocks(firstMessage) : ""
 
 	// Build the summary content as separate text blocks
-	const summaryContent: Anthropic.Messages.ContentBlockParam[] = [
-		{ type: "text", text: `## Conversation Summary\n${summary}` },
-	]
+	const summaryContent: TextPart[] = [{ type: "text", text: `## Conversation Summary\n${summary}` }]
 
 	// Add command blocks (active workflows) in their own system-reminder block if present
 	if (commandBlocks) {
@@ -455,7 +501,7 @@ ${commandBlocks}
 	// The summary goes at the end of all messages.
 	const lastMsgTs = messages[messages.length - 1]?.ts ?? Date.now()
 
-	const summaryMessage: ApiMessage = {
+	const summaryMessage: RooUserMessage = {
 		role: "user", // Fresh start model: summary is a user message
 		content: summaryContent,
 		ts: lastMsgTs + 1, // Unique timestamp after last message
@@ -488,7 +534,7 @@ ${commandBlocks}
 
 	// Count the tokens in the context for the next API request
 	// After condense, the context will contain: system prompt + summary + tool definitions
-	const systemPromptMessage: ApiMessage = { role: "user", content: systemPrompt }
+	const systemPromptMessage: RooUserMessage = { role: "user", content: systemPrompt }
 
 	// Count actual summaryMessage content directly instead of using outputTokens as a proxy
 	// This ensures we account for wrapper text (## Conversation Summary, <system-reminder>, <environment_details>)
@@ -496,7 +542,7 @@ ${commandBlocks}
 		typeof message.content === "string" ? [{ text: message.content, type: "text" as const }] : message.content,
 	)
 
-	const messageTokens = await apiHandler.countTokens(contextBlocks)
+	const messageTokens = await apiHandler.countTokens(contextBlocks as Parameters<typeof apiHandler.countTokens>[0])
 
 	// Count tool definition tokens if tools are provided
 	let toolTokens = 0
@@ -516,7 +562,7 @@ ${commandBlocks}
  * Note: Summary messages are always created with role: "user" (fresh-start model),
  * so the first message since the last summary is guaranteed to be a user message.
  */
-export function getMessagesSinceLastSummary(messages: ApiMessage[]): ApiMessage[] {
+export function getMessagesSinceLastSummary(messages: RooMessage[]): RooMessage[] {
 	const lastSummaryIndexReverse = [...messages].reverse().findIndex((message) => message.isSummary)
 
 	if (lastSummaryIndexReverse === -1) {
@@ -543,7 +589,7 @@ export function getMessagesSinceLastSummary(messages: ApiMessage[]): ApiMessage[
  * @param messages - The full API conversation history including tagged messages
  * @returns The filtered history that should be sent to the API
  */
-export function getEffectiveApiHistory(messages: ApiMessage[]): ApiMessage[] {
+export function getEffectiveApiHistory(messages: RooMessage[]): RooMessage[] {
 	// Find the most recent summary message
 	const lastSummary = findLast(messages, (msg) => msg.isSummary === true)
 
@@ -552,42 +598,56 @@ export function getEffectiveApiHistory(messages: ApiMessage[]): ApiMessage[] {
 		const summaryIndex = messages.indexOf(lastSummary)
 		let messagesFromSummary = messages.slice(summaryIndex)
 
-		// Collect all tool_use IDs from assistant messages in the result
-		// This is needed to filter out orphan tool_result blocks that reference
-		// tool_use IDs from messages that were condensed away
-		const toolUseIds = new Set<string>()
+		// Collect all tool call IDs from assistant messages in the result.
+		// This is needed to filter out orphan tool results that reference
+		// tool call IDs from messages that were condensed away.
+		const toolCallIds = new Set<string>()
 		for (const msg of messagesFromSummary) {
-			if (msg.role === "assistant" && Array.isArray(msg.content)) {
-				for (const block of msg.content) {
-					if (block.type === "tool_use" && (block as Anthropic.Messages.ToolUseBlockParam).id) {
-						toolUseIds.add((block as Anthropic.Messages.ToolUseBlockParam).id)
+			if (isRooAssistantMessage(msg) && Array.isArray(msg.content)) {
+				for (const part of msg.content) {
+					if (isAnyToolCallBlock(part as { type: string })) {
+						toolCallIds.add(getToolCallId(part as AnyToolCallBlock))
 					}
 				}
 			}
 		}
 
-		// Filter out orphan tool_result blocks from user messages
+		// Filter out orphan tool results from tool messages
 		messagesFromSummary = messagesFromSummary
 			.map((msg) => {
-				if (msg.role === "user" && Array.isArray(msg.content)) {
-					const filteredContent = msg.content.filter((block) => {
-						if (block.type === "tool_result") {
-							return toolUseIds.has((block as Anthropic.Messages.ToolResultBlockParam).tool_use_id)
+				if (isRooToolMessage(msg) && Array.isArray(msg.content)) {
+					const filteredContent = msg.content.filter((part) => {
+						if (part.type === "tool-result") {
+							return toolCallIds.has((part as ToolResultPart).toolCallId)
 						}
 						return true
 					})
-					// If all content was filtered out, mark for removal
 					if (filteredContent.length === 0) {
 						return null
 					}
-					// If some content was filtered, return updated message
 					if (filteredContent.length !== msg.content.length) {
 						return { ...msg, content: filteredContent }
 					}
 				}
+				// Also handle legacy user messages that may contain tool_result blocks
+				if (isRooUserMessage(msg) && Array.isArray(msg.content)) {
+					const filteredContent = msg.content.filter((block) => {
+						const typedBlock = block as unknown as { type: string }
+						if (isAnyToolResultBlock(typedBlock)) {
+							return toolCallIds.has(getToolResultCallId(typedBlock))
+						}
+						return true
+					})
+					if (filteredContent.length === 0) {
+						return null
+					}
+					if (filteredContent.length !== msg.content.length) {
+						return { ...msg, content: filteredContent as typeof msg.content }
+					}
+				}
 				return msg
 			})
-			.filter((msg): msg is ApiMessage => msg !== null)
+			.filter((msg): msg is RooMessage => msg !== null)
 
 		// Still need to filter out any truncated messages within this range
 		const existingTruncationIds = new Set<string>()
@@ -598,7 +658,6 @@ export function getEffectiveApiHistory(messages: ApiMessage[]): ApiMessage[] {
 		}
 
 		return messagesFromSummary.filter((msg) => {
-			// Filter out truncated messages if their truncation marker exists
 			if (msg.truncationParent && existingTruncationIds.has(msg.truncationParent)) {
 				return false
 			}
@@ -609,9 +668,7 @@ export function getEffectiveApiHistory(messages: ApiMessage[]): ApiMessage[] {
 	// No summary - filter based on condenseParent and truncationParent as before
 	// This handles the case of orphaned condenseParent tags (summary was deleted via rewind)
 
-	// Collect all condenseIds of summaries that exist in the current history
 	const existingSummaryIds = new Set<string>()
-	// Collect all truncationIds of truncation markers that exist in the current history
 	const existingTruncationIds = new Set<string>()
 
 	for (const msg of messages) {
@@ -623,15 +680,10 @@ export function getEffectiveApiHistory(messages: ApiMessage[]): ApiMessage[] {
 		}
 	}
 
-	// Filter out messages whose condenseParent points to an existing summary
-	// or whose truncationParent points to an existing truncation marker.
-	// Messages with orphaned parents (summary/marker was deleted) are included.
 	return messages.filter((msg) => {
-		// Filter out condensed messages if their summary exists
 		if (msg.condenseParent && existingSummaryIds.has(msg.condenseParent)) {
 			return false
 		}
-		// Filter out truncated messages if their truncation marker exists
 		if (msg.truncationParent && existingTruncationIds.has(msg.truncationParent)) {
 			return false
 		}
@@ -650,7 +702,7 @@ export function getEffectiveApiHistory(messages: ApiMessage[]): ApiMessage[] {
  * @param messages - The API conversation history after truncation
  * @returns The cleaned history with orphaned condenseParent and truncationParent fields cleared
  */
-export function cleanupAfterTruncation(messages: ApiMessage[]): ApiMessage[] {
+export function cleanupAfterTruncation(messages: RooMessage[]): RooMessage[] {
 	// Collect all condenseIds of summaries that still exist
 	const existingSummaryIds = new Set<string>()
 	// Collect all truncationIds of truncation markers that still exist
@@ -682,7 +734,7 @@ export function cleanupAfterTruncation(messages: ApiMessage[]): ApiMessage[] {
 		if (needsUpdate) {
 			// Create a new object without orphaned parent references
 			const { condenseParent, truncationParent, ...rest } = msg
-			const result: ApiMessage = rest as ApiMessage
+			const result = rest as RooMessage
 
 			// Keep condenseParent if its summary still exists
 			if (condenseParent && existingSummaryIds.has(condenseParent)) {

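For reference, a minimal sketch of the new orphan filter in getEffectiveApiHistory (assumes the RooMessage shapes this commit introduces; import paths illustrative): a tool-result whose toolCallId has no matching tool-call after the summary is dropped.

	import { getEffectiveApiHistory } from "../condense"
	import type { RooMessage } from "../task-persistence"

	const history = [
		// The summary survives condensation and starts the effective window.
		{ role: "user", content: "summary text", isSummary: true, ts: 1 },
		// Orphan: the assistant message that issued "call-old" was condensed
		// away, so no tool-call in the window declares this toolCallId.
		{
			role: "tool",
			content: [
				{ type: "tool-result", toolCallId: "call-old", toolName: "read_file", output: { type: "text", value: "ok" } },
			],
			ts: 2,
		},
	] as RooMessage[]

	// The orphaned tool message is removed entirely; only the summary remains.
	const effective = getEffectiveApiHistory(history)
	console.log(effective.length) // 1
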
+ 17 - 17
src/core/context-management/__tests__/context-management.spec.ts

@@ -6,7 +6,7 @@ import type { ModelInfo } from "@roo-code/types"
 import { TelemetryService } from "@roo-code/telemetry"
 
 import { BaseProvider } from "../../../api/providers/base-provider"
-import { ApiMessage } from "../../task-persistence/apiMessages"
+
 import * as condenseModule from "../../condense"
 
 import {
@@ -61,7 +61,7 @@ describe("Context Management", () => {
 	 */
 	describe("truncateConversation", () => {
 		it("should retain the first message", () => {
-			const messages: ApiMessage[] = [
+			const messages: any[] = [
 				{ role: "user", content: "First message" },
 				{ role: "assistant", content: "Second message" },
 				{ role: "user", content: "Third message" },
@@ -80,7 +80,7 @@ describe("Context Management", () => {
 		})
 
 		it("should remove the specified fraction of messages (rounded to even number)", () => {
-			const messages: ApiMessage[] = [
+			const messages: any[] = [
 				{ role: "user", content: "First message" },
 				{ role: "assistant", content: "Second message" },
 				{ role: "user", content: "Third message" },
@@ -103,7 +103,7 @@ describe("Context Management", () => {
 
 			// Marker should be at index 3 (at the boundary, after truncated messages)
 			expect(result.messages[3].isTruncationMarker).toBe(true)
-			expect(result.messages[3].role).toBe("user")
+			expect((result.messages[3] as any).role).toBe("user")
 
 			// Messages at indices 3 and 4 from original should NOT be tagged (now at indices 4 and 5)
 			expect(result.messages[4].truncationParent).toBeUndefined()
@@ -111,7 +111,7 @@ describe("Context Management", () => {
 		})
 
 		it("should round to an even number of messages to remove", () => {
-			const messages: ApiMessage[] = [
+			const messages: any[] = [
 				{ role: "user", content: "First message" },
 				{ role: "assistant", content: "Second message" },
 				{ role: "user", content: "Third message" },
@@ -131,7 +131,7 @@ describe("Context Management", () => {
 		})
 
 		it("should handle edge case with fracToRemove = 0", () => {
-			const messages: ApiMessage[] = [
+			const messages: any[] = [
 				{ role: "user", content: "First message" },
 				{ role: "assistant", content: "Second message" },
 				{ role: "user", content: "Third message" },
@@ -145,7 +145,7 @@ describe("Context Management", () => {
 		})
 
 		it("should handle edge case with fracToRemove = 1", () => {
-			const messages: ApiMessage[] = [
+			const messages: any[] = [
 				{ role: "user", content: "First message" },
 				{ role: "assistant", content: "Second message" },
 				{ role: "user", content: "Third message" },
@@ -167,7 +167,7 @@ describe("Context Management", () => {
 
 			// Marker should be at index 3 (at the boundary)
 			expect(result.messages[3].isTruncationMarker).toBe(true)
-			expect(result.messages[3].role).toBe("user")
+			expect((result.messages[3] as any).role).toBe("user")
 
 			// Last message should NOT be tagged (now at index 4)
 			expect(result.messages[4].truncationParent).toBeUndefined()
@@ -273,7 +273,7 @@ describe("Context Management", () => {
 			maxTokens,
 		})
 
-		const messages: ApiMessage[] = [
+		const messages: any[] = [
 			{ role: "user", content: "First message" },
 			{ role: "assistant", content: "Second message" },
 			{ role: "user", content: "Third message" },
@@ -446,7 +446,7 @@ describe("Context Management", () => {
 			// Test case 1: Small content that won't push us over the threshold
 			const smallContent = [{ type: "text" as const, text: "Small content" }]
 			const smallContentTokens = await estimateTokenCount(smallContent, mockApiHandler)
-			const messagesWithSmallContent: ApiMessage[] = [
+			const messagesWithSmallContent: any[] = [
 				...messages.slice(0, -1),
 				{ role: messages[messages.length - 1].role, content: smallContent },
 			]
@@ -482,7 +482,7 @@ describe("Context Management", () => {
 				},
 			]
 			const largeContentTokens = await estimateTokenCount(largeContent, mockApiHandler)
-			const messagesWithLargeContent: ApiMessage[] = [
+			const messagesWithLargeContent: any[] = [
 				...messages.slice(0, -1),
 				{ role: messages[messages.length - 1].role, content: largeContent },
 			]
@@ -510,7 +510,7 @@ describe("Context Management", () => {
 			// Test case 3: Very large content that will definitely exceed threshold
 			const veryLargeContent = [{ type: "text" as const, text: "X".repeat(1000) }]
 			const veryLargeContentTokens = await estimateTokenCount(veryLargeContent, mockApiHandler)
-			const messagesWithVeryLargeContent: ApiMessage[] = [
+			const messagesWithVeryLargeContent: any[] = [
 				...messages.slice(0, -1),
 				{ role: messages[messages.length - 1].role, content: veryLargeContent },
 			]
@@ -858,7 +858,7 @@ describe("Context Management", () => {
 			maxTokens,
 		})
 
-		const messages: ApiMessage[] = [
+		const messages: any[] = [
 			{ role: "user", content: "First message" },
 			{ role: "assistant", content: "Second message" },
 			{ role: "user", content: "Third message" },
@@ -1067,7 +1067,7 @@ describe("Context Management", () => {
 			maxTokens,
 		})
 
-		const messages: ApiMessage[] = [
+		const messages: any[] = [
 			{ role: "user", content: "First message" },
 			{ role: "assistant", content: "Second message" },
 			{ role: "user", content: "Third message" },
@@ -1275,7 +1275,7 @@ describe("Context Management", () => {
 		})
 
 		// Reuse across tests for consistency
-		const messages: ApiMessage[] = [
+		const messages: any[] = [
 			{ role: "user", content: "First message" },
 			{ role: "assistant", content: "Second message" },
 			{ role: "user", content: "Third message" },
@@ -1623,7 +1623,7 @@ describe("Context Management", () => {
 			const modelInfo = createModelInfo(100000, 30000)
 			const totalTokens = 70001 // Above threshold to trigger truncation
 
-			const messages: ApiMessage[] = [
+			const messages: any[] = [
 				{ role: "user", content: "First message" },
 				{ role: "assistant", content: "Second message" },
 				{ role: "user", content: "Third message" },
@@ -1665,7 +1665,7 @@ describe("Context Management", () => {
 			const modelInfo = createModelInfo(100000, 30000)
 			const totalTokens = 70001 // Above threshold to trigger truncation
 
-			const messages: ApiMessage[] = [
+			const messages: any[] = [
 				{ role: "user", content: "First message" },
 				{ role: "assistant", content: "Second message" },
 				{ role: "user", content: "Third message" },

+ 26 - 27
src/core/context-management/__tests__/truncation.spec.ts

@@ -2,10 +2,9 @@ import { describe, it, expect, beforeEach } from "vitest"
 import { TelemetryService } from "@roo-code/telemetry"
 import { truncateConversation } from "../index"
 import { getEffectiveApiHistory, cleanupAfterTruncation } from "../../condense"
-import { ApiMessage } from "../../task-persistence/apiMessages"
 
 describe("Non-Destructive Sliding Window Truncation", () => {
-	let messages: ApiMessage[]
+	let messages: any[]
 
 	beforeEach(() => {
 		// Initialize TelemetryService for tests
@@ -66,8 +65,8 @@ describe("Non-Destructive Sliding Window Truncation", () => {
 			expect(marker!.isTruncationMarker).toBe(true)
 			expect(marker!.truncationId).toBeDefined()
 			expect(marker!.truncationId).toBe(result.truncationId)
-			expect(marker!.role).toBe("user")
-			expect(marker!.content).toContain("Sliding window truncation")
+			expect((marker as any).role).toBe("user")
+			expect((marker as any).content).toContain("Sliding window truncation")
 		})
 
 		it("should return truncationId and messagesRemoved", () => {
@@ -80,7 +79,7 @@ describe("Non-Destructive Sliding Window Truncation", () => {
 
 		it("should round messagesToRemove to an even number", () => {
 			// Test with 12 messages (1 initial + 11 conversation)
-			const manyMessages: ApiMessage[] = [
+			const manyMessages: any[] = [
 				{ role: "user", content: "Initial", ts: 1000 },
 				...Array.from({ length: 11 }, (_, i) => ({
 					role: (i % 2 === 0 ? "assistant" : "user") as "assistant" | "user",
@@ -99,7 +98,7 @@ describe("Non-Destructive Sliding Window Truncation", () => {
 	describe("getEffectiveApiHistory()", () => {
 		it("should filter out truncated messages when truncation marker exists", () => {
 			const truncationResult = truncateConversation(messages, 0.5, "test-task-id")
-			const effective = getEffectiveApiHistory(truncationResult.messages)
+			const effective = getEffectiveApiHistory(truncationResult.messages as any)
 
 			// Should exclude 4 truncated messages but keep the first message and truncation marker
 			// Original: 11 messages
@@ -108,7 +107,7 @@ describe("Non-Destructive Sliding Window Truncation", () => {
 			expect(effective.length).toBe(8)
 
 			// First message should be present
-			expect(effective[0].content).toBe("Initial task")
+			expect((effective[0] as any).content).toBe("Initial task")
 
 			// Truncation marker should be present
 			expect(effective[1].isTruncationMarker).toBe(true)
@@ -127,19 +126,19 @@ describe("Non-Destructive Sliding Window Truncation", () => {
 			// Remove the truncation marker (simulate rewind past truncation)
 			const messagesWithoutMarker = truncationResult.messages.filter((msg) => !msg.isTruncationMarker)
 
-			const effective = getEffectiveApiHistory(messagesWithoutMarker)
+			const effective = getEffectiveApiHistory(messagesWithoutMarker as any)
 
 			// All messages should be visible now
 			expect(effective.length).toBe(messages.length)
 
 			// Verify first and last messages are present
-			expect(effective[0].content).toBe("Initial task")
-			expect(effective[effective.length - 1].content).toBe("Message 6")
+			expect((effective[0] as any).content).toBe("Initial task")
+			expect((effective[effective.length - 1] as any).content).toBe("Message 6")
 		})
 
 		it("should handle both condenseParent and truncationParent filtering", () => {
 			// Create a scenario with both condensing and truncation
-			const messagesWithCondense: ApiMessage[] = [
+			const messagesWithCondense: any[] = [
 				{ role: "user", content: "Initial", ts: 1000 },
 				{ role: "assistant", content: "Msg 1", ts: 1100, condenseParent: "condense-1" },
 				{ role: "user", content: "Msg 2", ts: 1200, condenseParent: "condense-1" },
@@ -155,7 +154,7 @@ describe("Non-Destructive Sliding Window Truncation", () => {
 			]
 
 			const truncationResult = truncateConversation(messagesWithCondense, 0.5, "test-task-id")
-			const effective = getEffectiveApiHistory(truncationResult.messages)
+			const effective = getEffectiveApiHistory(truncationResult.messages as any)
 
 			// Should filter both condensed messages and truncated messages
 			// Messages with condenseParent="condense-1" should be filtered (summary exists)
@@ -199,7 +198,7 @@ describe("Non-Destructive Sliding Window Truncation", () => {
 		})
 
 		it("should handle both condenseParent and truncationParent cleanup", () => {
-			const messagesWithBoth: ApiMessage[] = [
+			const messagesWithBoth: any[] = [
 				{ role: "user", content: "Initial", ts: 1000 },
 				{ role: "assistant", content: "Msg 1", ts: 1100, condenseParent: "orphan-condense" },
 				{ role: "user", content: "Msg 2", ts: 1200, truncationParent: "orphan-truncation" },
@@ -214,7 +213,7 @@ describe("Non-Destructive Sliding Window Truncation", () => {
 		})
 
 		it("should preserve valid parent references", () => {
-			const messagesWithValidParents: ApiMessage[] = [
+			const messagesWithValidParents: any[] = [
 				{ role: "user", content: "Initial", ts: 1000 },
 				{ role: "assistant", content: "Msg 1", ts: 1100, condenseParent: "valid-condense" },
 				{
@@ -248,7 +247,7 @@ describe("Non-Destructive Sliding Window Truncation", () => {
 			const truncationResult = truncateConversation(messages, 0.5, "test-task-id")
 
 			// Step 2: Verify messages are hidden initially
-			const effectiveBeforeRewind = getEffectiveApiHistory(truncationResult.messages)
+			const effectiveBeforeRewind = getEffectiveApiHistory(truncationResult.messages as any)
 			expect(effectiveBeforeRewind.length).toBeLessThan(messages.length)
 
 			// Step 3: Simulate rewind by removing truncation marker and subsequent messages
@@ -260,7 +259,7 @@ describe("Non-Destructive Sliding Window Truncation", () => {
 			const cleanedAfterRewind = cleanupAfterTruncation(messagesAfterRewind)
 
 			// Step 5: Get effective history after cleanup
-			const effectiveAfterRewind = getEffectiveApiHistory(cleanedAfterRewind)
+			const effectiveAfterRewind = getEffectiveApiHistory(cleanedAfterRewind as any)
 
 			// All original messages before the marker should be restored
 			expect(effectiveAfterRewind.length).toBe(markerIndex)
@@ -276,8 +275,8 @@ describe("Non-Destructive Sliding Window Truncation", () => {
 			const firstTruncation = truncateConversation(messages, 0.5, "task-1")
 
 			// Step 2: Get effective history and simulate more messages being added
-			const effectiveAfterFirst = getEffectiveApiHistory(firstTruncation.messages)
-			const moreMessages: ApiMessage[] = [
+			const effectiveAfterFirst = getEffectiveApiHistory(firstTruncation.messages as any)
+			const moreMessages: any[] = [
 				...firstTruncation.messages,
 				{ role: "user", content: "New message 1", ts: 3000 },
 				{ role: "assistant", content: "New response 1", ts: 3100 },
@@ -289,7 +288,7 @@ describe("Non-Destructive Sliding Window Truncation", () => {
 			const secondTruncation = truncateConversation(moreMessages, 0.5, "task-1")
 
 			// Step 4: Get effective history after second truncation
-			const effectiveAfterSecond = getEffectiveApiHistory(secondTruncation.messages)
+			const effectiveAfterSecond = getEffectiveApiHistory(secondTruncation.messages as any)
 
 			// Should have messages hidden by both truncations filtered out
 			const firstMarker = secondTruncation.messages.find(
@@ -319,8 +318,8 @@ describe("Non-Destructive Sliding Window Truncation", () => {
 
 			// Step 2: Add more messages AFTER getting effective history
 			// This simulates real usage where we only send effective messages to API
-			const effectiveAfterFirst = getEffectiveApiHistory(firstTruncation.messages)
-			const moreMessages: ApiMessage[] = [
+			const effectiveAfterFirst = getEffectiveApiHistory(firstTruncation.messages as any)
+			const moreMessages: any[] = [
 				...firstTruncation.messages, // Keep full history with tagged messages
 				{ role: "user", content: "New message 1", ts: 3000 },
 				{ role: "assistant", content: "New response 1", ts: 3100 },
@@ -341,7 +340,7 @@ describe("Non-Destructive Sliding Window Truncation", () => {
 			const cleaned = cleanupAfterTruncation(afterSecondRewind)
 
 			// Step 6: Get effective history
-			const effective = getEffectiveApiHistory(cleaned)
+			const effective = getEffectiveApiHistory(cleaned as any)
 
 			// The second truncation marker should be removed
 			const hasSecondTruncationMarker = effective.some(
@@ -376,7 +375,7 @@ describe("Non-Destructive Sliding Window Truncation", () => {
 		})
 
 		it("should handle truncateConversation with very few messages", () => {
-			const fewMessages: ApiMessage[] = [
+			const fewMessages: any[] = [
 				{ role: "user", content: "Initial", ts: 1000 },
 				{ role: "assistant", content: "Response", ts: 1100 },
 			]
@@ -392,7 +391,7 @@ describe("Non-Destructive Sliding Window Truncation", () => {
 		it("should handle truncating all visible messages except first", () => {
 			// This tests the edge case where visibleIndices[messagesToRemove + 1] would be undefined
 			// 3 messages total: first is preserved, 2 others can be truncated
-			const threeMessages: ApiMessage[] = [
+			const threeMessages: any[] = [
 				{ role: "user", content: "Initial", ts: 1000 },
 				{ role: "assistant", content: "Response 1", ts: 1100 },
 				{ role: "user", content: "Message 2", ts: 1200 },
@@ -411,7 +410,7 @@ describe("Non-Destructive Sliding Window Truncation", () => {
 
 			// First message should be untouched
 			expect(result.messages[0].truncationParent).toBeUndefined()
-			expect(result.messages[0].content).toBe("Initial")
+			expect((result.messages[0] as any).content).toBe("Initial")
 
 			// Messages at indices 1 and 2 should be tagged
 			expect(result.messages[1].truncationParent).toBe(result.truncationId)
@@ -419,11 +418,11 @@ describe("Non-Destructive Sliding Window Truncation", () => {
 
 			// Marker should be at the end (index 3)
 			expect(result.messages[3].isTruncationMarker).toBe(true)
-			expect(result.messages[3].role).toBe("user")
+			expect((result.messages[3] as any).role).toBe("user")
 		})
 
 		it("should handle empty condenseParent and truncationParent gracefully", () => {
-			const messagesWithoutTags: ApiMessage[] = [
+			const messagesWithoutTags: any[] = [
 				{ role: "user", content: "Message 1", ts: 1000 },
 				{ role: "assistant", content: "Response 1", ts: 1100 },
 			]

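The rewind round-trip these tests exercise, condensed into one sketch (import paths per the spec above; message shapes per its beforeEach):

	import { truncateConversation } from "../index"
	import { getEffectiveApiHistory, cleanupAfterTruncation } from "../../condense"

	const messages: any[] = [
		{ role: "user", content: "Initial task", ts: 1000 },
		...Array.from({ length: 6 }, (_, i) => ({
			role: i % 2 === 0 ? "assistant" : "user",
			content: `Message ${i + 1}`,
			ts: 1100 + i * 100,
		})),
	]

	// 1. Non-destructive truncation: hidden messages are tagged, not deleted.
	const { messages: tagged } = truncateConversation(messages, 0.5, "task-1")

	// 2. Rewind past the marker: drop it, then clear the now-orphaned tags.
	const rewound = cleanupAfterTruncation(tagged.filter((m) => !m.isTruncationMarker))

	// 3. Every original message is visible to the API again.
	const effective = getEffectiveApiHistory(rewound)
	console.log(effective.length === messages.length) // true
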
+ 20 - 21
src/core/context-management/index.ts

@@ -1,11 +1,11 @@
-import { Anthropic } from "@anthropic-ai/sdk"
 import crypto from "crypto"
 
 import { TelemetryService } from "@roo-code/telemetry"
 
 import { ApiHandler, ApiHandlerCreateMessageMetadata } from "../../api"
 import { MAX_CONDENSE_THRESHOLD, MIN_CONDENSE_THRESHOLD, summarizeConversation, SummarizeResponse } from "../condense"
-import { ApiMessage } from "../task-persistence/apiMessages"
+import type { RooMessage, ContentBlockParam } from "../task-persistence/rooMessage"
+import { isRooRoleMessage } from "../task-persistence/rooMessage"
 import { ANTHROPIC_DEFAULT_MAX_TOKENS } from "@roo-code/types"
 import { RooIgnoreController } from "../ignore/RooIgnoreController"
 
@@ -28,23 +28,22 @@ export const TOKEN_BUFFER_PERCENTAGE = 0.1
 /**
  * Counts tokens for user content using the provider's token counting implementation.
  *
- * @param {Array<Anthropic.Messages.ContentBlockParam>} content - The content to count tokens for
- * @param {ApiHandler} apiHandler - The API handler to use for token counting
- * @returns {Promise<number>} A promise resolving to the token count
+ * @param content - The content to count tokens for
+ * @param apiHandler - The API handler to use for token counting
+ * @returns A promise resolving to the token count
  */
-export async function estimateTokenCount(
-	content: Array<Anthropic.Messages.ContentBlockParam>,
-	apiHandler: ApiHandler,
-): Promise<number> {
+export async function estimateTokenCount(content: ContentBlockParam[], apiHandler: ApiHandler): Promise<number> {
 	if (!content || content.length === 0) return 0
-	return apiHandler.countTokens(content)
+	// countTokens accepts Anthropic.Messages.ContentBlockParam[] — our { type, text? }
+	// blocks are structurally compatible with TextBlockParam.
+	return apiHandler.countTokens(content as Parameters<typeof apiHandler.countTokens>[0])
 }
 
 /**
  * Result of truncation operation, includes the truncation ID for UI events.
  */
 export type TruncationResult = {
-	messages: ApiMessage[]
+	messages: RooMessage[]
 	truncationId: string
 	messagesRemoved: number
 }
@@ -59,12 +58,12 @@ export type TruncationResult = {
  * This implements non-destructive sliding window truncation, allowing messages to be
  * restored if the user rewinds past the truncation point.
  *
- * @param {ApiMessage[]} messages - The conversation messages.
+ * @param {RooMessage[]} messages - The conversation messages.
  * @param {number} fracToRemove - The fraction (between 0 and 1) of messages (excluding the first) to hide.
  * @param {string} taskId - The task ID for the conversation, used for telemetry
  * @returns {TruncationResult} Object containing the tagged messages, truncation ID, and count of messages removed.
  */
-export function truncateConversation(messages: ApiMessage[], fracToRemove: number, taskId: string): TruncationResult {
+export function truncateConversation(messages: RooMessage[], fracToRemove: number, taskId: string): TruncationResult {
 	TelemetryService.instance.captureSlidingWindowTruncation(taskId)
 
 	const truncationId = crypto.randomUUID()
@@ -110,7 +109,7 @@ export function truncateConversation(messages: ApiMessage[], fracToRemove: numbe
 
 	// Insert truncation marker at the actual boundary (between last truncated and first kept)
 	const firstKeptTs = messages[firstKeptVisibleIndex]?.ts ?? Date.now()
-	const truncationMarker: ApiMessage = {
+	const truncationMarker: RooMessage = {
 		role: "user",
 		content: `[Sliding window truncation: ${messagesToRemove} messages hidden to reduce context]`,
 		ts: firstKeptTs - 1,
@@ -203,11 +202,11 @@ export function willManageContext({
  * Falls back to sliding window truncation if condensation is unavailable or fails.
  *
  * @param {ContextManagementOptions} options - The options for truncation/condensation
- * @returns {Promise<ApiMessage[]>} The original, condensed, or truncated conversation messages.
+ * @returns {Promise<RooMessage[]>} The original, condensed, or truncated conversation messages.
  */
 
 export type ContextManagementOptions = {
-	messages: ApiMessage[]
+	messages: RooMessage[]
 	totalTokens: number
 	contextWindow: number
 	maxTokens?: number | null
@@ -242,7 +241,7 @@ export type ContextManagementResult = SummarizeResponse & {
  * Conditionally manages conversation context (condense and fallback truncation).
  *
  * @param {ContextManagementOptions} options - The options for truncation/condensation
- * @returns {Promise<ApiMessage[]>} The original, condensed, or truncated conversation messages.
+ * @returns {Promise<RooMessage[]>} The original, condensed, or truncated conversation messages.
  */
 export async function manageContext({
 	messages,
@@ -271,9 +270,9 @@ export async function manageContext({
 
 	// Estimate tokens for the last message (which is always a user message)
 	const lastMessage = messages[messages.length - 1]
-	const lastMessageContent = lastMessage.content
+	const lastMessageContent = isRooRoleMessage(lastMessage) ? lastMessage.content : ""
 	const lastMessageTokens = Array.isArray(lastMessageContent)
-		? await estimateTokenCount(lastMessageContent, apiHandler)
+		? await estimateTokenCount(lastMessageContent as ContentBlockParam[], apiHandler)
 		: await estimateTokenCount([{ type: "text", text: lastMessageContent as string }], apiHandler)
 
 	// Calculate total effective tokens (totalTokens never includes the last message)
@@ -348,9 +347,9 @@ export async function manageContext({
 		)
 
 		for (const msg of effectiveMessages) {
-			const content = msg.content
+			const content = isRooRoleMessage(msg) ? msg.content : undefined
 			if (Array.isArray(content)) {
-				newContextTokensAfterTruncation += await estimateTokenCount(content, apiHandler)
+				newContextTokensAfterTruncation += await estimateTokenCount(content as ContentBlockParam[], apiHandler)
 			} else if (typeof content === "string") {
 				newContextTokensAfterTruncation += await estimateTokenCount(
 					[{ type: "text", text: content }],

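The string-vs-array branch that manageContext now runs per message, isolated as a hypothetical helper (tokensForContent is not in the codebase; imports illustrative):

	import type { ApiHandler } from "../../api"
	import { estimateTokenCount } from "../context-management"
	import type { ContentBlockParam } from "../task-persistence"

	async function tokensForContent(content: string | ContentBlockParam[], apiHandler: ApiHandler): Promise<number> {
		// Array content is counted block by block; a plain string is wrapped in
		// a single text block first, exactly as manageContext does above.
		return Array.isArray(content)
			? estimateTokenCount(content, apiHandler)
			: estimateTokenCount([{ type: "text", text: content }], apiHandler)
	}
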
+ 6 - 184
src/core/mentions/__tests__/processUserContentMentions.spec.ts

@@ -74,81 +74,7 @@ describe("processUserContentMentions", () => {
 			expect(result.mode).toBeUndefined()
 		})
 
-		it("should process tool_result blocks with string content", async () => {
-			const userContent = [
-				{
-					type: "tool_result" as const,
-					tool_use_id: "123",
-					content: "<user_message>Tool feedback</user_message>",
-				},
-			]
-
-			const result = await processUserContentMentions({
-				userContent,
-				cwd: "/test",
-				urlContentFetcher: mockUrlContentFetcher,
-				fileContextTracker: mockFileContextTracker,
-			})
-
-			expect(parseMentions).toHaveBeenCalled()
-			// String content is now converted to array format to support content blocks
-			expect(result.content[0]).toEqual({
-				type: "tool_result",
-				tool_use_id: "123",
-				content: [
-					{
-						type: "text",
-						text: "parsed: <user_message>Tool feedback</user_message>",
-					},
-				],
-			})
-			expect(result.mode).toBeUndefined()
-		})
-
-		it("should process tool_result blocks with array content", async () => {
-			const userContent = [
-				{
-					type: "tool_result" as const,
-					tool_use_id: "123",
-					content: [
-						{
-							type: "text" as const,
-							text: "<user_message>Array task</user_message>",
-						},
-						{
-							type: "text" as const,
-							text: "Regular text",
-						},
-					],
-				},
-			]
-
-			const result = await processUserContentMentions({
-				userContent,
-				cwd: "/test",
-				urlContentFetcher: mockUrlContentFetcher,
-				fileContextTracker: mockFileContextTracker,
-			})
-
-			expect(parseMentions).toHaveBeenCalledTimes(1)
-			expect(result.content[0]).toEqual({
-				type: "tool_result",
-				tool_use_id: "123",
-				content: [
-					{
-						type: "text",
-						text: "parsed: <user_message>Array task</user_message>",
-					},
-					{
-						type: "text",
-						text: "Regular text",
-					},
-				],
-			})
-			expect(result.mode).toBeUndefined()
-		})
-
-		it("should handle mixed content types", async () => {
+		it("should handle mixed content types (text + image)", async () => {
 			const userContent = [
 				{
 					type: "text" as const,
@@ -156,44 +82,25 @@ describe("processUserContentMentions", () => {
 				},
 				{
 					type: "image" as const,
-					source: {
-						type: "base64" as const,
-						media_type: "image/png" as const,
-						data: "base64data",
-					},
-				},
-				{
-					type: "tool_result" as const,
-					tool_use_id: "456",
-					content: "<user_message>Feedback</user_message>",
+					image: "base64data",
+					mediaType: "image/png",
 				},
 			]
 
 			const result = await processUserContentMentions({
-				userContent,
+				userContent: userContent as any,
 				cwd: "/test",
 				urlContentFetcher: mockUrlContentFetcher,
 				fileContextTracker: mockFileContextTracker,
 			})
 
-			expect(parseMentions).toHaveBeenCalledTimes(2)
-			expect(result.content).toHaveLength(3)
+			expect(parseMentions).toHaveBeenCalledTimes(1)
+			expect(result.content).toHaveLength(2)
 			expect(result.content[0]).toEqual({
 				type: "text",
 				text: "parsed: <user_message>First task</user_message>",
 			})
 			expect(result.content[1]).toEqual(userContent[1]) // Image block unchanged
-			// String content is now converted to array format to support content blocks
-			expect(result.content[2]).toEqual({
-				type: "tool_result",
-				tool_use_id: "456",
-				content: [
-					{
-						type: "text",
-						text: "parsed: <user_message>Feedback</user_message>",
-					},
-				],
-			})
 			expect(result.mode).toBeUndefined()
 		})
 	})
@@ -288,90 +195,5 @@ describe("processUserContentMentions", () => {
 				text: "command help",
 			})
 		})
-
-		it("should include slash command content in tool_result string content", async () => {
-			vi.mocked(parseMentions).mockResolvedValueOnce({
-				text: "parsed tool output",
-				slashCommandHelp: "command help",
-				mode: undefined,
-				contentBlocks: [],
-			})
-
-			const userContent = [
-				{
-					type: "tool_result" as const,
-					tool_use_id: "123",
-					content: "<user_message>Tool output</user_message>",
-				},
-			]
-
-			const result = await processUserContentMentions({
-				userContent,
-				cwd: "/test",
-				urlContentFetcher: mockUrlContentFetcher,
-				fileContextTracker: mockFileContextTracker,
-			})
-
-			expect(result.content).toHaveLength(1)
-			expect(result.content[0]).toEqual({
-				type: "tool_result",
-				tool_use_id: "123",
-				content: [
-					{
-						type: "text",
-						text: "parsed tool output",
-					},
-					{
-						type: "text",
-						text: "command help",
-					},
-				],
-			})
-		})
-
-		it("should include slash command content in tool_result array content", async () => {
-			vi.mocked(parseMentions).mockResolvedValueOnce({
-				text: "parsed array item",
-				slashCommandHelp: "command help",
-				mode: undefined,
-				contentBlocks: [],
-			})
-
-			const userContent = [
-				{
-					type: "tool_result" as const,
-					tool_use_id: "123",
-					content: [
-						{
-							type: "text" as const,
-							text: "<user_message>Array item</user_message>",
-						},
-					],
-				},
-			]
-
-			const result = await processUserContentMentions({
-				userContent,
-				cwd: "/test",
-				urlContentFetcher: mockUrlContentFetcher,
-				fileContextTracker: mockFileContextTracker,
-			})
-
-			expect(result.content).toHaveLength(1)
-			expect(result.content[0]).toEqual({
-				type: "tool_result",
-				tool_use_id: "123",
-				content: [
-					{
-						type: "text",
-						text: "parsed array item",
-					},
-					{
-						type: "text",
-						text: "command help",
-					},
-				],
-			})
-		})
 	})
 })

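For reference, the image-part shape these tests migrated from and to (values illustrative):

	// Legacy Anthropic block (what the removed tests constructed):
	const legacyImage = {
		type: "image" as const,
		source: { type: "base64" as const, media_type: "image/png" as const, data: "base64data" },
	}

	// AI SDK ImagePart (what the updated test constructs):
	const sdkImage = {
		type: "image" as const,
		image: "base64data",
		mediaType: "image/png",
	}
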
+ 12 - 117
src/core/mentions/processUserContentMentions.ts

@@ -1,19 +1,19 @@
-import { Anthropic } from "@anthropic-ai/sdk"
+import type { TextPart, ImagePart } from "../task-persistence/rooMessage"
 import { parseMentions, ParseMentionsResult, MentionContentBlock } from "./index"
 import { UrlContentFetcher } from "../../services/browser/UrlContentFetcher"
 import { FileContextTracker } from "../context-tracking/FileContextTracker"
 
 export interface ProcessUserContentMentionsResult {
-	content: Anthropic.Messages.ContentBlockParam[]
+	content: Array<TextPart | ImagePart>
 	mode?: string // Mode from the first slash command that has one
 }
 
 /**
- * Converts MentionContentBlocks to Anthropic text blocks.
+ * Converts MentionContentBlocks to TextPart blocks.
  * Each file/folder mention becomes a separate text block formatted
  * to look like a read_file tool result.
  */
-function contentBlocksToAnthropicBlocks(contentBlocks: MentionContentBlock[]): Anthropic.Messages.TextBlockParam[] {
+function contentBlocksToTextParts(contentBlocks: MentionContentBlock[]): TextPart[] {
 	return contentBlocks.map((block) => ({
 		type: "text" as const,
 		text: block.content,
@@ -37,7 +37,7 @@ export async function processUserContentMentions({
 	includeDiagnosticMessages = true,
 	maxDiagnosticMessages = 50,
 }: {
-	userContent: Anthropic.Messages.ContentBlockParam[]
+	userContent: Array<TextPart | ImagePart>
 	cwd: string
 	urlContentFetcher: UrlContentFetcher
 	fileContextTracker: FileContextTracker
@@ -49,13 +49,8 @@ export async function processUserContentMentions({
 	// Track the first mode found from slash commands
 	let commandMode: string | undefined
 
-	// Process userContent array, which contains various block types:
-	// TextBlockParam, ImageBlockParam, ToolUseBlockParam, and ToolResultBlockParam.
-	// We need to apply parseMentions() to:
-	// 1. All TextBlockParam's text (first user message)
-	// 2. ToolResultBlockParam's content/context text arrays if it contains
-	// "<user_message>" - we place all user generated content in this tag
-	// so it can effectively be used as a marker for when we should parse mentions.
+	// Process userContent array, which contains text and image parts.
+	// We apply parseMentions() to any TextPart whose text contains "<user_message>".
 	const content = (
 		await Promise.all(
 			userContent.map(async (block) => {
@@ -82,7 +77,7 @@ export async function processUserContentMentions({
 						// 1. User's text (with @ mentions replaced by clean paths)
 						// 2. File/folder content blocks (formatted like read_file results)
 						// 3. Slash command help (if any)
-						const blocks: Anthropic.Messages.ContentBlockParam[] = [
+						const blocks: Array<TextPart | ImagePart> = [
 							{
 								...block,
 								text: result.text,
@@ -91,7 +86,7 @@ export async function processUserContentMentions({
 
 						// Add file/folder content as separate blocks
 						if (result.contentBlocks.length > 0) {
-							blocks.push(...contentBlocksToAnthropicBlocks(result.contentBlocks))
+							blocks.push(...contentBlocksToTextParts(result.contentBlocks))
 						}
 
 						if (result.slashCommandHelp) {
@@ -103,115 +98,15 @@ export async function processUserContentMentions({
 						return blocks
 					}
 
-					return block
-				} else if (block.type === "tool_result") {
-					if (typeof block.content === "string") {
-						if (shouldProcessMentions(block.content)) {
-							const result = await parseMentions(
-								block.content,
-								cwd,
-								urlContentFetcher,
-								fileContextTracker,
-								rooIgnoreController,
-								showRooIgnoredFiles,
-								includeDiagnosticMessages,
-								maxDiagnosticMessages,
-							)
-							// Capture the first mode found
-							if (!commandMode && result.mode) {
-								commandMode = result.mode
-							}
-
-							// Build content array with file blocks included
-							const contentParts: Array<{ type: "text"; text: string }> = [
-								{
-									type: "text" as const,
-									text: result.text,
-								},
-							]
-
-							// Add file/folder content blocks
-							for (const contentBlock of result.contentBlocks) {
-								contentParts.push({
-									type: "text" as const,
-									text: contentBlock.content,
-								})
-							}
-
-							if (result.slashCommandHelp) {
-								contentParts.push({
-									type: "text" as const,
-									text: result.slashCommandHelp,
-								})
-							}
-
-							return {
-								...block,
-								content: contentParts,
-							}
-						}
-
-						return block
-					} else if (Array.isArray(block.content)) {
-						const parsedContent = (
-							await Promise.all(
-								block.content.map(async (contentBlock) => {
-									if (contentBlock.type === "text" && shouldProcessMentions(contentBlock.text)) {
-										const result = await parseMentions(
-											contentBlock.text,
-											cwd,
-											urlContentFetcher,
-											fileContextTracker,
-											rooIgnoreController,
-											showRooIgnoredFiles,
-											includeDiagnosticMessages,
-											maxDiagnosticMessages,
-										)
-										// Capture the first mode found
-										if (!commandMode && result.mode) {
-											commandMode = result.mode
-										}
-
-										// Build blocks array with file content
-										const blocks: Array<{ type: "text"; text: string }> = [
-											{
-												...contentBlock,
-												text: result.text,
-											},
-										]
-
-										// Add file/folder content blocks
-										for (const cb of result.contentBlocks) {
-											blocks.push({
-												type: "text" as const,
-												text: cb.content,
-											})
-										}
-
-										if (result.slashCommandHelp) {
-											blocks.push({
-												type: "text" as const,
-												text: result.slashCommandHelp,
-											})
-										}
-										return blocks
-									}
-
-									return contentBlock
-								}),
-							)
-						).flat()
-
-						return { ...block, content: parsedContent }
-					}
-
 					return block
 				}
 
+				// Legacy backward compat: tool_result / tool-result blocks from older formats
+				// are passed through unchanged (tool results are now in separate RooToolMessages).
 				return block
 			}),
 		)
 	).flat()
 
-	return { content, mode: commandMode }
+	return { content: content as Array<TextPart | ImagePart>, mode: commandMode }
 }

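A sketch of the simplified flow (parseMentions output illustrative): a single TextPart containing <user_message> fans out into the cleaned user text plus any file-content and slash-command blocks, while every other part passes through untouched.

	const userContent = [
		{ type: "text" as const, text: "<user_message>Fix @/src/app.ts</user_message>" },
		{ type: "image" as const, image: "base64data", mediaType: "image/png" },
	]

	// After processUserContentMentions, content looks like:
	// [
	//   { type: "text", text: "Fix src/app.ts" },           // mention-cleaned user text
	//   { type: "text", text: "<file_content>" },           // appended file block
	//   { type: "image", image: "base64data", mediaType: "image/png" }, // unchanged
	// ]
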
+ 1 - 1
src/core/message-manager/index.ts

@@ -168,7 +168,7 @@ export class MessageManager {
 			// at or after the cutoff to use as the actual boundary.
 			// This ensures assistant messages that preceded the user's response are preserved.
 			const firstUserMsgIndexToRemove = apiHistory.findIndex(
-				(m) => m.ts !== undefined && m.ts >= cutoffTs && m.role === "user",
+				(m) => m.ts !== undefined && m.ts >= cutoffTs && "role" in m && m.role === "user",
 			)
 
 			if (firstUserMsgIndexToRemove !== -1) {

+ 10 - 12
src/core/prompts/responses.ts

@@ -1,6 +1,6 @@
-import { Anthropic } from "@anthropic-ai/sdk"
 import * as path from "path"
 import * as diff from "diff"
+import type { TextPart, ImagePart } from "../task-persistence/rooMessage"
 import { RooIgnoreController, LOCK_TEXT_SYMBOL } from "../ignore/RooIgnoreController"
 import { RooProtectedController } from "../protect/RooProtectedController"
 
@@ -96,13 +96,10 @@ Otherwise, if you have not completed the task and do not need additional informa
 			available_servers: availableServers.length > 0 ? availableServers : [],
 		}),
 
-	toolResult: (
-		text: string,
-		images?: string[],
-	): string | Array<Anthropic.TextBlockParam | Anthropic.ImageBlockParam> => {
+	toolResult: (text: string, images?: string[]): string | Array<TextPart | ImagePart> => {
 		if (images && images.length > 0) {
-			const textBlock: Anthropic.TextBlockParam = { type: "text", text }
-			const imageBlocks: Anthropic.ImageBlockParam[] = formatImagesIntoBlocks(images)
+			const textBlock: TextPart = { type: "text", text }
+			const imageBlocks: ImagePart[] = formatImagesIntoBlocks(images)
 			// Placing images after text leads to better results
 			return [textBlock, ...imageBlocks]
 		} else {
@@ -110,7 +107,7 @@ Otherwise, if you have not completed the task and do not need additional informa
 		}
 	},
 
-	imageBlocks: (images?: string[]): Anthropic.ImageBlockParam[] => {
+	imageBlocks: (images?: string[]): ImagePart[] => {
 		return formatImagesIntoBlocks(images)
 	},
 
@@ -202,16 +199,17 @@ Otherwise, if you have not completed the task and do not need additional informa
 }
 
 // to avoid circular dependency
-const formatImagesIntoBlocks = (images?: string[]): Anthropic.ImageBlockParam[] => {
+const formatImagesIntoBlocks = (images?: string[]): ImagePart[] => {
 	return images
 		? images.map((dataUrl) => {
 				// data:image/png;base64,base64string
 				const [rest, base64] = dataUrl.split(",")
 				const mimeType = rest.split(":")[1].split(";")[0]
 				return {
-					type: "image",
-					source: { type: "base64", media_type: mimeType, data: base64 },
-				} as Anthropic.ImageBlockParam
+					type: "image" as const,
+					image: base64,
+					mediaType: mimeType,
+				}
 			})
 		: []
 }

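The data-URL parsing in formatImagesIntoBlocks, traced on one input:

	const dataUrl = "data:image/png;base64,iVBORw0KGgo"
	const [rest, base64] = dataUrl.split(",") // "data:image/png;base64", "iVBORw0KGgo"
	const mimeType = rest.split(":")[1].split(";")[0] // "image/png"

	// Yields the AI SDK ImagePart shape:
	// { type: "image", image: "iVBORw0KGgo", mediaType: "image/png" }
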
+ 33 - 0
src/core/task-persistence/__tests__/rooMessage.spec.ts

@@ -17,6 +17,7 @@ import {
 	type ReasoningPart,
 	type RooMessageMetadata,
 	type RooMessageHistory,
+	getToolResultIsError,
 } from "../rooMessage"
 
 // ────────────────────────────────────────────────────────────────────────────
@@ -227,3 +228,35 @@ describe("RooMessageHistory", () => {
 		expect(history.messages).toHaveLength(4)
 	})
 })
+
+describe("getToolResultIsError", () => {
+	it("returns legacy is_error for tool_result blocks", () => {
+		const block = {
+			type: "tool_result" as const,
+			tool_use_id: "tool-1",
+			content: "failed",
+			is_error: true,
+		}
+		expect(getToolResultIsError(block)).toBe(true)
+	})
+
+	it("detects [ERROR] prefix in AI SDK tool-result string output", () => {
+		const block = {
+			type: "tool-result" as const,
+			toolCallId: "tool-1",
+			toolName: "read_file",
+			// A bare string output exercises the helper's defensive string branch.
+			output: "[ERROR] failed to read file" as any,
+		}
+		expect(getToolResultIsError(block)).toBe(true)
+	})
+
+	it("detects [ERROR] prefix in AI SDK tool-result text object output", () => {
+		const block = {
+			type: "tool-result" as const,
+			toolCallId: "tool-1",
+			toolName: "read_file",
+			output: { type: "text" as const, value: "[ERROR] permission denied" },
+		}
+		expect(getToolResultIsError(block)).toBe(true)
+	})
+})

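Given the convention these tests pin down, a failing tool result is produced like this (a sketch; ToolResultPart comes from the `ai` package):

	import type { ToolResultPart } from "ai"

	const failed: ToolResultPart = {
		type: "tool-result",
		toolCallId: "tool-1",
		toolName: "read_file",
		// The AI SDK type has no is_error flag; the "[ERROR]" prefix carries it.
		output: { type: "text", value: "[ERROR] permission denied" },
	}

	// getToolResultIsError(failed) === true
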
+ 13 - 0
src/core/task-persistence/converters/anthropicToRoo.ts

@@ -290,6 +290,19 @@ export function convertAnthropicToRooMessages(messages: ApiMessage[]): RooMessag
 					}
 					continue
 				}
+
+				// Redacted thinking blocks (Anthropic safety filter)
+				// Convert to AI SDK reasoning part with redactedData in providerOptions
+				if (partAny.type === "redacted_thinking") {
+					content.push({
+						type: "reasoning",
+						text: "",
+						providerOptions: {
+							anthropic: { redactedData: partAny.data as string },
+						},
+					} as ReasoningPart)
+					continue
+				}
 			}
 
 			const assistantMsg: RooAssistantMessage = {

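The redacted_thinking mapping, traced on one block (payload illustrative):

	// Legacy Anthropic block in persisted history:
	const legacy = { type: "redacted_thinking" as const, data: "opaque-encrypted-payload" }

	// Converted AI SDK reasoning part:
	const part = {
		type: "reasoning" as const,
		text: "", // redacted content carries no visible text
		providerOptions: { anthropic: { redactedData: legacy.data } },
	}
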
+ 30 - 2
src/core/task-persistence/index.ts

@@ -4,7 +4,35 @@ export { readTaskMessages, saveTaskMessages } from "./taskMessages"
 export { taskMetadata } from "./taskMetadata"
 export type { RooMessage, RooMessageHistory, RooMessageMetadata } from "./rooMessage"
 export type { RooUserMessage, RooAssistantMessage, RooToolMessage, RooReasoningMessage } from "./rooMessage"
-export { isRooUserMessage, isRooAssistantMessage, isRooToolMessage, isRooReasoningMessage } from "./rooMessage"
-export type { TextPart, ImagePart, FilePart, ToolCallPart, ToolResultPart, ReasoningPart } from "./rooMessage"
+export type { RooRoleMessage } from "./rooMessage"
+export {
+	isRooUserMessage,
+	isRooAssistantMessage,
+	isRooToolMessage,
+	isRooReasoningMessage,
+	isRooRoleMessage,
+} from "./rooMessage"
+export type {
+	TextPart,
+	ImagePart,
+	FilePart,
+	ToolCallPart,
+	ToolResultPart,
+	ReasoningPart,
+	UserContentPart,
+	ContentBlockParam,
+} from "./rooMessage"
+export type { LegacyToolUseBlock, LegacyToolResultBlock, AnyToolCallBlock, AnyToolResultBlock } from "./rooMessage"
+export {
+	isAnyToolCallBlock,
+	isAnyToolResultBlock,
+	getToolCallId,
+	getToolCallName,
+	getToolCallInput,
+	getToolResultCallId,
+	getToolResultContent,
+	getToolResultIsError,
+	setToolResultCallId,
+} from "./rooMessage"
 export { convertAnthropicToRooMessages } from "./converters/anthropicToRoo"
 export { flattenModelMessagesToStringContent } from "./messageUtils"

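The widened barrel keeps call sites format-agnostic; a sketch of typical consumption (describeToolCall is hypothetical; import path relative to src/core):

	import { isAnyToolCallBlock, getToolCallId, getToolCallName } from "../task-persistence"

	function describeToolCall(block: { type: string }): string | undefined {
		if (!isAnyToolCallBlock(block)) return undefined
		// Works for both AI SDK "tool-call" and legacy Anthropic "tool_use" blocks.
		return `${getToolCallName(block)} (${getToolCallId(block)})`
	}
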
+ 166 - 0
src/core/task-persistence/rooMessage.ts

@@ -13,6 +13,20 @@ import type { UserModelMessage, AssistantModelMessage, ToolModelMessage, Assista
 // Re-export AI SDK content part types for convenience
 export type { TextPart, ImagePart, FilePart, ToolCallPart, ToolResultPart } from "ai"
 
+import type { TextPart, ImagePart, FilePart, ToolCallPart, ToolResultPart } from "ai"
+
+/**
+ * Union of content parts that can appear in a user message's content array.
+ */
+export type UserContentPart = TextPart | ImagePart | FilePart
+
+/**
+ * A minimal content block with a type discriminator and optional text.
+ * Structurally compatible with Anthropic's `TextBlockParam` (which `countTokens` accepts)
+ * without importing provider-specific types.
+ */
+export type ContentBlockParam = { type: string; text?: string }
+
 /**
  * `ReasoningPart` is used by the AI SDK in `AssistantContent` but is not directly
  * exported from `"ai"`. We extract it from the `AssistantContent` union to get the
@@ -101,6 +115,12 @@ export interface RooReasoningMessage extends RooMessageMetadata {
  */
 export type RooMessage = RooUserMessage | RooAssistantMessage | RooToolMessage | RooReasoningMessage
 
+/**
+ * Union of RooMessage types that have a `role` property (i.e. everything except
+ * {@link RooReasoningMessage}). Useful for narrowing before accessing `.role` or `.content`.
+ */
+export type RooRoleMessage = RooUserMessage | RooAssistantMessage | RooToolMessage
+
 // ────────────────────────────────────────────────────────────────────────────
 // Storage Wrapper
 // ────────────────────────────────────────────────────────────────────────────
@@ -150,3 +170,149 @@ export function isRooToolMessage(msg: RooMessage): msg is RooToolMessage {
 export function isRooReasoningMessage(msg: RooMessage): msg is RooReasoningMessage {
 	return "type" in msg && (msg as RooReasoningMessage).type === "reasoning" && !("role" in msg)
 }
+
+/**
+ * Type guard that checks whether a message is a {@link RooRoleMessage}
+ * (i.e. any message with a `role` property — user, assistant, or tool).
+ */
+export function isRooRoleMessage(msg: RooMessage): msg is RooRoleMessage {
+	return "role" in msg
+}
+
+// ────────────────────────────────────────────────────────────────────────────
+// Content Part Type Guards
+// ────────────────────────────────────────────────────────────────────────────
+
+/** Type guard for AI SDK `TextPart` content blocks. */
+export function isTextPart(part: { type: string }): part is TextPart {
+	return part.type === "text"
+}
+
+/** Type guard for AI SDK `ToolCallPart` content blocks. */
+export function isToolCallPart(part: { type: string }): part is ToolCallPart {
+	return part.type === "tool-call"
+}
+
+/** Type guard for AI SDK `ToolResultPart` content blocks. */
+export function isToolResultPart(part: { type: string }): part is ToolResultPart {
+	return part.type === "tool-result"
+}
+
+/** Type guard for AI SDK `ImagePart` content blocks. */
+export function isImagePart(part: { type: string }): part is ImagePart {
+	return part.type === "image"
+}
+
+// ────────────────────────────────────────────────────────────────────────────
+// Legacy (Anthropic) Block Types — for dual-format backward compatibility
+// ────────────────────────────────────────────────────────────────────────────
+
+/** Legacy Anthropic `tool_use` content block shape (persisted data from older versions). */
+export interface LegacyToolUseBlock {
+	type: "tool_use"
+	id: string
+	name: string
+	input: unknown
+}
+
+/** Legacy Anthropic `tool_result` content block shape (persisted data from older versions). */
+export interface LegacyToolResultBlock {
+	type: "tool_result"
+	tool_use_id: string
+	content?: string | ContentBlockParam[]
+	is_error?: boolean
+}
+
+/** Union of AI SDK `ToolCallPart` and legacy Anthropic `tool_use` block. */
+export type AnyToolCallBlock = ToolCallPart | LegacyToolUseBlock
+
+/** Union of AI SDK `ToolResultPart` and legacy Anthropic `tool_result` block. */
+export type AnyToolResultBlock = ToolResultPart | LegacyToolResultBlock
+
+// ────────────────────────────────────────────────────────────────────────────
+// Dual-Format Type Guards
+// ────────────────────────────────────────────────────────────────────────────
+
+/** Type guard matching both AI SDK `tool-call` and legacy Anthropic `tool_use` blocks. */
+export function isAnyToolCallBlock(block: { type: string }): block is AnyToolCallBlock {
+	return block.type === "tool-call" || block.type === "tool_use"
+}
+
+/** Type guard matching both AI SDK `tool-result` and legacy Anthropic `tool_result` blocks. */
+export function isAnyToolResultBlock(block: { type: string }): block is AnyToolResultBlock {
+	return block.type === "tool-result" || block.type === "tool_result"
+}
+
+// ────────────────────────────────────────────────────────────────────────────
+// Dual-Format Accessor Helpers
+// ────────────────────────────────────────────────────────────────────────────
+
+/** Get the tool call ID from either format. */
+export function getToolCallId(block: AnyToolCallBlock): string {
+	return block.type === "tool-call" ? block.toolCallId : block.id
+}
+
+/** Get the tool name from either format. */
+export function getToolCallName(block: AnyToolCallBlock): string {
+	return block.type === "tool-call" ? block.toolName : block.name
+}
+
+/** Get the tool call arguments/input from either format. */
+export function getToolCallInput(block: AnyToolCallBlock): unknown {
+	return block.input
+}
+
+/** Get the referenced tool call ID from a tool result in either format. */
+export function getToolResultCallId(block: AnyToolResultBlock): string {
+	return block.type === "tool-result" ? block.toolCallId : block.tool_use_id
+}
+
+/** Get the tool result content/output from either format. */
+export function getToolResultContent(block: AnyToolResultBlock): unknown {
+	if (block.type === "tool-result") {
+		return block.output
+	}
+	return block.content
+}
+
+/** Get the error flag from a tool result in either format. */
+export function getToolResultIsError(block: AnyToolResultBlock): boolean | undefined {
+	if (block.type === "tool-result") {
+		// AI SDK ToolResultPart has no is_error flag (unlike the legacy block),
+		// so we rely on the established "[ERROR]" prefix convention in text output.
+		const output: unknown = block.output
+		if (typeof output === "string") {
+			return output.trimStart().startsWith("[ERROR]")
+		}
+		if (Array.isArray(output)) {
+			return output.some(
+				(item) =>
+					typeof item === "object" &&
+					item !== null &&
+					"type" in item &&
+					(item as { type?: string }).type === "text" &&
+					"value" in item &&
+					typeof (item as { value?: unknown }).value === "string" &&
+					(item as { value: string }).value.trimStart().startsWith("[ERROR]"),
+			)
+		}
+		if (
+			output &&
+			typeof output === "object" &&
+			"value" in output &&
+			typeof (output as { value: unknown }).value === "string"
+		) {
+			return (output as { value: string }).value.trimStart().startsWith("[ERROR]")
+		}
+		return undefined
+	}
+	return block.is_error
+}
+
+/** Set the tool result's reference to a tool call ID, returning a new block. */
+export function setToolResultCallId(block: AnyToolResultBlock, id: string): AnyToolResultBlock {
+	if (block.type === "tool-result") {
+		return { ...block, toolCallId: id }
+	}
+	return { ...block, tool_use_id: id }
+}

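A sketch of the setToolResultCallId round-trip across both shapes (IDs illustrative; types and helpers as exported above):

	import { getToolResultCallId, setToolResultCallId, type AnyToolResultBlock } from "./rooMessage"

	const sdkResult: AnyToolResultBlock = {
		type: "tool-result",
		toolCallId: "old-id",
		toolName: "read_file",
		output: { type: "text", value: "ok" },
	}
	const legacyResult: AnyToolResultBlock = {
		type: "tool_result",
		tool_use_id: "old-id",
		content: "ok",
	}

	// Each call writes to the field appropriate to its shape:
	console.log(getToolResultCallId(setToolResultCallId(sdkResult, "new-id"))) // "new-id"
	console.log(getToolResultCallId(setToolResultCallId(legacyResult, "new-id"))) // "new-id"
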
+ 478 - 380
src/core/task/Task.ts

@@ -7,6 +7,7 @@ import EventEmitter from "events"
 
 import { AskIgnoredError } from "./AskIgnoredError"
 
+// Note: Anthropic SDK import retained for types used by the API handler interface
 import { Anthropic } from "@anthropic-ai/sdk"
 import OpenAI from "openai"
 import debounce from "lodash.debounce"
@@ -59,6 +60,7 @@ import { CloudService, BridgeOrchestrator } from "@roo-code/cloud"
 
 // api
 import { ApiHandler, ApiHandlerCreateMessageMetadata, buildApiHandler } from "../../api"
+import type { AssistantModelMessage } from "ai"
 import { ApiStream, GroundingSource } from "../../api/transform/stream"
 import { maybeRemoveImageBlocks } from "../../api/transform/image-cleaning"
 
@@ -116,6 +118,29 @@ import {
 	readTaskMessages,
 	saveTaskMessages,
 	taskMetadata,
+	type RooMessage,
+	type RooUserMessage,
+	type RooAssistantMessage,
+	type RooToolMessage,
+	type RooReasoningMessage,
+	type TextPart,
+	type ImagePart,
+	type ToolCallPart,
+	type ToolResultPart,
+	type UserContentPart,
+	type AnyToolCallBlock,
+	type AnyToolResultBlock,
+	isRooUserMessage,
+	isRooAssistantMessage,
+	isRooToolMessage,
+	isRooReasoningMessage,
+	isRooRoleMessage,
+	isAnyToolResultBlock,
+	getToolCallId,
+	getToolCallName,
+	getToolResultContent,
+	readRooMessages,
+	saveRooMessages,
 } from "../task-persistence"
 import { getEnvironmentDetails } from "../environment/getEnvironmentDetails"
 import { checkContextWindowExceededError } from "../context/context-management/context-error-handling"
@@ -313,7 +338,7 @@ export class Task extends EventEmitter<TaskEvents> implements TaskLike {
 	didEditFile: boolean = false
 
 	// LLM Messages & Chat Messages
-	apiConversationHistory: ApiMessage[] = []
+	apiConversationHistory: RooMessage[] = []
 	clineMessages: ClineMessage[] = []
 
 	// Ask
@@ -353,8 +378,9 @@ export class Task extends EventEmitter<TaskEvents> implements TaskLike {
 	assistantMessageContent: AssistantMessageContent[] = []
 	presentAssistantMessageLocked = false
 	presentAssistantMessageHasPendingUpdates = false
-	userMessageContent: (Anthropic.TextBlockParam | Anthropic.ImageBlockParam | Anthropic.ToolResultBlockParam)[] = []
+	userMessageContent: Array<TextPart | ImagePart> = []
 	userMessageContentReady = false
+	pendingToolResults: Array<ToolResultPart> = []
 
 	/**
 	 * Flag indicating whether the assistant message for the current streaming session
@@ -371,24 +397,24 @@ export class Task extends EventEmitter<TaskEvents> implements TaskLike {
 	assistantMessageSavedToHistory = false
 
 	/**
-	 * Push a tool_result block to userMessageContent, preventing duplicates.
-	 * Duplicate tool_use_ids cause API errors.
+	 * Push a tool result to pendingToolResults, preventing duplicates.
+	 * Duplicate toolCallIds cause API errors.
 	 *
-	 * @param toolResult - The tool_result block to add
+	 * @param toolResult - The ToolResultPart to add
 	 * @returns true if added, false if duplicate was skipped
 	 */
-	public pushToolResultToUserContent(toolResult: Anthropic.ToolResultBlockParam): boolean {
-		const existingResult = this.userMessageContent.find(
-			(block): block is Anthropic.ToolResultBlockParam =>
-				block.type === "tool_result" && block.tool_use_id === toolResult.tool_use_id,
+	public pushToolResultToUserContent(toolResult: ToolResultPart): boolean {
+		const existingResult = this.pendingToolResults.find(
+			(block): block is ToolResultPart =>
+				block.type === "tool-result" && block.toolCallId === toolResult.toolCallId,
 		)
 		if (existingResult) {
 			console.warn(
-				`[Task#pushToolResultToUserContent] Skipping duplicate tool_result for tool_use_id: ${toolResult.tool_use_id}`,
+				`[Task#pushToolResultToUserContent] Skipping duplicate tool_result for toolCallId: ${toolResult.toolCallId}`,
 			)
 			return false
 		}
-		this.userMessageContent.push(toolResult)
+		this.pendingToolResults.push(toolResult)
 		return true
 	}
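A short usage sketch of the duplicate guard above; the Task instance is assumed, and only the members exercised here are declared.

	// Sketch only. Pushing the same toolCallId twice records it once;
	// the second call returns false and logs a warning.
	declare const task: {
		pendingToolResults: Array<{ toolCallId: string }>
		pushToolResultToUserContent(result: {
			type: "tool-result"
			toolCallId: string
			toolName: string
			output: { type: "text"; value: string }
		}): boolean
	}

	const result = {
		type: "tool-result" as const,
		toolCallId: "call_42",
		toolName: "execute_command",
		output: { type: "text" as const, value: "ok" },
	}
	console.assert(task.pushToolResultToUserContent(result) === true)
	console.assert(task.pushToolResultToUserContent(result) === false) // duplicate skipped
	console.assert(task.pendingToolResults.length === 1)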
 
@@ -1011,103 +1037,58 @@ export class Task extends EventEmitter<TaskEvents> implements TaskLike {
 
 	// API Messages
 
-	private async getSavedApiConversationHistory(): Promise<ApiMessage[]> {
-		return readApiMessages({ taskId: this.taskId, globalStoragePath: this.globalStoragePath })
+	private async getSavedApiConversationHistory(): Promise<RooMessage[]> {
+		return readRooMessages({ taskId: this.taskId, globalStoragePath: this.globalStoragePath })
 	}
 
-	private async addToApiConversationHistory(message: Anthropic.MessageParam, reasoning?: string) {
-		// Capture the encrypted_content / thought signatures from the provider (e.g., OpenAI Responses API, Google GenAI) if present.
-		// We only persist data reported by the current response body.
+	private async addToApiConversationHistory(message: RooMessage) {
+		// Handle RooReasoningMessage (has `type` instead of `role`)
+		if (!("role" in message)) {
+			this.apiConversationHistory.push({ ...message, ts: message.ts ?? Date.now() })
+			await this.saveApiConversationHistory()
+			return
+		}
+
 		const handler = this.api as ApiHandler & {
 			getResponseId?: () => string | undefined
 			getEncryptedContent?: () => { encrypted_content: string; id?: string } | undefined
-			getThoughtSignature?: () => string | undefined
-			getSummary?: () => any[] | undefined
-			getReasoningDetails?: () => any[] | undefined
-			getRedactedThinkingBlocks?: () => Array<{ type: "redacted_thinking"; data: string }> | undefined
 		}
 
 		if (message.role === "assistant") {
 			const responseId = handler.getResponseId?.()
-			const reasoningData = handler.getEncryptedContent?.()
-			const thoughtSignature = handler.getThoughtSignature?.()
-			const reasoningSummary = handler.getSummary?.()
-			const reasoningDetails = handler.getReasoningDetails?.()
 
-			// Only Anthropic's API expects/validates the special `thinking` content block signature.
-			// Other providers (notably Gemini 3) use different signature semantics (e.g. `thoughtSignature`)
-			// and require round-tripping the signature in their own format.
-			const modelId = getModelId(this.apiConfiguration)
-			const apiProvider = this.apiConfiguration.apiProvider
-			const apiProtocol = getApiProtocol(
-				apiProvider && !isRetiredProvider(apiProvider) ? apiProvider : undefined,
-				modelId,
-			)
-			const isAnthropicProtocol = apiProtocol === "anthropic"
+			// Check if the message is already in native AI SDK format (from result.response.messages).
+			// These messages have providerOptions on content parts (reasoning signatures, etc.)
+			// and don't need manual block injection.
+			const hasNativeFormat =
+				Array.isArray(message.content) &&
+				(message.content as Array<{ providerOptions?: unknown }>).some((p) => p.providerOptions)
+
+			if (hasNativeFormat) {
+				// Store directly — the AI SDK response message already has reasoning parts
+				// with providerOptions (signatures, redactedData, etc.) in the correct format.
+				this.apiConversationHistory.push({
+					...message,
+					...(responseId ? { id: responseId } : {}),
+					ts: message.ts ?? Date.now(),
+				})
+				await this.saveApiConversationHistory()
+				return
+			}
+
+			// Fallback path: store the manually-constructed message with responseId and timestamp.
+			// This handles non-AI-SDK providers and AI SDK responses without reasoning
+			// (text-only or text + tool calls where no content parts carry providerOptions).
+			const reasoningData = handler.getEncryptedContent?.()
 
-			// Start from the original assistant message
-			const messageWithTs: any = {
+			const messageWithTs: RooAssistantMessage & { content: any } = {
 				...message,
 				...(responseId ? { id: responseId } : {}),
 				ts: Date.now(),
 			}
 
-			// Store reasoning_details array if present (for models like Gemini 3)
-			if (reasoningDetails) {
-				messageWithTs.reasoning_details = reasoningDetails
-			}
-
-			// Store reasoning: Anthropic thinking (with signature), plain text (most providers), or encrypted (OpenAI Native)
-			// Skip if reasoning_details already contains the reasoning (to avoid duplication)
-			if (isAnthropicProtocol && reasoning && thoughtSignature && !reasoningDetails) {
-				// Anthropic provider with extended thinking: Store as proper `thinking` block
-				// This format passes through anthropic-filter.ts and is properly round-tripped
-				// for interleaved thinking with tool use (required by Anthropic API)
-				const thinkingBlock = {
-					type: "thinking",
-					thinking: reasoning,
-					signature: thoughtSignature,
-				}
-
-				if (typeof messageWithTs.content === "string") {
-					messageWithTs.content = [
-						thinkingBlock,
-						{ type: "text", text: messageWithTs.content } satisfies Anthropic.Messages.TextBlockParam,
-					]
-				} else if (Array.isArray(messageWithTs.content)) {
-					messageWithTs.content = [thinkingBlock, ...messageWithTs.content]
-				} else if (!messageWithTs.content) {
-					messageWithTs.content = [thinkingBlock]
-				}
-
-				// Also insert any redacted_thinking blocks after the thinking block.
-				// Anthropic returns these when safety filters trigger on reasoning content.
-				// They must be passed back verbatim for proper reasoning continuity.
-				const redactedBlocks = handler.getRedactedThinkingBlocks?.()
-				if (redactedBlocks && Array.isArray(messageWithTs.content)) {
-					// Insert after the thinking block (index 1, right after thinking at index 0)
-					messageWithTs.content.splice(1, 0, ...redactedBlocks)
-				}
-			} else if (reasoning && !reasoningDetails) {
-				// Other providers (non-Anthropic): Store as generic reasoning block
-				const reasoningBlock = {
-					type: "reasoning",
-					text: reasoning,
-					summary: reasoningSummary ?? ([] as any[]),
-				}
-
-				if (typeof messageWithTs.content === "string") {
-					messageWithTs.content = [
-						reasoningBlock,
-						{ type: "text", text: messageWithTs.content } satisfies Anthropic.Messages.TextBlockParam,
-					]
-				} else if (Array.isArray(messageWithTs.content)) {
-					messageWithTs.content = [reasoningBlock, ...messageWithTs.content]
-				} else if (!messageWithTs.content) {
-					messageWithTs.content = [reasoningBlock]
-				}
-			} else if (reasoningData?.encrypted_content) {
-				// OpenAI Native encrypted reasoning
+			// OpenAI Native encrypted reasoning — the only non-AI-SDK reasoning format still needed
+			if (reasoningData?.encrypted_content) {
 				const reasoningBlock = {
 					type: "reasoning",
 					summary: [] as any[],
@@ -1116,10 +1097,7 @@ export class Task extends EventEmitter<TaskEvents> implements TaskLike {
 				}
 
 				if (typeof messageWithTs.content === "string") {
-					messageWithTs.content = [
-						reasoningBlock,
-						{ type: "text", text: messageWithTs.content } satisfies Anthropic.Messages.TextBlockParam,
-					]
+					messageWithTs.content = [reasoningBlock, { type: "text", text: messageWithTs.content } as TextPart]
 				} else if (Array.isArray(messageWithTs.content)) {
 					messageWithTs.content = [reasoningBlock, ...messageWithTs.content]
 				} else if (!messageWithTs.content) {
@@ -1127,59 +1105,44 @@ export class Task extends EventEmitter<TaskEvents> implements TaskLike {
 				}
 			}
 
-			// For non-Anthropic providers (e.g., Gemini 3), persist the thought signature as its own
-			// content block so converters can attach it back to the correct provider-specific fields.
-			// Note: For Anthropic extended thinking, the signature is already included in the thinking block above.
-			if (thoughtSignature && !isAnthropicProtocol) {
-				const thoughtSignatureBlock = {
-					type: "thoughtSignature",
-					thoughtSignature,
-				}
-
-				if (typeof messageWithTs.content === "string") {
-					messageWithTs.content = [
-						{ type: "text", text: messageWithTs.content } satisfies Anthropic.Messages.TextBlockParam,
-						thoughtSignatureBlock,
-					]
-				} else if (Array.isArray(messageWithTs.content)) {
-					messageWithTs.content = [...messageWithTs.content, thoughtSignatureBlock]
-				} else if (!messageWithTs.content) {
-					messageWithTs.content = [thoughtSignatureBlock]
-				}
-			}
-
 			this.apiConversationHistory.push(messageWithTs)
 		} else {
-			// For user messages, validate tool_result IDs ONLY when the immediately previous *effective* message
-			// is an assistant message.
-			//
-			// If the previous effective message is also a user message (e.g., summary + a new user message),
-			// validating against any earlier assistant message can incorrectly inject placeholder tool_results.
+			// For user/tool messages, validate tool_result IDs ONLY when the immediately previous
+			// *effective* message is an assistant message.
 			const effectiveHistoryForValidation = getEffectiveApiHistory(this.apiConversationHistory)
 			const lastEffective = effectiveHistoryForValidation[effectiveHistoryForValidation.length - 1]
-			const historyForValidation = lastEffective?.role === "assistant" ? effectiveHistoryForValidation : []
+			const lastIsAssistant = lastEffective ? isRooAssistantMessage(lastEffective) : false
+			const historyForValidation = lastIsAssistant ? effectiveHistoryForValidation : []
 
 			// If the previous effective message is NOT an assistant, convert tool_result blocks to text blocks.
-			// This prevents orphaned tool_results from being filtered out by getEffectiveApiHistory.
-			// This can happen when condensing occurs after the assistant sends tool_uses but before
-			// the user responds - the tool_use blocks get condensed away, leaving orphaned tool_results.
-			let messageToAdd = message
-			if (lastEffective?.role !== "assistant" && Array.isArray(message.content)) {
+			let messageToAdd: RooMessage = message
+			if (!lastIsAssistant && isRooUserMessage(message) && Array.isArray(message.content)) {
+				const normalizedUserContent = message.content.map((block) => {
+					const typedBlock = block as unknown as { type: string }
+					if (!isAnyToolResultBlock(typedBlock)) {
+						return block
+					}
+					const raw = getToolResultContent(typedBlock)
+					const textValue = (() => {
+						if (typeof raw === "string") return raw
+						if (raw && typeof raw === "object" && "value" in raw && typeof raw.value === "string") {
+							return raw.value
+						}
+						return JSON.stringify(raw)
+					})()
+					return {
+						type: "text" as const,
+						text: `Tool result:\n${textValue}`,
+					}
+				})
 				messageToAdd = {
 					...message,
-					content: message.content.map((block) =>
-						block.type === "tool_result"
-							? {
-									type: "text" as const,
-									text: `Tool result:\n${typeof block.content === "string" ? block.content : JSON.stringify(block.content)}`,
-								}
-							: block,
-					),
+					content: normalizedUserContent,
 				}
 			}
 
 			const validatedMessage = validateAndFixToolResultIds(messageToAdd, historyForValidation)
-			const messageWithTs = { ...validatedMessage, ts: Date.now() }
+			const messageWithTs: RooMessage = { ...validatedMessage, ts: Date.now() }
 			this.apiConversationHistory.push(messageWithTs)
 		}
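For reviewers, the routing rule the rewritten addToApiConversationHistory applies, extracted as a pure sketch; the names and shapes are assumed from the diff, not exported by the code.

	// Sketch only: which storage path a message takes.
	type StoragePath = "reasoning" | "native-assistant" | "fallback-assistant" | "user-or-tool"

	function classify(message: { role?: string; content?: unknown }): StoragePath {
		// RooReasoningMessage has `type` instead of `role` and is stored verbatim.
		if (!("role" in message)) return "reasoning"
		if (message.role === "assistant") {
			const parts = Array.isArray(message.content)
				? (message.content as Array<{ providerOptions?: unknown }>)
				: []
			// providerOptions on any content part marks a native AI SDK response message.
			return parts.some((p) => p.providerOptions) ? "native-assistant" : "fallback-assistant"
		}
		return "user-or-tool"
	}

	console.assert(classify({ content: [] }) === "reasoning")
	console.assert(
		classify({ role: "assistant", content: [{ type: "reasoning", providerOptions: {} }] }) === "native-assistant",
	)
	console.assert(classify({ role: "assistant", content: [{ type: "text", text: "hi" }] }) === "fallback-assistant")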
 
@@ -1190,7 +1153,7 @@ export class Task extends EventEmitter<TaskEvents> implements TaskLike {
 	// For API requests, consecutive same-role messages are merged via mergeConsecutiveApiMessages()
 	// so rewind/edit behavior can still reference original message boundaries.
 
-	async overwriteApiConversationHistory(newHistory: ApiMessage[]) {
+	async overwriteApiConversationHistory(newHistory: RooMessage[]) {
 		this.apiConversationHistory = newHistory
 		await this.saveApiConversationHistory()
 	}
@@ -1212,7 +1175,7 @@ export class Task extends EventEmitter<TaskEvents> implements TaskLike {
 	 */
 	public async flushPendingToolResultsToHistory(): Promise<boolean> {
 		// Only flush if there's actually pending content to save
-		if (this.userMessageContent.length === 0) {
+		if (this.userMessageContent.length === 0 && this.pendingToolResults.length === 0) {
 			return true
 		}
 
@@ -1246,25 +1209,31 @@ export class Task extends EventEmitter<TaskEvents> implements TaskLike {
 			return false
 		}
 
-		// Save the user message with tool_result blocks
-		const userMessage: Anthropic.MessageParam = {
-			role: "user",
-			content: this.userMessageContent,
+		// Save pending tool results as a RooToolMessage
+		if (this.pendingToolResults.length > 0) {
+			const toolMessage: RooToolMessage = {
+				role: "tool",
+				content: [...this.pendingToolResults],
+				ts: Date.now(),
+			}
+			this.apiConversationHistory.push(toolMessage)
 		}
 
-		// Validate and fix tool_result IDs when the previous *effective* message is an assistant message.
-		const effectiveHistoryForValidation = getEffectiveApiHistory(this.apiConversationHistory)
-		const lastEffective = effectiveHistoryForValidation[effectiveHistoryForValidation.length - 1]
-		const historyForValidation = lastEffective?.role === "assistant" ? effectiveHistoryForValidation : []
-		const validatedMessage = validateAndFixToolResultIds(userMessage, historyForValidation)
-		const userMessageWithTs = { ...validatedMessage, ts: Date.now() }
-		this.apiConversationHistory.push(userMessageWithTs as ApiMessage)
+		// Save any text/image user content as a RooUserMessage
+		if (this.userMessageContent.length > 0) {
+			const userMessage: RooUserMessage = {
+				role: "user",
+				content: [...this.userMessageContent],
+				ts: Date.now(),
+			}
+			this.apiConversationHistory.push(userMessage)
+		}
 
 		const saved = await this.saveApiConversationHistory()
 
 		if (saved) {
-			// Clear the pending content since it's now saved
 			this.userMessageContent = []
+			this.pendingToolResults = []
 		} else {
 			console.warn(
 				`[Task#${this.taskId}] flushPendingToolResultsToHistory: save failed, retaining pending tool results in memory`,
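Net effect of the split above, sketched with assumed shapes: one successful flush can now append up to two history entries, a role "tool" message carrying the pending results followed by a role "user" message carrying any remaining text or image content.

	// Sketch only: the entries one successful flush appends, in order.
	const appended = [
		{
			role: "tool" as const,
			content: [
				{
					type: "tool-result" as const,
					toolCallId: "call_1",
					toolName: "read_file",
					output: { type: "text" as const, value: "file contents" },
				},
			],
			ts: Date.now(),
		},
		{
			role: "user" as const,
			content: [{ type: "text" as const, text: "follow-up user text" }],
			ts: Date.now(),
		},
	]
	console.assert(appended[0].role === "tool" && appended[1].role === "user")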
@@ -1276,11 +1245,16 @@ export class Task extends EventEmitter<TaskEvents> implements TaskLike {
 
 	private async saveApiConversationHistory(): Promise<boolean> {
 		try {
-			await saveApiMessages({
+			const saved = await saveRooMessages({
 				messages: structuredClone(this.apiConversationHistory),
 				taskId: this.taskId,
 				globalStoragePath: this.globalStoragePath,
 			})
+			// saveRooMessages historically returned void in some tests/mocks; treat only explicit false as failure.
+			if (saved === false) {
+				console.error("Failed to save API conversation history: saveRooMessages returned false")
+				return false
+			}
 			return true
 		} catch (error) {
 			console.error("Failed to save API conversation history:", error)
@@ -1886,7 +1860,7 @@ export class Task extends EventEmitter<TaskEvents> implements TaskLike {
 			)
 			return
 		}
-		await this.overwriteApiConversationHistory(messages)
+		await this.overwriteApiConversationHistory(messages as RooMessage[])
 
 		const contextCondense: ContextCondense = {
 			summary,
@@ -2144,7 +2118,7 @@ export class Task extends EventEmitter<TaskEvents> implements TaskLike {
 			}
 			this.isInitialized = true
 
-			const imageBlocks: Anthropic.ImageBlockParam[] = formatResponse.imageBlocks(images)
+			const imageBlocks: ImagePart[] = formatResponse.imageBlocks(images)
 
 			// Task starting
 			await this.initiateTaskLoop([
@@ -2260,91 +2234,136 @@ export class Task extends EventEmitter<TaskEvents> implements TaskLike {
 
 		// Make sure that the api conversation history can be resumed by the API,
 		// even if it goes out of sync with cline messages.
-		let existingApiConversationHistory: ApiMessage[] = await this.getSavedApiConversationHistory()
-
-		// Tool blocks are always preserved; native tool calling only.
+		const existingApiConversationHistory: RooMessage[] = await this.getSavedApiConversationHistory()
 
-		// if the last message is an assistant message, we need to check if there's tool use since every tool use has to have a tool response
-		// if there's no tool use and only a text block, then we can just add a user message
-		// (note this isn't relevant anymore since we use custom tool prompts instead of tool use blocks, but this is here for legacy purposes in case users resume old tasks)
+		// If the last message is an assistant message with tool calls, every tool call
+		// needs a corresponding tool result. Create a RooToolMessage with "interrupted"
+		// results for any missing ones.
+		// If the last message is a user message, check the preceding assistant message
+		// for unmatched tool calls and fill in the missing tool results.
+		// In RooMessage format, tool results live in RooToolMessage (not in user messages).
 
-		// if the last message is a user message, we can need to get the assistant message before it to see if it made tool calls, and if so, fill in the remaining tool responses with 'interrupted'
+		let modifiedOldUserContent: UserContentPart[]
+		let modifiedApiConversationHistory: RooMessage[]
 
-		let modifiedOldUserContent: Anthropic.Messages.ContentBlockParam[] // either the last message if its user message, or the user message before the last (assistant) message
-		let modifiedApiConversationHistory: ApiMessage[] // need to remove the last user message to replace with new modified user message
 		if (existingApiConversationHistory.length > 0) {
-			const lastMessage = existingApiConversationHistory[existingApiConversationHistory.length - 1]
-
-			if (lastMessage.role === "assistant") {
-				const content = Array.isArray(lastMessage.content)
-					? lastMessage.content
-					: [{ type: "text", text: lastMessage.content }]
-				const hasToolUse = content.some((block) => block.type === "tool_use")
-
-				if (hasToolUse) {
-					const toolUseBlocks = content.filter(
-						(block) => block.type === "tool_use",
-					) as Anthropic.Messages.ToolUseBlock[]
-					const toolResponses: Anthropic.ToolResultBlockParam[] = toolUseBlocks.map((block) => ({
-						type: "tool_result",
-						tool_use_id: block.id,
-						content: "Task was interrupted before this tool call could be completed.",
+			// Find the last message that has a role (skip RooReasoningMessage items)
+			let lastMsgIndex = existingApiConversationHistory.length - 1
+			while (lastMsgIndex >= 0 && isRooReasoningMessage(existingApiConversationHistory[lastMsgIndex])) {
+				lastMsgIndex--
+			}
+
+			if (lastMsgIndex < 0) {
+				throw new Error("Unexpected: No role-bearing messages in API conversation history")
+			}
+
+			const lastMessage = existingApiConversationHistory[lastMsgIndex]
+
+			if (isRooAssistantMessage(lastMessage)) {
+				const content = Array.isArray(lastMessage.content) ? lastMessage.content : []
+				const toolCallParts = content.filter((part): part is ToolCallPart => part.type === "tool-call")
+
+				if (toolCallParts.length > 0) {
+					const toolResults: ToolResultPart[] = toolCallParts.map((tc) => ({
+						type: "tool-result" as const,
+						toolCallId: tc.toolCallId,
+						toolName: tc.toolName,
+						output: {
+							type: "text" as const,
+							value: "Task was interrupted before this tool call could be completed.",
+						},
 					}))
-					modifiedApiConversationHistory = [...existingApiConversationHistory] // no changes
-					modifiedOldUserContent = [...toolResponses]
+					const toolMessage: RooToolMessage = { role: "tool", content: toolResults }
+					modifiedApiConversationHistory = [...existingApiConversationHistory, toolMessage]
+					modifiedOldUserContent = []
 				} else {
 					modifiedApiConversationHistory = [...existingApiConversationHistory]
 					modifiedOldUserContent = []
 				}
-			} else if (lastMessage.role === "user") {
-				const previousAssistantMessage: ApiMessage | undefined =
-					existingApiConversationHistory[existingApiConversationHistory.length - 2]
-
-				const existingUserContent: Anthropic.Messages.ContentBlockParam[] = Array.isArray(lastMessage.content)
-					? lastMessage.content
-					: [{ type: "text", text: lastMessage.content }]
-				if (previousAssistantMessage && previousAssistantMessage.role === "assistant") {
+			} else if (isRooUserMessage(lastMessage)) {
+				// Find the preceding assistant message (skip tool/reasoning messages)
+				let prevAssistantIndex = lastMsgIndex - 1
+				while (
+					prevAssistantIndex >= 0 &&
+					!isRooAssistantMessage(existingApiConversationHistory[prevAssistantIndex])
+				) {
+					prevAssistantIndex--
+				}
+				const previousAssistantMessage =
+					prevAssistantIndex >= 0 ? existingApiConversationHistory[prevAssistantIndex] : undefined
+
+				// Extract existing user content for initiateTaskLoop
+				const existingUserContent: UserContentPart[] = Array.isArray(lastMessage.content)
+					? (lastMessage.content as UserContentPart[])
+					: [{ type: "text" as const, text: String(lastMessage.content) }]
+
+				if (previousAssistantMessage && isRooAssistantMessage(previousAssistantMessage)) {
 					const assistantContent = Array.isArray(previousAssistantMessage.content)
 						? previousAssistantMessage.content
-						: [{ type: "text", text: previousAssistantMessage.content }]
+						: []
+					const toolCallParts = assistantContent.filter(
+						(part): part is ToolCallPart => part.type === "tool-call",
+					)
+
+					if (toolCallParts.length > 0) {
+						// Collect tool call IDs that already have results (in tool messages between assistant and user)
+						const answeredToolCallIds = new Set<string>()
+						for (let i = prevAssistantIndex + 1; i < lastMsgIndex; i++) {
+							const msg = existingApiConversationHistory[i]
+							if (isRooToolMessage(msg) && Array.isArray(msg.content)) {
+								for (const part of msg.content) {
+									if (part.type === "tool-result") {
+										answeredToolCallIds.add((part as ToolResultPart).toolCallId)
+									}
+								}
+							}
+						}
 
-					const toolUseBlocks = assistantContent.filter(
-						(block) => block.type === "tool_use",
-					) as Anthropic.Messages.ToolUseBlock[]
+						const missingToolCalls = toolCallParts.filter((tc) => !answeredToolCallIds.has(tc.toolCallId))
 
-					if (toolUseBlocks.length > 0) {
-						const existingToolResults = existingUserContent.filter(
-							(block) => block.type === "tool_result",
-						) as Anthropic.ToolResultBlockParam[]
+						// Remove last user message; add missing tool results as a RooToolMessage
+						const historyWithoutLastUser = existingApiConversationHistory.slice(0, lastMsgIndex)
 
-						const missingToolResponses: Anthropic.ToolResultBlockParam[] = toolUseBlocks
-							.filter(
-								(toolUse) => !existingToolResults.some((result) => result.tool_use_id === toolUse.id),
-							)
-							.map((toolUse) => ({
-								type: "tool_result",
-								tool_use_id: toolUse.id,
-								content: "Task was interrupted before this tool call could be completed.",
+						if (missingToolCalls.length > 0) {
+							const missingResults: ToolResultPart[] = missingToolCalls.map((tc) => ({
+								type: "tool-result" as const,
+								toolCallId: tc.toolCallId,
+								toolName: tc.toolName,
+								output: {
+									type: "text" as const,
+									value: "Task was interrupted before this tool call could be completed.",
+								},
 							}))
+							const toolMessage: RooToolMessage = { role: "tool", content: missingResults }
+							modifiedApiConversationHistory = [...historyWithoutLastUser, toolMessage]
+						} else {
+							modifiedApiConversationHistory = historyWithoutLastUser
+						}
 
-						modifiedApiConversationHistory = existingApiConversationHistory.slice(0, -1) // removes the last user message
-						modifiedOldUserContent = [...existingUserContent, ...missingToolResponses]
+						// Strip any legacy tool_result / tool-result blocks from old user content
+						modifiedOldUserContent = existingUserContent.filter(
+							(block) => !isAnyToolResultBlock(block as { type: string }),
+						)
 					} else {
-						modifiedApiConversationHistory = existingApiConversationHistory.slice(0, -1)
+						modifiedApiConversationHistory = existingApiConversationHistory.slice(0, lastMsgIndex)
 						modifiedOldUserContent = [...existingUserContent]
 					}
 				} else {
-					modifiedApiConversationHistory = existingApiConversationHistory.slice(0, -1)
+					modifiedApiConversationHistory = existingApiConversationHistory.slice(0, lastMsgIndex)
 					modifiedOldUserContent = [...existingUserContent]
 				}
+			} else if (isRooToolMessage(lastMessage)) {
+				// Last message is a tool result — no user message was added yet
+				modifiedApiConversationHistory = [...existingApiConversationHistory]
+				modifiedOldUserContent = []
 			} else {
-				throw new Error("Unexpected: Last message is not a user or assistant message")
+				throw new Error("Unexpected: Last message is not a user, assistant, or tool message")
 			}
 		} else {
 			throw new Error("Unexpected: No existing API conversation history")
 		}
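The repair performed on resume, as a standalone sketch (the ToolCall and ToolResult shapes are assumed from this diff): any tool call left without a result is answered with a synthetic "interrupted" result.

	// Sketch only.
	type ToolCallSketch = { type: "tool-call"; toolCallId: string; toolName: string; input: unknown }

	function interruptedResults(calls: ToolCallSketch[], answered: Set<string>) {
		return calls
			.filter((tc) => !answered.has(tc.toolCallId))
			.map((tc) => ({
				type: "tool-result" as const,
				toolCallId: tc.toolCallId,
				toolName: tc.toolName,
				output: {
					type: "text" as const,
					value: "Task was interrupted before this tool call could be completed.",
				},
			}))
	}

	const calls: ToolCallSketch[] = [
		{ type: "tool-call", toolCallId: "call_a", toolName: "read_file", input: {} },
		{ type: "tool-call", toolCallId: "call_b", toolName: "write_to_file", input: {} },
	]
	// call_a already has a result between the assistant and user messages; call_b does not.
	console.assert(interruptedResults(calls, new Set(["call_a"])).length === 1)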
 
-		let newUserContent: Anthropic.Messages.ContentBlockParam[] = [...modifiedOldUserContent]
+		let newUserContent: UserContentPart[] = [...modifiedOldUserContent]
 
 		const agoText = ((): string => {
 			const timestamp = lastClineMessage?.ts ?? Date.now()
@@ -2627,26 +2646,25 @@ export class Task extends EventEmitter<TaskEvents> implements TaskLike {
 		const environmentDetails = await getEnvironmentDetails(this, true)
 		let lastUserMsgIndex = -1
 		for (let i = this.apiConversationHistory.length - 1; i >= 0; i--) {
-			if (this.apiConversationHistory[i].role === "user") {
+			const msg = this.apiConversationHistory[i]
+			if ("role" in msg && msg.role === "user") {
 				lastUserMsgIndex = i
 				break
 			}
 		}
 		if (lastUserMsgIndex >= 0) {
-			const lastUserMsg = this.apiConversationHistory[lastUserMsgIndex]
+			const lastUserMsg = this.apiConversationHistory[lastUserMsgIndex] as any
 			if (Array.isArray(lastUserMsg.content)) {
 				// Remove any existing environment_details blocks before adding fresh ones
-				const contentWithoutEnvDetails = lastUserMsg.content.filter(
-					(block: Anthropic.Messages.ContentBlockParam) => {
-						if (block.type === "text" && typeof block.text === "string") {
-							const isEnvironmentDetailsBlock =
-								block.text.trim().startsWith("<environment_details>") &&
-								block.text.trim().endsWith("</environment_details>")
-							return !isEnvironmentDetailsBlock
-						}
-						return true
-					},
-				)
+				const contentWithoutEnvDetails = lastUserMsg.content.filter((block: any) => {
+					if (block.type === "text" && typeof block.text === "string") {
+						const isEnvironmentDetailsBlock =
+							block.text.trim().startsWith("<environment_details>") &&
+							block.text.trim().endsWith("</environment_details>")
+						return !isEnvironmentDetailsBlock
+					}
+					return true
+				})
 				// Add fresh environment details
 				lastUserMsg.content = [...contentWithoutEnvDetails, { type: "text" as const, text: environmentDetails }]
 			}
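The predicate the refresh above relies on, isolated as a sketch: a text block counts as environment details only when the trimmed text both starts and ends with the tag, which keeps passing mentions inside ordinary prose intact.

	// Sketch only: matches the filter used above.
	const isEnvironmentDetailsBlock = (block: { type: string; text?: string }): boolean =>
		block.type === "text" &&
		typeof block.text === "string" &&
		block.text.trim().startsWith("<environment_details>") &&
		block.text.trim().endsWith("</environment_details>")

	console.assert(isEnvironmentDetailsBlock({ type: "text", text: "<environment_details>cwd: /repo</environment_details>" }))
	console.assert(!isEnvironmentDetailsBlock({ type: "text", text: "see <environment_details> for context" }))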
@@ -2662,7 +2680,7 @@ export class Task extends EventEmitter<TaskEvents> implements TaskLike {
 
 	// Task Loop
 
-	private async initiateTaskLoop(userContent: Anthropic.Messages.ContentBlockParam[]): Promise<void> {
+	private async initiateTaskLoop(userContent: UserContentPart[]): Promise<void> {
 		// Kicks off the checkpoints initialization process in the background.
 		getCheckpointService(this)
 
@@ -2697,11 +2715,11 @@ export class Task extends EventEmitter<TaskEvents> implements TaskLike {
 	}
 
 	public async recursivelyMakeClineRequests(
-		userContent: Anthropic.Messages.ContentBlockParam[],
+		userContent: UserContentPart[],
 		includeFileDetails: boolean = false,
 	): Promise<boolean> {
 		interface StackItem {
-			userContent: Anthropic.Messages.ContentBlockParam[]
+			userContent: UserContentPart[]
 			includeFileDetails: boolean
 			retryAttempt?: number
 			userMessageWasRemoved?: boolean // Track if user message was removed due to empty response
@@ -2792,7 +2810,7 @@ export class Task extends EventEmitter<TaskEvents> implements TaskLike {
 			} = (await this.providerRef.deref()?.getState()) ?? {}
 
 			const { content: parsedUserContent, mode: slashCommandMode } = await processUserContentMentions({
-				userContent: currentUserContent,
+				userContent: currentUserContent as Array<TextPart | ImagePart>,
 				cwd: this.cwd,
 				urlContentFetcher: this.urlContentFetcher,
 				fileContextTracker: this.fileContextTracker,
@@ -2846,7 +2864,8 @@ export class Task extends EventEmitter<TaskEvents> implements TaskLike {
 			const shouldAddUserMessage =
 				((currentItem.retryAttempt ?? 0) === 0 && !isEmptyUserContent) || currentItem.userMessageWasRemoved
 			if (shouldAddUserMessage) {
-				await this.addToApiConversationHistory({ role: "user", content: finalUserContent })
+				const userMessage: RooUserMessage = { role: "user", content: finalUserContent }
+				await this.addToApiConversationHistory(userMessage)
 				TelemetryService.instance.captureConversationMessage(this.taskId, "user")
 			}
 
@@ -2951,6 +2970,7 @@ export class Task extends EventEmitter<TaskEvents> implements TaskLike {
 				this.assistantMessageContent = []
 				this.didCompleteReadingStream = false
 				this.userMessageContent = []
+				this.pendingToolResults = []
 				this.userMessageContentReady = false
 				this.didRejectTool = false
 				this.didAlreadyUseTool = false
@@ -2981,6 +3001,7 @@ export class Task extends EventEmitter<TaskEvents> implements TaskLike {
 				const stream = this.attemptApiRequest(currentItem.retryAttempt ?? 0, { skipProviderRateLimit: true })
 				let assistantMessage = ""
 				let reasoningMessage = ""
+				let responseAssistantMessage: AssistantModelMessage | undefined
 				let pendingGroundingSources: GroundingSource[] = []
 				this.isStreaming = true
 
@@ -3123,6 +3144,9 @@ export class Task extends EventEmitter<TaskEvents> implements TaskLike {
 								presentAssistantMessage(this)
 								break
 							}
+							case "response_message":
+								responseAssistantMessage = chunk.message
+								break
 						}
 
 						if (this.abort) {
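For context on the new chunk type: its shape is inferred from the switch case above (the actual union lives in src/api/transform/stream). A provider built on the AI SDK would emit it once streaming completes so Task can persist the native assistant message verbatim; a sketch:

	// Sketch only; the chunk shape is assumed from its usage above.
	import type { AssistantModelMessage } from "ai"

	type ResponseMessageChunk = { type: "response_message"; message: AssistantModelMessage }

	async function* emitResponseMessage(message: AssistantModelMessage): AsyncGenerator<ResponseMessageChunk> {
		// Emitted after the text/reasoning/tool-call chunks, as the last chunk of the stream.
		yield { type: "response_message", message }
	}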
@@ -3528,7 +3552,7 @@ export class Task extends EventEmitter<TaskEvents> implements TaskLike {
 					}
 
 					// Build the assistant message content array
-					const assistantContent: Array<Anthropic.TextBlockParam | Anthropic.ToolUseBlockParam> = []
+					const assistantContent: Array<TextPart | ToolCallPart> = []
 
 					// Add text content if present
 					if (assistantMessage) {
@@ -3563,9 +3587,9 @@ export class Task extends EventEmitter<TaskEvents> implements TaskLike {
 								}
 								seenToolUseIds.add(sanitizedId)
 								assistantContent.push({
-									type: "tool_use" as const,
-									id: sanitizedId,
-									name: mcpBlock.name, // Original dynamic name
+									type: "tool-call" as const,
+									toolCallId: sanitizedId,
+									toolName: mcpBlock.name, // Original dynamic name
 									input: mcpBlock.arguments, // Direct tool arguments
 								})
 							}
@@ -3593,9 +3617,9 @@ export class Task extends EventEmitter<TaskEvents> implements TaskLike {
 								const toolNameForHistory = toolUse.originalName ?? toolUse.name
 
 								assistantContent.push({
-									type: "tool_use" as const,
-									id: sanitizedId,
-									name: toolNameForHistory,
+									type: "tool-call" as const,
+									toolCallId: sanitizedId,
+									toolName: toolNameForHistory,
 									input,
 								})
 							}
@@ -3606,7 +3630,7 @@ export class Task extends EventEmitter<TaskEvents> implements TaskLike {
 					// truncate any tools that come after it and inject error tool_results.
 					// This prevents orphaned tools when delegation disposes the parent task.
 					const newTaskIndex = assistantContent.findIndex(
-						(block) => block.type === "tool_use" && block.name === "new_task",
+						(block) => block.type === "tool-call" && (block as ToolCallPart).toolName === "new_task",
 					)
 
 					if (newTaskIndex !== -1 && newTaskIndex < assistantContent.length - 1) {
@@ -3627,13 +3651,18 @@ export class Task extends EventEmitter<TaskEvents> implements TaskLike {
 
 						// Pre-inject error tool_results for truncated tools
 						for (const tool of truncatedTools) {
-							if (tool.type === "tool_use" && (tool as Anthropic.ToolUseBlockParam).id) {
+							if (tool.type !== "tool-call") continue
+							const toolCallId = getToolCallId(tool as AnyToolCallBlock)
+							const toolName = getToolCallName(tool as AnyToolCallBlock)
+							if (toolCallId) {
 								this.pushToolResultToUserContent({
-									type: "tool_result",
-									tool_use_id: (tool as Anthropic.ToolUseBlockParam).id,
-									content:
-										"This tool was not executed because new_task was called in the same message turn. The new_task tool must be the last tool in a message.",
-									is_error: true,
+									type: "tool-result",
+									toolCallId: sanitizeToolUseId(toolCallId),
+									toolName,
+									output: {
+										type: "text",
+										value: "[ERROR] This tool was not executed because new_task was called in the same message turn. The new_task tool must be the last tool in a message.",
+									},
 								})
 							}
 						}
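Worth noting: the synthetic results above carry the "[ERROR]" text prefix, which is exactly the convention getToolResultIsError (added earlier in this diff) keys on, so the error still reads as an error even though ToolResultPart has no is_error flag. A round-trip sketch with assumed shapes:

	// Sketch only.
	const truncatedToolResult = {
		type: "tool-result" as const,
		toolCallId: "call_after_new_task",
		toolName: "read_file",
		output: {
			type: "text" as const,
			value: "[ERROR] This tool was not executed because new_task was called in the same message turn.",
		},
	}
	// getToolResultIsError(truncatedToolResult) evaluates to true via the prefix check.
	console.assert(truncatedToolResult.output.value.trimStart().startsWith("[ERROR]"))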
@@ -3643,10 +3672,40 @@ export class Task extends EventEmitter<TaskEvents> implements TaskLike {
 					// This is critical for new_task: when it triggers delegation, flushPendingToolResultsToHistory()
 					// will save the user message with tool_results. The assistant message must already be in history
 					// so that tool_result blocks appear AFTER their corresponding tool_use blocks.
-					await this.addToApiConversationHistory(
-						{ role: "assistant", content: assistantContent },
-						reasoningMessage || undefined,
-					)
+					let assistantMessageForHistory: RooAssistantMessage
+					if (responseAssistantMessage) {
+						// AI SDK response message is already in native format with providerOptions —
+						// store directly without manual reasoning/signature reconstruction.
+						// If new_task isolation truncated local tool-calls, apply the same truncation
+						// to the native response message so persisted history stays consistent.
+						let normalizedResponseMessage = responseAssistantMessage
+						if (Array.isArray(normalizedResponseMessage.content)) {
+							const responseNewTaskIndex = normalizedResponseMessage.content.findIndex(
+								(part) => part.type === "tool-call" && part.toolName === "new_task",
+							)
+							if (
+								responseNewTaskIndex !== -1 &&
+								responseNewTaskIndex < normalizedResponseMessage.content.length - 1
+							) {
+								normalizedResponseMessage = {
+									...normalizedResponseMessage,
+									content: normalizedResponseMessage.content.slice(0, responseNewTaskIndex + 1),
+								}
+							}
+						}
+						assistantMessageForHistory = {
+							...normalizedResponseMessage,
+							ts: Date.now(),
+						}
+					} else {
+						// Fallback: manual construction for non-AI-SDK providers
+						assistantMessageForHistory = {
+							role: "assistant",
+							content: assistantContent,
+							ts: Date.now(),
+						}
+					}
+					await this.addToApiConversationHistory(assistantMessageForHistory)
 					this.assistantMessageSavedToHistory = true
 
 					TelemetryService.instance.captureConversationMessage(this.taskId, "assistant")
@@ -3711,11 +3770,37 @@ export class Task extends EventEmitter<TaskEvents> implements TaskLike {
 						this.consecutiveNoToolUseCount = 0
 					}
 
+					// Save pending tool results to conversation history as a RooToolMessage.
+					// After the RooMessage migration, tool results are in pendingToolResults
+					// (separate from userMessageContent) and must be explicitly saved.
+					// We don't use flushPendingToolResultsToHistory() here because that also
+					// flushes userMessageContent — which should instead go via the stack to
+					// become part of the next iteration's user message.
+					if (this.pendingToolResults.length > 0) {
+						const toolMessage: RooToolMessage = {
+							role: "tool",
+							content: [...this.pendingToolResults],
+							ts: Date.now(),
+						}
+						const previousHistoryLength = this.apiConversationHistory.length
+						this.apiConversationHistory.push(toolMessage)
+						const saved = await this.saveApiConversationHistory()
+						if (saved) {
+							this.pendingToolResults = []
+						} else {
+							// Keep pending results for retry and roll back in-memory insertion to avoid duplicates.
+							this.apiConversationHistory = this.apiConversationHistory.slice(0, previousHistoryLength)
+							console.warn(
+								`[Task#${this.taskId}] Failed to persist pending tool results in main loop; keeping pending results for retry`,
+							)
+						}
+					}
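The persist-or-roll-back pattern used above, as a standalone sketch; the save callback is assumed to resolve false on failure, matching saveApiConversationHistory.

	// Sketch only: append an item, keep it only if the save succeeds.
	async function pushDurably<T>(history: T[], item: T, save: () => Promise<boolean>): Promise<boolean> {
		const lengthBefore = history.length
		history.push(item)
		if (await save()) return true
		// Roll back the in-memory insertion so a retry cannot produce duplicates.
		history.length = lengthBefore
		return false
	}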
+
 					// Push to stack if there's content OR if we're paused waiting for a subtask.
 					// When paused, we push an empty item so the loop continues to the pause check.
 					if (this.userMessageContent.length > 0 || this.isPaused) {
 						stack.push({
-							userContent: [...this.userMessageContent], // Create a copy to avoid mutation issues
+							userContent: [...this.userMessageContent] as UserContentPart[], // Create a copy to avoid mutation issues
 							includeFileDetails: false, // Subsequent iterations don't need file details
 						})
 
@@ -3745,7 +3830,7 @@ export class Task extends EventEmitter<TaskEvents> implements TaskLike {
 					let state = await this.providerRef.deref()?.getState()
 					if (this.apiConversationHistory.length > 0) {
 						const lastMessage = this.apiConversationHistory[this.apiConversationHistory.length - 1]
-						if (lastMessage.role === "user") {
+						if ("role" in lastMessage && lastMessage.role === "user") {
 							// Remove the last user message that we added earlier
 							this.apiConversationHistory.pop()
 						}
@@ -3806,7 +3891,7 @@ export class Task extends EventEmitter<TaskEvents> implements TaskLike {
 							await this.addToApiConversationHistory({
 								role: "user",
 								content: currentUserContent,
-							})
+							} as RooMessage)
 
 							await this.say(
 								"error",
@@ -4016,7 +4101,7 @@ export class Task extends EventEmitter<TaskEvents> implements TaskLike {
 			})
 
 			if (truncateResult.messages !== this.apiConversationHistory) {
-				await this.overwriteApiConversationHistory(truncateResult.messages)
+				await this.overwriteApiConversationHistory(truncateResult.messages as RooMessage[])
 			}
 
 			if (truncateResult.summary) {
@@ -4147,11 +4232,11 @@ export class Task extends EventEmitter<TaskEvents> implements TaskLike {
 			// This allows us to show an in-progress indicator to the user
 			// We use the centralized willManageContext helper to avoid duplicating threshold logic
 			const lastMessage = this.apiConversationHistory[this.apiConversationHistory.length - 1]
-			const lastMessageContent = lastMessage?.content
+			const lastMessageContent = isRooRoleMessage(lastMessage) ? lastMessage.content : undefined
 			let lastMessageTokens = 0
 			if (lastMessageContent) {
 				lastMessageTokens = Array.isArray(lastMessageContent)
-					? await this.api.countTokens(lastMessageContent)
+					? await this.api.countTokens(lastMessageContent as Parameters<typeof this.api.countTokens>[0])
 					: await this.api.countTokens([{ type: "text", text: lastMessageContent as string }])
 			}
 
@@ -4244,7 +4329,7 @@ export class Task extends EventEmitter<TaskEvents> implements TaskLike {
 					rooIgnoreController: this.rooIgnoreController,
 				})
 				if (truncateResult.messages !== this.apiConversationHistory) {
-					await this.overwriteApiConversationHistory(truncateResult.messages)
+					await this.overwriteApiConversationHistory(truncateResult.messages as RooMessage[])
 				}
 				if (truncateResult.error) {
 					await this.say("condense_context_error", truncateResult.error)
@@ -4309,7 +4394,7 @@ export class Task extends EventEmitter<TaskEvents> implements TaskLike {
 		// mergeConsecutiveApiMessages implementation) without mutating stored history.
 		const mergedForApi = mergeConsecutiveApiMessages(messagesSinceLastSummary, { roles: ["user"] })
 		const messagesWithoutImages = maybeRemoveImageBlocks(mergedForApi, this.api)
-		const cleanConversationHistory = this.buildCleanConversationHistory(messagesWithoutImages as ApiMessage[])
+		const cleanConversationHistory = this.buildCleanConversationHistory(messagesWithoutImages)
 
 		// Check auto-approval limits
 		const approvalResult = await this.autoApprovalHandler.checkAutoApprovalLimits(
@@ -4387,12 +4472,7 @@ export class Task extends EventEmitter<TaskEvents> implements TaskLike {
 		// Reset the flag after using it
 		this.skipPrevResponseIdOnce = false
 
-		// The provider accepts reasoning items alongside standard messages; cast to the expected parameter type.
-		const stream = this.api.createMessage(
-			systemPrompt,
-			cleanConversationHistory as unknown as Anthropic.Messages.MessageParam[],
-			metadata,
-		)
+		const stream = this.api.createMessage(systemPrompt, cleanConversationHistory, metadata)
 		const iterator = stream[Symbol.asyncIterator]()
 
 		// Set up abort handling - when the signal is aborted, clean up the controller reference
@@ -4562,147 +4642,165 @@ export class Task extends EventEmitter<TaskEvents> implements TaskLike {
 		return checkpointSave(this, force, suppressMessage)
 	}
 
-	private buildCleanConversationHistory(
-		messages: ApiMessage[],
-	): Array<
-		Anthropic.Messages.MessageParam | { type: "reasoning"; encrypted_content: string; id?: string; summary?: any[] }
-	> {
-		type ReasoningItemForRequest = {
-			type: "reasoning"
-			encrypted_content: string
-			id?: string
-			summary?: any[]
-		}
-
-		const cleanConversationHistory: (Anthropic.Messages.MessageParam | ReasoningItemForRequest)[] = []
-
-		for (const msg of messages) {
-			// Standalone reasoning: send encrypted, skip plain text
-			if (msg.type === "reasoning") {
-				if (msg.encrypted_content) {
-					cleanConversationHistory.push({
-						type: "reasoning",
-						summary: msg.summary,
-						encrypted_content: msg.encrypted_content!,
-						...(msg.id ? { id: msg.id } : {}),
-					})
+	/**
+	 * Prepares conversation history for the API request by sanitizing stored
+	 * RooMessage items into valid AI SDK ModelMessage format.
+	 *
+	 * Condense/truncation filtering is handled upstream by getEffectiveApiHistory.
+	 * This method:
+	 *
+	 * - Removes RooReasoningMessage items (standalone encrypted reasoning with no `role`)
+	 * - Converts custom content blocks in assistant messages to valid AI SDK parts:
+	 *   - `thinking` (Anthropic) → `reasoning` part with signature in providerOptions
+	 *   - `redacted_thinking` (Anthropic) → stripped (no AI SDK equivalent)
+	 *   - `thoughtSignature` (Gemini) → extracted and attached to first tool-call providerOptions
+	 *   - `reasoning` with `encrypted_content` but no `text` → stripped (invalid reasoning part)
+	 * - Carries `reasoning_details` (OpenRouter) through to providerOptions
+	 * - Strips all reasoning when the provider does not support it
+	 */
+	private buildCleanConversationHistory(messages: RooMessage[]): RooMessage[] {
+		const preserveReasoning = this.api.getModel().info.preserveReasoning === true || this.api.isAiSdkProvider()
+
+		return messages
+			.filter((msg) => {
+				// Always remove standalone RooReasoningMessage items (no `role` field → invalid ModelMessage)
+				if (isRooReasoningMessage(msg)) {
+					return false
+				}
+				return true
+			})
+			.map((msg) => {
+				if (!isRooAssistantMessage(msg) || !Array.isArray(msg.content)) {
+					return msg
 				}
-				continue
-			}
-
-			// Preferred path: assistant message with embedded reasoning as first content block
-			if (msg.role === "assistant") {
-				const rawContent = msg.content
-
-				const contentArray: Anthropic.Messages.ContentBlockParam[] = Array.isArray(rawContent)
-					? (rawContent as Anthropic.Messages.ContentBlockParam[])
-					: rawContent !== undefined
-						? ([
-								{ type: "text", text: rawContent } satisfies Anthropic.Messages.TextBlockParam,
-							] as Anthropic.Messages.ContentBlockParam[])
-						: []
-
-				const [first, ...rest] = contentArray
 
-				// Check if this message has reasoning_details (OpenRouter format for Gemini 3, etc.)
-				const msgWithDetails = msg
-				if (msgWithDetails.reasoning_details && Array.isArray(msgWithDetails.reasoning_details)) {
-					// Build the assistant message with reasoning_details
-					let assistantContent: Anthropic.Messages.MessageParam["content"]
+				// Detect native AI SDK format: content parts already have providerOptions
+				// (stored directly from result.response.messages). These don't need legacy sanitization.
+				const isNativeFormat = (msg.content as Array<{ providerOptions?: unknown }>).some(
+					(p) => p.providerOptions,
+				)
 
-					if (contentArray.length === 0) {
-						assistantContent = ""
-					} else if (contentArray.length === 1 && contentArray[0].type === "text") {
-						assistantContent = (contentArray[0] as Anthropic.Messages.TextBlockParam).text
-					} else {
-						assistantContent = contentArray
+				if (isNativeFormat) {
+					// Native format: only strip reasoning if the provider doesn't support it
+					if (!preserveReasoning) {
+						const filtered = (msg.content as Array<{ type: string }>).filter((p) => p.type !== "reasoning")
+						return {
+							...msg,
+							content: filtered.length > 0 ? filtered : [{ type: "text" as const, text: "" }],
+						} as unknown as RooMessage
 					}
+					// Pass through unchanged — already in valid AI SDK format
+					return msg
+				}
 
-					// Create message with reasoning_details property
-					cleanConversationHistory.push({
-						role: "assistant",
-						content: assistantContent,
-						reasoning_details: msgWithDetails.reasoning_details,
-					} as any)
+				// Legacy path: sanitize old-format messages with custom block types
+				// (thinking, redacted_thinking, thoughtSignature)
 
-					continue
+				// Extract thoughtSignature block (Gemini 3) before filtering
+				let thoughtSignature: string | undefined
+				for (const part of msg.content) {
+					const partAny = part as unknown as { type?: string; thoughtSignature?: string }
+					if (partAny.type === "thoughtSignature" && partAny.thoughtSignature) {
+						thoughtSignature = partAny.thoughtSignature
+					}
 				}
 
-				// Embedded reasoning: encrypted (send) or plain text (skip)
-				const hasEncryptedReasoning =
-					first && (first as any).type === "reasoning" && typeof (first as any).encrypted_content === "string"
-				const hasPlainTextReasoning =
-					first && (first as any).type === "reasoning" && typeof (first as any).text === "string"
-
-				if (hasEncryptedReasoning) {
-					const reasoningBlock = first as any
-
-					// Send as separate reasoning item (OpenAI Native)
-					cleanConversationHistory.push({
-						type: "reasoning",
-						summary: reasoningBlock.summary ?? [],
-						encrypted_content: reasoningBlock.encrypted_content,
-						...(reasoningBlock.id ? { id: reasoningBlock.id } : {}),
-					})
+				const sanitized: Array<{ type: string; [key: string]: unknown }> = []
+				let appliedThoughtSignature = false
 
-					// Send assistant message without reasoning
-					let assistantContent: Anthropic.Messages.MessageParam["content"]
+				for (const part of msg.content) {
+					const partType = (part as { type: string }).type
 
-					if (rest.length === 0) {
-						assistantContent = ""
-					} else if (rest.length === 1 && rest[0].type === "text") {
-						assistantContent = (rest[0] as Anthropic.Messages.TextBlockParam).text
-					} else {
-						assistantContent = rest
+					if (partType === "thinking") {
+						// Anthropic extended thinking → AI SDK reasoning part
+						if (!preserveReasoning) continue
+						const thinkingPart = part as unknown as { thinking?: string; signature?: string }
+						if (typeof thinkingPart.thinking === "string" && thinkingPart.thinking.length > 0) {
+							const reasoningPart: Record<string, unknown> = {
+								type: "reasoning",
+								text: thinkingPart.thinking,
+							}
+							if (thinkingPart.signature) {
+								reasoningPart.providerOptions = {
+									anthropic: { signature: thinkingPart.signature },
+									bedrock: { signature: thinkingPart.signature },
+								}
+							}
+							sanitized.push(reasoningPart as (typeof sanitized)[number])
+						}
+						continue
 					}
 
-					cleanConversationHistory.push({
-						role: "assistant",
-						content: assistantContent,
-					} satisfies Anthropic.Messages.MessageParam)
-
-					continue
-				} else if (hasPlainTextReasoning) {
-					// Preserve plain-text reasoning blocks for:
-					// - models explicitly opting in via preserveReasoning
-					// - AI SDK providers (provider packages decide what to include in the native request)
-					const shouldPreserveForApi =
-						this.api.getModel().info.preserveReasoning === true || this.api.isAiSdkProvider()
+					if (partType === "redacted_thinking") {
+						// No AI SDK equivalent — strip
+						continue
+					}
 
-					let assistantContent: Anthropic.Messages.MessageParam["content"]
+					if (partType === "thoughtSignature") {
+						// Extracted above, will be attached to first tool-call — strip block
+						continue
+					}
 
-					if (shouldPreserveForApi) {
-						assistantContent = contentArray
-					} else {
-						// Strip reasoning out - stored for history only, not sent back to API
-						if (rest.length === 0) {
-							assistantContent = ""
-						} else if (rest.length === 1 && rest[0].type === "text") {
-							assistantContent = (rest[0] as Anthropic.Messages.TextBlockParam).text
-						} else {
-							assistantContent = rest
+					if (partType === "reasoning") {
+						if (!preserveReasoning) continue
+						const reasoningPart = part as unknown as { text?: string; encrypted_content?: string }
+						// Only valid if it has a `text` field (AI SDK schema requires it)
+						if (typeof reasoningPart.text === "string" && reasoningPart.text.length > 0) {
+							sanitized.push(part as (typeof sanitized)[number])
 						}
+						// Blocks with encrypted_content but no text are invalid → skip
+						continue
 					}
 
-					cleanConversationHistory.push({
-						role: "assistant",
-						content: assistantContent,
-					} satisfies Anthropic.Messages.MessageParam)
+					if (partType === "tool-call" && thoughtSignature && !appliedThoughtSignature) {
+						// Attach Gemini thoughtSignature to the first tool-call
+						const toolCall = { ...(part as object) } as Record<string, unknown>
+						toolCall.providerOptions = {
+							...((toolCall.providerOptions as Record<string, unknown>) ?? {}),
+							google: { thoughtSignature },
+							vertex: { thoughtSignature },
+						}
+						sanitized.push(toolCall as (typeof sanitized)[number])
+						appliedThoughtSignature = true
+						continue
+					}
 
-					continue
+					// text, tool-call, tool-result, file — pass through
+					sanitized.push(part as (typeof sanitized)[number])
 				}
-			}
 
-			// Default path for regular messages (no embedded reasoning)
-			if (msg.role) {
-				cleanConversationHistory.push({
-					role: msg.role,
-					content: msg.content as Anthropic.Messages.ContentBlockParam[] | string,
+				const content = sanitized.length > 0 ? sanitized : [{ type: "text" as const, text: "" }]
+
+				// Carry reasoning_details through to providerOptions for OpenRouter round-tripping
+				const rawReasoningDetails = (msg as unknown as { reasoning_details?: Record<string, unknown>[] })
+					.reasoning_details
+				const validReasoningDetails = rawReasoningDetails?.filter((detail) => {
+					switch (detail.type) {
+						case "reasoning.encrypted":
+							return typeof detail.data === "string" && detail.data.length > 0
+						case "reasoning.text":
+							return typeof detail.text === "string"
+						case "reasoning.summary":
+							return typeof detail.summary === "string"
+						default:
+							return false
+					}
 				})
-			}
-		}
 
-		return cleanConversationHistory
+				const result: Record<string, unknown> = {
+					...msg,
+					content,
+				}
+
+				if (validReasoningDetails && validReasoningDetails.length > 0) {
+					result.providerOptions = {
+						...((msg as unknown as { providerOptions?: Record<string, unknown> }).providerOptions ?? {}),
+						openrouter: { reasoning_details: validReasoningDetails },
+					}
+				}
+
+				return result as unknown as RooMessage
+			})
 	}
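A before/after sketch of the legacy translation the method performs, mirroring the hunk above: an Anthropic thinking block becomes an AI SDK reasoning part whose signature rides in providerOptions for both the anthropic and bedrock provider keys.

	// Sketch only; shapes taken from the hunk above.
	const legacyThinkingBlock = {
		type: "thinking" as const,
		thinking: "The user wants the file refactored before tests run.",
		signature: "sig_abc123",
	}

	const sanitizedReasoningPart = {
		type: "reasoning" as const,
		text: legacyThinkingBlock.thinking,
		providerOptions: {
			anthropic: { signature: legacyThinkingBlock.signature },
			bedrock: { signature: legacyThinkingBlock.signature },
		},
	}
	console.assert(sanitizedReasoningPart.text === legacyThinkingBlock.thinking)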
 	public async checkpointRestore(options: CheckpointRestoreOptions) {
 		return checkpointRestore(this, options)

+ 52 - 25
src/core/task/__tests__/Task.persistence.spec.ts

@@ -15,6 +15,7 @@ import { ContextProxy } from "../../config/ContextProxy"
 
 const {
 	mockSaveApiMessages,
+	mockSaveRooMessages,
 	mockSaveTaskMessages,
 	mockReadApiMessages,
 	mockReadTaskMessages,
@@ -22,6 +23,7 @@ const {
 	mockPWaitFor,
 } = vi.hoisted(() => ({
 	mockSaveApiMessages: vi.fn().mockResolvedValue(undefined),
+	mockSaveRooMessages: vi.fn().mockResolvedValue(undefined),
 	mockSaveTaskMessages: vi.fn().mockResolvedValue(undefined),
 	mockReadApiMessages: vi.fn().mockResolvedValue([]),
 	mockReadTaskMessages: vi.fn().mockResolvedValue([]),
@@ -75,6 +77,7 @@ vi.mock("p-wait-for", () => ({
 
 vi.mock("../../task-persistence", () => ({
 	saveApiMessages: mockSaveApiMessages,
+	saveRooMessages: mockSaveRooMessages,
 	saveTaskMessages: mockSaveTaskMessages,
 	readApiMessages: mockReadApiMessages,
 	readTaskMessages: mockReadTaskMessages,
@@ -251,7 +254,7 @@ describe("Task persistence", () => {
 
 	describe("saveApiConversationHistory", () => {
 		it("returns true on success", async () => {
-			mockSaveApiMessages.mockResolvedValueOnce(undefined)
+			mockSaveRooMessages.mockResolvedValueOnce(undefined)
 
 			const task = new Task({
 				provider: mockProvider,
@@ -273,7 +276,7 @@ describe("Task persistence", () => {
 			vi.useFakeTimers()
 
 			// All 3 retry attempts must fail for retrySaveApiConversationHistory to return false
-			mockSaveApiMessages
+			mockSaveRooMessages
 				.mockRejectedValueOnce(new Error("fail 1"))
 				.mockRejectedValueOnce(new Error("fail 2"))
 				.mockRejectedValueOnce(new Error("fail 3"))
@@ -290,7 +293,29 @@ describe("Task persistence", () => {
 			const result = await promise
 
 			expect(result).toBe(false)
-			expect(mockSaveApiMessages).toHaveBeenCalledTimes(3)
+			expect(mockSaveRooMessages).toHaveBeenCalledTimes(3)
+
+			vi.useRealTimers()
+		})
+
+		it("returns false when saveRooMessages resolves false", async () => {
+			vi.useFakeTimers()
+
+			mockSaveRooMessages.mockResolvedValue(false)
+
+			const task = new Task({
+				provider: mockProvider,
+				apiConfiguration: mockApiConfig,
+				task: "test task",
+				startTask: false,
+			})
+
+			const promise = task.retrySaveApiConversationHistory()
+			await vi.runAllTimersAsync()
+			const result = await promise
+
+			expect(result).toBe(false)
+			expect(mockSaveRooMessages).toHaveBeenCalledTimes(3)
 
 			vi.useRealTimers()
 		})
@@ -298,7 +323,7 @@ describe("Task persistence", () => {
 		it("succeeds on 2nd retry attempt", async () => {
 			vi.useFakeTimers()
 
-			mockSaveApiMessages.mockRejectedValueOnce(new Error("fail 1")).mockResolvedValueOnce(undefined) // succeeds on 2nd try
+			mockSaveRooMessages.mockRejectedValueOnce(new Error("fail 1")).mockResolvedValueOnce(undefined) // succeeds on 2nd try
 
 			const task = new Task({
 				provider: mockProvider,
@@ -312,13 +337,13 @@ describe("Task persistence", () => {
 			const result = await promise
 
 			expect(result).toBe(true)
-			expect(mockSaveApiMessages).toHaveBeenCalledTimes(2)
+			expect(mockSaveRooMessages).toHaveBeenCalledTimes(2)
 
 			vi.useRealTimers()
 		})
 
 		it("snapshots the array before passing to saveApiMessages", async () => {
-			mockSaveApiMessages.mockResolvedValueOnce(undefined)
+			mockSaveRooMessages.mockResolvedValueOnce(undefined)
 
 			const task = new Task({
 				provider: mockProvider,
@@ -335,9 +360,9 @@ describe("Task persistence", () => {
 
 			await task.retrySaveApiConversationHistory()
 
-			expect(mockSaveApiMessages).toHaveBeenCalledTimes(1)
+			expect(mockSaveRooMessages).toHaveBeenCalledTimes(1)
 
-			const callArgs = mockSaveApiMessages.mock.calls[0][0]
+			const callArgs = mockSaveRooMessages.mock.calls[0][0]
 			// The messages passed should be a COPY, not the live reference
 			expect(callArgs.messages).not.toBe(task.apiConversationHistory)
 			// But the content should be the same
@@ -409,7 +434,7 @@ describe("Task persistence", () => {
 
 	describe("flushPendingToolResultsToHistory persistence", () => {
 		it("retains userMessageContent on save failure", async () => {
-			mockSaveApiMessages.mockRejectedValueOnce(new Error("disk full"))
+			mockSaveRooMessages.mockRejectedValueOnce(new Error("disk full"))
 
 			const task = new Task({
 				provider: mockProvider,
@@ -421,27 +446,28 @@ describe("Task persistence", () => {
 			// Skip waiting for assistant message
 			task.assistantMessageSavedToHistory = true
 
-			task.userMessageContent = [
+			task.pendingToolResults = [
 				{
-					type: "tool_result",
-					tool_use_id: "tool-fail",
-					content: "Result that should be retained",
+					type: "tool-result",
+					toolCallId: "tool-fail",
+					toolName: "read_file",
+					output: { type: "text", value: "Result that should be retained" },
 				},
 			]
 
 			const saved = await task.flushPendingToolResultsToHistory()
 
 			expect(saved).toBe(false)
-			// userMessageContent should NOT be cleared on failure
-			expect(task.userMessageContent.length).toBeGreaterThan(0)
-			expect(task.userMessageContent[0]).toMatchObject({
-				type: "tool_result",
-				tool_use_id: "tool-fail",
+			// pendingToolResults should NOT be cleared on failure
+			expect(task.pendingToolResults.length).toBeGreaterThan(0)
+			expect(task.pendingToolResults[0]).toMatchObject({
+				type: "tool-result",
+				toolCallId: "tool-fail",
 			})
 		})
 
 		it("clears userMessageContent on save success", async () => {
-			mockSaveApiMessages.mockResolvedValueOnce(undefined)
+			mockSaveRooMessages.mockResolvedValueOnce(undefined)
 
 			const task = new Task({
 				provider: mockProvider,
@@ -453,19 +479,20 @@ describe("Task persistence", () => {
 			// Skip waiting for assistant message
 			task.assistantMessageSavedToHistory = true
 
-			task.userMessageContent = [
+			task.pendingToolResults = [
 				{
-					type: "tool_result",
-					tool_use_id: "tool-ok",
-					content: "Result that should be cleared",
+					type: "tool-result",
+					toolCallId: "tool-ok",
+					toolName: "read_file",
+					output: { type: "text", value: "Result that should be cleared" },
 				},
 			]
 
 			const saved = await task.flushPendingToolResultsToHistory()
 
 			expect(saved).toBe(true)
-			// userMessageContent should be cleared on success
-			expect(task.userMessageContent).toEqual([])
+			// pendingToolResults should be cleared on success
+			expect(task.pendingToolResults).toEqual([])
 		})
 	})
 })

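These tests pin down a retry contract rather than an implementation: `retrySaveApiConversationHistory` makes up to three attempts, treats both a rejection and a resolved `false` from `saveRooMessages` as a failed attempt, and returns `true` on the first success. A sketch of that contract under those assumptions (the real method lives on `Task`):

```typescript
type SaveFn = () => Promise<unknown>

async function retrySave(save: SaveFn, attempts = 3): Promise<boolean> {
	for (let i = 0; i < attempts; i++) {
		try {
			const result = await save()
			// A resolved `false` counts as a failed attempt, not a success.
			if (result !== false) return true
		} catch {
			// Swallow and retry; callers only see the final boolean.
		}
	}
	return false
}

// Usage mirroring the "succeeds on 2nd retry attempt" test:
let calls = 0
retrySave(async () => {
	if (calls++ === 0) throw new Error("fail 1")
}).then((ok) => console.log(ok, calls)) // true, 2
```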
+ 68 - 72
src/core/task/__tests__/Task.spec.ts

@@ -528,7 +528,7 @@ describe("Cline", () => {
 					} as ModelInfo,
 				})
 
-				clineWithImages.apiConversationHistory = conversationHistory
+				clineWithImages.apiConversationHistory = conversationHistory as any
 
 				// Test with model that doesn't support images
 				const [clineWithoutImages, taskWithoutImages] = Task.create({
@@ -550,7 +550,7 @@ describe("Cline", () => {
 					} as ModelInfo,
 				})
 
-				clineWithoutImages.apiConversationHistory = conversationHistory
+				clineWithoutImages.apiConversationHistory = conversationHistory as any
 
 				// Mock abort state for both instances
 				Object.defineProperty(clineWithImages, "abort", {
@@ -590,7 +590,7 @@ describe("Cline", () => {
 							{ type: "image", source: { type: "base64", media_type: "image/jpeg", data: "base64data" } },
 						],
 					},
-				]
+				] as any
 
 				clineWithImages.abandoned = true
 				await taskWithImages.catch(() => {})
@@ -893,7 +893,7 @@ describe("Cline", () => {
 									text: "<user_message>Check 'some/path' (see below for file content)</user_message>",
 								},
 							],
-						} as Anthropic.ToolResultBlockParam,
+						} as any,
 						{
 							type: "tool_result",
 							tool_use_id: "test-id-2",
@@ -903,7 +903,7 @@ describe("Cline", () => {
 									text: "Regular tool result with 'path' (see below for file content)",
 								},
 							],
-						} as Anthropic.ToolResultBlockParam,
+						} as any,
 					]
 
 					const { content: processedContent } = await processUserContentMentions({
@@ -924,20 +924,12 @@ describe("Cline", () => {
 						"<user_message>Text with 'some/path' (see below for file content) in user_message tags</user_message>",
 					)
 
-					// user_message tag content should be processed
-					const toolResult1 = processedContent[2] as Anthropic.ToolResultBlockParam
-					const content1 = Array.isArray(toolResult1.content) ? toolResult1.content[0] : toolResult1.content
-					expect((content1 as Anthropic.TextBlockParam).text).toContain("processed:")
-					expect((content1 as Anthropic.TextBlockParam).text).toContain(
-						"<user_message>Check 'some/path' (see below for file content)</user_message>",
-					)
+					// tool_result blocks are passed through unchanged (no longer processed by processUserContentMentions)
+					const toolResult1 = processedContent[2] as any
+					expect(toolResult1.type).toBe("tool_result")
 
-					// Regular tool result should not be processed
-					const toolResult2 = processedContent[3] as Anthropic.ToolResultBlockParam
-					const content2 = Array.isArray(toolResult2.content) ? toolResult2.content[0] : toolResult2.content
-					expect((content2 as Anthropic.TextBlockParam).text).toBe(
-						"Regular tool result with 'path' (see below for file content)",
-					)
+					const toolResult2 = processedContent[3] as any
+					expect(toolResult2.type).toBe("tool_result")
 
 					await cline.abortTask(true)
 					await task.catch(() => {})
@@ -2051,17 +2043,18 @@ describe("pushToolResultToUserContent", () => {
 			startTask: false,
 		})
 
-		const toolResult: Anthropic.ToolResultBlockParam = {
-			type: "tool_result",
-			tool_use_id: "test-id-1",
-			content: "Test result",
+		const toolResult = {
+			type: "tool-result" as const,
+			toolCallId: "test-id-1",
+			toolName: "read_file",
+			output: { type: "text", value: "Test result" },
 		}
 
-		const added = task.pushToolResultToUserContent(toolResult)
+		const added = task.pushToolResultToUserContent(toolResult as any)
 
 		expect(added).toBe(true)
-		expect(task.userMessageContent).toHaveLength(1)
-		expect(task.userMessageContent[0]).toEqual(toolResult)
+		expect(task.pendingToolResults).toHaveLength(1)
+		expect(task.pendingToolResults[0]).toEqual(toolResult)
 	})
 
 	it("should prevent duplicate tool_result with same tool_use_id", () => {
@@ -2072,37 +2065,39 @@ describe("pushToolResultToUserContent", () => {
 			startTask: false,
 		})
 
-		const toolResult1: Anthropic.ToolResultBlockParam = {
-			type: "tool_result",
-			tool_use_id: "duplicate-id",
-			content: "First result",
+		const toolResult1 = {
+			type: "tool-result" as const,
+			toolCallId: "duplicate-id",
+			toolName: "read_file",
+			output: { type: "text", value: "First result" },
 		}
 
-		const toolResult2: Anthropic.ToolResultBlockParam = {
-			type: "tool_result",
-			tool_use_id: "duplicate-id",
-			content: "Second result (should be skipped)",
+		const toolResult2 = {
+			type: "tool-result" as const,
+			toolCallId: "duplicate-id",
+			toolName: "read_file",
+			output: { type: "text", value: "Second result (should be skipped)" },
 		}
 
 		// Spy on console.warn to verify warning is logged
 		const warnSpy = vi.spyOn(console, "warn").mockImplementation(() => {})
 
 		// Add first result - should succeed
-		const added1 = task.pushToolResultToUserContent(toolResult1)
+		const added1 = task.pushToolResultToUserContent(toolResult1 as any)
 		expect(added1).toBe(true)
-		expect(task.userMessageContent).toHaveLength(1)
+		expect(task.pendingToolResults).toHaveLength(1)
 
 		// Add second result with same ID - should be skipped
-		const added2 = task.pushToolResultToUserContent(toolResult2)
+		const added2 = task.pushToolResultToUserContent(toolResult2 as any)
 		expect(added2).toBe(false)
-		expect(task.userMessageContent).toHaveLength(1)
+		expect(task.pendingToolResults).toHaveLength(1)
 
 		// Verify only the first result is in the array
-		expect(task.userMessageContent[0]).toEqual(toolResult1)
+		expect(task.pendingToolResults[0]).toEqual(toolResult1)
 
 		// Verify warning was logged
 		expect(warnSpy).toHaveBeenCalledWith(
-			expect.stringContaining("Skipping duplicate tool_result for tool_use_id: duplicate-id"),
+			expect.stringContaining("Skipping duplicate tool_result for toolCallId: duplicate-id"),
 		)
 
 		warnSpy.mockRestore()
@@ -2116,26 +2111,28 @@ describe("pushToolResultToUserContent", () => {
 			startTask: false,
 		})
 
-		const toolResult1: Anthropic.ToolResultBlockParam = {
-			type: "tool_result",
-			tool_use_id: "id-1",
-			content: "Result 1",
+		const toolResult1 = {
+			type: "tool-result" as const,
+			toolCallId: "id-1",
+			toolName: "read_file",
+			output: { type: "text", value: "Result 1" },
 		}
 
-		const toolResult2: Anthropic.ToolResultBlockParam = {
-			type: "tool_result",
-			tool_use_id: "id-2",
-			content: "Result 2",
+		const toolResult2 = {
+			type: "tool-result" as const,
+			toolCallId: "id-2",
+			toolName: "write_to_file",
+			output: { type: "text", value: "Result 2" },
 		}
 
-		const added1 = task.pushToolResultToUserContent(toolResult1)
-		const added2 = task.pushToolResultToUserContent(toolResult2)
+		const added1 = task.pushToolResultToUserContent(toolResult1 as any)
+		const added2 = task.pushToolResultToUserContent(toolResult2 as any)
 
 		expect(added1).toBe(true)
 		expect(added2).toBe(true)
-		expect(task.userMessageContent).toHaveLength(2)
-		expect(task.userMessageContent[0]).toEqual(toolResult1)
-		expect(task.userMessageContent[1]).toEqual(toolResult2)
+		expect(task.pendingToolResults).toHaveLength(2)
+		expect(task.pendingToolResults[0]).toEqual(toolResult1)
+		expect(task.pendingToolResults[1]).toEqual(toolResult2)
 	})
 
 	it("should handle tool_result with is_error flag", () => {
@@ -2146,18 +2143,19 @@ describe("pushToolResultToUserContent", () => {
 			startTask: false,
 		})
 
-		const errorResult: Anthropic.ToolResultBlockParam = {
-			type: "tool_result",
-			tool_use_id: "error-id",
-			content: "Error message",
-			is_error: true,
+		const errorResult = {
+			type: "tool-result" as const,
+			toolCallId: "error-id",
+			toolName: "execute_command",
+			output: { type: "text", value: "Error message" },
+			isError: true,
 		}
 
-		const added = task.pushToolResultToUserContent(errorResult)
+		const added = task.pushToolResultToUserContent(errorResult as any)
 
 		expect(added).toBe(true)
-		expect(task.userMessageContent).toHaveLength(1)
-		expect(task.userMessageContent[0]).toEqual(errorResult)
+		expect(task.pendingToolResults).toHaveLength(1)
+		expect(task.pendingToolResults[0]).toEqual(errorResult)
 	})
 
 	it("should not interfere with other content types in userMessageContent", () => {
@@ -2169,23 +2167,21 @@ describe("pushToolResultToUserContent", () => {
 		})
 
 		// Add text and image blocks manually
-		task.userMessageContent.push(
-			{ type: "text", text: "Some text" },
-			{ type: "image", source: { type: "base64", media_type: "image/png", data: "base64data" } },
-		)
+		task.userMessageContent.push({ type: "text", text: "Some text" })
 
-		const toolResult: Anthropic.ToolResultBlockParam = {
-			type: "tool_result",
-			tool_use_id: "test-id",
-			content: "Result",
+		const toolResult = {
+			type: "tool-result" as const,
+			toolCallId: "test-id",
+			toolName: "read_file",
+			output: { type: "text", value: "Result" },
 		}
 
-		const added = task.pushToolResultToUserContent(toolResult)
+		const added = task.pushToolResultToUserContent(toolResult as any)
 
 		expect(added).toBe(true)
-		expect(task.userMessageContent).toHaveLength(3)
+		expect(task.userMessageContent).toHaveLength(1)
 		expect(task.userMessageContent[0].type).toBe("text")
-		expect(task.userMessageContent[1].type).toBe("image")
-		expect(task.userMessageContent[2]).toEqual(toolResult)
+		expect(task.pendingToolResults).toHaveLength(1)
+		expect(task.pendingToolResults[0]).toEqual(toolResult)
 	})
 })

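The recurring rewrite in this file is a shape change: legacy Anthropic `tool_result` blocks (`tool_use_id`, `content`, `is_error`) become AI SDK `tool-result` parts (`toolCallId`, `toolName`, `output`, `isError`), and they now accumulate in `pendingToolResults` rather than `userMessageContent`. A sketch of the part shape and the duplicate guard these tests assert, with simplified stand-in types:

```typescript
interface ToolResultPart {
	type: "tool-result"
	toolCallId: string
	toolName: string
	output: { type: "text"; value: string }
	isError?: boolean
}

const pendingToolResults: ToolResultPart[] = []

function pushToolResult(result: ToolResultPart): boolean {
	// Duplicate toolCallIds would violate the provider protocol, so they are skipped.
	if (pendingToolResults.some((r) => r.toolCallId === result.toolCallId)) {
		console.warn(`Skipping duplicate tool_result for toolCallId: ${result.toolCallId}`)
		return false
	}
	pendingToolResults.push(result)
	return true
}

pushToolResult({ type: "tool-result", toolCallId: "id-1", toolName: "read_file", output: { type: "text", value: "ok" } }) // true
pushToolResult({ type: "tool-result", toolCallId: "id-1", toolName: "read_file", output: { type: "text", value: "dup" } }) // false
```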
+ 57 - 49
src/core/task/__tests__/flushPendingToolResultsToHistory.spec.ts

@@ -249,29 +249,30 @@ describe("flushPendingToolResultsToHistory", () => {
 			startTask: false,
 		})
 
-		// Set up pending tool result in userMessageContent
-		task.userMessageContent = [
+		// Set up pending tool result in pendingToolResults
+		task.pendingToolResults = [
 			{
-				type: "tool_result",
-				tool_use_id: "tool-123",
-				content: "File written successfully",
+				type: "tool-result",
+				toolCallId: "tool-123",
+				toolName: "write_to_file",
+				output: { type: "text", value: "File written successfully" },
 			},
 		]
 
 		await task.flushPendingToolResultsToHistory()
 
-		// Should have saved 1 user message
+		// Should have saved 1 tool message
 		expect(task.apiConversationHistory.length).toBe(1)
 
-		// Check user message with tool result
-		const userMessage = task.apiConversationHistory[0]
-		expect(userMessage.role).toBe("user")
-		expect(Array.isArray(userMessage.content)).toBe(true)
-		expect((userMessage.content as any[])[0].type).toBe("tool_result")
-		expect((userMessage.content as any[])[0].tool_use_id).toBe("tool-123")
+		// Check tool message with tool result
+		const toolMessage = task.apiConversationHistory[0] as any
+		expect(toolMessage.role).toBe("tool")
+		expect(Array.isArray(toolMessage.content)).toBe(true)
+		expect((toolMessage.content as any[])[0].type).toBe("tool-result")
+		expect((toolMessage.content as any[])[0].toolCallId).toBe("tool-123")
 	})
 
-	it("should clear userMessageContent after flushing", async () => {
+	it("should clear pendingToolResults after flushing", async () => {
 		const task = new Task({
 			provider: mockProvider,
 			apiConfiguration: mockApiConfig,
@@ -280,18 +281,19 @@ describe("flushPendingToolResultsToHistory", () => {
 		})
 
 		// Set up pending tool result
-		task.userMessageContent = [
+		task.pendingToolResults = [
 			{
-				type: "tool_result",
-				tool_use_id: "tool-456",
-				content: "Command executed",
+				type: "tool-result",
+				toolCallId: "tool-456",
+				toolName: "execute_command",
+				output: { type: "text", value: "Command executed" },
 			},
 		]
 
 		await task.flushPendingToolResultsToHistory()
 
-		// userMessageContent should be cleared
-		expect(task.userMessageContent.length).toBe(0)
+		// pendingToolResults should be cleared
+		expect(task.pendingToolResults.length).toBe(0)
 	})
 
 	it("should handle multiple tool results in a single flush", async () => {
@@ -303,27 +305,29 @@ describe("flushPendingToolResultsToHistory", () => {
 		})
 
 		// Set up multiple pending tool results
-		task.userMessageContent = [
+		task.pendingToolResults = [
 			{
-				type: "tool_result",
-				tool_use_id: "tool-1",
-				content: "First result",
+				type: "tool-result",
+				toolCallId: "tool-1",
+				toolName: "read_file",
+				output: { type: "text", value: "First result" },
 			},
 			{
-				type: "tool_result",
-				tool_use_id: "tool-2",
-				content: "Second result",
+				type: "tool-result",
+				toolCallId: "tool-2",
+				toolName: "write_to_file",
+				output: { type: "text", value: "Second result" },
 			},
 		]
 
 		await task.flushPendingToolResultsToHistory()
 
-		// Check user message has both tool results
-		const userMessage = task.apiConversationHistory[0]
-		expect(Array.isArray(userMessage.content)).toBe(true)
-		expect((userMessage.content as any[]).length).toBe(2)
-		expect((userMessage.content as any[])[0].tool_use_id).toBe("tool-1")
-		expect((userMessage.content as any[])[1].tool_use_id).toBe("tool-2")
+		// Check tool message has both tool results
+		const toolMessage = task.apiConversationHistory[0] as any
+		expect(Array.isArray(toolMessage.content)).toBe(true)
+		expect((toolMessage.content as any[]).length).toBe(2)
+		expect((toolMessage.content as any[])[0].toolCallId).toBe("tool-1")
+		expect((toolMessage.content as any[])[1].toolCallId).toBe("tool-2")
 	})
 
 	it("should add timestamp to saved messages", async () => {
@@ -336,11 +340,12 @@ describe("flushPendingToolResultsToHistory", () => {
 
 		const beforeTs = Date.now()
 
-		task.userMessageContent = [
+		task.pendingToolResults = [
 			{
-				type: "tool_result",
-				tool_use_id: "tool-ts",
-				content: "Result",
+				type: "tool-result",
+				toolCallId: "tool-ts",
+				toolName: "read_file",
+				output: { type: "text", value: "Result" },
 			},
 		]
 
@@ -365,11 +370,12 @@ describe("flushPendingToolResultsToHistory", () => {
 		task.assistantMessageSavedToHistory = true
 
 		// Set up pending tool result
-		task.userMessageContent = [
+		task.pendingToolResults = [
 			{
-				type: "tool_result",
-				tool_use_id: "tool-skip-wait",
-				content: "Result when flag is true",
+				type: "tool-result",
+				toolCallId: "tool-skip-wait",
+				toolName: "read_file",
+				output: { type: "text", value: "Result when flag is true" },
 			},
 		]
 
@@ -383,7 +389,7 @@ describe("flushPendingToolResultsToHistory", () => {
 
 		// Should still save the message
 		expect(task.apiConversationHistory.length).toBe(1)
-		expect((task.apiConversationHistory[0].content as any[])[0].tool_use_id).toBe("tool-skip-wait")
+		expect(((task.apiConversationHistory[0] as any).content as any[])[0].toolCallId).toBe("tool-skip-wait")
 	})
 
 	it("should wait for assistantMessageSavedToHistory when flag is false", async () => {
@@ -398,11 +404,12 @@ describe("flushPendingToolResultsToHistory", () => {
 		expect(task.assistantMessageSavedToHistory).toBe(false)
 
 		// Set up pending tool result
-		task.userMessageContent = [
+		task.pendingToolResults = [
 			{
-				type: "tool_result",
-				tool_use_id: "tool-wait",
-				content: "Result when flag is false",
+				type: "tool-result",
+				toolCallId: "tool-wait",
+				toolName: "read_file",
+				output: { type: "text", value: "Result when flag is false" },
 			},
 		]
 
@@ -430,11 +437,12 @@ describe("flushPendingToolResultsToHistory", () => {
 		task.assistantMessageSavedToHistory = false
 
 		// Set up pending tool result
-		task.userMessageContent = [
+		task.pendingToolResults = [
 			{
-				type: "tool_result",
-				tool_use_id: "tool-aborted",
-				content: "Should not be saved",
+				type: "tool-result",
+				toolCallId: "tool-aborted",
+				toolName: "read_file",
+				output: { type: "text", value: "Should not be saved" },
 			},
 		]
 

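Taken together, these tests describe the flush step: buffered `tool-result` parts are written to history as a single `role: "tool"` message with a timestamp, and the buffer is cleared only when the save succeeds. A sketch under those assumptions; types are simplified, and the real method additionally waits on `assistantMessageSavedToHistory` and abort state:

```typescript
type ToolResultPart = {
	type: "tool-result"
	toolCallId: string
	toolName: string
	output: { type: "text"; value: string }
}
type ToolMessage = { role: "tool"; content: ToolResultPart[]; ts: number }

async function flushPendingToolResults(
	pending: ToolResultPart[],
	history: ToolMessage[],
	save: () => Promise<void>,
): Promise<boolean> {
	if (pending.length === 0) return true
	// All buffered results land in one role:"tool" message with a timestamp.
	history.push({ role: "tool", content: [...pending], ts: Date.now() })
	try {
		await save()
	} catch {
		return false // pending results are retained for a later retry
	}
	pending.length = 0 // clear only after a successful save
	return true
}
```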
+ 2 - 2
src/core/task/__tests__/grounding-sources.test.ts

@@ -240,7 +240,7 @@ Sources: [1](https://example.com), [2](https://another.com)
 
 		// Verify the API conversation history contains the cleaned message
 		expect(task.apiConversationHistory).toHaveLength(1)
-		expect(task.apiConversationHistory[0].content).toEqual([
+		expect((task.apiConversationHistory[0] as any).content).toEqual([
 			{ type: "text", text: "This is the main response content." },
 		])
 	})
@@ -273,7 +273,7 @@ Sources: [1](https://example.com), [2](https://another.com)
 		})
 
 		// Message should remain unchanged
-		expect(task.apiConversationHistory[0].content).toEqual([
+		expect((task.apiConversationHistory[0] as any).content).toEqual([
 			{ type: "text", text: "This is a regular response without any sources." },
 		])
 	})

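The `as any` casts here (and in the specs below) exist because `apiConversationHistory` is now `RooMessage[]`, a union whose members do not all expose `content` in the same shape. A narrow helper, hypothetical and not in the repo, would keep such assertions type-safe without the casts:

```typescript
type RooMessageLike = { role: string; content?: unknown }

// Hypothetical test helper: returns the content array, or [] if absent.
function contentOf(message: unknown): unknown[] {
	const content = (message as RooMessageLike).content
	return Array.isArray(content) ? content : []
}

// e.g. expect(contentOf(task.apiConversationHistory[0])).toEqual([...])
```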
+ 4 - 4
src/core/task/__tests__/mergeConsecutiveApiMessages.spec.ts

@@ -11,12 +11,12 @@ describe("mergeConsecutiveApiMessages", () => {
 		])
 
 		expect(merged).toHaveLength(2)
-		expect(merged[0].role).toBe("user")
-		expect(merged[0].content).toEqual([
+		expect((merged[0] as any).role).toBe("user")
+		expect((merged[0] as any).content).toEqual([
 			{ type: "text", text: "A" },
 			{ type: "text", text: "B" },
 		])
-		expect(merged[1].role).toBe("assistant")
+		expect((merged[1] as any).role).toBe("assistant")
 	})
 
 	it("merges regular user message into a summary (API shaping only)", () => {
@@ -27,7 +27,7 @@ describe("mergeConsecutiveApiMessages", () => {
 
 		expect(merged).toHaveLength(1)
 		expect(merged[0].isSummary).toBe(true)
-		expect(merged[0].content).toEqual([
+		expect((merged[0] as any).content).toEqual([
 			{ type: "text", text: "Summary" },
 			{ type: "text", text: "After" },
 		])

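Per these assertions, `mergeConsecutiveApiMessages` concatenates the content arrays of adjacent same-role messages for API shaping only, preserving flags such as `isSummary` on the earlier message. A rough sketch with a simplified message type:

```typescript
type Msg = { role: "user" | "assistant"; content: { type: "text"; text: string }[]; isSummary?: boolean }

function mergeConsecutive(messages: Msg[]): Msg[] {
	const merged: Msg[] = []
	for (const msg of messages) {
		const prev = merged[merged.length - 1]
		if (prev && prev.role === msg.role) {
			// Merge into the earlier message, keeping its flags (e.g. isSummary).
			prev.content = [...prev.content, ...msg.content]
		} else {
			merged.push({ ...msg, content: [...msg.content] })
		}
	}
	return merged
}

const out = mergeConsecutive([
	{ role: "user", content: [{ type: "text", text: "A" }] },
	{ role: "user", content: [{ type: "text", text: "B" }] },
	{ role: "assistant", content: [{ type: "text", text: "C" }] },
])
console.log(out.length) // 2
```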
+ 78 - 159
src/core/task/__tests__/reasoning-preservation.test.ts

@@ -186,8 +186,7 @@ describe("Task reasoning preservation", () => {
 		} as ProviderSettings
 	})
 
-	it("should append reasoning to assistant message when preserveReasoning is true", async () => {
-		// Create a task instance
+	it("should store native AI SDK format messages directly when providerOptions present", async () => {
 		const task = new Task({
 			provider: mockProvider as ClineProvider,
 			apiConfiguration: mockApiConfiguration,
@@ -195,58 +194,49 @@ describe("Task reasoning preservation", () => {
 			startTask: false,
 		})
 
-		// Mock the API to return a model with preserveReasoning enabled
-		const mockModelInfo: ModelInfo = {
-			contextWindow: 16000,
-			supportsPromptCache: true,
-			preserveReasoning: true,
-		}
+		// Avoid disk writes in this test
+		;(task as any).saveApiConversationHistory = vi.fn().mockResolvedValue(undefined)
 
 		task.api = {
-			getModel: vi.fn().mockReturnValue({
-				id: "test-model",
-				info: mockModelInfo,
-			}),
+			getResponseId: vi.fn().mockReturnValue("resp_123"),
 		} as any
 
-		// Mock the API conversation history
 		task.apiConversationHistory = []
 
-		// Simulate adding an assistant message with reasoning
-		const assistantMessage = "Here is my response to your question."
-		const reasoningMessage = "Let me think about this step by step. First, I need to..."
-
-		// Spy on addToApiConversationHistory
-		const addToApiHistorySpy = vi.spyOn(task as any, "addToApiConversationHistory")
+		// Simulate a native AI SDK response message (has providerOptions on reasoning part)
+		await (task as any).addToApiConversationHistory({
+			role: "assistant",
+			content: [
+				{
+					type: "reasoning",
+					text: "Let me think about this...",
+					providerOptions: {
+						anthropic: { signature: "sig_abc123" },
+					},
+				},
+				{ type: "text", text: "Here is my response." },
+			],
+		})
 
-		await (task as any).addToApiConversationHistory(
-			{
-				role: "assistant",
-				content: [{ type: "text", text: assistantMessage }],
-			},
-			reasoningMessage,
-		)
+		expect(task.apiConversationHistory).toHaveLength(1)
+		const stored = task.apiConversationHistory[0] as any
 
-		// Verify that reasoning was stored as a separate reasoning block
-		expect(addToApiHistorySpy).toHaveBeenCalledWith(
+		expect(stored.role).toBe("assistant")
+		expect(stored.id).toBe("resp_123")
+		// Content preserved exactly as-is (no manual block injection)
+		expect(stored.content).toEqual([
 			{
-				role: "assistant",
-				content: [{ type: "text", text: assistantMessage }],
+				type: "reasoning",
+				text: "Let me think about this...",
+				providerOptions: {
+					anthropic: { signature: "sig_abc123" },
+				},
 			},
-			reasoningMessage,
-		)
-
-		// Verify the API conversation history contains the message with reasoning block
-		expect(task.apiConversationHistory).toHaveLength(1)
-		expect(task.apiConversationHistory[0].role).toBe("assistant")
-		expect(task.apiConversationHistory[0].content).toEqual([
-			{ type: "reasoning", text: reasoningMessage, summary: [] },
-			{ type: "text", text: assistantMessage },
+			{ type: "text", text: "Here is my response." },
 		])
 	})
 
-	it("should store reasoning blocks even when preserveReasoning is false", async () => {
-		// Create a task instance
+	it("should store messages without providerOptions via fallback path", async () => {
 		const task = new Task({
 			provider: mockProvider as ClineProvider,
 			apiConfiguration: mockApiConfiguration,
@@ -254,42 +244,27 @@ describe("Task reasoning preservation", () => {
 			startTask: false,
 		})
 
-		// Mock the API to return a model with preserveReasoning disabled (or undefined)
-		const mockModelInfo: ModelInfo = {
-			contextWindow: 16000,
-			supportsPromptCache: true,
-			preserveReasoning: false,
-		}
+		// Avoid disk writes in this test
+		;(task as any).saveApiConversationHistory = vi.fn().mockResolvedValue(undefined)
 
 		task.api = {
-			getModel: vi.fn().mockReturnValue({
-				id: "test-model",
-				info: mockModelInfo,
-			}),
+			getResponseId: vi.fn().mockReturnValue(undefined),
+			getEncryptedContent: vi.fn().mockReturnValue(undefined),
 		} as any
 
-		// Mock the API conversation history
 		task.apiConversationHistory = []
 
-		// Add an assistant message while passing reasoning separately (Task does this in normal streaming).
-		const assistantMessage = "Here is my response to your question."
-		const reasoningMessage = "Let me think about this step by step. First, I need to..."
-
-		await (task as any).addToApiConversationHistory(
-			{
-				role: "assistant",
-				content: [{ type: "text", text: assistantMessage }],
-			},
-			reasoningMessage,
-		)
+		// Non-AI-SDK message (no providerOptions on content parts)
+		await (task as any).addToApiConversationHistory({
+			role: "assistant",
+			content: [{ type: "text", text: "Here is my response." }],
+		})
 
-		// Verify the API conversation history contains a reasoning block (storage is unconditional)
 		expect(task.apiConversationHistory).toHaveLength(1)
-		expect(task.apiConversationHistory[0].role).toBe("assistant")
-		expect(task.apiConversationHistory[0].content).toEqual([
-			{ type: "reasoning", text: reasoningMessage, summary: [] },
-			{ type: "text", text: assistantMessage },
-		])
+		const stored = task.apiConversationHistory[0] as any
+
+		expect(stored.role).toBe("assistant")
+		expect(stored.content).toEqual([{ type: "text", text: "Here is my response." }])
 	})
 
 	it("should handle empty reasoning message gracefully when preserveReasoning is true", async () => {
@@ -319,61 +294,15 @@ describe("Task reasoning preservation", () => {
 		task.apiConversationHistory = []
 
 		const assistantMessage = "Here is my response."
-		const reasoningMessage = "" // Empty reasoning
-
-		await (task as any).addToApiConversationHistory(
-			{
-				role: "assistant",
-				content: [{ type: "text", text: assistantMessage }],
-			},
-			reasoningMessage || undefined,
-		)
-
-		// Verify no reasoning blocks were added when reasoning is empty
-		expect(task.apiConversationHistory[0].content).toEqual([{ type: "text", text: "Here is my response." }])
-	})
 
-	it("should handle undefined preserveReasoning (defaults to false)", async () => {
-		// Create a task instance
-		const task = new Task({
-			provider: mockProvider as ClineProvider,
-			apiConfiguration: mockApiConfiguration,
-			task: "Test task",
-			startTask: false,
+		await (task as any).addToApiConversationHistory({
+			role: "assistant",
+			content: [{ type: "text", text: assistantMessage }],
 		})
 
-		// Mock the API to return a model without preserveReasoning field (undefined)
-		const mockModelInfo: ModelInfo = {
-			contextWindow: 16000,
-			supportsPromptCache: true,
-			// preserveReasoning is undefined
-		}
-
-		task.api = {
-			getModel: vi.fn().mockReturnValue({
-				id: "test-model",
-				info: mockModelInfo,
-			}),
-		} as any
-
-		// Mock the API conversation history
-		task.apiConversationHistory = []
-
-		const assistantMessage = "Here is my response."
-		const reasoningMessage = "Some reasoning here."
-
-		await (task as any).addToApiConversationHistory(
-			{
-				role: "assistant",
-				content: [{ type: "text", text: assistantMessage }],
-			},
-			reasoningMessage,
-		)
-
-		// Verify reasoning is stored even when preserveReasoning is undefined
-		expect(task.apiConversationHistory[0].content).toEqual([
-			{ type: "reasoning", text: reasoningMessage, summary: [] },
-			{ type: "text", text: assistantMessage },
+		// Verify no reasoning blocks were added when no reasoning is present
+		expect((task.apiConversationHistory[0] as any).content).toEqual([
+			{ type: "text", text: "Here is my response." },
 		])
 	})
 
@@ -423,7 +352,7 @@ describe("Task reasoning preservation", () => {
 		})
 	})
 
-	it("should store plain text reasoning from streaming for all providers", async () => {
+	it("should store native format with redacted thinking in providerOptions", async () => {
 		const task = new Task({
 			provider: mockProvider as ClineProvider,
 			apiConfiguration: mockApiConfiguration,
@@ -434,50 +363,40 @@ describe("Task reasoning preservation", () => {
 		// Avoid disk writes in this test
 		;(task as any).saveApiConversationHistory = vi.fn().mockResolvedValue(undefined)
 
-		// Mock API handler without getEncryptedContent (like Anthropic, Gemini, etc.)
 		task.api = {
-			getModel: vi.fn().mockReturnValue({
-				id: "test-model",
-				info: {
-					contextWindow: 16000,
-					supportsPromptCache: true,
-				},
-			}),
+			getResponseId: vi.fn().mockReturnValue("resp_456"),
 		} as any
 
-		// Simulate the new path: passing reasoning as a parameter
-		const reasoningText = "Let me analyze this carefully. First, I'll consider the requirements..."
-		const assistantText = "Here is my response."
+		task.apiConversationHistory = []
 
-		await (task as any).addToApiConversationHistory(
-			{
-				role: "assistant",
-				content: [{ type: "text", text: assistantText }],
-			},
-			reasoningText,
-		)
+		// Simulate native format with redacted thinking (as AI SDK provides it)
+		await (task as any).addToApiConversationHistory({
+			role: "assistant",
+			content: [
+				{
+					type: "reasoning",
+					text: "Visible reasoning...",
+					providerOptions: {
+						anthropic: { signature: "sig_visible" },
+					},
+				},
+				{
+					type: "reasoning",
+					text: "",
+					providerOptions: {
+						anthropic: { redactedData: "redacted_payload_abc" },
+					},
+				},
+				{ type: "text", text: "My answer." },
+			],
+		})
 
 		expect(task.apiConversationHistory).toHaveLength(1)
 		const stored = task.apiConversationHistory[0] as any
 
-		expect(stored.role).toBe("assistant")
-		expect(Array.isArray(stored.content)).toBe(true)
-
-		const [reasoningBlock, textBlock] = stored.content
-
-		// Verify reasoning is stored with plain text, not encrypted
-		expect(reasoningBlock).toMatchObject({
-			type: "reasoning",
-			text: reasoningText,
-			summary: [],
-		})
-
-		// Verify there's no encrypted_content field (that's only for OpenAI Native)
-		expect(reasoningBlock.encrypted_content).toBeUndefined()
-
-		expect(textBlock).toMatchObject({
-			type: "text",
-			text: assistantText,
-		})
+		// All content preserved as-is including redacted reasoning
+		expect(stored.content).toHaveLength(3)
+		expect(stored.content[0].providerOptions.anthropic.signature).toBe("sig_visible")
+		expect(stored.content[1].providerOptions.anthropic.redactedData).toBe("redacted_payload_abc")
 	})
 })

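The rewritten tests encode the new storage rule: a message whose content parts carry `providerOptions` is treated as a native AI SDK response and stored verbatim (plus the provider's response id), while anything else goes through a plain fallback path that no longer synthesizes reasoning blocks. A sketch of that branch, under those assumptions and with stand-in types:

```typescript
type Part = { type: string; text?: string; providerOptions?: Record<string, unknown> }
type AssistantMsg = { role: "assistant"; content: Part[]; id?: string }

function storeAssistantMessage(
	msg: AssistantMsg,
	history: AssistantMsg[],
	getResponseId: () => string | undefined,
): void {
	// "Native" means at least one content part carries providerOptions
	// (signatures, redacted reasoning payloads, etc.).
	const isNative = msg.content.some((p) => p.providerOptions !== undefined)
	if (isNative) {
		// Native path: content preserved exactly as-is, tagged with the response id.
		history.push({ ...msg, id: getResponseId() })
	} else {
		// Fallback path: stored as a plain copy, no reasoning blocks injected.
		history.push({ ...msg })
	}
}
```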
+ 9 - 4
src/core/task/__tests__/task-tool-history.spec.ts

@@ -1,3 +1,4 @@
+import type { RooMessage } from "../../task-persistence/rooMessage"
 import { describe, it, expect, beforeEach, vi } from "vitest"
 import { Anthropic } from "@anthropic-ai/sdk"
 
@@ -67,7 +68,7 @@ describe("Task Tool History Handling", () => {
 
 	describe("convertToOpenAiMessages format", () => {
 		it("should properly convert tool_use to tool_calls format", () => {
-			const anthropicMessage: Anthropic.Messages.MessageParam = {
+			const anthropicMessage: any = {
 				role: "assistant",
 				content: [
 					{
@@ -84,7 +85,9 @@ describe("Task Tool History Handling", () => {
 			}
 
 			// Simulate what convertToOpenAiMessages does
-			const toolUseBlocks = (anthropicMessage.content as any[]).filter((block) => block.type === "tool_use")
+			const toolUseBlocks = ((anthropicMessage as any).content as any[]).filter(
+				(block) => block.type === "tool_use",
+			)
 
 			const tool_calls = toolUseBlocks.map((toolMessage) => ({
 				id: toolMessage.id,
@@ -107,7 +110,7 @@ describe("Task Tool History Handling", () => {
 		})
 
 		it("should properly convert tool_result to tool role messages", () => {
-			const anthropicMessage: Anthropic.Messages.MessageParam = {
+			const anthropicMessage: any = {
 				role: "user",
 				content: [
 					{
@@ -119,7 +122,9 @@ describe("Task Tool History Handling", () => {
 			}
 
 			// Simulate what convertToOpenAiMessages does
-			const toolMessages = (anthropicMessage.content as any[]).filter((block) => block.type === "tool_result")
+			const toolMessages = ((anthropicMessage as any).content as any[]).filter(
+				(block) => block.type === "tool_result",
+			)
 
 			const openAiToolMessages = toolMessages.map((toolMessage) => ({
 				role: "tool" as const,

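For reference, a self-contained version of the conversion this test simulates, with shapes inferred from the test rather than imported from the repo: Anthropic-style `tool_use` blocks map to OpenAI `tool_calls`, and `tool_result` blocks map to `role: "tool"` messages.

```typescript
type Block =
	| { type: "tool_use"; id: string; name: string; input: Record<string, unknown> }
	| { type: "tool_result"; tool_use_id: string; content: string }

// tool_use -> OpenAI tool_calls entries (arguments are JSON-serialized).
function toToolCalls(blocks: Block[]) {
	return blocks
		.filter((b): b is Extract<Block, { type: "tool_use" }> => b.type === "tool_use")
		.map((b) => ({
			id: b.id,
			type: "function" as const,
			function: { name: b.name, arguments: JSON.stringify(b.input) },
		}))
}

// tool_result -> role:"tool" messages keyed by tool_call_id.
function toToolMessages(blocks: Block[]) {
	return blocks
		.filter((b): b is Extract<Block, { type: "tool_result" }> => b.type === "tool_result")
		.map((b) => ({ role: "tool" as const, tool_call_id: b.tool_use_id, content: b.content }))
}
```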
+ 134 - 136
src/core/task/__tests__/validateToolResultIds.spec.ts

@@ -1,5 +1,5 @@
-import { Anthropic } from "@anthropic-ai/sdk"
 import { TelemetryService } from "@roo-code/telemetry"
+import type { RooMessage } from "../../task-persistence/rooMessage"
 import {
 	validateAndFixToolResultIds,
 	ToolResultIdMismatchError,
@@ -23,18 +23,18 @@ describe("validateAndFixToolResultIds", () => {
 
 	describe("when there is no previous assistant message", () => {
 		it("should return the user message unchanged", () => {
-			const userMessage: Anthropic.MessageParam = {
-				role: "user",
+			const userMessage = {
+				role: "user" as const,
 				content: [
 					{
-						type: "tool_result",
+						type: "tool_result" as const,
 						tool_use_id: "tool-123",
 						content: "Result",
 					},
 				],
-			}
+			} as unknown as RooMessage
 
-			const result = validateAndFixToolResultIds(userMessage, [])
+			const result = validateAndFixToolResultIds(userMessage as any, [])
 
 			expect(result).toEqual(userMessage)
 		})
@@ -42,7 +42,7 @@ describe("validateAndFixToolResultIds", () => {
 
 	describe("when tool_result IDs match tool_use IDs", () => {
 		it("should return the user message unchanged for single tool", () => {
-			const assistantMessage: Anthropic.MessageParam = {
+			const assistantMessage = {
 				role: "assistant",
 				content: [
 					{
@@ -54,7 +54,7 @@ describe("validateAndFixToolResultIds", () => {
 				],
 			}
 
-			const userMessage: Anthropic.MessageParam = {
+			const userMessage = {
 				role: "user",
 				content: [
 					{
@@ -65,13 +65,13 @@ describe("validateAndFixToolResultIds", () => {
 				],
 			}
 
-			const result = validateAndFixToolResultIds(userMessage, [assistantMessage])
+			const result = validateAndFixToolResultIds(userMessage as any, [assistantMessage] as any)
 
 			expect(result).toEqual(userMessage)
 		})
 
 		it("should return the user message unchanged for multiple tools", () => {
-			const assistantMessage: Anthropic.MessageParam = {
+			const assistantMessage = {
 				role: "assistant",
 				content: [
 					{
@@ -89,7 +89,7 @@ describe("validateAndFixToolResultIds", () => {
 				],
 			}
 
-			const userMessage: Anthropic.MessageParam = {
+			const userMessage = {
 				role: "user",
 				content: [
 					{
@@ -105,7 +105,7 @@ describe("validateAndFixToolResultIds", () => {
 				],
 			}
 
-			const result = validateAndFixToolResultIds(userMessage, [assistantMessage])
+			const result = validateAndFixToolResultIds(userMessage as any, [assistantMessage] as any)
 
 			expect(result).toEqual(userMessage)
 		})
@@ -113,7 +113,7 @@ describe("validateAndFixToolResultIds", () => {
 
 	describe("when tool_result IDs do not match tool_use IDs", () => {
 		it("should fix single mismatched tool_use_id by position", () => {
-			const assistantMessage: Anthropic.MessageParam = {
+			const assistantMessage = {
 				role: "assistant",
 				content: [
 					{
@@ -125,7 +125,7 @@ describe("validateAndFixToolResultIds", () => {
 				],
 			}
 
-			const userMessage: Anthropic.MessageParam = {
+			const userMessage = {
 				role: "user",
 				content: [
 					{
@@ -136,16 +136,16 @@ describe("validateAndFixToolResultIds", () => {
 				],
 			}
 
-			const result = validateAndFixToolResultIds(userMessage, [assistantMessage])
+			const result = validateAndFixToolResultIds(userMessage as any, [assistantMessage] as any)
 
-			expect(Array.isArray(result.content)).toBe(true)
-			const resultContent = result.content as Anthropic.ToolResultBlockParam[]
+			expect(Array.isArray((result as any).content)).toBe(true)
+			const resultContent = (result as any).content as any[]
 			expect(resultContent[0].tool_use_id).toBe("correct-id-123")
 			expect(resultContent[0].content).toBe("File content")
 		})
 
 		it("should fix multiple mismatched tool_use_ids by position", () => {
-			const assistantMessage: Anthropic.MessageParam = {
+			const assistantMessage = {
 				role: "assistant",
 				content: [
 					{
@@ -163,7 +163,7 @@ describe("validateAndFixToolResultIds", () => {
 				],
 			}
 
-			const userMessage: Anthropic.MessageParam = {
+			const userMessage = {
 				role: "user",
 				content: [
 					{
@@ -179,16 +179,16 @@ describe("validateAndFixToolResultIds", () => {
 				],
 			}
 
-			const result = validateAndFixToolResultIds(userMessage, [assistantMessage])
+			const result = validateAndFixToolResultIds(userMessage as any, [assistantMessage] as any)
 
-			expect(Array.isArray(result.content)).toBe(true)
-			const resultContent = result.content as Anthropic.ToolResultBlockParam[]
+			expect(Array.isArray((result as any).content)).toBe(true)
+			const resultContent = (result as any).content as any[]
 			expect(resultContent[0].tool_use_id).toBe("correct-1")
 			expect(resultContent[1].tool_use_id).toBe("correct-2")
 		})
 
 		it("should partially fix when some IDs match and some don't", () => {
-			const assistantMessage: Anthropic.MessageParam = {
+			const assistantMessage = {
 				role: "assistant",
 				content: [
 					{
@@ -206,7 +206,7 @@ describe("validateAndFixToolResultIds", () => {
 				],
 			}
 
-			const userMessage: Anthropic.MessageParam = {
+			const userMessage = {
 				role: "user",
 				content: [
 					{
@@ -222,10 +222,10 @@ describe("validateAndFixToolResultIds", () => {
 				],
 			}
 
-			const result = validateAndFixToolResultIds(userMessage, [assistantMessage])
+			const result = validateAndFixToolResultIds(userMessage as any, [assistantMessage] as any)
 
-			expect(Array.isArray(result.content)).toBe(true)
-			const resultContent = result.content as Anthropic.ToolResultBlockParam[]
+			expect(Array.isArray((result as any).content)).toBe(true)
+			const resultContent = (result as any).content as any[]
 			expect(resultContent[0].tool_use_id).toBe("id-1")
 			expect(resultContent[1].tool_use_id).toBe("id-2")
 		})
@@ -233,7 +233,7 @@ describe("validateAndFixToolResultIds", () => {
 
 	describe("when user message has non-tool_result content", () => {
 		it("should preserve text blocks alongside tool_result blocks", () => {
-			const assistantMessage: Anthropic.MessageParam = {
+			const assistantMessage = {
 				role: "assistant",
 				content: [
 					{
@@ -245,7 +245,7 @@ describe("validateAndFixToolResultIds", () => {
 				],
 			}
 
-			const userMessage: Anthropic.MessageParam = {
+			const userMessage = {
 				role: "user",
 				content: [
 					{
@@ -260,20 +260,20 @@ describe("validateAndFixToolResultIds", () => {
 				],
 			}
 
-			const result = validateAndFixToolResultIds(userMessage, [assistantMessage])
+			const result = validateAndFixToolResultIds(userMessage as any, [assistantMessage] as any)
 
-			expect(Array.isArray(result.content)).toBe(true)
-			const resultContent = result.content as Array<Anthropic.ToolResultBlockParam | Anthropic.TextBlockParam>
+			expect(Array.isArray((result as any).content)).toBe(true)
+			const resultContent = (result as any).content as any[]
 			expect(resultContent[0].type).toBe("tool_result")
-			expect((resultContent[0] as Anthropic.ToolResultBlockParam).tool_use_id).toBe("tool-123")
+			expect(resultContent[0].tool_use_id ?? resultContent[0].toolCallId).toBe("tool-123")
 			expect(resultContent[1].type).toBe("text")
-			expect((resultContent[1] as Anthropic.TextBlockParam).text).toBe("Additional context")
+			expect(resultContent[1].text).toBe("Additional context")
 		})
 	})
 
 	describe("when assistant message has non-tool_use content", () => {
 		it("should only consider tool_use blocks for matching", () => {
-			const assistantMessage: Anthropic.MessageParam = {
+			const assistantMessage = {
 				role: "assistant",
 				content: [
 					{
@@ -289,7 +289,7 @@ describe("validateAndFixToolResultIds", () => {
 				],
 			}
 
-			const userMessage: Anthropic.MessageParam = {
+			const userMessage = {
 				role: "user",
 				content: [
 					{
@@ -300,17 +300,17 @@ describe("validateAndFixToolResultIds", () => {
 				],
 			}
 
-			const result = validateAndFixToolResultIds(userMessage, [assistantMessage])
+			const result = validateAndFixToolResultIds(userMessage as any, [assistantMessage] as any)
 
-			expect(Array.isArray(result.content)).toBe(true)
-			const resultContent = result.content as Anthropic.ToolResultBlockParam[]
+			expect(Array.isArray((result as any).content)).toBe(true)
+			const resultContent = (result as any).content as any[]
 			expect(resultContent[0].tool_use_id).toBe("tool-123")
 		})
 	})
 
 	describe("when user message content is a string", () => {
 		it("should return the message unchanged", () => {
-			const assistantMessage: Anthropic.MessageParam = {
+			const assistantMessage = {
 				role: "assistant",
 				content: [
 					{
@@ -322,12 +322,12 @@ describe("validateAndFixToolResultIds", () => {
 				],
 			}
 
-			const userMessage: Anthropic.MessageParam = {
+			const userMessage = {
 				role: "user",
 				content: "Just a plain text message",
 			}
 
-			const result = validateAndFixToolResultIds(userMessage, [assistantMessage])
+			const result = validateAndFixToolResultIds(userMessage as any, [assistantMessage] as any)
 
 			expect(result).toEqual(userMessage)
 		})
@@ -335,12 +335,12 @@ describe("validateAndFixToolResultIds", () => {
 
 	describe("when assistant message content is a string", () => {
 		it("should return the user message unchanged", () => {
-			const assistantMessage: Anthropic.MessageParam = {
+			const assistantMessage = {
 				role: "assistant",
 				content: "Just some text, no tool use",
 			}
 
-			const userMessage: Anthropic.MessageParam = {
+			const userMessage = {
 				role: "user",
 				content: [
 					{
@@ -351,7 +351,7 @@ describe("validateAndFixToolResultIds", () => {
 				],
 			}
 
-			const result = validateAndFixToolResultIds(userMessage, [assistantMessage])
+			const result = validateAndFixToolResultIds(userMessage as any, [assistantMessage] as any)
 
 			expect(result).toEqual(userMessage)
 		})
@@ -359,7 +359,7 @@ describe("validateAndFixToolResultIds", () => {
 
 	describe("when there are more tool_results than tool_uses", () => {
 		it("should filter out orphaned tool_results with invalid IDs", () => {
-			const assistantMessage: Anthropic.MessageParam = {
+			const assistantMessage = {
 				role: "assistant",
 				content: [
 					{
@@ -371,7 +371,7 @@ describe("validateAndFixToolResultIds", () => {
 				],
 			}
 
-			const userMessage: Anthropic.MessageParam = {
+			const userMessage = {
 				role: "user",
 				content: [
 					{
@@ -387,10 +387,10 @@ describe("validateAndFixToolResultIds", () => {
 				],
 			}
 
-			const result = validateAndFixToolResultIds(userMessage, [assistantMessage])
+			const result = validateAndFixToolResultIds(userMessage as any, [assistantMessage] as any)
 
-			expect(Array.isArray(result.content)).toBe(true)
-			const resultContent = result.content as Anthropic.ToolResultBlockParam[]
+			expect(Array.isArray((result as any).content)).toBe(true)
+			const resultContent = (result as any).content as any[]
 			// Only one tool_result should remain - the first one gets fixed to tool-1
 			expect(resultContent.length).toBe(1)
 			expect(resultContent[0].tool_use_id).toBe("tool-1")
@@ -399,7 +399,7 @@ describe("validateAndFixToolResultIds", () => {
 		it("should filter out duplicate tool_results when one already has a valid ID", () => {
 			// This is the exact scenario from the PostHog error:
 			// 2 tool_results (call_08230257, call_55577629), 1 tool_use (call_55577629)
-			const assistantMessage: Anthropic.MessageParam = {
+			const assistantMessage = {
 				role: "assistant",
 				content: [
 					{
@@ -411,7 +411,7 @@ describe("validateAndFixToolResultIds", () => {
 				],
 			}
 
-			const userMessage: Anthropic.MessageParam = {
+			const userMessage = {
 				role: "user",
 				content: [
 					{
@@ -427,10 +427,10 @@ describe("validateAndFixToolResultIds", () => {
 				],
 			}
 
-			const result = validateAndFixToolResultIds(userMessage, [assistantMessage])
+			const result = validateAndFixToolResultIds(userMessage as any, [assistantMessage] as any)
 
-			expect(Array.isArray(result.content)).toBe(true)
-			const resultContent = result.content as Anthropic.ToolResultBlockParam[]
+			expect(Array.isArray((result as any).content)).toBe(true)
+			const resultContent = (result as any).content as any[]
 			// Should only keep one tool_result since there's only one tool_use
 			// The first invalid one gets fixed to the valid ID, then the second one
 			// (which already has that ID) becomes a duplicate and is filtered out
@@ -439,7 +439,7 @@ describe("validateAndFixToolResultIds", () => {
 		})
 
 		it("should preserve text blocks while filtering orphaned tool_results", () => {
-			const assistantMessage: Anthropic.MessageParam = {
+			const assistantMessage = {
 				role: "assistant",
 				content: [
 					{
@@ -451,7 +451,7 @@ describe("validateAndFixToolResultIds", () => {
 				],
 			}
 
-			const userMessage: Anthropic.MessageParam = {
+			const userMessage = {
 				role: "user",
 				content: [
 					{
@@ -471,22 +471,22 @@ describe("validateAndFixToolResultIds", () => {
 				],
 			}
 
-			const result = validateAndFixToolResultIds(userMessage, [assistantMessage])
+			const result = validateAndFixToolResultIds(userMessage as any, [assistantMessage] as any)
 
-			expect(Array.isArray(result.content)).toBe(true)
-			const resultContent = result.content as Array<Anthropic.ToolResultBlockParam | Anthropic.TextBlockParam>
+			expect(Array.isArray((result as any).content)).toBe(true)
+			const resultContent = (result as any).content as any[]
 			// Should have tool_result + text block, orphaned tool_result filtered out
 			expect(resultContent.length).toBe(2)
 			expect(resultContent[0].type).toBe("tool_result")
-			expect((resultContent[0] as Anthropic.ToolResultBlockParam).tool_use_id).toBe("tool-1")
+			expect(resultContent[0].tool_use_id ?? resultContent[0].toolCallId).toBe("tool-1")
 			expect(resultContent[1].type).toBe("text")
-			expect((resultContent[1] as Anthropic.TextBlockParam).text).toBe("Some additional context")
+			expect(resultContent[1].text).toBe("Some additional context")
 		})
 
 		// Verifies fix for GitHub #10465: Terminal fallback race condition can generate
 		// duplicate tool_results with the same valid tool_use_id, causing API protocol violations.
 		it("should filter out duplicate tool_results with identical valid tool_use_ids (terminal fallback scenario)", () => {
-			const assistantMessage: Anthropic.MessageParam = {
+			const assistantMessage = {
 				role: "assistant",
 				content: [
 					{
@@ -499,7 +499,7 @@ describe("validateAndFixToolResultIds", () => {
 			}
 
 			// Two tool_results with the SAME valid tool_use_id from terminal fallback race condition
-			const userMessage: Anthropic.MessageParam = {
+			const userMessage = {
 				role: "user",
 				content: [
 					{
@@ -515,10 +515,10 @@ describe("validateAndFixToolResultIds", () => {
 				],
 			}
 
-			const result = validateAndFixToolResultIds(userMessage, [assistantMessage])
+			const result = validateAndFixToolResultIds(userMessage as any, [assistantMessage] as any)
 
-			expect(Array.isArray(result.content)).toBe(true)
-			const resultContent = result.content as Anthropic.ToolResultBlockParam[]
+			expect(Array.isArray((result as any).content)).toBe(true)
+			const resultContent = (result as any).content as any[]
 
 			// Only ONE tool_result should remain to prevent API protocol violation
 			expect(resultContent.length).toBe(1)
@@ -527,7 +527,7 @@ describe("validateAndFixToolResultIds", () => {
 		})
 
 		it("should preserve text blocks while deduplicating tool_results with same valid ID", () => {
-			const assistantMessage: Anthropic.MessageParam = {
+			const assistantMessage = {
 				role: "assistant",
 				content: [
 					{
@@ -539,7 +539,7 @@ describe("validateAndFixToolResultIds", () => {
 				],
 			}
 
-			const userMessage: Anthropic.MessageParam = {
+			const userMessage = {
 				role: "user",
 				content: [
 					{
@@ -559,24 +559,24 @@ describe("validateAndFixToolResultIds", () => {
 				],
 			}
 
-			const result = validateAndFixToolResultIds(userMessage, [assistantMessage])
+			const result = validateAndFixToolResultIds(userMessage as any, [assistantMessage] as any)
 
-			expect(Array.isArray(result.content)).toBe(true)
-			const resultContent = result.content as Array<Anthropic.ToolResultBlockParam | Anthropic.TextBlockParam>
+			expect(Array.isArray((result as any).content)).toBe(true)
+			const resultContent = (result as any).content as any[]
 
 			// Should have: 1 tool_result + 1 text block (duplicate filtered out)
 			expect(resultContent.length).toBe(2)
 			expect(resultContent[0].type).toBe("tool_result")
-			expect((resultContent[0] as Anthropic.ToolResultBlockParam).tool_use_id).toBe("tool-123")
-			expect((resultContent[0] as Anthropic.ToolResultBlockParam).content).toBe("First result")
+			expect(resultContent[0].tool_use_id ?? resultContent[0].toolCallId).toBe("tool-123")
+			expect(resultContent[0].content ?? resultContent[0].output.value).toBe("First result")
 			expect(resultContent[1].type).toBe("text")
-			expect((resultContent[1] as Anthropic.TextBlockParam).text).toBe("Environment details here")
+			expect(resultContent[1].text).toBe("Environment details here")
 		})
 	})
 
 	describe("when there are more tool_uses than tool_results", () => {
 		it("should fix the available tool_results and add missing ones", () => {
-			const assistantMessage: Anthropic.MessageParam = {
+			const assistantMessage = {
 				role: "assistant",
 				content: [
 					{
@@ -594,7 +594,7 @@ describe("validateAndFixToolResultIds", () => {
 				],
 			}
 
-			const userMessage: Anthropic.MessageParam = {
+			const userMessage = {
 				role: "user",
 				content: [
 					{
@@ -605,23 +605,23 @@ describe("validateAndFixToolResultIds", () => {
 				],
 			}
 
-			const result = validateAndFixToolResultIds(userMessage, [assistantMessage])
+			const result = validateAndFixToolResultIds(userMessage as any, [assistantMessage] as any)
 
-			expect(Array.isArray(result.content)).toBe(true)
-			const resultContent = result.content as Anthropic.ToolResultBlockParam[]
+			expect(Array.isArray((result as any).content)).toBe(true)
+			const resultContent = (result as any).content as any[]
 			// Should now have 2 tool_results: one fixed and one added for the missing tool_use
 			expect(resultContent.length).toBe(2)
-			// The missing tool_result is prepended
-			expect(resultContent[0].tool_use_id).toBe("tool-2")
-			expect(resultContent[0].content).toBe("Tool execution was interrupted before completion.")
-			// The original is fixed
+			// The missing tool_result is prepended (AI SDK format)
+			expect(resultContent[0].toolCallId).toBe("tool-2")
+			expect(resultContent[0].output.value).toBe("Tool execution was interrupted before completion.")
+			// The original is fixed (legacy format, tool_use_id updated)
 			expect(resultContent[1].tool_use_id).toBe("tool-1")
 		})
 	})
 
 	describe("when tool_results are completely missing", () => {
 		it("should add missing tool_result for single tool_use", () => {
-			const assistantMessage: Anthropic.MessageParam = {
+			const assistantMessage = {
 				role: "assistant",
 				content: [
 					{
@@ -633,7 +633,7 @@ describe("validateAndFixToolResultIds", () => {
 				],
 			}
 
-			const userMessage: Anthropic.MessageParam = {
+			const userMessage = {
 				role: "user",
 				content: [
 					{
@@ -643,23 +643,21 @@ describe("validateAndFixToolResultIds", () => {
 				],
 			}
 
-			const result = validateAndFixToolResultIds(userMessage, [assistantMessage])
+			const result = validateAndFixToolResultIds(userMessage as any, [assistantMessage] as any)
 
-			expect(Array.isArray(result.content)).toBe(true)
-			const resultContent = result.content as Array<Anthropic.ToolResultBlockParam | Anthropic.TextBlockParam>
+			expect(Array.isArray((result as any).content)).toBe(true)
+			const resultContent = (result as any).content as any[]
 			expect(resultContent.length).toBe(2)
-			// Missing tool_result should be prepended
-			expect(resultContent[0].type).toBe("tool_result")
-			expect((resultContent[0] as Anthropic.ToolResultBlockParam).tool_use_id).toBe("tool-123")
-			expect((resultContent[0] as Anthropic.ToolResultBlockParam).content).toBe(
-				"Tool execution was interrupted before completion.",
-			)
+			// Missing tool_result should be prepended (AI SDK format)
+			expect(resultContent[0].type).toBe("tool-result")
+			expect(resultContent[0].toolCallId).toBe("tool-123")
+			expect(resultContent[0].output.value).toBe("Tool execution was interrupted before completion.")
 			// Original text block should be preserved
 			expect(resultContent[1].type).toBe("text")
 		})
 
 		it("should add missing tool_results for multiple tool_uses", () => {
-			const assistantMessage: Anthropic.MessageParam = {
+			const assistantMessage = {
 				role: "assistant",
 				content: [
 					{
@@ -677,7 +675,7 @@ describe("validateAndFixToolResultIds", () => {
 				],
 			}
 
-			const userMessage: Anthropic.MessageParam = {
+			const userMessage = {
 				role: "user",
 				content: [
 					{
@@ -687,22 +685,22 @@ describe("validateAndFixToolResultIds", () => {
 				],
 			}
 
-			const result = validateAndFixToolResultIds(userMessage, [assistantMessage])
+			const result = validateAndFixToolResultIds(userMessage as any, [assistantMessage] as any)
 
-			expect(Array.isArray(result.content)).toBe(true)
-			const resultContent = result.content as Array<Anthropic.ToolResultBlockParam | Anthropic.TextBlockParam>
+			expect(Array.isArray((result as any).content)).toBe(true)
+			const resultContent = (result as any).content as any[]
 			expect(resultContent.length).toBe(3)
-			// Both missing tool_results should be prepended
-			expect(resultContent[0].type).toBe("tool_result")
-			expect((resultContent[0] as Anthropic.ToolResultBlockParam).tool_use_id).toBe("tool-1")
-			expect(resultContent[1].type).toBe("tool_result")
-			expect((resultContent[1] as Anthropic.ToolResultBlockParam).tool_use_id).toBe("tool-2")
+			// Both missing tool_results should be prepended (AI SDK format)
+			expect(resultContent[0].type).toBe("tool-result")
+			expect(resultContent[0].toolCallId).toBe("tool-1")
+			expect(resultContent[1].type).toBe("tool-result")
+			expect(resultContent[1].toolCallId).toBe("tool-2")
 			// Original text should be preserved
 			expect(resultContent[2].type).toBe("text")
 		})
 
 		it("should add only the missing tool_results when some exist", () => {
-			const assistantMessage: Anthropic.MessageParam = {
+			const assistantMessage = {
 				role: "assistant",
 				content: [
 					{
@@ -720,7 +718,7 @@ describe("validateAndFixToolResultIds", () => {
 				],
 			}
 
-			const userMessage: Anthropic.MessageParam = {
+			const userMessage = {
 				role: "user",
 				content: [
 					{
@@ -731,21 +729,21 @@ describe("validateAndFixToolResultIds", () => {
 				],
 			}
 
-			const result = validateAndFixToolResultIds(userMessage, [assistantMessage])
+			const result = validateAndFixToolResultIds(userMessage as any, [assistantMessage] as any)
 
-			expect(Array.isArray(result.content)).toBe(true)
-			const resultContent = result.content as Anthropic.ToolResultBlockParam[]
+			expect(Array.isArray((result as any).content)).toBe(true)
+			const resultContent = (result as any).content as any[]
 			expect(resultContent.length).toBe(2)
-			// Missing tool_result for tool-2 should be prepended
-			expect(resultContent[0].tool_use_id).toBe("tool-2")
-			expect(resultContent[0].content).toBe("Tool execution was interrupted before completion.")
+			// Missing tool_result for tool-2 should be prepended (AI SDK format)
+			expect(resultContent[0].toolCallId).toBe("tool-2")
+			expect(resultContent[0].output.value).toBe("Tool execution was interrupted before completion.")
 			// Existing tool_result should be preserved
 			expect(resultContent[1].tool_use_id).toBe("tool-1")
 			expect(resultContent[1].content).toBe("Content for tool 1")
 		})
 
 		it("should handle empty user content array by adding all missing tool_results", () => {
-			const assistantMessage: Anthropic.MessageParam = {
+			const assistantMessage = {
 				role: "assistant",
 				content: [
 					{
@@ -757,25 +755,25 @@ describe("validateAndFixToolResultIds", () => {
 				],
 			}
 
-			const userMessage: Anthropic.MessageParam = {
+			const userMessage = {
 				role: "user",
 				content: [],
 			}
 
-			const result = validateAndFixToolResultIds(userMessage, [assistantMessage])
+			const result = validateAndFixToolResultIds(userMessage as any, [assistantMessage] as any)
 
-			expect(Array.isArray(result.content)).toBe(true)
-			const resultContent = result.content as Anthropic.ToolResultBlockParam[]
+			expect(Array.isArray((result as any).content)).toBe(true)
+			const resultContent = (result as any).content as any[]
 			expect(resultContent.length).toBe(1)
-			expect(resultContent[0].type).toBe("tool_result")
-			expect(resultContent[0].tool_use_id).toBe("tool-1")
-			expect(resultContent[0].content).toBe("Tool execution was interrupted before completion.")
+			expect(resultContent[0].type).toBe("tool-result")
+			expect(resultContent[0].toolCallId).toBe("tool-1")
+			expect(resultContent[0].output.value).toBe("Tool execution was interrupted before completion.")
 		})
 	})
 
 	describe("telemetry", () => {
 		it("should call captureException for both missing and mismatch when there is a mismatch", () => {
-			const assistantMessage: Anthropic.MessageParam = {
+			const assistantMessage = {
 				role: "assistant",
 				content: [
 					{
@@ -787,7 +785,7 @@ describe("validateAndFixToolResultIds", () => {
 				],
 			}
 
-			const userMessage: Anthropic.MessageParam = {
+			const userMessage = {
 				role: "user",
 				content: [
 					{
@@ -798,7 +796,7 @@ describe("validateAndFixToolResultIds", () => {
 				],
 			}
 
-			validateAndFixToolResultIds(userMessage, [assistantMessage])
+			validateAndFixToolResultIds(userMessage as any, [assistantMessage] as any)
 
			// A mismatch also triggers missing detection since the wrong ID doesn't match any tool_use
 			expect(TelemetryService.instance.captureException).toHaveBeenCalledTimes(2)
@@ -823,7 +821,7 @@ describe("validateAndFixToolResultIds", () => {
 		})
 
 		it("should not call captureException when IDs match", () => {
-			const assistantMessage: Anthropic.MessageParam = {
+			const assistantMessage = {
 				role: "assistant",
 				content: [
 					{
@@ -835,7 +833,7 @@ describe("validateAndFixToolResultIds", () => {
 				],
 			}
 
-			const userMessage: Anthropic.MessageParam = {
+			const userMessage = {
 				role: "user",
 				content: [
 					{
@@ -846,7 +844,7 @@ describe("validateAndFixToolResultIds", () => {
 				],
 			}
 
-			validateAndFixToolResultIds(userMessage, [assistantMessage])
+			validateAndFixToolResultIds(userMessage as any, [assistantMessage] as any)
 
 			expect(TelemetryService.instance.captureException).not.toHaveBeenCalled()
 		})
@@ -884,7 +882,7 @@ describe("validateAndFixToolResultIds", () => {
 
 	describe("telemetry for missing tool_results", () => {
 		it("should call captureException when tool_results are missing", () => {
-			const assistantMessage: Anthropic.MessageParam = {
+			const assistantMessage = {
 				role: "assistant",
 				content: [
 					{
@@ -896,7 +894,7 @@ describe("validateAndFixToolResultIds", () => {
 				],
 			}
 
-			const userMessage: Anthropic.MessageParam = {
+			const userMessage = {
 				role: "user",
 				content: [
 					{
@@ -906,7 +904,7 @@ describe("validateAndFixToolResultIds", () => {
 				],
 			}
 
-			validateAndFixToolResultIds(userMessage, [assistantMessage])
+			validateAndFixToolResultIds(userMessage as any, [assistantMessage] as any)
 
 			expect(TelemetryService.instance.captureException).toHaveBeenCalledTimes(1)
 			expect(TelemetryService.instance.captureException).toHaveBeenCalledWith(
@@ -921,7 +919,7 @@ describe("validateAndFixToolResultIds", () => {
 		})
 
 		it("should call captureException twice when both mismatch and missing occur", () => {
-			const assistantMessage: Anthropic.MessageParam = {
+			const assistantMessage = {
 				role: "assistant",
 				content: [
 					{
@@ -939,7 +937,7 @@ describe("validateAndFixToolResultIds", () => {
 				],
 			}
 
-			const userMessage: Anthropic.MessageParam = {
+			const userMessage = {
 				role: "user",
 				content: [
 					{
@@ -951,7 +949,7 @@ describe("validateAndFixToolResultIds", () => {
 				],
 			}
 
-			validateAndFixToolResultIds(userMessage, [assistantMessage])
+			validateAndFixToolResultIds(userMessage as any, [assistantMessage] as any)
 
 			// Should be called twice: once for missing, once for mismatch
 			expect(TelemetryService.instance.captureException).toHaveBeenCalledTimes(2)
@@ -966,7 +964,7 @@ describe("validateAndFixToolResultIds", () => {
 		})
 
 		it("should not call captureException for missing when all tool_results exist", () => {
-			const assistantMessage: Anthropic.MessageParam = {
+			const assistantMessage = {
 				role: "assistant",
 				content: [
 					{
@@ -978,7 +976,7 @@ describe("validateAndFixToolResultIds", () => {
 				],
 			}
 
-			const userMessage: Anthropic.MessageParam = {
+			const userMessage = {
 				role: "user",
 				content: [
 					{
@@ -989,7 +987,7 @@ describe("validateAndFixToolResultIds", () => {
 				],
 			}
 
-			validateAndFixToolResultIds(userMessage, [assistantMessage])
+			validateAndFixToolResultIds(userMessage as any, [assistantMessage] as any)
 
 			expect(TelemetryService.instance.captureException).not.toHaveBeenCalled()
 		})
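
The placeholder asserted throughout these tests is the AI SDK ToolResultPart shape rather than Anthropic's tool_result. A minimal builder sketch, assuming the ToolResultPart export and module path shown in the validateToolResultIds.ts diff below:

import type { ToolResultPart } from "../task-persistence/rooMessage"

// Builds the synthetic result that validateAndFixToolResultIds prepends for an
// interrupted tool call (field names mirror the assertions above).
function interruptedToolResult(toolCallId: string, toolName: string): ToolResultPart {
	return {
		type: "tool-result",
		toolCallId,
		toolName,
		output: { type: "text", value: "Tool execution was interrupted before completion." },
	}
}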

+ 26 - 11
src/core/task/mergeConsecutiveApiMessages.ts

@@ -1,12 +1,15 @@
-import { Anthropic } from "@anthropic-ai/sdk"
+import type { RooMessage } from "../task-persistence/rooMessage"
+import { isRooReasoningMessage } from "../task-persistence/rooMessage"
 
-import type { ApiMessage } from "../task-persistence"
+type Role = "user" | "assistant" | "tool"
 
-type Role = ApiMessage["role"]
-
-function normalizeContentToBlocks(content: ApiMessage["content"]): Anthropic.Messages.ContentBlockParam[] {
+/**
+ * Normalizes message content to an array of content parts.
+ * Handles both string and array content formats.
+ */
+function normalizeContentToArray(content: unknown): unknown[] {
 	if (Array.isArray(content)) {
-		return content as Anthropic.Messages.ContentBlockParam[]
+		return content
 	}
 	if (content === undefined || content === null) {
 		return []
@@ -19,19 +22,28 @@ function normalizeContentToBlocks(content: ApiMessage["content"]): Anthropic.Mes
  *
  * Used for *API request shaping only* (do not use for storage), so rewind/edit operations
  * can still reference the original individual messages.
+ *
+ * `RooReasoningMessage` items (which have no role) are always passed through unmerged.
  */
-export function mergeConsecutiveApiMessages(messages: ApiMessage[], options?: { roles?: Role[] }): ApiMessage[] {
+export function mergeConsecutiveApiMessages(messages: RooMessage[], options?: { roles?: Role[] }): RooMessage[] {
 	if (messages.length <= 1) {
 		return messages
 	}
 
 	const mergeRoles = new Set<Role>(options?.roles ?? ["user"]) // default: user only
-	const out: ApiMessage[] = []
+	const out: RooMessage[] = []
 
 	for (const msg of messages) {
+		// RooReasoningMessage has no role — always pass through unmerged
+		if (isRooReasoningMessage(msg)) {
+			out.push(msg)
+			continue
+		}
+
 		const prev = out[out.length - 1]
+		const prevHasRole = prev && !isRooReasoningMessage(prev)
 		const canMerge =
-			prev &&
+			prevHasRole &&
 			prev.role === msg.role &&
 			mergeRoles.has(msg.role) &&
 			// Allow merging regular messages into a summary (API-only shaping),
@@ -45,14 +57,17 @@ export function mergeConsecutiveApiMessages(messages: ApiMessage[], options?: {
 			continue
 		}
 
-		const mergedContent = [...normalizeContentToBlocks(prev.content), ...normalizeContentToBlocks(msg.content)]
+		const mergedContent = [
+			...normalizeContentToArray((prev as any).content),
+			...normalizeContentToArray((msg as any).content),
+		]
 
 		// Preserve the newest ts to keep chronological ordering for downstream logic.
 		out[out.length - 1] = {
 			...prev,
 			content: mergedContent,
 			ts: Math.max(prev.ts ?? 0, msg.ts ?? 0) || prev.ts || msg.ts,
-		}
+		} as RooMessage
 	}
 
 	return out
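
A minimal usage sketch of the merge (message literals hedged with "as any", as the tests in this PR do): two consecutive user messages collapse into one, keeping the newest ts.

import { mergeConsecutiveApiMessages } from "./mergeConsecutiveApiMessages"

const merged = mergeConsecutiveApiMessages([
	{ role: "user", content: [{ type: "text", text: "first" }], ts: 1 },
	{ role: "user", content: [{ type: "text", text: "second" }], ts: 2 },
] as any)
// merged.length === 1; merged[0].content holds both text parts; merged[0].ts === 2.
// A RooReasoningMessage between the two would pass through unmerged and block the merge.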

+ 111 - 102
src/core/task/validateToolResultIds.ts

@@ -1,6 +1,22 @@
-import { Anthropic } from "@anthropic-ai/sdk"
 import { TelemetryService } from "@roo-code/telemetry"
 import { findLastIndex } from "../../shared/array"
+import type {
+	RooMessage,
+	RooRoleMessage,
+	ToolCallPart,
+	ToolResultPart,
+	AnyToolCallBlock,
+	AnyToolResultBlock,
+} from "../task-persistence/rooMessage"
+import {
+	isRooRoleMessage,
+	isAnyToolCallBlock,
+	isAnyToolResultBlock,
+	getToolCallId as sharedGetToolCallId,
+	getToolCallName,
+	getToolResultCallId as sharedGetToolResultCallId,
+	setToolResultCallId as sharedSetToolResultCallId,
+} from "../task-persistence/rooMessage"
 
 /**
  * Custom error class for tool result ID mismatches.
@@ -19,8 +35,8 @@ export class ToolResultIdMismatchError extends Error {
 
 /**
  * Custom error class for missing tool results.
- * Used for structured error tracking via PostHog when tool_use blocks
- * don't have corresponding tool_result blocks.
+ * Used for structured error tracking via PostHog when tool-call blocks
+ * don't have corresponding tool-result blocks.
  */
 export class MissingToolResultError extends Error {
 	constructor(
@@ -33,116 +49,117 @@ export class MissingToolResultError extends Error {
 	}
 }
 
+/** Local aliases for shared dual-format helpers. */
+const isToolCallBlock = isAnyToolCallBlock
+const isToolResultBlock = isAnyToolResultBlock
+const getToolCallId = sharedGetToolCallId
+const getToolResultCallId = sharedGetToolResultCallId
+const setToolResultCallId = sharedSetToolResultCallId
+
 /**
- * Validates and fixes tool_result IDs in a user message against the previous assistant message.
+ * Validates and fixes tool result IDs in a user/tool message against the previous assistant message.
  *
- * This is a centralized validation that catches all tool_use/tool_result issues
+ * This is a centralized validation that catches all tool-call/tool-result issues
  * before messages are added to the API conversation history. It handles scenarios like:
  * - Race conditions during streaming
  * - Message editing scenarios
  * - Resume/delegation scenarios
- * - Missing tool_result blocks for tool_use calls
+ * - Missing tool-result blocks for tool calls
  *
- * @param userMessage - The user message being added to history
+ * @param userMessage - The user or tool message being added to history
  * @param apiConversationHistory - The conversation history to find the previous assistant message from
- * @returns The validated user message with corrected tool_use_ids and any missing tool_results added
+ * @returns The validated message with corrected tool call IDs and any missing tool results added
  */
-export function validateAndFixToolResultIds(
-	userMessage: Anthropic.MessageParam,
-	apiConversationHistory: Anthropic.MessageParam[],
-): Anthropic.MessageParam {
-	// Only process user messages with array content
-	if (userMessage.role !== "user" || !Array.isArray(userMessage.content)) {
+export function validateAndFixToolResultIds(userMessage: RooMessage, apiConversationHistory: RooMessage[]): RooMessage {
+	// Only process messages with array content that have a role
+	if (!isRooRoleMessage(userMessage) || !Array.isArray(userMessage.content)) {
 		return userMessage
 	}
 
 	// Find the previous assistant message from conversation history
-	const prevAssistantIdx = findLastIndex(apiConversationHistory, (msg) => msg.role === "assistant")
+	const prevAssistantIdx = findLastIndex(apiConversationHistory, (msg) => "role" in msg && msg.role === "assistant")
 	if (prevAssistantIdx === -1) {
 		return userMessage
 	}
 
 	const previousAssistantMessage = apiConversationHistory[prevAssistantIdx]
 
-	// Get tool_use blocks from the assistant message
+	// Get tool-call blocks from the assistant message
+	if (!isRooRoleMessage(previousAssistantMessage)) {
+		return userMessage
+	}
 	const assistantContent = previousAssistantMessage.content
 	if (!Array.isArray(assistantContent)) {
 		return userMessage
 	}
 
-	const toolUseBlocks = assistantContent.filter((block): block is Anthropic.ToolUseBlock => block.type === "tool_use")
+	const toolCallBlocks = (assistantContent as Array<{ type: string }>).filter(isToolCallBlock)
 
-	// No tool_use blocks to match against - no validation needed
-	if (toolUseBlocks.length === 0) {
+	// No tool-call blocks to match against - no validation needed
+	if (toolCallBlocks.length === 0) {
 		return userMessage
 	}
 
-	// Find tool_result blocks in the user message
-	let toolResults = userMessage.content.filter(
-		(block): block is Anthropic.ToolResultBlockParam => block.type === "tool_result",
-	)
+	// Find tool-result blocks in the user/tool message
+	const contentArray = userMessage.content as Array<{ type: string }>
+	let toolResults = contentArray.filter(isToolResultBlock)
 
-	// Deduplicate tool_result blocks to prevent API protocol violations (GitHub #10465)
-	// This serves as a safety net for any potential race conditions that could generate
-	// duplicate tool_results with the same tool_use_id. The root cause (approval feedback
-	// creating duplicate results) has been fixed in presentAssistantMessage.ts, but this
-	// deduplication remains as a defensive measure for unknown edge cases.
+	// Deduplicate tool-result blocks to prevent API protocol violations (GitHub #10465)
 	const seenToolResultIds = new Set<string>()
-	const deduplicatedContent = userMessage.content.filter((block) => {
-		if (block.type !== "tool_result") {
+	const deduplicatedContent = contentArray.filter((block) => {
+		if (!isToolResultBlock(block)) {
 			return true
 		}
-		if (seenToolResultIds.has(block.tool_use_id)) {
+		const callId = getToolResultCallId(block)
+		if (seenToolResultIds.has(callId)) {
 			return false // Duplicate - filter out
 		}
-		seenToolResultIds.add(block.tool_use_id)
+		seenToolResultIds.add(callId)
 		return true
 	})
 
 	userMessage = {
 		...userMessage,
 		content: deduplicatedContent,
-	}
+	} as RooMessage
 
-	toolResults = deduplicatedContent.filter(
-		(block): block is Anthropic.ToolResultBlockParam => block.type === "tool_result",
-	)
+	toolResults = deduplicatedContent.filter(isToolResultBlock)
 
-	// Build a set of valid tool_use IDs
-	const validToolUseIds = new Set(toolUseBlocks.map((block) => block.id))
+	// Build a set of valid tool-call IDs
+	const validToolCallIds = new Set(toolCallBlocks.map(getToolCallId))
 
-	// Build a set of existing tool_result IDs
-	const existingToolResultIds = new Set(toolResults.map((r) => r.tool_use_id))
+	// Build a set of existing tool-result IDs
+	const existingToolResultIds = new Set(toolResults.map(getToolResultCallId))
 
-	// Check for missing tool_results (tool_use IDs that don't have corresponding tool_results)
-	const missingToolUseIds = toolUseBlocks
-		.filter((toolUse) => !existingToolResultIds.has(toolUse.id))
-		.map((toolUse) => toolUse.id)
+	// Check for missing tool-results (tool-call IDs that don't have corresponding tool-results)
+	const missingToolCallIds = toolCallBlocks
+		.filter((tc) => !existingToolResultIds.has(getToolCallId(tc)))
+		.map(getToolCallId)
 
-	// Check if any tool_result has an invalid ID
-	const hasInvalidIds = toolResults.some((result) => !validToolUseIds.has(result.tool_use_id))
+	// Check if any tool-result has an invalid ID
+	const hasInvalidIds = toolResults.some((result) => !validToolCallIds.has(getToolResultCallId(result)))
 
-	// If no missing tool_results and no invalid IDs, no changes needed
-	if (missingToolUseIds.length === 0 && !hasInvalidIds) {
+	// If no missing tool-results and no invalid IDs, no changes needed
+	if (missingToolCallIds.length === 0 && !hasInvalidIds) {
 		return userMessage
 	}
 
 	// We have issues - need to fix them
-	const toolResultIdList = toolResults.map((r) => r.tool_use_id)
-	const toolUseIdList = toolUseBlocks.map((b) => b.id)
+	const toolResultIdList = toolResults.map(getToolResultCallId)
+	const toolCallIdList = toolCallBlocks.map(getToolCallId)
 
-	// Report missing tool_results to PostHog error tracking
-	if (missingToolUseIds.length > 0 && TelemetryService.hasInstance()) {
+	// Report missing tool-results to PostHog error tracking
+	if (missingToolCallIds.length > 0 && TelemetryService.hasInstance()) {
 		TelemetryService.instance.captureException(
 			new MissingToolResultError(
-				`Detected missing tool_result blocks. Missing tool_use IDs: [${missingToolUseIds.join(", ")}], existing tool_result IDs: [${toolResultIdList.join(", ")}]`,
-				missingToolUseIds,
+				`Detected missing tool_result blocks. Missing tool_use IDs: [${missingToolCallIds.join(", ")}], existing tool_result IDs: [${toolResultIdList.join(", ")}]`,
+				missingToolCallIds,
 				toolResultIdList,
 			),
 			{
-				missingToolUseIds,
+				missingToolUseIds: missingToolCallIds,
 				existingToolResultIds: toolResultIdList,
-				toolUseCount: toolUseBlocks.length,
+				toolUseCount: toolCallBlocks.length,
 				toolResultCount: toolResults.length,
 			},
 		)
@@ -152,83 +169,75 @@ export function validateAndFixToolResultIds(
 	if (hasInvalidIds && TelemetryService.hasInstance()) {
 		TelemetryService.instance.captureException(
 			new ToolResultIdMismatchError(
-				`Detected tool_result ID mismatch. tool_result IDs: [${toolResultIdList.join(", ")}], tool_use IDs: [${toolUseIdList.join(", ")}]`,
+				`Detected tool_result ID mismatch. tool_result IDs: [${toolResultIdList.join(", ")}], tool_use IDs: [${toolCallIdList.join(", ")}]`,
 				toolResultIdList,
-				toolUseIdList,
+				toolCallIdList,
 			),
 			{
 				toolResultIds: toolResultIdList,
-				toolUseIds: toolUseIdList,
+				toolUseIds: toolCallIdList,
 				toolResultCount: toolResults.length,
-				toolUseCount: toolUseBlocks.length,
+				toolUseCount: toolCallBlocks.length,
 			},
 		)
 	}
 
-	// Match tool_results to tool_uses by position and fix incorrect IDs
-	const usedToolUseIds = new Set<string>()
-	const contentArray = userMessage.content as Anthropic.Messages.ContentBlockParam[]
+	// Match tool-results to tool-calls by position and fix incorrect IDs
+	const usedToolCallIds = new Set<string>()
+	// userMessage was reassigned above with deduplicatedContent, so we know it has array content
+	const correctedContentArray = (userMessage as RooRoleMessage).content as Array<{ type: string }>
 
-	const correctedContent = contentArray
-		.map((block: Anthropic.Messages.ContentBlockParam) => {
-			if (block.type !== "tool_result") {
+	const correctedContent = correctedContentArray
+		.map((block) => {
+			if (!isToolResultBlock(block)) {
 				return block
 			}
 
+			const callId = getToolResultCallId(block)
+
 			// If the ID is already valid and not yet used, keep it
-			if (validToolUseIds.has(block.tool_use_id) && !usedToolUseIds.has(block.tool_use_id)) {
-				usedToolUseIds.add(block.tool_use_id)
+			if (validToolCallIds.has(callId) && !usedToolCallIds.has(callId)) {
+				usedToolCallIds.add(callId)
 				return block
 			}
 
-			// Find which tool_result index this block is by comparing references.
-			// This correctly handles duplicate tool_use_ids - we find the actual block's
-			// position among all tool_results, not the first block with a matching ID.
-			const toolResultIndex = toolResults.indexOf(block as Anthropic.ToolResultBlockParam)
+			// Find which tool-result index this block is by comparing references.
+			const toolResultIndex = toolResults.indexOf(block)
 
-			// Try to match by position - only fix if there's a corresponding tool_use
-			if (toolResultIndex !== -1 && toolResultIndex < toolUseBlocks.length) {
-				const correctId = toolUseBlocks[toolResultIndex].id
+			// Try to match by position - only fix if there's a corresponding tool-call
+			if (toolResultIndex !== -1 && toolResultIndex < toolCallBlocks.length) {
+				const correctId = getToolCallId(toolCallBlocks[toolResultIndex])
 				// Only use this ID if it hasn't been used yet
-				if (!usedToolUseIds.has(correctId)) {
-					usedToolUseIds.add(correctId)
-					return {
-						...block,
-						tool_use_id: correctId,
-					}
+				if (!usedToolCallIds.has(correctId)) {
+					usedToolCallIds.add(correctId)
+					return setToolResultCallId(block, correctId)
 				}
 			}
 
-			// No corresponding tool_use for this tool_result, or the ID is already used
+			// No corresponding tool-call for this tool-result, or the ID is already used
 			return null
 		})
 		.filter((block): block is NonNullable<typeof block> => block !== null)
 
-	// Add missing tool_result blocks for any tool_use that doesn't have one
-	const coveredToolUseIds = new Set(
-		correctedContent
-			.filter(
-				(b: Anthropic.Messages.ContentBlockParam): b is Anthropic.ToolResultBlockParam =>
-					b.type === "tool_result",
-			)
-			.map((r: Anthropic.ToolResultBlockParam) => r.tool_use_id),
-	)
-
-	const stillMissingToolUseIds = toolUseBlocks.filter((toolUse) => !coveredToolUseIds.has(toolUse.id))
-
-	// Build final content: add missing tool_results at the beginning if any
-	const missingToolResults: Anthropic.ToolResultBlockParam[] = stillMissingToolUseIds.map((toolUse) => ({
-		type: "tool_result" as const,
-		tool_use_id: toolUse.id,
-		content: "Tool execution was interrupted before completion.",
+	// Add missing tool-result blocks for any tool-call that doesn't have one
+	const coveredToolCallIds = new Set(correctedContent.filter(isToolResultBlock).map(getToolResultCallId))
+
+	const stillMissingToolCalls = toolCallBlocks.filter((tc) => !coveredToolCallIds.has(getToolCallId(tc)))
+
+	// Build final content: add missing tool-results at the beginning if any
+	// Create as AI SDK ToolResultPart format
+	const missingToolResults: ToolResultPart[] = stillMissingToolCalls.map((tc) => ({
+		type: "tool-result" as const,
+		toolCallId: getToolCallId(tc),
+		toolName: getToolCallName(tc),
+		output: { type: "text" as const, value: "Tool execution was interrupted before completion." },
 	}))
 
-	// Insert missing tool_results at the beginning of the content array
-	// This ensures they come before any text blocks that may summarize the results
+	// Insert missing tool-results at the beginning of the content array
 	const finalContent = missingToolResults.length > 0 ? [...missingToolResults, ...correctedContent] : correctedContent
 
 	return {
 		...userMessage,
 		content: finalContent,
-	}
+	} as RooMessage
 }
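
A minimal repair sketch. The assistant block below uses Anthropic's tool_use format to exercise the dual-format helpers, which is an assumption about isAnyToolCallBlock beyond what this diff shows; per the tests above, the prepended placeholder comes out in AI SDK format either way.

import { validateAndFixToolResultIds } from "./validateToolResultIds"

const assistant = {
	role: "assistant",
	content: [{ type: "tool_use", id: "tool-1", name: "read_file", input: {} }],
}
const user = { role: "user", content: [{ type: "text", text: "done" }] }

const fixed = validateAndFixToolResultIds(user as any, [assistant] as any)
// (fixed as any).content[0] is the prepended placeholder:
// { type: "tool-result", toolCallId: "tool-1", toolName: "read_file",
//   output: { type: "text", value: "Tool execution was interrupted before completion." } }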

+ 13 - 14
src/core/webview/ClineProvider.ts

@@ -97,7 +97,13 @@ import { Task } from "../task/Task"
 
 import { webviewMessageHandler } from "./webviewMessageHandler"
 import type { ClineMessage, TodoItem } from "@roo-code/types"
-import { readApiMessages, saveApiMessages, saveTaskMessages } from "../task-persistence"
+import {
+	readApiMessages,
+	readRooMessages,
+	saveApiMessages,
+	saveTaskMessages,
+	type RooMessage,
+} from "../task-persistence"
 import { readTaskMessages } from "../task-persistence/taskMessages"
 import { getNonce } from "./getNonce"
 import { getUri } from "./getUri"
@@ -1721,7 +1727,7 @@ export class ClineProvider
 		taskDirPath: string
 		apiConversationHistoryFilePath: string
 		uiMessagesFilePath: string
-		apiConversationHistory: Anthropic.MessageParam[]
+		apiConversationHistory: RooMessage[]
 	}> {
 		const history = this.getGlobalState("taskHistory") ?? []
 		const historyItem = history.find((item) => item.id === id)
@@ -1736,22 +1742,15 @@ export class ClineProvider
 		const apiConversationHistoryFilePath = path.join(taskDirPath, GlobalFileNames.apiConversationHistory)
 		const uiMessagesFilePath = path.join(taskDirPath, GlobalFileNames.uiMessages)
 		const fileExists = await fileExistsAtPath(apiConversationHistoryFilePath)
-
-		let apiConversationHistory: Anthropic.MessageParam[] = []
-
-		if (fileExists) {
-			try {
-				apiConversationHistory = JSON.parse(await fs.readFile(apiConversationHistoryFilePath, "utf8"))
-			} catch (error) {
-				console.warn(
-					`[getTaskWithId] api_conversation_history.json corrupted for task ${id}, returning empty history: ${error instanceof Error ? error.message : String(error)}`,
-				)
-			}
-		} else {
+		if (!fileExists) {
 			console.warn(
 				`[getTaskWithId] api_conversation_history.json missing for task ${id}, returning empty history`,
 			)
 		}
+		const apiConversationHistory = await readRooMessages({
+			taskId: id,
+			globalStoragePath,
+		})
 
 		return {
 			historyItem,

+ 30 - 2
src/core/webview/__tests__/ClineProvider.spec.ts

@@ -1220,7 +1220,7 @@ describe("ClineProvider", () => {
 			// Setup Task instance with auto-mock from the top of the file
 			const mockCline = new Task(defaultTaskOptions) // Create a new mocked instance
 			mockCline.clineMessages = mockMessages // Set test-specific messages
-			mockCline.apiConversationHistory = mockApiHistory // Set API history
+			mockCline.apiConversationHistory = mockApiHistory as any // Set API history
 			await provider.addClineToStack(mockCline) // Add the mocked instance to the stack
 
 			// Mock getTaskWithId
@@ -1308,7 +1308,7 @@ describe("ClineProvider", () => {
 			// Setup Task instance with auto-mock from the top of the file
 			const mockCline = new Task(defaultTaskOptions) // Create a new mocked instance
 			mockCline.clineMessages = mockMessages // Set test-specific messages
-			mockCline.apiConversationHistory = mockApiHistory // Set API history
+			mockCline.apiConversationHistory = mockApiHistory as any // Set API history
 
 			// Explicitly mock the overwrite methods since they're not being called in the tests
 			mockCline.overwriteClineMessages = vi.fn()
@@ -3840,5 +3840,33 @@ describe("ClineProvider - Comprehensive Edit/Delete Edge Cases", () => {
 			// Restore the spy
 			vi.mocked(fsUtils.fileExistsAtPath).mockRestore()
 		})
+
+		it("reads v2 envelope format via readRooMessages", async () => {
+			const historyItem = { id: "v2-envelope-task", task: "test task", ts: Date.now() }
+			vi.mocked(mockContext.globalState.get).mockImplementation((key: string) => {
+				if (key === "taskHistory") {
+					return [historyItem]
+				}
+				return undefined
+			})
+
+			const fsUtils = await import("../../../utils/fs")
+			vi.spyOn(fsUtils, "fileExistsAtPath").mockResolvedValue(true)
+
+			const fsp = await import("fs/promises")
+			vi.mocked(fsp.readFile).mockResolvedValueOnce(
+				JSON.stringify({
+					version: 2,
+					messages: [{ role: "user", content: "hello from v2" }],
+				}) as never,
+			)
+
+			const result = await (provider as any).getTaskWithId("v2-envelope-task")
+
+			expect(result.historyItem).toEqual(historyItem)
+			expect(result.apiConversationHistory).toEqual([{ role: "user", content: "hello from v2" }])
+
+			vi.mocked(fsUtils.fileExistsAtPath).mockRestore()
+		})
 	})
 })
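
For reference, the v2 envelope pinned by this test: readRooMessages unwraps the versioned wrapper and returns the inner array. Only the fields exercised by the fixture are shown; anything beyond them is assumed.

// Shape of api_conversation_history.json in v2 envelope form.
const envelope = {
	version: 2,
	messages: [{ role: "user", content: "hello from v2" }],
}
// readRooMessages({ taskId, globalStoragePath }) resolves to envelope.messages here.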

+ 1 - 3
src/core/webview/webviewMessageHandler.ts

@@ -384,9 +384,7 @@ export const webviewMessageHandler = async (
 					// Align API history truncation to the same user message timestamp if present
 					const userTs = m.ts
 					if (typeof userTs === "number") {
-						const apiIdx = currentCline.apiConversationHistory.findIndex(
-							(am: ApiMessage) => am.ts === userTs,
-						)
+						const apiIdx = currentCline.apiConversationHistory.findIndex((am) => (am as any).ts === userTs)
 						if (apiIdx !== -1) {
 							deleteFromApiIndex = apiIdx
 						}
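
A minimal sketch of the ts alignment above, assuming UI and API histories stamp corresponding messages with the same ts:

declare const apiConversationHistory: Array<{ ts?: number }>

const userTs = 1736500000000 // ts of the UI message being truncated at (illustrative)
const apiIdx = apiConversationHistory.findIndex((am) => am.ts === userTs)
// Truncate the API history only when a message with a matching ts exists.
const deleteFromApiIndex = apiIdx !== -1 ? apiIdx : undefined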

Some files were not shown because too many files changed in this diff