Fix provider 400s: strip reasoning_details from messages, $ref from tool schemas (#11431)

Co-authored-by: daniel-lxs <[email protected]>
0xMink 2 days ago
parent
commit
9e46d3e10c
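
Both fixes target request-validation 400s: conversation messages are persisted as JSON, so legacy fields such as `reasoning_details` and `reasoning_content` (plus UI metadata like `ts`) survive deserialization and reach strict provider APIs, which reject unknown keys; separately, tool schemas can carry `$ref` entries providers cannot resolve. A minimal sketch of the message-side failure mode and the allowlist fix introduced below (the request and error shapes are illustrative, not a verbatim API trace):

```ts
// A message restored from task history can carry legacy fields:
const persisted = {
	role: "assistant" as const,
	content: [{ type: "text" as const, text: "Hi" }],
	reasoning_details: [{ type: "thinking", thinking: "..." }], // legacy, pre-AI-SDK
	ts: 1234567890, // UI metadata
}

// Strict providers validate the request body and reject unknown keys, e.g.
// Anthropic: 400 "messages.1: Extra inputs are not permitted" (illustrative).

// The fix (sanitizeMessagesForProvider, below) forwards only an allowlist:
const { role, content } = persisted
const safe = { role, content }
```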

+ 8 - 7
src/api/providers/__tests__/anthropic-vertex.spec.ts

@@ -246,7 +246,7 @@ describe("AnthropicVertexHandler", () => {
 			)
 		})
 
-		it("should pass messages directly to streamText as ModelMessage[]", async () => {
+		it("should sanitize and pass messages to streamText as ModelMessage[]", async () => {
 			mockStreamText.mockReturnValue(createMockStreamResult([]))
 
 			const stream = handler.createMessage(systemPrompt, mockMessages)
@@ -254,12 +254,13 @@ describe("AnthropicVertexHandler", () => {
 				// consume
 			}
 
-			// Messages are now already in ModelMessage format, passed directly to streamText
-			expect(mockStreamText).toHaveBeenCalledWith(
-				expect.objectContaining({
-					messages: mockMessages,
-				}),
-			)
+			// Messages are sanitized (allowlist: role, content, providerOptions) before passing to streamText
+			const callArgs = mockStreamText.mock.calls[0]![0]
+			expect(callArgs.messages).toHaveLength(2)
+			expect(callArgs.messages[0].role).toBe("user")
+			expect(callArgs.messages[0].content).toBe("Hello")
+			expect(callArgs.messages[1].role).toBe("assistant")
+			expect(callArgs.messages[1].content).toBe("Hi there!")
 		})
 
 		it("should pass tools through AI SDK conversion pipeline", async () => {

+ 39 - 0
src/api/providers/__tests__/anthropic.spec.ts

@@ -399,6 +399,45 @@ describe("AnthropicHandler", () => {
 			expect(endChunk).toBeDefined()
 		})
 
+		it("should strip reasoning_details and reasoning_content from messages before sending to API", async () => {
+			setupStreamTextMock([{ type: "text-delta", text: "test" }])
+
+			// Simulate messages with extra legacy fields that survive JSON deserialization
+			const messagesWithExtraFields = [
+				{
+					role: "user",
+					content: [{ type: "text" as const, text: "Hello" }],
+				},
+				{
+					role: "assistant",
+					content: [{ type: "text" as const, text: "Hi" }],
+					reasoning_details: [{ type: "thinking", thinking: "some reasoning" }],
+					reasoning_content: "some reasoning content",
+				},
+				{
+					role: "user",
+					content: [{ type: "text" as const, text: "Follow up" }],
+				},
+			] as any
+
+			const stream = handler.createMessage(systemPrompt, messagesWithExtraFields)
+
+			for await (const _chunk of stream) {
+				// Consume stream
+			}
+
+			// Verify streamText was called exactly once
+			expect(mockStreamText).toHaveBeenCalledTimes(1)
+			const callArgs = mockStreamText.mock.calls[0]![0]
+			for (const msg of callArgs.messages) {
+				expect(msg).not.toHaveProperty("reasoning_details")
+				expect(msg).not.toHaveProperty("reasoning_content")
+			}
+			// Verify the rest of the message is preserved
+			expect(callArgs.messages[1].role).toBe("assistant")
+			expect(callArgs.messages[1].content).toEqual([{ type: "text", text: "Hi" }])
+		})
+
 		it("should pass system prompt via system param when no systemProviderOptions", async () => {
 			setupStreamTextMock([{ type: "text-delta", text: "test" }])
 

+ 4 - 3
src/api/providers/anthropic-vertex.ts

@@ -1,6 +1,6 @@
 import type { Anthropic } from "@anthropic-ai/sdk"
 import { createVertexAnthropic } from "@ai-sdk/google-vertex/anthropic"
-import { streamText, generateText, ToolSet, ModelMessage } from "ai"
+import { streamText, generateText, ToolSet } from "ai"
 
 import {
 	type ModelInfo,
@@ -28,6 +28,7 @@ import {
 } from "../transform/ai-sdk"
 import { applyToolCacheOptions, applySystemPromptCaching } from "../transform/cache-breakpoints"
 import { calculateApiCostAnthropic } from "../../shared/cost"
+import { sanitizeMessagesForProvider } from "../transform/sanitize-messages"
 
 import { DEFAULT_HEADERS } from "./constants"
 import { BaseProvider } from "./base-provider"
@@ -91,8 +92,8 @@ export class AnthropicVertexHandler extends BaseProvider implements SingleComple
 	): ApiStream {
 		const modelConfig = this.getModel()
 
-		// Convert messages to AI SDK format
-		const aiSdkMessages = messages as ModelMessage[]
+		// Sanitize messages for the provider API (allowlist: role, content, providerOptions).
+		const aiSdkMessages = sanitizeMessagesForProvider(messages)
 
 		// Convert tools to AI SDK format
 		const openAiTools = this.convertToolsForOpenAI(metadata?.tools)

+ 4 - 3
src/api/providers/anthropic.ts

@@ -1,5 +1,5 @@
 import { createAnthropic } from "@ai-sdk/anthropic"
-import { streamText, generateText, ToolSet, ModelMessage } from "ai"
+import { streamText, generateText, ToolSet } from "ai"
 
 import {
 	type ModelInfo,
@@ -26,6 +26,7 @@ import {
 } from "../transform/ai-sdk"
 import { applyToolCacheOptions, applySystemPromptCaching } from "../transform/cache-breakpoints"
 import { calculateApiCostAnthropic } from "../../shared/cost"
+import { sanitizeMessagesForProvider } from "../transform/sanitize-messages"
 
 import { DEFAULT_HEADERS } from "./constants"
 import { BaseProvider } from "./base-provider"
@@ -77,8 +78,8 @@ export class AnthropicHandler extends BaseProvider implements SingleCompletionHa
 	): ApiStream {
 		const modelConfig = this.getModel()
 
-		// Convert messages to AI SDK format
-		const aiSdkMessages = messages as ModelMessage[]
+		// Sanitize messages for the provider API (allowlist: role, content, providerOptions).
+		const aiSdkMessages = sanitizeMessagesForProvider(messages)
 
 		// Convert tools to AI SDK format
 		const openAiTools = this.convertToolsForOpenAI(metadata?.tools)

+ 4 - 3
src/api/providers/azure.ts

@@ -1,6 +1,6 @@
 import { Anthropic } from "@anthropic-ai/sdk"
 import { createAzure } from "@ai-sdk/azure"
-import { streamText, generateText, ToolSet, ModelMessage } from "ai"
+import { streamText, generateText, ToolSet } from "ai"
 
 import { azureModels, azureDefaultModelInfo, type ModelInfo } from "@roo-code/types"
 
@@ -21,6 +21,7 @@ import { DEFAULT_HEADERS } from "./constants"
 import { BaseProvider } from "./base-provider"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
 import type { RooMessage } from "../../core/task-persistence/rooMessage"
+import { sanitizeMessagesForProvider } from "../transform/sanitize-messages"
 
 const AZURE_DEFAULT_TEMPERATURE = 0
 
@@ -139,8 +140,8 @@ export class AzureHandler extends BaseProvider implements SingleCompletionHandle
 		const { temperature } = this.getModel()
 		const languageModel = this.getLanguageModel()
 
-		// Convert messages to AI SDK format
-		const aiSdkMessages = messages as ModelMessage[]
+		// Sanitize messages for the provider API (allowlist: role, content, providerOptions).
+		const aiSdkMessages = sanitizeMessagesForProvider(messages)
 
 		// Convert tools to OpenAI format first, then to AI SDK format
 		const openAiTools = this.convertToolsForOpenAI(metadata?.tools)

+ 3 - 2
src/api/providers/baseten.ts

@@ -1,6 +1,6 @@
 import { Anthropic } from "@anthropic-ai/sdk"
 import { createBaseten } from "@ai-sdk/baseten"
-import { streamText, generateText, ToolSet, ModelMessage } from "ai"
+import { streamText, generateText, ToolSet } from "ai"
 
 import { basetenModels, basetenDefaultModelId, type ModelInfo } from "@roo-code/types"
 
@@ -21,6 +21,7 @@ import { DEFAULT_HEADERS } from "./constants"
 import { BaseProvider } from "./base-provider"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
 import type { RooMessage } from "../../core/task-persistence/rooMessage"
+import { sanitizeMessagesForProvider } from "../transform/sanitize-messages"
 
 const BASETEN_DEFAULT_TEMPERATURE = 0.5
 
@@ -102,7 +103,7 @@ export class BasetenHandler extends BaseProvider implements SingleCompletionHand
 		const { temperature } = this.getModel()
 		const languageModel = this.getLanguageModel()
 
-		const aiSdkMessages = messages as ModelMessage[]
+		const aiSdkMessages = sanitizeMessagesForProvider(messages)
 
 		const openAiTools = this.convertToolsForOpenAI(metadata?.tools)
 		const aiSdkTools = convertToolsForAiSdk(openAiTools) as ToolSet | undefined

+ 4 - 3
src/api/providers/deepseek.ts

@@ -1,6 +1,6 @@
 import { Anthropic } from "@anthropic-ai/sdk"
 import { createDeepSeek } from "@ai-sdk/deepseek"
-import { streamText, generateText, ToolSet, ModelMessage } from "ai"
+import { streamText, generateText, ToolSet } from "ai"
 
 import { deepSeekModels, deepSeekDefaultModelId, DEEP_SEEK_DEFAULT_TEMPERATURE, type ModelInfo } from "@roo-code/types"
 
@@ -21,6 +21,7 @@ import { DEFAULT_HEADERS } from "./constants"
 import { BaseProvider } from "./base-provider"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
 import type { RooMessage } from "../../core/task-persistence/rooMessage"
+import { sanitizeMessagesForProvider } from "../transform/sanitize-messages"
 
 /**
  * DeepSeek provider using the dedicated @ai-sdk/deepseek package.
@@ -117,8 +118,8 @@ export class DeepSeekHandler extends BaseProvider implements SingleCompletionHan
 		const { temperature } = this.getModel()
 		const languageModel = this.getLanguageModel()
 
-		// Convert messages to AI SDK format
-		const aiSdkMessages = messages as ModelMessage[]
+		// Sanitize messages for the provider API (allowlist: role, content, providerOptions).
+		const aiSdkMessages = sanitizeMessagesForProvider(messages)
 
 		// Convert tools to OpenAI format first, then to AI SDK format
 		const openAiTools = this.convertToolsForOpenAI(metadata?.tools)

+ 4 - 3
src/api/providers/fireworks.ts

@@ -1,6 +1,6 @@
 import { Anthropic } from "@anthropic-ai/sdk"
 import { createFireworks } from "@ai-sdk/fireworks"
-import { streamText, generateText, ToolSet, ModelMessage } from "ai"
+import { streamText, generateText, ToolSet } from "ai"
 
 import { fireworksModels, fireworksDefaultModelId, type ModelInfo } from "@roo-code/types"
 
@@ -21,6 +21,7 @@ import { DEFAULT_HEADERS } from "./constants"
 import { BaseProvider } from "./base-provider"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
 import type { RooMessage } from "../../core/task-persistence/rooMessage"
+import { sanitizeMessagesForProvider } from "../transform/sanitize-messages"
 
 const FIREWORKS_DEFAULT_TEMPERATURE = 0.5
 
@@ -117,8 +118,8 @@ export class FireworksHandler extends BaseProvider implements SingleCompletionHa
 		const { temperature } = this.getModel()
 		const languageModel = this.getLanguageModel()
 
-		// Convert messages to AI SDK format
-		const aiSdkMessages = messages as ModelMessage[]
+		// Sanitize messages for the provider API (allowlist: role, content, providerOptions).
+		const aiSdkMessages = sanitizeMessagesForProvider(messages)
 
 		// Convert tools to OpenAI format first, then to AI SDK format
 		const openAiTools = this.convertToolsForOpenAI(metadata?.tools)

+ 3 - 10
src/api/providers/lm-studio.ts

@@ -1,13 +1,5 @@
 import { Anthropic } from "@anthropic-ai/sdk"
-import {
-	streamText,
-	generateText,
-	ToolSet,
-	wrapLanguageModel,
-	extractReasoningMiddleware,
-	LanguageModel,
-	ModelMessage,
-} from "ai"
+import { streamText, generateText, ToolSet, wrapLanguageModel, extractReasoningMiddleware, LanguageModel } from "ai"
 
 import { type ModelInfo, openAiModelInfoSaneDefaults, LMSTUDIO_DEFAULT_TEMPERATURE } from "@roo-code/types"
 
@@ -27,6 +19,7 @@ import { OpenAICompatibleHandler, OpenAICompatibleConfig } from "./openai-compat
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
 import { getModelsFromCache } from "./fetchers/modelCache"
 import type { RooMessage } from "../../core/task-persistence/rooMessage"
+import { sanitizeMessagesForProvider } from "../transform/sanitize-messages"
 
 export class LmStudioHandler extends OpenAICompatibleHandler implements SingleCompletionHandler {
 	constructor(options: ApiHandlerOptions) {
@@ -65,7 +58,7 @@ export class LmStudioHandler extends OpenAICompatibleHandler implements SingleCo
 		const model = this.getModel()
 		const languageModel = this.getLanguageModel()
 
-		const aiSdkMessages = messages as ModelMessage[]
+		const aiSdkMessages = sanitizeMessagesForProvider(messages)
 
 		const openAiTools = this.convertToolsForOpenAI(metadata?.tools)
 		const aiSdkTools = convertToolsForAiSdk(openAiTools) as ToolSet | undefined

+ 3 - 2
src/api/providers/minimax.ts

@@ -1,6 +1,6 @@
 import type { Anthropic } from "@anthropic-ai/sdk"
 import { createAnthropic } from "@ai-sdk/anthropic"
-import { streamText, generateText, ToolSet, ModelMessage } from "ai"
+import { streamText, generateText, ToolSet } from "ai"
 
 import { type ModelInfo, minimaxDefaultModelId, minimaxModels } from "@roo-code/types"
 
@@ -23,6 +23,7 @@ import { DEFAULT_HEADERS } from "./constants"
 import { BaseProvider } from "./base-provider"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
 import type { RooMessage } from "../../core/task-persistence/rooMessage"
+import { sanitizeMessagesForProvider } from "../transform/sanitize-messages"
 
 export class MiniMaxHandler extends BaseProvider implements SingleCompletionHandler {
 	private client: ReturnType<typeof createAnthropic>
@@ -73,7 +74,7 @@ export class MiniMaxHandler extends BaseProvider implements SingleCompletionHand
 		})
 
 		const mergedMessages = mergeEnvironmentDetailsForMiniMax(messages as any)
-		const aiSdkMessages = mergedMessages as ModelMessage[]
+		const aiSdkMessages = sanitizeMessagesForProvider(mergedMessages as RooMessage[])
 		const openAiTools = this.convertToolsForOpenAI(metadata?.tools)
 		const aiSdkTools = convertToolsForAiSdk(openAiTools) as ToolSet | undefined
 		applyToolCacheOptions(aiSdkTools as Parameters<typeof applyToolCacheOptions>[0], metadata?.toolProviderOptions)

+ 3 - 2
src/api/providers/mistral.ts

@@ -1,6 +1,6 @@
 import { Anthropic } from "@anthropic-ai/sdk"
 import { createMistral } from "@ai-sdk/mistral"
-import { streamText, generateText, ToolSet, LanguageModel, ModelMessage } from "ai"
+import { streamText, generateText, ToolSet, LanguageModel } from "ai"
 
 import {
 	mistralModels,
@@ -21,6 +21,7 @@ import { DEFAULT_HEADERS } from "./constants"
 import { BaseProvider } from "./base-provider"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
 import type { RooMessage } from "../../core/task-persistence/rooMessage"
+import { sanitizeMessagesForProvider } from "../transform/sanitize-messages"
 
 /**
  * Mistral provider using the dedicated @ai-sdk/mistral package.
@@ -145,7 +146,7 @@ export class MistralHandler extends BaseProvider implements SingleCompletionHand
 		const languageModel = this.getLanguageModel()
 
 		// Convert messages to AI SDK format
-		const aiSdkMessages = messages as ModelMessage[]
+		const aiSdkMessages = sanitizeMessagesForProvider(messages)
 
 		// Convert tools to OpenAI format first, then to AI SDK format
 		const openAiTools = this.convertToolsForOpenAI(metadata?.tools)

+ 3 - 2
src/api/providers/native-ollama.ts

@@ -1,6 +1,6 @@
 import { Anthropic } from "@anthropic-ai/sdk"
 import { createOllama } from "ollama-ai-provider-v2"
-import { streamText, generateText, ToolSet, ModelMessage } from "ai"
+import { streamText, generateText, ToolSet } from "ai"
 
 import { ModelInfo, openAiModelInfoSaneDefaults, DEEP_SEEK_DEFAULT_TEMPERATURE } from "@roo-code/types"
 
@@ -21,6 +21,7 @@ import { BaseProvider } from "./base-provider"
 import { getOllamaModels } from "./fetchers/ollama"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
 import type { RooMessage } from "../../core/task-persistence/rooMessage"
+import { sanitizeMessagesForProvider } from "../transform/sanitize-messages"
 
 /**
  * NativeOllamaHandler using the ollama-ai-provider-v2 AI SDK community provider.
@@ -96,7 +97,7 @@ export class NativeOllamaHandler extends BaseProvider implements SingleCompletio
 
 		const languageModel = this.getLanguageModel()
 
-		const aiSdkMessages = messages as ModelMessage[]
+		const aiSdkMessages = sanitizeMessagesForProvider(messages)
 
 		const openAiTools = this.convertToolsForOpenAI(metadata?.tools)
 		const aiSdkTools = convertToolsForAiSdk(openAiTools) as ToolSet | undefined

+ 4 - 3
src/api/providers/openai-compatible.ts

@@ -6,7 +6,7 @@
 import { Anthropic } from "@anthropic-ai/sdk"
 import OpenAI from "openai"
 import { createOpenAICompatible } from "@ai-sdk/openai-compatible"
-import { streamText, generateText, LanguageModel, ToolSet, ModelMessage } from "ai"
+import { streamText, generateText, LanguageModel, ToolSet } from "ai"
 
 import type { ModelInfo } from "@roo-code/types"
 
@@ -26,6 +26,7 @@ import { DEFAULT_HEADERS } from "./constants"
 import { BaseProvider } from "./base-provider"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
 import type { RooMessage } from "../../core/task-persistence/rooMessage"
+import { sanitizeMessagesForProvider } from "../transform/sanitize-messages"
 
 /**
  * Configuration options for creating an OpenAI-compatible provider.
@@ -143,8 +144,8 @@ export abstract class OpenAICompatibleHandler extends BaseProvider implements Si
 		const model = this.getModel()
 		const languageModel = this.getLanguageModel()
 
-		// Convert messages to AI SDK format
-		const aiSdkMessages = messages as ModelMessage[]
+		// Sanitize messages for the provider API (allowlist: role, content, providerOptions).
+		const aiSdkMessages = sanitizeMessagesForProvider(messages)
 
 		// Convert tools to OpenAI format first, then to AI SDK format
 		const openAiTools = this.convertToolsForOpenAI(metadata?.tools)

+ 2 - 1
src/api/providers/openai.ts

@@ -32,6 +32,7 @@ import { DEFAULT_HEADERS } from "./constants"
 import { BaseProvider } from "./base-provider"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
 import type { RooMessage } from "../../core/task-persistence/rooMessage"
+import { sanitizeMessagesForProvider } from "../transform/sanitize-messages"
 
 // TODO: Rename this to OpenAICompatibleHandler. Also, I think the
 // `OpenAINativeHandler` can subclass from this, since it's obviously
@@ -107,7 +108,7 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
 
 		const languageModel = this.getLanguageModel()
 
-		const aiSdkMessages = messages as ModelMessage[]
+		const aiSdkMessages = sanitizeMessagesForProvider(messages)
 
 		const openAiTools = this.convertToolsForOpenAI(metadata?.tools)
 		const aiSdkTools = convertToolsForAiSdk(openAiTools) as ToolSet | undefined

+ 4 - 2
src/api/providers/openrouter.ts

@@ -1,6 +1,6 @@
 import { Anthropic } from "@anthropic-ai/sdk"
 import { createOpenRouter } from "@openrouter/ai-sdk-provider"
-import { streamText, generateText, ModelMessage } from "ai"
+import { streamText, generateText } from "ai"
 
 import {
 	type ModelRecord,
@@ -34,6 +34,7 @@ import { generateImageWithProvider, ImageGenerationResult } from "./utils/image-
 import type { ApiHandlerCreateMessageMetadata, SingleCompletionHandler } from "../index"
 import type { ApiStreamChunk, ApiStreamUsageChunk } from "../transform/stream"
 import type { RooMessage } from "../../core/task-persistence/rooMessage"
+import { sanitizeMessagesForProvider } from "../transform/sanitize-messages"
 
 export class OpenRouterHandler extends BaseProvider implements SingleCompletionHandler {
 	protected options: ApiHandlerOptions
@@ -149,7 +150,8 @@ export class OpenRouterHandler extends BaseProvider implements SingleCompletionH
 			? { "x-anthropic-beta": "fine-grained-tool-streaming-2025-05-14" }
 			: undefined
 
-		const aiSdkMessages = messages as ModelMessage[]
+		// Sanitize messages for the provider API (allowlist: role, content, providerOptions).
+		const aiSdkMessages = sanitizeMessagesForProvider(messages)
 
 		const openrouter = this.createOpenRouterProvider({ reasoning, headers })
 

+ 3 - 2
src/api/providers/requesty.ts

@@ -1,6 +1,6 @@
 import { Anthropic } from "@anthropic-ai/sdk"
 import { createRequesty, type RequestyProviderMetadata } from "@requesty/ai-sdk"
-import { streamText, generateText, ToolSet, ModelMessage } from "ai"
+import { streamText, generateText, ToolSet } from "ai"
 
 import { type ModelInfo, type ModelRecord, requestyDefaultModelId, requestyDefaultModelInfo } from "@roo-code/types"
 
@@ -25,6 +25,7 @@ import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from ".
 import { toRequestyServiceUrl } from "../../shared/utils/requesty"
 import { applyRouterToolPreferences } from "./utils/router-tool-preferences"
 import type { RooMessage } from "../../core/task-persistence/rooMessage"
+import { sanitizeMessagesForProvider } from "../transform/sanitize-messages"
 
 /**
  * Requesty provider using the dedicated @requesty/ai-sdk package.
@@ -180,7 +181,7 @@ export class RequestyHandler extends BaseProvider implements SingleCompletionHan
 		const { info, temperature } = await this.fetchModel()
 		const languageModel = this.getLanguageModel()
 
-		const aiSdkMessages = messages as ModelMessage[]
+		const aiSdkMessages = sanitizeMessagesForProvider(messages)
 
 		const openAiTools = this.convertToolsForOpenAI(metadata?.tools)
 		const aiSdkTools = convertToolsForAiSdk(openAiTools) as ToolSet | undefined

+ 4 - 3
src/api/providers/roo.ts

@@ -1,6 +1,6 @@
 import { Anthropic } from "@anthropic-ai/sdk"
 import { createOpenAICompatible } from "@ai-sdk/openai-compatible"
-import { streamText, generateText, type ModelMessage } from "ai"
+import { streamText, generateText } from "ai"
 
 import { rooDefaultModelId, getApiProtocol, type ImageGenerationApiMethod } from "@roo-code/types"
 import { CloudService } from "@roo-code/cloud"
@@ -27,6 +27,7 @@ import { getModels, getModelsFromCache } from "./fetchers/modelCache"
 import { generateImageWithProvider, generateImageWithImagesApi, ImageGenerationResult } from "./utils/image-generation"
 import { t } from "../../i18n"
 import type { RooMessage } from "../../core/task-persistence/rooMessage"
+import { sanitizeMessagesForProvider } from "../transform/sanitize-messages"
 
 function getSessionToken(): string {
 	const token = CloudService.hasInstance() ? CloudService.instance.authService?.getSessionToken() : undefined
@@ -120,8 +121,8 @@ export class RooHandler extends BaseProvider implements SingleCompletionHandler
 		// Create per-request provider with fresh session token
 		const provider = this.createRooProvider({ reasoning, taskId: metadata?.taskId })
 
-		// RooMessage[] is already AI SDK-compatible, cast directly
-		const aiSdkMessages = messages as ModelMessage[]
+		// Sanitize messages for the provider API (allowlist: role, content, providerOptions).
+		const aiSdkMessages = sanitizeMessagesForProvider(messages)
 		const tools = convertToolsForAiSdk(this.convertToolsForOpenAI(metadata?.tools))
 		applyToolCacheOptions(tools as Parameters<typeof applyToolCacheOptions>[0], metadata?.toolProviderOptions)
 

+ 8 - 5
src/api/providers/sambanova.ts

@@ -1,6 +1,6 @@
 import { Anthropic } from "@anthropic-ai/sdk"
 import { createSambaNova } from "sambanova-ai-provider"
-import { streamText, generateText, ToolSet, ModelMessage } from "ai"
+import { streamText, generateText, ToolSet } from "ai"
 
 import { sambaNovaModels, sambaNovaDefaultModelId, type ModelInfo } from "@roo-code/types"
 
@@ -22,6 +22,7 @@ import { DEFAULT_HEADERS } from "./constants"
 import { BaseProvider } from "./base-provider"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
 import type { RooMessage } from "../../core/task-persistence/rooMessage"
+import { sanitizeMessagesForProvider } from "../transform/sanitize-messages"
 
 const SAMBANOVA_DEFAULT_TEMPERATURE = 0.7
 
@@ -118,10 +119,12 @@ export class SambaNovaHandler extends BaseProvider implements SingleCompletionHa
 		const { temperature, info } = this.getModel()
 		const languageModel = this.getLanguageModel()
 
-		// For models that don't support multi-part content (like DeepSeek), flatten messages to string content
-		// SambaNova's DeepSeek models expect string content, not array content
-		const castMessages = messages as ModelMessage[]
-		const aiSdkMessages = info.supportsImages ? castMessages : flattenAiSdkMessagesToStringContent(castMessages)
+		// Sanitize messages for the provider API (allowlist: role, content, providerOptions).
+		// For models that don't support multi-part content (like DeepSeek), flatten to string content.
+		const sanitizedMessages = sanitizeMessagesForProvider(messages)
+		const aiSdkMessages = info.supportsImages
+			? sanitizedMessages
+			: flattenAiSdkMessagesToStringContent(sanitizedMessages)
 
 		// Convert tools to OpenAI format first, then to AI SDK format
 		const openAiTools = this.convertToolsForOpenAI(metadata?.tools)
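
`flattenAiSdkMessagesToStringContent` comes from the existing transform layer and is unchanged by this commit. A hypothetical sketch of that kind of flattening, assuming text parts are joined and non-text parts dropped (the real helper may differ):

```ts
import type { ModelMessage } from "ai"

// Hypothetical flattening helper (the real one lives in ../transform/ai-sdk;
// the name and behavior here are assumptions for illustration only).
function flattenToStringContent(messages: ModelMessage[]): ModelMessage[] {
	return messages.map((msg) => {
		if (typeof msg.content === "string") return msg
		const text = (msg.content as Array<{ type: string; text?: string }>)
			.filter((part) => part.type === "text")
			.map((part) => part.text ?? "")
			.join("\n")
		return { ...msg, content: text } as ModelMessage
	})
}
```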

+ 3 - 2
src/api/providers/vercel-ai-gateway.ts

@@ -1,5 +1,5 @@
 import { Anthropic } from "@anthropic-ai/sdk"
-import { createGateway, streamText, generateText, ToolSet, ModelMessage } from "ai"
+import { createGateway, streamText, generateText, ToolSet } from "ai"
 
 import {
 	vercelAiGatewayDefaultModelId,
@@ -27,6 +27,7 @@ import { BaseProvider } from "./base-provider"
 import { getModels, getModelsFromCache } from "./fetchers/modelCache"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
 import type { RooMessage } from "../../core/task-persistence/rooMessage"
+import { sanitizeMessagesForProvider } from "../transform/sanitize-messages"
 
 /**
  * Vercel AI Gateway provider using the built-in AI SDK gateway support.
@@ -117,7 +118,7 @@ export class VercelAiGatewayHandler extends BaseProvider implements SingleComple
 		const { id: modelId, info } = await this.fetchModel()
 		const languageModel = this.getLanguageModel(modelId)
 
-		const aiSdkMessages = messages as ModelMessage[]
+		const aiSdkMessages = sanitizeMessagesForProvider(messages)
 
 		const openAiTools = this.convertToolsForOpenAI(metadata?.tools)
 		const aiSdkTools = convertToolsForAiSdk(openAiTools) as ToolSet | undefined

+ 3 - 2
src/api/providers/zai.ts

@@ -1,6 +1,6 @@
 import { Anthropic } from "@anthropic-ai/sdk"
 import { createZhipu } from "zhipu-ai-provider"
-import { streamText, generateText, ToolSet, ModelMessage } from "ai"
+import { streamText, generateText, ToolSet } from "ai"
 
 import {
 	internationalZAiModels,
@@ -29,6 +29,7 @@ import { DEFAULT_HEADERS } from "./constants"
 import { BaseProvider } from "./base-provider"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
 import type { RooMessage } from "../../core/task-persistence/rooMessage"
+import { sanitizeMessagesForProvider } from "../transform/sanitize-messages"
 
 /**
  * Z.ai provider using the dedicated zhipu-ai-provider package.
@@ -99,7 +100,7 @@ export class ZAiHandler extends BaseProvider implements SingleCompletionHandler
 		const { id: modelId, info, temperature } = this.getModel()
 		const languageModel = this.getLanguageModel()
 
-		const aiSdkMessages = messages as ModelMessage[]
+		const aiSdkMessages = sanitizeMessagesForProvider(messages)
 
 		const openAiTools = this.convertToolsForOpenAI(metadata?.tools)
 		const aiSdkTools = convertToolsForAiSdk(openAiTools) as ToolSet | undefined

+ 172 - 0
src/api/transform/__tests__/sanitize-messages.spec.ts

@@ -0,0 +1,172 @@
+import { sanitizeMessagesForProvider } from "../sanitize-messages"
+import type { RooMessage } from "../../../core/task-persistence/rooMessage"
+
+describe("sanitizeMessagesForProvider", () => {
+	it("should preserve role and content on user messages", () => {
+		const messages: RooMessage[] = [{ role: "user", content: [{ type: "text", text: "Hello" }] }]
+
+		const result = sanitizeMessagesForProvider(messages)
+
+		expect(result).toHaveLength(1)
+		expect(result[0]).toEqual({
+			role: "user",
+			content: [{ type: "text", text: "Hello" }],
+		})
+	})
+
+	it("should preserve role, content, and providerOptions on assistant messages", () => {
+		const messages: RooMessage[] = [
+			{
+				role: "assistant",
+				content: [{ type: "text", text: "Hi" }],
+				providerOptions: { openrouter: { reasoning_details: [{ type: "reasoning.text", text: "thinking" }] } },
+			} as any,
+		]
+
+		const result = sanitizeMessagesForProvider(messages)
+
+		expect(result).toHaveLength(1)
+		expect(result[0]).toEqual({
+			role: "assistant",
+			content: [{ type: "text", text: "Hi" }],
+			providerOptions: { openrouter: { reasoning_details: [{ type: "reasoning.text", text: "thinking" }] } },
+		})
+	})
+
+	it("should strip reasoning_details from messages", () => {
+		const messages = [
+			{
+				role: "assistant",
+				content: [{ type: "text", text: "Response" }],
+				reasoning_details: [{ type: "reasoning.encrypted", data: "encrypted_data" }],
+			},
+		] as any as RooMessage[]
+
+		const result = sanitizeMessagesForProvider(messages)
+
+		expect(result).toHaveLength(1)
+		expect(result[0]).not.toHaveProperty("reasoning_details")
+		expect(result[0]).toEqual({
+			role: "assistant",
+			content: [{ type: "text", text: "Response" }],
+		})
+	})
+
+	it("should strip reasoning_content from messages", () => {
+		const messages = [
+			{
+				role: "assistant",
+				content: [{ type: "text", text: "Response" }],
+				reasoning_content: "some reasoning content",
+			},
+		] as any as RooMessage[]
+
+		const result = sanitizeMessagesForProvider(messages)
+
+		expect(result).toHaveLength(1)
+		expect(result[0]).not.toHaveProperty("reasoning_content")
+	})
+
+	it("should strip metadata fields (ts, condenseId, etc.)", () => {
+		const messages = [
+			{
+				role: "user",
+				content: "Hello",
+				ts: 1234567890,
+				condenseId: "cond-1",
+				condenseParent: "cond-0",
+				truncationId: "trunc-1",
+				truncationParent: "trunc-0",
+				isTruncationMarker: true,
+				isSummary: true,
+			},
+		] as any as RooMessage[]
+
+		const result = sanitizeMessagesForProvider(messages)
+
+		expect(result).toHaveLength(1)
+		expect(result[0]).toEqual({
+			role: "user",
+			content: "Hello",
+		})
+		expect(result[0]).not.toHaveProperty("ts")
+		expect(result[0]).not.toHaveProperty("condenseId")
+		expect(result[0]).not.toHaveProperty("condenseParent")
+		expect(result[0]).not.toHaveProperty("truncationId")
+		expect(result[0]).not.toHaveProperty("truncationParent")
+		expect(result[0]).not.toHaveProperty("isTruncationMarker")
+		expect(result[0]).not.toHaveProperty("isSummary")
+	})
+
+	it("should strip any unknown extra fields", () => {
+		const messages = [
+			{
+				role: "assistant",
+				content: [{ type: "text", text: "Hi" }],
+				some_future_field: "should be stripped",
+				another_unknown: 42,
+			},
+		] as any as RooMessage[]
+
+		const result = sanitizeMessagesForProvider(messages)
+
+		expect(result).toHaveLength(1)
+		expect(result[0]).not.toHaveProperty("some_future_field")
+		expect(result[0]).not.toHaveProperty("another_unknown")
+	})
+
+	it("should not include providerOptions key when undefined", () => {
+		const messages: RooMessage[] = [{ role: "user", content: "Hello" }]
+
+		const result = sanitizeMessagesForProvider(messages)
+
+		expect(result).toHaveLength(1)
+		expect(Object.keys(result[0])).toEqual(["role", "content"])
+	})
+
+	it("should handle mixed message types correctly", () => {
+		const messages = [
+			{
+				role: "user",
+				content: [{ type: "text", text: "Hello" }],
+				ts: 100,
+			},
+			{
+				role: "assistant",
+				content: [{ type: "text", text: "Hi" }],
+				reasoning_details: [{ type: "thinking", thinking: "some reasoning" }],
+				reasoning_content: "some reasoning content",
+				ts: 200,
+			},
+			{
+				role: "tool",
+				content: [{ type: "tool-result", toolCallId: "call_1", toolName: "test", result: "ok" }],
+				ts: 300,
+			},
+			{
+				role: "user",
+				content: [{ type: "text", text: "Follow up" }],
+				ts: 400,
+			},
+		] as any as RooMessage[]
+
+		const result = sanitizeMessagesForProvider(messages)
+
+		expect(result).toHaveLength(4)
+
+		for (const msg of result) {
+			expect(msg).not.toHaveProperty("reasoning_details")
+			expect(msg).not.toHaveProperty("reasoning_content")
+			expect(msg).not.toHaveProperty("ts")
+		}
+
+		expect(result[0]).toEqual({
+			role: "user",
+			content: [{ type: "text", text: "Hello" }],
+		})
+		expect(result[1]).toEqual({
+			role: "assistant",
+			content: [{ type: "text", text: "Hi" }],
+		})
+	})
+})

+ 32 - 0
src/api/transform/sanitize-messages.ts

@@ -0,0 +1,32 @@
+import type { ModelMessage } from "ai"
+import type { RooMessage, RooRoleMessage } from "../../core/task-persistence/rooMessage"
+import { isRooReasoningMessage } from "../../core/task-persistence/rooMessage"
+
+/**
+ * Sanitize RooMessage[] for provider APIs by allowlisting only the fields
+ * that the AI SDK expects on each message.
+ *
+ * Legacy fields like `reasoning_details`, `reasoning_content`, `ts`, `condenseId`,
+ * etc. survive JSON deserialization round-trips and cause providers to reject
+ * requests with "Extra inputs are not permitted" (Anthropic 400) or similar errors.
+ *
+ * This uses an allowlist approach: only `role`, `content`, and `providerOptions`
+ * are forwarded, ensuring any future extraneous fields are also stripped.
+ *
+ * RooReasoningMessage items (standalone encrypted reasoning with no `role`) are
+ * filtered out since they have no AI SDK equivalent.
+ */
+export function sanitizeMessagesForProvider(messages: RooMessage[]): ModelMessage[] {
+	return messages
+		.filter((msg): msg is RooRoleMessage => !isRooReasoningMessage(msg))
+		.map((msg) => {
+			const clean: Record<string, unknown> = {
+				role: msg.role,
+				content: msg.content,
+			}
+			if (msg.providerOptions !== undefined) {
+				clean.providerOptions = msg.providerOptions
+			}
+			return clean as ModelMessage
+		})
+}
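
A usage sketch of the new helper as wired into the providers above; the history array is illustrative:

```ts
import { sanitizeMessagesForProvider } from "./sanitize-messages"
import type { RooMessage } from "../../core/task-persistence/rooMessage"

const history = [
	{ role: "user", content: "Hello", ts: 100 },
	{
		role: "assistant",
		content: [{ type: "text", text: "Hi" }],
		reasoning_content: "legacy reasoning text", // stripped
		providerOptions: { anthropic: { cacheControl: { type: "ephemeral" } } }, // kept
	},
] as unknown as RooMessage[]

const aiSdkMessages = sanitizeMessagesForProvider(history)
// [ { role: "user", content: "Hello" },
//   { role: "assistant", content: [...], providerOptions: {...} } ]
```

The allowlist (rather than a blocklist of known-bad fields) means any future extraneous persistence field is stripped as well, instead of triggering another round of provider 400s.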

+ 101 - 0
src/utils/__tests__/json-schema.spec.ts

@@ -585,4 +585,105 @@ describe("normalizeToolSchema", () => {
 			})
 		})
 	})
+
+	describe("$ref stripping", () => {
+		it("should strip $ref at the top level", () => {
+			const input = {
+				$ref: "#/components/schemas/Foo",
+				type: "object",
+				properties: {
+					name: { type: "string" },
+				},
+			}
+
+			const result = normalizeToolSchema(input)
+
+			expect(result.$ref).toBeUndefined()
+			expect(result.type).toBe("object")
+			expect(result.properties).toBeDefined()
+		})
+
+		it("should strip $ref in nested properties", () => {
+			const input = {
+				type: "object",
+				properties: {
+					response: {
+						$ref: "#/components/schemas/PlanningLogResponse",
+						type: "object",
+						properties: {
+							status: { type: "string" },
+						},
+					},
+				},
+			}
+
+			const result = normalizeToolSchema(input)
+
+			const props = result.properties as Record<string, Record<string, unknown>>
+			expect(props.response.$ref).toBeUndefined()
+			expect(props.response.type).toBe("object")
+		})
+
+		it("should strip $ref in deeply nested array items", () => {
+			const input = {
+				type: "object",
+				properties: {
+					items: {
+						type: "array",
+						items: {
+							type: "object",
+							properties: {
+								ref_field: {
+									$ref: "#/components/schemas/Nested",
+									type: "string",
+								},
+							},
+						},
+					},
+				},
+			}
+
+			const result = normalizeToolSchema(input)
+
+			const props = result.properties as Record<string, Record<string, unknown>>
+			const itemsSchema = props.items.items as Record<string, unknown>
+			const nestedProps = itemsSchema.properties as Record<string, Record<string, unknown>>
+			expect(nestedProps.ref_field.$ref).toBeUndefined()
+			expect(nestedProps.ref_field.type).toBe("string")
+		})
+
+		it("should handle $ref-only schema without crashing", () => {
+			const input = {
+				$ref: "#/components/schemas/Foo",
+			}
+
+			const result = normalizeToolSchema(input)
+
+			expect(result.$ref).toBeUndefined()
+			// Result is an empty schema (permissive) — acceptable for tool inputs
+			expect(result).not.toBeNull()
+			expect(result).toEqual({})
+		})
+
+		it("should strip $ref inside anyOf variants", () => {
+			const input = {
+				type: "object",
+				properties: {
+					field: {
+						anyOf: [
+							{ $ref: "#/components/schemas/TypeA", type: "string" },
+							{ type: "null" },
+						],
+					},
+				},
+			}
+
+			const result = normalizeToolSchema(input)
+
+			const props = result.properties as Record<string, Record<string, unknown>>
+			const anyOf = props.field.anyOf as Record<string, unknown>[]
+			expect(anyOf[0].$ref).toBeUndefined()
+			expect(anyOf[0].type).toBe("string")
+		})
+	})
 })

+ 1 - 0
src/utils/json-schema.ts

@@ -165,6 +165,7 @@ const NormalizedToolSchemaInternal: z.ZodType<Record<string, unknown>, z.ZodType
 					minItems,
 					maxItems,
 					uniqueItems,
+					$ref: _ref, // Strip $ref — unresolvable references break provider APIs (e.g. Gemini 400)
 					...rest
 				} = schema
 				const result: Record<string, unknown> = { ...rest }
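
The one-line change relies on the normalizer's existing destructuring pattern: `$ref` is pulled into a discarded `_ref` binding so it never lands in `rest`. The same pattern in isolation (schema value taken from the new tests; recursion into nested properties is handled by the surrounding Zod transform):

```ts
const schema: Record<string, unknown> = {
	$ref: "#/components/schemas/PlanningLogResponse", // unresolvable by providers
	type: "object",
	properties: { status: { type: "string" } },
}

const { $ref: _ref, ...rest } = schema
// rest = { type: "object", properties: { status: { type: "string" } } }
// Providers that cannot resolve external $refs (e.g. Gemini) now receive
// a clean inline schema instead of returning a 400.
```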