
Revert "Fix provider 400s: strip reasoning_details from messages, $ref from tool schemas" (#11453)

Hannes Rudolph 8 hours ago
parent commit b4d9f92b4d
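
The reverted change is the same across every provider: a runtime allowlist sanitizer is replaced with a compile-time cast. A minimal sketch of the difference, reusing the field names from the deleted sanitize-messages.ts below (illustrative only, not code from this repository):

	import type { ModelMessage } from "ai"

	// A persisted assistant message that picked up legacy fields through
	// JSON serialization round-trips.
	const stored = {
		role: "assistant" as const,
		content: [{ type: "text" as const, text: "Hi" }],
		reasoning_content: "legacy reasoning field",
		ts: 1234567890,
	}

	// Post-revert: the cast is erased at compile time, so the legacy fields
	// still reach the provider and can trigger errors such as Anthropic's
	// "Extra inputs are not permitted" 400 described in the deleted sanitizer.
	const cast = [stored] as ModelMessage[]

	// Pre-revert: the allowlist copy forwarded only role, content, and
	// providerOptions, dropping everything else at runtime.
	const sanitized: ModelMessage[] = [{ role: stored.role, content: stored.content }]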

+ 7 - 8
src/api/providers/__tests__/anthropic-vertex.spec.ts

@@ -246,7 +246,7 @@ describe("AnthropicVertexHandler", () => {
 			)
 		})
 
-		it("should sanitize and pass messages to streamText as ModelMessage[]", async () => {
+		it("should pass messages directly to streamText as ModelMessage[]", async () => {
 			mockStreamText.mockReturnValue(createMockStreamResult([]))
 
 			const stream = handler.createMessage(systemPrompt, mockMessages)
@@ -254,13 +254,12 @@ describe("AnthropicVertexHandler", () => {
 				// consume
 			}
 
-			// Messages are sanitized (allowlist: role, content, providerOptions) before passing to streamText
-			const callArgs = mockStreamText.mock.calls[0]![0]
-			expect(callArgs.messages).toHaveLength(2)
-			expect(callArgs.messages[0].role).toBe("user")
-			expect(callArgs.messages[0].content).toBe("Hello")
-			expect(callArgs.messages[1].role).toBe("assistant")
-			expect(callArgs.messages[1].content).toBe("Hi there!")
+			// Messages are now already in ModelMessage format, passed directly to streamText
+			expect(mockStreamText).toHaveBeenCalledWith(
+				expect.objectContaining({
+					messages: mockMessages,
+				}),
+			)
 		})
 
 		it("should pass tools through AI SDK conversion pipeline", async () => {

+ 0 - 39
src/api/providers/__tests__/anthropic.spec.ts

@@ -399,45 +399,6 @@ describe("AnthropicHandler", () => {
 			expect(endChunk).toBeDefined()
 		})
 
-		it("should strip reasoning_details and reasoning_content from messages before sending to API", async () => {
-			setupStreamTextMock([{ type: "text-delta", text: "test" }])
-
-			// Simulate messages with extra legacy fields that survive JSON deserialization
-			const messagesWithExtraFields = [
-				{
-					role: "user",
-					content: [{ type: "text" as const, text: "Hello" }],
-				},
-				{
-					role: "assistant",
-					content: [{ type: "text" as const, text: "Hi" }],
-					reasoning_details: [{ type: "thinking", thinking: "some reasoning" }],
-					reasoning_content: "some reasoning content",
-				},
-				{
-					role: "user",
-					content: [{ type: "text" as const, text: "Follow up" }],
-				},
-			] as any
-
-			const stream = handler.createMessage(systemPrompt, messagesWithExtraFields)
-
-			for await (const _chunk of stream) {
-				// Consume stream
-			}
-
-			// Verify streamText was called exactly once
-			expect(mockStreamText).toHaveBeenCalledTimes(1)
-			const callArgs = mockStreamText.mock.calls[0]![0]
-			for (const msg of callArgs.messages) {
-				expect(msg).not.toHaveProperty("reasoning_details")
-				expect(msg).not.toHaveProperty("reasoning_content")
-			}
-			// Verify the rest of the message is preserved
-			expect(callArgs.messages[1].role).toBe("assistant")
-			expect(callArgs.messages[1].content).toEqual([{ type: "text", text: "Hi" }])
-		})
-
 		it("should pass system prompt via system param when no systemProviderOptions", async () => {
 			setupStreamTextMock([{ type: "text-delta", text: "test" }])
 

+ 3 - 4
src/api/providers/anthropic-vertex.ts

@@ -1,6 +1,6 @@
 import type { Anthropic } from "@anthropic-ai/sdk"
 import { createVertexAnthropic } from "@ai-sdk/google-vertex/anthropic"
-import { streamText, generateText, ToolSet } from "ai"
+import { streamText, generateText, ToolSet, ModelMessage } from "ai"
 
 import {
 	type ModelInfo,
@@ -28,7 +28,6 @@ import {
 } from "../transform/ai-sdk"
 import { applyToolCacheOptions, applySystemPromptCaching } from "../transform/cache-breakpoints"
 import { calculateApiCostAnthropic } from "../../shared/cost"
-import { sanitizeMessagesForProvider } from "../transform/sanitize-messages"
 
 import { DEFAULT_HEADERS } from "./constants"
 import { BaseProvider } from "./base-provider"
@@ -92,8 +91,8 @@ export class AnthropicVertexHandler extends BaseProvider implements SingleComple
 	): ApiStream {
 		const modelConfig = this.getModel()
 
-		// Sanitize messages for the provider API (allowlist: role, content, providerOptions).
-		const aiSdkMessages = sanitizeMessagesForProvider(messages)
+		// Convert messages to AI SDK format
+		const aiSdkMessages = messages as ModelMessage[]
 
 		// Convert tools to AI SDK format
 		const openAiTools = this.convertToolsForOpenAI(metadata?.tools)

+ 3 - 4
src/api/providers/anthropic.ts

@@ -1,5 +1,5 @@
 import { createAnthropic } from "@ai-sdk/anthropic"
-import { streamText, generateText, ToolSet } from "ai"
+import { streamText, generateText, ToolSet, ModelMessage } from "ai"
 
 import {
 	type ModelInfo,
@@ -26,7 +26,6 @@ import {
 } from "../transform/ai-sdk"
 import { applyToolCacheOptions, applySystemPromptCaching } from "../transform/cache-breakpoints"
 import { calculateApiCostAnthropic } from "../../shared/cost"
-import { sanitizeMessagesForProvider } from "../transform/sanitize-messages"
 
 import { DEFAULT_HEADERS } from "./constants"
 import { BaseProvider } from "./base-provider"
@@ -78,8 +77,8 @@ export class AnthropicHandler extends BaseProvider implements SingleCompletionHa
 	): ApiStream {
 		const modelConfig = this.getModel()
 
-		// Sanitize messages for the provider API (allowlist: role, content, providerOptions).
-		const aiSdkMessages = sanitizeMessagesForProvider(messages)
+		// Convert messages to AI SDK format
+		const aiSdkMessages = messages as ModelMessage[]
 
 		// Convert tools to AI SDK format
 		const openAiTools = this.convertToolsForOpenAI(metadata?.tools)

+ 3 - 4
src/api/providers/azure.ts

@@ -1,6 +1,6 @@
 import { Anthropic } from "@anthropic-ai/sdk"
 import { createAzure } from "@ai-sdk/azure"
-import { streamText, generateText, ToolSet } from "ai"
+import { streamText, generateText, ToolSet, ModelMessage } from "ai"
 
 import { azureModels, azureDefaultModelInfo, type ModelInfo } from "@roo-code/types"
 
@@ -21,7 +21,6 @@ import { DEFAULT_HEADERS } from "./constants"
 import { BaseProvider } from "./base-provider"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
 import type { RooMessage } from "../../core/task-persistence/rooMessage"
-import { sanitizeMessagesForProvider } from "../transform/sanitize-messages"
 
 const AZURE_DEFAULT_TEMPERATURE = 0
 
@@ -155,8 +154,8 @@ export class AzureHandler extends BaseProvider implements SingleCompletionHandle
 		const { temperature } = this.getModel()
 		const languageModel = this.getLanguageModel()
 
-		// Sanitize messages for the provider API (allowlist: role, content, providerOptions).
-		const aiSdkMessages = sanitizeMessagesForProvider(messages)
+		// Convert messages to AI SDK format
+		const aiSdkMessages = messages as ModelMessage[]
 
 		// Convert tools to OpenAI format first, then to AI SDK format
 		const openAiTools = this.convertToolsForOpenAI(metadata?.tools)

+ 2 - 3
src/api/providers/baseten.ts

@@ -1,6 +1,6 @@
 import { Anthropic } from "@anthropic-ai/sdk"
 import { createBaseten } from "@ai-sdk/baseten"
-import { streamText, generateText, ToolSet } from "ai"
+import { streamText, generateText, ToolSet, ModelMessage } from "ai"
 
 import { basetenModels, basetenDefaultModelId, type ModelInfo } from "@roo-code/types"
 
@@ -21,7 +21,6 @@ import { DEFAULT_HEADERS } from "./constants"
 import { BaseProvider } from "./base-provider"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
 import type { RooMessage } from "../../core/task-persistence/rooMessage"
-import { sanitizeMessagesForProvider } from "../transform/sanitize-messages"
 
 const BASETEN_DEFAULT_TEMPERATURE = 0.5
 
@@ -116,7 +115,7 @@ export class BasetenHandler extends BaseProvider implements SingleCompletionHand
 		const { temperature } = this.getModel()
 		const languageModel = this.getLanguageModel()
 
-		const aiSdkMessages = sanitizeMessagesForProvider(messages)
+		const aiSdkMessages = messages as ModelMessage[]
 
 		const openAiTools = this.convertToolsForOpenAI(metadata?.tools)
 		const aiSdkTools = convertToolsForAiSdk(openAiTools) as ToolSet | undefined

+ 3 - 4
src/api/providers/deepseek.ts

@@ -1,6 +1,6 @@
 import { Anthropic } from "@anthropic-ai/sdk"
 import { createDeepSeek } from "@ai-sdk/deepseek"
-import { streamText, generateText, ToolSet } from "ai"
+import { streamText, generateText, ToolSet, ModelMessage } from "ai"
 
 import { deepSeekModels, deepSeekDefaultModelId, DEEP_SEEK_DEFAULT_TEMPERATURE, type ModelInfo } from "@roo-code/types"
 
@@ -21,7 +21,6 @@ import { DEFAULT_HEADERS } from "./constants"
 import { BaseProvider } from "./base-provider"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
 import type { RooMessage } from "../../core/task-persistence/rooMessage"
-import { sanitizeMessagesForProvider } from "../transform/sanitize-messages"
 
 /**
  * DeepSeek provider using the dedicated @ai-sdk/deepseek package.
@@ -134,8 +133,8 @@ export class DeepSeekHandler extends BaseProvider implements SingleCompletionHan
 		const { temperature } = this.getModel()
 		const languageModel = this.getLanguageModel()
 
-		// Sanitize messages for the provider API (allowlist: role, content, providerOptions).
-		const aiSdkMessages = sanitizeMessagesForProvider(messages)
+		// Convert messages to AI SDK format
+		const aiSdkMessages = messages as ModelMessage[]
 
 		// Convert tools to OpenAI format first, then to AI SDK format
 		const openAiTools = this.convertToolsForOpenAI(metadata?.tools)

+ 3 - 4
src/api/providers/fireworks.ts

@@ -1,6 +1,6 @@
 import { Anthropic } from "@anthropic-ai/sdk"
 import { createFireworks } from "@ai-sdk/fireworks"
-import { streamText, generateText, ToolSet } from "ai"
+import { streamText, generateText, ToolSet, ModelMessage } from "ai"
 
 import { fireworksModels, fireworksDefaultModelId, type ModelInfo } from "@roo-code/types"
 
@@ -21,7 +21,6 @@ import { DEFAULT_HEADERS } from "./constants"
 import { BaseProvider } from "./base-provider"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
 import type { RooMessage } from "../../core/task-persistence/rooMessage"
-import { sanitizeMessagesForProvider } from "../transform/sanitize-messages"
 
 const FIREWORKS_DEFAULT_TEMPERATURE = 0.5
 
@@ -134,8 +133,8 @@ export class FireworksHandler extends BaseProvider implements SingleCompletionHa
 		const { temperature } = this.getModel()
 		const languageModel = this.getLanguageModel()
 
-		// Sanitize messages for the provider API (allowlist: role, content, providerOptions).
-		const aiSdkMessages = sanitizeMessagesForProvider(messages)
+		// Convert messages to AI SDK format
+		const aiSdkMessages = messages as ModelMessage[]
 
 		// Convert tools to OpenAI format first, then to AI SDK format
 		const openAiTools = this.convertToolsForOpenAI(metadata?.tools)

+ 10 - 3
src/api/providers/lm-studio.ts

@@ -1,5 +1,13 @@
 import { Anthropic } from "@anthropic-ai/sdk"
-import { streamText, generateText, ToolSet, wrapLanguageModel, extractReasoningMiddleware, LanguageModel } from "ai"
+import {
+	streamText,
+	generateText,
+	ToolSet,
+	wrapLanguageModel,
+	extractReasoningMiddleware,
+	LanguageModel,
+	ModelMessage,
+} from "ai"
 
 import { type ModelInfo, openAiModelInfoSaneDefaults, LMSTUDIO_DEFAULT_TEMPERATURE } from "@roo-code/types"
 
@@ -19,7 +27,6 @@ import { OpenAICompatibleHandler, OpenAICompatibleConfig } from "./openai-compat
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
 import { getModelsFromCache } from "./fetchers/modelCache"
 import type { RooMessage } from "../../core/task-persistence/rooMessage"
-import { sanitizeMessagesForProvider } from "../transform/sanitize-messages"
 
 export class LmStudioHandler extends OpenAICompatibleHandler implements SingleCompletionHandler {
 	constructor(options: ApiHandlerOptions) {
@@ -58,7 +65,7 @@ export class LmStudioHandler extends OpenAICompatibleHandler implements SingleCo
 		const model = this.getModel()
 		const languageModel = this.getLanguageModel()
 
-		const aiSdkMessages = sanitizeMessagesForProvider(messages)
+		const aiSdkMessages = messages as ModelMessage[]
 
 		const openAiTools = this.convertToolsForOpenAI(metadata?.tools)
 		const aiSdkTools = convertToolsForAiSdk(openAiTools) as ToolSet | undefined

+ 2 - 3
src/api/providers/minimax.ts

@@ -1,6 +1,6 @@
 import type { Anthropic } from "@anthropic-ai/sdk"
 import { createAnthropic } from "@ai-sdk/anthropic"
-import { streamText, generateText, ToolSet } from "ai"
+import { streamText, generateText, ToolSet, ModelMessage } from "ai"
 
 import { type ModelInfo, minimaxDefaultModelId, minimaxModels } from "@roo-code/types"
 
@@ -23,7 +23,6 @@ import { DEFAULT_HEADERS } from "./constants"
 import { BaseProvider } from "./base-provider"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
 import type { RooMessage } from "../../core/task-persistence/rooMessage"
-import { sanitizeMessagesForProvider } from "../transform/sanitize-messages"
 
 export class MiniMaxHandler extends BaseProvider implements SingleCompletionHandler {
 	private client: ReturnType<typeof createAnthropic>
@@ -74,7 +73,7 @@ export class MiniMaxHandler extends BaseProvider implements SingleCompletionHand
 		})
 
 		const mergedMessages = mergeEnvironmentDetailsForMiniMax(messages as any)
-		const aiSdkMessages = sanitizeMessagesForProvider(mergedMessages as RooMessage[])
+		const aiSdkMessages = mergedMessages as ModelMessage[]
 		const openAiTools = this.convertToolsForOpenAI(metadata?.tools)
 		const aiSdkTools = convertToolsForAiSdk(openAiTools) as ToolSet | undefined
 		applyToolCacheOptions(aiSdkTools as Parameters<typeof applyToolCacheOptions>[0], metadata?.toolProviderOptions)

+ 2 - 3
src/api/providers/mistral.ts

@@ -1,6 +1,6 @@
 import { Anthropic } from "@anthropic-ai/sdk"
 import { createMistral } from "@ai-sdk/mistral"
-import { streamText, generateText, ToolSet, LanguageModel } from "ai"
+import { streamText, generateText, ToolSet, LanguageModel, ModelMessage } from "ai"
 
 import {
 	mistralModels,
@@ -21,7 +21,6 @@ import { DEFAULT_HEADERS } from "./constants"
 import { BaseProvider } from "./base-provider"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
 import type { RooMessage } from "../../core/task-persistence/rooMessage"
-import { sanitizeMessagesForProvider } from "../transform/sanitize-messages"
 
 /**
  * Mistral provider using the dedicated @ai-sdk/mistral package.
@@ -158,7 +157,7 @@ export class MistralHandler extends BaseProvider implements SingleCompletionHand
 		const languageModel = this.getLanguageModel()
 
 		// Convert messages to AI SDK format
-		const aiSdkMessages = sanitizeMessagesForProvider(messages)
+		const aiSdkMessages = messages as ModelMessage[]
 
 		// Convert tools to OpenAI format first, then to AI SDK format
 		const openAiTools = this.convertToolsForOpenAI(metadata?.tools)

+ 2 - 3
src/api/providers/native-ollama.ts

@@ -1,6 +1,6 @@
 import { Anthropic } from "@anthropic-ai/sdk"
 import { createOllama } from "ollama-ai-provider-v2"
-import { streamText, generateText, ToolSet } from "ai"
+import { streamText, generateText, ToolSet, ModelMessage } from "ai"
 
 import { ModelInfo, openAiModelInfoSaneDefaults, DEEP_SEEK_DEFAULT_TEMPERATURE } from "@roo-code/types"
 
@@ -21,7 +21,6 @@ import { BaseProvider } from "./base-provider"
 import { getOllamaModels } from "./fetchers/ollama"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
 import type { RooMessage } from "../../core/task-persistence/rooMessage"
-import { sanitizeMessagesForProvider } from "../transform/sanitize-messages"
 
 /**
  * NativeOllamaHandler using the ollama-ai-provider-v2 AI SDK community provider.
@@ -97,7 +96,7 @@ export class NativeOllamaHandler extends BaseProvider implements SingleCompletio
 
 		const languageModel = this.getLanguageModel()
 
-		const aiSdkMessages = sanitizeMessagesForProvider(messages)
+		const aiSdkMessages = messages as ModelMessage[]
 
 		const openAiTools = this.convertToolsForOpenAI(metadata?.tools)
 		const aiSdkTools = convertToolsForAiSdk(openAiTools) as ToolSet | undefined

+ 3 - 4
src/api/providers/openai-compatible.ts

@@ -6,7 +6,7 @@
 import { Anthropic } from "@anthropic-ai/sdk"
 import OpenAI from "openai"
 import { createOpenAICompatible } from "@ai-sdk/openai-compatible"
-import { streamText, generateText, LanguageModel, ToolSet } from "ai"
+import { streamText, generateText, LanguageModel, ToolSet, ModelMessage } from "ai"
 
 import type { ModelInfo } from "@roo-code/types"
 
@@ -26,7 +26,6 @@ import { DEFAULT_HEADERS } from "./constants"
 import { BaseProvider } from "./base-provider"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
 import type { RooMessage } from "../../core/task-persistence/rooMessage"
-import { sanitizeMessagesForProvider } from "../transform/sanitize-messages"
 
 /**
  * Configuration options for creating an OpenAI-compatible provider.
@@ -155,8 +154,8 @@ export abstract class OpenAICompatibleHandler extends BaseProvider implements Si
 		const model = this.getModel()
 		const languageModel = this.getLanguageModel()
 
-		// Sanitize messages for the provider API (allowlist: role, content, providerOptions).
-		const aiSdkMessages = sanitizeMessagesForProvider(messages)
+		// Convert messages to AI SDK format
+		const aiSdkMessages = messages as ModelMessage[]
 
 		// Convert tools to OpenAI format first, then to AI SDK format
 		const openAiTools = this.convertToolsForOpenAI(metadata?.tools)

+ 1 - 2
src/api/providers/openai.ts

@@ -32,7 +32,6 @@ import { DEFAULT_HEADERS } from "./constants"
 import { BaseProvider } from "./base-provider"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
 import type { RooMessage } from "../../core/task-persistence/rooMessage"
-import { sanitizeMessagesForProvider } from "../transform/sanitize-messages"
 
 // TODO: Rename this to OpenAICompatibleHandler. Also, I think the
 // `OpenAINativeHandler` can subclass from this, since it's obviously
@@ -108,7 +107,7 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
 
 		const languageModel = this.getLanguageModel()
 
-		const aiSdkMessages = sanitizeMessagesForProvider(messages)
+		const aiSdkMessages = messages as ModelMessage[]
 
 		const openAiTools = this.convertToolsForOpenAI(metadata?.tools)
 		const aiSdkTools = convertToolsForAiSdk(openAiTools) as ToolSet | undefined

+ 2 - 4
src/api/providers/openrouter.ts

@@ -1,6 +1,6 @@
 import { Anthropic } from "@anthropic-ai/sdk"
 import { createOpenRouter } from "@openrouter/ai-sdk-provider"
-import { streamText, generateText } from "ai"
+import { streamText, generateText, ModelMessage } from "ai"
 
 import {
 	type ModelRecord,
@@ -34,7 +34,6 @@ import { generateImageWithProvider, ImageGenerationResult } from "./utils/image-
 import type { ApiHandlerCreateMessageMetadata, SingleCompletionHandler } from "../index"
 import type { ApiStreamChunk, ApiStreamUsageChunk } from "../transform/stream"
 import type { RooMessage } from "../../core/task-persistence/rooMessage"
-import { sanitizeMessagesForProvider } from "../transform/sanitize-messages"
 
 export class OpenRouterHandler extends BaseProvider implements SingleCompletionHandler {
 	protected options: ApiHandlerOptions
@@ -153,8 +152,7 @@ export class OpenRouterHandler extends BaseProvider implements SingleCompletionH
 			? { "x-anthropic-beta": "fine-grained-tool-streaming-2025-05-14" }
 			: undefined
 
-		// Sanitize messages for the provider API (allowlist: role, content, providerOptions).
-		const aiSdkMessages = sanitizeMessagesForProvider(messages)
+		const aiSdkMessages = messages as ModelMessage[]
 
 		const openrouter = this.createOpenRouterProvider({ reasoning, headers })
 

+ 2 - 3
src/api/providers/requesty.ts

@@ -1,6 +1,6 @@
 import { Anthropic } from "@anthropic-ai/sdk"
 import { createRequesty, type RequestyProviderMetadata } from "@requesty/ai-sdk"
-import { streamText, generateText, ToolSet } from "ai"
+import { streamText, generateText, ToolSet, ModelMessage } from "ai"
 
 import { type ModelInfo, type ModelRecord, requestyDefaultModelId, requestyDefaultModelInfo } from "@roo-code/types"
 
@@ -25,7 +25,6 @@ import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from ".
 import { toRequestyServiceUrl } from "../../shared/utils/requesty"
 import { applyRouterToolPreferences } from "./utils/router-tool-preferences"
 import type { RooMessage } from "../../core/task-persistence/rooMessage"
-import { sanitizeMessagesForProvider } from "../transform/sanitize-messages"
 
 /**
  * Requesty provider using the dedicated @requesty/ai-sdk package.
@@ -196,7 +195,7 @@ export class RequestyHandler extends BaseProvider implements SingleCompletionHan
 		const { info, temperature } = await this.fetchModel()
 		const languageModel = this.getLanguageModel()
 
-		const aiSdkMessages = sanitizeMessagesForProvider(messages)
+		const aiSdkMessages = messages as ModelMessage[]
 
 		const openAiTools = this.convertToolsForOpenAI(metadata?.tools)
 		const aiSdkTools = convertToolsForAiSdk(openAiTools) as ToolSet | undefined

+ 3 - 4
src/api/providers/roo.ts

@@ -1,6 +1,6 @@
 import { Anthropic } from "@anthropic-ai/sdk"
 import { createOpenAICompatible } from "@ai-sdk/openai-compatible"
-import { streamText, generateText } from "ai"
+import { streamText, generateText, type ModelMessage } from "ai"
 
 import { rooDefaultModelId, getApiProtocol, type ImageGenerationApiMethod } from "@roo-code/types"
 import { CloudService } from "@roo-code/cloud"
@@ -27,7 +27,6 @@ import { getModels, getModelsFromCache } from "./fetchers/modelCache"
 import { generateImageWithProvider, generateImageWithImagesApi, ImageGenerationResult } from "./utils/image-generation"
 import { t } from "../../i18n"
 import type { RooMessage } from "../../core/task-persistence/rooMessage"
-import { sanitizeMessagesForProvider } from "../transform/sanitize-messages"
 
 type RooProviderMetadata = {
 	cost?: number
@@ -157,8 +156,8 @@ export class RooHandler extends BaseProvider implements SingleCompletionHandler
 		// Create per-request provider with fresh session token
 		const provider = this.createRooProvider({ reasoning, taskId: metadata?.taskId })
 
-		// Sanitize messages for the provider API (allowlist: role, content, providerOptions).
-		const aiSdkMessages = sanitizeMessagesForProvider(messages)
+		// RooMessage[] is already AI SDK-compatible, cast directly
+		const aiSdkMessages = messages as ModelMessage[]
 		const tools = convertToolsForAiSdk(this.convertToolsForOpenAI(metadata?.tools))
 		applyToolCacheOptions(tools as Parameters<typeof applyToolCacheOptions>[0], metadata?.toolProviderOptions)
 

+ 5 - 8
src/api/providers/sambanova.ts

@@ -1,6 +1,6 @@
 import { Anthropic } from "@anthropic-ai/sdk"
 import { createSambaNova } from "sambanova-ai-provider"
-import { streamText, generateText, ToolSet } from "ai"
+import { streamText, generateText, ToolSet, ModelMessage } from "ai"
 
 import { sambaNovaModels, sambaNovaDefaultModelId, type ModelInfo } from "@roo-code/types"
 
@@ -22,7 +22,6 @@ import { DEFAULT_HEADERS } from "./constants"
 import { BaseProvider } from "./base-provider"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
 import type { RooMessage } from "../../core/task-persistence/rooMessage"
-import { sanitizeMessagesForProvider } from "../transform/sanitize-messages"
 
 const SAMBANOVA_DEFAULT_TEMPERATURE = 0.7
 
@@ -135,12 +134,10 @@ export class SambaNovaHandler extends BaseProvider implements SingleCompletionHa
 		const { temperature, info } = this.getModel()
 		const languageModel = this.getLanguageModel()
 
-		// Sanitize messages for the provider API (allowlist: role, content, providerOptions).
-		// For models that don't support multi-part content (like DeepSeek), flatten to string content.
-		const sanitizedMessages = sanitizeMessagesForProvider(messages)
-		const aiSdkMessages = info.supportsImages
-			? sanitizedMessages
-			: flattenAiSdkMessagesToStringContent(sanitizedMessages)
+		// For models that don't support multi-part content (like DeepSeek), flatten messages to string content
+		// SambaNova's DeepSeek models expect string content, not array content
+		const castMessages = messages as ModelMessage[]
+		const aiSdkMessages = info.supportsImages ? castMessages : flattenAiSdkMessagesToStringContent(castMessages)
 
 		// Convert tools to OpenAI format first, then to AI SDK format
 		const openAiTools = this.convertToolsForOpenAI(metadata?.tools)
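
flattenAiSdkMessagesToStringContent comes from the transform layer and its source is not part of this diff; under that caveat, a hypothetical sketch of the flattening the comment describes:

	import type { ModelMessage } from "ai"

	// Hypothetical sketch: collapse array-shaped content to a plain string by
	// joining the text parts, for models that reject multi-part content.
	function flattenToStringContent(messages: ModelMessage[]): ModelMessage[] {
		return messages.map((msg) => {
			if (typeof msg.content === "string") {
				return msg
			}
			const parts = msg.content as Array<{ type: string; text?: string }>
			const text = parts
				.filter((part) => part.type === "text" && typeof part.text === "string")
				.map((part) => part.text)
				.join("\n")
			return { ...msg, content: text } as ModelMessage
		})
	}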

+ 2 - 3
src/api/providers/vercel-ai-gateway.ts

@@ -1,5 +1,5 @@
 import { Anthropic } from "@anthropic-ai/sdk"
-import { createGateway, streamText, generateText, ToolSet } from "ai"
+import { createGateway, streamText, generateText, ToolSet, ModelMessage } from "ai"
 
 import {
 	vercelAiGatewayDefaultModelId,
@@ -27,7 +27,6 @@ import { BaseProvider } from "./base-provider"
 import { getModels, getModelsFromCache } from "./fetchers/modelCache"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
 import type { RooMessage } from "../../core/task-persistence/rooMessage"
-import { sanitizeMessagesForProvider } from "../transform/sanitize-messages"
 
 /**
  * Vercel AI Gateway provider using the built-in AI SDK gateway support.
@@ -136,7 +135,7 @@ export class VercelAiGatewayHandler extends BaseProvider implements SingleComple
 		const { id: modelId, info } = await this.fetchModel()
 		const languageModel = this.getLanguageModel(modelId)
 
-		const aiSdkMessages = sanitizeMessagesForProvider(messages)
+		const aiSdkMessages = messages as ModelMessage[]
 
 		const openAiTools = this.convertToolsForOpenAI(metadata?.tools)
 		const aiSdkTools = convertToolsForAiSdk(openAiTools) as ToolSet | undefined

+ 2 - 3
src/api/providers/zai.ts

@@ -1,6 +1,6 @@
 import { Anthropic } from "@anthropic-ai/sdk"
 import { createZhipu } from "zhipu-ai-provider"
-import { streamText, generateText, ToolSet } from "ai"
+import { streamText, generateText, ToolSet, ModelMessage } from "ai"
 
 import {
 	internationalZAiModels,
@@ -29,7 +29,6 @@ import { DEFAULT_HEADERS } from "./constants"
 import { BaseProvider } from "./base-provider"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
 import type { RooMessage } from "../../core/task-persistence/rooMessage"
-import { sanitizeMessagesForProvider } from "../transform/sanitize-messages"
 
 /**
  * Z.ai provider using the dedicated zhipu-ai-provider package.
@@ -100,7 +99,7 @@ export class ZAiHandler extends BaseProvider implements SingleCompletionHandler
 		const { id: modelId, info, temperature } = this.getModel()
 		const languageModel = this.getLanguageModel()
 
-		const aiSdkMessages = sanitizeMessagesForProvider(messages)
+		const aiSdkMessages = messages as ModelMessage[]
 
 		const openAiTools = this.convertToolsForOpenAI(metadata?.tools)
 		const aiSdkTools = convertToolsForAiSdk(openAiTools) as ToolSet | undefined

+ 0 - 172
src/api/transform/__tests__/sanitize-messages.spec.ts

@@ -1,172 +0,0 @@
-import { sanitizeMessagesForProvider } from "../sanitize-messages"
-import type { RooMessage } from "../../../core/task-persistence/rooMessage"
-
-describe("sanitizeMessagesForProvider", () => {
-	it("should preserve role and content on user messages", () => {
-		const messages: RooMessage[] = [{ role: "user", content: [{ type: "text", text: "Hello" }] }]
-
-		const result = sanitizeMessagesForProvider(messages)
-
-		expect(result).toHaveLength(1)
-		expect(result[0]).toEqual({
-			role: "user",
-			content: [{ type: "text", text: "Hello" }],
-		})
-	})
-
-	it("should preserve role, content, and providerOptions on assistant messages", () => {
-		const messages: RooMessage[] = [
-			{
-				role: "assistant",
-				content: [{ type: "text", text: "Hi" }],
-				providerOptions: { openrouter: { reasoning_details: [{ type: "reasoning.text", text: "thinking" }] } },
-			} as any,
-		]
-
-		const result = sanitizeMessagesForProvider(messages)
-
-		expect(result).toHaveLength(1)
-		expect(result[0]).toEqual({
-			role: "assistant",
-			content: [{ type: "text", text: "Hi" }],
-			providerOptions: { openrouter: { reasoning_details: [{ type: "reasoning.text", text: "thinking" }] } },
-		})
-	})
-
-	it("should strip reasoning_details from messages", () => {
-		const messages = [
-			{
-				role: "assistant",
-				content: [{ type: "text", text: "Response" }],
-				reasoning_details: [{ type: "reasoning.encrypted", data: "encrypted_data" }],
-			},
-		] as any as RooMessage[]
-
-		const result = sanitizeMessagesForProvider(messages)
-
-		expect(result).toHaveLength(1)
-		expect(result[0]).not.toHaveProperty("reasoning_details")
-		expect(result[0]).toEqual({
-			role: "assistant",
-			content: [{ type: "text", text: "Response" }],
-		})
-	})
-
-	it("should strip reasoning_content from messages", () => {
-		const messages = [
-			{
-				role: "assistant",
-				content: [{ type: "text", text: "Response" }],
-				reasoning_content: "some reasoning content",
-			},
-		] as any as RooMessage[]
-
-		const result = sanitizeMessagesForProvider(messages)
-
-		expect(result).toHaveLength(1)
-		expect(result[0]).not.toHaveProperty("reasoning_content")
-	})
-
-	it("should strip metadata fields (ts, condenseId, etc.)", () => {
-		const messages = [
-			{
-				role: "user",
-				content: "Hello",
-				ts: 1234567890,
-				condenseId: "cond-1",
-				condenseParent: "cond-0",
-				truncationId: "trunc-1",
-				truncationParent: "trunc-0",
-				isTruncationMarker: true,
-				isSummary: true,
-			},
-		] as any as RooMessage[]
-
-		const result = sanitizeMessagesForProvider(messages)
-
-		expect(result).toHaveLength(1)
-		expect(result[0]).toEqual({
-			role: "user",
-			content: "Hello",
-		})
-		expect(result[0]).not.toHaveProperty("ts")
-		expect(result[0]).not.toHaveProperty("condenseId")
-		expect(result[0]).not.toHaveProperty("condenseParent")
-		expect(result[0]).not.toHaveProperty("truncationId")
-		expect(result[0]).not.toHaveProperty("truncationParent")
-		expect(result[0]).not.toHaveProperty("isTruncationMarker")
-		expect(result[0]).not.toHaveProperty("isSummary")
-	})
-
-	it("should strip any unknown extra fields", () => {
-		const messages = [
-			{
-				role: "assistant",
-				content: [{ type: "text", text: "Hi" }],
-				some_future_field: "should be stripped",
-				another_unknown: 42,
-			},
-		] as any as RooMessage[]
-
-		const result = sanitizeMessagesForProvider(messages)
-
-		expect(result).toHaveLength(1)
-		expect(result[0]).not.toHaveProperty("some_future_field")
-		expect(result[0]).not.toHaveProperty("another_unknown")
-	})
-
-	it("should not include providerOptions key when undefined", () => {
-		const messages: RooMessage[] = [{ role: "user", content: "Hello" }]
-
-		const result = sanitizeMessagesForProvider(messages)
-
-		expect(result).toHaveLength(1)
-		expect(Object.keys(result[0])).toEqual(["role", "content"])
-	})
-
-	it("should handle mixed message types correctly", () => {
-		const messages = [
-			{
-				role: "user",
-				content: [{ type: "text", text: "Hello" }],
-				ts: 100,
-			},
-			{
-				role: "assistant",
-				content: [{ type: "text", text: "Hi" }],
-				reasoning_details: [{ type: "thinking", thinking: "some reasoning" }],
-				reasoning_content: "some reasoning content",
-				ts: 200,
-			},
-			{
-				role: "tool",
-				content: [{ type: "tool-result", toolCallId: "call_1", toolName: "test", result: "ok" }],
-				ts: 300,
-			},
-			{
-				role: "user",
-				content: [{ type: "text", text: "Follow up" }],
-				ts: 400,
-			},
-		] as any as RooMessage[]
-
-		const result = sanitizeMessagesForProvider(messages)
-
-		expect(result).toHaveLength(4)
-
-		for (const msg of result) {
-			expect(msg).not.toHaveProperty("reasoning_details")
-			expect(msg).not.toHaveProperty("reasoning_content")
-			expect(msg).not.toHaveProperty("ts")
-		}
-
-		expect(result[0]).toEqual({
-			role: "user",
-			content: [{ type: "text", text: "Hello" }],
-		})
-		expect(result[1]).toEqual({
-			role: "assistant",
-			content: [{ type: "text", text: "Hi" }],
-		})
-	})
-})

+ 0 - 32
src/api/transform/sanitize-messages.ts

@@ -1,32 +0,0 @@
-import type { ModelMessage } from "ai"
-import type { RooMessage, RooRoleMessage } from "../../core/task-persistence/rooMessage"
-import { isRooReasoningMessage } from "../../core/task-persistence/rooMessage"
-
-/**
- * Sanitize RooMessage[] for provider APIs by allowlisting only the fields
- * that the AI SDK expects on each message.
- *
- * Legacy fields like `reasoning_details`, `reasoning_content`, `ts`, `condenseId`,
- * etc. survive JSON deserialization round-trips and cause providers to reject
- * requests with "Extra inputs are not permitted" (Anthropic 400) or similar errors.
- *
- * This uses an allowlist approach: only `role`, `content`, and `providerOptions`
- * are forwarded, ensuring any future extraneous fields are also stripped.
- *
- * RooReasoningMessage items (standalone encrypted reasoning with no `role`) are
- * filtered out since they have no AI SDK equivalent.
- */
-export function sanitizeMessagesForProvider(messages: RooMessage[]): ModelMessage[] {
-	return messages
-		.filter((msg): msg is RooRoleMessage => !isRooReasoningMessage(msg))
-		.map((msg) => {
-			const clean: Record<string, unknown> = {
-				role: msg.role,
-				content: msg.content,
-			}
-			if (msg.providerOptions !== undefined) {
-				clean.providerOptions = msg.providerOptions
-			}
-			return clean as ModelMessage
-		})
-}

+ 0 - 101
src/utils/__tests__/json-schema.spec.ts

@@ -585,105 +585,4 @@ describe("normalizeToolSchema", () => {
 			})
 		})
 	})
-
-	describe("$ref stripping", () => {
-		it("should strip $ref at the top level", () => {
-			const input = {
-				$ref: "#/components/schemas/Foo",
-				type: "object",
-				properties: {
-					name: { type: "string" },
-				},
-			}
-
-			const result = normalizeToolSchema(input)
-
-			expect(result.$ref).toBeUndefined()
-			expect(result.type).toBe("object")
-			expect(result.properties).toBeDefined()
-		})
-
-		it("should strip $ref in nested properties", () => {
-			const input = {
-				type: "object",
-				properties: {
-					response: {
-						$ref: "#/components/schemas/PlanningLogResponse",
-						type: "object",
-						properties: {
-							status: { type: "string" },
-						},
-					},
-				},
-			}
-
-			const result = normalizeToolSchema(input)
-
-			const props = result.properties as Record<string, Record<string, unknown>>
-			expect(props.response.$ref).toBeUndefined()
-			expect(props.response.type).toBe("object")
-		})
-
-		it("should strip $ref in deeply nested array items", () => {
-			const input = {
-				type: "object",
-				properties: {
-					items: {
-						type: "array",
-						items: {
-							type: "object",
-							properties: {
-								ref_field: {
-									$ref: "#/components/schemas/Nested",
-									type: "string",
-								},
-							},
-						},
-					},
-				},
-			}
-
-			const result = normalizeToolSchema(input)
-
-			const props = result.properties as Record<string, Record<string, unknown>>
-			const itemsSchema = props.items.items as Record<string, unknown>
-			const nestedProps = itemsSchema.properties as Record<string, Record<string, unknown>>
-			expect(nestedProps.ref_field.$ref).toBeUndefined()
-			expect(nestedProps.ref_field.type).toBe("string")
-		})
-
-		it("should handle $ref-only schema without crashing", () => {
-			const input = {
-				$ref: "#/components/schemas/Foo",
-			}
-
-			const result = normalizeToolSchema(input)
-
-			expect(result.$ref).toBeUndefined()
-			// Result is an empty schema (permissive) — acceptable for tool inputs
-			expect(result).not.toBeNull()
-			expect(result).toEqual({})
-		})
-
-		it("should strip $ref inside anyOf variants", () => {
-			const input = {
-				type: "object",
-				properties: {
-					field: {
-						anyOf: [
-							{ $ref: "#/components/schemas/TypeA", type: "string" },
-							{ type: "null" },
-						],
-					},
-				},
-			}
-
-			const result = normalizeToolSchema(input)
-
-			const props = result.properties as Record<string, Record<string, unknown>>
-			const anyOf = props.field.anyOf as Record<string, unknown>[]
-			expect(anyOf[0].$ref).toBeUndefined()
-			expect(anyOf[0].type).toBe("string")
-		})
-	})
 })

+ 0 - 1
src/utils/json-schema.ts

@@ -165,7 +165,6 @@ const NormalizedToolSchemaInternal: z.ZodType<Record<string, unknown>, z.ZodType
 					minItems,
 					maxItems,
 					uniqueItems,
-					$ref: _ref, // Strip $ref — unresolvable references break provider APIs (e.g. Gemini 400)
 					...rest
 				} = schema
 				const result: Record<string, unknown> = { ...rest }
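
The removed line relied on rest-destructuring to omit a key while copying the remainder; a standalone illustration of the idiom (illustrative values):

	const schema = { $ref: "#/components/schemas/Foo", type: "object" } as Record<string, unknown>
	// Binding $ref to a throwaway name keeps it out of `rest`.
	const { $ref: _ref, ...rest } = schema
	// rest is { type: "object" }; with this revert, $ref now passes through to providers instead.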