
fix: validate Gemini thinkingLevel against model capabilities and handle empty streams (#11303)

* fix: validate Gemini thinkingLevel against model capabilities and handle empty streams

getGeminiReasoning() now validates the selected effort against the model's
supportsReasoningEffort array before sending it as thinkingLevel. When a
stale settings value (e.g. 'medium' from a different model) is not in the
supported set, it falls back to the model's default reasoningEffort.
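
A hedged sketch of the fallback in use, mirroring the spec cases added below (baseModel and the option shapes are borrowed from those tests; this is illustrative, not the diff itself):

// Model akin to gemini-3-pro-preview: only ["low", "high"] are supported.
const model: ModelInfo = {
	...baseModel,
	supportsReasoningEffort: ["low", "high"] as ModelInfo["supportsReasoningEffort"],
	reasoningEffort: "low",
}
const settings: ProviderSettings = { apiProvider: "gemini", reasoningEffort: "medium" }

// Stale "medium" is valid for Gemini but unsupported by this model,
// so the result falls back to the model default:
getGeminiReasoning({ model, reasoningBudget: undefined, reasoningEffort: "medium", settings })
// → { thinkingLevel: "low", includeThoughts: true }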

GeminiHandler.createMessage() now tracks whether any text content was
yielded during streaming and handles NoOutputGeneratedError gracefully
instead of surfacing the cryptic 'No output generated' error.
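
Consumer-side, the change looks roughly like this (a minimal sketch, assuming a handler set up as in the tests below):

const stream = handler.createMessage(systemPrompt, messages)
for await (const chunk of stream) {
	// A thinking-only or safety-blocked stream now yields one explanatory
	// text chunk instead of throwing "No output generated".
	if (chunk.type === "text") console.log(chunk.text)
}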

* fix: guard thinkingLevel fallback against 'none' effort and add i18n TODO

The array validation fallback in getGeminiReasoning() now only triggers
when the selected effort IS a valid Gemini thinking level but not in
the model's supported set. Values like 'none' (an explicit no-reasoning
signal) are no longer overridden by the model default.

Also adds a TODO for moving the empty-stream message to i18n.
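
For contrast, a hedged sketch of the 'none' path (matching the annotated spec assertion below, and reusing the model shape from the sketch above): 'none' is not a valid Gemini thinking level, so the array fallback never fires and no thinking config is sent.

// "none" signals no reasoning; it is not coerced to model.reasoningEffort:
getGeminiReasoning({
	model,
	reasoningBudget: undefined,
	reasoningEffort: "none",
	settings: { apiProvider: "gemini", reasoningEffort: "none" },
})
// → undefined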

* fix: track tool_call_start in hasContent to avoid false empty-stream warning

Tool-only responses (no text) are valid content. Without this,
agentic tool-call responses would incorrectly trigger the empty
response warning message.
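
In sketch form, the content check now covers both chunk kinds (the real change is the hasContent tracking in the gemini.ts diff below):

if (chunk.type === "text" || chunk.type === "tool_call_start") {
	// Tool-only responses count as content, so agentic tool calls
	// no longer trigger the empty-response warning.
	hasContent = true
}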
Hannes Rudolph, 6 days ago
parent commit 12cddc9697

src/api/providers/__tests__/gemini.spec.ts (+80 -0)

@@ -1,5 +1,7 @@
 // npx vitest run src/api/providers/__tests__/gemini.spec.ts
 
+import { NoOutputGeneratedError } from "ai"
+
 const mockCaptureException = vitest.fn()
 
 vitest.mock("@roo-code/telemetry", () => ({
@@ -149,6 +151,84 @@ describe("GeminiHandler", () => {
 			)
 		})
 
+		it("should yield informative message when stream produces no text content", async () => {
+			// Stream with only reasoning (no text-delta) simulates thinking-only response
+			const mockFullStream = (async function* () {
+				yield { type: "reasoning-delta", id: "1", text: "thinking..." }
+			})()
+
+			mockStreamText.mockReturnValue({
+				fullStream: mockFullStream,
+				usage: Promise.resolve({ inputTokens: 10, outputTokens: 0 }),
+				providerMetadata: Promise.resolve({}),
+			})
+
+			const stream = handler.createMessage(systemPrompt, mockMessages)
+			const chunks = []
+
+			for await (const chunk of stream) {
+				chunks.push(chunk)
+			}
+
+			// Should have: reasoning chunk, empty-stream informative message, usage
+			const textChunks = chunks.filter((c) => c.type === "text")
+			expect(textChunks).toHaveLength(1)
+			expect(textChunks[0]).toEqual({
+				type: "text",
+				text: "Model returned an empty response. This may be caused by an unsupported thinking configuration or content filtering.",
+			})
+		})
+
+		it("should suppress NoOutputGeneratedError when no text content was yielded", async () => {
+			// Empty stream - nothing yielded at all
+			const mockFullStream = (async function* () {
+				// empty stream
+			})()
+
+			mockStreamText.mockReturnValue({
+				fullStream: mockFullStream,
+				usage: Promise.reject(new NoOutputGeneratedError({ message: "No output generated." })),
+				providerMetadata: Promise.resolve({}),
+			})
+
+			const stream = handler.createMessage(systemPrompt, mockMessages)
+			const chunks = []
+
+			// Should NOT throw - the error is suppressed
+			for await (const chunk of stream) {
+				chunks.push(chunk)
+			}
+
+			// Should have the informative empty-stream message only (no usage since it errored)
+			const textChunks = chunks.filter((c) => c.type === "text")
+			expect(textChunks).toHaveLength(1)
+			expect(textChunks[0]).toMatchObject({
+				type: "text",
+				text: expect.stringContaining("empty response"),
+			})
+		})
+
+		it("should re-throw NoOutputGeneratedError when text content was yielded", async () => {
+			// Stream yields text content but usage still throws NoOutputGeneratedError (unexpected)
+			const mockFullStream = (async function* () {
+				yield { type: "text-delta", text: "Hello" }
+			})()
+
+			mockStreamText.mockReturnValue({
+				fullStream: mockFullStream,
+				usage: Promise.reject(new NoOutputGeneratedError({ message: "No output generated." })),
+				providerMetadata: Promise.resolve({}),
+			})
+
+			const stream = handler.createMessage(systemPrompt, mockMessages)
+
+			await expect(async () => {
+				for await (const _chunk of stream) {
+					// consume stream
+				}
+			}).rejects.toThrow()
+		})
+
 		it("should handle API errors", async () => {
 			const mockError = new Error("Gemini API error")
 			// eslint-disable-next-line require-yield

src/api/providers/gemini.ts (+33 -4)

@@ -1,6 +1,6 @@
 import type { Anthropic } from "@anthropic-ai/sdk"
 import { createGoogleGenerativeAI, type GoogleGenerativeAIProvider } from "@ai-sdk/google"
-import { streamText, generateText, ToolSet } from "ai"
+import { streamText, generateText, NoOutputGeneratedError, ToolSet } from "ai"
 
 import {
 	type ModelInfo,
@@ -131,6 +131,9 @@ export class GeminiHandler extends BaseProvider implements SingleCompletionHandl
 			// Use streamText for streaming responses
 			const result = streamText(requestOptions)
 
+			// Track whether any text content was yielded (not just reasoning/thinking)
+			let hasContent = false
+
 			// Process the full stream to get all events including reasoning
 			for await (const part of result.fullStream) {
 				// Capture thoughtSignature from tool-call events (Gemini 3 thought signatures)
@@ -143,10 +146,22 @@ export class GeminiHandler extends BaseProvider implements SingleCompletionHandl
 				}
 
 				for (const chunk of processAiSdkStreamPart(part)) {
+					if (chunk.type === "text" || chunk.type === "tool_call_start") {
+						hasContent = true
+					}
 					yield chunk
 				}
 			}
 
+			// If the stream completed without yielding any text content, inform the user
+			// TODO: Move to i18n key common:errors.gemini.empty_response once translation pipeline is updated
+			if (!hasContent) {
+				yield {
+					type: "text" as const,
+					text: "Model returned an empty response. This may be caused by an unsupported thinking configuration or content filtering.",
+				}
+			}
+
 			// Extract grounding sources from providerMetadata if available
 			const providerMetadata = await result.providerMetadata
 			const groundingMetadata = providerMetadata?.google as
@@ -167,9 +182,23 @@ export class GeminiHandler extends BaseProvider implements SingleCompletionHandl
 			}
 
 			// Yield usage metrics at the end
-			const usage = await result.usage
-			if (usage) {
-				yield this.processUsageMetrics(usage, info, providerMetadata)
+			// Wrap in try-catch to handle NoOutputGeneratedError thrown by the AI SDK
+			// when the stream produces no output (e.g., thinking-only, safety block)
+			try {
+				const usage = await result.usage
+				if (usage) {
+					yield this.processUsageMetrics(usage, info, providerMetadata)
+				}
+			} catch (usageError) {
+				if (usageError instanceof NoOutputGeneratedError) {
+					// Text content was yielded yet the SDK reported no output: unexpected, re-throw
+					if (hasContent) {
+						throw usageError
+					}
+					// No text was yielded: the informative empty-stream message above covers it, so suppress
+				} else {
+					throw usageError
+				}
 			}
 		} catch (error) {
 			const errorMessage = error instanceof Error ? error.message : String(error)

src/api/transform/__tests__/reasoning.spec.ts (+123 -0)

@@ -765,6 +765,7 @@ describe("reasoning.ts", () => {
 			}
 
 			const result = getGeminiReasoning(options)
+			// "none" is not a valid GeminiThinkingLevel, so no fallback — returns undefined
 			expect(result).toBeUndefined()
 		})
 
@@ -838,6 +839,128 @@ describe("reasoning.ts", () => {
 			const result = getGeminiReasoning(options) as GeminiReasoningParams | undefined
 			expect(result).toEqual({ thinkingLevel: "medium", includeThoughts: true })
 		})
+
+		it("should fall back to model default when settings effort is not in supportsReasoningEffort array", () => {
+			// Simulates gemini-3-pro-preview which only supports ["low", "high"]
+			// but user has reasoningEffort: "medium" from a different model
+			const geminiModel: ModelInfo = {
+				...baseModel,
+				supportsReasoningEffort: ["low", "high"] as ModelInfo["supportsReasoningEffort"],
+				reasoningEffort: "low",
+			}
+
+			const settings: ProviderSettings = {
+				apiProvider: "gemini",
+				reasoningEffort: "medium",
+			}
+
+			const options: GetModelReasoningOptions = {
+				model: geminiModel,
+				reasoningBudget: undefined,
+				reasoningEffort: "medium",
+				settings,
+			}
+
+			const result = getGeminiReasoning(options) as GeminiReasoningParams | undefined
+			// "medium" is not in ["low", "high"], so falls back to model.reasoningEffort "low"
+			expect(result).toEqual({ thinkingLevel: "low", includeThoughts: true })
+		})
+
+		it("should return undefined when unsupported effort and model default is also invalid", () => {
+			const geminiModel: ModelInfo = {
+				...baseModel,
+				supportsReasoningEffort: ["low", "high"] as ModelInfo["supportsReasoningEffort"],
+				// No reasoningEffort default set
+			}
+
+			const settings: ProviderSettings = {
+				apiProvider: "gemini",
+				reasoningEffort: "medium",
+			}
+
+			const options: GetModelReasoningOptions = {
+				model: geminiModel,
+				reasoningBudget: undefined,
+				reasoningEffort: "medium",
+				settings,
+			}
+
+			const result = getGeminiReasoning(options)
+			// "medium" is not in ["low", "high"], fallback is undefined → returns undefined
+			expect(result).toBeUndefined()
+		})
+
+		it("should pass through effort that IS in the supportsReasoningEffort array", () => {
+			const geminiModel: ModelInfo = {
+				...baseModel,
+				supportsReasoningEffort: ["low", "high"] as ModelInfo["supportsReasoningEffort"],
+				reasoningEffort: "low",
+			}
+
+			const settings: ProviderSettings = {
+				apiProvider: "gemini",
+				reasoningEffort: "high",
+			}
+
+			const options: GetModelReasoningOptions = {
+				model: geminiModel,
+				reasoningBudget: undefined,
+				reasoningEffort: "high",
+				settings,
+			}
+
+			const result = getGeminiReasoning(options) as GeminiReasoningParams | undefined
+			// "high" IS in ["low", "high"], so it should be used directly
+			expect(result).toEqual({ thinkingLevel: "high", includeThoughts: true })
+		})
+
+		it("should skip validation when supportsReasoningEffort is boolean (not array)", () => {
+			const geminiModel: ModelInfo = {
+				...baseModel,
+				supportsReasoningEffort: true,
+				reasoningEffort: "low",
+			}
+
+			const settings: ProviderSettings = {
+				apiProvider: "gemini",
+				reasoningEffort: "medium",
+			}
+
+			const options: GetModelReasoningOptions = {
+				model: geminiModel,
+				reasoningBudget: undefined,
+				reasoningEffort: "medium",
+				settings,
+			}
+
+			const result = getGeminiReasoning(options) as GeminiReasoningParams | undefined
+			// boolean supportsReasoningEffort should not trigger array validation
+			expect(result).toEqual({ thinkingLevel: "medium", includeThoughts: true })
+		})
+
+		it("should fall back to model default when settings has 'minimal' but model only supports ['low', 'high']", () => {
+			const geminiModel: ModelInfo = {
+				...baseModel,
+				supportsReasoningEffort: ["low", "high"] as ModelInfo["supportsReasoningEffort"],
+				reasoningEffort: "low",
+			}
+
+			const settings: ProviderSettings = {
+				apiProvider: "gemini",
+				reasoningEffort: "minimal",
+			}
+
+			const options: GetModelReasoningOptions = {
+				model: geminiModel,
+				reasoningBudget: undefined,
+				reasoningEffort: "minimal",
+				settings,
+			}
+
+			const result = getGeminiReasoning(options) as GeminiReasoningParams | undefined
+			// "minimal" is not in ["low", "high"], falls back to "low"
+			expect(result).toEqual({ thinkingLevel: "low", includeThoughts: true })
+		})
 	})
 
 	describe("Integration scenarios", () => {

src/api/transform/reasoning.ts (+12 -2)

@@ -150,10 +150,20 @@ export const getGeminiReasoning = ({
 		return undefined
 	}
 
+	// Validate that the selected effort is supported by this specific model.
+	// e.g. gemini-3-pro-preview only supports ["low", "high"] — sending
+	// "medium" (carried over from a different model's settings) causes errors.
+	const effortToUse =
+		Array.isArray(model.supportsReasoningEffort) &&
+		isGeminiThinkingLevel(selectedEffort) &&
+		!model.supportsReasoningEffort.includes(selectedEffort)
+			? model.reasoningEffort
+			: selectedEffort
+
 	// Effort-based models on Google GenAI support minimal/low/medium/high levels.
-	if (!isGeminiThinkingLevel(selectedEffort)) {
+	if (!effortToUse || !isGeminiThinkingLevel(effortToUse)) {
 		return undefined
 	}
 
-	return { thinkingLevel: selectedEffort, includeThoughts: true }
+	return { thinkingLevel: effortToUse, includeThoughts: true }
 }
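
isGeminiThinkingLevel is not shown in this diff; assuming the minimal/low/medium/high levels named in the comment above, a plausible shape for that guard would be:

const GEMINI_THINKING_LEVELS = ["minimal", "low", "medium", "high"] as const
type GeminiThinkingLevel = (typeof GEMINI_THINKING_LEVELS)[number]

function isGeminiThinkingLevel(value: unknown): value is GeminiThinkingLevel {
	// Narrow an arbitrary value to one of the known Gemini thinking levels.
	return typeof value === "string" && (GEMINI_THINKING_LEVELS as readonly string[]).includes(value)
}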