Просмотр исходного кода

[Condense] Skip condense and show error if the context grows (#4061)

* [Condense] Skip condense and show error if the context grows

* update tests

* changeset

* nit: error should be nonempty

* update translations

* add more errors

* add more test cases

* update translations

* pipe error back from truncate code

* add condense_context_error ClineMessage

* fixes

* translations
Canyon Robins 7 месяцев назад
Родитель
Commit
2e5a1a8e1d
43 изменённых файла: 430 добавлений и 53 удаления
  1. 5 0
      .changeset/spotty-steaks-brake.md
  2. 1 0
      packages/types/src/message.ts
  3. 239 16
      src/core/condense/__tests__/index.test.ts
  4. 19 10
      src/core/condense/index.ts
  5. 5 2
      src/core/sliding-window/__tests__/sliding-window.test.ts
  6. 9 3
      src/core/sliding-window/index.ts
  7. 17 4
      src/core/task/Task.ts
  8. 6 1
      src/i18n/locales/ca/common.json
  9. 6 1
      src/i18n/locales/de/common.json
  10. 6 1
      src/i18n/locales/en/common.json
  11. 6 1
      src/i18n/locales/es/common.json
  12. 6 1
      src/i18n/locales/fr/common.json
  13. 6 1
      src/i18n/locales/hi/common.json
  14. 6 1
      src/i18n/locales/it/common.json
  15. 6 1
      src/i18n/locales/ja/common.json
  16. 6 1
      src/i18n/locales/ko/common.json
  17. 6 1
      src/i18n/locales/nl/common.json
  18. 6 1
      src/i18n/locales/pl/common.json
  19. 6 1
      src/i18n/locales/pt-BR/common.json
  20. 6 1
      src/i18n/locales/ru/common.json
  21. 6 1
      src/i18n/locales/tr/common.json
  22. 6 1
      src/i18n/locales/vi/common.json
  23. 6 1
      src/i18n/locales/zh-CN/common.json
  24. 6 1
      src/i18n/locales/zh-TW/common.json
  25. 3 1
      webview-ui/src/components/chat/ChatRow.tsx
  26. 13 0
      webview-ui/src/components/chat/ContextCondenseRow.tsx
  27. 1 0
      webview-ui/src/i18n/locales/ca/chat.json
  28. 1 0
      webview-ui/src/i18n/locales/de/chat.json
  29. 1 0
      webview-ui/src/i18n/locales/en/chat.json
  30. 1 0
      webview-ui/src/i18n/locales/es/chat.json
  31. 1 0
      webview-ui/src/i18n/locales/fr/chat.json
  32. 1 0
      webview-ui/src/i18n/locales/hi/chat.json
  33. 1 0
      webview-ui/src/i18n/locales/it/chat.json
  34. 1 0
      webview-ui/src/i18n/locales/ja/chat.json
  35. 1 0
      webview-ui/src/i18n/locales/ko/chat.json
  36. 1 0
      webview-ui/src/i18n/locales/nl/chat.json
  37. 1 0
      webview-ui/src/i18n/locales/pl/chat.json
  38. 1 0
      webview-ui/src/i18n/locales/pt-BR/chat.json
  39. 1 0
      webview-ui/src/i18n/locales/ru/chat.json
  40. 1 0
      webview-ui/src/i18n/locales/tr/chat.json
  41. 1 0
      webview-ui/src/i18n/locales/vi/chat.json
  42. 1 0
      webview-ui/src/i18n/locales/zh-CN/chat.json
  43. 1 0
      webview-ui/src/i18n/locales/zh-TW/chat.json

+ 5 - 0
.changeset/spotty-steaks-brake.md

@@ -0,0 +1,5 @@
+---
+"roo-cline": patch
+---
+
+Skips condense operations if the context size grows & shows an error

+ 1 - 0
packages/types/src/message.ts

@@ -50,6 +50,7 @@ export const clineSays = [
 	"rooignore_error",
 	"diff_error",
 	"condense_context",
+	"condense_context_error",
 	"codebase_search_result",
 ] as const
 

+ 239 - 16
src/core/condense/__tests__/index.test.ts

@@ -17,6 +17,7 @@ jest.mock("../../../services/telemetry/TelemetryService", () => ({
 }))
 
 const taskId = "test-task-id"
+const DEFAULT_PREV_CONTEXT_TOKENS = 1000
 
 describe("getMessagesSinceLastSummary", () => {
 	it("should return all messages when there is no summary", () => {
@@ -115,11 +116,18 @@ describe("summarizeConversation", () => {
 			{ role: "assistant", content: "Hi there", ts: 2 },
 		]
 
-		const result = await summarizeConversation(messages, mockApiHandler, defaultSystemPrompt, taskId)
+		const result = await summarizeConversation(
+			messages,
+			mockApiHandler,
+			defaultSystemPrompt,
+			taskId,
+			DEFAULT_PREV_CONTEXT_TOKENS,
+		)
 		expect(result.messages).toEqual(messages)
 		expect(result.cost).toBe(0)
 		expect(result.summary).toBe("")
 		expect(result.newContextTokens).toBeUndefined()
+		expect(result.error).toBeTruthy() // Error should be set for not enough messages
 		expect(mockApiHandler.createMessage).not.toHaveBeenCalled()
 	})
 
@@ -134,11 +142,18 @@ describe("summarizeConversation", () => {
 			{ role: "user", content: "Tell me more", ts: 7 },
 		]
 
-		const result = await summarizeConversation(messages, mockApiHandler, defaultSystemPrompt, taskId)
+		const result = await summarizeConversation(
+			messages,
+			mockApiHandler,
+			defaultSystemPrompt,
+			taskId,
+			DEFAULT_PREV_CONTEXT_TOKENS,
+		)
 		expect(result.messages).toEqual(messages)
 		expect(result.cost).toBe(0)
 		expect(result.summary).toBe("")
 		expect(result.newContextTokens).toBeUndefined()
+		expect(result.error).toBeTruthy() // Error should be set for recent summary
 		expect(mockApiHandler.createMessage).not.toHaveBeenCalled()
 	})
 
@@ -153,7 +168,13 @@ describe("summarizeConversation", () => {
 			{ role: "user", content: "Tell me more", ts: 7 },
 		]
 
-		const result = await summarizeConversation(messages, mockApiHandler, defaultSystemPrompt, taskId)
+		const result = await summarizeConversation(
+			messages,
+			mockApiHandler,
+			defaultSystemPrompt,
+			taskId,
+			DEFAULT_PREV_CONTEXT_TOKENS,
+		)
 
 		// Check that the API was called correctly
 		expect(mockApiHandler.createMessage).toHaveBeenCalled()
@@ -177,9 +198,10 @@ describe("summarizeConversation", () => {
 		expect(result.cost).toBe(0.05)
 		expect(result.summary).toBe("This is a summary")
 		expect(result.newContextTokens).toBe(250) // 150 output tokens + 100 from countTokens
+		expect(result.error).toBeUndefined()
 	})
 
-	it("should handle empty summary response", async () => {
+	it("should handle empty summary response and return error", async () => {
 		// We need enough messages to trigger summarization
 		const messages: ApiMessage[] = [
 			{ role: "user", content: "Hello", ts: 1 },
@@ -191,11 +213,6 @@ describe("summarizeConversation", () => {
 			{ role: "user", content: "Tell me more", ts: 7 },
 		]
 
-		// Mock console.warn before we call the function
-		const originalWarn = console.warn
-		const mockWarn = jest.fn()
-		console.warn = mockWarn
-
 		// Setup empty summary response with usage information
 		const emptyStream = (async function* () {
 			yield { type: "text" as const, text: "" }
@@ -211,16 +228,20 @@ describe("summarizeConversation", () => {
 			return messages.map(({ role, content }: { role: string; content: any }) => ({ role, content }))
 		})
 
-		const result = await summarizeConversation(messages, mockApiHandler, defaultSystemPrompt, taskId)
+		const result = await summarizeConversation(
+			messages,
+			mockApiHandler,
+			defaultSystemPrompt,
+			taskId,
+			DEFAULT_PREV_CONTEXT_TOKENS,
+		)
 
 		// Should return original messages when summary is empty
 		expect(result.messages).toEqual(messages)
 		expect(result.cost).toBe(0.02)
 		expect(result.summary).toBe("")
-		expect(mockWarn).toHaveBeenCalledWith("Received empty summary from API")
-
-		// Restore console.warn
-		console.warn = originalWarn
+		expect(result.error).toBeTruthy() // Error should be set
+		expect(result.newContextTokens).toBeUndefined()
 	})
 
 	it("should correctly format the request to the API", async () => {
@@ -234,7 +255,7 @@ describe("summarizeConversation", () => {
 			{ role: "user", content: "Tell me more", ts: 7 },
 		]
 
-		await summarizeConversation(messages, mockApiHandler, defaultSystemPrompt, taskId)
+		await summarizeConversation(messages, mockApiHandler, defaultSystemPrompt, taskId, DEFAULT_PREV_CONTEXT_TOKENS)
 
 		// Verify the final request message
 		const expectedFinalMessage = {
@@ -275,7 +296,13 @@ describe("summarizeConversation", () => {
 		// Override the mock for this test
 		mockApiHandler.createMessage = jest.fn().mockReturnValue(streamWithUsage) as any
 
-		const result = await summarizeConversation(messages, mockApiHandler, systemPrompt, taskId)
+		const result = await summarizeConversation(
+			messages,
+			mockApiHandler,
+			systemPrompt,
+			taskId,
+			DEFAULT_PREV_CONTEXT_TOKENS,
+		)
 
 		// Verify that countTokens was called with the correct messages including system prompt
 		expect(mockApiHandler.countTokens).toHaveBeenCalled()
@@ -284,6 +311,193 @@ describe("summarizeConversation", () => {
 		expect(result.newContextTokens).toBe(300) // 200 output tokens + 100 from countTokens
 		expect(result.cost).toBe(0.06)
 		expect(result.summary).toBe("This is a summary with system prompt")
+		expect(result.error).toBeUndefined()
+	})
+
+	it("should return error when new context tokens >= previous context tokens", async () => {
+		const messages: ApiMessage[] = [
+			{ role: "user", content: "Hello", ts: 1 },
+			{ role: "assistant", content: "Hi there", ts: 2 },
+			{ role: "user", content: "How are you?", ts: 3 },
+			{ role: "assistant", content: "I'm good", ts: 4 },
+			{ role: "user", content: "What's new?", ts: 5 },
+			{ role: "assistant", content: "Not much", ts: 6 },
+			{ role: "user", content: "Tell me more", ts: 7 },
+		]
+
+		// Create a stream that produces a summary
+		const streamWithLargeTokens = (async function* () {
+			yield { type: "text" as const, text: "This is a very long summary that uses many tokens" }
+			yield { type: "usage" as const, totalCost: 0.08, outputTokens: 500 }
+		})()
+
+		// Override the mock for this test
+		mockApiHandler.createMessage = jest.fn().mockReturnValue(streamWithLargeTokens) as any
+
+		// Mock countTokens to return a high value that when added to outputTokens (500)
+		// will be >= prevContextTokens (600)
+		mockApiHandler.countTokens = jest.fn().mockImplementation(() => Promise.resolve(200)) as any
+
+		const prevContextTokens = 600
+		const result = await summarizeConversation(
+			messages,
+			mockApiHandler,
+			defaultSystemPrompt,
+			taskId,
+			prevContextTokens,
+		)
+
+		// Should return original messages when context would grow
+		expect(result.messages).toEqual(messages)
+		expect(result.cost).toBe(0.08)
+		expect(result.summary).toBe("")
+		expect(result.error).toBeTruthy() // Error should be set
+		expect(result.newContextTokens).toBeUndefined()
+	})
+
+	it("should successfully summarize when new context tokens < previous context tokens", async () => {
+		const messages: ApiMessage[] = [
+			{ role: "user", content: "Hello", ts: 1 },
+			{ role: "assistant", content: "Hi there", ts: 2 },
+			{ role: "user", content: "How are you?", ts: 3 },
+			{ role: "assistant", content: "I'm good", ts: 4 },
+			{ role: "user", content: "What's new?", ts: 5 },
+			{ role: "assistant", content: "Not much", ts: 6 },
+			{ role: "user", content: "Tell me more", ts: 7 },
+		]
+
+		// Create a stream that produces a summary with reasonable token count
+		const streamWithSmallTokens = (async function* () {
+			yield { type: "text" as const, text: "Concise summary" }
+			yield { type: "usage" as const, totalCost: 0.03, outputTokens: 50 }
+		})()
+
+		// Override the mock for this test
+		mockApiHandler.createMessage = jest.fn().mockReturnValue(streamWithSmallTokens) as any
+
+		// Mock countTokens to return a small value so total is < prevContextTokens
+		mockApiHandler.countTokens = jest.fn().mockImplementation(() => Promise.resolve(30)) as any
+
+		const prevContextTokens = 200
+		const result = await summarizeConversation(
+			messages,
+			mockApiHandler,
+			defaultSystemPrompt,
+			taskId,
+			prevContextTokens,
+		)
+
+		// Should successfully summarize
+		expect(result.messages.length).toBe(messages.length + 1) // Original + summary
+		expect(result.cost).toBe(0.03)
+		expect(result.summary).toBe("Concise summary")
+		expect(result.error).toBeUndefined()
+		expect(result.newContextTokens).toBe(80) // 50 output tokens + 30 from countTokens
+		expect(result.newContextTokens).toBeLessThan(prevContextTokens)
+	})
+
+	it("should return error when not enough messages to summarize", async () => {
+		const messages: ApiMessage[] = [{ role: "user", content: "Hello", ts: 1 }]
+
+		const result = await summarizeConversation(
+			messages,
+			mockApiHandler,
+			defaultSystemPrompt,
+			taskId,
+			DEFAULT_PREV_CONTEXT_TOKENS,
+		)
+
+		// Should return original messages when not enough to summarize
+		expect(result.messages).toEqual(messages)
+		expect(result.cost).toBe(0)
+		expect(result.summary).toBe("")
+		expect(result.error).toBeTruthy() // Error should be set
+		expect(result.newContextTokens).toBeUndefined()
+		expect(mockApiHandler.createMessage).not.toHaveBeenCalled()
+	})
+
+	it("should return error when recent summary exists in kept messages", async () => {
+		const messages: ApiMessage[] = [
+			{ role: "user", content: "Hello", ts: 1 },
+			{ role: "assistant", content: "Hi there", ts: 2 },
+			{ role: "user", content: "How are you?", ts: 3 },
+			{ role: "assistant", content: "I'm good", ts: 4 },
+			{ role: "user", content: "What's new?", ts: 5 },
+			{ role: "assistant", content: "Recent summary", ts: 6, isSummary: true }, // Summary in last 3 messages
+			{ role: "user", content: "Tell me more", ts: 7 },
+		]
+
+		const result = await summarizeConversation(
+			messages,
+			mockApiHandler,
+			defaultSystemPrompt,
+			taskId,
+			DEFAULT_PREV_CONTEXT_TOKENS,
+		)
+
+		// Should return original messages when recent summary exists
+		expect(result.messages).toEqual(messages)
+		expect(result.cost).toBe(0)
+		expect(result.summary).toBe("")
+		expect(result.error).toBeTruthy() // Error should be set
+		expect(result.newContextTokens).toBeUndefined()
+		expect(mockApiHandler.createMessage).not.toHaveBeenCalled()
+	})
+
+	it("should return error when both condensing and main API handlers are invalid", async () => {
+		const messages: ApiMessage[] = [
+			{ role: "user", content: "Hello", ts: 1 },
+			{ role: "assistant", content: "Hi there", ts: 2 },
+			{ role: "user", content: "How are you?", ts: 3 },
+			{ role: "assistant", content: "I'm good", ts: 4 },
+			{ role: "user", content: "What's new?", ts: 5 },
+			{ role: "assistant", content: "Not much", ts: 6 },
+			{ role: "user", content: "Tell me more", ts: 7 },
+		]
+
+		// Create invalid handlers (missing createMessage)
+		const invalidMainHandler = {
+			countTokens: jest.fn(),
+			getModel: jest.fn(),
+			// createMessage is missing
+		} as unknown as ApiHandler
+
+		const invalidCondensingHandler = {
+			countTokens: jest.fn(),
+			getModel: jest.fn(),
+			// createMessage is missing
+		} as unknown as ApiHandler
+
+		// Mock console.error to verify error message
+		const originalError = console.error
+		const mockError = jest.fn()
+		console.error = mockError
+
+		const result = await summarizeConversation(
+			messages,
+			invalidMainHandler,
+			defaultSystemPrompt,
+			taskId,
+			DEFAULT_PREV_CONTEXT_TOKENS,
+			false,
+			undefined,
+			invalidCondensingHandler,
+		)
+
+		// Should return original messages when both handlers are invalid
+		expect(result.messages).toEqual(messages)
+		expect(result.cost).toBe(0)
+		expect(result.summary).toBe("")
+		expect(result.error).toBeTruthy() // Error should be set
+		expect(result.newContextTokens).toBeUndefined()
+
+		// Verify error was logged
+		expect(mockError).toHaveBeenCalledWith(
+			expect.stringContaining("Main API handler is also invalid for condensing"),
+		)
+
+		// Restore console.error
+		console.error = originalError
 	})
 })
 
@@ -373,6 +587,7 @@ describe("summarizeConversation with custom settings", () => {
 			mockMainApiHandler,
 			defaultSystemPrompt,
 			taskId,
+			DEFAULT_PREV_CONTEXT_TOKENS,
 			false,
 			customPrompt,
 		)
@@ -393,6 +608,7 @@ describe("summarizeConversation with custom settings", () => {
 			mockMainApiHandler,
 			defaultSystemPrompt,
 			taskId,
+			DEFAULT_PREV_CONTEXT_TOKENS,
 			false,
 			"  ", // Empty custom prompt
 		)
@@ -409,6 +625,7 @@ describe("summarizeConversation with custom settings", () => {
 			mockMainApiHandler,
 			defaultSystemPrompt,
 			taskId,
+			DEFAULT_PREV_CONTEXT_TOKENS,
 			false,
 			undefined, // No custom prompt
 		)
@@ -428,6 +645,7 @@ describe("summarizeConversation with custom settings", () => {
 			mockMainApiHandler,
 			defaultSystemPrompt,
 			taskId,
+			DEFAULT_PREV_CONTEXT_TOKENS,
 			false,
 			undefined,
 			mockCondensingApiHandler,
@@ -447,6 +665,7 @@ describe("summarizeConversation with custom settings", () => {
 			mockMainApiHandler,
 			defaultSystemPrompt,
 			taskId,
+			DEFAULT_PREV_CONTEXT_TOKENS,
 			false,
 			undefined,
 			undefined,
@@ -477,6 +696,7 @@ describe("summarizeConversation with custom settings", () => {
 			mockMainApiHandler,
 			defaultSystemPrompt,
 			taskId,
+			DEFAULT_PREV_CONTEXT_TOKENS,
 			false,
 			undefined,
 			invalidHandler,
@@ -503,6 +723,7 @@ describe("summarizeConversation with custom settings", () => {
 			mockMainApiHandler,
 			defaultSystemPrompt,
 			taskId,
+			DEFAULT_PREV_CONTEXT_TOKENS,
 			false,
 			"Custom prompt",
 		)
@@ -525,6 +746,7 @@ describe("summarizeConversation with custom settings", () => {
 			mockMainApiHandler,
 			defaultSystemPrompt,
 			taskId,
+			DEFAULT_PREV_CONTEXT_TOKENS,
 			false,
 			undefined,
 			mockCondensingApiHandler,
@@ -548,6 +770,7 @@ describe("summarizeConversation with custom settings", () => {
 			mockMainApiHandler,
 			defaultSystemPrompt,
 			taskId,
+			DEFAULT_PREV_CONTEXT_TOKENS,
 			true, // isAutomaticTrigger
 			"Custom prompt",
 			mockCondensingApiHandler,

+ 19 - 10
src/core/condense/index.ts

@@ -1,4 +1,5 @@
 import Anthropic from "@anthropic-ai/sdk"
+import { t } from "../../i18n"
 import { ApiHandler } from "../../api"
 import { ApiMessage } from "../task-persistence/apiMessages"
 import { maybeRemoveImageBlocks } from "../../api/transform/image-cleaning"
@@ -51,6 +52,7 @@ export type SummarizeResponse = {
 	summary: string // The summary text; empty string for no summary
 	cost: number // The cost of the summarization operation
 	newContextTokens?: number // The number of tokens in the context for the next API request
+	error?: string // Populated iff the operation fails: error message shown to the user on failure (see Task.ts)
 }
 
 /**
@@ -70,6 +72,7 @@ export type SummarizeResponse = {
  * @param {ApiHandler} apiHandler - The API handler to use for token counting (fallback if condensingApiHandler not provided)
  * @param {string} systemPrompt - The system prompt for API requests (fallback if customCondensingPrompt not provided)
  * @param {string} taskId - The task ID for the conversation, used for telemetry
+ * @param {number} prevContextTokens - The number of tokens currently in the context, used to ensure we don't grow the context
  * @param {boolean} isAutomaticTrigger - Whether the summarization is triggered automatically
  * @param {string} customCondensingPrompt - Optional custom prompt to use for condensing
  * @param {ApiHandler} condensingApiHandler - Optional specific API handler to use for condensing
@@ -80,6 +83,7 @@ export async function summarizeConversation(
 	apiHandler: ApiHandler,
 	systemPrompt: string,
 	taskId: string,
+	prevContextTokens: number,
 	isAutomaticTrigger?: boolean,
 	customCondensingPrompt?: string,
 	condensingApiHandler?: ApiHandler,
@@ -93,13 +97,18 @@ export async function summarizeConversation(
 	const response: SummarizeResponse = { messages, cost: 0, summary: "" }
 	const messagesToSummarize = getMessagesSinceLastSummary(messages.slice(0, -N_MESSAGES_TO_KEEP))
 	if (messagesToSummarize.length <= 1) {
-		return response // Not enough messages to warrant a summary
+		const error =
+			messages.length <= N_MESSAGES_TO_KEEP + 1
+				? t("common:errors.condense_not_enough_messages")
+				: t("common:errors.condensed_recently")
+		return { ...response, error }
 	}
 	const keepMessages = messages.slice(-N_MESSAGES_TO_KEEP)
 	// Check if there's a recent summary in the messages we're keeping
 	const recentSummaryExists = keepMessages.some((message) => message.isSummary)
 	if (recentSummaryExists) {
-		return response // We recently summarized these messages; it's too soon to summarize again.
+		const error = t("common:errors.condensed_recently")
+		return { ...response, error }
 	}
 	const finalRequestMessage: Anthropic.MessageParam = {
 		role: "user",
@@ -127,12 +136,8 @@ export async function summarizeConversation(
 			// Consider throwing an error or returning a specific error response.
 			console.error("Main API handler is also invalid for condensing. Cannot proceed.")
 			// Return an appropriate error structure for SummarizeResponse
-			return {
-				messages,
-				summary: "",
-				cost: 0,
-				newContextTokens: 0,
-			}
+			const error = t("common:errors.condense_handler_invalid")
+			return { ...response, error }
 		}
 	}
 
@@ -151,8 +156,8 @@ export async function summarizeConversation(
 	}
 	summary = summary.trim()
 	if (summary.length === 0) {
-		console.warn("Received empty summary from API")
-		return { ...response, cost }
+		const error = t("common:errors.condense_failed")
+		return { ...response, cost, error }
 	}
 	const summaryMessage: ApiMessage = {
 		role: "assistant",
@@ -172,6 +177,10 @@ export async function summarizeConversation(
 		typeof message.content === "string" ? [{ text: message.content, type: "text" as const }] : message.content,
 	)
 	const newContextTokens = outputTokens + (await apiHandler.countTokens(contextBlocks))
+	if (newContextTokens >= prevContextTokens) {
+		const error = t("common:errors.condense_context_grew")
+		return { ...response, cost, error }
+	}
 	return { messages: newMessages, summary, cost, newContextTokens }
 }
 

+ 5 - 2
src/core/sliding-window/__tests__/sliding-window.test.ts

@@ -532,6 +532,7 @@ describe("truncateConversationIfNeeded", () => {
 			mockApiHandler,
 			"System prompt",
 			taskId,
+			70001,
 			true,
 			undefined, // customCondensingPrompt
 			undefined, // condensingApiHandler
@@ -551,11 +552,12 @@ describe("truncateConversationIfNeeded", () => {
 	})
 
 	it("should fall back to truncateConversation when autoCondenseContext is true but summarization fails", async () => {
-		// Mock the summarizeConversation function to return empty summary
+		// Mock the summarizeConversation function to return an error
 		const mockSummarizeResponse: condenseModule.SummarizeResponse = {
 			messages: messages, // Original messages unchanged
-			summary: "", // Empty summary indicates failure
+			summary: "", // Empty summary
 			cost: 0.01,
+			error: "Summarization failed", // Error indicates failure
 		}
 
 		const summarizeSpy = jest
@@ -678,6 +680,7 @@ describe("truncateConversationIfNeeded", () => {
 			mockApiHandler,
 			"System prompt",
 			taskId,
+			60000,
 			true,
 			undefined, // customCondensingPrompt
 			undefined, // condensingApiHandler

+ 9 - 3
src/core/sliding-window/index.ts

@@ -96,6 +96,8 @@ export async function truncateConversationIfNeeded({
 	customCondensingPrompt,
 	condensingApiHandler,
 }: TruncateOptions): Promise<TruncateResponse> {
+	let error: string | undefined
+	let cost = 0
 	// Calculate the maximum tokens reserved for response
 	const reservedTokens = maxTokens || contextWindow * 0.2
 
@@ -122,11 +124,15 @@ export async function truncateConversationIfNeeded({
 				apiHandler,
 				systemPrompt,
 				taskId,
+				prevContextTokens,
 				true, // automatic trigger
 				customCondensingPrompt,
 				condensingApiHandler,
 			)
-			if (result.summary) {
+			if (result.error) {
+				error = result.error
+				cost = result.cost
+			} else {
 				return { ...result, prevContextTokens }
 			}
 		}
@@ -135,8 +141,8 @@ export async function truncateConversationIfNeeded({
 	// Fall back to sliding window truncation if needed
 	if (prevContextTokens > allowedTokens) {
 		const truncatedMessages = truncateConversation(messages, 0.5, taskId)
-		return { messages: truncatedMessages, prevContextTokens, summary: "", cost: 0 }
+		return { messages: truncatedMessages, prevContextTokens, summary: "", cost, error }
 	}
 	// No truncation or condensation needed
-	return { messages, summary: "", cost: 0, prevContextTokens }
+	return { messages, summary: "", cost, prevContextTokens, error }
 }

+ 17 - 4
src/core/task/Task.ts

@@ -509,26 +509,37 @@ export class Task extends EventEmitter<ClineEvents> {
 			}
 		}
 
+		const { contextTokens: prevContextTokens } = this.getTokenUsage()
 		const {
 			messages,
 			summary,
 			cost,
 			newContextTokens = 0,
+			error,
 		} = await summarizeConversation(
 			this.apiConversationHistory,
 			this.api, // Main API handler (fallback)
 			systemPrompt, // Default summarization prompt (fallback)
 			this.taskId,
+			prevContextTokens,
 			false, // manual trigger
 			customCondensingPrompt, // User's custom prompt
 			condensingApiHandler, // Specific handler for condensing
 		)
-		if (!summary) {
+		if (error) {
+			this.say(
+				"condense_context_error",
+				error,
+				undefined /* images */,
+				false /* partial */,
+				undefined /* checkpoint */,
+				undefined /* progressStatus */,
+				{ isNonInteractive: true } /* options */,
+			)
 			return
 		}
 		await this.overwriteApiConversationHistory(messages)
-		const { contextTokens } = this.getTokenUsage()
-		const contextCondense: ContextCondense = { summary, cost, newContextTokens, prevContextTokens: contextTokens }
+		const contextCondense: ContextCondense = { summary, cost, newContextTokens, prevContextTokens }
 		await this.say(
 			"condense_context",
 			undefined /* text */,
@@ -1598,7 +1609,9 @@ export class Task extends EventEmitter<ClineEvents> {
 			if (truncateResult.messages !== this.apiConversationHistory) {
 				await this.overwriteApiConversationHistory(truncateResult.messages)
 			}
-			if (truncateResult.summary) {
+			if (truncateResult.error) {
+				await this.say("condense_context_error", truncateResult.error)
+			} else if (truncateResult.summary) {
 				const { summary, cost, prevContextTokens, newContextTokens = 0 } = truncateResult
 				const contextCondense: ContextCondense = { summary, cost, newContextTokens, prevContextTokens }
 				await this.say(

+ 6 - 1
src/i18n/locales/ca/common.json

@@ -57,7 +57,12 @@
 		"custom_storage_path_unusable": "La ruta d'emmagatzematge personalitzada \"{{path}}\" no és utilitzable, s'utilitzarà la ruta predeterminada",
 		"cannot_access_path": "No es pot accedir a la ruta {{path}}: {{error}}",
 		"settings_import_failed": "Ha fallat la importació de la configuració: {{error}}.",
-		"mistake_limit_guidance": "Això pot indicar un error en el procés de pensament del model o la incapacitat d'utilitzar una eina correctament, que es pot mitigar amb orientació de l'usuari (p. ex. \"Prova de dividir la tasca en passos més petits\")."
+		"mistake_limit_guidance": "Això pot indicar un error en el procés de pensament del model o la incapacitat d'utilitzar una eina correctament, que es pot mitigar amb orientació de l'usuari (p. ex. \"Prova de dividir la tasca en passos més petits\").",
+		"condense_failed": "Ha fallat la condensació del context",
+		"condense_not_enough_messages": "No hi ha prou missatges per condensar el context",
+		"condensed_recently": "El context s'ha condensat recentment; s'omet aquest intent",
+		"condense_handler_invalid": "El gestor de l'API per condensar el context no és vàlid",
+		"condense_context_grew": "La mida del context ha augmentat durant la condensació; s'omet aquest intent"
 	},
 	"warnings": {
 		"no_terminal_content": "No s'ha seleccionat contingut de terminal",

+ 6 - 1
src/i18n/locales/de/common.json

@@ -53,7 +53,12 @@
 		"custom_storage_path_unusable": "Benutzerdefinierter Speicherpfad \"{{path}}\" ist nicht verwendbar, Standardpfad wird verwendet",
 		"cannot_access_path": "Zugriff auf Pfad {{path}} nicht möglich: {{error}}",
 		"settings_import_failed": "Fehler beim Importieren der Einstellungen: {{error}}.",
-		"mistake_limit_guidance": "Dies kann auf einen Fehler im Denkprozess des Modells oder die Unfähigkeit hinweisen, ein Tool richtig zu verwenden, was durch Benutzerführung behoben werden kann (z.B. \"Versuche, die Aufgabe in kleinere Schritte zu unterteilen\")."
+		"mistake_limit_guidance": "Dies kann auf einen Fehler im Denkprozess des Modells oder die Unfähigkeit hinweisen, ein Tool richtig zu verwenden, was durch Benutzerführung behoben werden kann (z.B. \"Versuche, die Aufgabe in kleinere Schritte zu unterteilen\").",
+		"condense_failed": "Fehler beim Verdichten des Kontexts",
+		"condense_not_enough_messages": "Nicht genügend Nachrichten zum Verdichten des Kontexts",
+		"condensed_recently": "Kontext wurde kürzlich verdichtet; dieser Versuch wird übersprungen",
+		"condense_handler_invalid": "API-Handler zum Verdichten des Kontexts ist ungültig",
+		"condense_context_grew": "Kontextgröße ist während der Verdichtung gewachsen; dieser Versuch wird übersprungen"
 	},
 	"warnings": {
 		"no_terminal_content": "Kein Terminal-Inhalt ausgewählt",

+ 6 - 1
src/i18n/locales/en/common.json

@@ -53,7 +53,12 @@
 		"cannot_access_path": "Cannot access path {{path}}: {{error}}",
 		"failed_update_project_mcp": "Failed to update project MCP servers",
 		"settings_import_failed": "Settings import failed: {{error}}.",
-		"mistake_limit_guidance": "This may indicate a failure in the model's thought process or inability to use a tool properly, which can be mitigated with some user guidance (e.g. \"Try breaking down the task into smaller steps\")."
+		"mistake_limit_guidance": "This may indicate a failure in the model's thought process or inability to use a tool properly, which can be mitigated with some user guidance (e.g. \"Try breaking down the task into smaller steps\").",
+		"condense_failed": "Failed to condense context",
+		"condense_not_enough_messages": "Not enough messages to condense context",
+		"condensed_recently": "Context was condensed recently; skipping this attempt",
+		"condense_handler_invalid": "API handler for condensing context is invalid",
+		"condense_context_grew": "Context size increased during condensing; skipping this attempt"
 	},
 	"warnings": {
 		"no_terminal_content": "No terminal content selected",

+ 6 - 1
src/i18n/locales/es/common.json

@@ -53,7 +53,12 @@
 		"custom_storage_path_unusable": "La ruta de almacenamiento personalizada \"{{path}}\" no es utilizable, se usará la ruta predeterminada",
 		"cannot_access_path": "No se puede acceder a la ruta {{path}}: {{error}}",
 		"settings_import_failed": "Error al importar la configuración: {{error}}.",
-		"mistake_limit_guidance": "Esto puede indicar un fallo en el proceso de pensamiento del modelo o la incapacidad de usar una herramienta correctamente, lo cual puede mitigarse con orientación del usuario (ej. \"Intenta dividir la tarea en pasos más pequeños\")."
+		"mistake_limit_guidance": "Esto puede indicar un fallo en el proceso de pensamiento del modelo o la incapacidad de usar una herramienta correctamente, lo cual puede mitigarse con orientación del usuario (ej. \"Intenta dividir la tarea en pasos más pequeños\").",
+		"condense_failed": "Error al condensar el contexto",
+		"condense_not_enough_messages": "No hay suficientes mensajes para condensar el contexto",
+		"condensed_recently": "El contexto se condensó recientemente; se omite este intento",
+		"condense_handler_invalid": "El manejador de API para condensar el contexto no es válido",
+		"condense_context_grew": "El tamaño del contexto aumentó durante la condensación; se omite este intento"
 	},
 	"warnings": {
 		"no_terminal_content": "No hay contenido de terminal seleccionado",

+ 6 - 1
src/i18n/locales/fr/common.json

@@ -53,7 +53,12 @@
 		"custom_storage_path_unusable": "Le chemin de stockage personnalisé \"{{path}}\" est inutilisable, le chemin par défaut sera utilisé",
 		"cannot_access_path": "Impossible d'accéder au chemin {{path}} : {{error}}",
 		"settings_import_failed": "Échec de l'importation des paramètres : {{error}}",
-		"mistake_limit_guidance": "Cela peut indiquer un échec dans le processus de réflexion du modèle ou une incapacité à utiliser un outil correctement, ce qui peut être atténué avec des conseils de l'utilisateur (par ex. \"Essaie de diviser la tâche en étapes plus petites\")."
+		"mistake_limit_guidance": "Cela peut indiquer un échec dans le processus de réflexion du modèle ou une incapacité à utiliser un outil correctement, ce qui peut être atténué avec des conseils de l'utilisateur (par ex. \"Essaie de diviser la tâche en étapes plus petites\").",
+		"condense_failed": "Échec de la condensation du contexte",
+		"condense_not_enough_messages": "Pas assez de messages pour condenser le contexte",
+		"condensed_recently": "Le contexte a été condensé récemment ; cette tentative est ignorée",
+		"condense_handler_invalid": "Le gestionnaire d'API pour condenser le contexte est invalide",
+		"condense_context_grew": "La taille du contexte a augmenté pendant la condensation ; cette tentative est ignorée"
 	},
 	"warnings": {
 		"no_terminal_content": "Aucun contenu de terminal sélectionné",

+ 6 - 1
src/i18n/locales/hi/common.json

@@ -53,7 +53,12 @@
 		"custom_storage_path_unusable": "कस्टम स्टोरेज पाथ \"{{path}}\" उपयोग योग्य नहीं है, डिफ़ॉल्ट पाथ का उपयोग किया जाएगा",
 		"cannot_access_path": "पाथ {{path}} तक पहुंच नहीं पा रहे हैं: {{error}}",
 		"settings_import_failed": "सेटिंग्स इम्पोर्ट करने में विफल: {{error}}।",
-		"mistake_limit_guidance": "यह मॉडल की सोच प्रक्रिया में विफलता या किसी टूल का सही उपयोग न कर पाने का संकेत हो सकता है, जिसे उपयोगकर्ता के मार्गदर्शन से ठीक किया जा सकता है (जैसे \"कार्य को छोटे चरणों में बांटने की कोशिश करें\")।"
+		"mistake_limit_guidance": "यह मॉडल की सोच प्रक्रिया में विफलता या किसी टूल का सही उपयोग न कर पाने का संकेत हो सकता है, जिसे उपयोगकर्ता के मार्गदर्शन से ठीक किया जा सकता है (जैसे \"कार्य को छोटे चरणों में बांटने की कोशिश करें\")।",
+		"condense_failed": "संदर्भ को संक्षिप्त करने में विफल",
+		"condense_not_enough_messages": "संदर्भ को संक्षिप्त करने के लिए पर्याप्त संदेश नहीं हैं",
+		"condensed_recently": "संदर्भ हाल ही में संक्षिप्त किया गया था; इस प्रयास को छोड़ा जा रहा है",
+		"condense_handler_invalid": "संदर्भ को संक्षिप्त करने के लिए API हैंडलर अमान्य है",
+		"condense_context_grew": "संक्षिप्तीकरण के दौरान संदर्भ का आकार बढ़ गया; इस प्रयास को छोड़ा जा रहा है"
 	},
 	"warnings": {
 		"no_terminal_content": "कोई टर्मिनल सामग्री चयनित नहीं",

+ 6 - 1
src/i18n/locales/it/common.json

@@ -53,7 +53,12 @@
 		"custom_storage_path_unusable": "Il percorso di archiviazione personalizzato \"{{path}}\" non è utilizzabile, verrà utilizzato il percorso predefinito",
 		"cannot_access_path": "Impossibile accedere al percorso {{path}}: {{error}}",
 		"settings_import_failed": "Importazione delle impostazioni fallita: {{error}}.",
-		"mistake_limit_guidance": "Questo può indicare un fallimento nel processo di pensiero del modello o l'incapacità di utilizzare correttamente uno strumento, che può essere mitigato con la guida dell'utente (ad es. \"Prova a suddividere l'attività in passaggi più piccoli\")."
+		"mistake_limit_guidance": "Questo può indicare un fallimento nel processo di pensiero del modello o l'incapacità di utilizzare correttamente uno strumento, che può essere mitigato con la guida dell'utente (ad es. \"Prova a suddividere l'attività in passaggi più piccoli\").",
+		"condense_failed": "Impossibile condensare il contesto",
+		"condense_not_enough_messages": "Non ci sono abbastanza messaggi per condensare il contesto",
+		"condensed_recently": "Il contesto è stato condensato di recente; questo tentativo viene saltato",
+		"condense_handler_invalid": "Il gestore API per condensare il contesto non è valido",
+		"condense_context_grew": "La dimensione del contesto è aumentata durante la condensazione; questo tentativo viene saltato"
 	},
 	"warnings": {
 		"no_terminal_content": "Nessun contenuto del terminale selezionato",

+ 6 - 1
src/i18n/locales/ja/common.json

@@ -53,7 +53,12 @@
 		"custom_storage_path_unusable": "カスタムストレージパス \"{{path}}\" が使用できないため、デフォルトパスを使用します",
 		"cannot_access_path": "パス {{path}} にアクセスできません:{{error}}",
 		"settings_import_failed": "設定のインポートに失敗しました:{{error}}",
-		"mistake_limit_guidance": "これは、モデルの思考プロセスの失敗やツールを適切に使用できないことを示している可能性があり、ユーザーのガイダンスによって軽減できます(例:「タスクをより小さなステップに分割してみてください」)。"
+		"mistake_limit_guidance": "これは、モデルの思考プロセスの失敗やツールを適切に使用できないことを示している可能性があり、ユーザーのガイダンスによって軽減できます(例:「タスクをより小さなステップに分割してみてください」)。",
+		"condense_failed": "コンテキストの圧縮に失敗しました",
+		"condense_not_enough_messages": "コンテキストを圧縮するのに十分なメッセージがありません",
+		"condensed_recently": "コンテキストは最近圧縮されました。この試行をスキップします",
+		"condense_handler_invalid": "コンテキストを圧縮するためのAPIハンドラーが無効です",
+		"condense_context_grew": "圧縮中にコンテキストサイズが増加しました。この試行をスキップします"
 	},
 	"warnings": {
 		"no_terminal_content": "選択されたターミナルコンテンツがありません",

+ 6 - 1
src/i18n/locales/ko/common.json

@@ -53,7 +53,12 @@
 		"custom_storage_path_unusable": "사용자 지정 저장 경로 \"{{path}}\"를 사용할 수 없어 기본 경로를 사용합니다",
 		"cannot_access_path": "경로 {{path}}에 접근할 수 없습니다: {{error}}",
 		"settings_import_failed": "설정 가져오기 실패: {{error}}.",
-		"mistake_limit_guidance": "이는 모델의 사고 과정 실패나 도구를 제대로 사용하지 못하는 것을 나타낼 수 있으며, 사용자 가이드를 통해 완화할 수 있습니다 (예: \"작업을 더 작은 단계로 나누어 시도해보세요\")."
+		"mistake_limit_guidance": "이는 모델의 사고 과정 실패나 도구를 제대로 사용하지 못하는 것을 나타낼 수 있으며, 사용자 가이드를 통해 완화할 수 있습니다 (예: \"작업을 더 작은 단계로 나누어 시도해보세요\").",
+		"condense_failed": "컨텍스트 압축에 실패했습니다",
+		"condense_not_enough_messages": "컨텍스트를 압축할 메시지가 충분하지 않습니다",
+		"condensed_recently": "컨텍스트가 최근 압축되었습니다; 이 시도를 건너뜁니다",
+		"condense_handler_invalid": "컨텍스트 압축을 위한 API 핸들러가 유효하지 않습니다",
+		"condense_context_grew": "압축 중 컨텍스트 크기가 증가했습니다; 이 시도를 건너뜁니다"
 	},
 	"warnings": {
 		"no_terminal_content": "선택된 터미널 내용이 없습니다",

+ 6 - 1
src/i18n/locales/nl/common.json

@@ -53,7 +53,12 @@
 		"cannot_access_path": "Kan pad {{path}} niet openen: {{error}}",
 		"failed_update_project_mcp": "Bijwerken van project MCP-servers mislukt",
 		"settings_import_failed": "Importeren van instellingen mislukt: {{error}}.",
-		"mistake_limit_guidance": "Dit kan duiden op een fout in het denkproces van het model of het onvermogen om een tool correct te gebruiken, wat kan worden verminderd met gebruikersbegeleiding (bijv. \"Probeer de taak op te delen in kleinere stappen\")."
+		"mistake_limit_guidance": "Dit kan duiden op een fout in het denkproces van het model of het onvermogen om een tool correct te gebruiken, wat kan worden verminderd met gebruikersbegeleiding (bijv. \"Probeer de taak op te delen in kleinere stappen\").",
+		"condense_failed": "Comprimeren van context mislukt",
+		"condense_not_enough_messages": "Niet genoeg berichten om context te comprimeren",
+		"condensed_recently": "Context is recent gecomprimeerd; deze poging wordt overgeslagen",
+		"condense_handler_invalid": "API-handler voor het comprimeren van context is ongeldig",
+		"condense_context_grew": "Contextgrootte nam toe tijdens comprimeren; deze poging wordt overgeslagen"
 	},
 	"warnings": {
 		"no_terminal_content": "Geen terminalinhoud geselecteerd",

+ 6 - 1
src/i18n/locales/pl/common.json

@@ -53,7 +53,12 @@
 		"custom_storage_path_unusable": "Niestandardowa ścieżka przechowywania \"{{path}}\" nie jest użyteczna, zostanie użyta domyślna ścieżka",
 		"cannot_access_path": "Nie można uzyskać dostępu do ścieżki {{path}}: {{error}}",
 		"settings_import_failed": "Nie udało się zaimportować ustawień: {{error}}.",
-		"mistake_limit_guidance": "To może wskazywać na błąd w procesie myślowym modelu lub niezdolność do prawidłowego użycia narzędzia, co można złagodzić poprzez wskazówki użytkownika (np. \"Spróbuj podzielić zadanie na mniejsze kroki\")."
+		"mistake_limit_guidance": "To może wskazywać na błąd w procesie myślowym modelu lub niezdolność do prawidłowego użycia narzędzia, co można złagodzić poprzez wskazówki użytkownika (np. \"Spróbuj podzielić zadanie na mniejsze kroki\").",
+		"condense_failed": "Nie udało się skondensować kontekstu",
+		"condense_not_enough_messages": "Za mało wiadomości do skondensowania kontekstu",
+		"condensed_recently": "Kontekst został niedawno skondensowany; pomijanie tej próby",
+		"condense_handler_invalid": "Nieprawidłowy handler API do kondensowania kontekstu",
+		"condense_context_grew": "Rozmiar kontekstu wzrósł podczas kondensacji; pomijanie tej próby"
 	},
 	"warnings": {
 		"no_terminal_content": "Nie wybrano zawartości terminala",

+ 6 - 1
src/i18n/locales/pt-BR/common.json

@@ -57,7 +57,12 @@
 		"custom_storage_path_unusable": "O caminho de armazenamento personalizado \"{{path}}\" não pode ser usado, será usado o caminho padrão",
 		"cannot_access_path": "Não é possível acessar o caminho {{path}}: {{error}}",
 		"settings_import_failed": "Falha ao importar configurações: {{error}}",
-		"mistake_limit_guidance": "Isso pode indicar uma falha no processo de pensamento do modelo ou incapacidade de usar uma ferramenta adequadamente, o que pode ser mitigado com orientação do usuário (ex. \"Tente dividir a tarefa em etapas menores\")."
+		"mistake_limit_guidance": "Isso pode indicar uma falha no processo de pensamento do modelo ou incapacidade de usar uma ferramenta adequadamente, o que pode ser mitigado com orientação do usuário (ex. \"Tente dividir a tarefa em etapas menores\").",
+		"condense_failed": "Falha ao condensar o contexto",
+		"condense_not_enough_messages": "Não há mensagens suficientes para condensar o contexto",
+		"condensed_recently": "O contexto foi condensado recentemente; pulando esta tentativa",
+		"condense_handler_invalid": "O manipulador de API para condensar o contexto é inválido",
+		"condense_context_grew": "O tamanho do contexto aumentou durante a condensação; pulando esta tentativa"
 	},
 	"warnings": {
 		"no_terminal_content": "Nenhum conteúdo do terminal selecionado",

+ 6 - 1
src/i18n/locales/ru/common.json

@@ -53,7 +53,12 @@
 		"cannot_access_path": "Невозможно получить доступ к пути {{path}}: {{error}}",
 		"failed_update_project_mcp": "Не удалось обновить серверы проекта MCP",
 		"settings_import_failed": "Не удалось импортировать настройки: {{error}}.",
-		"mistake_limit_guidance": "Это может указывать на сбой в процессе мышления модели или неспособность правильно использовать инструмент, что можно смягчить с помощью руководства пользователя (например, \"Попробуйте разбить задачу на более мелкие шаги\")."
+		"mistake_limit_guidance": "Это может указывать на сбой в процессе мышления модели или неспособность правильно использовать инструмент, что можно смягчить с помощью руководства пользователя (например, \"Попробуйте разбить задачу на более мелкие шаги\").",
+		"condense_failed": "Не удалось сжать контекст",
+		"condense_not_enough_messages": "Недостаточно сообщений для сжатия контекста",
+		"condensed_recently": "Контекст был недавно сжат; пропускаем эту попытку",
+		"condense_handler_invalid": "Обработчик API для сжатия контекста недействителен",
+		"condense_context_grew": "Размер контекста увеличился во время сжатия; пропускаем эту попытку"
 	},
 	"warnings": {
 		"no_terminal_content": "Не выбрано содержимое терминала",

+ 6 - 1
src/i18n/locales/tr/common.json

@@ -53,7 +53,12 @@
 		"custom_storage_path_unusable": "Özel depolama yolu \"{{path}}\" kullanılamıyor, varsayılan yol kullanılacak",
 		"cannot_access_path": "{{path}} yoluna erişilemiyor: {{error}}",
 		"settings_import_failed": "Ayarlar içe aktarılamadı: {{error}}.",
-		"mistake_limit_guidance": "Bu, modelin düşünce sürecindeki bir başarısızlığı veya bir aracı düzgün kullanamama durumunu gösterebilir, bu da kullanıcı rehberliği ile hafifletilebilir (örn. \"Görevi daha küçük adımlara bölmeyi deneyin\")."
+		"mistake_limit_guidance": "Bu, modelin düşünce sürecindeki bir başarısızlığı veya bir aracı düzgün kullanamama durumunu gösterebilir, bu da kullanıcı rehberliği ile hafifletilebilir (örn. \"Görevi daha küçük adımlara bölmeyi deneyin\").",
+		"condense_failed": "Bağlam sıkıştırılamadı",
+		"condense_not_enough_messages": "Bağlamı sıkıştırmak için yeterli mesaj yok",
+		"condensed_recently": "Bağlam yakın zamanda sıkıştırıldı; bu deneme atlanıyor",
+		"condense_handler_invalid": "Bağlamı sıkıştırmak için API işleyicisi geçersiz",
+		"condense_context_grew": "Sıkıştırma sırasında bağlam boyutu arttı; bu deneme atlanıyor"
 	},
 	"warnings": {
 		"no_terminal_content": "Seçili terminal içeriği yok",

+ 6 - 1
src/i18n/locales/vi/common.json

@@ -53,7 +53,12 @@
 		"custom_storage_path_unusable": "Đường dẫn lưu trữ tùy chỉnh \"{{path}}\" không thể sử dụng được, sẽ sử dụng đường dẫn mặc định",
 		"cannot_access_path": "Không thể truy cập đường dẫn {{path}}: {{error}}",
 		"settings_import_failed": "Nhập cài đặt thất bại: {{error}}.",
-		"mistake_limit_guidance": "Điều này có thể cho thấy sự thất bại trong quá trình suy nghĩ của mô hình hoặc không thể sử dụng công cụ đúng cách, có thể được giảm thiểu bằng hướng dẫn của người dùng (ví dụ: \"Hãy thử chia nhỏ nhiệm vụ thành các bước nhỏ hơn\")."
+		"mistake_limit_guidance": "Điều này có thể cho thấy sự thất bại trong quá trình suy nghĩ của mô hình hoặc không thể sử dụng công cụ đúng cách, có thể được giảm thiểu bằng hướng dẫn của người dùng (ví dụ: \"Hãy thử chia nhỏ nhiệm vụ thành các bước nhỏ hơn\").",
+		"condense_failed": "Không thể nén ngữ cảnh",
+		"condense_not_enough_messages": "Không đủ tin nhắn để nén ngữ cảnh",
+		"condensed_recently": "Ngữ cảnh đã được nén gần đây; bỏ qua lần thử này",
+		"condense_handler_invalid": "Trình xử lý API để nén ngữ cảnh không hợp lệ",
+		"condense_context_grew": "Kích thước ngữ cảnh tăng lên trong quá trình nén; bỏ qua lần thử này"
 	},
 	"warnings": {
 		"no_terminal_content": "Không có nội dung terminal được chọn",

+ 6 - 1
src/i18n/locales/zh-CN/common.json

@@ -53,7 +53,12 @@
 		"custom_storage_path_unusable": "自定义存储路径 \"{{path}}\" 不可用,将使用默认路径",
 		"cannot_access_path": "无法访问路径 {{path}}:{{error}}",
 		"settings_import_failed": "设置导入失败:{{error}}。",
-		"mistake_limit_guidance": "这可能表明模型思维过程失败或无法正确使用工具,可通过用户指导来缓解(例如\"尝试将任务分解为更小的步骤\")。"
+		"mistake_limit_guidance": "这可能表明模型思维过程失败或无法正确使用工具,可通过用户指导来缓解(例如\"尝试将任务分解为更小的步骤\")。",
+		"condense_failed": "压缩上下文失败",
+		"condense_not_enough_messages": "没有足够的消息来压缩上下文",
+		"condensed_recently": "上下文最近已压缩;跳过此次尝试",
+		"condense_handler_invalid": "压缩上下文的API处理程序无效",
+		"condense_context_grew": "压缩过程中上下文大小增加;跳过此次尝试"
 	},
 	"warnings": {
 		"no_terminal_content": "没有选择终端内容",

+ 6 - 1
src/i18n/locales/zh-TW/common.json

@@ -53,7 +53,12 @@
 		"custom_storage_path_unusable": "自訂儲存路徑 \"{{path}}\" 無法使用,將使用預設路徑",
 		"cannot_access_path": "無法存取路徑 {{path}}:{{error}}",
 		"settings_import_failed": "設定匯入失敗:{{error}}。",
-		"mistake_limit_guidance": "這可能表明模型思維過程失敗或無法正確使用工具,可透過使用者指導來緩解(例如「嘗試將工作分解為更小的步驟」)。"
+		"mistake_limit_guidance": "這可能表明模型思維過程失敗或無法正確使用工具,可透過使用者指導來緩解(例如「嘗試將工作分解為更小的步驟」)。",
+		"condense_failed": "壓縮上下文失敗",
+		"condense_not_enough_messages": "沒有足夠的訊息來壓縮上下文",
+		"condensed_recently": "上下文最近已壓縮;跳過此次嘗試",
+		"condense_handler_invalid": "壓縮上下文的 API 處理程式無效",
+		"condense_context_grew": "壓縮過程中上下文大小增加;跳過此次嘗試"
 	},
 	"warnings": {
 		"no_terminal_content": "沒有選擇終端機內容",

+ 3 - 1
webview-ui/src/components/chat/ChatRow.tsx

@@ -35,7 +35,7 @@ import { Markdown } from "./Markdown"
 import { CommandExecution } from "./CommandExecution"
 import { CommandExecutionError } from "./CommandExecutionError"
 import { AutoApprovedRequestLimitWarning } from "./AutoApprovedRequestLimitWarning"
-import { CondensingContextRow, ContextCondenseRow } from "./ContextCondenseRow"
+import { CondenseContextErrorRow, CondensingContextRow, ContextCondenseRow } from "./ContextCondenseRow"
 import CodebaseSearchResultsDisplay from "./CodebaseSearchResultsDisplay"
 
 interface ChatRowProps {
@@ -969,6 +969,8 @@ export const ChatRowContent = ({
 						return <CondensingContextRow />
 					}
 					return message.contextCondense ? <ContextCondenseRow {...message.contextCondense} /> : null
+				case "condense_context_error":
+					return <CondenseContextErrorRow errorText={message.text} />
 				case "codebase_search_result":
 					let parsed: {
 						content: {

+ 13 - 0
webview-ui/src/components/chat/ContextCondenseRow.tsx

@@ -59,3 +59,16 @@ export const CondensingContextRow = () => {
 		</div>
 	)
 }
+
+export const CondenseContextErrorRow = ({ errorText }: { errorText?: string }) => {
+	const { t } = useTranslation()
+	return (
+		<div className="flex flex-col gap-1">
+			<div className="flex items-center gap-2">
+				<span className="codicon codicon-warning text-vscode-editorWarning-foreground opacity-80 text-base -mb-0.5"></span>
+				<span className="font-bold text-vscode-foreground">{t("chat:contextCondense.errorHeader")}</span>
+			</div>
+			<span className="text-vscode-descriptionForeground text-sm">{errorText}</span>
+		</div>
+	)
+}

+ 1 - 0
webview-ui/src/i18n/locales/ca/chat.json

@@ -202,6 +202,7 @@
 	"contextCondense": {
 		"title": "Context condensat",
 		"condensing": "Condensant context...",
+		"errorHeader": "Error en condensar el context",
 		"tokens": "tokens"
 	},
 	"followUpSuggest": {

+ 1 - 0
webview-ui/src/i18n/locales/de/chat.json

@@ -202,6 +202,7 @@
 	"contextCondense": {
 		"title": "Kontext komprimiert",
 		"condensing": "Kontext wird komprimiert...",
+		"errorHeader": "Kontext konnte nicht komprimiert werden",
 		"tokens": "Tokens"
 	},
 	"followUpSuggest": {

+ 1 - 0
webview-ui/src/i18n/locales/en/chat.json

@@ -133,6 +133,7 @@
 	"contextCondense": {
 		"title": "Context Condensed",
 		"condensing": "Condensing context...",
+		"errorHeader": "Failed to condense context",
 		"tokens": "tokens"
 	},
 	"instructions": {

+ 1 - 0
webview-ui/src/i18n/locales/es/chat.json

@@ -202,6 +202,7 @@
 	"contextCondense": {
 		"title": "Contexto condensado",
 		"condensing": "Condensando contexto...",
+		"errorHeader": "Error al condensar el contexto",
 		"tokens": "tokens"
 	},
 	"followUpSuggest": {

+ 1 - 0
webview-ui/src/i18n/locales/fr/chat.json

@@ -202,6 +202,7 @@
 	"contextCondense": {
 		"title": "Contexte condensé",
 		"condensing": "Condensation du contexte...",
+		"errorHeader": "Échec de la condensation du contexte",
 		"tokens": "tokens"
 	},
 	"followUpSuggest": {

+ 1 - 0
webview-ui/src/i18n/locales/hi/chat.json

@@ -202,6 +202,7 @@
 	"contextCondense": {
 		"title": "संदर्भ संक्षिप्त किया गया",
 		"condensing": "संदर्भ संघनित कर रहा है...",
+		"errorHeader": "संदर्भ संघनित करने में विफल",
 		"tokens": "टोकन"
 	},
 	"followUpSuggest": {

+ 1 - 0
webview-ui/src/i18n/locales/it/chat.json

@@ -202,6 +202,7 @@
 	"contextCondense": {
 		"title": "Contesto condensato",
 		"condensing": "Condensazione del contesto...",
+		"errorHeader": "Impossibile condensare il contesto",
 		"tokens": "token"
 	},
 	"followUpSuggest": {

+ 1 - 0
webview-ui/src/i18n/locales/ja/chat.json

@@ -202,6 +202,7 @@
 	"contextCondense": {
 		"title": "コンテキスト要約",
 		"condensing": "コンテキストを圧縮中...",
+		"errorHeader": "コンテキストの圧縮に失敗しました",
 		"tokens": "トークン"
 	},
 	"followUpSuggest": {

+ 1 - 0
webview-ui/src/i18n/locales/ko/chat.json

@@ -202,6 +202,7 @@
 	"contextCondense": {
 		"title": "컨텍스트 요약됨",
 		"condensing": "컨텍스트 압축 중...",
+		"errorHeader": "컨텍스트 압축 실패",
 		"tokens": "토큰"
 	},
 	"followUpSuggest": {

+ 1 - 0
webview-ui/src/i18n/locales/nl/chat.json

@@ -212,6 +212,7 @@
 	"contextCondense": {
 		"title": "Context samengevat",
 		"condensing": "Context aan het samenvatten...",
+		"errorHeader": "Context samenvatten mislukt",
 		"tokens": "tokens"
 	},
 	"followUpSuggest": {

+ 1 - 0
webview-ui/src/i18n/locales/pl/chat.json

@@ -202,6 +202,7 @@
 	"contextCondense": {
 		"title": "Kontekst skondensowany",
 		"condensing": "Kondensowanie kontekstu...",
+		"errorHeader": "Nie udało się skondensować kontekstu",
 		"tokens": "tokeny"
 	},
 	"followUpSuggest": {

+ 1 - 0
webview-ui/src/i18n/locales/pt-BR/chat.json

@@ -202,6 +202,7 @@
 	"contextCondense": {
 		"title": "Contexto condensado",
 		"condensing": "Condensando contexto...",
+		"errorHeader": "Falha ao condensar contexto",
 		"tokens": "tokens"
 	},
 	"followUpSuggest": {

+ 1 - 0
webview-ui/src/i18n/locales/ru/chat.json

@@ -212,6 +212,7 @@
 	"contextCondense": {
 		"title": "Контекст сжат",
 		"condensing": "Сжатие контекста...",
+		"errorHeader": "Не удалось сжать контекст",
 		"tokens": "токены"
 	},
 	"followUpSuggest": {

+ 1 - 0
webview-ui/src/i18n/locales/tr/chat.json

@@ -202,6 +202,7 @@
 	"contextCondense": {
 		"title": "Bağlam Özetlendi",
 		"condensing": "Bağlam yoğunlaştırılıyor...",
+		"errorHeader": "Bağlam yoğunlaştırılamadı",
 		"tokens": "token"
 	},
 	"followUpSuggest": {

+ 1 - 0
webview-ui/src/i18n/locales/vi/chat.json

@@ -202,6 +202,7 @@
 	"contextCondense": {
 		"title": "Ngữ cảnh đã tóm tắt",
 		"condensing": "Đang cô đọng ngữ cảnh...",
+		"errorHeader": "Không thể cô đọng ngữ cảnh",
 		"tokens": "token"
 	},
 	"followUpSuggest": {

+ 1 - 0
webview-ui/src/i18n/locales/zh-CN/chat.json

@@ -202,6 +202,7 @@
 	"contextCondense": {
 		"title": "上下文已压缩",
 		"condensing": "正在压缩上下文...",
+		"errorHeader": "上下文压缩失败",
 		"tokens": "tokens"
 	},
 	"followUpSuggest": {

+ 1 - 0
webview-ui/src/i18n/locales/zh-TW/chat.json

@@ -202,6 +202,7 @@
 	"contextCondense": {
 		"title": "上下文已壓縮",
 		"condensing": "正在壓縮上下文...",
+		"errorHeader": "上下文壓縮失敗",
 		"tokens": "tokens"
 	},
 	"followUpSuggest": {