[Condense] Condense messages with an LLM rather than truncating (#3582)

Co-authored-by: Matt Rubens <[email protected]>
Canyon Robins 7 months ago
commit 7c1185ed33
33 changed files with 863 additions and 60 deletions
  1. +5 -0     .changeset/large-bags-send.md
  2. +2 -1     evals/packages/types/src/roo-code.ts
  3. +336 -0   src/api/transform/__tests__/image-cleaning.test.ts
  4. +28 -0    src/api/transform/image-cleaning.ts
  5. +228 -0   src/core/condense/__tests__/index.test.ts
  6. +105 -0   src/core/condense/index.ts
  7. +11 -10   src/core/sliding-window/__tests__/sliding-window.test.ts
  8. +22 -12   src/core/sliding-window/index.ts
  9. +1 -1     src/core/task-persistence/apiMessages.ts
  10. +15 -33  src/core/task/Task.ts
  11. +3 -0    src/exports/roo-code.d.ts
  12. +3 -0    src/exports/types.ts
  13. +2 -1    src/schemas/index.ts
  14. +30 -2   src/shared/__tests__/experiments.test.ts
  15. +2 -0    src/shared/experiments.ts
  16. +2 -0    webview-ui/src/context/__tests__/ExtensionStateContext.test.tsx
  17. +4 -0    webview-ui/src/i18n/locales/ca/settings.json
  18. +4 -0    webview-ui/src/i18n/locales/de/settings.json
  19. +4 -0    webview-ui/src/i18n/locales/en/settings.json
  20. +4 -0    webview-ui/src/i18n/locales/es/settings.json
  21. +4 -0    webview-ui/src/i18n/locales/fr/settings.json
  22. +4 -0    webview-ui/src/i18n/locales/hi/settings.json
  23. +4 -0    webview-ui/src/i18n/locales/it/settings.json
  24. +4 -0    webview-ui/src/i18n/locales/ja/settings.json
  25. +4 -0    webview-ui/src/i18n/locales/ko/settings.json
  26. +4 -0    webview-ui/src/i18n/locales/nl/settings.json
  27. +4 -0    webview-ui/src/i18n/locales/pl/settings.json
  28. +4 -0    webview-ui/src/i18n/locales/pt-BR/settings.json
  29. +4 -0    webview-ui/src/i18n/locales/ru/settings.json
  30. +4 -0    webview-ui/src/i18n/locales/tr/settings.json
  31. +4 -0    webview-ui/src/i18n/locales/vi/settings.json
  32. +4 -0    webview-ui/src/i18n/locales/zh-CN/settings.json
  33. +4 -0    webview-ui/src/i18n/locales/zh-TW/settings.json

+ 5 - 0
.changeset/large-bags-send.md

@@ -0,0 +1,5 @@
+---
+"roo-cline": patch
+---
+
+Adds an experimental feature to intelligently condense the task context

+ 2 - 1
evals/packages/types/src/roo-code.ts

@@ -297,7 +297,7 @@ export type CommandExecutionStatus = z.infer<typeof commandExecutionStatusSchema
  * ExperimentId
  */
 
-export const experimentIds = ["powerSteering"] as const
+export const experimentIds = ["autoCondenseContext", "powerSteering"] as const
 
 export const experimentIdsSchema = z.enum(experimentIds)
 
@@ -308,6 +308,7 @@ export type ExperimentId = z.infer<typeof experimentIdsSchema>
  */
 
 const experimentsSchema = z.object({
+	autoCondenseContext: z.boolean(),
 	powerSteering: z.boolean(),
 })
 

+ 336 - 0
src/api/transform/__tests__/image-cleaning.test.ts

@@ -0,0 +1,336 @@
+import { ApiHandler } from "../.."
+import { ApiMessage } from "../../../core/task-persistence/apiMessages"
+import { maybeRemoveImageBlocks } from "../image-cleaning"
+import { ModelInfo } from "../../../shared/api"
+
+describe("maybeRemoveImageBlocks", () => {
+	// Mock ApiHandler factory function
+	const createMockApiHandler = (supportsImages: boolean): ApiHandler => {
+		return {
+			getModel: jest.fn().mockReturnValue({
+				id: "test-model",
+				info: {
+					supportsImages,
+				} as ModelInfo,
+			}),
+			createMessage: jest.fn(),
+			countTokens: jest.fn(),
+		}
+	}
+
+	it("should handle empty messages array", () => {
+		const apiHandler = createMockApiHandler(true)
+		const messages: ApiMessage[] = []
+
+		const result = maybeRemoveImageBlocks(messages, apiHandler)
+
+		expect(result).toEqual([])
+		// No need to check if getModel was called since there are no messages to process
+	})
+
+	it("should not modify messages with no image blocks", () => {
+		const apiHandler = createMockApiHandler(true)
+		const messages: ApiMessage[] = [
+			{
+				role: "user",
+				content: "Hello, world!",
+			},
+			{
+				role: "assistant",
+				content: "Hi there!",
+			},
+		]
+
+		const result = maybeRemoveImageBlocks(messages, apiHandler)
+
+		expect(result).toEqual(messages)
+		// getModel is only called when content is an array, which is not the case here
+	})
+
+	it("should not modify messages with array content but no image blocks", () => {
+		const apiHandler = createMockApiHandler(true)
+		const messages: ApiMessage[] = [
+			{
+				role: "user",
+				content: [
+					{
+						type: "text",
+						text: "Hello, world!",
+					},
+					{
+						type: "text",
+						text: "How are you?",
+					},
+				],
+			},
+		]
+
+		const result = maybeRemoveImageBlocks(messages, apiHandler)
+
+		expect(result).toEqual(messages)
+		expect(apiHandler.getModel).toHaveBeenCalled()
+	})
+
+	it("should not modify image blocks when API handler supports images", () => {
+		const apiHandler = createMockApiHandler(true)
+		const messages: ApiMessage[] = [
+			{
+				role: "user",
+				content: [
+					{
+						type: "text",
+						text: "Check out this image:",
+					},
+					{
+						type: "image",
+						source: {
+							type: "base64",
+							media_type: "image/jpeg",
+							data: "base64-encoded-image-data",
+						},
+					},
+				],
+			},
+		]
+
+		const result = maybeRemoveImageBlocks(messages, apiHandler)
+
+		// Should not modify the messages since the API handler supports images
+		expect(result).toEqual(messages)
+		expect(apiHandler.getModel).toHaveBeenCalled()
+	})
+
+	it("should convert image blocks to text descriptions when API handler doesn't support images", () => {
+		const apiHandler = createMockApiHandler(false)
+		const messages: ApiMessage[] = [
+			{
+				role: "user",
+				content: [
+					{
+						type: "text",
+						text: "Check out this image:",
+					},
+					{
+						type: "image",
+						source: {
+							type: "base64",
+							media_type: "image/jpeg",
+							data: "base64-encoded-image-data",
+						},
+					},
+				],
+			},
+		]
+
+		const result = maybeRemoveImageBlocks(messages, apiHandler)
+
+		// Should convert image blocks to text descriptions
+		expect(result).toEqual([
+			{
+				role: "user",
+				content: [
+					{
+						type: "text",
+						text: "Check out this image:",
+					},
+					{
+						type: "text",
+						text: "[Referenced image in conversation]",
+					},
+				],
+			},
+		])
+		expect(apiHandler.getModel).toHaveBeenCalled()
+	})
+
+	it("should handle mixed content messages with multiple text and image blocks", () => {
+		const apiHandler = createMockApiHandler(false)
+		const messages: ApiMessage[] = [
+			{
+				role: "user",
+				content: [
+					{
+						type: "text",
+						text: "Here are some images:",
+					},
+					{
+						type: "image",
+						source: {
+							type: "base64",
+							media_type: "image/jpeg",
+							data: "image-data-1",
+						},
+					},
+					{
+						type: "text",
+						text: "And another one:",
+					},
+					{
+						type: "image",
+						source: {
+							type: "base64",
+							media_type: "image/png",
+							data: "image-data-2",
+						},
+					},
+				],
+			},
+		]
+
+		const result = maybeRemoveImageBlocks(messages, apiHandler)
+
+		// Should convert all image blocks to text descriptions
+		expect(result).toEqual([
+			{
+				role: "user",
+				content: [
+					{
+						type: "text",
+						text: "Here are some images:",
+					},
+					{
+						type: "text",
+						text: "[Referenced image in conversation]",
+					},
+					{
+						type: "text",
+						text: "And another one:",
+					},
+					{
+						type: "text",
+						text: "[Referenced image in conversation]",
+					},
+				],
+			},
+		])
+		expect(apiHandler.getModel).toHaveBeenCalled()
+	})
+
+	it("should handle multiple messages with image blocks", () => {
+		const apiHandler = createMockApiHandler(false)
+		const messages: ApiMessage[] = [
+			{
+				role: "user",
+				content: [
+					{
+						type: "text",
+						text: "Here's an image:",
+					},
+					{
+						type: "image",
+						source: {
+							type: "base64",
+							media_type: "image/jpeg",
+							data: "image-data-1",
+						},
+					},
+				],
+			},
+			{
+				role: "assistant",
+				content: "I see the image!",
+			},
+			{
+				role: "user",
+				content: [
+					{
+						type: "text",
+						text: "Here's another image:",
+					},
+					{
+						type: "image",
+						source: {
+							type: "base64",
+							media_type: "image/png",
+							data: "image-data-2",
+						},
+					},
+				],
+			},
+		]
+
+		const result = maybeRemoveImageBlocks(messages, apiHandler)
+
+		// Should convert all image blocks to text descriptions
+		expect(result).toEqual([
+			{
+				role: "user",
+				content: [
+					{
+						type: "text",
+						text: "Here's an image:",
+					},
+					{
+						type: "text",
+						text: "[Referenced image in conversation]",
+					},
+				],
+			},
+			{
+				role: "assistant",
+				content: "I see the image!",
+			},
+			{
+				role: "user",
+				content: [
+					{
+						type: "text",
+						text: "Here's another image:",
+					},
+					{
+						type: "text",
+						text: "[Referenced image in conversation]",
+					},
+				],
+			},
+		])
+		expect(apiHandler.getModel).toHaveBeenCalled()
+	})
+
+	it("should preserve additional message properties", () => {
+		const apiHandler = createMockApiHandler(false)
+		const messages: ApiMessage[] = [
+			{
+				role: "user",
+				content: [
+					{
+						type: "text",
+						text: "Here's an image:",
+					},
+					{
+						type: "image",
+						source: {
+							type: "base64",
+							media_type: "image/jpeg",
+							data: "image-data",
+						},
+					},
+				],
+				ts: 1620000000000,
+				isSummary: true,
+			},
+		]
+
+		const result = maybeRemoveImageBlocks(messages, apiHandler)
+
+		// Should convert image blocks to text descriptions while preserving additional properties
+		expect(result).toEqual([
+			{
+				role: "user",
+				content: [
+					{
+						type: "text",
+						text: "Here's an image:",
+					},
+					{
+						type: "text",
+						text: "[Referenced image in conversation]",
+					},
+				],
+				ts: 1620000000000,
+				isSummary: true,
+			},
+		])
+		expect(apiHandler.getModel).toHaveBeenCalled()
+	})
+})

+ 28 - 0
src/api/transform/image-cleaning.ts

@@ -0,0 +1,28 @@
+import { ApiHandler } from ".."
+import { ApiMessage } from "../../core/task-persistence/apiMessages"
+
+/* Removes image blocks from messages if they are not supported by the API handler's model */
+export function maybeRemoveImageBlocks(messages: ApiMessage[], apiHandler: ApiHandler): ApiMessage[] {
+	return messages.map((message) => {
+		// Handle array content (could contain image blocks).
+		let { content } = message
+		if (Array.isArray(content)) {
+			if (!apiHandler.getModel().info.supportsImages) {
+				// Convert image blocks to text descriptions.
+				content = content.map((block) => {
+					if (block.type === "image") {
+						// Note: We can't access the actual image content/url due to API limitations,
+						// but we can indicate that an image was present in the conversation.
+						return {
+							type: "text",
+							text: "[Referenced image in conversation]",
+						}
+					}
+					return block
+				})
+			}
+		}
+		return { ...message, content }
+	})
+}
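
A minimal usage sketch of the new helper (illustrative, not part of the diff; the wrapper function and message values are hypothetical):

import { ApiHandler } from ".."
import { ApiMessage } from "../../core/task-persistence/apiMessages"
import { maybeRemoveImageBlocks } from "./image-cleaning"

// Hypothetical history containing one image block.
const history: ApiMessage[] = [
	{
		role: "user",
		content: [
			{ type: "text", text: "What does this screenshot show?" },
			{ type: "image", source: { type: "base64", media_type: "image/png", data: "iVBORw0KGgo=" } },
		],
	},
]

function prepareForModel(apiHandler: ApiHandler): ApiMessage[] {
	// If apiHandler.getModel().info.supportsImages is false, the image block above
	// is replaced with { type: "text", text: "[Referenced image in conversation]" };
	// otherwise the messages are returned unchanged.
	return maybeRemoveImageBlocks(history, apiHandler)
}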

+ 228 - 0
src/core/condense/__tests__/index.test.ts

@@ -0,0 +1,228 @@
+import { describe, expect, it, jest, beforeEach } from "@jest/globals"
+import { ApiHandler } from "../../../api"
+import { ApiMessage } from "../../task-persistence/apiMessages"
+import { maybeRemoveImageBlocks } from "../../../api/transform/image-cleaning"
+import { summarizeConversation, getMessagesSinceLastSummary, N_MESSAGES_TO_KEEP } from "../index"
+
+// Mock dependencies
+jest.mock("../../../api/transform/image-cleaning", () => ({
+	maybeRemoveImageBlocks: jest.fn((messages: ApiMessage[], _apiHandler: ApiHandler) => [...messages]),
+}))
+
+describe("getMessagesSinceLastSummary", () => {
+	it("should return all messages when there is no summary", () => {
+		const messages: ApiMessage[] = [
+			{ role: "user", content: "Hello", ts: 1 },
+			{ role: "assistant", content: "Hi there", ts: 2 },
+			{ role: "user", content: "How are you?", ts: 3 },
+		]
+
+		const result = getMessagesSinceLastSummary(messages)
+		expect(result).toEqual(messages)
+	})
+
+	it("should return messages since the last summary", () => {
+		const messages: ApiMessage[] = [
+			{ role: "user", content: "Hello", ts: 1 },
+			{ role: "assistant", content: "Hi there", ts: 2 },
+			{ role: "assistant", content: "Summary of conversation", ts: 3, isSummary: true },
+			{ role: "user", content: "How are you?", ts: 4 },
+			{ role: "assistant", content: "I'm good", ts: 5 },
+		]
+
+		const result = getMessagesSinceLastSummary(messages)
+		expect(result).toEqual([
+			{ role: "assistant", content: "Summary of conversation", ts: 3, isSummary: true },
+			{ role: "user", content: "How are you?", ts: 4 },
+			{ role: "assistant", content: "I'm good", ts: 5 },
+		])
+	})
+
+	it("should handle multiple summary messages and return since the last one", () => {
+		const messages: ApiMessage[] = [
+			{ role: "user", content: "Hello", ts: 1 },
+			{ role: "assistant", content: "First summary", ts: 2, isSummary: true },
+			{ role: "user", content: "How are you?", ts: 3 },
+			{ role: "assistant", content: "Second summary", ts: 4, isSummary: true },
+			{ role: "user", content: "What's new?", ts: 5 },
+		]
+
+		const result = getMessagesSinceLastSummary(messages)
+		expect(result).toEqual([
+			{ role: "assistant", content: "Second summary", ts: 4, isSummary: true },
+			{ role: "user", content: "What's new?", ts: 5 },
+		])
+	})
+
+	it("should handle empty messages array", () => {
+		const result = getMessagesSinceLastSummary([])
+		expect(result).toEqual([])
+	})
+})
+
+describe("summarizeConversation", () => {
+	// Mock ApiHandler
+	let mockApiHandler: ApiHandler
+	let mockStream: AsyncGenerator<any, void, unknown>
+
+	beforeEach(() => {
+		// Reset mocks
+		jest.clearAllMocks()
+
+		// Setup mock stream
+		mockStream = (async function* () {
+			yield { type: "text" as const, text: "This is " }
+			yield { type: "text" as const, text: "a summary" }
+		})()
+
+		// Setup mock API handler
+		mockApiHandler = {
+			createMessage: jest.fn().mockReturnValue(mockStream),
+			countTokens: jest.fn().mockImplementation(() => Promise.resolve(100)),
+			getModel: jest.fn().mockReturnValue({
+				id: "test-model",
+				info: {
+					contextWindow: 8000,
+					supportsImages: true,
+					supportsComputerUse: true,
+					supportsVision: true,
+					maxTokens: 4000,
+					supportsPromptCache: true,
+					maxCachePoints: 10,
+					minTokensPerCachePoint: 100,
+					cachableFields: ["system", "messages"],
+				},
+			}),
+		} as unknown as ApiHandler
+	})
+
+	it("should not summarize when there are not enough messages", async () => {
+		const messages: ApiMessage[] = [
+			{ role: "user", content: "Hello", ts: 1 },
+			{ role: "assistant", content: "Hi there", ts: 2 },
+		]
+
+		const result = await summarizeConversation(messages, mockApiHandler)
+		expect(result).toEqual(messages)
+		expect(mockApiHandler.createMessage).not.toHaveBeenCalled()
+	})
+
+	it("should not summarize when there was a recent summary", async () => {
+		const messages: ApiMessage[] = [
+			{ role: "user", content: "Hello", ts: 1 },
+			{ role: "assistant", content: "Hi there", ts: 2 },
+			{ role: "user", content: "How are you?", ts: 3 },
+			{ role: "assistant", content: "I'm good", ts: 4 },
+			{ role: "user", content: "What's new?", ts: 5 },
+			{ role: "assistant", content: "Not much", ts: 6, isSummary: true }, // Recent summary
+			{ role: "user", content: "Tell me more", ts: 7 },
+		]
+
+		const result = await summarizeConversation(messages, mockApiHandler)
+		expect(result).toEqual(messages)
+		expect(mockApiHandler.createMessage).not.toHaveBeenCalled()
+	})
+
+	it("should summarize conversation and insert summary message", async () => {
+		const messages: ApiMessage[] = [
+			{ role: "user", content: "Hello", ts: 1 },
+			{ role: "assistant", content: "Hi there", ts: 2 },
+			{ role: "user", content: "How are you?", ts: 3 },
+			{ role: "assistant", content: "I'm good", ts: 4 },
+			{ role: "user", content: "What's new?", ts: 5 },
+			{ role: "assistant", content: "Not much", ts: 6 },
+			{ role: "user", content: "Tell me more", ts: 7 },
+		]
+
+		const result = await summarizeConversation(messages, mockApiHandler)
+
+		// Check that the API was called correctly
+		expect(mockApiHandler.createMessage).toHaveBeenCalled()
+		expect(maybeRemoveImageBlocks).toHaveBeenCalled()
+
+		// Verify the structure of the result
+		// The result should be: original messages (except last N) + summary + last N messages
+		expect(result.length).toBe(messages.length + 1) // Original + summary
+
+		// Check that the summary message was inserted correctly
+		const summaryMessage = result[result.length - N_MESSAGES_TO_KEEP - 1]
+		expect(summaryMessage.role).toBe("assistant")
+		expect(summaryMessage.content).toBe("This is a summary")
+		expect(summaryMessage.isSummary).toBe(true)
+
+		// Check that the last N_MESSAGES_TO_KEEP messages are preserved
+		const lastMessages = messages.slice(-N_MESSAGES_TO_KEEP)
+		expect(result.slice(-N_MESSAGES_TO_KEEP)).toEqual(lastMessages)
+	})
+
+	it("should handle empty summary response", async () => {
+		// We need enough messages to trigger summarization
+		const messages: ApiMessage[] = [
+			{ role: "user", content: "Hello", ts: 1 },
+			{ role: "assistant", content: "Hi there", ts: 2 },
+			{ role: "user", content: "How are you?", ts: 3 },
+			{ role: "assistant", content: "I'm good", ts: 4 },
+			{ role: "user", content: "What's new?", ts: 5 },
+			{ role: "assistant", content: "Not much", ts: 6 },
+			{ role: "user", content: "Tell me more", ts: 7 },
+		]
+
+		// Mock console.warn before we call the function
+		const originalWarn = console.warn
+		const mockWarn = jest.fn()
+		console.warn = mockWarn
+
+		// Setup empty summary response
+		const emptyStream = (async function* () {
+			yield { type: "text" as const, text: "" }
+		})()
+
+		// Create a new mock for createMessage that returns empty stream
+		const createMessageMock = jest.fn().mockReturnValue(emptyStream)
+		mockApiHandler.createMessage = createMessageMock as any
+
+		// We need to mock maybeRemoveImageBlocks to return the expected messages
+		;(maybeRemoveImageBlocks as jest.Mock).mockImplementationOnce((messages: any) => {
+			return messages.map(({ role, content }: { role: string; content: any }) => ({ role, content }))
+		})
+
+		const result = await summarizeConversation(messages, mockApiHandler)
+
+		// Should return original messages when summary is empty
+		expect(result).toEqual(messages)
+		expect(mockWarn).toHaveBeenCalledWith("Received empty summary from API")
+
+		// Restore console.warn
+		console.warn = originalWarn
+	})
+
+	it("should correctly format the request to the API", async () => {
+		const messages: ApiMessage[] = [
+			{ role: "user", content: "Hello", ts: 1 },
+			{ role: "assistant", content: "Hi there", ts: 2 },
+			{ role: "user", content: "How are you?", ts: 3 },
+			{ role: "assistant", content: "I'm good", ts: 4 },
+			{ role: "user", content: "What's new?", ts: 5 },
+			{ role: "assistant", content: "Not much", ts: 6 },
+			{ role: "user", content: "Tell me more", ts: 7 },
+		]
+
+		await summarizeConversation(messages, mockApiHandler)
+
+		// Verify the final request message
+		const expectedFinalMessage = {
+			role: "user",
+			content: "Summarize the conversation so far, as described in the prompt instructions.",
+		}
+
+		// Verify that createMessage was called with the correct prompt
+		expect(mockApiHandler.createMessage).toHaveBeenCalledWith(
+			expect.stringContaining("Your task is to create a detailed summary of the conversation"),
+			expect.any(Array),
+		)
+
+		// Check that maybeRemoveImageBlocks was called with the correct messages
+		const mockCallArgs = (maybeRemoveImageBlocks as jest.Mock).mock.calls[0][0] as any[]
+		expect(mockCallArgs[mockCallArgs.length - 1]).toEqual(expectedFinalMessage)
+	})
+})

+ 105 - 0
src/core/condense/index.ts

@@ -0,0 +1,105 @@
+import Anthropic from "@anthropic-ai/sdk"
+import { ApiHandler } from "../../api"
+import { ApiMessage } from "../task-persistence/apiMessages"
+import { maybeRemoveImageBlocks } from "../../api/transform/image-cleaning"
+
+export const N_MESSAGES_TO_KEEP = 3
+
+const SUMMARY_PROMPT = `\
+Your task is to create a detailed summary of the conversation so far, paying close attention to the user's explicit requests and your previous actions.
+This summary should be thorough in capturing technical details, code patterns, and architectural decisions that would be essential for continuing with the conversation and supporting any continuing tasks.
+
+Your summary should be structured as follows:
+Context: The context to continue the conversation with. If applicable based on the current task, this should include:
+  1. Previous Conversation: High level details about what was discussed throughout the entire conversation with the user. This should be written to allow someone to be able to follow the general overarching conversation flow.
+  2. Current Work: Describe in detail what was being worked on prior to this request to summarize the conversation. Pay special attention to the more recent messages in the conversation.
+  3. Key Technical Concepts: List all important technical concepts, technologies, coding conventions, and frameworks discussed, which might be relevant for continuing with this work.
+  4. Relevant Files and Code: If applicable, enumerate specific files and code sections examined, modified, or created for the task continuation. Pay special attention to the most recent messages and changes.
+  5. Problem Solving: Document problems solved thus far and any ongoing troubleshooting efforts.
+  6. Pending Tasks and Next Steps: Outline all pending tasks that you have explicitly been asked to work on, as well as list the next steps you will take for all outstanding work, if applicable. Include code snippets where they add clarity. For any next steps, include direct quotes from the most recent conversation showing exactly what task you were working on and where you left off. This should be verbatim to ensure there's no information loss in context between tasks.
+
+Example summary structure:
+1. Previous Conversation:
+  [Detailed description]
+2. Current Work:
+  [Detailed description]
+3. Key Technical Concepts:
+  - [Concept 1]
+  - [Concept 2]
+  - [...]
+4. Relevant Files and Code:
+  - [File Name 1]
+    - [Summary of why this file is important]
+    - [Summary of the changes made to this file, if any]
+    - [Important Code Snippet]
+  - [File Name 2]
+    - [Important Code Snippet]
+  - [...]
+5. Problem Solving:
+  [Detailed description]
+6. Pending Tasks and Next Steps:
+  - [Task 1 details & next steps]
+  - [Task 2 details & next steps]
+  - [...]
+
+Output only the summary of the conversation so far, without any additional commentary or explanation.
+`
+
+/**
+ * Summarizes the conversation messages using an LLM call
+ *
+ * @param {ApiMessage[]} messages - The conversation messages.
+ * @param {ApiHandler} apiHandler - The API handler to use for the summarization LLM call.
+ * @returns {Promise<ApiMessage[]>} - The input messages, potentially with a new summary message inserted before the last N_MESSAGES_TO_KEEP messages.
+ */
+export async function summarizeConversation(messages: ApiMessage[], apiHandler: ApiHandler): Promise<ApiMessage[]> {
+	const messagesToSummarize = getMessagesSinceLastSummary(messages.slice(0, -N_MESSAGES_TO_KEEP))
+	if (messagesToSummarize.length <= 1) {
+		return messages // Not enough messages to warrant a summary
+	}
+	const keepMessages = messages.slice(-N_MESSAGES_TO_KEEP)
+	for (const message of keepMessages) {
+		if (message.isSummary) {
+			return messages // We recently summarized these messages; it's too soon to summarize again.
+		}
+	}
+	const finalRequestMessage: Anthropic.MessageParam = {
+		role: "user",
+		content: "Summarize the conversation so far, as described in the prompt instructions.",
+	}
+	const requestMessages = maybeRemoveImageBlocks([...messagesToSummarize, finalRequestMessage], apiHandler).map(
+		({ role, content }) => ({ role, content }),
+	)
+	// Note: this doesn't need to be a stream, consider using something like apiHandler.completePrompt
+	const stream = apiHandler.createMessage(SUMMARY_PROMPT, requestMessages)
+	let summary = ""
+	// TODO(canyon): compute usage and cost for this operation and update the global metrics.
+	for await (const chunk of stream) {
+		if (chunk.type === "text") {
+			summary += chunk.text
+		}
+	}
+	summary = summary.trim()
+	if (summary.length === 0) {
+		console.warn("Received empty summary from API")
+		return messages
+	}
+	const summaryMessage: ApiMessage = {
+		role: "assistant",
+		content: summary,
+		ts: keepMessages[0].ts,
+		isSummary: true,
+	}
+
+	return [...messages.slice(0, -N_MESSAGES_TO_KEEP), summaryMessage, ...keepMessages]
+}
+
+/* Returns the list of all messages since the last summary message, including the summary. Returns all messages if there is no summary. */
+export function getMessagesSinceLastSummary(messages: ApiMessage[]): ApiMessage[] {
+	let lastSummaryIndexReverse = [...messages].reverse().findIndex((message) => message.isSummary)
+	if (lastSummaryIndexReverse === -1) {
+		return messages
+	}
+	const lastSummaryIndex = messages.length - lastSummaryIndexReverse - 1
+	return messages.slice(lastSummaryIndex)
+}
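
To make the new exports concrete, a sketch of how they compose (illustrative, not part of the diff; the driver function is hypothetical — in the commit this wiring lives in truncateConversationIfNeeded and Task.ts):

import { ApiHandler } from "../../api"
import { ApiMessage } from "../task-persistence/apiMessages"
import { summarizeConversation, getMessagesSinceLastSummary, N_MESSAGES_TO_KEEP } from "./index"

// Hypothetical driver: condense a long history, then derive the context to send next.
async function condenseAndSelect(history: ApiMessage[], apiHandler: ApiHandler): Promise<ApiMessage[]> {
	// Inserts an assistant message with isSummary: true just before the last
	// N_MESSAGES_TO_KEEP (3) messages, unless a summary is already among them
	// or there are too few messages; in those cases the input is returned as-is.
	const condensed = await summarizeConversation(history, apiHandler)
	// Everything before the last summary is dropped from the next request,
	// so the model sees [summary, ...last N_MESSAGES_TO_KEEP messages].
	return getMessagesSinceLastSummary(condensed)
}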

+ 11 - 10
src/core/sliding-window/__tests__/sliding-window.test.ts

@@ -10,6 +10,7 @@ import {
 	truncateConversation,
 	truncateConversationIfNeeded,
 } from "../index"
+import { ApiMessage } from "../../task-persistence/apiMessages"
 
 // Create a mock ApiHandler for testing
 class MockApiHandler extends BaseProvider {
@@ -41,7 +42,7 @@ const mockApiHandler = new MockApiHandler()
  */
 describe("truncateConversation", () => {
 	it("should retain the first message", () => {
-		const messages: Anthropic.Messages.MessageParam[] = [
+		const messages: ApiMessage[] = [
 			{ role: "user", content: "First message" },
 			{ role: "assistant", content: "Second message" },
 			{ role: "user", content: "Third message" },
@@ -58,7 +59,7 @@ describe("truncateConversation", () => {
 	})
 
 	it("should remove the specified fraction of messages (rounded to even number)", () => {
-		const messages: Anthropic.Messages.MessageParam[] = [
+		const messages: ApiMessage[] = [
 			{ role: "user", content: "First message" },
 			{ role: "assistant", content: "Second message" },
 			{ role: "user", content: "Third message" },
@@ -77,7 +78,7 @@ describe("truncateConversation", () => {
 	})
 
 	it("should round to an even number of messages to remove", () => {
-		const messages: Anthropic.Messages.MessageParam[] = [
+		const messages: ApiMessage[] = [
 			{ role: "user", content: "First message" },
 			{ role: "assistant", content: "Second message" },
 			{ role: "user", content: "Third message" },
@@ -96,7 +97,7 @@ describe("truncateConversation", () => {
 	})
 
 	it("should handle edge case with fracToRemove = 0", () => {
-		const messages: Anthropic.Messages.MessageParam[] = [
+		const messages: ApiMessage[] = [
 			{ role: "user", content: "First message" },
 			{ role: "assistant", content: "Second message" },
 			{ role: "user", content: "Third message" },
@@ -108,7 +109,7 @@ describe("truncateConversation", () => {
 	})
 
 	it("should handle edge case with fracToRemove = 1", () => {
-		const messages: Anthropic.Messages.MessageParam[] = [
+		const messages: ApiMessage[] = [
 			{ role: "user", content: "First message" },
 			{ role: "assistant", content: "Second message" },
 			{ role: "user", content: "Third message" },
@@ -224,7 +225,7 @@ describe("truncateConversationIfNeeded", () => {
 		maxTokens,
 	})
 
-	const messages: Anthropic.Messages.MessageParam[] = [
+	const messages: ApiMessage[] = [
 		{ role: "user", content: "First message" },
 		{ role: "assistant", content: "Second message" },
 		{ role: "user", content: "Third message" },
@@ -328,7 +329,7 @@ describe("truncateConversationIfNeeded", () => {
 		// Test case 1: Small content that won't push us over the threshold
 		const smallContent = [{ type: "text" as const, text: "Small content" }]
 		const smallContentTokens = await estimateTokenCount(smallContent, mockApiHandler)
-		const messagesWithSmallContent: Anthropic.Messages.MessageParam[] = [
+		const messagesWithSmallContent: ApiMessage[] = [
 			...messages.slice(0, -1),
 			{ role: messages[messages.length - 1].role, content: smallContent },
 		]
@@ -353,7 +354,7 @@ describe("truncateConversationIfNeeded", () => {
 			},
 		]
 		const largeContentTokens = await estimateTokenCount(largeContent, mockApiHandler)
-		const messagesWithLargeContent: Anthropic.Messages.MessageParam[] = [
+		const messagesWithLargeContent: ApiMessage[] = [
 			...messages.slice(0, -1),
 			{ role: messages[messages.length - 1].role, content: largeContent },
 		]
@@ -372,7 +373,7 @@ describe("truncateConversationIfNeeded", () => {
 		// Test case 3: Very large content that will definitely exceed threshold
 		const veryLargeContent = [{ type: "text" as const, text: "X".repeat(1000) }]
 		const veryLargeContentTokens = await estimateTokenCount(veryLargeContent, mockApiHandler)
-		const messagesWithVeryLargeContent: Anthropic.Messages.MessageParam[] = [
+		const messagesWithVeryLargeContent: ApiMessage[] = [
 			...messages.slice(0, -1),
 			{ role: messages[messages.length - 1].role, content: veryLargeContent },
 		]
@@ -424,7 +425,7 @@ describe("getMaxTokens", () => {
 	})
 
 	// Reuse across tests for consistency
-	const messages: Anthropic.Messages.MessageParam[] = [
+	const messages: ApiMessage[] = [
 		{ role: "user", content: "First message" },
 		{ role: "assistant", content: "Second message" },
 		{ role: "user", content: "Third message" },

+ 22 - 12
src/core/sliding-window/index.ts

@@ -1,5 +1,7 @@
 import { Anthropic } from "@anthropic-ai/sdk"
 import { ApiHandler } from "../../api"
+import { summarizeConversation } from "../condense"
+import { ApiMessage } from "../task-persistence/apiMessages"
 
 /**
  * Default percentage of the context window to use as a buffer when deciding when to truncate
@@ -27,14 +29,11 @@ export async function estimateTokenCount(
  * The first message is always retained, and a specified fraction (rounded to an even number)
  * of messages from the beginning (excluding the first) is removed.
  *
- * @param {Anthropic.Messages.MessageParam[]} messages - The conversation messages.
+ * @param {ApiMessage[]} messages - The conversation messages.
  * @param {number} fracToRemove - The fraction (between 0 and 1) of messages (excluding the first) to remove.
- * @returns {Anthropic.Messages.MessageParam[]} The truncated conversation messages.
+ * @returns {ApiMessage[]} The truncated conversation messages.
  */
-export function truncateConversation(
-	messages: Anthropic.Messages.MessageParam[],
-	fracToRemove: number,
-): Anthropic.Messages.MessageParam[] {
+export function truncateConversation(messages: ApiMessage[], fracToRemove: number): ApiMessage[] {
 	const truncatedMessages = [messages[0]]
 	const rawMessagesToRemove = Math.floor((messages.length - 1) * fracToRemove)
 	const messagesToRemove = rawMessagesToRemove - (rawMessagesToRemove % 2)
@@ -48,20 +47,22 @@
 * Conditionally truncates the conversation messages if the total token count
 * exceeds the model's limit, considering the size of incoming content.
 *
- * @param {Anthropic.Messages.MessageParam[]} messages - The conversation messages.
+ * @param {ApiMessage[]} messages - The conversation messages.
 * @param {number} totalTokens - The total number of tokens in the conversation (excluding the last user message).
 * @param {number} contextWindow - The context window size.
 * @param {number} maxTokens - The maximum number of tokens allowed.
 * @param {ApiHandler} apiHandler - The API handler to use for token counting.
- * @returns {Anthropic.Messages.MessageParam[]} The original or truncated conversation messages.
+ * @param {boolean} autoCondenseContext - Whether to use LLM summarization or sliding window implementation
+ * @returns {ApiMessage[]} The original or truncated conversation messages.
 */
 
 type TruncateOptions = {
-	messages: Anthropic.Messages.MessageParam[]
+	messages: ApiMessage[]
 	totalTokens: number
 	contextWindow: number
 	maxTokens?: number | null
 	apiHandler: ApiHandler
+	autoCondenseContext?: boolean
 }
 
 /**
@@ -69,7 +70,7 @@ type TruncateOptions = {
 * exceeds the model's limit, considering the size of incoming content.
 *
 * @param {TruncateOptions} options - The options for truncation
- * @returns {Promise<Anthropic.Messages.MessageParam[]>} The original or truncated conversation messages.
+ * @returns {Promise<ApiMessage[]>} The original or truncated conversation messages.
 */
 export async function truncateConversationIfNeeded({
 	messages,
@@ -77,7 +78,8 @@
 	contextWindow,
 	maxTokens,
 	apiHandler,
-}: TruncateOptions): Promise<Anthropic.Messages.MessageParam[]> {
+	autoCondenseContext,
+}: TruncateOptions): Promise<ApiMessage[]> {
 	// Calculate the maximum tokens reserved for response
 	const reservedTokens = maxTokens || contextWindow * 0.2
 
@@ -96,5 +98,13 @@
 	const allowedTokens = contextWindow * (1 - TOKEN_BUFFER_PERCENTAGE) - reservedTokens
 
 	// Determine if truncation is needed and apply if necessary
-	return effectiveTokens > allowedTokens ? truncateConversation(messages, 0.5) : messages
+	if (effectiveTokens <= allowedTokens) {
+		return messages
+	} else if (autoCondenseContext) {
+		const summarizedMessages = await summarizeConversation(messages, apiHandler)
+		if (messages !== summarizedMessages) {
+			return summarizedMessages
+		}
+	}
+	return truncateConversation(messages, 0.5)
 }
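
A sketch of a call site for the extended options (illustrative, not part of the diff; the numbers and wrapper are hypothetical, mirroring how Task.ts calls this):

import { ApiHandler } from "../../api"
import { ApiMessage } from "../task-persistence/apiMessages"
import { truncateConversationIfNeeded } from "./index"

async function trim(messages: ApiMessage[], apiHandler: ApiHandler): Promise<ApiMessage[]> {
	return truncateConversationIfNeeded({
		messages,
		totalTokens: 7500, // tokens used so far, excluding the last user message
		contextWindow: 8000, // model's context window
		maxTokens: 1000, // reserved for the response
		apiHandler,
		// With the experiment on, summarization is tried first; the sliding
		// window remains the fallback when summarization returns the input
		// unchanged (too few messages, or a summary among the kept messages).
		autoCondenseContext: true,
	})
}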

+ 1 - 1
src/core/task-persistence/apiMessages.ts

@@ -8,7 +8,7 @@ import { fileExistsAtPath } from "../../utils/fs"
 import { GlobalFileNames } from "../../shared/globalFileNames"
 import { getTaskDirectoryPath } from "../../shared/storagePathManager"
 
-export type ApiMessage = Anthropic.MessageParam & { ts?: number }
+export type ApiMessage = Anthropic.MessageParam & { ts?: number; isSummary?: boolean }
 
 export async function readApiMessages({
 	taskId,
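
In practice a summary entry is an ordinary assistant message with the new flag set; a sketch (values illustrative):

import { ApiMessage } from "./apiMessages"

// Shape of a condensed-context entry as produced by summarizeConversation:
const summaryMessage: ApiMessage = {
	role: "assistant",
	content: "1. Previous Conversation: ...",
	ts: 1620000000000, // inherits the timestamp of the first kept message
	isSummary: true, // new optional flag; absent on ordinary messages
}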

+ 15 - 33
src/core/task/Task.ts

@@ -79,6 +79,9 @@ import {
 	checkpointDiff,
 } from "../checkpoints"
 import { processUserContentMentions } from "../mentions/processUserContentMentions"
+import { ApiMessage } from "../task-persistence/apiMessages"
+import { getMessagesSinceLastSummary } from "../condense"
+import { maybeRemoveImageBlocks } from "../../api/transform/image-cleaning"
 
 export type ClineEvents = {
 	message: [{ action: "created" | "updated"; message: ClineMessage }]
@@ -155,7 +158,7 @@ export class Task extends EventEmitter<ClineEvents> {
 	didEditFile: boolean = false
 
 	// LLM Messages & Chat Messages
-	apiConversationHistory: (Anthropic.MessageParam & { ts?: number })[] = []
+	apiConversationHistory: ApiMessage[] = []
 	clineMessages: ClineMessage[] = []
 
 	// Ask
@@ -284,7 +287,7 @@ export class Task extends EventEmitter<ClineEvents> {
 
 	// API Messages
 
-	private async getSavedApiConversationHistory(): Promise<Anthropic.MessageParam[]> {
+	private async getSavedApiConversationHistory(): Promise<ApiMessage[]> {
 		return readApiMessages({ taskId: this.taskId, globalStoragePath: this.globalStoragePath })
 	}
 
@@ -294,7 +297,7 @@ export class Task extends EventEmitter<ClineEvents> {
 		await this.saveApiConversationHistory()
 	}
 
-	async overwriteApiConversationHistory(newHistory: Anthropic.MessageParam[]) {
+	async overwriteApiConversationHistory(newHistory: ApiMessage[]) {
 		this.apiConversationHistory = newHistory
 		await this.saveApiConversationHistory()
 	}
@@ -697,8 +700,7 @@ export class Task extends EventEmitter<ClineEvents> {
 
 		// Make sure that the api conversation history can be resumed by the API,
 		// even if it goes out of sync with cline messages.
-		let existingApiConversationHistory: Anthropic.Messages.MessageParam[] =
-			await this.getSavedApiConversationHistory()
+		let existingApiConversationHistory: ApiMessage[] = await this.getSavedApiConversationHistory()
 
 		// v2.0 xml tags refactor caveat: since we don't use tools anymore, we need to replace all tool use blocks with a text block since the API disallows conversations with tool uses and no tool schema
 		const conversationWithoutToolBlocks = existingApiConversationHistory.map((message) => {
@@ -742,7 +744,7 @@ export class Task extends EventEmitter<ClineEvents> {
 		// if the last message is a user message, we can need to get the assistant message before it to see if it made tool calls, and if so, fill in the remaining tool responses with 'interrupted'
 
 		let modifiedOldUserContent: Anthropic.Messages.ContentBlockParam[] // either the last message if its user message, or the user message before the last (assistant) message
-		let modifiedApiConversationHistory: Anthropic.Messages.MessageParam[] // need to remove the last user message to replace with new modified user message
+		let modifiedApiConversationHistory: ApiMessage[] // need to remove the last user message to replace with new modified user message
 		if (existingApiConversationHistory.length > 0) {
 			const lastMessage = existingApiConversationHistory[existingApiConversationHistory.length - 1]
 
@@ -768,7 +770,7 @@ export class Task extends EventEmitter<ClineEvents> {
 					modifiedOldUserContent = []
 				}
 			} else if (lastMessage.role === "user") {
-				const previousAssistantMessage: Anthropic.Messages.MessageParam | undefined =
+				const previousAssistantMessage: ApiMessage | undefined =
 					existingApiConversationHistory[existingApiConversationHistory.length - 2]
 
 				const existingUserContent: Anthropic.Messages.ContentBlockParam[] = Array.isArray(lastMessage.content)
@@ -1480,44 +1482,24 @@ export class Task extends EventEmitter<ClineEvents> {
 
 			const contextWindow = modelInfo.contextWindow
 
+			const autoCondenseContext = experiments?.autoCondenseContext ?? false
 			const trimmedMessages = await truncateConversationIfNeeded({
 				messages: this.apiConversationHistory,
 				totalTokens,
 				maxTokens,
 				contextWindow,
 				apiHandler: this.api,
+				autoCondenseContext,
 			})
-
 			if (trimmedMessages !== this.apiConversationHistory) {
 				await this.overwriteApiConversationHistory(trimmedMessages)
 			}
 		}
 
-		// Clean conversation history by:
-		// 1. Converting to Anthropic.MessageParam by spreading only the API-required properties.
-		// 2. Converting image blocks to text descriptions if model doesn't support images.
-		const cleanConversationHistory = this.apiConversationHistory.map(({ role, content }) => {
-			// Handle array content (could contain image blocks).
-			if (Array.isArray(content)) {
-				if (!this.api.getModel().info.supportsImages) {
-					// Convert image blocks to text descriptions.
-					content = content.map((block) => {
-						if (block.type === "image") {
-							// Convert image blocks to text descriptions.
-							// Note: We can't access the actual image content/url due to API limitations,
-							// but we can indicate that an image was present in the conversation.
-							return {
-								type: "text",
-								text: "[Referenced image in conversation]",
-							}
-						}
-						return block
-					})
-				}
-			}
-
-			return { role, content }
-		})
+		const messagesSinceLastSummary = getMessagesSinceLastSummary(this.apiConversationHistory)
+		const cleanConversationHistory = maybeRemoveImageBlocks(messagesSinceLastSummary, this.api).map(
+			({ role, content }) => ({ role, content }),
+		)
 
 		const stream = this.api.createMessage(systemPrompt, cleanConversationHistory)
 		const iterator = stream[Symbol.asyncIterator]()
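
The replaced inline cleanup now reads as a two-step pipeline; a standalone sketch of the equivalent logic (illustrative, with the Task fields passed as parameters):

import { ApiHandler } from "../../api"
import { ApiMessage } from "../task-persistence/apiMessages"
import { getMessagesSinceLastSummary } from "../condense"
import { maybeRemoveImageBlocks } from "../../api/transform/image-cleaning"

// Drop everything that predates the last summary, strip image blocks the model
// can't accept, then keep only the fields the API expects on each message.
function buildRequestHistory(history: ApiMessage[], api: ApiHandler) {
	const sinceSummary = getMessagesSinceLastSummary(history)
	return maybeRemoveImageBlocks(sinceSummary, api).map(({ role, content }) => ({ role, content }))
}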

+ 3 - 0
src/exports/roo-code.d.ts

@@ -100,6 +100,7 @@ type GlobalSettings = {
 	fuzzyMatchThreshold?: number | undefined
 	experiments?:
 		| {
+				autoCondenseContext: boolean
 				powerSteering: boolean
 		  }
 		| undefined
@@ -786,6 +787,7 @@ type IpcMessage =
 								terminalCompressProgressBar?: boolean | undefined
 								experiments?:
 									| {
+											autoCondenseContext: boolean
 											powerSteering: boolean
 									  }
 									| undefined
@@ -1248,6 +1250,7 @@ type TaskCommand =
 					terminalCompressProgressBar?: boolean | undefined
 					experiments?:
 						| {
+								autoCondenseContext: boolean
 								powerSteering: boolean
 						  }
 						| undefined

+ 3 - 0
src/exports/types.ts

@@ -100,6 +100,7 @@ type GlobalSettings = {
 	fuzzyMatchThreshold?: number | undefined
 	experiments?:
 		| {
+				autoCondenseContext: boolean
 				powerSteering: boolean
 		  }
 		| undefined
@@ -798,6 +799,7 @@ type IpcMessage =
 								terminalCompressProgressBar?: boolean | undefined
 								experiments?:
 									| {
+											autoCondenseContext: boolean
 											powerSteering: boolean
 									  }
 									| undefined
@@ -1262,6 +1264,7 @@ type TaskCommand =
 					terminalCompressProgressBar?: boolean | undefined
 					experiments?:
 						| {
+								autoCondenseContext: boolean
 								powerSteering: boolean
 						  }
 						| undefined

+ 2 - 1
src/schemas/index.ts

@@ -312,7 +312,7 @@ export type CommandExecutionStatus = z.infer<typeof commandExecutionStatusSchema
  * ExperimentId
  */
 
-export const experimentIds = ["powerSteering"] as const
+export const experimentIds = ["autoCondenseContext", "powerSteering"] as const
 
 export const experimentIdsSchema = z.enum(experimentIds)
 
@@ -323,6 +323,7 @@ export type ExperimentId = z.infer<typeof experimentIdsSchema>
  */
 
 const experimentsSchema = z.object({
+	autoCondenseContext: z.boolean(),
 	powerSteering: z.boolean(),
 })
 

+ 30 - 2
src/shared/__tests__/experiments.test.ts

@@ -10,17 +10,28 @@ describe("experiments", () => {
 		})
 	})
 
+	describe("AUTO_CONDENSE_CONTEXT", () => {
+		it("is configured correctly", () => {
+			expect(EXPERIMENT_IDS.AUTO_CONDENSE_CONTEXT).toBe("autoCondenseContext")
+			expect(experimentConfigsMap.AUTO_CONDENSE_CONTEXT).toMatchObject({
+				enabled: false,
+			})
+		})
+	})
+
 	describe("isEnabled", () => {
-		it("returns false when experiment is not enabled", () => {
+		it("returns false when POWER_STEERING experiment is not enabled", () => {
 			const experiments: Record<ExperimentId, boolean> = {
 				powerSteering: false,
+				autoCondenseContext: false,
 			}
 			expect(Experiments.isEnabled(experiments, EXPERIMENT_IDS.POWER_STEERING)).toBe(false)
 		})
 
-		it("returns true when experiment is enabled", () => {
+		it("returns true when experiment POWER_STEERING is enabled", () => {
 			const experiments: Record<ExperimentId, boolean> = {
 				powerSteering: true,
+				autoCondenseContext: false,
 			}
 			expect(Experiments.isEnabled(experiments, EXPERIMENT_IDS.POWER_STEERING)).toBe(true)
 		})
@@ -28,8 +39,25 @@
 		it("returns false when experiment is not present", () => {
 			const experiments: Record<ExperimentId, boolean> = {
 				powerSteering: false,
+				autoCondenseContext: false,
 			}
 			expect(Experiments.isEnabled(experiments, EXPERIMENT_IDS.POWER_STEERING)).toBe(false)
 		})
+
+		it("returns false when AUTO_CONDENSE_CONTEXT experiment is not enabled", () => {
+			const experiments: Record<ExperimentId, boolean> = {
+				powerSteering: false,
+				autoCondenseContext: false,
+			}
+			expect(Experiments.isEnabled(experiments, EXPERIMENT_IDS.AUTO_CONDENSE_CONTEXT)).toBe(false)
+		})
+
+		it("returns true when AUTO_CONDENSE_CONTEXT experiment is enabled", () => {
+			const experiments: Record<ExperimentId, boolean> = {
+				powerSteering: false,
+				autoCondenseContext: true,
+			}
+			expect(Experiments.isEnabled(experiments, EXPERIMENT_IDS.AUTO_CONDENSE_CONTEXT)).toBe(true)
+		})
 	})
 })

+ 2 - 0
src/shared/experiments.ts

@@ -4,6 +4,7 @@ import { AssertEqual, Equals, Keys, Values } from "../utils/type-fu"
 export type { ExperimentId }
 
 export const EXPERIMENT_IDS = {
+	AUTO_CONDENSE_CONTEXT: "autoCondenseContext",
 	POWER_STEERING: "powerSteering",
 } as const satisfies Record<string, ExperimentId>
 
@@ -16,6 +17,7 @@ interface ExperimentConfig {
 }
 
 export const experimentConfigsMap: Record<ExperimentKey, ExperimentConfig> = {
+	AUTO_CONDENSE_CONTEXT: { enabled: false },
 	POWER_STEERING: { enabled: false },
 }
 
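
A sketch of gating on the new flag (illustrative; the experiments-as-Experiments import is assumed from the test file's usage, and the settings record is hypothetical):

import { EXPERIMENT_IDS, experiments as Experiments, ExperimentId } from "./experiments"

// Illustrative settings record; real values come from user configuration.
const settings: Record<ExperimentId, boolean> = {
	autoCondenseContext: true,
	powerSteering: false,
}

// Off by default (enabled: false in experimentConfigsMap) until the user opts in.
const useCondense = Experiments.isEnabled(settings, EXPERIMENT_IDS.AUTO_CONDENSE_CONTEXT)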

+ 2 - 0
webview-ui/src/context/__tests__/ExtensionStateContext.test.tsx

@@ -215,6 +215,7 @@ describe("mergeExtensionState", () => {
 			apiConfiguration: { modelMaxThinkingTokens: 456, modelTemperature: 0.3 },
 			experiments: {
 				powerSteering: true,
+				autoCondenseContext: true,
 			} as Record<ExperimentId, boolean>,
 		}
 
@@ -227,6 +228,7 @@
 
 		expect(result.experiments).toEqual({
 			powerSteering: true,
+			autoCondenseContext: true,
 		})
 	})
 })

+ 4 - 0
webview-ui/src/i18n/locales/ca/settings.json

@@ -396,6 +396,10 @@
 	},
 	"experimental": {
 		"warning": "⚠️",
+		"AUTO_CONDENSE_CONTEXT": {
+			"name": "Condensar intel·ligentment la finestra de context",
+			"description": "Utilitza una crida LLM per resumir la conversa anterior quan la finestra de context de la tasca està gairebé plena, en lloc d'eliminar missatges antics. Avís: el cost de resumir actualment no s'inclou en els costos d'API mostrats a la interfície."
+		},
 		"DIFF_STRATEGY_UNIFIED": {
 			"name": "Utilitzar estratègia diff unificada experimental",
 			"description": "Activar l'estratègia diff unificada experimental. Aquesta estratègia podria reduir el nombre de reintents causats per errors del model, però pot causar comportaments inesperats o edicions incorrectes. Activeu-la només si enteneu els riscos i esteu disposats a revisar acuradament tots els canvis."

+ 4 - 0
webview-ui/src/i18n/locales/de/settings.json

@@ -396,6 +396,10 @@
 	},
 	"experimental": {
 		"warning": "⚠️",
+		"AUTO_CONDENSE_CONTEXT": {
+			"name": "Kontextfenster intelligent komprimieren",
+			"description": "Verwendet einen LLM-Aufruf, um das vorherige Gespräch zusammenzufassen, wenn das Kontextfenster der Aufgabe fast voll ist, anstatt alte Nachrichten zu verwerfen. Hinweis: Die Kosten für die Zusammenfassung sind derzeit nicht in den in der Benutzeroberfläche angezeigten API-Kosten enthalten."
+		},
 		"DIFF_STRATEGY_UNIFIED": {
 			"name": "Experimentelle einheitliche Diff-Strategie verwenden",
 			"description": "Aktiviert die experimentelle einheitliche Diff-Strategie. Diese Strategie könnte die Anzahl der durch Modellfehler verursachten Wiederholungen reduzieren, kann aber unerwartetes Verhalten oder falsche Bearbeitungen verursachen. Nur aktivieren, wenn du die Risiken verstehst und bereit bist, alle Änderungen sorgfältig zu überprüfen."

+ 4 - 0
webview-ui/src/i18n/locales/en/settings.json

@@ -396,6 +396,10 @@
 	},
 	"experimental": {
 		"warning": "⚠️",
+		"AUTO_CONDENSE_CONTEXT": {
+			"name": "Intelligently condense the context window",
+			"description": "Uses an LLM call to summarize the past conversation when the task's context window is almost full, rather than dropping old messages. Disclaimer: the cost of summarizing is not currently included in the API costs shown in the UI."
+		},
 		"DIFF_STRATEGY_UNIFIED": {
 			"name": "Use experimental unified diff strategy",
 			"description": "Enable the experimental unified diff strategy. This strategy might reduce the number of retries caused by model errors but may cause unexpected behavior or incorrect edits. Only enable if you understand the risks and are willing to carefully review all changes."

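As the English description above notes, condensing spends one extra LLM call whose cost is not yet reflected in the UI. A rough sketch of how that request could be assembled; the prompt wording and helper name are assumptions, not the shipped summarization prompt:

```ts
// Sketch only: the instruction text is illustrative, not the actual prompt.
type ApiMessage = { role: "user" | "assistant"; content: string }

function buildCondenseRequest(history: ApiMessage[]): ApiMessage[] {
	return [
		...history,
		{
			role: "user",
			content:
				"Summarize the conversation above, preserving file paths, code changes, " +
				"and decisions, so the task can continue from the summary alone.",
		},
	]
}
```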
+ 4 - 0
webview-ui/src/i18n/locales/es/settings.json

@@ -396,6 +396,10 @@
 	},
 	"experimental": {
 		"warning": "⚠️",
+		"AUTO_CONDENSE_CONTEXT": {
+			"name": "Condensar inteligentemente la ventana de contexto",
+			"description": "Utiliza una llamada LLM para resumir la conversación anterior cuando la ventana de contexto de la tarea está casi llena, en lugar de eliminar mensajes antiguos. Aviso: el costo de resumir actualmente no está incluido en los costos de API mostrados en la interfaz."
+		},
 		"DIFF_STRATEGY_UNIFIED": {
 			"name": "Usar estrategia de diff unificada experimental",
 			"description": "Habilitar la estrategia de diff unificada experimental. Esta estrategia podría reducir el número de reintentos causados por errores del modelo, pero puede causar comportamientos inesperados o ediciones incorrectas. Habilítela solo si comprende los riesgos y está dispuesto a revisar cuidadosamente todos los cambios."

+ 4 - 0
webview-ui/src/i18n/locales/fr/settings.json

@@ -396,6 +396,10 @@
 	},
 	"experimental": {
 		"warning": "⚠️",
+		"AUTO_CONDENSE_CONTEXT": {
+			"name": "Condenser intelligemment la fenêtre de contexte",
+			"description": "Utilise un appel LLM pour résumer la conversation précédente lorsque la fenêtre de contexte de la tâche est presque pleine, plutôt que de supprimer les anciens messages. Avertissement : le coût de la synthèse n'est actuellement pas inclus dans les coûts API affichés dans l'interface."
+		},
 		"DIFF_STRATEGY_UNIFIED": {
 			"name": "Utiliser la stratégie diff unifiée expérimentale",
 			"description": "Activer la stratégie diff unifiée expérimentale. Cette stratégie pourrait réduire le nombre de tentatives causées par des erreurs de modèle, mais peut provoquer des comportements inattendus ou des modifications incorrectes. Activez-la uniquement si vous comprenez les risques et êtes prêt à examiner attentivement tous les changements."

+ 4 - 0
webview-ui/src/i18n/locales/hi/settings.json

@@ -396,6 +396,10 @@
 	},
 	"experimental": {
 		"warning": "⚠️",
+		"AUTO_CONDENSE_CONTEXT": {
+			"name": "संदर्भ विंडो को बुद्धिमानी से संघनित करें",
+			"description": "जब कार्य का संदर्भ विंडो लगभग भर जाता है, तो पुराने संदेशों को हटाने के बजाय पिछली बातचीत को संक्षेप में प्रस्तुत करने के लिए LLM कॉल का उपयोग करता है। अस्वीकरण: संक्षेपण की लागत वर्तमान में UI में दिखाए गए API लागतों में शामिल नहीं है।"
+		},
 		"DIFF_STRATEGY_UNIFIED": {
 			"name": "प्रायोगिक एकीकृत diff रणनीति का उपयोग करें",
 			"description": "प्रायोगिक एकीकृत diff रणनीति सक्षम करें। यह रणनीति मॉडल त्रुटियों के कारण पुनः प्रयासों की संख्या को कम कर सकती है, लेकिन अप्रत्याशित व्यवहार या गलत संपादन का कारण बन सकती है। केवल तभी सक्षम करें जब आप जोखिमों को समझते हों और सभी परिवर्तनों की सावधानीपूर्वक समीक्षा करने के लिए तैयार हों।"

+ 4 - 0
webview-ui/src/i18n/locales/it/settings.json

@@ -396,6 +396,10 @@
 	},
 	"experimental": {
 		"warning": "⚠️",
+		"AUTO_CONDENSE_CONTEXT": {
+			"name": "Condensa intelligentemente la finestra di contesto",
+			"description": "Utilizza una chiamata LLM per riassumere la conversazione precedente quando la finestra di contesto dell'attività è quasi piena, invece di eliminare i messaggi vecchi. Avviso: il costo della sintesi non è attualmente incluso nei costi API mostrati nell'interfaccia."
+		},
 		"DIFF_STRATEGY_UNIFIED": {
 			"name": "Usa strategia diff unificata sperimentale",
 			"description": "Abilita la strategia diff unificata sperimentale. Questa strategia potrebbe ridurre il numero di tentativi causati da errori del modello, ma può causare comportamenti imprevisti o modifiche errate. Abilitala solo se comprendi i rischi e sei disposto a rivedere attentamente tutte le modifiche."

+ 4 - 0
webview-ui/src/i18n/locales/ja/settings.json

@@ -396,6 +396,10 @@
 	},
 	"experimental": {
 		"warning": "⚠️",
+		"AUTO_CONDENSE_CONTEXT": {
+			"name": "コンテキストウィンドウをインテリジェントに圧縮する",
+			"description": "タスクのコンテキストウィンドウがほぼいっぱいになったとき、古いメッセージを削除する代わりに、LLM呼び出しを使用して過去の会話を要約します。免責事項:要約のコストは現在UIに表示されるAPIコストには含まれていません。"
+		},
 		"DIFF_STRATEGY_UNIFIED": {
 			"name": "実験的な統合diff戦略を使用する",
 			"description": "実験的な統合diff戦略を有効にします。この戦略はモデルエラーによる再試行の回数を減らす可能性がありますが、予期しない動作や不正確な編集を引き起こす可能性があります。リスクを理解し、すべての変更を注意深く確認する準備がある場合にのみ有効にしてください。"

+ 4 - 0
webview-ui/src/i18n/locales/ko/settings.json

@@ -396,6 +396,10 @@
 	},
 	"experimental": {
 		"warning": "⚠️",
+		"AUTO_CONDENSE_CONTEXT": {
+			"name": "컨텍스트 창을 지능적으로 압축",
+			"description": "작업의 컨텍스트 창이 거의 가득 찼을 때 이전 메시지를 삭제하는 대신 LLM 호출을 사용하여 이전 대화를 요약합니다. 참고: 요약 비용은 현재 UI에 표시된 API 비용에 포함되지 않습니다."
+		},
 		"DIFF_STRATEGY_UNIFIED": {
 			"name": "실험적 통합 diff 전략 사용",
 			"description": "실험적 통합 diff 전략을 활성화합니다. 이 전략은 모델 오류로 인한 재시도 횟수를 줄일 수 있지만 예기치 않은 동작이나 잘못된 편집을 일으킬 수 있습니다. 위험을 이해하고 모든 변경 사항을 신중하게 검토할 의향이 있는 경우에만 활성화하십시오."

+ 4 - 0
webview-ui/src/i18n/locales/nl/settings.json

@@ -396,6 +396,10 @@
 	},
 	"experimental": {
 		"warning": "⚠️",
+		"AUTO_CONDENSE_CONTEXT": {
+			"name": "Contextvenster intelligent comprimeren",
+			"description": "Gebruikt een LLM-aanroep om eerdere gesprekken samen te vatten wanneer het contextvenster van de taak bijna vol is, in plaats van oude berichten te verwijderen. Let op: de kosten van het samenvatten zijn momenteel niet inbegrepen in de API-kosten die in de interface worden getoond."
+		},
 		"DIFF_STRATEGY_UNIFIED": {
 			"name": "Experimentele unified diff-strategie gebruiken",
 			"description": "Schakel de experimentele unified diff-strategie in. Deze strategie kan het aantal herhalingen door model fouten verminderen, maar kan onverwacht gedrag of onjuiste bewerkingen veroorzaken. Alleen inschakelen als je de risico's begrijpt en wijzigingen zorgvuldig wilt controleren."

+ 4 - 0
webview-ui/src/i18n/locales/pl/settings.json

@@ -396,6 +396,10 @@
 	},
 	"experimental": {
 		"warning": "⚠️",
+		"AUTO_CONDENSE_CONTEXT": {
+			"name": "Inteligentnie kondensuj okno kontekstu",
+			"description": "Używa wywołania LLM do podsumowania wcześniejszej rozmowy, gdy okno kontekstu zadania jest prawie pełne, zamiast usuwać stare wiadomości. Zastrzeżenie: koszt podsumowania nie jest obecnie uwzględniony w kosztach API pokazywanych w interfejsie."
+		},
 		"DIFF_STRATEGY_UNIFIED": {
 			"name": "Użyj eksperymentalnej ujednoliconej strategii diff",
 			"description": "Włącz eksperymentalną ujednoliconą strategię diff. Ta strategia może zmniejszyć liczbę ponownych prób spowodowanych błędami modelu, ale może powodować nieoczekiwane zachowanie lub nieprawidłowe edycje. Włącz tylko jeśli rozumiesz ryzyko i jesteś gotów dokładnie przeglądać wszystkie zmiany."

+ 4 - 0
webview-ui/src/i18n/locales/pt-BR/settings.json

@@ -396,6 +396,10 @@
 	},
 	"experimental": {
 		"warning": "⚠️",
+		"AUTO_CONDENSE_CONTEXT": {
+			"name": "Condensar inteligentemente a janela de contexto",
+			"description": "Usa uma chamada LLM para resumir a conversa anterior quando a janela de contexto da tarefa está quase cheia, em vez de descartar mensagens antigas. Aviso: o custo de resumir não está atualmente incluído nos custos de API mostrados na interface."
+		},
 		"DIFF_STRATEGY_UNIFIED": {
 			"name": "Usar estratégia diff unificada experimental",
 			"description": "Ativar a estratégia diff unificada experimental. Esta estratégia pode reduzir o número de novas tentativas causadas por erros do modelo, mas pode causar comportamento inesperado ou edições incorretas. Ative apenas se compreender os riscos e estiver disposto a revisar cuidadosamente todas as alterações."

+ 4 - 0
webview-ui/src/i18n/locales/ru/settings.json

@@ -396,6 +396,10 @@
 	},
 	"experimental": {
 		"warning": "⚠️",
+		"AUTO_CONDENSE_CONTEXT": {
+			"name": "Интеллектуальное сжатие контекстного окна",
+			"description": "Использует вызов LLM для обобщения предыдущего разговора, когда контекстное окно задачи почти заполнено, вместо удаления старых сообщений. Примечание: стоимость обобщения в настоящее время не включена в стоимость API, отображаемую в интерфейсе."
+		},
 		"DIFF_STRATEGY_UNIFIED": {
 			"name": "Использовать экспериментальную стратегию унифицированного диффа",
 			"description": "Включает экспериментальную стратегию унифицированного диффа. Может уменьшить количество повторных попыток из-за ошибок модели, но может привести к неожиданному поведению или неверным правкам. Включайте только если готовы внимательно проверять все изменения."

+ 4 - 0
webview-ui/src/i18n/locales/tr/settings.json

@@ -396,6 +396,10 @@
 	},
 	"experimental": {
 		"warning": "⚠️",
+		"AUTO_CONDENSE_CONTEXT": {
+			"name": "Bağlam penceresini akıllıca sıkıştır",
+			"description": "Görevin bağlam penceresi neredeyse dolduğunda, eski mesajları atmak yerine önceki konuşmayı özetlemek için bir LLM çağrısı kullanır. Not: Özetleme maliyeti şu anda arayüzde gösterilen API maliyetlerine dahil değildir."
+		},
 		"DIFF_STRATEGY_UNIFIED": {
 			"name": "Deneysel birleştirilmiş diff stratejisini kullan",
 			"description": "Deneysel birleştirilmiş diff stratejisini etkinleştir. Bu strateji, model hatalarından kaynaklanan yeniden deneme sayısını azaltabilir, ancak beklenmeyen davranışlara veya hatalı düzenlemelere neden olabilir. Yalnızca riskleri anlıyorsanız ve tüm değişiklikleri dikkatlice incelemeye istekliyseniz etkinleştirin."

+ 4 - 0
webview-ui/src/i18n/locales/vi/settings.json

@@ -396,6 +396,10 @@
 	},
 	"experimental": {
 		"warning": "⚠️",
+		"AUTO_CONDENSE_CONTEXT": {
+			"name": "Nén cửa sổ ngữ cảnh một cách thông minh",
+			"description": "Sử dụng một lệnh gọi LLM để tóm tắt cuộc trò chuyện trước đó khi cửa sổ ngữ cảnh của tác vụ gần đầy, thay vì loại bỏ các tin nhắn cũ. Lưu ý: chi phí tóm tắt hiện không được tính vào chi phí API hiển thị trong giao diện người dùng."
+		},
 		"DIFF_STRATEGY_UNIFIED": {
 			"name": "Sử dụng chiến lược diff thống nhất thử nghiệm",
 			"description": "Bật chiến lược diff thống nhất thử nghiệm. Chiến lược này có thể giảm số lần thử lại do lỗi mô hình nhưng có thể gây ra hành vi không mong muốn hoặc chỉnh sửa không chính xác. Chỉ bật nếu bạn hiểu rõ các rủi ro và sẵn sàng xem xét cẩn thận tất cả các thay đổi."

+ 4 - 0
webview-ui/src/i18n/locales/zh-CN/settings.json

@@ -396,6 +396,10 @@
 	},
 	"experimental": {
 		"warning": "⚠️",
+		"AUTO_CONDENSE_CONTEXT": {
+			"name": "智能压缩上下文窗口",
+			"description": "当任务上下文窗口接近填满时,使用 LLM 调用来总结过去的对话,而不是删除旧消息。注意:目前 UI 中显示的 API 费用不包括总结的成本。"
+		},
 		"DIFF_STRATEGY_UNIFIED": {
 			"name": "启用diff更新工具",
 			"description": "可减少因模型错误导致的重复尝试,但可能引发意外操作。启用前请确保理解风险并会仔细检查所有修改。"

+ 4 - 0
webview-ui/src/i18n/locales/zh-TW/settings.json

@@ -396,6 +396,10 @@
 	},
 	"experimental": {
 		"warning": "⚠️",
+		"AUTO_CONDENSE_CONTEXT": {
+			"name": "智慧壓縮上下文視窗",
+			"description": "當工作的上下文視窗接近填滿時,使用 LLM 呼叫來摘要過去的對話,而非捨棄舊訊息。注意:目前 UI 中顯示的 API 費用並未包含摘要的成本。"
+		},
 		"DIFF_STRATEGY_UNIFIED": {
 			"name": "使用實驗性統一差異比對策略",
 			"description": "啟用實驗性的統一差異比對策略。此策略可能減少因模型錯誤而導致的重試次數,但也可能導致意外行為或錯誤的編輯。請務必了解風險,並願意仔細檢查所有變更後再啟用。"