
[Condense] Add a slider for the context condense threshold (#3790)

* [Condense] Add a slider for the context condense threshold

* slider UI

* condense if we reach the threshold

* fixes

* test typing fixes

* add more truncate tests

* changeset

* update translations

* fix missing type
Canyon Robins 8 months ago
Parent
Commit
1fe65364ee
34 changed files with 349 additions and 51 deletions
  1. .changeset/whole-swans-cheer.md (+5 -0)
  2. src/core/sliding-window/__tests__/sliding-window.test.ts (+127 -0)
  3. src/core/sliding-window/index.ts (+20 -11)
  4. src/core/task/Task.ts (+9 -2)
  5. src/core/webview/ClineProvider.ts (+3 -0)
  6. src/core/webview/__tests__/ClineProvider.test.ts (+22 -0)
  7. src/core/webview/webviewMessageHandler.ts (+4 -0)
  8. src/exports/roo-code.d.ts (+3 -0)
  9. src/exports/types.ts (+3 -0)
  10. src/schemas/index.ts (+2 -0)
  11. src/shared/ExtensionMessage.ts (+1 -0)
  12. src/shared/WebviewMessage.ts (+1 -0)
  13. src/shared/experiments.ts (+2 -2)
  14. webview-ui/src/components/settings/ExperimentalSettings.tsx (+31 -1)
  15. webview-ui/src/components/settings/SettingsView.tsx (+8 -1)
  16. webview-ui/src/context/ExtensionStateContext.tsx (+5 -0)
  17. webview-ui/src/context/__tests__/ExtensionStateContext.test.tsx (+1 -0)
  18. webview-ui/src/i18n/locales/ca/settings.json (+6 -2)
  19. webview-ui/src/i18n/locales/de/settings.json (+6 -2)
  20. webview-ui/src/i18n/locales/en/settings.json (+6 -2)
  21. webview-ui/src/i18n/locales/es/settings.json (+6 -2)
  22. webview-ui/src/i18n/locales/fr/settings.json (+6 -2)
  23. webview-ui/src/i18n/locales/hi/settings.json (+6 -2)
  24. webview-ui/src/i18n/locales/it/settings.json (+6 -2)
  25. webview-ui/src/i18n/locales/ja/settings.json (+6 -2)
  26. webview-ui/src/i18n/locales/ko/settings.json (+6 -2)
  27. webview-ui/src/i18n/locales/nl/settings.json (+6 -2)
  28. webview-ui/src/i18n/locales/pl/settings.json (+6 -2)
  29. webview-ui/src/i18n/locales/pt-BR/settings.json (+6 -2)
  30. webview-ui/src/i18n/locales/ru/settings.json (+6 -2)
  31. webview-ui/src/i18n/locales/tr/settings.json (+6 -2)
  32. webview-ui/src/i18n/locales/vi/settings.json (+6 -2)
  33. webview-ui/src/i18n/locales/zh-CN/settings.json (+6 -2)
  34. webview-ui/src/i18n/locales/zh-TW/settings.json (+6 -2)

+ 5 - 0
.changeset/whole-swans-cheer.md

@@ -0,0 +1,5 @@
+---
+"roo-cline": patch
+---
+
+Adds a slider to configure threshold to trigger intelligent context condensing

+ 127 - 0
src/core/sliding-window/__tests__/sliding-window.test.ts

@@ -248,6 +248,8 @@ describe("truncateConversationIfNeeded", () => {
 			contextWindow: modelInfo.contextWindow,
 			maxTokens: modelInfo.maxTokens,
 			apiHandler: mockApiHandler,
+			autoCondenseContext: false,
+			autoCondenseContextPercent: 100,
 			systemPrompt: "System prompt",
 		})
 
@@ -277,6 +279,8 @@ describe("truncateConversationIfNeeded", () => {
 			contextWindow: modelInfo.contextWindow,
 			maxTokens: modelInfo.maxTokens,
 			apiHandler: mockApiHandler,
+			autoCondenseContext: false,
+			autoCondenseContextPercent: 100,
 			systemPrompt: "System prompt",
 		})
 
@@ -304,6 +308,8 @@ describe("truncateConversationIfNeeded", () => {
 			contextWindow: modelInfo1.contextWindow,
 			maxTokens: modelInfo1.maxTokens,
 			apiHandler: mockApiHandler,
+			autoCondenseContext: false,
+			autoCondenseContextPercent: 100,
 			systemPrompt: "System prompt",
 		})
 
@@ -313,6 +319,8 @@ describe("truncateConversationIfNeeded", () => {
 			contextWindow: modelInfo2.contextWindow,
 			maxTokens: modelInfo2.maxTokens,
 			apiHandler: mockApiHandler,
+			autoCondenseContext: false,
+			autoCondenseContextPercent: 100,
 			systemPrompt: "System prompt",
 		})
 
@@ -329,6 +337,8 @@ describe("truncateConversationIfNeeded", () => {
 			contextWindow: modelInfo1.contextWindow,
 			maxTokens: modelInfo1.maxTokens,
 			apiHandler: mockApiHandler,
+			autoCondenseContext: false,
+			autoCondenseContextPercent: 100,
 			systemPrompt: "System prompt",
 		})
 
@@ -338,6 +348,8 @@ describe("truncateConversationIfNeeded", () => {
 			contextWindow: modelInfo2.contextWindow,
 			maxTokens: modelInfo2.maxTokens,
 			apiHandler: mockApiHandler,
+			autoCondenseContext: false,
+			autoCondenseContextPercent: 100,
 			systemPrompt: "System prompt",
 		})
 
@@ -369,6 +381,8 @@ describe("truncateConversationIfNeeded", () => {
 			contextWindow: modelInfo.contextWindow,
 			maxTokens,
 			apiHandler: mockApiHandler,
+			autoCondenseContext: false,
+			autoCondenseContextPercent: 100,
 			systemPrompt: "System prompt",
 		})
 		expect(resultWithSmall).toEqual({
@@ -399,6 +413,8 @@ describe("truncateConversationIfNeeded", () => {
 			contextWindow: modelInfo.contextWindow,
 			maxTokens,
 			apiHandler: mockApiHandler,
+			autoCondenseContext: false,
+			autoCondenseContextPercent: 100,
 			systemPrompt: "System prompt",
 		})
 		expect(resultWithLarge.messages).not.toEqual(messagesWithLargeContent) // Should truncate
@@ -422,6 +438,8 @@ describe("truncateConversationIfNeeded", () => {
 			contextWindow: modelInfo.contextWindow,
 			maxTokens,
 			apiHandler: mockApiHandler,
+			autoCondenseContext: false,
+			autoCondenseContextPercent: 100,
 			systemPrompt: "System prompt",
 		})
 		expect(resultWithVeryLarge.messages).not.toEqual(messagesWithVeryLargeContent) // Should truncate
@@ -448,6 +466,8 @@ describe("truncateConversationIfNeeded", () => {
 			contextWindow: modelInfo.contextWindow,
 			maxTokens: modelInfo.maxTokens,
 			apiHandler: mockApiHandler,
+			autoCondenseContext: false,
+			autoCondenseContextPercent: 100,
 			systemPrompt: "System prompt",
 		})
 		expect(result).toEqual({
@@ -488,6 +508,7 @@ describe("truncateConversationIfNeeded", () => {
 			maxTokens: modelInfo.maxTokens,
 			apiHandler: mockApiHandler,
 			autoCondenseContext: true,
+			autoCondenseContextPercent: 100,
 			systemPrompt: "System prompt",
 		})
 
@@ -534,6 +555,7 @@ describe("truncateConversationIfNeeded", () => {
 			maxTokens: modelInfo.maxTokens,
 			apiHandler: mockApiHandler,
 			autoCondenseContext: true,
+			autoCondenseContextPercent: 100,
 			systemPrompt: "System prompt",
 		})
 
@@ -570,6 +592,7 @@ describe("truncateConversationIfNeeded", () => {
 			maxTokens: modelInfo.maxTokens,
 			apiHandler: mockApiHandler,
 			autoCondenseContext: false,
+			autoCondenseContextPercent: 50, // This shouldn't matter since autoCondenseContext is false
 			systemPrompt: "System prompt",
 		})
 
@@ -587,6 +610,94 @@ describe("truncateConversationIfNeeded", () => {
 		// Clean up
 		summarizeSpy.mockRestore()
 	})
+
+	it("should use summarizeConversation when autoCondenseContext is true and context percent exceeds threshold", async () => {
+		// Mock the summarizeConversation function
+		const mockSummary = "This is a summary of the conversation"
+		const mockCost = 0.05
+		const mockSummarizeResponse: condenseModule.SummarizeResponse = {
+			messages: [
+				{ role: "user", content: "First message" },
+				{ role: "assistant", content: mockSummary, isSummary: true },
+				{ role: "user", content: "Last message" },
+			],
+			summary: mockSummary,
+			cost: mockCost,
+			newContextTokens: 100,
+		}
+
+		const summarizeSpy = jest
+			.spyOn(condenseModule, "summarizeConversation")
+			.mockResolvedValue(mockSummarizeResponse)
+
+		const modelInfo = createModelInfo(100000, 30000)
+		// Set tokens to be below the allowedTokens threshold but above the percentage threshold
+		const contextWindow = modelInfo.contextWindow
+		const totalTokens = 60000 // Below allowedTokens but 60% of context window
+		const messagesWithSmallContent = [...messages.slice(0, -1), { ...messages[messages.length - 1], content: "" }]
+
+		const result = await truncateConversationIfNeeded({
+			messages: messagesWithSmallContent,
+			totalTokens,
+			contextWindow,
+			maxTokens: modelInfo.maxTokens,
+			apiHandler: mockApiHandler,
+			autoCondenseContext: true,
+			autoCondenseContextPercent: 50, // Set threshold to 50% - our tokens are at 60%
+			systemPrompt: "System prompt",
+		})
+
+		// Verify summarizeConversation was called with the right parameters
+		expect(summarizeSpy).toHaveBeenCalledWith(messagesWithSmallContent, mockApiHandler, "System prompt")
+
+		// Verify the result contains the summary information
+		expect(result).toMatchObject({
+			messages: mockSummarizeResponse.messages,
+			summary: mockSummary,
+			cost: mockCost,
+			prevContextTokens: totalTokens,
+		})
+
+		// Clean up
+		summarizeSpy.mockRestore()
+	})
+
+	it("should not use summarizeConversation when autoCondenseContext is true but context percent is below threshold", async () => {
+		// Reset any previous mock calls
+		jest.clearAllMocks()
+		const summarizeSpy = jest.spyOn(condenseModule, "summarizeConversation")
+
+		const modelInfo = createModelInfo(100000, 30000)
+		// Set tokens to be below both the allowedTokens threshold and the percentage threshold
+		const contextWindow = modelInfo.contextWindow
+		const totalTokens = 40000 // 40% of context window
+		const messagesWithSmallContent = [...messages.slice(0, -1), { ...messages[messages.length - 1], content: "" }]
+
+		const result = await truncateConversationIfNeeded({
+			messages: messagesWithSmallContent,
+			totalTokens,
+			contextWindow,
+			maxTokens: modelInfo.maxTokens,
+			apiHandler: mockApiHandler,
+			autoCondenseContext: true,
+			autoCondenseContextPercent: 50, // Set threshold to 50% - our tokens are at 40%
+			systemPrompt: "System prompt",
+		})
+
+		// Verify summarizeConversation was not called
+		expect(summarizeSpy).not.toHaveBeenCalled()
+
+		// Verify no truncation or summarization occurred
+		expect(result).toEqual({
+			messages: messagesWithSmallContent,
+			summary: "",
+			cost: 0,
+			prevContextTokens: totalTokens,
+		})
+
+		// Clean up
+		summarizeSpy.mockRestore()
+	})
 })
 
 /**
@@ -624,6 +735,8 @@ describe("getMaxTokens", () => {
 			contextWindow: modelInfo.contextWindow,
 			maxTokens: modelInfo.maxTokens,
 			apiHandler: mockApiHandler,
+			autoCondenseContext: false,
+			autoCondenseContextPercent: 100,
 			systemPrompt: "System prompt",
 		})
 		expect(result1).toEqual({
@@ -640,6 +753,8 @@ describe("getMaxTokens", () => {
 			contextWindow: modelInfo.contextWindow,
 			maxTokens: modelInfo.maxTokens,
 			apiHandler: mockApiHandler,
+			autoCondenseContext: false,
+			autoCondenseContextPercent: 100,
 			systemPrompt: "System prompt",
 		})
 		expect(result2.messages).not.toEqual(messagesWithSmallContent)
@@ -664,6 +779,8 @@ describe("getMaxTokens", () => {
 			contextWindow: modelInfo.contextWindow,
 			maxTokens: modelInfo.maxTokens,
 			apiHandler: mockApiHandler,
+			autoCondenseContext: false,
+			autoCondenseContextPercent: 100,
 			systemPrompt: "System prompt",
 		})
 		expect(result1).toEqual({
@@ -680,6 +797,8 @@ describe("getMaxTokens", () => {
 			contextWindow: modelInfo.contextWindow,
 			maxTokens: modelInfo.maxTokens,
 			apiHandler: mockApiHandler,
+			autoCondenseContext: false,
+			autoCondenseContextPercent: 100,
 			systemPrompt: "System prompt",
 		})
 		expect(result2.messages).not.toEqual(messagesWithSmallContent)
@@ -703,6 +822,8 @@ describe("getMaxTokens", () => {
 			contextWindow: modelInfo.contextWindow,
 			maxTokens: modelInfo.maxTokens,
 			apiHandler: mockApiHandler,
+			autoCondenseContext: false,
+			autoCondenseContextPercent: 100,
 			systemPrompt: "System prompt",
 		})
 		expect(result1.messages).toEqual(messagesWithSmallContent)
@@ -714,6 +835,8 @@ describe("getMaxTokens", () => {
 			contextWindow: modelInfo.contextWindow,
 			maxTokens: modelInfo.maxTokens,
 			apiHandler: mockApiHandler,
+			autoCondenseContext: false,
+			autoCondenseContextPercent: 100,
 			systemPrompt: "System prompt",
 		})
 		expect(result2).not.toEqual(messagesWithSmallContent)
@@ -735,6 +858,8 @@ describe("getMaxTokens", () => {
 			contextWindow: modelInfo.contextWindow,
 			maxTokens: modelInfo.maxTokens,
 			apiHandler: mockApiHandler,
+			autoCondenseContext: false,
+			autoCondenseContextPercent: 100,
 			systemPrompt: "System prompt",
 		})
 		expect(result1.messages).toEqual(messagesWithSmallContent)
@@ -746,6 +871,8 @@ describe("getMaxTokens", () => {
 			contextWindow: modelInfo.contextWindow,
 			maxTokens: modelInfo.maxTokens,
 			apiHandler: mockApiHandler,
+			autoCondenseContext: false,
+			autoCondenseContextPercent: 100,
 			systemPrompt: "System prompt",
 		})
 		expect(result2).not.toEqual(messagesWithSmallContent)

+ 20 - 11
src/core/sliding-window/index.ts

@@ -63,7 +63,8 @@ type TruncateOptions = {
 	contextWindow: number
 	maxTokens?: number | null
 	apiHandler: ApiHandler
-	autoCondenseContext?: boolean
+	autoCondenseContext: boolean
+	autoCondenseContextPercent: number
 	systemPrompt: string
 }
 
@@ -83,6 +84,7 @@ export async function truncateConversationIfNeeded({
 	maxTokens,
 	apiHandler,
 	autoCondenseContext,
+	autoCondenseContextPercent,
 	systemPrompt,
 }: TruncateOptions): Promise<TruncateResponse> {
 	// Calculate the maximum tokens reserved for response
@@ -96,21 +98,28 @@ export async function truncateConversationIfNeeded({
 		: await estimateTokenCount([{ type: "text", text: lastMessageContent as string }], apiHandler)
 
 	// Calculate total effective tokens (totalTokens never includes the last message)
-	const effectiveTokens = totalTokens + lastMessageTokens
+	const prevContextTokens = totalTokens + lastMessageTokens
 
 	// Calculate available tokens for conversation history
 	// Truncate if we're within TOKEN_BUFFER_PERCENTAGE of the context window
 	const allowedTokens = contextWindow * (1 - TOKEN_BUFFER_PERCENTAGE) - reservedTokens
 
-	// Determine if truncation is needed and apply if necessary
-	if (effectiveTokens <= allowedTokens) {
-		return { messages, summary: "", cost: 0, prevContextTokens: effectiveTokens }
-	} else if (autoCondenseContext) {
-		const result = await summarizeConversation(messages, apiHandler, systemPrompt)
-		if (result.summary) {
-			return { ...result, prevContextTokens: effectiveTokens }
+	if (autoCondenseContext) {
+		const contextPercent = (100 * prevContextTokens) / contextWindow
+		if (contextPercent >= autoCondenseContextPercent || prevContextTokens > allowedTokens) {
+			// Attempt to intelligently condense the context
+			const result = await summarizeConversation(messages, apiHandler, systemPrompt)
+			if (result.summary) {
+				return { ...result, prevContextTokens }
+			}
 		}
 	}
-	const truncatedMessages = truncateConversation(messages, 0.5)
-	return { messages: truncatedMessages, prevContextTokens: effectiveTokens, summary: "", cost: 0 }
+
+	// Fall back to sliding window truncation if needed
+	if (prevContextTokens > allowedTokens) {
+		const truncatedMessages = truncateConversation(messages, 0.5)
+		return { messages: truncatedMessages, prevContextTokens, summary: "", cost: 0 }
+	}
+	// No truncation or condensation needed
+	return { messages, summary: "", cost: 0, prevContextTokens }
 }
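
For reference, a minimal standalone sketch of the new decision order in truncateConversationIfNeeded. The TOKEN_BUFFER_PERCENTAGE value and the token figures below are assumptions chosen to mirror the new tests, not the exact production constants, and the real function additionally falls through to truncation when summarization returns no summary:

```typescript
// Hypothetical, self-contained model of the new control flow; the real
// implementation lives in src/core/sliding-window/index.ts.
const TOKEN_BUFFER_PERCENTAGE = 0.1 // assumed value for illustration

type Decision = "condense" | "truncate" | "keep"

function decide(
	prevContextTokens: number,
	contextWindow: number,
	reservedTokens: number,
	autoCondenseContext: boolean,
	autoCondenseContextPercent: number,
): Decision {
	const allowedTokens = contextWindow * (1 - TOKEN_BUFFER_PERCENTAGE) - reservedTokens
	const contextPercent = (100 * prevContextTokens) / contextWindow

	// New behavior: condensing can now fire on the user-configured percentage
	// threshold, not only on the hard token limit.
	if (autoCondenseContext && (contextPercent >= autoCondenseContextPercent || prevContextTokens > allowedTokens)) {
		return "condense"
	}

	// Sliding-window truncation remains the fallback past the hard limit.
	return prevContextTokens > allowedTokens ? "truncate" : "keep"
}

// Mirrors the new tests: 100k window, 30k reserved, 50% threshold.
console.log(decide(60_000, 100_000, 30_000, true, 50)) // "condense" (60% >= 50%)
console.log(decide(40_000, 100_000, 30_000, true, 50)) // "keep" (40% < 50%, under hard limit)
```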

+ 9 - 2
src/core/task/Task.ts

@@ -1460,8 +1460,14 @@ export class Task extends EventEmitter<ClineEvents> {
 	}
 
 	public async *attemptApiRequest(retryAttempt: number = 0): ApiStream {
-		const { apiConfiguration, autoApprovalEnabled, alwaysApproveResubmit, requestDelaySeconds, experiments } =
-			(await this.providerRef.deref()?.getState()) ?? {}
+		const {
+			apiConfiguration,
+			autoApprovalEnabled,
+			alwaysApproveResubmit,
+			requestDelaySeconds,
+			experiments,
+			autoCondenseContextPercent = 100,
+		} = (await this.providerRef.deref()?.getState()) ?? {}
 
 		let rateLimitDelay = 0
 
@@ -1510,6 +1516,7 @@ export class Task extends EventEmitter<ClineEvents> {
 				contextWindow,
 				apiHandler: this.api,
 				autoCondenseContext,
+				autoCondenseContextPercent,
 				systemPrompt,
 			})
 			if (truncateResult.messages !== this.apiConversationHistory) {

+ 3 - 0
src/core/webview/ClineProvider.ts

@@ -1222,6 +1222,7 @@ export class ClineProvider extends EventEmitter<ClineProviderEvents> implements
 			alwaysAllowModeSwitch,
 			alwaysAllowSubtasks,
 			allowedMaxRequests,
+			autoCondenseContextPercent,
 			soundEnabled,
 			ttsEnabled,
 			ttsSpeed,
@@ -1293,6 +1294,7 @@ export class ClineProvider extends EventEmitter<ClineProviderEvents> implements
 			alwaysAllowModeSwitch: alwaysAllowModeSwitch ?? false,
 			alwaysAllowSubtasks: alwaysAllowSubtasks ?? false,
 			allowedMaxRequests: allowedMaxRequests ?? Infinity,
+			autoCondenseContextPercent: autoCondenseContextPercent ?? 100,
 			uriScheme: vscode.env.uriScheme,
 			currentTaskItem: this.getCurrentCline()?.taskId
 				? (taskHistory || []).find((item: HistoryItem) => item.id === this.getCurrentCline()?.taskId)
@@ -1396,6 +1398,7 @@ export class ClineProvider extends EventEmitter<ClineProviderEvents> implements
 			alwaysAllowModeSwitch: stateValues.alwaysAllowModeSwitch ?? false,
 			alwaysAllowSubtasks: stateValues.alwaysAllowSubtasks ?? false,
 			allowedMaxRequests: stateValues.allowedMaxRequests ?? Infinity,
+			autoCondenseContextPercent: stateValues.autoCondenseContextPercent ?? 100,
 			taskHistory: stateValues.taskHistory,
 			allowedCommands: stateValues.allowedCommands,
 			soundEnabled: stateValues.soundEnabled ?? false,

+ 22 - 0
src/core/webview/__tests__/ClineProvider.test.ts

@@ -412,6 +412,7 @@ describe("ClineProvider", () => {
 			showRooIgnoredFiles: true,
 			renderContext: "sidebar",
 			maxReadFileLine: 500,
+			autoCondenseContextPercent: 100,
 		}
 
 		const message: ExtensionMessage = {
@@ -583,6 +584,27 @@ describe("ClineProvider", () => {
 		expect(state.alwaysApproveResubmit).toBe(false)
 	})
 
+	test("autoCondenseContextPercent defaults to 100", async () => {
+		// Mock globalState.get to return undefined for autoCondenseContextPercent
+		;(mockContext.globalState.get as jest.Mock).mockImplementation((key: string) =>
+			key === "autoCondenseContextPercent" ? undefined : null,
+		)
+
+		const state = await provider.getState()
+		expect(state.autoCondenseContextPercent).toBe(100)
+	})
+
+	test("handles autoCondenseContextPercent message", async () => {
+		await provider.resolveWebviewView(mockWebviewView)
+		const messageHandler = (mockWebviewView.webview.onDidReceiveMessage as jest.Mock).mock.calls[0][0]
+
+		await messageHandler({ type: "autoCondenseContextPercent", value: 75 })
+
+		expect(updateGlobalStateSpy).toHaveBeenCalledWith("autoCondenseContextPercent", 75)
+		expect(mockContext.globalState.update).toHaveBeenCalledWith("autoCondenseContextPercent", 75)
+		expect(mockPostMessage).toHaveBeenCalled()
+	})
+
 	it("loads saved API config when switching modes", async () => {
 		await provider.resolveWebviewView(mockWebviewView)
 		const messageHandler = (mockWebviewView.webview.onDidReceiveMessage as jest.Mock).mock.calls[0][0]

+ 4 - 0
src/core/webview/webviewMessageHandler.ts

@@ -170,6 +170,10 @@ export const webviewMessageHandler = async (provider: ClineProvider, message: We
 		case "askResponse":
 			provider.getCurrentCline()?.handleWebviewAskResponse(message.askResponse!, message.text, message.images)
 			break
+		case "autoCondenseContextPercent":
+			await updateGlobalState("autoCondenseContextPercent", message.value)
+			await provider.postStateToWebview()
+			break
 		case "terminalOperation":
 			if (message.terminalOperation) {
 				provider.getCurrentCline()?.handleTerminalOperation(message.terminalOperation)

+ 3 - 0
src/exports/roo-code.d.ts

@@ -71,6 +71,7 @@ type GlobalSettings = {
 	alwaysAllowExecute?: boolean | undefined
 	allowedCommands?: string[] | undefined
 	allowedMaxRequests?: number | undefined
+	autoCondenseContextPercent?: number | undefined
 	browserToolEnabled?: boolean | undefined
 	browserViewportSize?: string | undefined
 	screenshotQuality?: number | undefined
@@ -805,6 +806,7 @@ type IpcMessage =
 								alwaysAllowExecute?: boolean | undefined
 								allowedCommands?: string[] | undefined
 								allowedMaxRequests?: number | undefined
+								autoCondenseContextPercent?: number | undefined
 								browserToolEnabled?: boolean | undefined
 								browserViewportSize?: string | undefined
 								screenshotQuality?: number | undefined
@@ -1279,6 +1281,7 @@ type TaskCommand =
 					alwaysAllowExecute?: boolean | undefined
 					allowedCommands?: string[] | undefined
 					allowedMaxRequests?: number | undefined
+					autoCondenseContextPercent?: number | undefined
 					browserToolEnabled?: boolean | undefined
 					browserViewportSize?: string | undefined
 					screenshotQuality?: number | undefined

+ 3 - 0
src/exports/types.ts

@@ -71,6 +71,7 @@ type GlobalSettings = {
 	alwaysAllowExecute?: boolean | undefined
 	allowedCommands?: string[] | undefined
 	allowedMaxRequests?: number | undefined
+	autoCondenseContextPercent?: number | undefined
 	browserToolEnabled?: boolean | undefined
 	browserViewportSize?: string | undefined
 	screenshotQuality?: number | undefined
@@ -819,6 +820,7 @@ type IpcMessage =
 								alwaysAllowExecute?: boolean | undefined
 								allowedCommands?: string[] | undefined
 								allowedMaxRequests?: number | undefined
+								autoCondenseContextPercent?: number | undefined
 								browserToolEnabled?: boolean | undefined
 								browserViewportSize?: string | undefined
 								screenshotQuality?: number | undefined
@@ -1295,6 +1297,7 @@ type TaskCommand =
 					alwaysAllowExecute?: boolean | undefined
 					allowedCommands?: string[] | undefined
 					allowedMaxRequests?: number | undefined
+					autoCondenseContextPercent?: number | undefined
 					browserToolEnabled?: boolean | undefined
 					browserViewportSize?: string | undefined
 					screenshotQuality?: number | undefined

+ 2 - 0
src/schemas/index.ts

@@ -750,6 +750,7 @@ export const globalSettingsSchema = z.object({
 	alwaysAllowExecute: z.boolean().optional(),
 	allowedCommands: z.array(z.string()).optional(),
 	allowedMaxRequests: z.number().optional(),
+	autoCondenseContextPercent: z.number().optional(),
 
 	browserToolEnabled: z.boolean().optional(),
 	browserViewportSize: z.string().optional(),
@@ -830,6 +831,7 @@ const globalSettingsRecord: GlobalSettingsRecord = {
 	alwaysAllowExecute: undefined,
 	allowedCommands: undefined,
 	allowedMaxRequests: undefined,
+	autoCondenseContextPercent: undefined,
 
 	browserToolEnabled: undefined,
 	browserViewportSize: undefined,

+ 1 - 0
src/shared/ExtensionMessage.ts

@@ -206,6 +206,7 @@ export type ExtensionState = Pick<
 	renderContext: "sidebar" | "editor"
 	settingsImportedAt?: number
 	historyPreviewCollapsed?: boolean
+	autoCondenseContextPercent: number
 }
 
 export type { ClineMessage, ClineAsk, ClineSay }

+ 1 - 0
src/shared/WebviewMessage.ts

@@ -59,6 +59,7 @@ export interface WebviewMessage {
 		| "alwaysAllowModeSwitch"
 		| "allowedMaxRequests"
 		| "alwaysAllowSubtasks"
+		| "autoCondenseContextPercent"
 		| "playSound"
 		| "playTts"
 		| "stopTts"

+ 2 - 2
src/shared/experiments.ts

@@ -4,8 +4,8 @@ import { AssertEqual, Equals, Keys, Values } from "../utils/type-fu"
 export type { ExperimentId }
 
 export const EXPERIMENT_IDS = {
-	AUTO_CONDENSE_CONTEXT: "autoCondenseContext",
 	POWER_STEERING: "powerSteering",
+	AUTO_CONDENSE_CONTEXT: "autoCondenseContext",
 } as const satisfies Record<string, ExperimentId>
 
 type _AssertExperimentIds = AssertEqual<Equals<ExperimentId, Values<typeof EXPERIMENT_IDS>>>
@@ -17,8 +17,8 @@ interface ExperimentConfig {
 }
 
 export const experimentConfigsMap: Record<ExperimentKey, ExperimentConfig> = {
-	AUTO_CONDENSE_CONTEXT: { enabled: false },
 	POWER_STEERING: { enabled: false },
+	AUTO_CONDENSE_CONTEXT: { enabled: false }, // Keep this last, there is a slider below it in the UI
 }
 
 export const experimentDefault = Object.fromEntries(

+ 31 - 1
webview-ui/src/components/settings/ExperimentalSettings.tsx

@@ -6,19 +6,24 @@ import { EXPERIMENT_IDS, experimentConfigsMap, ExperimentId } from "@roo/shared/
 
 import { cn } from "@/lib/utils"
 
-import { SetExperimentEnabled } from "./types"
+import { SetCachedStateField, SetExperimentEnabled } from "./types"
 import { SectionHeader } from "./SectionHeader"
 import { Section } from "./Section"
 import { ExperimentalFeature } from "./ExperimentalFeature"
+import { Slider } from "@/components/ui"
 
 type ExperimentalSettingsProps = HTMLAttributes<HTMLDivElement> & {
 	experiments: Record<ExperimentId, boolean>
 	setExperimentEnabled: SetExperimentEnabled
+	autoCondenseContextPercent: number
+	setCachedStateField: SetCachedStateField<"autoCondenseContextPercent">
 }
 
 export const ExperimentalSettings = ({
 	experiments,
 	setExperimentEnabled,
+	autoCondenseContextPercent,
+	setCachedStateField,
 	className,
 	...props
 }: ExperimentalSettingsProps) => {
@@ -46,6 +51,31 @@ export const ExperimentalSettings = ({
 							}
 						/>
 					))}
+				{experiments[EXPERIMENT_IDS.AUTO_CONDENSE_CONTEXT] && (
+					<div className="flex flex-col gap-3 pl-3 border-l-2 border-vscode-button-background">
+						<div className="flex items-center gap-4 font-bold">
+							<span className="codicon codicon-fold" />
+							<div>{t("settings:experimental.autoCondenseContextPercent.label")}</div>
+						</div>
+						<div>
+							<div className="flex items-center gap-2">
+								<Slider
+									min={10}
+									max={100}
+									step={1}
+									value={[autoCondenseContextPercent]}
+									onValueChange={([value]) =>
+										setCachedStateField("autoCondenseContextPercent", value)
+									}
+								/>
+								<span className="w-20">{autoCondenseContextPercent}%</span>
+							</div>
+							<div className="text-vscode-descriptionForeground text-sm mt-1">
+								{t("settings:experimental.autoCondenseContextPercent.description")}
+							</div>
+						</div>
+					</div>
+				)}
 			</Section>
 		</div>
 	)

+ 8 - 1
webview-ui/src/components/settings/SettingsView.tsx

@@ -129,6 +129,7 @@ const SettingsView = forwardRef<SettingsViewRef, SettingsViewProps>(({ onDone, t
 		alwaysAllowWrite,
 		alwaysAllowWriteOutsideWorkspace,
 		alwaysApproveResubmit,
+		autoCondenseContextPercent,
 		browserToolEnabled,
 		browserViewportSize,
 		enableCheckpoints,
@@ -248,6 +249,7 @@ const SettingsView = forwardRef<SettingsViewRef, SettingsViewProps>(({ onDone, t
 			vscode.postMessage({ type: "alwaysAllowMcp", bool: alwaysAllowMcp })
 			vscode.postMessage({ type: "allowedCommands", commands: allowedCommands ?? [] })
 			vscode.postMessage({ type: "allowedMaxRequests", value: allowedMaxRequests })
+			vscode.postMessage({ type: "autoCondenseContextPercent", value: autoCondenseContextPercent })
 			vscode.postMessage({ type: "browserToolEnabled", bool: browserToolEnabled })
 			vscode.postMessage({ type: "soundEnabled", bool: soundEnabled })
 			vscode.postMessage({ type: "ttsEnabled", bool: ttsEnabled })
@@ -628,7 +630,12 @@ const SettingsView = forwardRef<SettingsViewRef, SettingsViewProps>(({ onDone, t
 
 					{/* Experimental Section */}
 					{activeTab === "experimental" && (
-						<ExperimentalSettings setExperimentEnabled={setExperimentEnabled} experiments={experiments} />
+						<ExperimentalSettings
+							setExperimentEnabled={setExperimentEnabled}
+							experiments={experiments}
+							autoCondenseContextPercent={autoCondenseContextPercent}
+							setCachedStateField={setCachedStateField}
+						/>
 					)}
 
 					{/* Language Section */}

+ 5 - 0
webview-ui/src/context/ExtensionStateContext.tsx

@@ -96,6 +96,8 @@ export interface ExtensionStateContextType extends ExtensionState {
 	terminalCompressProgressBar?: boolean
 	setTerminalCompressProgressBar: (value: boolean) => void
 	setHistoryPreviewCollapsed: (value: boolean) => void
+	autoCondenseContextPercent: number
+	setAutoCondenseContextPercent: (value: number) => void
 }
 
 export const ExtensionStateContext = createContext<ExtensionStateContextType | undefined>(undefined)
@@ -175,6 +177,7 @@ export const ExtensionStateContextProvider: React.FC<{ children: React.ReactNode
 		terminalZdotdir: false, // Default ZDOTDIR handling setting
 		terminalCompressProgressBar: true, // Default to compress progress bar output
 		historyPreviewCollapsed: false, // Initialize the new state (default to expanded)
+		autoCondenseContextPercent: 100,
 	})
 
 	const [didHydrateState, setDidHydrateState] = useState(false)
@@ -351,6 +354,8 @@ export const ExtensionStateContextProvider: React.FC<{ children: React.ReactNode
 			}),
 		setHistoryPreviewCollapsed: (value) =>
 			setState((prevState) => ({ ...prevState, historyPreviewCollapsed: value })), // Implement the setter
+		setAutoCondenseContextPercent: (value) =>
+			setState((prevState) => ({ ...prevState, autoCondenseContextPercent: value })),
 	}
 
 	return <ExtensionStateContext.Provider value={contextValue}>{children}</ExtensionStateContext.Provider>
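
Taken together, the webview changes follow the standard settings round trip: the slider only mutates cached state, the value is posted on save, and webviewMessageHandler persists it and re-broadcasts state. A rough sketch under simplified stand-in types (not the real ExtensionState or message interfaces):

```typescript
// Simplified stand-in for the flow: Slider -> cached settings state ->
// postMessage on save -> handler persists and re-broadcasts state.
type WebviewMessage = { type: "autoCondenseContextPercent"; value: number }

const globalState = new Map<string, unknown>()

// Extension-host side (cf. webviewMessageHandler): persist the value; the real
// handler then calls provider.postStateToWebview() to re-send ExtensionState.
async function handleWebviewMessage(message: WebviewMessage): Promise<void> {
	globalState.set("autoCondenseContextPercent", message.value)
}

// Webview side (cf. SettingsView's save handler): the slider edits cached
// state only; the value reaches the extension when the user saves.
async function saveSettings(autoCondenseContextPercent: number): Promise<void> {
	await handleWebviewMessage({ type: "autoCondenseContextPercent", value: autoCondenseContextPercent })
}

saveSettings(75).then(() => {
	console.log(globalState.get("autoCondenseContextPercent")) // 75
})
```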

+ 1 - 0
webview-ui/src/context/__tests__/ExtensionStateContext.test.tsx

@@ -202,6 +202,7 @@ describe("mergeExtensionState", () => {
 			showRooIgnoredFiles: true,
 			renderContext: "sidebar",
 			maxReadFileLine: 500,
+			autoCondenseContextPercent: 100,
 		}
 
 		const prevState: ExtensionState = {

+ 6 - 2
webview-ui/src/i18n/locales/ca/settings.json

@@ -403,9 +403,13 @@
 	},
 	"experimental": {
 		"warning": "⚠️",
+		"autoCondenseContextPercent": {
+			"label": "Llindar per activar la condensació intel·ligent de context",
+			"description": "Quan la finestra de context assoleix aquest llindar, Roo la condensarà automàticament."
+		},
 		"AUTO_CONDENSE_CONTEXT": {
-			"name": "Condensar intel·ligentment la finestra de context",
-			"description": "Utilitza una crida LLM per resumir la conversa anterior quan la finestra de context de la tasca està gairebé plena, en lloc d'eliminar missatges antics. Avís: el cost de resumir actualment no s'inclou en els costos d'API mostrats a la interfície."
+			"name": "Activar automàticament la condensació intel·ligent de context",
+			"description": "La condensació intel·ligent de context utilitza una crida LLM per resumir la conversa anterior quan la finestra de context de la tasca assoleix un llindar predefinit, en lloc d'eliminar missatges antics quan el context s'omple."
 		},
 		"DIFF_STRATEGY_UNIFIED": {
 			"name": "Utilitzar estratègia diff unificada experimental",

+ 6 - 2
webview-ui/src/i18n/locales/de/settings.json

@@ -403,9 +403,13 @@
 	},
 	"experimental": {
 		"warning": "⚠️",
+		"autoCondenseContextPercent": {
+			"label": "Schwellenwert für intelligente Kontextkomprimierung",
+			"description": "Wenn das Kontextfenster diesen Schwellenwert erreicht, wird Roo es automatisch komprimieren."
+		},
 		"AUTO_CONDENSE_CONTEXT": {
-			"name": "Kontextfenster intelligent komprimieren",
-			"description": "Verwendet einen LLM-Aufruf, um das vorherige Gespräch zusammenzufassen, wenn das Kontextfenster der Aufgabe fast voll ist, anstatt alte Nachrichten zu verwerfen. Hinweis: Die Kosten für die Zusammenfassung sind derzeit nicht in den in der Benutzeroberfläche angezeigten API-Kosten enthalten."
+			"name": "Intelligente Kontextkomprimierung automatisch auslösen",
+			"description": "Intelligente Kontextkomprimierung verwendet einen LLM-Aufruf, um das vorherige Gespräch zusammenzufassen, wenn das Kontextfenster der Aufgabe einen voreingestellten Schwellenwert erreicht, anstatt alte Nachrichten zu verwerfen, wenn der Kontext voll ist."
 		},
 		"DIFF_STRATEGY_UNIFIED": {
 			"name": "Experimentelle einheitliche Diff-Strategie verwenden",

+ 6 - 2
webview-ui/src/i18n/locales/en/settings.json

@@ -403,9 +403,13 @@
 	},
 	"experimental": {
 		"warning": "⚠️",
+		"autoCondenseContextPercent": {
+			"label": "Threshold to trigger intelligent context condensing",
+			"description": "When the context window reaches this threshold, Roo will automatically condense it."
+		},
 		"AUTO_CONDENSE_CONTEXT": {
-			"name": "Intelligently condense the context window",
-			"description": "Uses an LLM call to summarize the past conversation when the task's context window is almost full, rather than dropping old messages."
+			"name": "Automatically trigger intelligent context condensing",
+			"description": "Intelligent context condensing uses an LLM call to summarize the past conversation when the task's context window reaches a preset threshold, rather than dropping old messages when the context fills."
 		},
 		"DIFF_STRATEGY_UNIFIED": {
 			"name": "Use experimental unified diff strategy",

+ 6 - 2
webview-ui/src/i18n/locales/es/settings.json

@@ -403,9 +403,13 @@
 	},
 	"experimental": {
 		"warning": "⚠️",
+		"autoCondenseContextPercent": {
+			"label": "Umbral para activar la condensación inteligente de contexto",
+			"description": "Cuando la ventana de contexto alcanza este umbral, Roo la condensará automáticamente."
+		},
 		"AUTO_CONDENSE_CONTEXT": {
-			"name": "Condensar inteligentemente la ventana de contexto",
-			"description": "Utiliza una llamada LLM para resumir la conversación anterior cuando la ventana de contexto de la tarea está casi llena, en lugar de eliminar mensajes antiguos. Aviso: el costo de resumir actualmente no está incluido en los costos de API mostrados en la interfaz."
+			"name": "Activar automáticamente la condensación inteligente de contexto",
+			"description": "La condensación inteligente de contexto utiliza una llamada LLM para resumir la conversación anterior cuando la ventana de contexto de la tarea alcanza un umbral preestablecido, en lugar de eliminar mensajes antiguos cuando el contexto se llena."
 		},
 		"DIFF_STRATEGY_UNIFIED": {
 			"name": "Usar estrategia de diff unificada experimental",

+ 6 - 2
webview-ui/src/i18n/locales/fr/settings.json

@@ -403,9 +403,13 @@
 	},
 	"experimental": {
 		"warning": "⚠️",
+		"autoCondenseContextPercent": {
+			"label": "Seuil pour déclencher la condensation intelligente du contexte",
+			"description": "Lorsque la fenêtre de contexte atteint ce seuil, Roo la condensera automatiquement."
+		},
 		"AUTO_CONDENSE_CONTEXT": {
-			"name": "Condenser intelligemment la fenêtre de contexte",
-			"description": "Utilise un appel LLM pour résumer la conversation précédente lorsque la fenêtre de contexte de la tâche est presque pleine, plutôt que de supprimer les anciens messages. Avertissement : le coût de la synthèse n'est actuellement pas inclus dans les coûts API affichés dans l'interface."
+			"name": "Déclencher automatiquement la condensation intelligente du contexte",
+			"description": "La condensation intelligente du contexte utilise un appel LLM pour résumer la conversation passée lorsque la fenêtre de contexte de la tâche atteint un seuil prédéfini, plutôt que de supprimer les anciens messages lorsque le contexte est plein."
 		},
 		"DIFF_STRATEGY_UNIFIED": {
 			"name": "Utiliser la stratégie diff unifiée expérimentale",

+ 6 - 2
webview-ui/src/i18n/locales/hi/settings.json

@@ -403,9 +403,13 @@
 	},
 	"experimental": {
 		"warning": "⚠️",
+		"autoCondenseContextPercent": {
+			"label": "बुद्धिमान संदर्भ संघनन को ट्रिगर करने की सीमा",
+			"description": "जब संदर्भ विंडो इस सीमा तक पहुंचती है, तो Roo इसे स्वचालित रूप से संघनित कर देगा।"
+		},
 		"AUTO_CONDENSE_CONTEXT": {
-			"name": "संदर्भ विंडो को बुद्धिमानी से संघनित करें",
-			"description": "जब कार्य का संदर्भ विंडो लगभग भर जाता है, तो पुराने संदेशों को हटाने के बजाय पिछली बातचीत को संक्षेप में प्रस्तुत करने के लिए LLM कॉल का उपयोग करता है। अस्वीकरण: संक्षेपण की लागत वर्तमान में UI में दिखाए गए API लागतों में शामिल नहीं है।"
+			"name": "बुद्धिमान संदर्भ संघनन को स्वचालित रूप से ट्रिगर करें",
+			"description": "बुद्धिमान संदर्भ संघनन कार्य के संदर्भ विंडो के पूर्व-निर्धारित सीमा तक पहुंचने पर पिछली बातचीत को संक्षेप में प्रस्तुत करने के लिए LLM कॉल का उपयोग करता है, बजाय इसके कि संदर्भ भरने पर पुराने संदेशों को हटा दिया जाए।"
 		},
 		"DIFF_STRATEGY_UNIFIED": {
 			"name": "प्रायोगिक एकीकृत diff रणनीति का उपयोग करें",

+ 6 - 2
webview-ui/src/i18n/locales/it/settings.json

@@ -403,9 +403,13 @@
 	},
 	"experimental": {
 		"warning": "⚠️",
+		"autoCondenseContextPercent": {
+			"label": "Soglia per attivare la condensazione intelligente del contesto",
+			"description": "Quando la finestra di contesto raggiunge questa soglia, Roo la condenserà automaticamente."
+		},
 		"AUTO_CONDENSE_CONTEXT": {
-			"name": "Condensa intelligentemente la finestra di contesto",
-			"description": "Utilizza una chiamata LLM per riassumere la conversazione precedente quando la finestra di contesto dell'attività è quasi piena, invece di eliminare i messaggi vecchi. Avviso: il costo della sintesi non è attualmente incluso nei costi API mostrati nell'interfaccia."
+			"name": "Attiva automaticamente la condensazione intelligente del contesto",
+			"description": "La condensazione intelligente del contesto utilizza una chiamata LLM per riassumere la conversazione precedente quando la finestra di contesto dell'attività raggiunge una soglia preimpostata, invece di eliminare i messaggi vecchi quando il contesto si riempie."
 		},
 		"DIFF_STRATEGY_UNIFIED": {
 			"name": "Usa strategia diff unificata sperimentale",

+ 6 - 2
webview-ui/src/i18n/locales/ja/settings.json

@@ -403,9 +403,13 @@
 	},
 	"experimental": {
 		"warning": "⚠️",
+		"autoCondenseContextPercent": {
+			"label": "インテリジェントなコンテキスト圧縮をトリガーするしきい値",
+			"description": "コンテキストウィンドウがこのしきい値に達すると、Rooは自動的に圧縮します。"
+		},
 		"AUTO_CONDENSE_CONTEXT": {
-			"name": "コンテキストウィンドウをインテリジェントに圧縮する",
-			"description": "タスクのコンテキストウィンドウがほぼいっぱいになったとき、古いメッセージを削除する代わりに、LLM呼び出しを使用して過去の会話を要約します。免責事項:要約のコストは現在UIに表示されるAPIコストには含まれていません。"
+			"name": "インテリジェントなコンテキスト圧縮を自動的にトリガーする",
+			"description": "インテリジェントなコンテキスト圧縮は、タスクのコンテキストウィンドウが事前設定されたしきい値に達したとき、コンテキストがいっぱいになって古いメッセージを削除する代わりに、LLM呼び出しを使用して過去の会話を要約します。"
 		},
 		"DIFF_STRATEGY_UNIFIED": {
 			"name": "実験的な統合diff戦略を使用する",

+ 6 - 2
webview-ui/src/i18n/locales/ko/settings.json

@@ -403,9 +403,13 @@
 	},
 	"experimental": {
 		"warning": "⚠️",
+		"autoCondenseContextPercent": {
+			"label": "지능적 컨텍스트 압축을 트리거하는 임계값",
+			"description": "컨텍스트 창이 이 임계값에 도달하면 Roo가 자동으로 압축합니다."
+		},
 		"AUTO_CONDENSE_CONTEXT": {
-			"name": "컨텍스트 창을 지능적으로 압축",
-			"description": "작업의 컨텍스트 창이 거의 가득 찼을 때 이전 메시지를 삭제하는 대신 LLM 호출을 사용하여 이전 대화를 요약합니다. 참고: 요약 비용은 현재 UI에 표시된 API 비용에 포함되지 않습니다."
+			"name": "지능적 컨텍스트 압축 자동 트리거",
+			"description": "지능적 컨텍스트 압축은 작업의 컨텍스트 창이 사전 설정된 임계값에 도달했을 때 컨텍스트가 가득 차서 이전 메시지를 삭제하는 대신 LLM 호출을 사용하여 이전 대화를 요약합니다."
 		},
 		"DIFF_STRATEGY_UNIFIED": {
 			"name": "실험적 통합 diff 전략 사용",

+ 6 - 2
webview-ui/src/i18n/locales/nl/settings.json

@@ -403,9 +403,13 @@
 	},
 	"experimental": {
 		"warning": "⚠️",
+		"autoCondenseContextPercent": {
+			"label": "Drempelwaarde om intelligente contextcompressie te activeren",
+			"description": "Wanneer het contextvenster deze drempelwaarde bereikt, zal Roo het automatisch comprimeren."
+		},
 		"AUTO_CONDENSE_CONTEXT": {
-			"name": "Contextvenster intelligent comprimeren",
-			"description": "Gebruikt een LLM-aanroep om eerdere gesprekken samen te vatten wanneer het contextvenster van de taak bijna vol is, in plaats van oude berichten te verwijderen. Let op: de kosten van het samenvatten zijn momenteel niet inbegrepen in de API-kosten die in de interface worden getoond."
+			"name": "Automatisch intelligente contextcompressie activeren",
+			"description": "Intelligente contextcompressie gebruikt een LLM-aanroep om eerdere gesprekken samen te vatten wanneer het contextvenster van de taak een vooraf ingestelde drempelwaarde bereikt, in plaats van oude berichten te verwijderen wanneer de context vol is."
 		},
 		"DIFF_STRATEGY_UNIFIED": {
 			"name": "Experimentele unified diff-strategie gebruiken",

+ 6 - 2
webview-ui/src/i18n/locales/pl/settings.json

@@ -403,9 +403,13 @@
 	},
 	"experimental": {
 		"warning": "⚠️",
+		"autoCondenseContextPercent": {
+			"label": "Próg wyzwalający inteligentną kondensację kontekstu",
+			"description": "Gdy okno kontekstu osiągnie ten próg, Roo automatycznie je skondensuje."
+		},
 		"AUTO_CONDENSE_CONTEXT": {
-			"name": "Inteligentnie kondensuj okno kontekstu",
-			"description": "Używa wywołania LLM do podsumowania wcześniejszej rozmowy, gdy okno kontekstu zadania jest prawie pełne, zamiast usuwać stare wiadomości. Zastrzeżenie: koszt podsumowania nie jest obecnie uwzględniony w kosztach API pokazywanych w interfejsie."
+			"name": "Automatycznie wyzwalaj inteligentną kondensację kontekstu",
+			"description": "Inteligentna kondensacja kontekstu używa wywołania LLM do podsumowania wcześniejszej rozmowy, gdy okno kontekstu zadania osiągnie ustawiony próg, zamiast usuwać stare wiadomości, gdy kontekst się zapełni."
 		},
 		"DIFF_STRATEGY_UNIFIED": {
 			"name": "Użyj eksperymentalnej ujednoliconej strategii diff",

+ 6 - 2
webview-ui/src/i18n/locales/pt-BR/settings.json

@@ -403,9 +403,13 @@
 	},
 	"experimental": {
 		"warning": "⚠️",
+		"autoCondenseContextPercent": {
+			"label": "Limite para acionar a condensação inteligente de contexto",
+			"description": "Quando a janela de contexto atingir este limite, o Roo a condensará automaticamente."
+		},
 		"AUTO_CONDENSE_CONTEXT": {
-			"name": "Condensar inteligentemente a janela de contexto",
-			"description": "Usa uma chamada LLM para resumir a conversa anterior quando a janela de contexto da tarefa está quase cheia, em vez de descartar mensagens antigas. Aviso: o custo de resumir não está atualmente incluído nos custos de API mostrados na interface."
+			"name": "Acionar automaticamente a condensação inteligente de contexto",
+			"description": "A condensação inteligente de contexto usa uma chamada LLM para resumir a conversa anterior quando a janela de contexto da tarefa atinge um limite predefinido, em vez de descartar mensagens antigas quando o contexto estiver cheio."
 		},
 		"DIFF_STRATEGY_UNIFIED": {
 			"name": "Usar estratégia diff unificada experimental",

+ 6 - 2
webview-ui/src/i18n/locales/ru/settings.json

@@ -403,9 +403,13 @@
 	},
 	"experimental": {
 		"warning": "⚠️",
+		"autoCondenseContextPercent": {
+			"label": "Порог для запуска интеллектуального сжатия контекста",
+			"description": "Когда контекстное окно достигает этого порога, Roo автоматически его сожмёт."
+		},
 		"AUTO_CONDENSE_CONTEXT": {
-			"name": "Интеллектуальное сжатие контекстного окна",
-			"description": "Использует вызов LLM для обобщения предыдущего разговора, когда контекстное окно задачи почти заполнено, вместо удаления старых сообщений. Примечание: стоимость обобщения в настоящее время не включена в стоимость API, отображаемую в интерфейсе."
+			"name": "Автоматически запускать интеллектуальное сжатие контекста",
+			"description": "Интеллектуальное сжатие контекста использует вызов LLM для обобщения предыдущего разговора, когда контекстное окно задачи достигает заданного порога, вместо удаления старых сообщений при заполнении контекста."
 		},
 		"DIFF_STRATEGY_UNIFIED": {
 			"name": "Использовать экспериментальную стратегию унифицированного диффа",

+ 6 - 2
webview-ui/src/i18n/locales/tr/settings.json

@@ -403,9 +403,13 @@
 	},
 	"experimental": {
 		"warning": "⚠️",
+		"autoCondenseContextPercent": {
+			"label": "Akıllı bağlam sıkıştırmayı tetikleyecek eşik",
+			"description": "Bağlam penceresi bu eşiğe ulaştığında, Roo otomatik olarak sıkıştıracaktır."
+		},
 		"AUTO_CONDENSE_CONTEXT": {
-			"name": "Bağlam penceresini akıllıca sıkıştır",
-			"description": "Görevin bağlam penceresi neredeyse dolduğunda, eski mesajları atmak yerine önceki konuşmayı özetlemek için bir LLM çağrısı kullanır. Not: Özetleme maliyeti şu anda arayüzde gösterilen API maliyetlerine dahil değildir."
+			"name": "Akıllı bağlam sıkıştırmayı otomatik olarak tetikle",
+			"description": "Akıllı bağlam sıkıştırma, görevin bağlam penceresi önceden belirlenmiş bir eşiğe ulaştığında, bağlam dolduğunda eski mesajları atmak yerine önceki konuşmayı özetlemek için bir LLM çağrısı kullanır."
 		},
 		"DIFF_STRATEGY_UNIFIED": {
 			"name": "Deneysel birleştirilmiş diff stratejisini kullan",

+ 6 - 2
webview-ui/src/i18n/locales/vi/settings.json

@@ -403,9 +403,13 @@
 	},
 	"experimental": {
 		"warning": "⚠️",
+		"autoCondenseContextPercent": {
+			"label": "Ngưỡng kích hoạt nén ngữ cảnh thông minh",
+			"description": "Khi cửa sổ ngữ cảnh đạt đến ngưỡng này, Roo sẽ tự động nén nó."
+		},
 		"AUTO_CONDENSE_CONTEXT": {
-			"name": "Nén cửa sổ ngữ cảnh một cách thông minh",
-			"description": "Sử dụng một lệnh gọi LLM để tóm tắt cuộc trò chuyện trước đó khi cửa sổ ngữ cảnh của tác vụ gần đầy, thay vì loại bỏ các tin nhắn cũ. Lưu ý: chi phí tóm tắt hiện không được tính vào chi phí API hiển thị trong giao diện người dùng."
+			"name": "Tự động kích hoạt nén ngữ cảnh thông minh",
+			"description": "Nén ngữ cảnh thông minh sử dụng một lệnh gọi LLM để tóm tắt cuộc trò chuyện trước đó khi cửa sổ ngữ cảnh của tác vụ đạt đến ngưỡng đã định, thay vì loại bỏ các tin nhắn cũ khi ngữ cảnh đầy."
 		},
 		"DIFF_STRATEGY_UNIFIED": {
 			"name": "Sử dụng chiến lược diff thống nhất thử nghiệm",

+ 6 - 2
webview-ui/src/i18n/locales/zh-CN/settings.json

@@ -403,9 +403,13 @@
 	},
 	"experimental": {
 		"warning": "⚠️",
+		"autoCondenseContextPercent": {
+			"label": "触发智能上下文压缩的阈值",
+			"description": "当上下文窗口达到此阈值时,Roo 将自动压缩它。"
+		},
 		"AUTO_CONDENSE_CONTEXT": {
-			"name": "智能压缩上下文窗口",
-			"description": "当任务上下文窗口接近填满时,使用 LLM 调用来总结过去的对话,而不是删除旧消息。注意:目前 UI 中显示的 API 费用不包括总结的成本。"
+			"name": "自动触发智能上下文压缩",
+			"description": "智能上下文压缩使用 LLM 调用来总结过去的对话,在任务上下文窗口达到预设阈值时进行,而不是在上下文填满时丢弃旧消息。"
 		},
 		"DIFF_STRATEGY_UNIFIED": {
 			"name": "启用diff更新工具",

+ 6 - 2
webview-ui/src/i18n/locales/zh-TW/settings.json

@@ -403,9 +403,13 @@
 	},
 	"experimental": {
 		"warning": "⚠️",
+		"autoCondenseContextPercent": {
+			"label": "觸發智慧上下文壓縮的閾值",
+			"description": "當上下文視窗達到此閾值時,Roo 將自動壓縮它。"
+		},
 		"AUTO_CONDENSE_CONTEXT": {
-			"name": "智慧壓縮上下文視窗",
-			"description": "當工作的上下文視窗接近填滿時,使用 LLM 呼叫來摘要過去的對話,而非捨棄舊訊息。注意:目前 UI 中顯示的 API 費用並未包含摘要的成本。"
+			"name": "自動觸發智慧上下文壓縮",
+			"description": "智慧上下文壓縮使用 LLM 呼叫來摘要過去的對話,在工作的上下文視窗達到預設閾值時進行,而非在上下文填滿時捨棄舊訊息。"
 		},
 		"DIFF_STRATEGY_UNIFIED": {
 			"name": "使用實驗性統一差異比對策略",