Enable separate config for truncation for models without context caching

Nissa Seru 1 year ago
parent commit d154d059a6
2 changed files with 62 additions and 22 deletions
  1. src/core/Cline.ts: 21 additions, 13 deletions
  2. src/core/sliding-window/index.ts: 41 additions, 9 deletions

+ 21 - 13
src/core/Cline.ts

@@ -53,7 +53,7 @@ import { AssistantMessageContent, parseAssistantMessage, ToolParamName, ToolUseN
 import { formatResponse } from "./prompts/responses"
 import { SYSTEM_PROMPT } from "./prompts/system"
 import { modes, defaultModeSlug, getModeBySlug } from "../shared/modes"
-import { truncateHalfConversation } from "./sliding-window"
+import { truncateConversationIfNeeded } from "./sliding-window"
 import { ClineProvider, GlobalFileNames } from "./webview/ClineProvider"
 import { detectCodeOmission } from "../integrations/editor/detect-omission"
 import { BrowserSession } from "../services/browser/BrowserSession"
@@ -837,18 +837,26 @@ export class Cline {
 
 		// If the previous API request's total token usage is close to the context window, truncate the conversation history to free up space for the new request
 		if (previousApiReqIndex >= 0) {
-			const previousRequest = this.clineMessages[previousApiReqIndex]
-			if (previousRequest && previousRequest.text) {
-				const { tokensIn, tokensOut, cacheWrites, cacheReads }: ClineApiReqInfo = JSON.parse(
-					previousRequest.text,
-				)
-				const totalTokens = (tokensIn || 0) + (tokensOut || 0) + (cacheWrites || 0) + (cacheReads || 0)
-				const contextWindow = this.api.getModel().info.contextWindow || 128_000
-				const maxAllowedSize = Math.max(contextWindow - 40_000, contextWindow * 0.8)
-				if (totalTokens >= maxAllowedSize) {
-					const truncatedMessages = truncateHalfConversation(this.apiConversationHistory)
-					await this.overwriteApiConversationHistory(truncatedMessages)
-				}
+			const previousRequest = this.clineMessages[previousApiReqIndex]?.text
+
+			if (previousRequest) {
+				const {
+					tokensIn = 0,
+					tokensOut = 0,
+					cacheWrites = 0,
+					cacheReads = 0,
+				}: ClineApiReqInfo = JSON.parse(previousRequest)
+				const totalTokens = tokensIn + tokensOut + cacheWrites + cacheReads
+
+				const trimmedMessages = truncateConversationIfNeeded(
+					this.apiConversationHistory,
+					totalTokens,
+					this.api.getModel().info,
+				)
+
+				if (trimmedMessages !== this.apiConversationHistory) {
+					await this.overwriteApiConversationHistory(trimmedMessages)
+				}
 			}
 		}
 
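For reference, the token threshold that this hunk previously computed inline, and that getMaxTokensForPromptCachingModels now encapsulates, can be sketched on its own. This is an illustrative sketch, not part of the commit; maxAllowedTokens is a hypothetical name and the window sizes are examples:

const maxAllowedTokens = (contextWindow: number): number =>
	Math.max(contextWindow - 40_000, contextWindow * 0.8)

maxAllowedTokens(128_000) // 102_400: the 80% branch wins for windows under 200k
maxAllowedTokens(200_000) // 160_000: both branches agree exactly at 200k
maxAllowedTokens(300_000) // 260_000: the fixed 40k headroom wins above 200k

In other words, small-window models keep at least 20% of the window free for the next request, while large-window models reserve a flat 40,000 tokens.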

+ 41 - 9
src/core/sliding-window/index.ts

@@ -1,4 +1,6 @@
 import { Anthropic } from "@anthropic-ai/sdk"
+import { ModelInfo } from "../../shared/api"
+import { MessageParam } from "@anthropic-ai/sdk/resources/messages.mjs"
 
 /*
 We can't implement a dynamically updating sliding window as it would break prompt cache
@@ -9,18 +11,48 @@ Therefore, this function should only be called when absolutely necessary to fit
 context limits, not as a continuous process.
 */
-export function truncateHalfConversation(
+export function truncateConversation(
 	messages: Anthropic.Messages.MessageParam[],
+	fracToRemove: number,
 ): Anthropic.Messages.MessageParam[] {
-	// API expects messages to be in user-assistant order, and tool use messages must be followed by tool results. We need to maintain this structure while truncating.
-
-	// Always keep the first Task message (this includes the project's file structure in environment_details)
 	const truncatedMessages = [messages[0]]
-
-	// Remove half of user-assistant pairs
-	const messagesToRemove = Math.floor(messages.length / 4) * 2 // has to be even number
-
-	const remainingMessages = messages.slice(messagesToRemove + 1) // has to start with assistant message since tool result cannot follow assistant message with no tool use
+	const rawMessagesToRemove = Math.floor((messages.length - 1) * fracToRemove)
+	const messagesToRemove = rawMessagesToRemove - (rawMessagesToRemove % 2) // must remove an even number to keep user-assistant alternation
+	const remainingMessages = messages.slice(messagesToRemove + 1) // retained tail starts with an assistant message, as the API requires
 	truncatedMessages.push(...remainingMessages)
 
 	return truncatedMessages
 }
+
+export function truncateConversationIfNeeded(
+	messages: MessageParam[],
+	totalTokens: number,
+	modelInfo: ModelInfo,
+): MessageParam[] {
+	if (modelInfo.supportsPromptCache) {
+		return totalTokens < getMaxTokensForPromptCachingModels(modelInfo)
+			? messages
+			: truncateConversation(messages, getTruncFractionForPromptCachingModels(modelInfo))
+	} else {
+		return totalTokens < getMaxTokensForNonPromptCachingModels(modelInfo)
+			? messages
+			: truncateConversation(messages, getTruncFractionForNonPromptCachingModels(modelInfo))
+	}
+}
+
+function getMaxTokensForPromptCachingModels(modelInfo: ModelInfo): number {
+	return Math.max(modelInfo.contextWindow - 40_000, modelInfo.contextWindow * 0.8)
+}
+
+function getTruncFractionForPromptCachingModels(modelInfo: ModelInfo): number {
+	// Fraction of messages to drop; 0.5 preserves the remove-half behavior of truncateHalfConversation
+	return 0.5
+}
+
+function getMaxTokensForNonPromptCachingModels(modelInfo: ModelInfo): number {
+	return Math.max(modelInfo.contextWindow - 40_000, modelInfo.contextWindow * 0.8)
+}
+
+function getTruncFractionForNonPromptCachingModels(modelInfo: ModelInfo): number {
+	// Fraction of messages to drop; 0.5 preserves the remove-half behavior of truncateHalfConversation
+	return 0.5
+}
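To illustrate the new truncation behavior end to end, here is a minimal sketch, assuming the remove-half fraction of 0.5 from the helpers above and a nine-message history; the import path and message contents are placeholders:

import { Anthropic } from "@anthropic-ai/sdk"
import { truncateConversation } from "./src/core/sliding-window"

// The first message is the task message and is always kept.
const history: Anthropic.Messages.MessageParam[] = [
	{ role: "user", content: "task" },
	{ role: "assistant", content: "a1" },
	{ role: "user", content: "u2" },
	{ role: "assistant", content: "a2" },
	{ role: "user", content: "u3" },
	{ role: "assistant", content: "a3" },
	{ role: "user", content: "u4" },
	{ role: "assistant", content: "a4" },
	{ role: "user", content: "u5" },
]

// floor((9 - 1) * 0.5) = 4 messages are removed; 4 is already even, so the
// result is the task message plus messages[5..8]. The retained tail starts
// with an assistant message, preserving the user-assistant alternation.
const truncated = truncateConversation(history, 0.5)
console.log(truncated.map((m) => m.content)) // ["task", "a3", "u4", "a4", "u5"]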