
Add prompt cache

Saoud Rizwan · 1 year ago
commit ec2bfa352a

+ 7 - 18
package-lock.json

@@ -1,16 +1,16 @@
 {
   "name": "claude-dev",
-  "version": "1.0.98",
+  "version": "1.1.15",
   "lockfileVersion": 3,
   "requires": true,
   "packages": {
     "": {
       "name": "claude-dev",
-      "version": "1.0.98",
+      "version": "1.1.15",
       "license": "MIT",
       "dependencies": {
         "@anthropic-ai/bedrock-sdk": "^0.10.2",
-        "@anthropic-ai/sdk": "^0.24.3",
+        "@anthropic-ai/sdk": "^0.26.0",
         "@vscode/codicons": "^0.0.36",
         "default-shell": "^2.2.0",
         "delay": "^6.0.0",
@@ -62,10 +62,9 @@
       }
     },
     "node_modules/@anthropic-ai/sdk": {
-      "version": "0.24.3",
-      "resolved": "https://registry.npmjs.org/@anthropic-ai/sdk/-/sdk-0.24.3.tgz",
-      "integrity": "sha512-916wJXO6T6k8R6BAAcLhLPv/pnLGy7YSEBZXZ1XTFbLcTZE8oTy3oDW9WJf9KKZwMvVcePIfoTSvzXHRcGxkQQ==",
-      "license": "MIT",
+      "version": "0.26.0",
+      "resolved": "https://registry.npmjs.org/@anthropic-ai/sdk/-/sdk-0.26.0.tgz",
+      "integrity": "sha512-vNbZ2rnnMfk8Bf4OdeVy6GA4EXao8tGC0tLEoSAl1NZrip9oOxnEGUkXl3FsPQgeBM5hmpGE1tSLuu9HEVJiHg==",
       "dependencies": {
         "@types/node": "^18.11.18",
         "@types/node-fetch": "^2.6.4",
@@ -73,8 +72,7 @@
         "agentkeepalive": "^4.2.1",
         "form-data-encoder": "1.7.2",
         "formdata-node": "^4.3.2",
-        "node-fetch": "^2.6.7",
-        "web-streams-polyfill": "^3.2.1"
+        "node-fetch": "^2.6.7"
       }
     },
     "node_modules/@anthropic-ai/sdk/node_modules/@types/node": {
@@ -9642,15 +9640,6 @@
         "spdx-expression-parse": "^3.0.0"
       }
     },
-    "node_modules/web-streams-polyfill": {
-      "version": "3.3.3",
-      "resolved": "https://registry.npmjs.org/web-streams-polyfill/-/web-streams-polyfill-3.3.3.tgz",
-      "integrity": "sha512-d2JWLCivmZYTSIoge9MsgFCZrt571BikcWGYkjC1khllbTeDlGqZ2D8vD8E/lJa8WGWbb7Plm8/XJYV7IJHZZw==",
-      "license": "MIT",
-      "engines": {
-        "node": ">= 8"
-      }
-    },
     "node_modules/web-tree-sitter": {
       "version": "0.22.6",
       "resolved": "https://registry.npmjs.org/web-tree-sitter/-/web-tree-sitter-0.22.6.tgz",

+ 2 - 2
package.json

@@ -123,7 +123,7 @@
   },
   "dependencies": {
     "@anthropic-ai/bedrock-sdk": "^0.10.2",
-    "@anthropic-ai/sdk": "^0.24.3",
+    "@anthropic-ai/sdk": "^0.26.0",
     "@vscode/codicons": "^0.0.36",
     "default-shell": "^2.2.0",
     "delay": "^6.0.0",
@@ -138,4 +138,4 @@
     "tree-sitter-wasms": "^0.1.11",
     "web-tree-sitter": "^0.22.6"
   }
-}
+}

+ 33 - 4
src/ClaudeDev.ts

@@ -411,10 +411,25 @@ export class ClaudeDev {
 		}
 	}
 
-	calculateApiCost(inputTokens: number, outputTokens: number): number {
-		const inputCost = (this.api.getModel().info.inputPrice / 1_000_000) * inputTokens
+	calculateApiCost(
+		inputTokens: number,
+		outputTokens: number,
+		cacheCreationInputTokens?: number,
+		cacheReadInputTokens?: number
+	): number {
+		const modelCacheWritesPrice = this.api.getModel().info.cacheWrites
+		let cacheWritesCost = 0
+		if (cacheCreationInputTokens && modelCacheWritesPrice) {
+			cacheWritesCost = (modelCacheWritesPrice / 1_000_000) * cacheCreationInputTokens
+		}
+		const modelCacheReadsPrice = this.api.getModel().info.cacheReads
+		let cacheReadsCost = 0
+		if (cacheReadInputTokens && modelCacheReadsPrice) {
+			cacheReadsCost = (modelCacheReadsPrice / 1_000_000) * cacheReadInputTokens
+		}
+		const baseInputCost = (this.api.getModel().info.inputPrice / 1_000_000) * inputTokens
 		const outputCost = (this.api.getModel().info.outputPrice / 1_000_000) * outputTokens
-		const totalCost = inputCost + outputCost
+		const totalCost = cacheWritesCost + cacheReadsCost + baseInputCost + outputCost
 		return totalCost
 	}
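
The updated method simply adds two terms to the old input + output sum, each billed at its own per-million-token rate. As a worked example (hypothetical usage numbers, priced with the claude-3-5-sonnet rates defined in src/shared/api.ts):

	// Sketch of the same arithmetic with made-up token counts.
	const cacheWritesCost = (3.75 / 1_000_000) * 10_000 // $0.0375 to write 10k tokens to the cache
	const cacheReadsCost = (0.3 / 1_000_000) * 50_000 // $0.0150 to read 50k tokens back
	const baseInputCost = (3.0 / 1_000_000) * 1_000 // $0.0030 for 1k uncached input tokens
	const outputCost = (15.0 / 1_000_000) * 2_000 // $0.0300 for 2k output tokens
	const totalCost = cacheWritesCost + cacheReadsCost + baseInputCost + outputCost // $0.0855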
 
@@ -901,6 +916,7 @@ export class ClaudeDev {
 		try {
 			let systemPrompt = SYSTEM_PROMPT()
 			if (this.customInstructions && this.customInstructions.trim()) {
+				// altering the system prompt mid-task will break the prompt cache, but in the grand scheme of things this won't change often, so it's better not to pollute user messages with it the way we have to with <potentially relevant details>
 				systemPrompt += `
 ====
 
@@ -975,12 +991,25 @@ ${this.customInstructions.trim()}
 			let assistantResponses: Anthropic.Messages.ContentBlock[] = []
 			let inputTokens = response.usage.input_tokens
 			let outputTokens = response.usage.output_tokens
+			let cacheCreationInputTokens =
+				(response as Anthropic.Beta.PromptCaching.Messages.PromptCachingBetaMessage).usage
+					.cache_creation_input_tokens || undefined
+			let cacheReadInputTokens =
+				(response as Anthropic.Beta.PromptCaching.Messages.PromptCachingBetaMessage).usage
+					.cache_read_input_tokens || undefined
 			await this.say(
 				"api_req_finished",
 				JSON.stringify({
 					tokensIn: inputTokens,
 					tokensOut: outputTokens,
-					cost: this.calculateApiCost(inputTokens, outputTokens),
+					cacheWrites: cacheCreationInputTokens,
+					cacheReads: cacheReadInputTokens,
+					cost: this.calculateApiCost(
+						inputTokens,
+						outputTokens,
+						cacheCreationInputTokens,
+						cacheReadInputTokens
+					),
 				})
 			)
 

+ 76 - 17
src/api/anthropic.ts

@@ -16,23 +16,82 @@ export class AnthropicHandler implements ApiHandler {
 		messages: Anthropic.Messages.MessageParam[],
 		tools: Anthropic.Messages.Tool[]
 	): Promise<Anthropic.Messages.Message> {
-		return await this.client.messages.create(
-			{
-				model: this.getModel().id,
-				max_tokens: this.getModel().info.maxTokens,
-				system: systemPrompt,
-				messages,
-				tools,
-				tool_choice: { type: "auto" },
-			},
-			// https://x.com/alexalbert__/status/1812921642143900036
-			// https://github.com/anthropics/anthropic-sdk-typescript?tab=readme-ov-file#default-headers
-			this.getModel().id === "claude-3-5-sonnet-20240620"
-				? {
-						headers: { "anthropic-beta": "max-tokens-3-5-sonnet-2024-07-15" },
-				  }
-				: undefined
-		)
+		const modelId = this.getModel().id
+		switch (modelId) {
+			case "claude-3-5-sonnet-20240620":
+			case "claude-3-haiku-20240307":
+				/*
+				The latest message will be the new user message, the one before it will be the assistant message from the previous request, and the user message before that will be a previously cached user message. So we need to mark the latest user message as ephemeral to cache it for the next request, and mark the second-to-last user message as ephemeral to let the server know the last message it can retrieve from the cache for the current request.
+				*/
+				const userMsgIndices = messages.reduce(
+					(acc, msg, index) => (msg.role === "user" ? [...acc, index] : acc),
+					[] as number[]
+				)
+				const lastUserMsgIndex = userMsgIndices[userMsgIndices.length - 1] ?? -1
+				const secondLastMsgUserIndex = userMsgIndices[userMsgIndices.length - 2] ?? -1
+				return await this.client.beta.promptCaching.messages.create(
+					{
+						model: modelId,
+						max_tokens: this.getModel().info.maxTokens,
+						system: [{ text: systemPrompt, type: "text", cache_control: { type: "ephemeral" } }],
+						messages: messages.map((message, index) => {
+							if (index === lastUserMsgIndex || index === secondLastMsgUserIndex) {
+								return {
+									...message,
+									content:
+										typeof message.content === "string"
+											? [
+													{
+														type: "text",
+														text: message.content,
+														cache_control: { type: "ephemeral" },
+													},
+											  ]
+											: message.content.map((content, contentIndex) =>
+													contentIndex === message.content.length - 1
+														? { ...content, cache_control: { type: "ephemeral" } }
+														: content
+											  ),
+								}
+							}
+							return message
+						}),
+						tools: tools.map((tool, index) =>
+							index === tools.length - 1 ? { ...tool, cache_control: { type: "ephemeral" } } : tool
+						),
+						tool_choice: { type: "auto" },
+					},
+					(() => {
+						// 8192 tokens: https://x.com/alexalbert__/status/1812921642143900036
+						// prompt caching: https://x.com/alexalbert__/status/1823751995901272068
+						// https://github.com/anthropics/anthropic-sdk-typescript?tab=readme-ov-file#default-headers
+						// https://github.com/anthropics/anthropic-sdk-typescript/commit/c920b77fc67bd839bfeb6716ceab9d7c9bbe7393
+						switch (modelId) {
+							case "claude-3-5-sonnet-20240620":
+								return {
+									headers: {
+										"anthropic-beta": "prompt-caching-2024-07-31,max-tokens-3-5-sonnet-2024-07-15",
+									},
+								}
+							case "claude-3-haiku-20240307":
+								return {
+									headers: { "anthropic-beta": "prompt-caching-2024-07-31" },
+								}
+							default:
+								return undefined
+						}
+					})()
+				)
+			default:
+				return await this.client.messages.create({
+					model: modelId,
+					max_tokens: this.getModel().info.maxTokens,
+					system: [{ text: systemPrompt, type: "text" }],
+					messages,
+					tools,
+					tool_choice: { type: "auto" },
+				})
+		}
 	}
 
 	createUserReadableRequest(
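
To make the breakpoint placement above easier to follow in isolation, here is a minimal standalone sketch under simplified types (the Msg type and placeCacheBreakpoints name are illustrative, not part of the commit; the real code operates on Anthropic.Messages.MessageParam):

	type Msg = { role: "user" | "assistant"; content: string }

	// Mark the last two user messages with ephemeral cache_control, mirroring the logic above:
	// the newest breakpoint caches this turn for the next request, while the previous one
	// tells the server how much of the prompt it can read back from the cache right now.
	function placeCacheBreakpoints(messages: Msg[]) {
		const userIndices = messages.reduce(
			(acc, msg, i) => (msg.role === "user" ? [...acc, i] : acc),
			[] as number[]
		)
		const marked = new Set(userIndices.slice(-2)) // last and second-to-last user messages
		return messages.map((msg, i) =>
			marked.has(i)
				? {
						...msg,
						content: [
							{ type: "text" as const, text: msg.content, cache_control: { type: "ephemeral" as const } },
						],
				  }
				: msg
		)
	}

Each request rolls the cache forward one turn: the previous request's "latest" breakpoint becomes the current request's read point.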

+ 26 - 0
src/shared/api.ts

@@ -18,8 +18,11 @@ export type ApiConfiguration = ApiHandlerOptions & {
 export interface ModelInfo {
 	maxTokens: number
 	supportsImages: boolean
+	supportsPromptCache: boolean
 	inputPrice: number
 	outputPrice: number
+	cacheWrites?: number
+	cacheReads?: number
 }
 
 export type ApiModelId = AnthropicModelId | OpenRouterModelId | BedrockModelId
@@ -32,26 +35,36 @@ export const anthropicModels = {
 	"claude-3-5-sonnet-20240620": {
 		maxTokens: 8192,
 		supportsImages: true,
+		supportsPromptCache: true,
 		inputPrice: 3.0, // $3 per million input tokens
 		outputPrice: 15.0, // $15 per million output tokens
+		cacheWrites: 3.75, // $3.75 per million tokens
+		cacheReads: 0.3, // $0.30 per million tokens
 	},
 	"claude-3-opus-20240229": {
 		maxTokens: 4096,
 		supportsImages: true,
+		supportsPromptCache: false,
 		inputPrice: 15.0,
 		outputPrice: 75.0,
+		cacheWrites: 18.75,
+		cacheReads: 1.5,
 	},
 	"claude-3-sonnet-20240229": {
 		maxTokens: 4096,
 		supportsImages: true,
+		supportsPromptCache: false,
 		inputPrice: 3.0,
 		outputPrice: 15.0,
 	},
 	"claude-3-haiku-20240307": {
 		maxTokens: 4096,
 		supportsImages: true,
+		supportsPromptCache: true,
 		inputPrice: 0.25,
 		outputPrice: 1.25,
+		cacheWrites: 0.3,
+		cacheReads: 0.03,
 	},
 } as const satisfies Record<string, ModelInfo> // as const assertion makes the object deeply readonly
 
@@ -63,24 +76,28 @@ export const bedrockModels = {
 	"anthropic.claude-3-5-sonnet-20240620-v1:0": {
 		maxTokens: 4096,
 		supportsImages: true,
+		supportsPromptCache: false,
 		inputPrice: 3.0,
 		outputPrice: 15.0,
 	},
 	"anthropic.claude-3-opus-20240229-v1:0": {
 		maxTokens: 4096,
 		supportsImages: true,
+		supportsPromptCache: false,
 		inputPrice: 15.0,
 		outputPrice: 75.0,
 	},
 	"anthropic.claude-3-sonnet-20240229-v1:0": {
 		maxTokens: 4096,
 		supportsImages: true,
+		supportsPromptCache: false,
 		inputPrice: 3.0,
 		outputPrice: 15.0,
 	},
 	"anthropic.claude-3-haiku-20240307-v1:0": {
 		maxTokens: 4096,
 		supportsImages: true,
+		supportsPromptCache: false,
 		inputPrice: 0.25,
 		outputPrice: 1.25,
 	},
@@ -94,42 +111,49 @@ export const openRouterModels = {
 	"anthropic/claude-3.5-sonnet:beta": {
 		maxTokens: 8192,
 		supportsImages: true,
+		supportsPromptCache: false,
 		inputPrice: 3.0,
 		outputPrice: 15.0,
 	},
 	"anthropic/claude-3-opus:beta": {
 		maxTokens: 4096,
 		supportsImages: true,
+		supportsPromptCache: false,
 		inputPrice: 15,
 		outputPrice: 75,
 	},
 	"anthropic/claude-3-sonnet:beta": {
 		maxTokens: 4096,
 		supportsImages: true,
+		supportsPromptCache: false,
 		inputPrice: 3,
 		outputPrice: 15,
 	},
 	"anthropic/claude-3-haiku:beta": {
 		maxTokens: 4096,
 		supportsImages: true,
+		supportsPromptCache: false,
 		inputPrice: 0.25,
 		outputPrice: 1.25,
 	},
 	"openai/gpt-4o-2024-08-06": {
 		maxTokens: 16384,
 		supportsImages: true,
+		supportsPromptCache: false,
 		inputPrice: 2.5,
 		outputPrice: 10,
 	},
 	"openai/gpt-4o-mini-2024-07-18": {
 		maxTokens: 16384,
 		supportsImages: true,
+		supportsPromptCache: false,
 		inputPrice: 0.15,
 		outputPrice: 0.6,
 	},
 	"openai/gpt-4-turbo": {
 		maxTokens: 4096,
 		supportsImages: true,
+		supportsPromptCache: false,
 		inputPrice: 10,
 		outputPrice: 30,
 	},
@@ -175,6 +199,7 @@ export const openRouterModels = {
 	"deepseek/deepseek-coder": {
 		maxTokens: 4096,
 		supportsImages: false,
+		supportsPromptCache: false,
 		inputPrice: 0.14,
 		outputPrice: 0.28,
 	},
@@ -182,6 +207,7 @@ export const openRouterModels = {
 	"mistralai/mistral-large": {
 		maxTokens: 8192,
 		supportsImages: false,
+		supportsPromptCache: false,
 		inputPrice: 3,
 		outputPrice: 9,
 	},
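
Note the pricing pattern in the Anthropic entries: cache writes are billed at a premium over regular input tokens, while cache reads cost a small fraction of them (for claude-3-5-sonnet, $3.75 vs. $3.00 and $0.30 vs. $3.00 per million tokens). Since cacheWrites and cacheReads are optional, consumers should guard on supportsPromptCache before pricing cache traffic; a hypothetical helper, not part of the commit:

	// Hypothetical helper: expose cache prices only for models that
	// actually support prompt caching (e.g. Opus defines prices but
	// has supportsPromptCache: false here).
	function getCachePricing(info: ModelInfo): { writes: number; reads: number } | undefined {
		if (!info.supportsPromptCache || info.cacheWrites === undefined || info.cacheReads === undefined) {
			return undefined
		}
		return { writes: info.cacheWrites, reads: info.cacheReads }
	}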

+ 1 - 0
webview-ui/src/App.tsx

@@ -105,6 +105,7 @@ const App: React.FC = () => {
 						vscodeThemeName={vscodeThemeName}
 						showAnnouncement={showAnnouncement}
 						selectedModelSupportsImages={selectedModelInfo.supportsImages}
+						selectedModelSupportsPromptCache={selectedModelInfo.supportsPromptCache}
 						hideAnnouncement={() => setShowAnnouncement(false)}
 					/>
 				</>

+ 39 - 19
webview-ui/src/components/ApiOptions.tsx

@@ -217,25 +217,17 @@ const ModelInfoView = ({ modelInfo }: { modelInfo: ModelInfo }) => {
 
 	return (
 		<p style={{ fontSize: "12px", marginTop: "2px", color: "var(--vscode-descriptionForeground)" }}>
-			<span
-				style={{
-					fontWeight: 500,
-					color: modelInfo.supportsImages
-						? "var(--vscode-testing-iconPassed)"
-						: "var(--vscode-errorForeground)",
-				}}>
-				<i
-					className={`codicon codicon-${modelInfo.supportsImages ? "check" : "x"}`}
-					style={{
-						marginRight: 4,
-						marginBottom: modelInfo.supportsImages ? 1 : -1,
-						fontSize: modelInfo.supportsImages ? 11 : 13,
-						fontWeight: 700,
-						display: "inline-block",
-						verticalAlign: "bottom",
-					}}></i>
-				{modelInfo.supportsImages ? "Supports images" : "Does not support images"}
-			</span>
+			<ModelInfoSupportsItem
+				isSupported={modelInfo.supportsPromptCache}
+				supportsLabel="Supports prompt cache"
+				doesNotSupportLabel="Does not support prompt cache"
+			/>
+			<br />
+			<ModelInfoSupportsItem
+				isSupported={modelInfo.supportsImages}
+				supportsLabel="Supports images"
+				doesNotSupportLabel="Does not support images"
+			/>
 			<br />
 			<span style={{ fontWeight: 500 }}>Max output:</span> {modelInfo.maxTokens.toLocaleString()} tokens
 			<br />
@@ -247,6 +239,34 @@ const ModelInfoView = ({ modelInfo }: { modelInfo: ModelInfo }) => {
 	)
 }
 
+const ModelInfoSupportsItem = ({
+	isSupported,
+	supportsLabel,
+	doesNotSupportLabel,
+}: {
+	isSupported: boolean
+	supportsLabel: string
+	doesNotSupportLabel: string
+}) => (
+	<span
+		style={{
+			fontWeight: 500,
+			color: isSupported ? "var(--vscode-testing-iconPassed)" : "var(--vscode-errorForeground)",
+		}}>
+		<i
+			className={`codicon codicon-${isSupported ? "check" : "x"}`}
+			style={{
+				marginRight: 4,
+				marginBottom: isSupported ? 1 : -1,
+				fontSize: isSupported ? 11 : 13,
+				fontWeight: 700,
+				display: "inline-block",
+				verticalAlign: "bottom",
+			}}></i>
+		{isSupported ? supportsLabel : doesNotSupportLabel}
+	</span>
+)
+
 export function normalizeApiConfiguration(apiConfiguration?: ApiConfiguration) {
 	const provider = apiConfiguration?.apiProvider || "anthropic"
 	const modelId = apiConfiguration?.apiModelId

+ 5 - 0
webview-ui/src/components/ChatView.tsx

@@ -22,6 +22,7 @@ interface ChatViewProps {
 	vscodeThemeName?: string
 	showAnnouncement: boolean
 	selectedModelSupportsImages: boolean
+	selectedModelSupportsPromptCache: boolean
 	hideAnnouncement: () => void
 }
 
@@ -34,6 +35,7 @@ const ChatView = ({
 	vscodeThemeName,
 	showAnnouncement,
 	selectedModelSupportsImages,
+	selectedModelSupportsPromptCache,
 	hideAnnouncement,
 }: ChatViewProps) => {
 	//const task = messages.length > 0 ? (messages[0].say === "task" ? messages[0] : undefined) : undefined
@@ -448,6 +450,9 @@ const ChatView = ({
 					task={task}
 					tokensIn={apiMetrics.totalTokensIn}
 					tokensOut={apiMetrics.totalTokensOut}
+					doesModelSupportPromptCache={selectedModelSupportsPromptCache}
+					cacheWrites={apiMetrics.totalCacheWrites}
+					cacheReads={apiMetrics.totalCacheReads}
 					totalCost={apiMetrics.totalCost}
 					onClose={handleTaskCloseButtonClick}
 					isHidden={isHidden}

+ 33 - 1
webview-ui/src/components/TaskHeader.tsx

@@ -9,12 +9,25 @@ interface TaskHeaderProps {
 	task: ClaudeMessage
 	tokensIn: number
 	tokensOut: number
+	doesModelSupportPromptCache: boolean
+	cacheWrites?: number
+	cacheReads?: number
 	totalCost: number
 	onClose: () => void
 	isHidden: boolean
 }
 
-const TaskHeader: React.FC<TaskHeaderProps> = ({ task, tokensIn, tokensOut, totalCost, onClose, isHidden }) => {
+const TaskHeader: React.FC<TaskHeaderProps> = ({
+	task,
+	tokensIn,
+	tokensOut,
+	doesModelSupportPromptCache,
+	cacheWrites,
+	cacheReads,
+	totalCost,
+	onClose,
+	isHidden,
+}) => {
 	const [isExpanded, setIsExpanded] = useState(false)
 	const [showSeeMore, setShowSeeMore] = useState(false)
 	const textContainerRef = useRef<HTMLDivElement>(null)
@@ -194,6 +207,25 @@ const TaskHeader: React.FC<TaskHeaderProps> = ({ task, tokensIn, tokensOut, tota
 							{tokensOut.toLocaleString()}
 						</span>
 					</div>
+					{(doesModelSupportPromptCache || cacheReads !== undefined || cacheWrites !== undefined) && (
+						<div style={{ display: "flex", alignItems: "center", gap: "4px", flexWrap: "wrap" }}>
+							<span style={{ fontWeight: "bold" }}>Prompt Cache:</span>
+							<span style={{ display: "flex", alignItems: "center", gap: "3px" }}>
+								<i
+									className="codicon codicon-database"
+									style={{ fontSize: "12px", fontWeight: "bold", marginBottom: "-1px" }}
+								/>
+								+{(cacheWrites || 0).toLocaleString()}
+							</span>
+							<span style={{ display: "flex", alignItems: "center", gap: "3px" }}>
+								<i
+									className="codicon codicon-arrow-right"
+									style={{ fontSize: "12px", fontWeight: "bold", marginBottom: 0 }}
+								/>
+								{(cacheReads || 0).toLocaleString()}
+							</span>
+						</div>
+					)}
 					<div
 						style={{
 							display: "flex",

+ 13 - 3
webview-ui/src/utils/getApiMetrics.ts

@@ -3,6 +3,8 @@ import { ClaudeMessage } from "../../../src/shared/ExtensionMessage"
 interface ApiMetrics {
 	totalTokensIn: number
 	totalTokensOut: number
+	totalCacheWrites?: number
+	totalCacheReads?: number
 	totalCost: number
 }
 
@@ -11,10 +13,10 @@ interface ApiMetrics {
  *
  * This function processes 'api_req_started' messages that have been combined with their
  * corresponding 'api_req_finished' messages by the combineApiRequests function.
- * It extracts and sums up the tokensIn, tokensOut, and cost from these messages.
+ * It extracts and sums up the tokensIn, tokensOut, cacheWrites, cacheReads, and cost from these messages.
  *
  * @param messages - An array of ClaudeMessage objects to process.
- * @returns An ApiMetrics object containing totalTokensIn, totalTokensOut, and totalCost.
+ * @returns An ApiMetrics object containing totalTokensIn, totalTokensOut, totalCacheWrites, totalCacheReads, and totalCost.
  *
  * @example
  * const messages = [
@@ -27,6 +29,8 @@ export function getApiMetrics(messages: ClaudeMessage[]): ApiMetrics {
 	const result: ApiMetrics = {
 		totalTokensIn: 0,
 		totalTokensOut: 0,
+		totalCacheWrites: undefined,
+		totalCacheReads: undefined,
 		totalCost: 0,
 	}
 
@@ -34,7 +38,7 @@ export function getApiMetrics(messages: ClaudeMessage[]): ApiMetrics {
 		if (message.type === "say" && message.say === "api_req_started" && message.text) {
 			try {
 				const parsedData = JSON.parse(message.text)
-				const { tokensIn, tokensOut, cost } = parsedData
+				const { tokensIn, tokensOut, cacheWrites, cacheReads, cost } = parsedData
 
 				if (typeof tokensIn === "number") {
 					result.totalTokensIn += tokensIn
@@ -42,6 +46,12 @@ export function getApiMetrics(messages: ClaudeMessage[]): ApiMetrics {
 				if (typeof tokensOut === "number") {
 					result.totalTokensOut += tokensOut
 				}
+				if (typeof cacheWrites === "number") {
+					result.totalCacheWrites = (result.totalCacheWrites ?? 0) + cacheWrites
+				}
+				if (typeof cacheReads === "number") {
+					result.totalCacheReads = (result.totalCacheReads ?? 0) + cacheReads
+				}
 				if (typeof cost === "number") {
 					result.totalCost += cost
 				}
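
As a usage sketch (made-up numbers): combineApiRequests merges each api_req_finished payload into its api_req_started message, so one combined message is enough to exercise the new fields. The cache totals stay undefined until at least one request reports cache activity:

	const messages = [
		{
			type: "say",
			say: "api_req_started",
			text: JSON.stringify({ tokensIn: 1_000, tokensOut: 2_000, cacheWrites: 10_000, cacheReads: 50_000, cost: 0.0855 }),
		},
	] as ClaudeMessage[] // cast for brevity; real messages carry more fields
	const metrics = getApiMetrics(messages)
	// → { totalTokensIn: 1000, totalTokensOut: 2000, totalCacheWrites: 10000, totalCacheReads: 50000, totalCost: 0.0855 }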