
[WIP] Claude 3.7 Sonnet (Thinking)

cte committed 10 months ago
commit b2b135c05c
4 changed files with 47 additions and 30 deletions
  1. package-lock.json (+5 -4)
  2. package.json (+1 -1)
  3. src/api/providers/anthropic.ts (+40 -25)
  4. src/shared/api.ts (+1 -0)

package-lock.json (+5 -4)

@@ -9,7 +9,7 @@
 			"version": "3.7.1",
 			"dependencies": {
 				"@anthropic-ai/bedrock-sdk": "^0.10.2",
-				"@anthropic-ai/sdk": "^0.26.0",
+				"@anthropic-ai/sdk": "^0.37.0",
 				"@anthropic-ai/vertex-sdk": "^0.4.1",
 				"@aws-sdk/client-bedrock-runtime": "^3.706.0",
 				"@google/generative-ai": "^0.18.0",
@@ -122,9 +122,10 @@
 			}
 		},
 		"node_modules/@anthropic-ai/sdk": {
-			"version": "0.26.1",
-			"resolved": "https://registry.npmjs.org/@anthropic-ai/sdk/-/sdk-0.26.1.tgz",
-			"integrity": "sha512-HeMJP1bDFfQPQS3XTJAmfXkFBdZ88wvfkE05+vsoA9zGn5dHqEaHOPsqkazf/i0gXYg2XlLxxZrf6rUAarSqzw==",
+			"version": "0.37.0",
+			"resolved": "https://registry.npmjs.org/@anthropic-ai/sdk/-/sdk-0.37.0.tgz",
+			"integrity": "sha512-tHjX2YbkUBwEgg0JZU3EFSSAQPoK4qQR/NFYa8Vtzd5UAyXzZksCw2In69Rml4R/TyHPBfRYaLK35XiOe33pjw==",
+			"license": "MIT",
 			"dependencies": {
 				"@types/node": "^18.11.18",
 				"@types/node-fetch": "^2.6.4",

package.json (+1 -1)

@@ -304,7 +304,7 @@
 	},
 	"dependencies": {
 		"@anthropic-ai/bedrock-sdk": "^0.10.2",
-		"@anthropic-ai/sdk": "^0.26.0",
+		"@anthropic-ai/sdk": "^0.37.0",
 		"@anthropic-ai/vertex-sdk": "^0.4.1",
 		"@aws-sdk/client-bedrock-runtime": "^3.706.0",
 		"@google/generative-ai": "^0.18.0",

src/api/providers/anthropic.ts (+40 -25)

@@ -1,5 +1,7 @@
 import { Anthropic } from "@anthropic-ai/sdk"
 import { Stream as AnthropicStream } from "@anthropic-ai/sdk/streaming"
+import { CacheControlEphemeral } from "@anthropic-ai/sdk/resources"
+import { BetaThinkingConfigParam } from "@anthropic-ai/sdk/resources/beta"
 import {
 	anthropicDefaultModelId,
 	AnthropicModelId,
@@ -12,12 +14,15 @@ import { ApiStream } from "../transform/stream"
 
 const ANTHROPIC_DEFAULT_TEMPERATURE = 0
 
+const THINKING_MODELS = ["claude-3-7-sonnet-20250219"]
+
 export class AnthropicHandler implements ApiHandler, SingleCompletionHandler {
 	private options: ApiHandlerOptions
 	private client: Anthropic
 
 	constructor(options: ApiHandlerOptions) {
 		this.options = options
+
 		this.client = new Anthropic({
 			apiKey: this.options.apiKey,
 			baseURL: this.options.anthropicBaseUrl || undefined,
@@ -25,26 +30,36 @@ export class AnthropicHandler implements ApiHandler, SingleCompletionHandler {
 	}
 
 	async *createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream {
-		let stream: AnthropicStream<Anthropic.Beta.PromptCaching.Messages.RawPromptCachingBetaMessageStreamEvent>
+		let stream: AnthropicStream<Anthropic.Messages.RawMessageStreamEvent>
+		const cacheControl: CacheControlEphemeral = { type: "ephemeral" }
 		const modelId = this.getModel().id
+		let thinking: BetaThinkingConfigParam | undefined = undefined
+
+		if (THINKING_MODELS.includes(modelId)) {
+			thinking = this.options.anthropicThinking
+				? { type: "enabled", budget_tokens: this.options.anthropicThinking }
+				: { type: "disabled" }
+		}
 
 		switch (modelId) {
-			// 'latest' alias does not support cache_control
 			case "claude-3-7-sonnet-20250219":
 			case "claude-3-5-sonnet-20241022":
 			case "claude-3-5-haiku-20241022":
 			case "claude-3-opus-20240229":
 			case "claude-3-haiku-20240307": {
-				/*
-				The latest message will be the new user message, one before will be the assistant message from a previous request, and the user message before that will be a previously cached user message. So we need to mark the latest user message as ephemeral to cache it for the next request, and mark the second to last user message as ephemeral to let the server know the last message to retrieve from the cache for the current request..
-				*/
+				/**
+				 * The latest message will be the new user message, one before will
+				 * be the assistant message from a previous request, and the user message before that will be a previously cached user message. So we need to mark the latest user message as ephemeral to cache it for the next request, and mark the second to last user message as ephemeral to let the server know the last message to retrieve from the cache for the current request..
+				 */
 				const userMsgIndices = messages.reduce(
 					(acc, msg, index) => (msg.role === "user" ? [...acc, index] : acc),
 					[] as number[],
 				)
+
 				const lastUserMsgIndex = userMsgIndices[userMsgIndices.length - 1] ?? -1
 				const secondLastMsgUserIndex = userMsgIndices[userMsgIndices.length - 2] ?? -1
-				stream = await this.client.beta.promptCaching.messages.create(
+
+				stream = await this.client.messages.create(
 					{
 						model: modelId,
 						max_tokens: this.getModel().info.maxTokens || 8192,
@@ -60,12 +75,12 @@ export class AnthropicHandler implements ApiHandler, SingleCompletionHandler {
 													{
 														type: "text",
 														text: message.content,
-														cache_control: { type: "ephemeral" },
+														cache_control: cacheControl,
 													},
 												]
 											: message.content.map((content, contentIndex) =>
 													contentIndex === message.content.length - 1
-														? { ...content, cache_control: { type: "ephemeral" } }
+														? { ...content, cache_control: cacheControl }
 														: content,
 												),
 								}
@@ -76,6 +91,7 @@ export class AnthropicHandler implements ApiHandler, SingleCompletionHandler {
 						// tool_choice: { type: "auto" },
 						// tools: tools,
 						stream: true,
+						thinking,
 					},
 					(() => {
 						// prompt caching: https://x.com/alexalbert__/status/1823751995901272068
@@ -114,8 +130,9 @@ export class AnthropicHandler implements ApiHandler, SingleCompletionHandler {
 		for await (const chunk of stream) {
 			switch (chunk.type) {
 				case "message_start":
-					// tells us cache reads/writes/input/output
+					// Tells us cache reads/writes/input/output.
 					const usage = chunk.message.usage
+
 					yield {
 						type: "usage",
 						inputTokens: usage.input_tokens || 0,
@@ -123,43 +140,41 @@ export class AnthropicHandler implements ApiHandler, SingleCompletionHandler {
 						cacheWriteTokens: usage.cache_creation_input_tokens || undefined,
 						cacheReadTokens: usage.cache_read_input_tokens || undefined,
 					}
+
 					break
 				case "message_delta":
-					// tells us stop_reason, stop_sequence, and output tokens along the way and at the end of the message
-
+					// Tells us stop_reason, stop_sequence, and output tokens
+					// along the way and at the end of the message.
 					yield {
 						type: "usage",
 						inputTokens: 0,
 						outputTokens: chunk.usage.output_tokens || 0,
 					}
+
 					break
 				case "message_stop":
-					// no usage data, just an indicator that the message is done
+					// No usage data, just an indicator that the message is done.
 					break
 				case "content_block_start":
					switch (chunk.content_block.type) {
+						case "thinking":
+							yield { type: "reasoning", text: chunk.content_block.thinking }
+							break
 						case "text":
-							// we may receive multiple text blocks, in which case just insert a line break between them
+							// We may receive multiple text blocks, in which
+							// case just insert a line break between them.
 							if (chunk.index > 0) {
-								yield {
-									type: "text",
-									text: "\n",
-								}
-							}
-							yield {
-								type: "text",
-								text: chunk.content_block.text,
+								yield { type: "text", text: "\n" }
 							}
+
+							yield { type: "text", text: chunk.content_block.text }
 							break
 					}
 					break
 				case "content_block_delta":
 					switch (chunk.delta.type) {
 						case "text_delta":
-							yield {
-								type: "text",
-								text: chunk.delta.text,
-							}
+							yield { type: "text", text: chunk.delta.text }
 							break
 					}
 					break
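
A minimal, hypothetical usage sketch (not part of this commit) of how the updated handler's stream is meant to be consumed. It assumes apiModelId is how the handler selects its model and that anthropicThinking is the thinking budget in tokens, as suggested by the diff above; "reasoning" chunks are emitted from the new "thinking" content-block case.

// usage-sketch.ts (hypothetical, for illustration only)
import { AnthropicHandler } from "./src/api/providers/anthropic"

async function run() {
	const handler = new AnthropicHandler({
		apiKey: process.env.ANTHROPIC_API_KEY,
		apiModelId: "claude-3-7-sonnet-20250219", // assumed to be the model selector
		anthropicThinking: 4_096, // budget_tokens; omit to send { type: "disabled" }
	})

	const stream = handler.createMessage("You are a helpful assistant.", [
		{ role: "user", content: "Why is the sky blue?" },
	])

	for await (const chunk of stream) {
		switch (chunk.type) {
			case "reasoning":
				// Emitted from "thinking" content blocks (new in this commit).
				process.stdout.write(`[thinking] ${chunk.text}`)
				break
			case "text":
				process.stdout.write(chunk.text)
				break
			case "usage":
				// Token counts plus cache reads/writes reported along the way.
				console.log(chunk)
				break
		}
	}
}

run()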

src/shared/api.ts (+1 -0)

@@ -21,6 +21,7 @@ export interface ApiHandlerOptions {
 	apiModelId?: string
 	apiKey?: string // anthropic
 	anthropicBaseUrl?: string
+	anthropicThinking?: number
 	vsCodeLmModelSelector?: vscode.LanguageModelChatSelector
 	glamaModelId?: string
 	glamaModelInfo?: ModelInfo
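
The new anthropicThinking option is just a token budget; createMessage translates it into the SDK's thinking parameter. A small sketch of that mapping, using a hypothetical resolveThinking helper that mirrors the logic added above:

// Illustration only; resolveThinking is not part of the commit.
import { BetaThinkingConfigParam } from "@anthropic-ai/sdk/resources/beta"

const THINKING_MODELS = ["claude-3-7-sonnet-20250219"]

function resolveThinking(
	modelId: string,
	anthropicThinking?: number,
): BetaThinkingConfigParam | undefined {
	if (!THINKING_MODELS.includes(modelId)) {
		// Non-thinking models: the thinking field stays undefined.
		return undefined
	}

	return anthropicThinking
		? { type: "enabled", budget_tokens: anthropicThinking }
		: { type: "disabled" }
}

// resolveThinking("claude-3-7-sonnet-20250219", 8192)
//   => { type: "enabled", budget_tokens: 8192 }
// resolveThinking("claude-3-7-sonnet-20250219")
//   => { type: "disabled" }
// resolveThinking("claude-3-5-sonnet-20241022", 8192)
//   => undefined

Only claude-3-7-sonnet-20250219 is listed in THINKING_MODELS, so other model IDs never send a thinking parameter regardless of the option's value.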