Browse source

Bugfix/fix vscodellm model information (#2832)

* feat: initialize VS Code Language Model client in constructor

* feat: add VS Code LLM models and configuration

* feat: integrate VS Code LLM models into API configuration normalization

* Fix tests

---------

Co-authored-by: Matt Rubens <[email protected]>
Alfredo Medrano 8 months ago
parent
commit
3a5913ffca

+ 15 - 9
src/api/providers/__tests__/vscode-lm.test.ts

@@ -134,6 +134,9 @@ describe("VsCodeLmHandler", () => {
 			const mockModel = { ...mockLanguageModelChat }
 			;(vscode.lm.selectChatModels as jest.Mock).mockResolvedValueOnce([mockModel])
 			mockLanguageModelChat.countTokens.mockResolvedValue(10)
+
+			// Override the default client with our test client
+			handler["client"] = mockLanguageModelChat
 		})
 
 		it("should stream text responses", async () => {
@@ -229,12 +232,7 @@ describe("VsCodeLmHandler", () => {
 
 			mockLanguageModelChat.sendRequest.mockRejectedValueOnce(new Error("API Error"))
 
-			await expect(async () => {
-				const stream = handler.createMessage(systemPrompt, messages)
-				for await (const _ of stream) {
-					// consume stream
-				}
-			}).rejects.toThrow("API Error")
+			await expect(handler.createMessage(systemPrompt, messages).next()).rejects.toThrow("API Error")
 		})
 	})
 
@@ -253,6 +251,8 @@ describe("VsCodeLmHandler", () => {
 		})
 
 		it("should return fallback model info when no client exists", () => {
+			// Clear the client first
+			handler["client"] = null
 			const model = handler.getModel()
 			expect(model.id).toBe("test-vendor/test-family")
 			expect(model.info).toBeDefined()
@@ -276,6 +276,10 @@ describe("VsCodeLmHandler", () => {
 				})(),
 			})
 
+			// Override the default client with our test client to ensure it uses
+			// the mock implementation rather than the default fallback
+			handler["client"] = mockLanguageModelChat
+
 			const result = await handler.completePrompt("Test prompt")
 			expect(result).toBe(responseText)
 			expect(mockLanguageModelChat.sendRequest).toHaveBeenCalled()
@@ -287,9 +291,11 @@ describe("VsCodeLmHandler", () => {
 
 			mockLanguageModelChat.sendRequest.mockRejectedValueOnce(new Error("Completion failed"))
 
-			await expect(handler.completePrompt("Test prompt")).rejects.toThrow(
-				"VSCode LM completion error: Completion failed",
-			)
+			// Make sure we're using the mock client
+			handler["client"] = mockLanguageModelChat
+
+			const promise = handler.completePrompt("Test prompt")
+			await expect(promise).rejects.toThrow("VSCode LM completion error: Completion failed")
 		})
 	})
 })
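Note: the simplified rejection assertion above relies on async-generator semantics: calling .next() starts the generator, and an error thrown before the first yield surfaces as a rejected promise, so there is no need to manually consume the stream. A minimal self-contained sketch of the pattern (failingStream is a hypothetical stand-in for handler.createMessage):

	async function* failingStream(): AsyncGenerator<string> {
		// Simulate a provider that fails before producing any chunks.
		throw new Error("API Error")
	}

	it("rejects on the first next() call", async () => {
		// .next() starts the generator; the thrown error arrives as a rejected promise.
		await expect(failingStream().next()).rejects.toThrow("API Error")
	})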

+ 25 - 1
src/api/providers/vscode-lm.ts

@@ -61,6 +61,7 @@ export class VsCodeLmHandler extends BaseProvider implements SingleCompletionHan
 					}
 				}
 			})
+			this.initializeClient()
 		} catch (error) {
 			// Ensure cleanup if constructor fails
 			this.dispose()
@@ -70,7 +71,30 @@ export class VsCodeLmHandler extends BaseProvider implements SingleCompletionHan
 			)
 		}
 	}
-
+	/**
+	 * Initializes the VS Code Language Model client.
+	 * This method is called from the constructor to set up the client.
+	 * It is useful when getModel() is called before the client has been created.
+	 * @returns Promise<void>
+	 * @throws Error when client initialization fails
+	 */
+	async initializeClient(): Promise<void> {
+		try {
+			// Check if the client is already initialized
+			if (this.client) {
+				console.debug("Roo Code <Language Model API>: Client already initialized")
+				return
+			}
+			// Create a new client instance
+			this.client = await this.createClient(this.options.vsCodeLmModelSelector || {})
+			console.debug("Roo Code <Language Model API>: Client initialized successfully")
+		} catch (error) {
+			// Handle errors during client initialization
+			const errorMessage = error instanceof Error ? error.message : "Unknown error"
+			console.error("Roo Code <Language Model API>: Client initialization failed:", errorMessage)
+			throw new Error(`Roo Code <Language Model API>: Failed to initialize client: ${errorMessage}`)
+		}
+	}
 	/**
 	 * Creates a language model chat client based on the provided selector.
 	 *
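Worth noting: the constructor calls initializeClient() without awaiting it, so construction stays synchronous and the client is created in the background. A minimal sketch of that fire-and-forget pattern under simplified, assumed types (Selector, Client, and createClient are hypothetical stand-ins; the .catch() guard against an unhandled rejection is this sketch's own addition, not part of the diff above):

	// Hypothetical stand-ins for the real VS Code LM types.
	type Selector = { vendor?: string; family?: string }
	type Client = { family: string }
	declare function createClient(selector: Selector): Promise<Client>

	class LazyClientExample {
		private client: Client | null = null

		constructor(private selector: Selector) {
			// Fire-and-forget: start initialization without blocking construction.
			// The .catch() keeps a failed init from becoming an unhandled rejection.
			this.initializeClient().catch((error) => console.error("Init failed:", error))
		}

		async initializeClient(): Promise<void> {
			if (this.client) return // already initialized, nothing to do
			this.client = await createClient(this.selector)
		}
	}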

+ 183 - 0
src/shared/api.ts

@@ -1179,3 +1179,186 @@ export const xaiModels = {
 		description: "xAI's Grok Beta model (legacy) with 131K context window",
 	},
 } as const satisfies Record<string, ModelInfo>
+
+export type VscodeLlmModelId = keyof typeof vscodeLlmModels
+export const vscodeLlmDefaultModelId: VscodeLlmModelId = "claude-3.5-sonnet"
+export const vscodeLlmModels = {
+	"gpt-3.5-turbo": {
+		contextWindow: 12114,
+		supportsImages: false,
+		supportsPromptCache: false,
+		inputPrice: 0,
+		outputPrice: 0,
+		family: "gpt-3.5-turbo",
+		version: "gpt-3.5-turbo-0613",
+		name: "GPT 3.5 Turbo",
+		supportsToolCalling: true,
+		maxInputTokens: 12114,
+	},
+	"gpt-4o-mini": {
+		contextWindow: 12115,
+		supportsImages: false,
+		supportsPromptCache: false,
+		inputPrice: 0,
+		outputPrice: 0,
+		family: "gpt-4o-mini",
+		version: "gpt-4o-mini-2024-07-18",
+		name: "GPT-4o mini",
+		supportsToolCalling: true,
+		maxInputTokens: 12115,
+	},
+	"gpt-4": {
+		contextWindow: 28501,
+		supportsImages: false,
+		supportsPromptCache: false,
+		inputPrice: 0,
+		outputPrice: 0,
+		family: "gpt-4",
+		version: "gpt-4-0613",
+		name: "GPT 4",
+		supportsToolCalling: true,
+		maxInputTokens: 28501,
+	},
+	"gpt-4-0125-preview": {
+		contextWindow: 63826,
+		supportsImages: false,
+		supportsPromptCache: false,
+		inputPrice: 0,
+		outputPrice: 0,
+		family: "gpt-4-turbo",
+		version: "gpt-4-0125-preview",
+		name: "GPT 4 Turbo",
+		supportsToolCalling: true,
+		maxInputTokens: 63826,
+	},
+	"gpt-4o": {
+		contextWindow: 63827,
+		supportsImages: true,
+		supportsPromptCache: false,
+		inputPrice: 0,
+		outputPrice: 0,
+		family: "gpt-4o",
+		version: "gpt-4o-2024-11-20",
+		name: "GPT-4o",
+		supportsToolCalling: true,
+		maxInputTokens: 63827,
+	},
+	o1: {
+		contextWindow: 19827,
+		supportsImages: false,
+		supportsPromptCache: false,
+		inputPrice: 0,
+		outputPrice: 0,
+		family: "o1-ga",
+		version: "o1-2024-12-17",
+		name: "o1 (Preview)",
+		supportsToolCalling: true,
+		maxInputTokens: 19827,
+	},
+	"o3-mini": {
+		contextWindow: 63827,
+		supportsImages: false,
+		supportsPromptCache: false,
+		inputPrice: 0,
+		outputPrice: 0,
+		family: "o3-mini",
+		version: "o3-mini-2025-01-31",
+		name: "o3-mini",
+		supportsToolCalling: true,
+		maxInputTokens: 63827,
+	},
+	"claude-3.5-sonnet": {
+		contextWindow: 81638,
+		supportsImages: true,
+		supportsPromptCache: false,
+		inputPrice: 0,
+		outputPrice: 0,
+		family: "claude-3.5-sonnet",
+		version: "claude-3.5-sonnet",
+		name: "Claude 3.5 Sonnet",
+		supportsToolCalling: true,
+		maxInputTokens: 81638,
+	},
+	"claude-3.7-sonnet": {
+		contextWindow: 89827,
+		supportsImages: true,
+		supportsPromptCache: false,
+		inputPrice: 0,
+		outputPrice: 0,
+		family: "claude-3.7-sonnet",
+		version: "claude-3.7-sonnet",
+		name: "Claude 3.7 Sonnet",
+		supportsToolCalling: true,
+		maxInputTokens: 89827,
+	},
+	"claude-3.7-sonnet-thought": {
+		contextWindow: 89827,
+		supportsImages: true,
+		supportsPromptCache: false,
+		inputPrice: 0,
+		outputPrice: 0,
+		family: "claude-3.7-sonnet-thought",
+		version: "claude-3.7-sonnet-thought",
+		name: "Claude 3.7 Sonnet Thinking",
+		supportsToolCalling: false,
+		maxInputTokens: 89827,
+		thinking: true,
+	},
+	"gemini-2.0-flash-001": {
+		contextWindow: 127827,
+		supportsImages: true,
+		supportsPromptCache: false,
+		inputPrice: 0,
+		outputPrice: 0,
+		family: "gemini-2.0-flash",
+		version: "gemini-2.0-flash-001",
+		name: "Gemini 2.0 Flash",
+		supportsToolCalling: false,
+		maxInputTokens: 127827,
+	},
+	"gemini-2.5-pro": {
+		contextWindow: 63830,
+		supportsImages: true,
+		supportsPromptCache: false,
+		inputPrice: 0,
+		outputPrice: 0,
+		family: "gemini-2.5-pro",
+		version: "gemini-2.5-pro-preview-03-25",
+		name: "Gemini 2.5 Pro (Preview)",
+		supportsToolCalling: true,
+		maxInputTokens: 63830,
+	},
+	"o4-mini": {
+		contextWindow: 111446,
+		supportsImages: false,
+		supportsPromptCache: false,
+		inputPrice: 0,
+		outputPrice: 0,
+		family: "o4-mini",
+		version: "o4-mini-2025-04-16",
+		name: "o4-mini (Preview)",
+		supportsToolCalling: true,
+		maxInputTokens: 111446,
+	},
+	"gpt-4.1": {
+		contextWindow: 111446,
+		supportsImages: true,
+		supportsPromptCache: false,
+		inputPrice: 0,
+		outputPrice: 0,
+		family: "gpt-4.1",
+		version: "gpt-4.1-2025-04-14",
+		name: "GPT-4.1 (Preview)",
+		supportsToolCalling: true,
+		maxInputTokens: 111446,
+	},
+} as const satisfies Record<
+	string,
+	ModelInfo & {
+		family: string
+		version: string
+		name: string
+		supportsToolCalling: boolean
+		maxInputTokens: number
+	}
+>

+ 9 - 5
webview-ui/src/components/settings/ApiOptions.tsx

@@ -38,6 +38,8 @@ import {
 	xaiDefaultModelId,
 	xaiModels,
 	ApiProvider,
+	vscodeLlmModels,
+	vscodeLlmDefaultModelId,
 } from "@roo/shared/api"
 import { ExtensionMessage } from "@roo/shared/ExtensionMessage"
 
@@ -1738,7 +1740,6 @@ const ApiOptions = ({
 export function normalizeApiConfiguration(apiConfiguration?: ApiConfiguration) {
 	const provider = apiConfiguration?.apiProvider || "anthropic"
 	const modelId = apiConfiguration?.apiModelId
-
 	const getProviderData = (models: Record<string, ModelInfo>, defaultId: string) => {
 		let selectedModelId: string
 		let selectedModelInfo: ModelInfo
@@ -1827,15 +1828,18 @@ export function normalizeApiConfiguration(apiConfiguration?: ApiConfiguration) {
 				selectedModelInfo: openAiModelInfoSaneDefaults,
 			}
 		case "vscode-lm":
+			const modelFamily = apiConfiguration?.vsCodeLmModelSelector?.family ?? vscodeLlmDefaultModelId
+			const modelInfo = {
+				...openAiModelInfoSaneDefaults,
+				...vscodeLlmModels[modelFamily as keyof typeof vscodeLlmModels],
+				supportsImages: false, // VSCode LM API currently doesn't support images.
+			}
 			return {
 				selectedProvider: provider,
 				selectedModelId: apiConfiguration?.vsCodeLmModelSelector
 					? `${apiConfiguration.vsCodeLmModelSelector.vendor}/${apiConfiguration.vsCodeLmModelSelector.family}`
 					: "",
-				selectedModelInfo: {
-					...openAiModelInfoSaneDefaults,
-					supportsImages: false, // VSCode LM API currently doesn't support images.
-				},
+				selectedModelInfo: modelInfo,
 			}
 		default:
 			return getProviderData(anthropicModels, anthropicDefaultModelId)
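With this change, a configured selector resolves to the real model metadata from vscodeLlmModels instead of the generic OpenAI defaults. Roughly, assuming a hypothetical configuration that selects Copilot's gpt-4o family:

	const { selectedModelId, selectedModelInfo } = normalizeApiConfiguration({
		apiProvider: "vscode-lm",
		vsCodeLmModelSelector: { vendor: "copilot", family: "gpt-4o" },
	})

	// selectedModelId === "copilot/gpt-4o"
	// selectedModelInfo.contextWindow === 63827 (merged in from vscodeLlmModels["gpt-4o"])
	// selectedModelInfo.supportsImages === false (images stay disabled for VS Code LM)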