Matt Rubens 11 months ago
Parent
Commit
7137f8c528

+ 32 - 33
src/api/index.ts

@@ -15,43 +15,42 @@ import { VsCodeLmHandler } from "./providers/vscode-lm"
 import { ApiStream } from "./transform/stream"
 
 export interface SingleCompletionHandler {
-  completePrompt(prompt: string): Promise<string>
+	completePrompt(prompt: string): Promise<string>
 }
 
 export interface ApiHandler {
-  createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream
-  getModel(): { id: string; info: ModelInfo }
+	createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream
+	getModel(): { id: string; info: ModelInfo }
 }
 
 export function buildApiHandler(configuration: ApiConfiguration): ApiHandler {
-  const { apiProvider, ...options } = configuration
-
-  switch (apiProvider) {
-    case "anthropic":
-      return new AnthropicHandler(options)
-    case "glama":
-      return new GlamaHandler(options)
-    case "openrouter":
-      return new OpenRouterHandler(options)
-    case "bedrock":
-      return new AwsBedrockHandler(options)
-    case "vertex":
-      return new VertexHandler(options)
-    case "openai":
-      return new OpenAiHandler(options)
-    case "ollama":
-      return new OllamaHandler(options)
-    case "lmstudio":
-      return new LmStudioHandler(options)
-    case "gemini":
-      return new GeminiHandler(options)
-    case "openai-native":
-      return new OpenAiNativeHandler(options)
-    case "deepseek":
-      return new DeepSeekHandler(options)
-    case "vscode-lm":
-      return new VsCodeLmHandler(options)
-    default:
-      return new AnthropicHandler(options)
-  }
+	const { apiProvider, ...options } = configuration
+	switch (apiProvider) {
+		case "anthropic":
+			return new AnthropicHandler(options)
+		case "glama":
+			return new GlamaHandler(options)
+		case "openrouter":
+			return new OpenRouterHandler(options)
+		case "bedrock":
+			return new AwsBedrockHandler(options)
+		case "vertex":
+			return new VertexHandler(options)
+		case "openai":
+			return new OpenAiHandler(options)
+		case "ollama":
+			return new OllamaHandler(options)
+		case "lmstudio":
+			return new LmStudioHandler(options)
+		case "gemini":
+			return new GeminiHandler(options)
+		case "openai-native":
+			return new OpenAiNativeHandler(options)
+		case "deepseek":
+			return new DeepSeekHandler(options)
+		case "vscode-lm":
+			return new VsCodeLmHandler(options)
+		default:
+			return new AnthropicHandler(options)
+	}
 }
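
For reference, a minimal sketch of how this factory is typically consumed (not part of the commit: the import path and the selector values are assumptions; the configuration shape follows the fields this commit handles elsewhere):

```ts
// Sketch only: import path and selector values are assumptions.
import { buildApiHandler } from "./api"

// Only `apiProvider` drives the switch above; all other fields pass through as options.
const handler = buildApiHandler({
	apiProvider: "vscode-lm",
	vsCodeLmModelSelector: { vendor: "copilot", family: "gpt-4o" },
})

const { id, info } = handler.getModel()
console.log(`Selected model ${id} (context window: ${info.contextWindow} tokens)`)
```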

+ 529 - 534
src/api/providers/vscode-lm.ts

@@ -36,538 +36,533 @@ import { ApiHandlerOptions, ModelInfo, openAiModelInfoSaneDefaults } from "../..
  */
 export class VsCodeLmHandler implements ApiHandler, SingleCompletionHandler {
 
-    private options: ApiHandlerOptions;
-    private client: vscode.LanguageModelChat | null;
-    private disposable: vscode.Disposable | null;
-    private currentRequestCancellation: vscode.CancellationTokenSource | null;
-
-    constructor(options: ApiHandlerOptions) {
-        this.options = options;
-        this.client = null;
-        this.disposable = null;
-        this.currentRequestCancellation = null;
-
-        try {
-            // Listen for model changes and reset client
-            this.disposable = vscode.workspace.onDidChangeConfiguration(event => {
-
-                if (event.affectsConfiguration('lm')) {
-
-                    try {
-
-                        this.client = null;
-                        this.ensureCleanState();
-                    }
-                    catch (error) {
-
-                        console.error('Error during configuration change cleanup:', error);
-                    }
-                }
-            });
-        }
-        catch (error) {
-
-            // Ensure cleanup if constructor fails
-            this.dispose();
-
-            throw new Error(
-                `Cline <Language Model API>: Failed to initialize handler: ${error instanceof Error ? error.message : 'Unknown error'}`
-            );
-        }
-    }
-
-    /**
-     * Creates a language model chat client based on the provided selector.
-     *
-     * @param selector - Selector criteria to filter language model chat instances
-     * @returns Promise resolving to the first matching model, or a minimal fallback model when none match
-     * @throws Error when the underlying model selection call fails
-     * 
-     * @example
-     * const selector = { vendor: "copilot", family: "gpt-4o" };
-     * const chatClient = await createClient(selector);
-     */
-    async createClient(selector: vscode.LanguageModelChatSelector): Promise<vscode.LanguageModelChat> {
-        try {
-            const models = await vscode.lm.selectChatModels(selector);
-
-            // Use first available model or create a minimal model object
-            if (models && Array.isArray(models) && models.length > 0) {
-                return models[0];
-            }
-
-            // Create a minimal model if no models are available
-            return {
-                id: 'default-lm',
-                name: 'Default Language Model',
-                vendor: 'vscode',
-                family: 'lm',
-                version: '1.0',
-                maxInputTokens: 8192,
-                sendRequest: async (messages, options, token) => {
-                    // Provide a minimal implementation
-                    return {
-                        stream: (async function* () {
-                            yield new vscode.LanguageModelTextPart(
-                                "Language model functionality is limited. Please check VS Code configuration."
-                            );
-                        })(),
-                        text: (async function* () {
-                            yield "Language model functionality is limited. Please check VS Code configuration.";
-                        })()
-                    };
-                },
-                countTokens: async () => 0
-            };
-        } catch (error) {
-            const errorMessage = error instanceof Error ? error.message : 'Unknown error';
-            throw new Error(`Cline <Language Model API>: Failed to select model: ${errorMessage}`);
-        }
-    }
-
-    /**
-     * Releases the configuration-change listener and cancels any
-     * in-flight request before the handler is discarded.
-     */
-    dispose(): void {
-
-        if (this.disposable) {
-
-            this.disposable.dispose();
-        }
-
-        if (this.currentRequestCancellation) {
-
-            this.currentRequestCancellation.cancel();
-            this.currentRequestCancellation.dispose();
-        }
-    }
-
-    private async countTokens(text: string | vscode.LanguageModelChatMessage): Promise<number> {
-        // Check for required dependencies
-        if (!this.client) {
-            console.warn('Cline <Language Model API>: No client available for token counting');
-            return 0;
-        }
-
-        if (!this.currentRequestCancellation) {
-            console.warn('Cline <Language Model API>: No cancellation token available for token counting');
-            return 0;
-        }
-
-        // Validate input
-        if (!text) {
-            console.debug('Cline <Language Model API>: Empty text provided for token counting');
-            return 0;
-        }
-
-        try {
-            // Handle different input types
-            let tokenCount: number;
-
-            if (typeof text === 'string') {
-                tokenCount = await this.client.countTokens(text, this.currentRequestCancellation.token);
-            } else if (text instanceof vscode.LanguageModelChatMessage) {
-                // For chat messages, ensure we have content
-                if (!text.content || (Array.isArray(text.content) && text.content.length === 0)) {
-                    console.debug('Cline <Language Model API>: Empty chat message content');
-                    return 0;
-                }
-                tokenCount = await this.client.countTokens(text, this.currentRequestCancellation.token);
-            } else {
-                console.warn('Cline <Language Model API>: Invalid input type for token counting');
-                return 0;
-            }
-
-            // Validate the result
-            if (typeof tokenCount !== 'number') {
-                console.warn('Cline <Language Model API>: Non-numeric token count received:', tokenCount);
-                return 0;
-            }
-
-            if (tokenCount < 0) {
-                console.warn('Cline <Language Model API>: Negative token count received:', tokenCount);
-                return 0;
-            }
-
-            return tokenCount;
-        }
-        catch (error) {
-            // Handle specific error types
-            if (error instanceof vscode.CancellationError) {
-                console.debug('Cline <Language Model API>: Token counting cancelled by user');
-                return 0;
-            }
-
-            const errorMessage = error instanceof Error ? error.message : 'Unknown error';
-            console.warn('Cline <Language Model API>: Token counting failed:', errorMessage);
-
-            // Log additional error details if available
-            if (error instanceof Error && error.stack) {
-                console.debug('Token counting error stack:', error.stack);
-            }
-
-            return 0; // Fallback to prevent stream interruption
-        }
-    }
-
-    private async calculateTotalInputTokens(systemPrompt: string, vsCodeLmMessages: vscode.LanguageModelChatMessage[]): Promise<number> {
-
-        const systemTokens: number = await this.countTokens(systemPrompt);
-
-        const messageTokens: number[] = await Promise.all(
-            vsCodeLmMessages.map(msg => this.countTokens(msg))
-        );
-
-        return systemTokens + messageTokens.reduce(
-            (sum: number, tokens: number): number => sum + tokens, 0
-        );
-    }
-
-    private ensureCleanState(): void {
-
-        if (this.currentRequestCancellation) {
-
-            this.currentRequestCancellation.cancel();
-            this.currentRequestCancellation.dispose();
-            this.currentRequestCancellation = null;
-        }
-    }
-
-    private async getClient(): Promise<vscode.LanguageModelChat> {
-        if (!this.client) {
-            console.debug('Cline <Language Model API>: Getting client with options:', {
-                vsCodeLmModelSelector: this.options.vsCodeLmModelSelector,
-                hasOptions: !!this.options,
-                selectorKeys: this.options.vsCodeLmModelSelector ? Object.keys(this.options.vsCodeLmModelSelector) : []
-            });
-
-            try {
-                // Use default empty selector if none provided to get all available models
-                const selector = this.options?.vsCodeLmModelSelector || {};
-                console.debug('Cline <Language Model API>: Creating client with selector:', selector);
-                this.client = await this.createClient(selector);
-            } catch (error) {
-                const message = error instanceof Error ? error.message : 'Unknown error';
-                console.error('Cline <Language Model API>: Client creation failed:', message);
-                throw new Error(`Cline <Language Model API>: Failed to create client: ${message}`);
-            }
-        }
-
-        return this.client;
-    }
-
-    private cleanTerminalOutput(text: string): string {
-        if (!text) {
-            return '';
-        }
-
-        return text
-            // Normalize line endings
-            .replace(/\r\n/g, '\n')
-            .replace(/\r/g, '\n')
-
-            // Remove ANSI escape sequences
-            .replace(/\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])/g, '') // Full set of ANSI sequences
-            .replace(/\x9B[0-?]*[ -/]*[@-~]/g, '')  // CSI sequences
-
-            // Remove terminal title-setting and other OSC sequences
-            .replace(/\x1B\][0-9;]*(?:\x07|\x1B\\)/g, '')
-
-            // Remove control characters
-            .replace(/[\x00-\x09\x0B-\x0C\x0E-\x1F\x7F]/g, '')
-
-            // Remove VS Code escape sequences
-            .replace(/\x1B[PD].*?\x1B\\/g, '')      // DCS sequences
-            .replace(/\x1B_.*?\x1B\\/g, '')         // APC sequences
-            .replace(/\x1B\^.*?\x1B\\/g, '')        // PM sequences
-            .replace(/\x1B\[[\d;]*[HfABCDEFGJKST]/g, '') // Cursor movement and clear screen
-
-            // Remove Windows paths and shell housekeeping output
-            .replace(/^(?:PS )?[A-Z]:\\[^\n]*$/mg, '')
-            .replace(/^;?Cwd=.*$/mg, '')
-
-            // Strip escaped hex/unicode sequences
-            .replace(/\\x[0-9a-fA-F]{2}/g, '')
-            .replace(/\\u[0-9a-fA-F]{4}/g, '')
-
-            // Final cleanup
-            .replace(/\n{3,}/g, '\n\n')  // Collapse runs of blank lines
-            .trim();
-    }
-
-    private cleanMessageContent(content: any): any {
-        if (!content) {
-            return content;
-        }
-
-        if (typeof content === 'string') {
-            return this.cleanTerminalOutput(content);
-        }
-
-        if (Array.isArray(content)) {
-            return content.map(item => this.cleanMessageContent(item));
-        }
-
-        if (typeof content === 'object') {
-            const cleaned: any = {};
-            for (const [key, value] of Object.entries(content)) {
-                cleaned[key] = this.cleanMessageContent(value);
-            }
-            return cleaned;
-        }
-
-        return content;
-    }
-
-    /**
-     * Creates and streams a message using the VS Code Language Model API.
-     *
-     * @param systemPrompt - The system prompt to initialize the conversation context
-     * @param messages - An array of message parameters following the Anthropic message format
-     *
-     * @yields {ApiStream} An async generator that yields either text chunks or tool calls from the model response
-     *
-     * @throws {Error} When the language model client cannot be created
-     * @throws {Error} When the response stream encounters an error
-     *
-     * @remarks
-     * This method handles the initialization of the VS Code LM client if not already created,
-     * converts the messages to VS Code LM format, and streams the response chunks.
-     * Tool calls handling is currently a work in progress.
-     */
-    async *createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream {
-
-        // Ensure clean state before starting a new request
-        this.ensureCleanState();
-        const client: vscode.LanguageModelChat = await this.getClient();
-
-        // Clean system prompt and messages
-        const cleanedSystemPrompt = this.cleanTerminalOutput(systemPrompt);
-        const cleanedMessages = messages.map(msg => ({
-            ...msg,
-            content: this.cleanMessageContent(msg.content)
-        }));
-
-        // Convert Anthropic messages to VS Code LM messages
-        const vsCodeLmMessages: vscode.LanguageModelChatMessage[] = [
-            vscode.LanguageModelChatMessage.Assistant(cleanedSystemPrompt),
-            ...convertToVsCodeLmMessages(cleanedMessages),
-        ];
-
-        // Initialize cancellation token for the request
-        this.currentRequestCancellation = new vscode.CancellationTokenSource();
-
-        // Calculate input tokens before starting the stream
-        const totalInputTokens: number = await this.calculateTotalInputTokens(systemPrompt, vsCodeLmMessages);
-
-        // Accumulate the text and count at the end of the stream to reduce token counting overhead.
-        let accumulatedText: string = '';
-
-        try {
-
-            // Create the response stream with minimal required options
-            const requestOptions: vscode.LanguageModelChatRequestOptions = {
-                justification: `Cline would like to use '${client.name}' from '${client.vendor}'. Click 'Allow' to proceed.`
-            };
-
-            // Note: Tool support is currently provided by the VSCode Language Model API directly
-            // Extensions can register tools using vscode.lm.registerTool()
-
-            const response: vscode.LanguageModelChatResponse = await client.sendRequest(
-                vsCodeLmMessages,
-                requestOptions,
-                this.currentRequestCancellation.token
-            );
-
-            // Consume the stream and handle both text and tool call chunks
-            for await (const chunk of response.stream) {
-                if (chunk instanceof vscode.LanguageModelTextPart) {
-                    // Validate text part value
-                    if (typeof chunk.value !== 'string') {
-                        console.warn('Cline <Language Model API>: Invalid text part value received:', chunk.value);
-                        continue;
-                    }
-
-                    accumulatedText += chunk.value;
-                    yield {
-                        type: "text",
-                        text: chunk.value,
-                    };
-                } else if (chunk instanceof vscode.LanguageModelToolCallPart) {
-                    try {
-                        // Validate tool call parameters
-                        if (!chunk.name || typeof chunk.name !== 'string') {
-                            console.warn('Cline <Language Model API>: Invalid tool name received:', chunk.name);
-                            continue;
-                        }
-
-                        if (!chunk.callId || typeof chunk.callId !== 'string') {
-                            console.warn('Cline <Language Model API>: Invalid tool callId received:', chunk.callId);
-                            continue;
-                        }
-
-                        // Ensure input is a valid object
-                        if (!chunk.input || typeof chunk.input !== 'object') {
-                            console.warn('Cline <Language Model API>: Invalid tool input received:', chunk.input);
-                            continue;
-                        }
-
-                        // Convert tool calls to text format with proper error handling
-                        const toolCall = {
-                            type: "tool_call",
-                            name: chunk.name,
-                            arguments: chunk.input,
-                            callId: chunk.callId
-                        };
-
-                        const toolCallText = JSON.stringify(toolCall);
-                        accumulatedText += toolCallText;
-
-                        // Log tool call for debugging
-                        console.debug('Cline <Language Model API>: Processing tool call:', {
-                            name: chunk.name,
-                            callId: chunk.callId,
-                            inputSize: JSON.stringify(chunk.input).length
-                        });
-
-                        yield {
-                            type: "text",
-                            text: toolCallText,
-                        };
-                    } catch (error) {
-                        console.error('Cline <Language Model API>: Failed to process tool call:', error);
-                        // Continue processing other chunks even if one fails
-                        continue;
-                    }
-                } else {
-                    console.warn('Cline <Language Model API>: Unknown chunk type received:', chunk);
-                }
-            }
-
-            // Count tokens in the accumulated text after stream completion
-            const totalOutputTokens: number = await this.countTokens(accumulatedText);
-
-            // Report final usage after stream completion
-            yield {
-                type: "usage",
-                inputTokens: totalInputTokens,
-                outputTokens: totalOutputTokens,
-                totalCost: calculateApiCost(
-                    this.getModel().info,
-                    totalInputTokens,
-                    totalOutputTokens
-                )
-            };
-        }
-        catch (error: unknown) {
-
-            this.ensureCleanState();
-
-            if (error instanceof vscode.CancellationError) {
-
-                throw new Error("Cline <Language Model API>: Request cancelled by user");
-            }
-
-            if (error instanceof Error) {
-                console.error('Cline <Language Model API>: Stream error details:', {
-                    message: error.message,
-                    stack: error.stack,
-                    name: error.name
-                });
-
-                // Return original error if it's already an Error instance
-                throw error;
-            } else if (typeof error === 'object' && error !== null) {
-                // Handle error-like objects
-                const errorDetails = JSON.stringify(error, null, 2);
-                console.error('Cline <Language Model API>: Stream error object:', errorDetails);
-                throw new Error(`Cline <Language Model API>: Response stream error: ${errorDetails}`);
-            } else {
-                // Fallback for unknown error types
-                const errorMessage = String(error);
-                console.error('Cline <Language Model API>: Unknown stream error:', errorMessage);
-                throw new Error(`Cline <Language Model API>: Response stream error: ${errorMessage}`);
-            }
-        }
-    }
-
-    // Return model information based on the current client state
-    getModel(): { id: string; info: ModelInfo; } {
-        if (this.client) {
-            // Validate client properties
-            const requiredProps = {
-                id: this.client.id,
-                vendor: this.client.vendor,
-                family: this.client.family,
-                version: this.client.version,
-                maxInputTokens: this.client.maxInputTokens
-            };
-
-            // Log any missing properties for debugging
-            for (const [prop, value] of Object.entries(requiredProps)) {
-                if (!value && value !== 0) {
-                    console.warn(`Cline <Language Model API>: Client missing ${prop} property`);
-                }
-            }
-
-            // Construct model ID using available information
-            const modelParts = [
-                this.client.vendor,
-                this.client.family,
-                this.client.version
-            ].filter(Boolean);
-
-            const modelId = this.client.id || modelParts.join(SELECTOR_SEPARATOR);
-
-            // Build model info with conservative defaults for missing values
-            const modelInfo: ModelInfo = {
-                maxTokens: -1, // Unlimited tokens by default
-                contextWindow: typeof this.client.maxInputTokens === 'number'
-                    ? Math.max(0, this.client.maxInputTokens)
-                    : openAiModelInfoSaneDefaults.contextWindow,
-                supportsImages: false, // VSCode Language Model API currently doesn't support image inputs
-                supportsPromptCache: true,
-                inputPrice: 0,
-                outputPrice: 0,
-                description: `VSCode Language Model: ${modelId}`
-            };
-
-            return { id: modelId, info: modelInfo };
-        }
-
-        // Fallback when no client is available
-        const fallbackId = this.options.vsCodeLmModelSelector
-            ? stringifyVsCodeLmModelSelector(this.options.vsCodeLmModelSelector)
-            : "vscode-lm";
-
-        console.debug('Cline <Language Model API>: No client available, using fallback model info');
-
-        return {
-            id: fallbackId,
-            info: {
-                ...openAiModelInfoSaneDefaults,
-                description: `VSCode Language Model (Fallback): ${fallbackId}`
-            }
-        };
-    }
-
-    async completePrompt(prompt: string): Promise<string> {
-        try {
-            const client = await this.getClient();
-            const response = await client.sendRequest([vscode.LanguageModelChatMessage.User(prompt)], {}, new vscode.CancellationTokenSource().token);
-            let result = "";
-            for await (const chunk of response.stream) {
-                if (chunk instanceof vscode.LanguageModelTextPart) {
-                    result += chunk.value;
-                }
-            }
-            return result;
-        } catch (error) {
-            if (error instanceof Error) {
-                throw new Error(`VSCode LM completion error: ${error.message}`)
-            }
-            throw error
-        }
-    }
+	private options: ApiHandlerOptions;
+	private client: vscode.LanguageModelChat | null;
+	private disposable: vscode.Disposable | null;
+	private currentRequestCancellation: vscode.CancellationTokenSource | null;
+
+	constructor(options: ApiHandlerOptions) {
+		this.options = options;
+		this.client = null;
+		this.disposable = null;
+		this.currentRequestCancellation = null;
+
+		try {
+			// Listen for model changes and reset client
+			this.disposable = vscode.workspace.onDidChangeConfiguration(event => {
+				if (event.affectsConfiguration('lm')) {
+					try {
+						this.client = null;
+						this.ensureCleanState();
+					}
+					catch (error) {
+						console.error('Error during configuration change cleanup:', error);
+					}
+				}
+			});
+		}
+		catch (error) {
+			// Ensure cleanup if constructor fails
+			this.dispose();
+
+			throw new Error(
+				`Cline <Language Model API>: Failed to initialize handler: ${error instanceof Error ? error.message : 'Unknown error'}`
+			);
+		}
+	}
+
+	/**
+	 * Creates a language model chat client based on the provided selector.
+	 *
+	 * @param selector - Selector criteria to filter language model chat instances
+	 * @returns Promise resolving to the first matching model, or a minimal fallback model when none match
+	 * @throws Error when the underlying model selection call fails
+	 * 
+	 * @example
+	 * const selector = { vendor: "copilot", family: "gpt-4o" };
+	 * const chatClient = await createClient(selector);
+	 */
+	async createClient(selector: vscode.LanguageModelChatSelector): Promise<vscode.LanguageModelChat> {
+		try {
+			const models = await vscode.lm.selectChatModels(selector);
+
+			// Use first available model or create a minimal model object
+			if (models && Array.isArray(models) && models.length > 0) {
+				return models[0];
+			}
+
+			// Create a minimal model if no models are available
+			return {
+				id: 'default-lm',
+				name: 'Default Language Model',
+				vendor: 'vscode',
+				family: 'lm',
+				version: '1.0',
+				maxInputTokens: 8192,
+				sendRequest: async (messages, options, token) => {
+					// Provide a minimal implementation
+					return {
+						stream: (async function* () {
+							yield new vscode.LanguageModelTextPart(
+								"Language model functionality is limited. Please check VS Code configuration."
+							);
+						})(),
+						text: (async function* () {
+							yield "Language model functionality is limited. Please check VS Code configuration.";
+						})()
+					};
+				},
+				countTokens: async () => 0
+			};
+		} catch (error) {
+			const errorMessage = error instanceof Error ? error.message : 'Unknown error';
+			throw new Error(`Cline <Language Model API>: Failed to select model: ${errorMessage}`);
+		}
+	}
+
+	/**
+	 * Releases the configuration-change listener and cancels any
+	 * in-flight request before the handler is discarded.
+	 */
+	dispose(): void {
+
+		if (this.disposable) {
+
+			this.disposable.dispose();
+		}
+
+		if (this.currentRequestCancellation) {
+
+			this.currentRequestCancellation.cancel();
+			this.currentRequestCancellation.dispose();
+		}
+	}
+
+	private async countTokens(text: string | vscode.LanguageModelChatMessage): Promise<number> {
+		// Check for required dependencies
+		if (!this.client) {
+			console.warn('Cline <Language Model API>: No client available for token counting');
+			return 0;
+		}
+
+		if (!this.currentRequestCancellation) {
+			console.warn('Cline <Language Model API>: No cancellation token available for token counting');
+			return 0;
+		}
+
+		// Validate input
+		if (!text) {
+			console.debug('Cline <Language Model API>: Empty text provided for token counting');
+			return 0;
+		}
+
+		try {
+			// Handle different input types
+			let tokenCount: number;
+
+			if (typeof text === 'string') {
+				tokenCount = await this.client.countTokens(text, this.currentRequestCancellation.token);
+			} else if (text instanceof vscode.LanguageModelChatMessage) {
+				// For chat messages, ensure we have content
+				if (!text.content || (Array.isArray(text.content) && text.content.length === 0)) {
+					console.debug('Cline <Language Model API>: Empty chat message content');
+					return 0;
+				}
+				tokenCount = await this.client.countTokens(text, this.currentRequestCancellation.token);
+			} else {
+				console.warn('Cline <Language Model API>: Invalid input type for token counting');
+				return 0;
+			}
+
+			// Validate the result
+			if (typeof tokenCount !== 'number') {
+				console.warn('Cline <Language Model API>: Non-numeric token count received:', tokenCount);
+				return 0;
+			}
+
+			if (tokenCount < 0) {
+				console.warn('Cline <Language Model API>: Negative token count received:', tokenCount);
+				return 0;
+			}
+
+			return tokenCount;
+		}
+		catch (error) {
+			// Handle specific error types
+			if (error instanceof vscode.CancellationError) {
+				console.debug('Cline <Language Model API>: Token counting cancelled by user');
+				return 0;
+			}
+
+			const errorMessage = error instanceof Error ? error.message : 'Unknown error';
+			console.warn('Cline <Language Model API>: Token counting failed:', errorMessage);
+
+			// Log additional error details if available
+			if (error instanceof Error && error.stack) {
+				console.debug('Token counting error stack:', error.stack);
+			}
+
+			return 0; // Fallback to prevent stream interruption
+		}
+	}
+
+	private async calculateTotalInputTokens(systemPrompt: string, vsCodeLmMessages: vscode.LanguageModelChatMessage[]): Promise<number> {
+
+		const systemTokens: number = await this.countTokens(systemPrompt);
+
+		const messageTokens: number[] = await Promise.all(
+			vsCodeLmMessages.map(msg => this.countTokens(msg))
+		);
+
+		return systemTokens + messageTokens.reduce(
+			(sum: number, tokens: number): number => sum + tokens, 0
+		);
+	}
+
+	private ensureCleanState(): void {
+
+		if (this.currentRequestCancellation) {
+
+			this.currentRequestCancellation.cancel();
+			this.currentRequestCancellation.dispose();
+			this.currentRequestCancellation = null;
+		}
+	}
+
+	private async getClient(): Promise<vscode.LanguageModelChat> {
+		if (!this.client) {
+			console.debug('Cline <Language Model API>: Getting client with options:', {
+				vsCodeLmModelSelector: this.options.vsCodeLmModelSelector,
+				hasOptions: !!this.options,
+				selectorKeys: this.options.vsCodeLmModelSelector ? Object.keys(this.options.vsCodeLmModelSelector) : []
+			});
+
+			try {
+				// Use default empty selector if none provided to get all available models
+				const selector = this.options?.vsCodeLmModelSelector || {};
+				console.debug('Cline <Language Model API>: Creating client with selector:', selector);
+				this.client = await this.createClient(selector);
+			} catch (error) {
+				const message = error instanceof Error ? error.message : 'Unknown error';
+				console.error('Cline <Language Model API>: Client creation failed:', message);
+				throw new Error(`Cline <Language Model API>: Failed to create client: ${message}`);
+			}
+		}
+
+		return this.client;
+	}
+
+	private cleanTerminalOutput(text: string): string {
+		if (!text) {
+			return '';
+		}
+
+		return text
+			// Normalize line endings
+			.replace(/\r\n/g, '\n')
+			.replace(/\r/g, '\n')
+
+			// Remove ANSI escape sequences
+			.replace(/\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])/g, '') // Full set of ANSI sequences
+			.replace(/\x9B[0-?]*[ -/]*[@-~]/g, '')  // CSI sequences
+
+			// Remove terminal title-setting and other OSC sequences
+			.replace(/\x1B\][0-9;]*(?:\x07|\x1B\\)/g, '')
+
+			// Remove control characters
+			.replace(/[\x00-\x09\x0B-\x0C\x0E-\x1F\x7F]/g, '')
+
+			// Remove VS Code escape sequences
+			.replace(/\x1B[PD].*?\x1B\\/g, '')      // DCS sequences
+			.replace(/\x1B_.*?\x1B\\/g, '')         // APC sequences
+			.replace(/\x1B\^.*?\x1B\\/g, '')        // PM sequences
+			.replace(/\x1B\[[\d;]*[HfABCDEFGJKST]/g, '') // Cursor movement and clear screen
+
+			// Remove Windows paths and shell housekeeping output
+			.replace(/^(?:PS )?[A-Z]:\\[^\n]*$/mg, '')
+			.replace(/^;?Cwd=.*$/mg, '')
+
+			// Strip escaped hex/unicode sequences
+			.replace(/\\x[0-9a-fA-F]{2}/g, '')
+			.replace(/\\u[0-9a-fA-F]{4}/g, '')
+
+			// Final cleanup
+			.replace(/\n{3,}/g, '\n\n')  // Collapse runs of blank lines
+			.trim();
+	}
+
+	private cleanMessageContent(content: any): any {
+		if (!content) {
+			return content;
+		}
+
+		if (typeof content === 'string') {
+			return this.cleanTerminalOutput(content);
+		}
+
+		if (Array.isArray(content)) {
+			return content.map(item => this.cleanMessageContent(item));
+		}
+
+		if (typeof content === 'object') {
+			const cleaned: any = {};
+			for (const [key, value] of Object.entries(content)) {
+				cleaned[key] = this.cleanMessageContent(value);
+			}
+			return cleaned;
+		}
+
+		return content;
+	}
+
+	/**
+	 * Creates and streams a message using the VS Code Language Model API.
+	 *
+	 * @param systemPrompt - The system prompt to initialize the conversation context
+	 * @param messages - An array of message parameters following the Anthropic message format
+	 *
+	 * @yields {ApiStream} An async generator that yields either text chunks or tool calls from the model response
+	 *
+	 * @throws {Error} When the language model client cannot be created
+	 * @throws {Error} When the response stream encounters an error
+	 *
+	 * @remarks
+	 * This method handles the initialization of the VS Code LM client if not already created,
+	 * converts the messages to VS Code LM format, and streams the response chunks.
+	 * Tool calls handling is currently a work in progress.
+	 */
+	async *createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream {
+
+		// Ensure clean state before starting a new request
+		this.ensureCleanState();
+		const client: vscode.LanguageModelChat = await this.getClient();
+
+		// Clean system prompt and messages
+		const cleanedSystemPrompt = this.cleanTerminalOutput(systemPrompt);
+		const cleanedMessages = messages.map(msg => ({
+			...msg,
+			content: this.cleanMessageContent(msg.content)
+		}));
+
+		// Convert Anthropic messages to VS Code LM messages
+		const vsCodeLmMessages: vscode.LanguageModelChatMessage[] = [
+			vscode.LanguageModelChatMessage.Assistant(cleanedSystemPrompt),
+			...convertToVsCodeLmMessages(cleanedMessages),
+		];
+
+		// Initialize cancellation token for the request
+		this.currentRequestCancellation = new vscode.CancellationTokenSource();
+
+		// Calculate input tokens before starting the stream
+		const totalInputTokens: number = await this.calculateTotalInputTokens(systemPrompt, vsCodeLmMessages);
+
+		// Accumulate the text and count at the end of the stream to reduce token counting overhead.
+		let accumulatedText: string = '';
+
+		try {
+
+			// Create the response stream with minimal required options
+			const requestOptions: vscode.LanguageModelChatRequestOptions = {
+				justification: `Cline would like to use '${client.name}' from '${client.vendor}'. Click 'Allow' to proceed.`
+			};
+
+			// Note: Tool support is currently provided by the VSCode Language Model API directly
+			// Extensions can register tools using vscode.lm.registerTool()
+
+			const response: vscode.LanguageModelChatResponse = await client.sendRequest(
+				vsCodeLmMessages,
+				requestOptions,
+				this.currentRequestCancellation.token
+			);
+
+			// Consume the stream and handle both text and tool call chunks
+			for await (const chunk of response.stream) {
+				if (chunk instanceof vscode.LanguageModelTextPart) {
+					// Validate text part value
+					if (typeof chunk.value !== 'string') {
+						console.warn('Cline <Language Model API>: Invalid text part value received:', chunk.value);
+						continue;
+					}
+
+					accumulatedText += chunk.value;
+					yield {
+						type: "text",
+						text: chunk.value,
+					};
+				} else if (chunk instanceof vscode.LanguageModelToolCallPart) {
+					try {
+						// Validate tool call parameters
+						if (!chunk.name || typeof chunk.name !== 'string') {
+							console.warn('Cline <Language Model API>: Invalid tool name received:', chunk.name);
+							continue;
+						}
+
+						if (!chunk.callId || typeof chunk.callId !== 'string') {
+							console.warn('Cline <Language Model API>: Invalid tool callId received:', chunk.callId);
+							continue;
+						}
+
+						// Ensure input is a valid object
+						if (!chunk.input || typeof chunk.input !== 'object') {
+							console.warn('Cline <Language Model API>: Invalid tool input received:', chunk.input);
+							continue;
+						}
+
+						// Convert tool calls to text format with proper error handling
+						const toolCall = {
+							type: "tool_call",
+							name: chunk.name,
+							arguments: chunk.input,
+							callId: chunk.callId
+						};
+
+						const toolCallText = JSON.stringify(toolCall);
+						accumulatedText += toolCallText;
+
+						// Log tool call for debugging
+						console.debug('Cline <Language Model API>: Processing tool call:', {
+							name: chunk.name,
+							callId: chunk.callId,
+							inputSize: JSON.stringify(chunk.input).length
+						});
+
+						yield {
+							type: "text",
+							text: toolCallText,
+						};
+					} catch (error) {
+						console.error('Cline <Language Model API>: Failed to process tool call:', error);
+						// Continue processing other chunks even if one fails
+						continue;
+					}
+				} else {
+					console.warn('Cline <Language Model API>: Unknown chunk type received:', chunk);
+				}
+			}
+
+			// Count tokens in the accumulated text after stream completion
+			const totalOutputTokens: number = await this.countTokens(accumulatedText);
+
+			// Report final usage after stream completion
+			yield {
+				type: "usage",
+				inputTokens: totalInputTokens,
+				outputTokens: totalOutputTokens,
+				totalCost: calculateApiCost(
+					this.getModel().info,
+					totalInputTokens,
+					totalOutputTokens
+				)
+			};
+		}
+		catch (error: unknown) {
+
+			this.ensureCleanState();
+
+			if (error instanceof vscode.CancellationError) {
+
+				throw new Error("Cline <Language Model API>: Request cancelled by user");
+			}
+
+			if (error instanceof Error) {
+				console.error('Cline <Language Model API>: Stream error details:', {
+					message: error.message,
+					stack: error.stack,
+					name: error.name
+				});
+
+				// Return original error if it's already an Error instance
+				throw error;
+			} else if (typeof error === 'object' && error !== null) {
+				// Handle error-like objects
+				const errorDetails = JSON.stringify(error, null, 2);
+				console.error('Cline <Language Model API>: Stream error object:', errorDetails);
+				throw new Error(`Cline <Language Model API>: Response stream error: ${errorDetails}`);
+			} else {
+				// Fallback for unknown error types
+				const errorMessage = String(error);
+				console.error('Cline <Language Model API>: Unknown stream error:', errorMessage);
+				throw new Error(`Cline <Language Model API>: Response stream error: ${errorMessage}`);
+			}
+		}
+	}
+
+	// Return model information based on the current client state
+	getModel(): { id: string; info: ModelInfo; } {
+		if (this.client) {
+			// Validate client properties
+			const requiredProps = {
+				id: this.client.id,
+				vendor: this.client.vendor,
+				family: this.client.family,
+				version: this.client.version,
+				maxInputTokens: this.client.maxInputTokens
+			};
+
+			// Log any missing properties for debugging
+			for (const [prop, value] of Object.entries(requiredProps)) {
+				if (!value && value !== 0) {
+					console.warn(`Cline <Language Model API>: Client missing ${prop} property`);
+				}
+			}
+
+			// Construct model ID using available information
+			const modelParts = [
+				this.client.vendor,
+				this.client.family,
+				this.client.version
+			].filter(Boolean);
+
+			const modelId = this.client.id || modelParts.join(SELECTOR_SEPARATOR);
+
+			// Build model info with conservative defaults for missing values
+			const modelInfo: ModelInfo = {
+				maxTokens: -1, // Unlimited tokens by default
+				contextWindow: typeof this.client.maxInputTokens === 'number'
+					? Math.max(0, this.client.maxInputTokens)
+					: openAiModelInfoSaneDefaults.contextWindow,
+				supportsImages: false, // VSCode Language Model API currently doesn't support image inputs
+				supportsPromptCache: true,
+				inputPrice: 0,
+				outputPrice: 0,
+				description: `VSCode Language Model: ${modelId}`
+			};
+
+			return { id: modelId, info: modelInfo };
+		}
+
+		// Fallback when no client is available
+		const fallbackId = this.options.vsCodeLmModelSelector
+			? stringifyVsCodeLmModelSelector(this.options.vsCodeLmModelSelector)
+			: "vscode-lm";
+
+		console.debug('Cline <Language Model API>: No client available, using fallback model info');
+
+		return {
+			id: fallbackId,
+			info: {
+				...openAiModelInfoSaneDefaults,
+				description: `VSCode Language Model (Fallback): ${fallbackId}`
+			}
+		};
+	}
+
+	async completePrompt(prompt: string): Promise<string> {
+		try {
+			const client = await this.getClient();
+			const response = await client.sendRequest([vscode.LanguageModelChatMessage.User(prompt)], {}, new vscode.CancellationTokenSource().token);
+			let result = "";
+			for await (const chunk of response.stream) {
+				if (chunk instanceof vscode.LanguageModelTextPart) {
+					result += chunk.value;
+				}
+			}
+			return result;
+		} catch (error) {
+			if (error instanceof Error) {
+				throw new Error(`VSCode LM completion error: ${error.message}`)
+			}
+			throw error
+		}
+	}
 }
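
A usage sketch for the handler above, assuming it runs inside a VS Code extension host. The chunk shapes (`text`, `usage`) match the objects yielded by `createMessage` in the diff; the selector values and driver function are hypothetical:

```ts
// Hypothetical driver; assumes a VS Code extension host where vscode.lm is available.
async function demo(): Promise<void> {
	const handler = new VsCodeLmHandler({
		vsCodeLmModelSelector: { vendor: "copilot", family: "gpt-4o" },
	})

	try {
		for await (const chunk of handler.createMessage("You are a concise assistant.", [
			{ role: "user", content: "Say hello." },
		])) {
			if (chunk.type === "text") {
				process.stdout.write(chunk.text)
			} else if (chunk.type === "usage") {
				console.log(`\ntokens: ${chunk.inputTokens} in / ${chunk.outputTokens} out`)
			}
		}
	} finally {
		// Releases the configuration listener and any pending cancellation token.
		handler.dispose()
	}
}
```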

+ 192 - 192
src/api/transform/vscode-lm-format.ts

@@ -5,205 +5,205 @@ import * as vscode from 'vscode';
  * Safely converts a value into a plain object.
  */
 function asObjectSafe(value: any): object {
-    // Handle null/undefined
-    if (!value) {
-        return {};
-    }
-
-    try {
-        // Handle strings that might be JSON
-        if (typeof value === 'string') {
-            return JSON.parse(value);
-        }
-
-        // Handle pre-existing objects
-        if (typeof value === 'object') {
-            return Object.assign({}, value);
-        }
-
-        return {};
-    }
-    catch (error) {
-        console.warn('Cline <Language Model API>: Failed to parse object:', error);
-        return {};
-    }
+	// Handle null/undefined
+	if (!value) {
+		return {};
+	}
+
+	try {
+		// Handle strings that might be JSON
+		if (typeof value === 'string') {
+			return JSON.parse(value);
+		}
+
+		// Handle pre-existing objects
+		if (typeof value === 'object') {
+			return Object.assign({}, value);
+		}
+
+		return {};
+	}
+	catch (error) {
+		console.warn('Cline <Language Model API>: Failed to parse object:', error);
+		return {};
+	}
 }
 
 export function convertToVsCodeLmMessages(anthropicMessages: Anthropic.Messages.MessageParam[]): vscode.LanguageModelChatMessage[] {
-    const vsCodeLmMessages: vscode.LanguageModelChatMessage[] = [];
-
-    for (const anthropicMessage of anthropicMessages) {
-        // Handle simple string messages
-        if (typeof anthropicMessage.content === "string") {
-            vsCodeLmMessages.push(
-                anthropicMessage.role === "assistant"
-                    ? vscode.LanguageModelChatMessage.Assistant(anthropicMessage.content)
-                    : vscode.LanguageModelChatMessage.User(anthropicMessage.content)
-            );
-            continue;
-        }
-
-        // Handle complex message structures
-        switch (anthropicMessage.role) {
-            case "user": {
-                const { nonToolMessages, toolMessages } = anthropicMessage.content.reduce<{
-                    nonToolMessages: (Anthropic.TextBlockParam | Anthropic.ImageBlockParam)[];
-                    toolMessages: Anthropic.ToolResultBlockParam[];
-                }>(
-                    (acc, part) => {
-                        if (part.type === "tool_result") {
-                            acc.toolMessages.push(part);
-                        }
-                        else if (part.type === "text" || part.type === "image") {
-                            acc.nonToolMessages.push(part);
-                        }
-                        return acc;
-                    },
-                    { nonToolMessages: [], toolMessages: [] },
-                );
-
-                // Process tool messages first then non-tool messages
-                const contentParts = [
-                    // Convert tool messages to ToolResultParts
-                    ...toolMessages.map((toolMessage) => {
-                        // Process tool result content into TextParts
-                        const toolContentParts: vscode.LanguageModelTextPart[] = (
-                            typeof toolMessage.content === "string"
-                                ? [new vscode.LanguageModelTextPart(toolMessage.content)]
-                                : (
-                                    toolMessage.content?.map((part) => {
-                                        if (part.type === "image") {
-                                            return new vscode.LanguageModelTextPart(
-                                                `[Image (${part.source?.type || 'Unknown source-type'}): ${part.source?.media_type || 'unknown media-type'} not supported by VSCode LM API]`
-                                            );
-                                        }
-                                        return new vscode.LanguageModelTextPart(part.text);
-                                    })
-                                    ?? [new vscode.LanguageModelTextPart("")]
-                                )
-                        );
-
-                        return new vscode.LanguageModelToolResultPart(
-                            toolMessage.tool_use_id,
-                            toolContentParts
-                        );
-                    }),
-
-                    // Convert non-tool messages to TextParts after tool messages
-                    ...nonToolMessages.map((part) => {
-                        if (part.type === "image") {
-                            return new vscode.LanguageModelTextPart(
-                                `[Image (${part.source?.type || 'Unknown source-type'}): ${part.source?.media_type || 'unknown media-type'} not supported by VSCode LM API]`
-                            );
-                        }
-                        return new vscode.LanguageModelTextPart(part.text);
-                    })
-                ];
-
-                // Add single user message with all content parts
-                vsCodeLmMessages.push(vscode.LanguageModelChatMessage.User(contentParts));
-                break;
-            }
-
-            case "assistant": {
-                const { nonToolMessages, toolMessages } = anthropicMessage.content.reduce<{
-                    nonToolMessages: (Anthropic.TextBlockParam | Anthropic.ImageBlockParam)[];
-                    toolMessages: Anthropic.ToolUseBlockParam[];
-                }>(
-                    (acc, part) => {
-                        if (part.type === "tool_use") {
-                            acc.toolMessages.push(part);
-                        }
-                        else if (part.type === "text" || part.type === "image") {
-                            acc.nonToolMessages.push(part);
-                        }
-                        return acc;
-                    },
-                    { nonToolMessages: [], toolMessages: [] },
-                );
-
-                // Process tool messages first then non-tool messages 
-                const contentParts = [
-                    // Convert tool messages to ToolCallParts first
-                    ...toolMessages.map((toolMessage) =>
-                        new vscode.LanguageModelToolCallPart(
-                            toolMessage.id,
-                            toolMessage.name,
-                            asObjectSafe(toolMessage.input)
-                        )
-                    ),
-
-                    // Convert non-tool messages to TextParts after tool messages
-                    ...nonToolMessages.map((part) => {
-                        if (part.type === "image") {
-                            return new vscode.LanguageModelTextPart("[Image generation not supported by VSCode LM API]");
-                        }
-                        return new vscode.LanguageModelTextPart(part.text);
-                    })
-                ];
-
-                // Add the assistant message to the list of messages
-                vsCodeLmMessages.push(vscode.LanguageModelChatMessage.Assistant(contentParts));
-                break;
-            }
-        }
-    }
-
-    return vsCodeLmMessages;
+	const vsCodeLmMessages: vscode.LanguageModelChatMessage[] = [];
+
+	for (const anthropicMessage of anthropicMessages) {
+		// Handle simple string messages
+		if (typeof anthropicMessage.content === "string") {
+			vsCodeLmMessages.push(
+				anthropicMessage.role === "assistant"
+					? vscode.LanguageModelChatMessage.Assistant(anthropicMessage.content)
+					: vscode.LanguageModelChatMessage.User(anthropicMessage.content)
+			);
+			continue;
+		}
+
+		// Handle complex message structures
+		switch (anthropicMessage.role) {
+			case "user": {
+				const { nonToolMessages, toolMessages } = anthropicMessage.content.reduce<{
+					nonToolMessages: (Anthropic.TextBlockParam | Anthropic.ImageBlockParam)[];
+					toolMessages: Anthropic.ToolResultBlockParam[];
+				}>(
+					(acc, part) => {
+						if (part.type === "tool_result") {
+							acc.toolMessages.push(part);
+						}
+						else if (part.type === "text" || part.type === "image") {
+							acc.nonToolMessages.push(part);
+						}
+						return acc;
+					},
+					{ nonToolMessages: [], toolMessages: [] },
+				);
+
+				// Process tool messages first then non-tool messages
+				const contentParts = [
+					// Convert tool messages to ToolResultParts
+					...toolMessages.map((toolMessage) => {
+						// Process tool result content into TextParts
+						const toolContentParts: vscode.LanguageModelTextPart[] = (
+							typeof toolMessage.content === "string"
+								? [new vscode.LanguageModelTextPart(toolMessage.content)]
+								: (
+									toolMessage.content?.map((part) => {
+										if (part.type === "image") {
+											return new vscode.LanguageModelTextPart(
+												`[Image (${part.source?.type || 'Unknown source-type'}): ${part.source?.media_type || 'unknown media-type'} not supported by VSCode LM API]`
+											);
+										}
+										return new vscode.LanguageModelTextPart(part.text);
+									})
+									?? [new vscode.LanguageModelTextPart("")]
+								)
+						);
+
+						return new vscode.LanguageModelToolResultPart(
+							toolMessage.tool_use_id,
+							toolContentParts
+						);
+					}),
+
+					// Convert non-tool messages to TextParts after tool messages
+					...nonToolMessages.map((part) => {
+						if (part.type === "image") {
+							return new vscode.LanguageModelTextPart(
+								`[Image (${part.source?.type || 'Unknown source-type'}): ${part.source?.media_type || 'unknown media-type'} not supported by VSCode LM API]`
+							);
+						}
+						return new vscode.LanguageModelTextPart(part.text);
+					})
+				];
+
+				// Add single user message with all content parts
+				vsCodeLmMessages.push(vscode.LanguageModelChatMessage.User(contentParts));
+				break;
+			}
+
+			case "assistant": {
+				const { nonToolMessages, toolMessages } = anthropicMessage.content.reduce<{
+					nonToolMessages: (Anthropic.TextBlockParam | Anthropic.ImageBlockParam)[];
+					toolMessages: Anthropic.ToolUseBlockParam[];
+				}>(
+					(acc, part) => {
+						if (part.type === "tool_use") {
+							acc.toolMessages.push(part);
+						}
+						else if (part.type === "text" || part.type === "image") {
+							acc.nonToolMessages.push(part);
+						}
+						return acc;
+					},
+					{ nonToolMessages: [], toolMessages: [] },
+				);
+
+				// Process tool messages first then non-tool messages 
+				const contentParts = [
+					// Convert tool messages to ToolCallParts first
+					...toolMessages.map((toolMessage) =>
+						new vscode.LanguageModelToolCallPart(
+							toolMessage.id,
+							toolMessage.name,
+							asObjectSafe(toolMessage.input)
+						)
+					),
+
+					// Convert non-tool messages to TextParts after tool messages
+					...nonToolMessages.map((part) => {
+						if (part.type === "image") {
+							return new vscode.LanguageModelTextPart("[Image generation not supported by VSCode LM API]");
+						}
+						return new vscode.LanguageModelTextPart(part.text);
+					})
+				];
+
+				// Add the assistant message to the list of messages
+				vsCodeLmMessages.push(vscode.LanguageModelChatMessage.Assistant(contentParts));
+				break;
+			}
+		}
+	}
+
+	return vsCodeLmMessages;
 }
 
 export function convertToAnthropicRole(vsCodeLmMessageRole: vscode.LanguageModelChatMessageRole): string | null {
-    switch (vsCodeLmMessageRole) {
-        case vscode.LanguageModelChatMessageRole.Assistant:
-            return "assistant";
-        case vscode.LanguageModelChatMessageRole.User:
-            return "user";
-        default:
-            return null;
-    }
+	switch (vsCodeLmMessageRole) {
+		case vscode.LanguageModelChatMessageRole.Assistant:
+			return "assistant";
+		case vscode.LanguageModelChatMessageRole.User:
+			return "user";
+		default:
+			return null;
+	}
 }
 
 export async function convertToAnthropicMessage(vsCodeLmMessage: vscode.LanguageModelChatMessage): Promise<Anthropic.Messages.Message> {
-    const anthropicRole: string | null = convertToAnthropicRole(vsCodeLmMessage.role);
-    if (anthropicRole !== "assistant") {
-        throw new Error("Cline <Language Model API>: Only assistant messages are supported.");
-    }
-
-    return {
-        id: crypto.randomUUID(),
-        type: "message",
-        model: "vscode-lm",
-        role: anthropicRole,
-        content: (
-            vsCodeLmMessage.content
-                .map((part): Anthropic.ContentBlock | null => {
-                    if (part instanceof vscode.LanguageModelTextPart) {
-                        return {
-                            type: "text",
-                            text: part.value
-                        };
-                    }
-
-                    if (part instanceof vscode.LanguageModelToolCallPart) {
-                        return {
-                            type: "tool_use",
-                            id: part.callId || crypto.randomUUID(),
-                            name: part.name,
-                            input: asObjectSafe(part.input)
-                        };
-                    }
-
-                    return null;
-                })
-                .filter(
-                    (part): part is Anthropic.ContentBlock => part !== null
-                )
-        ),
-        stop_reason: null,
-        stop_sequence: null,
-        usage: {
-            input_tokens: 0,
-            output_tokens: 0,
-        }
-    };
+	const anthropicRole: string | null = convertToAnthropicRole(vsCodeLmMessage.role);
+	if (anthropicRole !== "assistant") {
+		throw new Error("Cline <Language Model API>: Only assistant messages are supported.");
+	}
+
+	return {
+		id: crypto.randomUUID(),
+		type: "message",
+		model: "vscode-lm",
+		role: anthropicRole,
+		content: (
+			vsCodeLmMessage.content
+				.map((part): Anthropic.ContentBlock | null => {
+					if (part instanceof vscode.LanguageModelTextPart) {
+						return {
+							type: "text",
+							text: part.value
+						};
+					}
+
+					if (part instanceof vscode.LanguageModelToolCallPart) {
+						return {
+							type: "tool_use",
+							id: part.callId || crypto.randomUUID(),
+							name: part.name,
+							input: asObjectSafe(part.input)
+						};
+					}
+
+					return null;
+				})
+				.filter(
+					(part): part is Anthropic.ContentBlock => part !== null
+				)
+		),
+		stop_reason: null,
+		stop_sequence: null,
+		usage: {
+			input_tokens: 0,
+			output_tokens: 0,
+		}
+	};
 }
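
To make the conversion rules concrete, a small sketch (not part of the commit; the input uses the Anthropic block shapes the converter already handles, and the tool name is hypothetical):

```ts
// Sketch only: a hypothetical conversation exercising both branches of the converter.
const vsCodeMessages = convertToVsCodeLmMessages([
	{ role: "user", content: "List the open files." },
	{
		role: "assistant",
		content: [
			{ type: "text", text: "Checking the workspace." },
			{ type: "tool_use", id: "call_1", name: "list_files", input: { path: "." } },
		],
	},
])

// vsCodeMessages[0] is a plain User message.
// vsCodeMessages[1] is an Assistant message whose content begins with a
// LanguageModelToolCallPart ("list_files") followed by a LanguageModelTextPart,
// because tool parts are emitted before text parts in the reducer above.
```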

+ 2 - 67
src/core/webview/ClineProvider.ts

@@ -45,7 +45,6 @@ type SecretKey =
 	| "geminiApiKey"
 	| "openAiNativeApiKey"
 	| "deepSeekApiKey"
-
 type GlobalStateKey =
 	| "apiProvider"
 	| "apiModelId"
@@ -482,72 +481,6 @@ export class ClineProvider implements vscode.WebviewViewProvider {
 						break
 					case "apiConfiguration":
 						if (message.apiConfiguration) {
-							const {
-								apiProvider,
-								apiModelId,
-								apiKey,
-								glamaModelId,
-								glamaModelInfo,
-								glamaApiKey,
-								openRouterApiKey,
-								awsAccessKey,
-								awsSecretKey,
-								awsSessionToken,
-								awsRegion,
-								awsUseCrossRegionInference,
-								vertexProjectId,
-								vertexRegion,
-								openAiBaseUrl,
-								openAiApiKey,
-								openAiModelId,
-								ollamaModelId,
-								ollamaBaseUrl,
-								lmStudioModelId,
-								lmStudioBaseUrl,
-								anthropicBaseUrl,
-								geminiApiKey,
-								openAiNativeApiKey,
-								azureApiVersion,
-								openAiStreamingEnabled,
-								openRouterModelId,
-								openRouterModelInfo,
-								openRouterUseMiddleOutTransform,
-								vsCodeLmModelSelector,
-							} = message.apiConfiguration
-							await this.updateGlobalState("apiProvider", apiProvider)
-							await this.updateGlobalState("apiModelId", apiModelId)
-							await this.storeSecret("apiKey", apiKey)
-							await this.updateGlobalState("glamaModelId", glamaModelId)
-							await this.updateGlobalState("glamaModelInfo", glamaModelInfo)
-							await this.storeSecret("glamaApiKey", glamaApiKey)
-							await this.storeSecret("openRouterApiKey", openRouterApiKey)
-							await this.storeSecret("awsAccessKey", awsAccessKey)
-							await this.storeSecret("awsSecretKey", awsSecretKey)
-							await this.storeSecret("awsSessionToken", awsSessionToken)
-							await this.updateGlobalState("awsRegion", awsRegion)
-							await this.updateGlobalState("awsUseCrossRegionInference", awsUseCrossRegionInference)
-							await this.updateGlobalState("vertexProjectId", vertexProjectId)
-							await this.updateGlobalState("vertexRegion", vertexRegion)
-							await this.updateGlobalState("openAiBaseUrl", openAiBaseUrl)
-							await this.storeSecret("openAiApiKey", openAiApiKey)
-							await this.updateGlobalState("openAiModelId", openAiModelId)
-							await this.updateGlobalState("ollamaModelId", ollamaModelId)
-							await this.updateGlobalState("ollamaBaseUrl", ollamaBaseUrl)
-							await this.updateGlobalState("lmStudioModelId", lmStudioModelId)
-							await this.updateGlobalState("lmStudioBaseUrl", lmStudioBaseUrl)
-							await this.updateGlobalState("anthropicBaseUrl", anthropicBaseUrl)
-							await this.storeSecret("geminiApiKey", geminiApiKey)
-							await this.storeSecret("openAiNativeApiKey", openAiNativeApiKey)
-							await this.storeSecret("deepSeekApiKey", message.apiConfiguration.deepSeekApiKey)
-							await this.updateGlobalState("azureApiVersion", azureApiVersion)
-							await this.updateGlobalState("openAiStreamingEnabled", openAiStreamingEnabled)
-							await this.updateGlobalState("openRouterModelId", openRouterModelId)
-							await this.updateGlobalState("openRouterModelInfo", openRouterModelInfo)
-							await this.updateGlobalState("openRouterUseMiddleOutTransform", openRouterUseMiddleOutTransform)
-							await this.updateGlobalState("vsCodeLmModelSelector", vsCodeLmModelSelector)
-							if (this.cline) {
-								this.cline.api = buildApiHandler(message.apiConfiguration)
-							}
 							await this.updateApiConfiguration(message.apiConfiguration)
 						}
 						await this.postStateToWebview()
@@ -1016,6 +949,7 @@ export class ClineProvider implements vscode.WebviewViewProvider {
 			openRouterModelId,
 			openRouterModelInfo,
 			openRouterUseMiddleOutTransform,
+			vsCodeLmModelSelector,
 		} = apiConfiguration
 		await this.updateGlobalState("apiProvider", apiProvider)
 		await this.updateGlobalState("apiModelId", apiModelId)
@@ -1047,6 +981,7 @@ export class ClineProvider implements vscode.WebviewViewProvider {
 		await this.updateGlobalState("openRouterModelId", openRouterModelId)
 		await this.updateGlobalState("openRouterModelInfo", openRouterModelInfo)
 		await this.updateGlobalState("openRouterUseMiddleOutTransform", openRouterUseMiddleOutTransform)
+		await this.updateGlobalState("vsCodeLmModelSelector", vsCodeLmModelSelector)
 		if (this.cline) {
 			this.cline.api = buildApiHandler(apiConfiguration)
 		} 
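
The "apiConfiguration" case now delegates to updateApiConfiguration instead of repeating every key inline, so a new provider setting such as vsCodeLmModelSelector is persisted in exactly one place. An abridged sketch of the helper, reconstructed from the hunks above (full key list elided):

private async updateApiConfiguration(apiConfiguration: ApiConfiguration) {
	const { apiProvider, geminiApiKey, vsCodeLmModelSelector } = apiConfiguration
	await this.updateGlobalState("apiProvider", apiProvider)
	await this.storeSecret("geminiApiKey", geminiApiKey)
	// ...the remaining keys are persisted the same way...
	await this.updateGlobalState("vsCodeLmModelSelector", vsCodeLmModelSelector)
	if (this.cline) {
		this.cline.api = buildApiHandler(apiConfiguration)
	}
}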

+ 1 - 1
src/extension.ts

@@ -36,7 +36,7 @@ export function activate(context: vscode.ExtensionContext) {
 		context.globalState.update('allowedCommands', defaultCommands);
 	}
 
-	const sidebarProvider = new ClineProvider(context, outputChannel);
+	const sidebarProvider = new ClineProvider(context, outputChannel)
 
 	context.subscriptions.push(
 		vscode.window.registerWebviewViewProvider(ClineProvider.sideBarId, sidebarProvider, {

+ 15 - 16
src/shared/ExtensionMessage.ts

@@ -7,7 +7,6 @@ import { GitCommit } from "../utils/git"
 
 // webview will hold state
 export interface ExtensionMessage {
-
 	type:
 		| "action"
 		| "state"
@@ -26,15 +25,15 @@ export interface ExtensionMessage {
 		| "commitSearchResults"
 		| "listApiConfig"
 		| "vsCodeLmModels"
-	    | "vsCodeLmApiAvailable"
-	    | "requestVsCodeLmModels"
+		| "vsCodeLmApiAvailable"
+		| "requestVsCodeLmModels"
 	text?: string
 	action?:
-	| "chatButtonClicked"
-	| "mcpButtonClicked"
-	| "settingsButtonClicked"
-	| "historyButtonClicked"
-	| "didBecomeVisible"
+		| "chatButtonClicked"
+		| "mcpButtonClicked"
+		| "settingsButtonClicked"
+		| "historyButtonClicked"
+		| "didBecomeVisible"
 	invoke?: "sendMessage" | "primaryButtonClick" | "secondaryButtonClick"
 	state?: ExtensionState
 	images?: string[]
@@ -131,14 +130,14 @@ export type ClineSay =
 
 export interface ClineSayTool {
 	tool:
-	| "editedExistingFile"
-	| "appliedDiff"
-	| "newFileCreated"
-	| "readFile"
-	| "listFilesTopLevel"
-	| "listFilesRecursive"
-	| "listCodeDefinitionNames"
-	| "searchFiles"
+		| "editedExistingFile"
+		| "appliedDiff"
+		| "newFileCreated"
+		| "readFile"
+		| "listFilesTopLevel"
+		| "listFilesRecursive"
+		| "listCodeDefinitionNames"
+		| "searchFiles"
 	path?: string
 	diff?: string
 	content?: string
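
For orientation, a value satisfying the re-indented unions above (field values taken from the literals shown):

const msg: ExtensionMessage = {
	type: "action",
	action: "settingsButtonClicked",
}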

+ 31 - 30
src/shared/api.ts

@@ -65,7 +65,7 @@ export interface ModelInfo {
 	contextWindow: number
 	supportsImages?: boolean
 	supportsComputerUse?: boolean
-	supportsPromptCache: boolean
+	supportsPromptCache: boolean // this value is hardcoded for now
 	inputPrice?: number
 	outputPrice?: number
 	cacheWritesPrice?: number
@@ -124,24 +124,24 @@ export const anthropicModels = {
 // AWS Bedrock
 // https://docs.aws.amazon.com/bedrock/latest/userguide/conversation-inference.html
 export interface MessageContent {
-    type: 'text' | 'image' | 'video' | 'tool_use' | 'tool_result';
-    text?: string;
-    source?: {
-        type: 'base64';
-        data: string | Uint8Array; // string for Anthropic, Uint8Array for Bedrock
-        media_type: 'image/jpeg' | 'image/png' | 'image/gif' | 'image/webp';
-    };
-    // Video specific fields
-    format?: string;
-    s3Location?: {
-        uri: string;
-        bucketOwner?: string;
-    };
-    // Tool use and result fields
-    toolUseId?: string;
-    name?: string;
-    input?: any;
-    output?: any; // Used for tool_result type
+	type: 'text' | 'image' | 'video' | 'tool_use' | 'tool_result';
+	text?: string;
+	source?: {
+		type: 'base64';
+		data: string | Uint8Array; // string for Anthropic, Uint8Array for Bedrock
+		media_type: 'image/jpeg' | 'image/png' | 'image/gif' | 'image/webp';
+	};
+	// Video specific fields
+	format?: string;
+	s3Location?: {
+		uri: string;
+		bucketOwner?: string;
+	};
+	// Tool use and result fields
+	toolUseId?: string;
+	name?: string;
+	input?: any;
+	output?: any; // Used for tool_result type
 }
 
 export type BedrockModelId = keyof typeof bedrockModels
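
A sketch of two MessageContent values under the interface above; the id and tool name are illustrative:

const textBlock: MessageContent = { type: "text", text: "List the bucket contents." }

const toolUse: MessageContent = {
	type: "tool_use",
	toolUseId: "tool-0001", // illustrative id
	name: "list_files", // illustrative tool name
	input: { path: "." },
}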
@@ -235,7 +235,7 @@ export const bedrockModels = {
 		inputPrice: 0.25,
 		outputPrice: 1.25,
 	},
-	"meta.llama3-2-90b-instruct-v1:0": {
+	"meta.llama3-2-90b-instruct-v1:0" : {
 		maxTokens: 8192,
 		contextWindow: 128_000,
 		supportsImages: true,
@@ -244,7 +244,7 @@ export const bedrockModels = {
 		inputPrice: 0.72,
 		outputPrice: 0.72,
 	},
-	"meta.llama3-2-11b-instruct-v1:0": {
+	"meta.llama3-2-11b-instruct-v1:0" : {
 		maxTokens: 8192,
 		contextWindow: 128_000,
 		supportsImages: true,
@@ -253,7 +253,7 @@ export const bedrockModels = {
 		inputPrice: 0.16,
 		outputPrice: 0.16,
 	},
-	"meta.llama3-2-3b-instruct-v1:0": {
+	"meta.llama3-2-3b-instruct-v1:0" : {
 		maxTokens: 8192,
 		contextWindow: 128_000,
 		supportsImages: false,
@@ -262,7 +262,7 @@ export const bedrockModels = {
 		inputPrice: 0.15,
 		outputPrice: 0.15,
 	},
-	"meta.llama3-2-1b-instruct-v1:0": {
+	"meta.llama3-2-1b-instruct-v1:0" : {
 		maxTokens: 8192,
 		contextWindow: 128_000,
 		supportsImages: false,
@@ -271,7 +271,7 @@ export const bedrockModels = {
 		inputPrice: 0.1,
 		outputPrice: 0.1,
 	},
-	"meta.llama3-1-405b-instruct-v1:0": {
+	"meta.llama3-1-405b-instruct-v1:0" : {
 		maxTokens: 8192,
 		contextWindow: 128_000,
 		supportsImages: false,
@@ -280,7 +280,7 @@ export const bedrockModels = {
 		inputPrice: 2.4,
 		outputPrice: 2.4,
 	},
-	"meta.llama3-1-70b-instruct-v1:0": {
+	"meta.llama3-1-70b-instruct-v1:0" : {
 		maxTokens: 8192,
 		contextWindow: 128_000,
 		supportsImages: false,
@@ -289,7 +289,7 @@ export const bedrockModels = {
 		inputPrice: 0.72,
 		outputPrice: 0.72,
 	},
-	"meta.llama3-1-8b-instruct-v1:0": {
+	"meta.llama3-1-8b-instruct-v1:0" : {
 		maxTokens: 8192,
 		contextWindow: 8_000,
 		supportsImages: false,
@@ -298,8 +298,8 @@ export const bedrockModels = {
 		inputPrice: 0.22,
 		outputPrice: 0.22,
 	},
-	"meta.llama3-70b-instruct-v1:0": {
-		maxTokens: 2048,
+	"meta.llama3-70b-instruct-v1:0" : {
+		maxTokens: 2048 ,
 		contextWindow: 8_000,
 		supportsImages: false,
 		supportsComputerUse: false,
@@ -307,8 +307,8 @@ export const bedrockModels = {
 		inputPrice: 2.65,
 		outputPrice: 3.5,
 	},
-	"meta.llama3-8b-instruct-v1:0": {
-		maxTokens: 2048,
+	"meta.llama3-8b-instruct-v1:0" : {
+		maxTokens: 2048 ,
 		contextWindow: 4_000,
 		supportsImages: false,
 		supportsComputerUse: false,
@@ -548,3 +548,4 @@ export const deepSeekModels = {
 // https://learn.microsoft.com/en-us/azure/ai-services/openai/api-version-deprecation
 // https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs
 export const azureOpenAiDefaultApiVersion = "2024-08-01-preview"
+
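
Because BedrockModelId is keyof typeof bedrockModels, model lookups stay type-safe; a minimal sketch:

const id: BedrockModelId = "meta.llama3-1-70b-instruct-v1:0"
const info: ModelInfo = bedrockModels[id]
console.log(info.contextWindow) // 128_000, per the table above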

+ 8 - 8
src/shared/vsCodeSelectorUtils.ts

@@ -3,12 +3,12 @@ import { LanguageModelChatSelector } from 'vscode';
 export const SELECTOR_SEPARATOR = '/';
 
 export function stringifyVsCodeLmModelSelector(selector: LanguageModelChatSelector): string {
-    return [
-        selector.vendor,
-        selector.family,
-        selector.version,
-        selector.id
-    ]
-        .filter(Boolean)
-        .join(SELECTOR_SEPARATOR);
+	return [
+		selector.vendor,
+		selector.family,
+		selector.version,
+		selector.id
+	]
+		.filter(Boolean)
+		.join(SELECTOR_SEPARATOR);
 }
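
A usage sketch for the helper above; unset selector fields drop out via filter(Boolean) (the vendor and family values are illustrative):

import { stringifyVsCodeLmModelSelector } from './vsCodeSelectorUtils';

const label = stringifyVsCodeLmModelSelector({ vendor: 'copilot', family: 'gpt-4o' });
// label === 'copilot/gpt-4o'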