Browse Source

Merge branch 'main' into feature/add_sse_mcp

aheizi 10 months ago
parent
commit
bb42fb63a1

+ 5 - 0
.changeset/wild-dragons-leave.md

@@ -0,0 +1,5 @@
+---
+"roo-cline": patch
+---
+
+Add o3-mini support to openai compatible

+ 37 - 26
CHANGELOG.md

@@ -1,6 +1,17 @@
 # Roo Code Changelog
 
-## [3.8.0]
+## [3.8.1] - 2025-03-07
+
+- Show the reserved output tokens in the context window visualization
+- Improve the UI of the configuration profile dropdown (thanks @DeXtroTip!)
+- Fix bug where custom temperature could not be unchecked (thanks @System233!)
+- Fix bug where decimal prices could not be entered for OpenAI-compatible providers (thanks @System233!)
+- Fix bug with enhance prompt on Sonnet 3.7 with a high thinking budget (thanks @moqimoqidea!)
+- Fix bug with the context window management for thinking models (thanks @ReadyPlayerEmma!)
+- Fix bug where checkpoints were no longer enabled by default
+- Add extension and VSCode versions to telemetry
+
+## [3.8.0] - 2025-03-07
 
 - Add opt-in telemetry to help us improve Roo Code faster (thanks Cline!)
 - Fix terminal overload / gray screen of death, and other terminal issues
@@ -19,7 +30,7 @@
 - Improve styling of the task headers (thanks @monotykamary!)
 - Improve context mention path handling on Windows (thanks @samhvw8!)
 
-## [3.7.12]
+## [3.7.12] - 2025-03-03
 
 - Expand max tokens of thinking models to 128k, and max thinking budget to over 100k (thanks @monotykamary!)
 - Fix issue where keyboard mode switcher wasn't updating API profile (thanks @aheizi!)
@@ -31,19 +42,19 @@
 - Update the warning text for the VS LM API
 - Correctly populate the default OpenRouter model on the welcome screen
 
-## [3.7.11]
+## [3.7.11] - 2025-03-02
 
 - Don't honor custom max tokens for non thinking models
 - Include custom modes in mode switching keyboard shortcut
 - Support read-only modes that can run commands
 
-## [3.7.10]
+## [3.7.10] - 2025-03-01
 
 - Add Gemini models on Vertex AI (thanks @ashktn!)
 - Keyboard shortcuts to switch modes (thanks @aheizi!)
 - Add support for Mermaid diagrams (thanks Cline!)
 
-## [3.7.9]
+## [3.7.9] - 2025-03-01
 
 - Delete task confirmation enhancements
 - Smarter context window management
@@ -53,19 +64,19 @@
 - UI fix to dropdown hover colors (thanks @SamirSaji!)
 - Add support for Claude Sonnet 3.7 thinking via Vertex AI (thanks @lupuletic!)
 
-## [3.7.8]
+## [3.7.8] - 2025-02-27
 
 - Add Vertex AI prompt caching support for Claude models (thanks @aitoroses and @lupuletic!)
 - Add gpt-4.5-preview
 - Add an advanced feature to customize the system prompt
 
-## [3.7.7]
+## [3.7.7] - 2025-02-27
 
 - Graduate checkpoints out of beta
 - Fix enhance prompt button when using Thinking Sonnet
 - Add tooltips to make what buttons do more obvious
 
-## [3.7.6]
+## [3.7.6] - 2025-02-26
 
 - Handle really long text better in the ChatRow similar to TaskHeader (thanks @joemanley201!)
 - Support multiple files in drag-and-drop
@@ -73,7 +84,7 @@
 - Better OpenRouter error handling (no more "Provider Error")
 - Add slider to control max output tokens for thinking models
 
-## [3.7.5]
+## [3.7.5] - 2025-02-26
 
 - Fix context window truncation math (see [#1173](https://github.com/RooVetGit/Roo-Code/issues/1173))
 - Fix various issues with the model picker (thanks @System233!)
@@ -81,48 +92,48 @@
 - Add drag-and-drop for files
 - Enable the "Thinking Budget" slider for Claude 3.7 Sonnet on OpenRouter
 
-## [3.7.4]
+## [3.7.4] - 2025-02-25
 
 - Fix a bug that prevented the "Thinking" setting from properly updating when switching profiles.
 
-## [3.7.3]
+## [3.7.3] - 2025-02-25
 
 - Support for ["Thinking"](https://docs.anthropic.com/en/docs/build-with-claude/extended-thinking) Sonnet 3.7 when using the Anthropic provider.
 
-## [3.7.2]
+## [3.7.2] - 2025-02-24
 
 - Fix computer use and prompt caching for OpenRouter's `anthropic/claude-3.7-sonnet:beta` (thanks @cte!)
 - Fix sliding window calculations for Sonnet 3.7 that were causing a context window overflow (thanks @cte!)
 - Encourage diff editing more strongly in the system prompt (thanks @hannesrudolph!)
 
-## [3.7.1]
+## [3.7.1] - 2025-02-24
 
 - Add AWS Bedrock support for Sonnet 3.7 and update some defaults to Sonnet 3.7 instead of 3.5
 
-## [3.7.0]
+## [3.7.0] - 2025-02-24
 
 - Introducing Roo Code 3.7, with support for the new Claude Sonnet 3.7. Because who cares about skipping version numbers anymore? Thanks @lupuletic and @cte for the PRs!
 
-## [3.3.26]
+## [3.3.26] - 2025-02-27
 
 - Adjust the default prompt for Debug mode to focus more on diagnosis and to require user confirmation before moving on to implementation
 
-## [3.3.25]
+## [3.3.25] - 2025-02-21
 
 - Add a "Debug" mode that specializes in debugging tricky problems (thanks [Ted Werbel](https://x.com/tedx_ai/status/1891514191179309457) and [Carlos E. Perez](https://x.com/IntuitMachine/status/1891516362486337739)!)
 - Add an experimental "Power Steering" option to significantly improve adherence to role definitions and custom instructions
 
-## [3.3.24]
+## [3.3.24] - 2025-02-20
 
 - Fixed a bug with region selection preventing AWS Bedrock profiles from being saved (thanks @oprstchn!)
 - Updated the price of gpt-4o (thanks @marvijo-code!)
 
-## [3.3.23]
+## [3.3.23] - 2025-02-20
 
 - Handle errors more gracefully when reading custom instructions from files (thanks @joemanley201!)
 - Bug fix to hitting "Done" on settings page with unsaved changes (thanks @System233!)
 
-## [3.3.22]
+## [3.3.22] - 2025-02-20
 
 - Improve the Provider Settings configuration with clear Save buttons and warnings about unsaved changes (thanks @System233!)
 - Correctly parse `<think>` reasoning tags from Ollama models (thanks @System233!)
@@ -132,7 +143,7 @@
 - Fix a bug where the .roomodes file was not automatically created when adding custom modes from the Prompts tab
 - Allow setting a wildcard (`*`) to auto-approve all command execution (use with caution!)
 
-## [3.3.21]
+## [3.3.21] - 2025-02-17
 
 - Fix input box revert issue and configuration loss during profile switch (thanks @System233!)
 - Fix default preferred language for zh-cn and zh-tw (thanks @System233!)
@@ -141,7 +152,7 @@
 - Fix system prompt to make sure Roo knows about all available modes
 - Enable streaming mode for OpenAI o1
 
-## [3.3.20]
+## [3.3.20] - 2025-02-14
 
 - Support project-specific custom modes in a .roomodes file
 - Add more Mistral models (thanks @d-oit and @bramburn!)
@@ -149,7 +160,7 @@
 - Add a setting to control the number of open editor tabs to tell the model about (665 is probably too many!)
 - Fix race condition bug with entering API key on the welcome screen
 
-## [3.3.19]
+## [3.3.19] - 2025-02-12
 
 - Fix a bug where aborting in the middle of file writes would not revert the write
 - Honor the VS Code theme for dialog backgrounds
@@ -157,7 +168,7 @@
 - Add a help button that links to our new documentation site (which we would love help from the community to improve!)
 - Switch checkpoints logic to use a shadow git repository to work around issues with hot reloads and polluting existing repositories (thanks Cline for the inspiration!)
 
-## [3.3.18]
+## [3.3.18] - 2025-02-11
 
 - Add a per-API-configuration model temperature setting (thanks @joemanley201!)
 - Add retries for fetching usage stats from OpenRouter (thanks @jcbdev!)
@@ -168,18 +179,18 @@
 - Fix logic error where automatic retries were waiting twice as long as intended
 - Rework the checkpoints code to avoid conflicts with file locks on Windows (sorry for the hassle!)
 
-## [3.3.17]
+## [3.3.17] - 2025-02-09
 
 - Fix the restore checkpoint popover
 - Unset git config that was previously set incorrectly by the checkpoints feature
 
-## [3.3.16]
+## [3.3.16] - 2025-02-09
 
 - Support Volcano Ark platform through the OpenAI-compatible provider
 - Fix jumpiness while entering API config by updating on blur instead of input
 - Add tooltips on checkpoint actions and fix an issue where checkpoints were overwriting existing git name/email settings - thanks for the feedback!
 
-## [3.3.15]
+## [3.3.15] - 2025-02-08
 
 - Improvements to MCP initialization and server restarts (thanks @MuriloFP and @hannesrudolph!)
 - Add a copy button to the recent tasks (thanks @hannesrudolph!)

+ 2 - 2
package-lock.json

@@ -1,12 +1,12 @@
 {
 	"name": "roo-cline",
-	"version": "3.8.0",
+	"version": "3.8.1",
 	"lockfileVersion": 3,
 	"requires": true,
 	"packages": {
 		"": {
 			"name": "roo-cline",
-			"version": "3.8.0",
+			"version": "3.8.1",
 			"dependencies": {
 				"@anthropic-ai/bedrock-sdk": "^0.10.2",
 				"@anthropic-ai/sdk": "^0.37.0",

+ 1 - 1
package.json

@@ -3,7 +3,7 @@
 	"displayName": "Roo Code (prev. Roo Cline)",
 	"description": "A whole dev team of AI agents in your editor.",
 	"publisher": "RooVeterinaryInc",
-	"version": "3.8.0",
+	"version": "3.8.1",
 	"icon": "assets/icons/rocket.png",
 	"galleryBanner": {
 		"color": "#617A91",

+ 3 - 3
src/api/providers/anthropic.ts

@@ -214,12 +214,12 @@ export class AnthropicHandler extends BaseProvider implements SingleCompletionHa
 	}
 
 	async completePrompt(prompt: string) {
-		let { id: modelId, maxTokens, thinking, temperature } = this.getModel()
+		let { id: modelId, temperature } = this.getModel()
 
 		const message = await this.client.messages.create({
 			model: modelId,
-			max_tokens: maxTokens ?? ANTHROPIC_DEFAULT_MAX_TOKENS,
-			thinking,
+			max_tokens: ANTHROPIC_DEFAULT_MAX_TOKENS,
+			thinking: undefined,
 			temperature,
 			messages: [{ role: "user", content: prompt }],
 			stream: false,

+ 68 - 0
src/api/providers/openai.ts

@@ -66,6 +66,11 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
 		const deepseekReasoner = modelId.includes("deepseek-reasoner")
 		const ark = modelUrl.includes(".volces.com")
 
+		if (modelId.startsWith("o3-mini")) {
+			yield* this.handleO3FamilyMessage(modelId, systemPrompt, messages)
+			return
+		}
+
 		if (this.options.openAiStreamingEnabled ?? true) {
 			const systemMessage: OpenAI.Chat.ChatCompletionSystemMessageParam = {
 				role: "system",
@@ -169,6 +174,69 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
 			throw error
 		}
 	}
+
+	private async *handleO3FamilyMessage(
+		modelId: string,
+		systemPrompt: string,
+		messages: Anthropic.Messages.MessageParam[],
+	): ApiStream {
+		if (this.options.openAiStreamingEnabled ?? true) {
+			const stream = await this.client.chat.completions.create({
+				model: "o3-mini",
+				messages: [
+					{
+						role: "developer",
+						content: `Formatting re-enabled\n${systemPrompt}`,
+					},
+					...convertToOpenAiMessages(messages),
+				],
+				stream: true,
+				stream_options: { include_usage: true },
+				reasoning_effort: this.getModel().info.reasoningEffort,
+			})
+
+			yield* this.handleStreamResponse(stream)
+		} else {
+			const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming = {
+				model: modelId,
+				messages: [
+					{
+						role: "developer",
+						content: `Formatting re-enabled\n${systemPrompt}`,
+					},
+					...convertToOpenAiMessages(messages),
+				],
+			}
+
+			const response = await this.client.chat.completions.create(requestOptions)
+
+			yield {
+				type: "text",
+				text: response.choices[0]?.message.content || "",
+			}
+			yield this.processUsageMetrics(response.usage)
+		}
+	}
+
+	private async *handleStreamResponse(stream: AsyncIterable<OpenAI.Chat.Completions.ChatCompletionChunk>): ApiStream {
+		for await (const chunk of stream) {
+			const delta = chunk.choices[0]?.delta
+			if (delta?.content) {
+				yield {
+					type: "text",
+					text: delta.content,
+				}
+			}
+
+			if (chunk.usage) {
+				yield {
+					type: "usage",
+					inputTokens: chunk.usage.prompt_tokens || 0,
+					outputTokens: chunk.usage.completion_tokens || 0,
+				}
+			}
+		}
+	}
 }
 
 export async function getOpenAiModels(baseUrl?: string, apiKey?: string) {

+ 5 - 2
src/core/Cline.ts

@@ -158,7 +158,7 @@ export class Cline {
 		apiConfiguration,
 		customInstructions,
 		enableDiff,
-		enableCheckpoints = false,
+		enableCheckpoints = true,
 		checkpointStorage = "task",
 		fuzzyMatchThreshold,
 		task,
@@ -1124,9 +1124,12 @@ export class Cline {
 
 			const totalTokens = tokensIn + tokensOut + cacheWrites + cacheReads
 
+			// Default max tokens value for thinking models when no specific value is set
+			const DEFAULT_THINKING_MODEL_MAX_TOKENS = 16_384
+
 			const modelInfo = this.api.getModel().info
 			const maxTokens = modelInfo.thinking
-				? this.apiConfiguration.modelMaxTokens || modelInfo.maxTokens
+				? this.apiConfiguration.modelMaxTokens || DEFAULT_THINKING_MODEL_MAX_TOKENS
 				: modelInfo.maxTokens
 			const contextWindow = modelInfo.contextWindow
 			const trimmedMessages = await truncateConversationIfNeeded({

+ 1 - 1
src/core/webview/ClineProvider.ts

@@ -2389,7 +2389,7 @@ export class ClineProvider implements vscode.WebviewViewProvider {
 			allowedCommands: stateValues.allowedCommands,
 			soundEnabled: stateValues.soundEnabled ?? false,
 			diffEnabled: stateValues.diffEnabled ?? true,
-			enableCheckpoints: stateValues.enableCheckpoints ?? false,
+			enableCheckpoints: stateValues.enableCheckpoints ?? true,
 			checkpointStorage: stateValues.checkpointStorage ?? "task",
 			soundVolume: stateValues.soundVolume,
 			browserViewportSize: stateValues.browserViewportSize ?? "900x600",

+ 1 - 1
src/shared/api.ts

@@ -70,7 +70,7 @@ export interface ApiHandlerOptions {
 	requestyApiKey?: string
 	requestyModelId?: string
 	requestyModelInfo?: ModelInfo
-	modelTemperature?: number
+	modelTemperature?: number | null
 	modelMaxTokens?: number
 	modelMaxThinkingTokens?: number
 }

+ 224 - 1
webview-ui/package-lock.json

@@ -15,6 +15,7 @@
 				"@radix-ui/react-icons": "^1.3.2",
 				"@radix-ui/react-popover": "^1.1.6",
 				"@radix-ui/react-progress": "^1.1.2",
+				"@radix-ui/react-select": "^2.1.6",
 				"@radix-ui/react-separator": "^1.1.2",
 				"@radix-ui/react-slider": "^1.2.3",
 				"@radix-ui/react-slot": "^1.1.2",
@@ -4688,6 +4689,229 @@
 				}
 			}
 		},
+		"node_modules/@radix-ui/react-select": {
+			"version": "2.1.6",
+			"resolved": "https://registry.npmjs.org/@radix-ui/react-select/-/react-select-2.1.6.tgz",
+			"integrity": "sha512-T6ajELxRvTuAMWH0YmRJ1qez+x4/7Nq7QIx7zJ0VK3qaEWdnWpNbEDnmWldG1zBDwqrLy5aLMUWcoGirVj5kMg==",
+			"license": "MIT",
+			"dependencies": {
+				"@radix-ui/number": "1.1.0",
+				"@radix-ui/primitive": "1.1.1",
+				"@radix-ui/react-collection": "1.1.2",
+				"@radix-ui/react-compose-refs": "1.1.1",
+				"@radix-ui/react-context": "1.1.1",
+				"@radix-ui/react-direction": "1.1.0",
+				"@radix-ui/react-dismissable-layer": "1.1.5",
+				"@radix-ui/react-focus-guards": "1.1.1",
+				"@radix-ui/react-focus-scope": "1.1.2",
+				"@radix-ui/react-id": "1.1.0",
+				"@radix-ui/react-popper": "1.2.2",
+				"@radix-ui/react-portal": "1.1.4",
+				"@radix-ui/react-primitive": "2.0.2",
+				"@radix-ui/react-slot": "1.1.2",
+				"@radix-ui/react-use-callback-ref": "1.1.0",
+				"@radix-ui/react-use-controllable-state": "1.1.0",
+				"@radix-ui/react-use-layout-effect": "1.1.0",
+				"@radix-ui/react-use-previous": "1.1.0",
+				"@radix-ui/react-visually-hidden": "1.1.2",
+				"aria-hidden": "^1.2.4",
+				"react-remove-scroll": "^2.6.3"
+			},
+			"peerDependencies": {
+				"@types/react": "*",
+				"@types/react-dom": "*",
+				"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
+				"react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
+			},
+			"peerDependenciesMeta": {
+				"@types/react": {
+					"optional": true
+				},
+				"@types/react-dom": {
+					"optional": true
+				}
+			}
+		},
+		"node_modules/@radix-ui/react-select/node_modules/@radix-ui/react-arrow": {
+			"version": "1.1.2",
+			"resolved": "https://registry.npmjs.org/@radix-ui/react-arrow/-/react-arrow-1.1.2.tgz",
+			"integrity": "sha512-G+KcpzXHq24iH0uGG/pF8LyzpFJYGD4RfLjCIBfGdSLXvjLHST31RUiRVrupIBMvIppMgSzQ6l66iAxl03tdlg==",
+			"license": "MIT",
+			"dependencies": {
+				"@radix-ui/react-primitive": "2.0.2"
+			},
+			"peerDependencies": {
+				"@types/react": "*",
+				"@types/react-dom": "*",
+				"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
+				"react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
+			},
+			"peerDependenciesMeta": {
+				"@types/react": {
+					"optional": true
+				},
+				"@types/react-dom": {
+					"optional": true
+				}
+			}
+		},
+		"node_modules/@radix-ui/react-select/node_modules/@radix-ui/react-collection": {
+			"version": "1.1.2",
+			"resolved": "https://registry.npmjs.org/@radix-ui/react-collection/-/react-collection-1.1.2.tgz",
+			"integrity": "sha512-9z54IEKRxIa9VityapoEYMuByaG42iSy1ZXlY2KcuLSEtq8x4987/N6m15ppoMffgZX72gER2uHe1D9Y6Unlcw==",
+			"license": "MIT",
+			"dependencies": {
+				"@radix-ui/react-compose-refs": "1.1.1",
+				"@radix-ui/react-context": "1.1.1",
+				"@radix-ui/react-primitive": "2.0.2",
+				"@radix-ui/react-slot": "1.1.2"
+			},
+			"peerDependencies": {
+				"@types/react": "*",
+				"@types/react-dom": "*",
+				"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
+				"react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
+			},
+			"peerDependenciesMeta": {
+				"@types/react": {
+					"optional": true
+				},
+				"@types/react-dom": {
+					"optional": true
+				}
+			}
+		},
+		"node_modules/@radix-ui/react-select/node_modules/@radix-ui/react-dismissable-layer": {
+			"version": "1.1.5",
+			"resolved": "https://registry.npmjs.org/@radix-ui/react-dismissable-layer/-/react-dismissable-layer-1.1.5.tgz",
+			"integrity": "sha512-E4TywXY6UsXNRhFrECa5HAvE5/4BFcGyfTyK36gP+pAW1ed7UTK4vKwdr53gAJYwqbfCWC6ATvJa3J3R/9+Qrg==",
+			"license": "MIT",
+			"dependencies": {
+				"@radix-ui/primitive": "1.1.1",
+				"@radix-ui/react-compose-refs": "1.1.1",
+				"@radix-ui/react-primitive": "2.0.2",
+				"@radix-ui/react-use-callback-ref": "1.1.0",
+				"@radix-ui/react-use-escape-keydown": "1.1.0"
+			},
+			"peerDependencies": {
+				"@types/react": "*",
+				"@types/react-dom": "*",
+				"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
+				"react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
+			},
+			"peerDependenciesMeta": {
+				"@types/react": {
+					"optional": true
+				},
+				"@types/react-dom": {
+					"optional": true
+				}
+			}
+		},
+		"node_modules/@radix-ui/react-select/node_modules/@radix-ui/react-focus-scope": {
+			"version": "1.1.2",
+			"resolved": "https://registry.npmjs.org/@radix-ui/react-focus-scope/-/react-focus-scope-1.1.2.tgz",
+			"integrity": "sha512-zxwE80FCU7lcXUGWkdt6XpTTCKPitG1XKOwViTxHVKIJhZl9MvIl2dVHeZENCWD9+EdWv05wlaEkRXUykU27RA==",
+			"license": "MIT",
+			"dependencies": {
+				"@radix-ui/react-compose-refs": "1.1.1",
+				"@radix-ui/react-primitive": "2.0.2",
+				"@radix-ui/react-use-callback-ref": "1.1.0"
+			},
+			"peerDependencies": {
+				"@types/react": "*",
+				"@types/react-dom": "*",
+				"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
+				"react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
+			},
+			"peerDependenciesMeta": {
+				"@types/react": {
+					"optional": true
+				},
+				"@types/react-dom": {
+					"optional": true
+				}
+			}
+		},
+		"node_modules/@radix-ui/react-select/node_modules/@radix-ui/react-popper": {
+			"version": "1.2.2",
+			"resolved": "https://registry.npmjs.org/@radix-ui/react-popper/-/react-popper-1.2.2.tgz",
+			"integrity": "sha512-Rvqc3nOpwseCyj/rgjlJDYAgyfw7OC1tTkKn2ivhaMGcYt8FSBlahHOZak2i3QwkRXUXgGgzeEe2RuqeEHuHgA==",
+			"license": "MIT",
+			"dependencies": {
+				"@floating-ui/react-dom": "^2.0.0",
+				"@radix-ui/react-arrow": "1.1.2",
+				"@radix-ui/react-compose-refs": "1.1.1",
+				"@radix-ui/react-context": "1.1.1",
+				"@radix-ui/react-primitive": "2.0.2",
+				"@radix-ui/react-use-callback-ref": "1.1.0",
+				"@radix-ui/react-use-layout-effect": "1.1.0",
+				"@radix-ui/react-use-rect": "1.1.0",
+				"@radix-ui/react-use-size": "1.1.0",
+				"@radix-ui/rect": "1.1.0"
+			},
+			"peerDependencies": {
+				"@types/react": "*",
+				"@types/react-dom": "*",
+				"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
+				"react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
+			},
+			"peerDependenciesMeta": {
+				"@types/react": {
+					"optional": true
+				},
+				"@types/react-dom": {
+					"optional": true
+				}
+			}
+		},
+		"node_modules/@radix-ui/react-select/node_modules/@radix-ui/react-portal": {
+			"version": "1.1.4",
+			"resolved": "https://registry.npmjs.org/@radix-ui/react-portal/-/react-portal-1.1.4.tgz",
+			"integrity": "sha512-sn2O9k1rPFYVyKd5LAJfo96JlSGVFpa1fS6UuBJfrZadudiw5tAmru+n1x7aMRQ84qDM71Zh1+SzK5QwU0tJfA==",
+			"license": "MIT",
+			"dependencies": {
+				"@radix-ui/react-primitive": "2.0.2",
+				"@radix-ui/react-use-layout-effect": "1.1.0"
+			},
+			"peerDependencies": {
+				"@types/react": "*",
+				"@types/react-dom": "*",
+				"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
+				"react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
+			},
+			"peerDependenciesMeta": {
+				"@types/react": {
+					"optional": true
+				},
+				"@types/react-dom": {
+					"optional": true
+				}
+			}
+		},
+		"node_modules/@radix-ui/react-select/node_modules/@radix-ui/react-primitive": {
+			"version": "2.0.2",
+			"resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.0.2.tgz",
+			"integrity": "sha512-Ec/0d38EIuvDF+GZjcMU/Ze6MxntVJYO/fRlCPhCaVUyPY9WTalHJw54tp9sXeJo3tlShWpy41vQRgLRGOuz+w==",
+			"license": "MIT",
+			"dependencies": {
+				"@radix-ui/react-slot": "1.1.2"
+			},
+			"peerDependencies": {
+				"@types/react": "*",
+				"@types/react-dom": "*",
+				"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
+				"react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
+			},
+			"peerDependenciesMeta": {
+				"@types/react": {
+					"optional": true
+				},
+				"@types/react-dom": {
+					"optional": true
+				}
+			}
+		},
 		"node_modules/@radix-ui/react-separator": {
 			"version": "1.1.2",
 			"resolved": "https://registry.npmjs.org/@radix-ui/react-separator/-/react-separator-1.1.2.tgz",
@@ -14151,7 +14375,6 @@
 			"resolved": "https://registry.npmjs.org/jest-environment-jsdom/-/jest-environment-jsdom-29.7.0.tgz",
 			"integrity": "sha512-k9iQbsf9OyOfdzWH8HDmrRT0gSIcX+FLNW7IQq94tFX0gynPwqDTW0Ho6iMVNjGz/nb+l/vW3dWM2bbLLpkbXA==",
 			"dev": true,
-			"license": "MIT",
 			"dependencies": {
 				"@jest/environment": "^29.7.0",
 				"@jest/fake-timers": "^29.7.0",

+ 1 - 0
webview-ui/package.json

@@ -22,6 +22,7 @@
 		"@radix-ui/react-icons": "^1.3.2",
 		"@radix-ui/react-popover": "^1.1.6",
 		"@radix-ui/react-progress": "^1.1.2",
+		"@radix-ui/react-select": "^2.1.6",
 		"@radix-ui/react-separator": "^1.1.2",
 		"@radix-ui/react-slider": "^1.2.3",
 		"@radix-ui/react-slot": "^1.1.2",

+ 3 - 3
webview-ui/src/__tests__/getMaxTokensForModel.test.tsx

@@ -1,4 +1,4 @@
-import { getMaxTokensForModel } from "@/utils/model-utils"
+import { DEFAULT_THINKING_MODEL_MAX_TOKENS, getMaxTokensForModel } from "@/utils/model-utils"
 
 describe("getMaxTokensForModel utility from model-utils", () => {
 	test("should return maxTokens from modelInfo when thinking is false", () => {
@@ -29,7 +29,7 @@ describe("getMaxTokensForModel utility from model-utils", () => {
 		expect(result).toBe(4096)
 	})
 
-	test("should fallback to modelInfo.maxTokens when thinking is true but apiConfig.modelMaxTokens is not defined", () => {
+	test("should fallback to DEFAULT_THINKING_MODEL_MAX_TOKENS when thinking is true but apiConfig.modelMaxTokens is not defined", () => {
 		const modelInfo = {
 			maxTokens: 2048,
 			thinking: true,
@@ -38,7 +38,7 @@ describe("getMaxTokensForModel utility from model-utils", () => {
 		const apiConfig = {}
 
 		const result = getMaxTokensForModel(modelInfo, apiConfig)
-		expect(result).toBe(2048)
+		expect(result).toBe(DEFAULT_THINKING_MODEL_MAX_TOKENS)
 	})
 
 	test("should handle undefined inputs gracefully", () => {

+ 80 - 48
webview-ui/src/components/settings/ApiOptions.tsx

@@ -4,6 +4,8 @@ import { Checkbox, Dropdown, type DropdownOption } from "vscrui"
 import { VSCodeLink, VSCodeRadio, VSCodeRadioGroup, VSCodeTextField } from "@vscode/webview-ui-toolkit/react"
 import * as vscodemodels from "vscode"
 
+import { Select, SelectContent, SelectGroup, SelectItem, SelectTrigger, SelectValue, Button } from "@/components/ui"
+
 import {
 	ApiConfiguration,
 	ModelInfo,
@@ -42,7 +44,6 @@ import { TemperatureControl } from "./TemperatureControl"
 import { validateApiConfiguration, validateModelId } from "@/utils/validate"
 import { ApiErrorMessage } from "./ApiErrorMessage"
 import { ThinkingBudget } from "./ThinkingBudget"
-import { Button } from "../ui"
 
 const modelsByProvider: Record<string, Record<string, ModelInfo>> = {
 	anthropic: anthropicModels,
@@ -54,6 +55,25 @@ const modelsByProvider: Record<string, Record<string, ModelInfo>> = {
 	mistral: mistralModels,
 }
 
+const providers = [
+	{ value: "openrouter", label: "OpenRouter" },
+	{ value: "anthropic", label: "Anthropic" },
+	{ value: "gemini", label: "Google Gemini" },
+	{ value: "deepseek", label: "DeepSeek" },
+	{ value: "openai-native", label: "OpenAI" },
+	{ value: "openai", label: "OpenAI Compatible" },
+	{ value: "vertex", label: "GCP Vertex AI" },
+	{ value: "bedrock", label: "AWS Bedrock" },
+	{ value: "glama", label: "Glama" },
+	{ value: "vscode-lm", label: "VS Code LM API" },
+	{ value: "mistral", label: "Mistral" },
+	{ value: "lmstudio", label: "LM Studio" },
+	{ value: "ollama", label: "Ollama" },
+	{ value: "unbound", label: "Unbound" },
+	{ value: "requesty", label: "Requesty" },
+	{ value: "human-relay", label: "Human Relay" },
+]
+
 interface ApiOptionsProps {
 	uriScheme: string | undefined
 	apiConfiguration: ApiConfiguration
@@ -238,30 +258,22 @@ const ApiOptions = ({
 				<label htmlFor="api-provider" className="font-medium">
 					API Provider
 				</label>
-				<Dropdown
-					id="api-provider"
+				<Select
 					value={selectedProvider}
-					onChange={handleInputChange("apiProvider", dropdownEventTransform)}
-					options={[
-						{ value: "openrouter", label: "OpenRouter" },
-						{ value: "anthropic", label: "Anthropic" },
-						{ value: "gemini", label: "Google Gemini" },
-						{ value: "deepseek", label: "DeepSeek" },
-						{ value: "openai-native", label: "OpenAI" },
-						{ value: "openai", label: "OpenAI Compatible" },
-						{ value: "vertex", label: "GCP Vertex AI" },
-						{ value: "bedrock", label: "AWS Bedrock" },
-						{ value: "glama", label: "Glama" },
-						{ value: "vscode-lm", label: "VS Code LM API" },
-						{ value: "mistral", label: "Mistral" },
-						{ value: "lmstudio", label: "LM Studio" },
-						{ value: "ollama", label: "Ollama" },
-						{ value: "unbound", label: "Unbound" },
-						{ value: "requesty", label: "Requesty" },
-						{ value: "human-relay", label: "Human Relay" },
-					]}
-					className="w-full"
-				/>
+					onValueChange={handleInputChange("apiProvider", dropdownEventTransform)}>
+					<SelectTrigger className="w-full">
+						<SelectValue placeholder="Select" />
+					</SelectTrigger>
+					<SelectContent>
+						<SelectGroup>
+							{providers.map(({ value, label }) => (
+								<SelectItem key={value} value={value}>
+									{label}
+								</SelectItem>
+							))}
+						</SelectGroup>
+					</SelectContent>
+				</Select>
 			</div>
 
 			{errorMessage && <ApiErrorMessage errorMessage={errorMessage} />}
@@ -424,10 +436,10 @@ const ApiOptions = ({
 				<>
 					<VSCodeTextField
 						value={apiConfiguration?.mistralApiKey || ""}
-						style={{ width: "100%" }}
 						type="password"
 						onInput={handleInputChange("mistralApiKey")}
-						placeholder="Enter API Key...">
+						placeholder="Enter API Key..."
+						className="w-full">
 						<span className="font-medium">Mistral API Key</span>
 					</VSCodeTextField>
 					<div className="text-sm text-vscode-descriptionForeground -mt-2">
@@ -575,16 +587,16 @@ const ApiOptions = ({
 					</div>
 					<VSCodeTextField
 						value={apiConfiguration?.vertexJsonCredentials || ""}
-						style={{ width: "100%" }}
 						onInput={handleInputChange("vertexJsonCredentials")}
-						placeholder="Enter Credentials JSON...">
+						placeholder="Enter Credentials JSON..."
+						className="w-full">
 						<span className="font-medium">Google Cloud Credentials</span>
 					</VSCodeTextField>
 					<VSCodeTextField
 						value={apiConfiguration?.vertexKeyFile || ""}
-						style={{ width: "100%" }}
 						onInput={handleInputChange("vertexKeyFile")}
-						placeholder="Enter Key File Path...">
+						placeholder="Enter Key File Path..."
+						className="w-full">
 						<span className="font-medium">Google Cloud Key File Path</span>
 					</VSCodeTextField>
 					<VSCodeTextField
@@ -620,10 +632,10 @@ const ApiOptions = ({
 				<>
 					<VSCodeTextField
 						value={apiConfiguration?.geminiApiKey || ""}
-						style={{ width: "100%" }}
 						type="password"
 						onInput={handleInputChange("geminiApiKey")}
-						placeholder="Enter API Key...">
+						placeholder="Enter API Key..."
+						className="w-full">
 						<span className="font-medium">Gemini API Key</span>
 					</VSCodeTextField>
 					<div className="text-sm text-vscode-descriptionForeground -mt-2">
@@ -713,10 +725,13 @@ const ApiOptions = ({
 								}
 								type="text"
 								style={{
-									width: "100%",
 									borderColor: (() => {
 										const value = apiConfiguration?.openAiCustomModelInfo?.maxTokens
-										if (!value) return "var(--vscode-input-border)"
+
+										if (!value) {
+											return "var(--vscode-input-border)"
+										}
+
 										return value > 0
 											? "var(--vscode-charts-green)"
 											: "var(--vscode-errorForeground)"
@@ -725,12 +740,14 @@ const ApiOptions = ({
 								title="Maximum number of tokens the model can generate in a single response"
 								onInput={handleInputChange("openAiCustomModelInfo", (e) => {
 									const value = parseInt((e.target as HTMLInputElement).value)
+
 									return {
 										...(apiConfiguration?.openAiCustomModelInfo || openAiModelInfoSaneDefaults),
 										maxTokens: isNaN(value) ? undefined : value,
 									}
 								})}
-								placeholder="e.g. 4096">
+								placeholder="e.g. 4096"
+								className="w-full">
 								<span className="font-medium">Max Output Tokens</span>
 							</VSCodeTextField>
 							<div className="text-sm text-vscode-descriptionForeground">
@@ -748,10 +765,13 @@ const ApiOptions = ({
 								}
 								type="text"
 								style={{
-									width: "100%",
 									borderColor: (() => {
 										const value = apiConfiguration?.openAiCustomModelInfo?.contextWindow
-										if (!value) return "var(--vscode-input-border)"
+
+										if (!value) {
+											return "var(--vscode-input-border)"
+										}
+
 										return value > 0
 											? "var(--vscode-charts-green)"
 											: "var(--vscode-errorForeground)"
@@ -761,6 +781,7 @@ const ApiOptions = ({
 								onInput={handleInputChange("openAiCustomModelInfo", (e) => {
 									const value = (e.target as HTMLInputElement).value
 									const parsed = parseInt(value)
+
 									return {
 										...(apiConfiguration?.openAiCustomModelInfo || openAiModelInfoSaneDefaults),
 										contextWindow: isNaN(parsed)
@@ -768,7 +789,8 @@ const ApiOptions = ({
 											: parsed,
 									}
 								})}
-								placeholder="e.g. 128000">
+								placeholder="e.g. 128000"
+								className="w-full">
 								<span className="font-medium">Context Window Size</span>
 							</VSCodeTextField>
 							<div className="text-sm text-vscode-descriptionForeground">
@@ -834,24 +856,29 @@ const ApiOptions = ({
 								}
 								type="text"
 								style={{
-									width: "100%",
 									borderColor: (() => {
 										const value = apiConfiguration?.openAiCustomModelInfo?.inputPrice
-										if (!value && value !== 0) return "var(--vscode-input-border)"
+
+										if (!value && value !== 0) {
+											return "var(--vscode-input-border)"
+										}
+
 										return value >= 0
 											? "var(--vscode-charts-green)"
 											: "var(--vscode-errorForeground)"
 									})(),
 								}}
-								onInput={handleInputChange("openAiCustomModelInfo", (e) => {
+								onChange={handleInputChange("openAiCustomModelInfo", (e) => {
 									const value = (e.target as HTMLInputElement).value
 									const parsed = parseFloat(value)
+
 									return {
 										...(apiConfiguration?.openAiCustomModelInfo ?? openAiModelInfoSaneDefaults),
 										inputPrice: isNaN(parsed) ? openAiModelInfoSaneDefaults.inputPrice : parsed,
 									}
 								})}
-								placeholder="e.g. 0.0001">
+								placeholder="e.g. 0.0001"
+								className="w-full">
 								<div className="flex items-center gap-1">
 									<span className="font-medium">Input Price</span>
 									<i
@@ -872,24 +899,29 @@ const ApiOptions = ({
 								}
 								type="text"
 								style={{
-									width: "100%",
 									borderColor: (() => {
 										const value = apiConfiguration?.openAiCustomModelInfo?.outputPrice
-										if (!value && value !== 0) return "var(--vscode-input-border)"
+
+										if (!value && value !== 0) {
+											return "var(--vscode-input-border)"
+										}
+
 										return value >= 0
 											? "var(--vscode-charts-green)"
 											: "var(--vscode-errorForeground)"
 									})(),
 								}}
-								onInput={handleInputChange("openAiCustomModelInfo", (e) => {
+								onChange={handleInputChange("openAiCustomModelInfo", (e) => {
 									const value = (e.target as HTMLInputElement).value
 									const parsed = parseFloat(value)
+
 									return {
 										...(apiConfiguration?.openAiCustomModelInfo || openAiModelInfoSaneDefaults),
 										outputPrice: isNaN(parsed) ? openAiModelInfoSaneDefaults.outputPrice : parsed,
 									}
 								})}
-								placeholder="e.g. 0.0002">
+								placeholder="e.g. 0.0002"
+								className="w-full">
 								<div className="flex items-center gap-1">
 									<span className="font-medium">Output Price</span>
 									<i
@@ -960,9 +992,9 @@ const ApiOptions = ({
 							<div>
 								<VSCodeTextField
 									value={apiConfiguration?.lmStudioDraftModelId || ""}
-									style={{ width: "100%" }}
 									onInput={handleInputChange("lmStudioDraftModelId")}
-									placeholder={"e.g. lmstudio-community/llama-3.2-1b-instruct"}>
+									placeholder={"e.g. lmstudio-community/llama-3.2-1b-instruct"}
+									className="w-full">
 									<span className="font-medium">Draft Model ID</span>
 								</VSCodeTextField>
 								<div className="text-sm text-vscode-descriptionForeground">

+ 5 - 5
webview-ui/src/components/settings/TemperatureControl.tsx

@@ -3,8 +3,8 @@ import { useEffect, useState } from "react"
 import { useDebounce } from "react-use"
 
 interface TemperatureControlProps {
-	value: number | undefined
-	onChange: (value: number | undefined) => void
+	value: number | undefined | null
+	onChange: (value: number | undefined | null) => void
 	maxValue?: number // Some providers like OpenAI use 0-2 range
 }
 
@@ -14,7 +14,7 @@ export const TemperatureControl = ({ value, onChange, maxValue = 1 }: Temperatur
 	useDebounce(() => onChange(inputValue), 50, [onChange, inputValue])
 	// Sync internal state with prop changes when switching profiles
 	useEffect(() => {
-		const hasCustomTemperature = value !== undefined
+		const hasCustomTemperature = value !== undefined && value !== null
 		setIsCustomTemperature(hasCustomTemperature)
 		setInputValue(value)
 	}, [value])
@@ -28,7 +28,7 @@ export const TemperatureControl = ({ value, onChange, maxValue = 1 }: Temperatur
 						const isChecked = e.target.checked
 						setIsCustomTemperature(isChecked)
 						if (!isChecked) {
-							setInputValue(undefined) // Unset the temperature
+							setInputValue(null) // Unset the temperature; use null because undefined is dropped during JSON serialization
 						} else {
 							setInputValue(value ?? 0) // Use the value from apiConfiguration, if set
 						}
@@ -53,7 +53,7 @@ export const TemperatureControl = ({ value, onChange, maxValue = 1 }: Temperatur
 							min="0"
 							max={maxValue}
 							step="0.01"
-							value={inputValue}
+							value={inputValue ?? 0}
 							className="h-2 focus:outline-0 w-4/5 accent-vscode-button-background"
 							onChange={(e) => setInputValue(parseFloat(e.target.value))}
 						/>

+ 25 - 0
webview-ui/src/components/settings/__tests__/ApiOptions.test.tsx

@@ -35,6 +35,31 @@ jest.mock("vscrui", () => ({
 	Pane: ({ children }: any) => <div>{children}</div>,
 }))
 
+// Mock @shadcn/ui components
+jest.mock("@/components/ui", () => ({
+	Select: ({ children, value, onValueChange }: any) => (
+		<div className="select-mock">
+			<select value={value} onChange={(e) => onValueChange && onValueChange(e.target.value)}>
+				{children}
+			</select>
+		</div>
+	),
+	SelectTrigger: ({ children }: any) => <div className="select-trigger-mock">{children}</div>,
+	SelectValue: ({ children }: any) => <div className="select-value-mock">{children}</div>,
+	SelectContent: ({ children }: any) => <div className="select-content-mock">{children}</div>,
+	SelectGroup: ({ children }: any) => <div className="select-group-mock">{children}</div>,
+	SelectItem: ({ children, value }: any) => (
+		<option value={value} className="select-item-mock">
+			{children}
+		</option>
+	),
+	Button: ({ children, onClick }: any) => (
+		<button onClick={onClick} className="button-mock">
+			{children}
+		</button>
+	),
+}))
+
 jest.mock("../TemperatureControl", () => ({
 	TemperatureControl: ({ value, onChange }: any) => (
 		<div data-testid="temperature-control">

+ 1 - 1
webview-ui/src/components/settings/__tests__/TemperatureControl.test.tsx

@@ -33,7 +33,7 @@ describe("TemperatureControl", () => {
 		fireEvent.click(checkbox)
 		// Waiting for debounce
 		await new Promise((x) => setTimeout(x, 100))
-		expect(onChange).toHaveBeenCalledWith(undefined)
+		expect(onChange).toHaveBeenCalledWith(null)
 
 		// Check - should restore previous temperature
 		fireEvent.click(checkbox)

+ 2 - 1
webview-ui/src/components/ui/index.ts

@@ -11,6 +11,7 @@ export * from "./popover"
 export * from "./progress"
 export * from "./separator"
 export * from "./slider"
+export * from "./select-dropdown"
+export * from "./select"
 export * from "./textarea"
 export * from "./tooltip"
-export * from "./select-dropdown"

+ 144 - 0
webview-ui/src/components/ui/select.tsx

@@ -0,0 +1,144 @@
+import * as React from "react"
+import * as SelectPrimitive from "@radix-ui/react-select"
+import { CheckIcon, ChevronDownIcon, ChevronUpIcon } from "lucide-react"
+
+import { cn } from "@/lib/utils"
+
+function Select({ ...props }: React.ComponentProps<typeof SelectPrimitive.Root>) {
+	return <SelectPrimitive.Root data-slot="select" {...props} />
+}
+
+function SelectGroup({ ...props }: React.ComponentProps<typeof SelectPrimitive.Group>) {
+	return <SelectPrimitive.Group data-slot="select-group" {...props} />
+}
+
+function SelectValue({ ...props }: React.ComponentProps<typeof SelectPrimitive.Value>) {
+	return <SelectPrimitive.Value data-slot="select-value" {...props} />
+}
+
+function SelectTrigger({ className, children, ...props }: React.ComponentProps<typeof SelectPrimitive.Trigger>) {
+	return (
+		<SelectPrimitive.Trigger
+			data-slot="select-trigger"
+			className={cn(
+				"border-input data-[placeholder]:text-muted-foreground [&_svg:not([class*='text-'])]:text-muted-foreground focus-visible:border-vscode-focusBorder aria-invalid:border-destructive flex h-7 w-fit items-center justify-between gap-2 rounded-xs border bg-vscode-input-background hover:bg-transparent px-3 py-2 text-sm whitespace-nowrap shadow-xs transition-[color,box-shadow] outline-none disabled:cursor-not-allowed disabled:opacity-50 *:data-[slot=select-value]:line-clamp-1 *:data-[slot=select-value]:flex *:data-[slot=select-value]:items-center *:data-[slot=select-value]:gap-2 [&_svg]:pointer-events-none [&_svg]:shrink-0 [&_svg:not([class*='size-'])]:size-4 cursor-pointer",
+				className,
+			)}
+			{...props}>
+			{children}
+			<SelectPrimitive.Icon asChild>
+				<ChevronDownIcon className="size-4 opacity-50" />
+			</SelectPrimitive.Icon>
+		</SelectPrimitive.Trigger>
+	)
+}
+
+function SelectContent({
+	className,
+	children,
+	position = "popper",
+	...props
+}: React.ComponentProps<typeof SelectPrimitive.Content>) {
+	return (
+		<SelectPrimitive.Portal>
+			<SelectPrimitive.Content
+				data-slot="select-content"
+				className={cn(
+					"bg-popover text-popover-foreground data-[state=open]:animate-in data-[state=closed]:animate-out data-[state=closed]:fade-out-0 data-[state=open]:fade-in-0 data-[state=closed]:zoom-out-95 data-[state=open]:zoom-in-95 data-[side=bottom]:slide-in-from-top-2 data-[side=left]:slide-in-from-right-2 data-[side=right]:slide-in-from-left-2 data-[side=top]:slide-in-from-bottom-2 relative z-50 max-h-72 min-w-[8rem] overflow-hidden rounded-xs border border-vscode-focusBorder shadow-md",
+					position === "popper" &&
+						"data-[side=bottom]:translate-y-1 data-[side=left]:-translate-x-1 data-[side=right]:translate-x-1 data-[side=top]:-translate-y-1",
+					className,
+				)}
+				position={position}
+				{...props}>
+				<SelectScrollUpButton />
+				<SelectPrimitive.Viewport
+					className={cn(
+						"p-1",
+						position === "popper" &&
+							"h-[var(--radix-select-trigger-height)] w-full min-w-[var(--radix-select-trigger-width)] scroll-my-1",
+					)}>
+					{children}
+				</SelectPrimitive.Viewport>
+				<SelectScrollDownButton />
+			</SelectPrimitive.Content>
+		</SelectPrimitive.Portal>
+	)
+}
+
+function SelectLabel({ className, ...props }: React.ComponentProps<typeof SelectPrimitive.Label>) {
+	return (
+		<SelectPrimitive.Label
+			data-slot="select-label"
+			className={cn("px-2 py-1.5 text-sm font-medium", className)}
+			{...props}
+		/>
+	)
+}
+
+function SelectItem({ className, children, ...props }: React.ComponentProps<typeof SelectPrimitive.Item>) {
+	return (
+		<SelectPrimitive.Item
+			data-slot="select-item"
+			className={cn(
+				"focus:bg-accent focus:text-accent-foreground [&_svg:not([class*='text-'])]:text-muted-foreground relative flex w-full cursor-default items-center gap-2 rounded-xs py-1.5 pr-8 pl-2 text-sm outline-hidden select-none data-[disabled]:pointer-events-none data-[disabled]:opacity-50 [&_svg]:pointer-events-none [&_svg]:shrink-0 [&_svg:not([class*='size-'])]:size-4 *:[span]:last:flex *:[span]:last:items-center *:[span]:last:gap-2 cursor-pointer",
+				className,
+			)}
+			{...props}>
+			<span className="absolute right-2 flex size-3.5 items-center justify-center">
+				<SelectPrimitive.ItemIndicator>
+					<CheckIcon className="size-4" />
+				</SelectPrimitive.ItemIndicator>
+			</span>
+			<SelectPrimitive.ItemText>{children}</SelectPrimitive.ItemText>
+		</SelectPrimitive.Item>
+	)
+}
+
+function SelectSeparator({ className, ...props }: React.ComponentProps<typeof SelectPrimitive.Separator>) {
+	return (
+		<SelectPrimitive.Separator
+			data-slot="select-separator"
+			className={cn("bg-border pointer-events-none -mx-1 my-1 h-px", className)}
+			{...props}
+		/>
+	)
+}
+
+function SelectScrollUpButton({ className, ...props }: React.ComponentProps<typeof SelectPrimitive.ScrollUpButton>) {
+	return (
+		<SelectPrimitive.ScrollUpButton
+			data-slot="select-scroll-up-button"
+			className={cn("flex cursor-default items-center justify-center py-1", className)}
+			{...props}>
+			<ChevronUpIcon className="size-4" />
+		</SelectPrimitive.ScrollUpButton>
+	)
+}
+
+function SelectScrollDownButton({
+	className,
+	...props
+}: React.ComponentProps<typeof SelectPrimitive.ScrollDownButton>) {
+	return (
+		<SelectPrimitive.ScrollDownButton
+			data-slot="select-scroll-down-button"
+			className={cn("flex cursor-default items-center justify-center py-1", className)}
+			{...props}>
+			<ChevronDownIcon className="size-4" />
+		</SelectPrimitive.ScrollDownButton>
+	)
+}
+
+export {
+	Select,
+	SelectContent,
+	SelectGroup,
+	SelectItem,
+	SelectLabel,
+	SelectScrollDownButton,
+	SelectScrollUpButton,
+	SelectSeparator,
+	SelectTrigger,
+	SelectValue,
+}

+ 134 - 0
webview-ui/src/utils/__tests__/model-utils.test.ts

@@ -0,0 +1,134 @@
+/**
+ * @fileoverview Tests for token and model utility functions
+ */
+
+import {
+	getMaxTokensForModel,
+	calculateTokenDistribution,
+	ModelInfo,
+	ApiConfig,
+	DEFAULT_THINKING_MODEL_MAX_TOKENS,
+} from "../model-utils"
+
+describe("Model utility functions", () => {
+	describe("getMaxTokensForModel", () => {
+		/**
+		 * Testing the specific fix in commit cc79178f:
+		 * For thinking models, use apiConfig.modelMaxTokens if available,
+		 * otherwise fall back to 16_384 (not modelInfo.maxTokens)
+		 */
+
+		it("should return apiConfig.modelMaxTokens for thinking models when provided", () => {
+			const modelInfo: ModelInfo = {
+				thinking: true,
+				maxTokens: 8000,
+			}
+
+			const apiConfig: ApiConfig = {
+				modelMaxTokens: 4000,
+			}
+
+			expect(getMaxTokensForModel(modelInfo, apiConfig)).toBe(4000)
+		})
+
+		it("should return 16_384 for thinking models when modelMaxTokens not provided", () => {
+			const modelInfo: ModelInfo = {
+				thinking: true,
+				maxTokens: 8000,
+			}
+
+			const apiConfig: ApiConfig = {}
+
+			// This tests the specific fix: fall back to DEFAULT_THINKING_MODEL_MAX_TOKENS rather than modelInfo.maxTokens
+			expect(getMaxTokensForModel(modelInfo, apiConfig)).toBe(DEFAULT_THINKING_MODEL_MAX_TOKENS)
+		})
+
+		it("should return 16_384 for thinking models when apiConfig is undefined", () => {
+			const modelInfo: ModelInfo = {
+				thinking: true,
+				maxTokens: 8000,
+			}
+
+			expect(getMaxTokensForModel(modelInfo, undefined)).toBe(DEFAULT_THINKING_MODEL_MAX_TOKENS)
+		})
+
+		it("should return modelInfo.maxTokens for non-thinking models", () => {
+			const modelInfo: ModelInfo = {
+				thinking: false,
+				maxTokens: 8000,
+			}
+
+			const apiConfig: ApiConfig = {
+				modelMaxTokens: 4000,
+			}
+
+			expect(getMaxTokensForModel(modelInfo, apiConfig)).toBe(8000)
+		})
+
+		it("should return undefined for non-thinking models with undefined maxTokens", () => {
+			const modelInfo: ModelInfo = {
+				thinking: false,
+			}
+
+			const apiConfig: ApiConfig = {
+				modelMaxTokens: 4000,
+			}
+
+			expect(getMaxTokensForModel(modelInfo, apiConfig)).toBeUndefined()
+		})
+
+		it("should return undefined when modelInfo is undefined", () => {
+			const apiConfig: ApiConfig = {
+				modelMaxTokens: 4000,
+			}
+
+			expect(getMaxTokensForModel(undefined, apiConfig)).toBeUndefined()
+		})
+	})
+
+	describe("calculateTokenDistribution", () => {
+		it("should calculate token distribution correctly", () => {
+			const contextWindow = 10000
+			const contextTokens = 5000
+			const maxTokens = 2000
+
+			const result = calculateTokenDistribution(contextWindow, contextTokens, maxTokens)
+
+			expect(result.reservedForOutput).toBe(maxTokens)
+			expect(result.availableSize).toBe(3000) // 10000 - 5000 - 2000
+
+			// Percentages should sum to 100%
+			expect(Math.round(result.currentPercent + result.reservedPercent + result.availablePercent)).toBe(100)
+		})
+
+		it("should default to 20% of context window when maxTokens not provided", () => {
+			const contextWindow = 10000
+			const contextTokens = 5000
+
+			const result = calculateTokenDistribution(contextWindow, contextTokens)
+
+			expect(result.reservedForOutput).toBe(2000) // 20% of 10000
+			expect(result.availableSize).toBe(3000) // 10000 - 5000 - 2000
+		})
+
+		it("should handle negative or zero inputs by using positive fallbacks", () => {
+			const result = calculateTokenDistribution(-1000, -500)
+
+			expect(result.currentPercent).toBe(0)
+			expect(result.reservedPercent).toBe(0)
+			expect(result.availablePercent).toBe(0)
+			expect(result.reservedForOutput).toBe(0) // Negative inputs are clamped to 0, so the default 20% reservation of a 0-token window is 0
+			expect(result.availableSize).toBe(0)
+		})
+
+		it("should handle zero total tokens without division by zero errors", () => {
+			const result = calculateTokenDistribution(0, 0, 0)
+
+			expect(result.currentPercent).toBe(0)
+			expect(result.reservedPercent).toBe(0)
+			expect(result.availablePercent).toBe(0)
+			expect(result.reservedForOutput).toBe(0)
+			expect(result.availableSize).toBe(0)
+		})
+	})
+})

+ 6 - 1
webview-ui/src/utils/model-utils.ts

@@ -2,6 +2,11 @@
  * Utility functions for working with language models and tokens
  */
 
+/**
+ * Default maximum tokens for thinking-capable models when no specific value is provided
+ */
+export const DEFAULT_THINKING_MODEL_MAX_TOKENS = 16_384
+
 /**
  * Model information interface with properties used in token calculations
  */
@@ -70,7 +75,7 @@ export const getMaxTokensForModel = (
 	apiConfig: ApiConfig | undefined,
 ): number | undefined => {
 	if (modelInfo?.thinking) {
-		return apiConfig?.modelMaxTokens || modelInfo?.maxTokens
+		return apiConfig?.modelMaxTokens || DEFAULT_THINKING_MODEL_MAX_TOKENS
 	}
 	return modelInfo?.maxTokens
 }