Browse Source

Enable browser-use tool for all image-capable models (#8121)

Co-authored-by: ellipsis-dev[bot] <65095814+ellipsis-dev[bot]@users.noreply.github.com>
Co-authored-by: Hannes Rudolph <[email protected]>
Co-authored-by: Matt Rubens <[email protected]>
roomote[bot] cách đây 2 tháng
mục cha
commit
d9ed33db67
65 tập tin đã thay đổi với 132 bổ sung và 355 xóa
  1. 0 1
      packages/types/src/model.ts
  2. 0 7
      packages/types/src/providers/anthropic.ts
  3. 0 27
      packages/types/src/providers/bedrock.ts
  4. 0 1
      packages/types/src/providers/glama.ts
  5. 0 39
      packages/types/src/providers/lite-llm.ts
  6. 0 1
      packages/types/src/providers/lm-studio.ts
  7. 0 1
      packages/types/src/providers/ollama.ts
  8. 0 14
      packages/types/src/providers/openrouter.ts
  9. 0 1
      packages/types/src/providers/requesty.ts
  10. 0 1
      packages/types/src/providers/vercel-ai-gateway.ts
  11. 0 7
      packages/types/src/providers/vertex.ts
  12. 0 1
      src/api/providers/__tests__/glama.spec.ts
  13. 0 2
      src/api/providers/__tests__/openrouter.spec.ts
  14. 0 3
      src/api/providers/__tests__/requesty.spec.ts
  15. 0 2
      src/api/providers/__tests__/unbound.spec.ts
  16. 0 4
      src/api/providers/__tests__/vercel-ai-gateway.spec.ts
  17. 0 13
      src/api/providers/fetchers/__tests__/litellm.spec.ts
  18. 0 1
      src/api/providers/fetchers/__tests__/lmstudio.test.ts
  19. 0 2
      src/api/providers/fetchers/__tests__/ollama.test.ts
  20. 0 19
      src/api/providers/fetchers/__tests__/openrouter.spec.ts
  21. 0 7
      src/api/providers/fetchers/__tests__/vercel-ai-gateway.spec.ts
  22. 0 1
      src/api/providers/fetchers/glama.ts
  23. 0 1
      src/api/providers/fetchers/huggingface.ts
  24. 0 1
      src/api/providers/fetchers/io-intelligence.ts
  25. 0 16
      src/api/providers/fetchers/litellm.ts
  26. 0 1
      src/api/providers/fetchers/lmstudio.ts
  27. 0 1
      src/api/providers/fetchers/ollama.ts
  28. 0 7
      src/api/providers/fetchers/openrouter.ts
  29. 0 1
      src/api/providers/fetchers/requesty.ts
  30. 0 1
      src/api/providers/fetchers/unbound.ts
  31. 0 2
      src/api/providers/fetchers/vercel-ai-gateway.ts
  32. 0 1
      src/api/providers/human-relay.ts
  33. 0 3
      src/core/condense/__tests__/index.spec.ts
  34. 2 58
      src/core/prompts/__tests__/__snapshots__/system-prompt/with-different-viewport-size.snap
  35. 5 5
      src/core/prompts/__tests__/add-custom-instructions.spec.ts
  36. 3 3
      src/core/prompts/__tests__/custom-system-prompt.spec.ts
  37. 16 16
      src/core/prompts/__tests__/system-prompt.spec.ts
  38. 15 4
      src/core/task/Task.ts
  39. 0 2
      src/core/task/__tests__/Task.spec.ts
  40. 0 1
      src/core/webview/__tests__/ClineProvider.spec.ts
  41. 0 1
      src/core/webview/__tests__/ClineProvider.sticky-mode.spec.ts
  42. 83 0
      src/core/webview/__tests__/generateSystemPrompt.browser-capability.spec.ts
  43. 8 5
      src/core/webview/generateSystemPrompt.ts
  44. 0 5
      webview-ui/src/components/settings/ModelInfoView.tsx
  45. 0 1
      webview-ui/src/components/settings/__tests__/ModelPicker.spec.tsx
  46. 0 24
      webview-ui/src/components/settings/providers/OpenAICompatible.tsx
  47. 0 4
      webview-ui/src/components/ui/hooks/__tests__/useSelectedModel.spec.ts
  48. 0 2
      webview-ui/src/i18n/locales/ca/settings.json
  49. 0 2
      webview-ui/src/i18n/locales/de/settings.json
  50. 0 2
      webview-ui/src/i18n/locales/en/settings.json
  51. 0 2
      webview-ui/src/i18n/locales/es/settings.json
  52. 0 2
      webview-ui/src/i18n/locales/fr/settings.json
  53. 0 2
      webview-ui/src/i18n/locales/hi/settings.json
  54. 0 2
      webview-ui/src/i18n/locales/id/settings.json
  55. 0 2
      webview-ui/src/i18n/locales/it/settings.json
  56. 0 2
      webview-ui/src/i18n/locales/ja/settings.json
  57. 0 2
      webview-ui/src/i18n/locales/ko/settings.json
  58. 0 2
      webview-ui/src/i18n/locales/nl/settings.json
  59. 0 2
      webview-ui/src/i18n/locales/pl/settings.json
  60. 0 2
      webview-ui/src/i18n/locales/pt-BR/settings.json
  61. 0 2
      webview-ui/src/i18n/locales/ru/settings.json
  62. 0 2
      webview-ui/src/i18n/locales/tr/settings.json
  63. 0 2
      webview-ui/src/i18n/locales/vi/settings.json
  64. 0 2
      webview-ui/src/i18n/locales/zh-CN/settings.json
  65. 0 2
      webview-ui/src/i18n/locales/zh-TW/settings.json

+ 0 - 1
packages/types/src/model.ts

@@ -57,7 +57,6 @@ export const modelInfoSchema = z.object({
 	maxThinkingTokens: z.number().nullish(),
 	contextWindow: z.number(),
 	supportsImages: z.boolean().optional(),
-	supportsComputerUse: z.boolean().optional(),
 	supportsPromptCache: z.boolean(),
 	// Capability flag to indicate whether the model supports an output verbosity parameter
 	supportsVerbosity: z.boolean().optional(),

+ 0 - 7
packages/types/src/providers/anthropic.ts

@@ -10,7 +10,6 @@ export const anthropicModels = {
 		maxTokens: 64_000, // Overridden to 8k if `enableReasoningEffort` is false.
 		contextWindow: 200_000, // Default 200K, extendable to 1M with beta flag 'context-1m-2025-08-07'
 		supportsImages: true,
-		supportsComputerUse: true,
 		supportsPromptCache: true,
 		inputPrice: 3.0, // $3 per million input tokens (≤200K context)
 		outputPrice: 15.0, // $15 per million output tokens (≤200K context)
@@ -32,7 +31,6 @@ export const anthropicModels = {
 		maxTokens: 64_000, // Overridden to 8k if `enableReasoningEffort` is false.
 		contextWindow: 200_000, // Default 200K, extendable to 1M with beta flag 'context-1m-2025-08-07'
 		supportsImages: true,
-		supportsComputerUse: true,
 		supportsPromptCache: true,
 		inputPrice: 3.0, // $3 per million input tokens (≤200K context)
 		outputPrice: 15.0, // $15 per million output tokens (≤200K context)
@@ -54,7 +52,6 @@ export const anthropicModels = {
 		maxTokens: 8192,
 		contextWindow: 200_000,
 		supportsImages: true,
-		supportsComputerUse: true,
 		supportsPromptCache: true,
 		inputPrice: 15.0, // $15 per million input tokens
 		outputPrice: 75.0, // $75 per million output tokens
@@ -66,7 +63,6 @@ export const anthropicModels = {
 		maxTokens: 32_000, // Overridden to 8k if `enableReasoningEffort` is false.
 		contextWindow: 200_000,
 		supportsImages: true,
-		supportsComputerUse: true,
 		supportsPromptCache: true,
 		inputPrice: 15.0, // $15 per million input tokens
 		outputPrice: 75.0, // $75 per million output tokens
@@ -78,7 +74,6 @@ export const anthropicModels = {
 		maxTokens: 128_000, // Unlocked by passing `beta` flag to the model. Otherwise, it's 64k.
 		contextWindow: 200_000,
 		supportsImages: true,
-		supportsComputerUse: true,
 		supportsPromptCache: true,
 		inputPrice: 3.0, // $3 per million input tokens
 		outputPrice: 15.0, // $15 per million output tokens
@@ -91,7 +86,6 @@ export const anthropicModels = {
 		maxTokens: 8192, // Since we already have a `:thinking` virtual model we aren't setting `supportsReasoningBudget: true` here.
 		contextWindow: 200_000,
 		supportsImages: true,
-		supportsComputerUse: true,
 		supportsPromptCache: true,
 		inputPrice: 3.0, // $3 per million input tokens
 		outputPrice: 15.0, // $15 per million output tokens
@@ -102,7 +96,6 @@ export const anthropicModels = {
 		maxTokens: 8192,
 		contextWindow: 200_000,
 		supportsImages: true,
-		supportsComputerUse: true,
 		supportsPromptCache: true,
 		inputPrice: 3.0, // $3 per million input tokens
 		outputPrice: 15.0, // $15 per million output tokens

+ 0 - 27
packages/types/src/providers/bedrock.ts

@@ -17,7 +17,6 @@ export const bedrockModels = {
 		maxTokens: 8192,
 		contextWindow: 200_000,
 		supportsImages: true,
-		supportsComputerUse: true,
 		supportsPromptCache: true,
 		supportsReasoningBudget: true,
 		inputPrice: 3.0,
@@ -32,7 +31,6 @@ export const bedrockModels = {
 		maxTokens: 5000,
 		contextWindow: 300_000,
 		supportsImages: true,
-		supportsComputerUse: false,
 		supportsPromptCache: true,
 		inputPrice: 0.8,
 		outputPrice: 3.2,
@@ -46,7 +44,6 @@ export const bedrockModels = {
 		maxTokens: 5000,
 		contextWindow: 300_000,
 		supportsImages: true,
-		supportsComputerUse: false,
 		supportsPromptCache: false,
 		inputPrice: 1.0,
 		outputPrice: 4.0,
@@ -58,7 +55,6 @@ export const bedrockModels = {
 		maxTokens: 5000,
 		contextWindow: 300_000,
 		supportsImages: true,
-		supportsComputerUse: false,
 		supportsPromptCache: true,
 		inputPrice: 0.06,
 		outputPrice: 0.24,
@@ -72,7 +68,6 @@ export const bedrockModels = {
 		maxTokens: 5000,
 		contextWindow: 128_000,
 		supportsImages: false,
-		supportsComputerUse: false,
 		supportsPromptCache: true,
 		inputPrice: 0.035,
 		outputPrice: 0.14,
@@ -86,7 +81,6 @@ export const bedrockModels = {
 		maxTokens: 8192,
 		contextWindow: 200_000,
 		supportsImages: true,
-		supportsComputerUse: true,
 		supportsPromptCache: true,
 		supportsReasoningBudget: true,
 		inputPrice: 3.0,
@@ -101,7 +95,6 @@ export const bedrockModels = {
 		maxTokens: 8192,
 		contextWindow: 200_000,
 		supportsImages: true,
-		supportsComputerUse: true,
 		supportsPromptCache: true,
 		supportsReasoningBudget: true,
 		inputPrice: 15.0,
@@ -116,7 +109,6 @@ export const bedrockModels = {
 		maxTokens: 8192,
 		contextWindow: 200_000,
 		supportsImages: true,
-		supportsComputerUse: true,
 		supportsPromptCache: true,
 		supportsReasoningBudget: true,
 		inputPrice: 15.0,
@@ -131,7 +123,6 @@ export const bedrockModels = {
 		maxTokens: 8192,
 		contextWindow: 200_000,
 		supportsImages: true,
-		supportsComputerUse: true,
 		supportsPromptCache: true,
 		supportsReasoningBudget: true,
 		inputPrice: 3.0,
@@ -146,7 +137,6 @@ export const bedrockModels = {
 		maxTokens: 8192,
 		contextWindow: 200_000,
 		supportsImages: true,
-		supportsComputerUse: true,
 		supportsPromptCache: true,
 		inputPrice: 3.0,
 		outputPrice: 15.0,
@@ -254,7 +244,6 @@ export const bedrockModels = {
 		maxTokens: 8192,
 		contextWindow: 128_000,
 		supportsImages: false,
-		supportsComputerUse: false,
 		supportsPromptCache: false,
 		inputPrice: 0.5,
 		outputPrice: 1.5,
@@ -264,7 +253,6 @@ export const bedrockModels = {
 		maxTokens: 8192,
 		contextWindow: 128_000,
 		supportsImages: false,
-		supportsComputerUse: false,
 		supportsPromptCache: false,
 		inputPrice: 2.0,
 		outputPrice: 6.0,
@@ -274,7 +262,6 @@ export const bedrockModels = {
 		maxTokens: 8192,
 		contextWindow: 128_000,
 		supportsImages: false,
-		supportsComputerUse: false,
 		supportsPromptCache: false,
 		inputPrice: 0.72,
 		outputPrice: 0.72,
@@ -284,7 +271,6 @@ export const bedrockModels = {
 		maxTokens: 8192,
 		contextWindow: 128_000,
 		supportsImages: true,
-		supportsComputerUse: false,
 		supportsPromptCache: false,
 		inputPrice: 0.72,
 		outputPrice: 0.72,
@@ -294,7 +280,6 @@ export const bedrockModels = {
 		maxTokens: 8192,
 		contextWindow: 128_000,
 		supportsImages: true,
-		supportsComputerUse: false,
 		supportsPromptCache: false,
 		inputPrice: 0.16,
 		outputPrice: 0.16,
@@ -304,7 +289,6 @@ export const bedrockModels = {
 		maxTokens: 8192,
 		contextWindow: 128_000,
 		supportsImages: false,
-		supportsComputerUse: false,
 		supportsPromptCache: false,
 		inputPrice: 0.15,
 		outputPrice: 0.15,
@@ -314,7 +298,6 @@ export const bedrockModels = {
 		maxTokens: 8192,
 		contextWindow: 128_000,
 		supportsImages: false,
-		supportsComputerUse: false,
 		supportsPromptCache: false,
 		inputPrice: 0.1,
 		outputPrice: 0.1,
@@ -324,7 +307,6 @@ export const bedrockModels = {
 		maxTokens: 8192,
 		contextWindow: 128_000,
 		supportsImages: false,
-		supportsComputerUse: false,
 		supportsPromptCache: false,
 		inputPrice: 2.4,
 		outputPrice: 2.4,
@@ -334,7 +316,6 @@ export const bedrockModels = {
 		maxTokens: 8192,
 		contextWindow: 128_000,
 		supportsImages: false,
-		supportsComputerUse: false,
 		supportsPromptCache: false,
 		inputPrice: 0.72,
 		outputPrice: 0.72,
@@ -344,7 +325,6 @@ export const bedrockModels = {
 		maxTokens: 8192,
 		contextWindow: 128_000,
 		supportsImages: false,
-		supportsComputerUse: false,
 		supportsPromptCache: false,
 		inputPrice: 0.9,
 		outputPrice: 0.9,
@@ -354,7 +334,6 @@ export const bedrockModels = {
 		maxTokens: 8192,
 		contextWindow: 8_000,
 		supportsImages: false,
-		supportsComputerUse: false,
 		supportsPromptCache: false,
 		inputPrice: 0.22,
 		outputPrice: 0.22,
@@ -364,7 +343,6 @@ export const bedrockModels = {
 		maxTokens: 2048,
 		contextWindow: 8_000,
 		supportsImages: false,
-		supportsComputerUse: false,
 		supportsPromptCache: false,
 		inputPrice: 2.65,
 		outputPrice: 3.5,
@@ -373,7 +351,6 @@ export const bedrockModels = {
 		maxTokens: 2048,
 		contextWindow: 4_000,
 		supportsImages: false,
-		supportsComputerUse: false,
 		supportsPromptCache: false,
 		inputPrice: 0.3,
 		outputPrice: 0.6,
@@ -382,7 +359,6 @@ export const bedrockModels = {
 		maxTokens: 4096,
 		contextWindow: 8_000,
 		supportsImages: false,
-		supportsComputerUse: false,
 		supportsPromptCache: false,
 		inputPrice: 0.15,
 		outputPrice: 0.2,
@@ -392,7 +368,6 @@ export const bedrockModels = {
 		maxTokens: 4096,
 		contextWindow: 8_000,
 		supportsImages: false,
-		supportsComputerUse: false,
 		supportsPromptCache: false,
 		inputPrice: 0.2,
 		outputPrice: 0.6,
@@ -402,7 +377,6 @@ export const bedrockModels = {
 		maxTokens: 8192,
 		contextWindow: 8_000,
 		supportsImages: false,
-		supportsComputerUse: false,
 		supportsPromptCache: false,
 		inputPrice: 0.1,
 		description: "Amazon Titan Text Embeddings",
@@ -411,7 +385,6 @@ export const bedrockModels = {
 		maxTokens: 8192,
 		contextWindow: 8_000,
 		supportsImages: false,
-		supportsComputerUse: false,
 		supportsPromptCache: false,
 		inputPrice: 0.02,
 		description: "Amazon Titan Text Embeddings V2",

+ 0 - 1
packages/types/src/providers/glama.ts

@@ -7,7 +7,6 @@ export const glamaDefaultModelInfo: ModelInfo = {
 	maxTokens: 8192,
 	contextWindow: 200_000,
 	supportsImages: true,
-	supportsComputerUse: true,
 	supportsPromptCache: true,
 	inputPrice: 3.0,
 	outputPrice: 15.0,

+ 0 - 39
packages/types/src/providers/lite-llm.ts

@@ -7,48 +7,9 @@ export const litellmDefaultModelInfo: ModelInfo = {
 	maxTokens: 8192,
 	contextWindow: 200_000,
 	supportsImages: true,
-	supportsComputerUse: true,
 	supportsPromptCache: true,
 	inputPrice: 3.0,
 	outputPrice: 15.0,
 	cacheWritesPrice: 3.75,
 	cacheReadsPrice: 0.3,
 }
-
-export const LITELLM_COMPUTER_USE_MODELS = new Set([
-	"claude-3-5-sonnet-latest",
-	"claude-opus-4-1-20250805",
-	"claude-opus-4-20250514",
-	"claude-sonnet-4-20250514",
-	"claude-3-7-sonnet-latest",
-	"claude-3-7-sonnet-20250219",
-	"claude-3-5-sonnet-20241022",
-	"vertex_ai/claude-3-5-sonnet",
-	"vertex_ai/claude-3-5-sonnet-v2",
-	"vertex_ai/claude-3-5-sonnet-v2@20241022",
-	"vertex_ai/claude-3-7-sonnet@20250219",
-	"vertex_ai/claude-opus-4-1@20250805",
-	"vertex_ai/claude-opus-4@20250514",
-	"vertex_ai/claude-sonnet-4@20250514",
-	"vertex_ai/claude-sonnet-4-5@20250929",
-	"openrouter/anthropic/claude-3.5-sonnet",
-	"openrouter/anthropic/claude-3.5-sonnet:beta",
-	"openrouter/anthropic/claude-3.7-sonnet",
-	"openrouter/anthropic/claude-3.7-sonnet:beta",
-	"anthropic.claude-opus-4-1-20250805-v1:0",
-	"anthropic.claude-opus-4-20250514-v1:0",
-	"anthropic.claude-sonnet-4-20250514-v1:0",
-	"anthropic.claude-3-7-sonnet-20250219-v1:0",
-	"anthropic.claude-3-5-sonnet-20241022-v2:0",
-	"us.anthropic.claude-3-5-sonnet-20241022-v2:0",
-	"us.anthropic.claude-3-7-sonnet-20250219-v1:0",
-	"us.anthropic.claude-opus-4-1-20250805-v1:0",
-	"us.anthropic.claude-opus-4-20250514-v1:0",
-	"us.anthropic.claude-sonnet-4-20250514-v1:0",
-	"eu.anthropic.claude-3-5-sonnet-20241022-v2:0",
-	"eu.anthropic.claude-3-7-sonnet-20250219-v1:0",
-	"eu.anthropic.claude-opus-4-1-20250805-v1:0",
-	"eu.anthropic.claude-opus-4-20250514-v1:0",
-	"eu.anthropic.claude-sonnet-4-20250514-v1:0",
-	"snowflake/claude-3-5-sonnet",
-])

+ 0 - 1
packages/types/src/providers/lm-studio.ts

@@ -9,7 +9,6 @@ export const lMStudioDefaultModelInfo: ModelInfo = {
 	maxTokens: 8192,
 	contextWindow: 200_000,
 	supportsImages: true,
-	supportsComputerUse: true,
 	supportsPromptCache: true,
 	inputPrice: 0,
 	outputPrice: 0,

+ 0 - 1
packages/types/src/providers/ollama.ts

@@ -7,7 +7,6 @@ export const ollamaDefaultModelInfo: ModelInfo = {
 	maxTokens: 4096,
 	contextWindow: 200_000,
 	supportsImages: true,
-	supportsComputerUse: true,
 	supportsPromptCache: true,
 	inputPrice: 0,
 	outputPrice: 0,

+ 0 - 14
packages/types/src/providers/openrouter.ts

@@ -7,7 +7,6 @@ export const openRouterDefaultModelInfo: ModelInfo = {
 	maxTokens: 8192,
 	contextWindow: 200_000,
 	supportsImages: true,
-	supportsComputerUse: true,
 	supportsPromptCache: true,
 	inputPrice: 3.0,
 	outputPrice: 15.0,
@@ -52,19 +51,6 @@ export const OPEN_ROUTER_PROMPT_CACHING_MODELS = new Set([
 	"google/gemini-flash-1.5-8b",
 ])
 
-// https://www.anthropic.com/news/3-5-models-and-computer-use
-export const OPEN_ROUTER_COMPUTER_USE_MODELS = new Set([
-	"anthropic/claude-3.5-sonnet",
-	"anthropic/claude-3.5-sonnet:beta",
-	"anthropic/claude-3.7-sonnet",
-	"anthropic/claude-3.7-sonnet:beta",
-	"anthropic/claude-3.7-sonnet:thinking",
-	"anthropic/claude-sonnet-4",
-	"anthropic/claude-sonnet-4.5",
-	"anthropic/claude-opus-4",
-	"anthropic/claude-opus-4.1",
-])
-
 // When we first launched these models we didn't have support for
 // enabling/disabling the reasoning budget for hybrid models. Now that we
 // do support this we should give users the option to enable/disable it

+ 0 - 1
packages/types/src/providers/requesty.ts

@@ -8,7 +8,6 @@ export const requestyDefaultModelInfo: ModelInfo = {
 	maxTokens: 8192,
 	contextWindow: 200_000,
 	supportsImages: true,
-	supportsComputerUse: true,
 	supportsPromptCache: true,
 	inputPrice: 3.0,
 	outputPrice: 15.0,

+ 0 - 1
packages/types/src/providers/vercel-ai-gateway.ts

@@ -89,7 +89,6 @@ export const vercelAiGatewayDefaultModelInfo: ModelInfo = {
 	maxTokens: 64000,
 	contextWindow: 200000,
 	supportsImages: true,
-	supportsComputerUse: true,
 	supportsPromptCache: true,
 	inputPrice: 3,
 	outputPrice: 15,

+ 0 - 7
packages/types/src/providers/vertex.ts

@@ -167,7 +167,6 @@ export const vertexModels = {
 		maxTokens: 8192,
 		contextWindow: 200_000,
 		supportsImages: true,
-		supportsComputerUse: true,
 		supportsPromptCache: true,
 		inputPrice: 3.0,
 		outputPrice: 15.0,
@@ -179,7 +178,6 @@ export const vertexModels = {
 		maxTokens: 8192,
 		contextWindow: 200_000,
 		supportsImages: true,
-		supportsComputerUse: true,
 		supportsPromptCache: true,
 		inputPrice: 3.0,
 		outputPrice: 15.0,
@@ -202,7 +200,6 @@ export const vertexModels = {
 		maxTokens: 8192,
 		contextWindow: 200_000,
 		supportsImages: true,
-		supportsComputerUse: true,
 		supportsPromptCache: true,
 		inputPrice: 15.0,
 		outputPrice: 75.0,
@@ -214,7 +211,6 @@ export const vertexModels = {
 		maxTokens: 8192,
 		contextWindow: 200_000,
 		supportsImages: true,
-		supportsComputerUse: true,
 		supportsPromptCache: true,
 		inputPrice: 15.0,
 		outputPrice: 75.0,
@@ -225,7 +221,6 @@ export const vertexModels = {
 		maxTokens: 64_000,
 		contextWindow: 200_000,
 		supportsImages: true,
-		supportsComputerUse: true,
 		supportsPromptCache: true,
 		inputPrice: 3.0,
 		outputPrice: 15.0,
@@ -238,7 +233,6 @@ export const vertexModels = {
 		maxTokens: 8192,
 		contextWindow: 200_000,
 		supportsImages: true,
-		supportsComputerUse: true,
 		supportsPromptCache: true,
 		inputPrice: 3.0,
 		outputPrice: 15.0,
@@ -249,7 +243,6 @@ export const vertexModels = {
 		maxTokens: 8192,
 		contextWindow: 200_000,
 		supportsImages: true,
-		supportsComputerUse: true,
 		supportsPromptCache: true,
 		inputPrice: 3.0,
 		outputPrice: 15.0,

+ 0 - 1
src/api/providers/__tests__/glama.spec.ts

@@ -20,7 +20,6 @@ vitest.mock("../fetchers/modelCache", () => ({
 				cacheReadsPrice: 0.3,
 				description: "Claude 3.7 Sonnet",
 				thinking: false,
-				supportsComputerUse: true,
 			},
 			"openai/gpt-4o": {
 				maxTokens: 4096,

+ 0 - 2
src/api/providers/__tests__/openrouter.spec.ts

@@ -27,7 +27,6 @@ vitest.mock("../fetchers/modelCache", () => ({
 				cacheReadsPrice: 0.3,
 				description: "Claude 3.7 Sonnet",
 				thinking: false,
-				supportsComputerUse: true,
 			},
 			"anthropic/claude-3.7-sonnet:thinking": {
 				maxTokens: 128000,
@@ -39,7 +38,6 @@ vitest.mock("../fetchers/modelCache", () => ({
 				cacheWritesPrice: 3.75,
 				cacheReadsPrice: 0.3,
 				description: "Claude 3.7 Sonnet with thinking",
-				supportsComputerUse: true,
 			},
 		})
 	}),

+ 0 - 3
src/api/providers/__tests__/requesty.spec.ts

@@ -31,7 +31,6 @@ vitest.mock("../fetchers/modelCache", () => ({
 				contextWindow: 200000,
 				supportsImages: true,
 				supportsPromptCache: true,
-				supportsComputerUse: true,
 				inputPrice: 3,
 				outputPrice: 15,
 				cacheWritesPrice: 3.75,
@@ -92,7 +91,6 @@ describe("RequestyHandler", () => {
 					contextWindow: 200000,
 					supportsImages: true,
 					supportsPromptCache: true,
-					supportsComputerUse: true,
 					inputPrice: 3,
 					outputPrice: 15,
 					cacheWritesPrice: 3.75,
@@ -113,7 +111,6 @@ describe("RequestyHandler", () => {
 					contextWindow: 200000,
 					supportsImages: true,
 					supportsPromptCache: true,
-					supportsComputerUse: true,
 					inputPrice: 3,
 					outputPrice: 15,
 					cacheWritesPrice: 3.75,

+ 0 - 2
src/api/providers/__tests__/unbound.spec.ts

@@ -21,7 +21,6 @@ vitest.mock("../fetchers/modelCache", () => ({
 				cacheReadsPrice: 0.3,
 				description: "Claude 3.5 Sonnet",
 				thinking: false,
-				supportsComputerUse: true,
 			},
 			"anthropic/claude-3-7-sonnet-20250219": {
 				maxTokens: 8192,
@@ -34,7 +33,6 @@ vitest.mock("../fetchers/modelCache", () => ({
 				cacheReadsPrice: 0.3,
 				description: "Claude 3.7 Sonnet",
 				thinking: false,
-				supportsComputerUse: true,
 			},
 			"openai/gpt-4o": {
 				maxTokens: 4096,

+ 0 - 4
src/api/providers/__tests__/vercel-ai-gateway.spec.ts

@@ -26,7 +26,6 @@ vitest.mock("../fetchers/modelCache", () => ({
 				cacheWritesPrice: 3.75,
 				cacheReadsPrice: 0.3,
 				description: "Claude Sonnet 4",
-				supportsComputerUse: true,
 			},
 			"anthropic/claude-3.5-haiku": {
 				maxTokens: 32000,
@@ -38,7 +37,6 @@ vitest.mock("../fetchers/modelCache", () => ({
 				cacheWritesPrice: 1.25,
 				cacheReadsPrice: 0.1,
 				description: "Claude 3.5 Haiku",
-				supportsComputerUse: false,
 			},
 			"openai/gpt-4o": {
 				maxTokens: 16000,
@@ -50,7 +48,6 @@ vitest.mock("../fetchers/modelCache", () => ({
 				cacheWritesPrice: 3.125,
 				cacheReadsPrice: 0.25,
 				description: "GPT-4o",
-				supportsComputerUse: true,
 			},
 		})
 	}),
@@ -115,7 +112,6 @@ describe("VercelAiGatewayHandler", () => {
 			expect(result.info.contextWindow).toBe(200000)
 			expect(result.info.supportsImages).toBe(true)
 			expect(result.info.supportsPromptCache).toBe(true)
-			expect(result.info.supportsComputerUse).toBe(true)
 		})
 
 		it("returns default model info when options are not provided", async () => {

+ 0 - 13
src/api/providers/fetchers/__tests__/litellm.spec.ts

@@ -221,7 +221,6 @@ describe("getLiteLLMModels", () => {
 				maxTokens: 4096,
 				contextWindow: 200000,
 				supportsImages: true,
-				supportsComputerUse: true,
 				supportsPromptCache: false,
 				inputPrice: 3,
 				outputPrice: 15,
@@ -231,7 +230,6 @@ describe("getLiteLLMModels", () => {
 				maxTokens: 8192,
 				contextWindow: 128000,
 				supportsImages: false,
-				supportsComputerUse: false,
 				supportsPromptCache: false,
 				inputPrice: 10,
 				outputPrice: 30,
@@ -300,7 +298,6 @@ describe("getLiteLLMModels", () => {
 			maxTokens: 4096,
 			contextWindow: 200000,
 			supportsImages: true,
-			supportsComputerUse: true,
 			supportsPromptCache: false,
 			inputPrice: undefined,
 			outputPrice: undefined,
@@ -311,7 +308,6 @@ describe("getLiteLLMModels", () => {
 			maxTokens: 4096,
 			contextWindow: 200000,
 			supportsImages: false,
-			supportsComputerUse: false,
 			supportsPromptCache: false,
 			inputPrice: undefined,
 			outputPrice: undefined,
@@ -446,7 +442,6 @@ describe("getLiteLLMModels", () => {
 			maxTokens: 4096,
 			contextWindow: 200000,
 			supportsImages: true,
-			supportsComputerUse: true, // Should be true due to fallback
 			supportsPromptCache: false,
 			inputPrice: undefined,
 			outputPrice: undefined,
@@ -457,7 +452,6 @@ describe("getLiteLLMModels", () => {
 			maxTokens: 8192,
 			contextWindow: 128000,
 			supportsImages: false,
-			supportsComputerUse: false, // Should be false as it's not in fallback list
 			supportsPromptCache: false,
 			inputPrice: undefined,
 			outputPrice: undefined,
@@ -520,7 +514,6 @@ describe("getLiteLLMModels", () => {
 			maxTokens: 4096,
 			contextWindow: 200000,
 			supportsImages: true,
-			supportsComputerUse: false, // False because explicitly set to false (fallback ignored)
 			supportsPromptCache: false,
 			inputPrice: undefined,
 			outputPrice: undefined,
@@ -531,7 +524,6 @@ describe("getLiteLLMModels", () => {
 			maxTokens: 8192,
 			contextWindow: 128000,
 			supportsImages: false,
-			supportsComputerUse: true, // True because explicitly set to true
 			supportsPromptCache: false,
 			inputPrice: undefined,
 			outputPrice: undefined,
@@ -542,7 +534,6 @@ describe("getLiteLLMModels", () => {
 			maxTokens: 8192,
 			contextWindow: 128000,
 			supportsImages: false,
-			supportsComputerUse: false, // False because explicitly set to false
 			supportsPromptCache: false,
 			inputPrice: undefined,
 			outputPrice: undefined,
@@ -597,9 +588,5 @@ describe("getLiteLLMModels", () => {
 		mockedAxios.get.mockResolvedValue(mockResponse)
 
 		const result = await getLiteLLMModels("test-api-key", "http://localhost:4000")
-
-		expect(result["vertex-claude"].supportsComputerUse).toBe(true)
-		expect(result["openrouter-claude"].supportsComputerUse).toBe(true)
-		expect(result["bedrock-claude"].supportsComputerUse).toBe(true)
 	})
 })

+ 0 - 1
src/api/providers/fetchers/__tests__/lmstudio.test.ts

@@ -60,7 +60,6 @@ describe("LMStudio Fetcher", () => {
 				contextWindow: rawModel.contextLength,
 				supportsPromptCache: true,
 				supportsImages: rawModel.vision,
-				supportsComputerUse: false,
 				maxTokens: rawModel.contextLength,
 				inputPrice: 0,
 				outputPrice: 0,

+ 0 - 2
src/api/providers/fetchers/__tests__/ollama.test.ts

@@ -21,7 +21,6 @@ describe("Ollama Fetcher", () => {
 				maxTokens: 40960,
 				contextWindow: 40960,
 				supportsImages: false,
-				supportsComputerUse: false,
 				supportsPromptCache: true,
 				inputPrice: 0,
 				outputPrice: 0,
@@ -46,7 +45,6 @@ describe("Ollama Fetcher", () => {
 				maxTokens: 40960,
 				contextWindow: 40960,
 				supportsImages: false,
-				supportsComputerUse: false,
 				supportsPromptCache: true,
 				inputPrice: 0,
 				outputPrice: 0,

+ 0 - 19
src/api/providers/fetchers/__tests__/openrouter.spec.ts

@@ -6,7 +6,6 @@ import { back as nockBack } from "nock"
 
 import {
 	OPEN_ROUTER_PROMPT_CACHING_MODELS,
-	OPEN_ROUTER_COMPUTER_USE_MODELS,
 	OPEN_ROUTER_REASONING_BUDGET_MODELS,
 	OPEN_ROUTER_REQUIRED_REASONING_BUDGET_MODELS,
 } from "@roo-code/types"
@@ -51,22 +50,6 @@ describe("OpenRouter API", () => {
 
 			expect(ourCachingModels.sort()).toEqual(expectedCachingModels)
 
-			const excludedComputerUseModels = new Set([
-				"anthropic/claude-opus-4.1", // Not yet available in OpenRouter API
-				"anthropic/claude-sonnet-4.5", // Not yet available in OpenRouter API
-			])
-
-			const expectedComputerUseModels = Array.from(OPEN_ROUTER_COMPUTER_USE_MODELS)
-				.filter((id) => !excludedComputerUseModels.has(id))
-				.sort()
-
-			expect(
-				Object.entries(models)
-					.filter(([_, model]) => model.supportsComputerUse)
-					.map(([id, _]) => id)
-					.sort(),
-			).toEqual(expectedComputerUseModels)
-
 			expect(
 				Object.entries(models)
 					.filter(([_, model]) => model.supportsReasoningEffort)
@@ -174,7 +157,6 @@ describe("OpenRouter API", () => {
 				cacheWritesPrice: 3.75,
 				cacheReadsPrice: 0.3,
 				description: expect.any(String),
-				supportsComputerUse: true,
 				supportsReasoningBudget: false,
 				supportsReasoningEffort: false,
 				supportedParameters: ["max_tokens", "temperature", "reasoning", "include_reasoning"],
@@ -190,7 +172,6 @@ describe("OpenRouter API", () => {
 				cacheWritesPrice: 3.75,
 				cacheReadsPrice: 0.3,
 				description: expect.any(String),
-				supportsComputerUse: true,
 				supportsReasoningBudget: true,
 				requiredReasoningBudget: true,
 				supportsReasoningEffort: true,

+ 0 - 7
src/api/providers/fetchers/__tests__/vercel-ai-gateway.spec.ts

@@ -176,7 +176,6 @@ describe("Vercel AI Gateway Fetchers", () => {
 				maxTokens: 8000,
 				contextWindow: 100000,
 				supportsImages: false,
-				supportsComputerUse: false,
 				supportsPromptCache: false,
 				inputPrice: 2500000,
 				outputPrice: 10000000,
@@ -222,7 +221,6 @@ describe("Vercel AI Gateway Fetchers", () => {
 			})
 
 			expect(result.supportsImages).toBe(VERCEL_AI_GATEWAY_VISION_ONLY_MODELS.has("anthropic/claude-3.5-haiku"))
-			expect(result.supportsComputerUse).toBe(false)
 		})
 
 		it("detects vision and tools models", () => {
@@ -240,9 +238,6 @@ describe("Vercel AI Gateway Fetchers", () => {
 			expect(result.supportsImages).toBe(
 				VERCEL_AI_GATEWAY_VISION_AND_TOOLS_MODELS.has("anthropic/claude-sonnet-4"),
 			)
-			expect(result.supportsComputerUse).toBe(
-				VERCEL_AI_GATEWAY_VISION_AND_TOOLS_MODELS.has("anthropic/claude-sonnet-4"),
-			)
 		})
 
 		it("handles missing cache pricing", () => {
@@ -298,7 +293,6 @@ describe("Vercel AI Gateway Fetchers", () => {
 						model: { ...baseModel, id: modelId },
 					})
 					expect(result.supportsImages).toBe(true)
-					expect(result.supportsComputerUse).toBe(false)
 				}
 			})
 
@@ -309,7 +303,6 @@ describe("Vercel AI Gateway Fetchers", () => {
 						model: { ...baseModel, id: modelId },
 					})
 					expect(result.supportsImages).toBe(true)
-					expect(result.supportsComputerUse).toBe(true)
 				}
 			})
 		})

+ 0 - 1
src/api/providers/fetchers/glama.ts

@@ -16,7 +16,6 @@ export async function getGlamaModels(): Promise<Record<string, ModelInfo>> {
 				maxTokens: rawModel.maxTokensOutput,
 				contextWindow: rawModel.maxTokensInput,
 				supportsImages: rawModel.capabilities?.includes("input:image"),
-				supportsComputerUse: rawModel.capabilities?.includes("computer_use"),
 				supportsPromptCache: rawModel.capabilities?.includes("caching"),
 				inputPrice: parseApiPrice(rawModel.pricePerToken?.input),
 				outputPrice: parseApiPrice(rawModel.pricePerToken?.output),

+ 0 - 1
src/api/providers/fetchers/huggingface.ts

@@ -95,7 +95,6 @@ function parseHuggingFaceModel(model: HuggingFaceModel, provider?: HuggingFacePr
 		contextWindow: contextLength,
 		supportsImages: false, // HuggingFace API doesn't provide this info yet.
 		supportsPromptCache: false,
-		supportsComputerUse: false,
 		inputPrice: pricing?.input,
 		outputPrice: pricing?.output,
 		description,

+ 0 - 1
src/api/providers/fetchers/io-intelligence.ts

@@ -75,7 +75,6 @@ function parseIOIntelligenceModel(model: IOIntelligenceModel): ModelInfo {
 		contextWindow: contextLength,
 		supportsImages,
 		supportsPromptCache: false,
-		supportsComputerUse: false,
 		description: `${model.id} via IO Intelligence`,
 	}
 }

+ 0 - 16
src/api/providers/fetchers/litellm.ts

@@ -1,7 +1,5 @@
 import axios from "axios"
 
-import { LITELLM_COMPUTER_USE_MODELS } from "@roo-code/types"
-
 import type { ModelRecord } from "../../../shared/api"
 
 import { DEFAULT_HEADERS } from "../constants"
@@ -33,8 +31,6 @@ export async function getLiteLLMModels(apiKey: string, baseUrl: string): Promise
 		const response = await axios.get(url, { headers, timeout: 5000 })
 		const models: ModelRecord = {}
 
-		const computerModels = Array.from(LITELLM_COMPUTER_USE_MODELS)
-
 		// Process the model info from the response
 		if (response.data && response.data.data && Array.isArray(response.data.data)) {
 			for (const model of response.data.data) {
@@ -44,23 +40,11 @@ export async function getLiteLLMModels(apiKey: string, baseUrl: string): Promise
 
 				if (!modelName || !modelInfo || !litellmModelName) continue
 
-				// Use explicit supports_computer_use if available, otherwise fall back to hardcoded list
-				let supportsComputerUse: boolean
-				if (modelInfo.supports_computer_use !== undefined) {
-					supportsComputerUse = Boolean(modelInfo.supports_computer_use)
-				} else {
-					// Fallback for older LiteLLM versions that don't have supports_computer_use field
-					supportsComputerUse = computerModels.some((computer_model) =>
-						litellmModelName.endsWith(computer_model),
-					)
-				}
-
 				models[modelName] = {
 					maxTokens: modelInfo.max_tokens || 8192,
 					contextWindow: modelInfo.max_input_tokens || 200000,
 					supportsImages: Boolean(modelInfo.supports_vision),
 					// litellm_params.model may have a prefix like openrouter/
-					supportsComputerUse,
 					supportsPromptCache: Boolean(modelInfo.supports_prompt_caching),
 					inputPrice: modelInfo.input_cost_per_token ? modelInfo.input_cost_per_token * 1000000 : undefined,
 					outputPrice: modelInfo.output_cost_per_token

+ 0 - 1
src/api/providers/fetchers/lmstudio.ts

@@ -43,7 +43,6 @@ export const parseLMStudioModel = (rawModel: LLMInstanceInfo | LLMInfo): ModelIn
 		contextWindow: contextLength,
 		supportsPromptCache: true,
 		supportsImages: rawModel.vision,
-		supportsComputerUse: false,
 		maxTokens: contextLength,
 	})
 

+ 0 - 1
src/api/providers/fetchers/ollama.ts

@@ -47,7 +47,6 @@ export const parseOllamaModel = (rawModel: OllamaModelInfoResponse): ModelInfo =
 		contextWindow: contextWindow || ollamaDefaultModelInfo.contextWindow,
 		supportsPromptCache: true,
 		supportsImages: rawModel.capabilities?.includes("vision"),
-		supportsComputerUse: false,
 		maxTokens: contextWindow || ollamaDefaultModelInfo.contextWindow,
 	})
 

+ 0 - 7
src/api/providers/fetchers/openrouter.ts

@@ -4,7 +4,6 @@ import { z } from "zod"
 import {
 	type ModelInfo,
 	isModelParameter,
-	OPEN_ROUTER_COMPUTER_USE_MODELS,
 	OPEN_ROUTER_REASONING_BUDGET_MODELS,
 	OPEN_ROUTER_REQUIRED_REASONING_BUDGET_MODELS,
 	anthropicModels,
@@ -220,12 +219,6 @@ export const parseOpenRouterModel = ({
 		supportedParameters: supportedParameters ? supportedParameters.filter(isModelParameter) : undefined,
 	}
 
-	// The OpenRouter model definition doesn't give us any hints about
-	// computer use, so we need to set that manually.
-	if (OPEN_ROUTER_COMPUTER_USE_MODELS.has(id)) {
-		modelInfo.supportsComputerUse = true
-	}
-
 	if (OPEN_ROUTER_REASONING_BUDGET_MODELS.has(id)) {
 		modelInfo.supportsReasoningBudget = true
 	}

+ 0 - 1
src/api/providers/fetchers/requesty.ts

@@ -36,7 +36,6 @@ export async function getRequestyModels(baseUrl?: string, apiKey?: string): Prom
 				contextWindow: rawModel.context_window,
 				supportsPromptCache: rawModel.supports_caching,
 				supportsImages: rawModel.supports_vision,
-				supportsComputerUse: rawModel.supports_computer_use,
 				supportsReasoningBudget: reasoningBudget,
 				supportsReasoningEffort: reasoningEffort,
 				inputPrice: parseApiPrice(rawModel.input_price),

+ 0 - 1
src/api/providers/fetchers/unbound.ts

@@ -23,7 +23,6 @@ export async function getUnboundModels(apiKey?: string | null): Promise<Record<s
 					contextWindow: model?.contextWindow ? parseInt(model.contextWindow) : 0,
 					supportsImages: model?.supportsImages ?? false,
 					supportsPromptCache: model?.supportsPromptCaching ?? false,
-					supportsComputerUse: model?.supportsComputerUse ?? false,
 					inputPrice: model?.inputTokenPrice ? parseFloat(model.inputTokenPrice) : undefined,
 					outputPrice: model?.outputTokenPrice ? parseFloat(model.outputTokenPrice) : undefined,
 					cacheWritesPrice: model?.cacheWritePrice ? parseFloat(model.cacheWritePrice) : undefined,

+ 0 - 2
src/api/providers/fetchers/vercel-ai-gateway.ts

@@ -102,13 +102,11 @@ export const parseVercelAiGatewayModel = ({ id, model }: { id: string; model: Ve
 	const supportsPromptCache = typeof cacheWritesPrice !== "undefined" && typeof cacheReadsPrice !== "undefined"
 	const supportsImages =
 		VERCEL_AI_GATEWAY_VISION_ONLY_MODELS.has(id) || VERCEL_AI_GATEWAY_VISION_AND_TOOLS_MODELS.has(id)
-	const supportsComputerUse = VERCEL_AI_GATEWAY_VISION_AND_TOOLS_MODELS.has(id)
 
 	const modelInfo: ModelInfo = {
 		maxTokens: model.max_tokens,
 		contextWindow: model.context_window,
 		supportsImages,
-		supportsComputerUse,
 		supportsPromptCache,
 		inputPrice: parseApiPrice(model.pricing?.input),
 		outputPrice: parseApiPrice(model.pricing?.output),

+ 0 - 1
src/api/providers/human-relay.ts

@@ -70,7 +70,6 @@ export class HumanRelayHandler implements ApiHandler, SingleCompletionHandler {
 				contextWindow: 100000,
 				supportsImages: true,
 				supportsPromptCache: false,
-				supportsComputerUse: true,
 				inputPrice: 0,
 				outputPrice: 0,
 				description: "Calling web-side AI model through human relay",

+ 0 - 3
src/core/condense/__tests__/index.spec.ts

@@ -102,7 +102,6 @@ describe("summarizeConversation", () => {
 				info: {
 					contextWindow: 8000,
 					supportsImages: true,
-					supportsComputerUse: true,
 					supportsVision: true,
 					maxTokens: 4000,
 					supportsPromptCache: true,
@@ -577,7 +576,6 @@ describe("summarizeConversation with custom settings", () => {
 				info: {
 					contextWindow: 8000,
 					supportsImages: true,
-					supportsComputerUse: true,
 					supportsVision: true,
 					maxTokens: 4000,
 					supportsPromptCache: true,
@@ -601,7 +599,6 @@ describe("summarizeConversation with custom settings", () => {
 				info: {
 					contextWindow: 4000,
 					supportsImages: true,
-					supportsComputerUse: false,
 					supportsVision: false,
 					maxTokens: 2000,
 					supportsPromptCache: false,

+ 2 - 58
src/core/prompts/__tests__/__snapshots__/system-prompt/with-different-viewport-size.snap

@@ -262,59 +262,6 @@ Examples:
 <ignore_case>true</ignore_case>
 </search_and_replace>
 
-## browser_action
-Description: Request to interact with a Puppeteer-controlled browser. Every action, except `close`, will be responded to with a screenshot of the browser's current state, along with any new console logs. You may only perform one browser action per message, and wait for the user's response including a screenshot and logs to determine the next action.
-- The sequence of actions **must always start with** launching the browser at a URL, and **must always end with** closing the browser. If you need to visit a new URL that is not possible to navigate to from the current webpage, you must first close the browser, then launch again at the new URL.
-- While the browser is active, only the `browser_action` tool can be used. No other tools should be called during this time. You may proceed to use other tools only after closing the browser. For example if you run into an error and need to fix a file, you must close the browser, then use other tools to make the necessary changes, then re-launch the browser to verify the result.
-- The browser window has a resolution of **900x600** pixels. When performing any click actions, ensure the coordinates are within this resolution range.
-- Before clicking on any elements such as icons, links, or buttons, you must consult the provided screenshot of the page to determine the coordinates of the element. The click should be targeted at the **center of the element**, not on its edges.
-Parameters:
-- action: (required) The action to perform. The available actions are:
-    * launch: Launch a new Puppeteer-controlled browser instance at the specified URL. This **must always be the first action**.
-        - Use with the `url` parameter to provide the URL.
-        - Ensure the URL is valid and includes the appropriate protocol (e.g. http://localhost:3000/page, file:///path/to/file.html, etc.)
-    * hover: Move the cursor to a specific x,y coordinate.
-        - Use with the `coordinate` parameter to specify the location.
-        - Always move to the center of an element (icon, button, link, etc.) based on coordinates derived from a screenshot.
-    * click: Click at a specific x,y coordinate.
-        - Use with the `coordinate` parameter to specify the location.
-        - Always click in the center of an element (icon, button, link, etc.) based on coordinates derived from a screenshot.
-    * type: Type a string of text on the keyboard. You might use this after clicking on a text field to input text.
-        - Use with the `text` parameter to provide the string to type.
-    * resize: Resize the viewport to a specific w,h size.
-        - Use with the `size` parameter to specify the new size.
-    * scroll_down: Scroll down the page by one page height.
-    * scroll_up: Scroll up the page by one page height.
-    * close: Close the Puppeteer-controlled browser instance. This **must always be the final browser action**.
-        - Example: `<action>close</action>`
-- url: (optional) Use this for providing the URL for the `launch` action.
-    * Example: <url>https://example.com</url>
-- coordinate: (optional) The X and Y coordinates for the `click` and `hover` actions. Coordinates should be within the **900x600** resolution.
-    * Example: <coordinate>450,300</coordinate>
-- size: (optional) The width and height for the `resize` action.
-    * Example: <size>1280,720</size>
-- text: (optional) Use this for providing the text for the `type` action.
-    * Example: <text>Hello, world!</text>
-Usage:
-<browser_action>
-<action>Action to perform (e.g., launch, click, type, scroll_down, scroll_up, close)</action>
-<url>URL to launch the browser at (optional)</url>
-<coordinate>x,y coordinates (optional)</coordinate>
-<text>Text to type (optional)</text>
-</browser_action>
-
-Example: Requesting to launch a browser at https://example.com
-<browser_action>
-<action>launch</action>
-<url>https://example.com</url>
-</browser_action>
-
-Example: Requesting to click on the element at coordinates 450,300
-<browser_action>
-<action>click</action>
-<coordinate>450,300</coordinate>
-</browser_action>
-
 ## ask_followup_question
 Description: Ask the user a question to gather additional information needed to complete the task. Use when you need clarification or more details to proceed effectively.
 
@@ -494,14 +441,12 @@ By waiting for and carefully considering the user's response after each tool use
 
 CAPABILITIES
 
-- You have access to tools that let you execute CLI commands on the user's computer, list files, view source code definitions, regex search, use the browser, read and write files, and ask follow-up questions. These tools help you effectively accomplish a wide range of tasks, such as writing code, making edits or improvements to existing files, understanding the current state of a project, performing system operations, and much more.
+- You have access to tools that let you execute CLI commands on the user's computer, list files, view source code definitions, regex search, read and write files, and ask follow-up questions. These tools help you effectively accomplish a wide range of tasks, such as writing code, making edits or improvements to existing files, understanding the current state of a project, performing system operations, and much more.
 - When the user initially gives you a task, a recursive list of all filepaths in the current workspace directory ('/test/path') will be included in environment_details. This provides an overview of the project's file structure, offering key insights into the project from directory/file names (how developers conceptualize and organize their code) and file extensions (the language used). This can also guide decision-making on which files to explore further. If you need to further explore directories such as outside the current workspace directory, you can use the list_files tool. If you pass 'true' for the recursive parameter, it will list files recursively. Otherwise, it will list files at the top level, which is better suited for generic directories where you don't necessarily need the nested structure, like the Desktop.
 - You can use search_files to perform regex searches across files in a specified directory, outputting context-rich results that include surrounding lines. This is particularly useful for understanding code patterns, finding specific implementations, or identifying areas that need refactoring.
 - You can use the list_code_definition_names tool to get an overview of source code definitions for all files at the top level of a specified directory. This can be particularly useful when you need to understand the broader context and relationships between certain parts of the code. You may need to call this tool multiple times to understand various parts of the codebase related to the task.
     - For example, when asked to make edits or improvements you might analyze the file structure in the initial environment_details to get an overview of the project, then use list_code_definition_names to get further insight using source code definitions for files located in relevant directories, then read_file to examine the contents of relevant files, analyze the code and suggest improvements or make necessary edits, then use the write_to_file tool to apply the changes. If you refactored code that could affect other parts of the codebase, you could use search_files to ensure you update other files as needed.
 - You can use the execute_command tool to run commands on the user's computer whenever you feel it can help accomplish the user's task. When you need to execute a CLI command, you must provide a clear explanation of what the command does. Prefer to execute complex CLI commands over creating executable scripts, since they are more flexible and easier to run. Interactive and long-running commands are allowed, since the commands are run in the user's VSCode terminal. The user may keep commands running in the background and you will be kept updated on their status along the way. Each command you execute is run in a new terminal instance.
-- You can use the browser_action tool to interact with websites (including html files and locally running development servers) through a Puppeteer-controlled browser when you feel it is necessary in accomplishing the user's task. This tool is particularly useful for web development tasks as it allows you to launch a browser, navigate to pages, interact with elements through clicks and keyboard input, and capture the results through screenshots and console logs. This tool may be useful at key stages of web development tasks-such as after implementing new features, making substantial changes, when troubleshooting issues, or to verify the result of your work. You can analyze the provided screenshots to ensure correct rendering or identify errors, and review console logs for runtime issues.
-  - For example, if asked to add a component to a react website, you might create the necessary files, use execute_command to run the site locally, then use browser_action to launch the browser, navigate to the local server, and verify the component renders & functions correctly before closing the browser.
 
 ====
 
@@ -534,14 +479,13 @@ RULES
 - When executing commands, if you don't see the expected output, assume the terminal executed the command successfully and proceed with the task. The user's terminal may be unable to stream the output back properly. If you absolutely need to see the actual terminal output, use the ask_followup_question tool to request the user to copy and paste it back to you.
 - The user may provide a file's contents directly in their message, in which case you shouldn't use the read_file tool to get the file contents again since you already have it.
 - Your goal is to try to accomplish the user's task, NOT engage in a back and forth conversation.
-- The user may ask generic non-development tasks, such as "what's the latest news" or "look up the weather in San Diego", in which case you might use the browser_action tool to complete the task if it makes sense to do so, rather than trying to create a website or using curl to answer the question. However, if an available MCP server tool or resource can be used instead, you should prefer to use it over browser_action.
 - NEVER end attempt_completion result with a question or request to engage in further conversation! Formulate the end of your result in a way that is final and does not require further input from the user.
 - You are STRICTLY FORBIDDEN from starting your messages with "Great", "Certainly", "Okay", "Sure". You should NOT be conversational in your responses, but rather direct and to the point. For example you should NOT say "Great, I've updated the CSS" but instead something like "I've updated the CSS". It is important you be clear and technical in your messages.
 - When presented with images, utilize your vision capabilities to thoroughly examine them and extract meaningful information. Incorporate these insights into your thought process as you accomplish the user's task.
 - At the end of each user message, you will automatically receive environment_details. This information is not written by the user themselves, but is auto-generated to provide potentially relevant context about the project structure and environment. While this information can be valuable for understanding the project context, do not treat it as a direct part of the user's request or response. Use it to inform your actions and decisions, but don't assume the user is explicitly asking about or referring to this information unless they clearly do so in their message. When using environment_details, explain your actions clearly to ensure the user understands, as they may not be aware of these details.
 - Before executing commands, check the "Actively Running Terminals" section in environment_details. If present, consider how these active processes might impact your task. For example, if a local development server is already running, you wouldn't need to start it again. If no active terminals are listed, proceed with command execution as normal.
 - MCP operations should be used one at a time, similar to other tool usage. Wait for confirmation of success before proceeding with additional operations.
-- It is critical you wait for the user's response after each tool use, in order to confirm the success of the tool use. For example, if asked to make a todo app, you would create a file, wait for the user's response it was created successfully, then create another file if needed, wait for the user's response it was created successfully, etc. Then if you want to test your work, you might use browser_action to launch the site, wait for the user's response confirming the site was launched along with a screenshot, then perhaps e.g., click a button to test functionality if needed, wait for the user's response confirming the button was clicked along with a screenshot of the new state, before finally closing the browser.
+- It is critical you wait for the user's response after each tool use, in order to confirm the success of the tool use. For example, if asked to make a todo app, you would create a file, wait for the user's response it was created successfully, then create another file if needed, wait for the user's response it was created successfully, etc.
 
 ====
 

+ 5 - 5
src/core/prompts/__tests__/add-custom-instructions.spec.ts

@@ -193,7 +193,7 @@ describe("addCustomInstructions", () => {
 		const prompt = await SYSTEM_PROMPT(
 			mockContext,
 			"/test/path",
-			false, // supportsComputerUse
+			false, // supportsImages
 			undefined, // mcpHub
 			undefined, // diffStrategy
 			undefined, // browserViewportSize
@@ -216,7 +216,7 @@ describe("addCustomInstructions", () => {
 		const prompt = await SYSTEM_PROMPT(
 			mockContext,
 			"/test/path",
-			false, // supportsComputerUse
+			false, // supportsImages
 			undefined, // mcpHub
 			undefined, // diffStrategy
 			undefined, // browserViewportSize
@@ -241,7 +241,7 @@ describe("addCustomInstructions", () => {
 		const prompt = await SYSTEM_PROMPT(
 			mockContext,
 			"/test/path",
-			false, // supportsComputerUse
+			false, // supportsImages
 			mockMcpHub, // mcpHub
 			undefined, // diffStrategy
 			undefined, // browserViewportSize
@@ -267,7 +267,7 @@ describe("addCustomInstructions", () => {
 		const prompt = await SYSTEM_PROMPT(
 			mockContext,
 			"/test/path",
-			false, // supportsComputerUse
+			false, // supportsImages
 			mockMcpHub, // mcpHub
 			undefined, // diffStrategy
 			undefined, // browserViewportSize
@@ -291,7 +291,7 @@ describe("addCustomInstructions", () => {
 		const prompt = await SYSTEM_PROMPT(
 			mockContext,
 			"/test/path",
-			false, // supportsComputerUse
+			false, // supportsImages
 			undefined, // mcpHub
 			undefined, // diffStrategy
 			undefined, // browserViewportSize

+ 3 - 3
src/core/prompts/__tests__/custom-system-prompt.spec.ts

@@ -96,7 +96,7 @@ describe("File-Based Custom System Prompt", () => {
 			const prompt = await SYSTEM_PROMPT(
 				mockContext,
 				"test/path", // Using a relative path without leading slash
-				false, // supportsComputerUse
+				false, // supportsImages
 				undefined, // mcpHub
 				undefined, // diffStrategy
 				undefined, // browserViewportSize
@@ -134,7 +134,7 @@ describe("File-Based Custom System Prompt", () => {
 		const prompt = await SYSTEM_PROMPT(
 			mockContext,
 			"test/path", // Using a relative path without leading slash
-			false, // supportsComputerUse
+			false, // supportsImages
 			undefined, // mcpHub
 			undefined, // diffStrategy
 			undefined, // browserViewportSize
@@ -180,7 +180,7 @@ describe("File-Based Custom System Prompt", () => {
 		const prompt = await SYSTEM_PROMPT(
 			mockContext,
 			"test/path", // Using a relative path without leading slash
-			false, // supportsComputerUse
+			false, // supportsImages
 			undefined, // mcpHub
 			undefined, // diffStrategy
 			undefined, // browserViewportSize

+ 16 - 16
src/core/prompts/__tests__/system-prompt.spec.ts

@@ -207,7 +207,7 @@ describe("SYSTEM_PROMPT", () => {
 		const prompt = await SYSTEM_PROMPT(
 			mockContext,
 			"/test/path",
-			false, // supportsComputerUse
+			false, // supportsImages
 			undefined, // mcpHub
 			undefined, // diffStrategy
 			undefined, // browserViewportSize
@@ -226,11 +226,11 @@ describe("SYSTEM_PROMPT", () => {
 		expect(prompt).toMatchFileSnapshot("./__snapshots__/system-prompt/consistent-system-prompt.snap")
 	})
 
-	it("should include browser actions when supportsComputerUse is true", async () => {
+	it("should include browser actions when supportsImages is true", async () => {
 		const prompt = await SYSTEM_PROMPT(
 			mockContext,
 			"/test/path",
-			true, // supportsComputerUse
+			true, // supportsImages
 			undefined, // mcpHub
 			undefined, // diffStrategy
 			"1280x800", // browserViewportSize
@@ -255,7 +255,7 @@ describe("SYSTEM_PROMPT", () => {
 		const prompt = await SYSTEM_PROMPT(
 			mockContext,
 			"/test/path",
-			false, // supportsComputerUse
+			false,
 			mockMcpHub, // mcpHub
 			undefined, // diffStrategy
 			undefined, // browserViewportSize
@@ -278,7 +278,7 @@ describe("SYSTEM_PROMPT", () => {
 		const prompt = await SYSTEM_PROMPT(
 			mockContext,
 			"/test/path",
-			false, // supportsComputerUse
+			false,
 			undefined, // explicitly undefined mcpHub
 			undefined, // diffStrategy
 			undefined, // browserViewportSize
@@ -301,7 +301,7 @@ describe("SYSTEM_PROMPT", () => {
 		const prompt = await SYSTEM_PROMPT(
 			mockContext,
 			"/test/path",
-			true, // supportsComputerUse
+			false,
 			undefined, // mcpHub
 			undefined, // diffStrategy
 			"900x600", // different viewport size
@@ -324,7 +324,7 @@ describe("SYSTEM_PROMPT", () => {
 		const prompt = await SYSTEM_PROMPT(
 			mockContext,
 			"/test/path",
-			false, // supportsComputerUse
+			false,
 			undefined, // mcpHub
 			new MultiSearchReplaceDiffStrategy(), // Use actual diff strategy from the codebase
 			undefined, // browserViewportSize
@@ -348,7 +348,7 @@ describe("SYSTEM_PROMPT", () => {
 		const prompt = await SYSTEM_PROMPT(
 			mockContext,
 			"/test/path",
-			false, // supportsComputerUse
+			false, // supportsImages
 			undefined, // mcpHub
 			new MultiSearchReplaceDiffStrategy(), // Use actual diff strategy from the codebase
 			undefined, // browserViewportSize
@@ -372,7 +372,7 @@ describe("SYSTEM_PROMPT", () => {
 		const prompt = await SYSTEM_PROMPT(
 			mockContext,
 			"/test/path",
-			false, // supportsComputerUse
+			false,
 			undefined, // mcpHub
 			new MultiSearchReplaceDiffStrategy(), // Use actual diff strategy from the codebase
 			undefined, // browserViewportSize
@@ -423,7 +423,7 @@ describe("SYSTEM_PROMPT", () => {
 		const prompt = await SYSTEM_PROMPT(
 			mockContext,
 			"/test/path",
-			false, // supportsComputerUse
+			false,
 			undefined, // mcpHub
 			undefined, // diffStrategy
 			undefined, // browserViewportSize
@@ -484,7 +484,7 @@ describe("SYSTEM_PROMPT", () => {
 		const prompt = await SYSTEM_PROMPT(
 			mockContext,
 			"/test/path",
-			false, // supportsComputerUse
+			false,
 			undefined, // mcpHub
 			undefined, // diffStrategy
 			undefined, // browserViewportSize
@@ -522,7 +522,7 @@ describe("SYSTEM_PROMPT", () => {
 		const prompt = await SYSTEM_PROMPT(
 			mockContext,
 			"/test/path",
-			false, // supportsComputerUse
+			false,
 			undefined, // mcpHub
 			undefined, // diffStrategy
 			undefined, // browserViewportSize
@@ -555,7 +555,7 @@ describe("SYSTEM_PROMPT", () => {
 		const prompt = await SYSTEM_PROMPT(
 			mockContext,
 			"/test/path",
-			false, // supportsComputerUse
+			false,
 			undefined, // mcpHub
 			undefined, // diffStrategy
 			undefined, // browserViewportSize
@@ -586,7 +586,7 @@ describe("SYSTEM_PROMPT", () => {
 		const prompt = await SYSTEM_PROMPT(
 			mockContext,
 			"/test/path",
-			false, // supportsComputerUse
+			false,
 			undefined, // mcpHub
 			undefined, // diffStrategy
 			undefined, // browserViewportSize
@@ -619,7 +619,7 @@ describe("SYSTEM_PROMPT", () => {
 		const prompt = await SYSTEM_PROMPT(
 			mockContext,
 			"/test/path",
-			false, // supportsComputerUse
+			false,
 			undefined, // mcpHub
 			undefined, // diffStrategy
 			undefined, // browserViewportSize
@@ -651,7 +651,7 @@ describe("SYSTEM_PROMPT", () => {
 		const prompt = await SYSTEM_PROMPT(
 			mockContext,
 			"/test/path",
-			false, // supportsComputerUse
+			false,
 			undefined, // mcpHub
 			undefined, // diffStrategy
 			undefined, // browserViewportSize

+ 15 - 4
src/core/task/Task.ts

@@ -52,7 +52,7 @@ import { t } from "../../i18n"
 import { ClineApiReqCancelReason, ClineApiReqInfo } from "../../shared/ExtensionMessage"
 import { getApiMetrics, hasTokenUsageChanged } from "../../shared/getApiMetrics"
 import { ClineAskResponse } from "../../shared/WebviewMessage"
-import { defaultModeSlug } from "../../shared/modes"
+import { defaultModeSlug, getModeBySlug, getGroupName } from "../../shared/modes"
 import { DiffStrategy } from "../../shared/tools"
 import { EXPERIMENT_IDS, experiments } from "../../shared/experiments"
 import { getModelMaxOutputTokens } from "../../shared/api"
@@ -2417,14 +2417,25 @@ export class Task extends EventEmitter<TaskEvents> implements TaskLike {
 				throw new Error("Provider not available")
 			}
 
+			// Align browser tool enablement with generateSystemPrompt: require model image support,
+			// mode to include the browser group, and the user setting to be enabled.
+			const modeConfig = getModeBySlug(mode ?? defaultModeSlug, customModes)
+			const modeSupportsBrowser = modeConfig?.groups.some((group) => getGroupName(group) === "browser") ?? false
+
+			// Check if model supports browser capability (images)
+			const modelInfo = this.api.getModel().info
+			const modelSupportsBrowser = (modelInfo as any)?.supportsImages === true
+
+			const canUseBrowserTool = modelSupportsBrowser && modeSupportsBrowser && (browserToolEnabled ?? true)
+
 			return SYSTEM_PROMPT(
 				provider.context,
 				this.cwd,
-				(this.api.getModel().info.supportsComputerUse ?? false) && (browserToolEnabled ?? true),
+				canUseBrowserTool,
 				mcpHub,
 				this.diffStrategy,
-				browserViewportSize,
-				mode,
+				browserViewportSize ?? "900x600",
+				mode ?? defaultModeSlug,
 				customModePrompts,
 				customModes,
 				customInstructions,

+ 0 - 2
src/core/task/__tests__/Task.spec.ts

@@ -516,7 +516,6 @@ describe("Cline", () => {
 					info: {
 						supportsImages: true,
 						supportsPromptCache: true,
-						supportsComputerUse: true,
 						contextWindow: 200000,
 						maxTokens: 4096,
 						inputPrice: 0.25,
@@ -539,7 +538,6 @@ describe("Cline", () => {
 					info: {
 						supportsImages: false,
 						supportsPromptCache: false,
-						supportsComputerUse: false,
 						contextWindow: 16000,
 						maxTokens: 2048,
 						inputPrice: 0.1,

+ 0 - 1
src/core/webview/__tests__/ClineProvider.spec.ts

@@ -291,7 +291,6 @@ vi.mock("../../../api", () => ({
 	buildApiHandler: vi.fn().mockReturnValue({
 		getModel: vi.fn().mockReturnValue({
 			id: "claude-3-sonnet",
-			info: { supportsComputerUse: false },
 		}),
 	}),
 }))

+ 0 - 1
src/core/webview/__tests__/ClineProvider.sticky-mode.spec.ts

@@ -84,7 +84,6 @@ vi.mock("../../../api", () => ({
 	buildApiHandler: vi.fn().mockReturnValue({
 		getModel: vi.fn().mockReturnValue({
 			id: "claude-3-sonnet",
-			info: { supportsComputerUse: false },
 		}),
 	}),
 }))

+ 83 - 0
src/core/webview/__tests__/generateSystemPrompt.browser-capability.spec.ts

@@ -0,0 +1,83 @@
+import { describe, test, expect, vi } from "vitest"
+
+// Module under test
+import { generateSystemPrompt } from "../generateSystemPrompt"
+
+// Mock SYSTEM_PROMPT to capture its third argument (browser capability flag)
+vi.mock("../../prompts/system", () => ({
+	SYSTEM_PROMPT: vi.fn(async (_ctx, _cwd, canUseBrowserTool: boolean) => {
+		// return a simple string to satisfy return type
+		return `SYSTEM_PROMPT:${canUseBrowserTool}`
+	}),
+}))
+
+// Mock API handler so we control model.info flags
+vi.mock("../../../api", () => ({
+	buildApiHandler: vi.fn((_config) => ({
+		getModel: () => ({
+			id: "mock-model",
+			info: {
+				supportsImages: true,
+				contextWindow: 200_000,
+				maxTokens: 8192,
+				supportsPromptCache: false,
+			},
+		}),
+	})),
+}))
+
+// Minimal mode utilities: provide a custom mode that includes the "browser" group
+const mockCustomModes = [
+	{
+		slug: "test-mode",
+		name: "Test Mode",
+		roleDefinition: "Test role",
+		description: "",
+		groups: ["browser"], // critical: include browser group
+	},
+]
+
+// Minimal ClineProvider stub
+function makeProviderStub() {
+	return {
+		cwd: "/tmp",
+		context: {} as any,
+		customModesManager: {
+			getCustomModes: async () => mockCustomModes,
+		},
+		getCurrentTask: () => ({
+			rooIgnoreController: { getInstructions: () => undefined },
+		}),
+		getMcpHub: () => undefined,
+		// State must enable browser tool and provide apiConfiguration
+		getState: async () => ({
+			apiConfiguration: {
+				apiProvider: "openrouter", // not used by the test beyond handler creation
+			},
+			customModePrompts: undefined,
+			customInstructions: undefined,
+			browserViewportSize: "900x600",
+			diffEnabled: false,
+			mcpEnabled: false,
+			fuzzyMatchThreshold: 1.0,
+			experiments: {},
+			enableMcpServerCreation: false,
+			browserToolEnabled: true, // critical: enabled in settings
+			language: "en",
+			maxReadFileLine: -1,
+			maxConcurrentFileReads: 5,
+		}),
+	} as any
+}
+
+describe("generateSystemPrompt browser capability (supportsImages=true)", () => {
+	test("passes canUseBrowserTool=true when mode has browser group and setting enabled", async () => {
+		const provider = makeProviderStub()
+		const message = { mode: "test-mode" } as any
+
+		const result = await generateSystemPrompt(provider, message)
+
+		// SYSTEM_PROMPT mock encodes the boolean into the returned string
+		expect(result).toBe("SYSTEM_PROMPT:true")
+	})
+})

+ 8 - 5
src/core/webview/generateSystemPrompt.ts

@@ -45,24 +45,27 @@ export const generateSystemPrompt = async (provider: ClineProvider, message: Web
 	const rooIgnoreInstructions = provider.getCurrentTask()?.rooIgnoreController?.getInstructions()
 
 	// Determine if browser tools can be used based on model support, mode, and user settings
-	let modelSupportsComputerUse = false
+	let modelInfo: any = undefined
 
-	// Create a temporary API handler to check if the model supports computer use
+	// Create a temporary API handler to check if the model supports browser capability
 	// This avoids relying on an active Cline instance which might not exist during preview
 	try {
 		const tempApiHandler = buildApiHandler(apiConfiguration)
-		modelSupportsComputerUse = tempApiHandler.getModel().info.supportsComputerUse ?? false
+		modelInfo = tempApiHandler.getModel().info
 	} catch (error) {
-		console.error("Error checking if model supports computer use:", error)
+		console.error("Error checking if model supports browser capability:", error)
 	}
 
 	// Check if the current mode includes the browser tool group
 	const modeConfig = getModeBySlug(mode, customModes)
 	const modeSupportsBrowser = modeConfig?.groups.some((group) => getGroupName(group) === "browser") ?? false
 
+	// Check if model supports browser capability (images)
+	const modelSupportsBrowser = modelInfo && (modelInfo as any)?.supportsImages === true
+
 	// Only enable browser tools if the model supports it, the mode includes browser tools,
 	// and browser tools are enabled in settings
-	const canUseBrowserTool = modelSupportsComputerUse && modeSupportsBrowser && (browserToolEnabled ?? true)
+	const canUseBrowserTool = modelSupportsBrowser && modeSupportsBrowser && (browserToolEnabled ?? true)
 
 	const systemPrompt = await SYSTEM_PROMPT(
 		provider.context,

+ 0 - 5
webview-ui/src/components/settings/ModelInfoView.tsx

@@ -49,11 +49,6 @@ export const ModelInfoView = ({
 			supportsLabel={t("settings:modelInfo.supportsImages")}
 			doesNotSupportLabel={t("settings:modelInfo.noImages")}
 		/>,
-		<ModelInfoSupportsItem
-			isSupported={modelInfo?.supportsComputerUse ?? false}
-			supportsLabel={t("settings:modelInfo.supportsComputerUse")}
-			doesNotSupportLabel={t("settings:modelInfo.noComputerUse")}
-		/>,
 		<ModelInfoSupportsItem
 			isSupported={modelInfo?.supportsPromptCache ?? false}
 			supportsLabel={t("settings:modelInfo.supportsPromptCache")}

+ 0 - 1
webview-ui/src/components/settings/__tests__/ModelPicker.spec.tsx

@@ -21,7 +21,6 @@ describe("ModelPicker", () => {
 		maxTokens: 8192,
 		contextWindow: 200_000,
 		supportsImages: true,
-		supportsComputerUse: true,
 		supportsPromptCache: true,
 		inputPrice: 3.0,
 		outputPrice: 15.0,

+ 0 - 24
webview-ui/src/components/settings/providers/OpenAICompatible.tsx

@@ -393,30 +393,6 @@ export const OpenAICompatible = ({
 					</div>
 				</div>
 
-				<div>
-					<div className="flex items-center gap-1">
-						<Checkbox
-							checked={apiConfiguration?.openAiCustomModelInfo?.supportsComputerUse ?? false}
-							onChange={handleInputChange("openAiCustomModelInfo", (checked) => {
-								return {
-									...(apiConfiguration?.openAiCustomModelInfo || openAiModelInfoSaneDefaults),
-									supportsComputerUse: checked,
-								}
-							})}>
-							<span className="font-medium">{t("settings:providers.customModel.computerUse.label")}</span>
-						</Checkbox>
-						<StandardTooltip content={t("settings:providers.customModel.computerUse.description")}>
-							<i
-								className="codicon codicon-info text-vscode-descriptionForeground"
-								style={{ fontSize: "12px" }}
-							/>
-						</StandardTooltip>
-					</div>
-					<div className="text-sm text-vscode-descriptionForeground pt-1">
-						{t("settings:providers.customModel.computerUse.description")}
-					</div>
-				</div>
-
 				<div>
 					<div className="flex items-center gap-1">
 						<Checkbox

+ 0 - 4
webview-ui/src/components/ui/hooks/__tests__/useSelectedModel.spec.ts

@@ -144,7 +144,6 @@ describe("useSelectedModel", () => {
 				contextWindow: 8192,
 				supportsImages: false,
 				supportsPromptCache: false,
-				supportsComputerUse: true,
 				cacheWritesPrice: 0.1,
 				cacheReadsPrice: 0.01,
 			}
@@ -192,7 +191,6 @@ describe("useSelectedModel", () => {
 				// Fields from base model that provider doesn't have
 				contextWindow: 8192, // From base (provider doesn't override)
 				supportsPromptCache: false, // From base (provider doesn't override)
-				supportsComputerUse: true, // From base (provider doesn't have)
 				cacheWritesPrice: 0.1, // From base (provider doesn't have)
 				cacheReadsPrice: 0.01, // From base (provider doesn't have)
 
@@ -255,7 +253,6 @@ describe("useSelectedModel", () => {
 							maxTokens: 8192,
 							contextWindow: 200_000,
 							supportsImages: true,
-							supportsComputerUse: true,
 							supportsPromptCache: true,
 							inputPrice: 3.0,
 							outputPrice: 15.0,
@@ -412,7 +409,6 @@ describe("useSelectedModel", () => {
 			// Verify it inherits other properties from anthropic models
 			expect(result.current.info?.maxTokens).toBe(64_000)
 			expect(result.current.info?.contextWindow).toBe(200_000)
-			expect(result.current.info?.supportsComputerUse).toBe(true)
 		})
 
 		it("should use default claude-code model when no modelId is specified", () => {

+ 0 - 2
webview-ui/src/i18n/locales/ca/settings.json

@@ -768,8 +768,6 @@
 	"modelInfo": {
 		"supportsImages": "Suporta imatges",
 		"noImages": "No suporta imatges",
-		"supportsComputerUse": "Suporta ús de l'ordinador",
-		"noComputerUse": "No suporta ús de l'ordinador",
 		"supportsPromptCache": "Suporta emmagatzematge en caché de prompts",
 		"noPromptCache": "No suporta emmagatzematge en caché de prompts",
 		"contextWindow": "Finestra de context:",

+ 0 - 2
webview-ui/src/i18n/locales/de/settings.json

@@ -768,8 +768,6 @@
 	"modelInfo": {
 		"supportsImages": "Unterstützt Bilder",
 		"noImages": "Unterstützt keine Bilder",
-		"supportsComputerUse": "Unterstützt Computernutzung",
-		"noComputerUse": "Unterstützt keine Computernutzung",
 		"supportsPromptCache": "Unterstützt Prompt-Cache",
 		"noPromptCache": "Unterstützt keinen Prompt-Cache",
 		"contextWindow": "Kontextfenster:",

+ 0 - 2
webview-ui/src/i18n/locales/en/settings.json

@@ -773,8 +773,6 @@
 	"modelInfo": {
 		"supportsImages": "Supports images",
 		"noImages": "Does not support images",
-		"supportsComputerUse": "Supports computer use",
-		"noComputerUse": "Does not support computer use",
 		"supportsPromptCache": "Supports prompt caching",
 		"noPromptCache": "Does not support prompt caching",
 		"contextWindow": "Context Window:",

+ 0 - 2
webview-ui/src/i18n/locales/es/settings.json

@@ -768,8 +768,6 @@
 	"modelInfo": {
 		"supportsImages": "Soporta imágenes",
 		"noImages": "No soporta imágenes",
-		"supportsComputerUse": "Soporta uso del ordenador",
-		"noComputerUse": "No soporta uso del ordenador",
 		"supportsPromptCache": "Soporta caché de prompts",
 		"noPromptCache": "No soporta caché de prompts",
 		"contextWindow": "Ventana de contexto",

+ 0 - 2
webview-ui/src/i18n/locales/fr/settings.json

@@ -768,8 +768,6 @@
 	"modelInfo": {
 		"supportsImages": "Prend en charge les images",
 		"noImages": "Ne prend pas en charge les images",
-		"supportsComputerUse": "Prend en charge l'utilisation de l'ordinateur",
-		"noComputerUse": "Ne prend pas en charge l'utilisation de l'ordinateur",
 		"supportsPromptCache": "Prend en charge la mise en cache des prompts",
 		"noPromptCache": "Ne prend pas en charge la mise en cache des prompts",
 		"contextWindow": "Fenêtre de contexte :",

+ 0 - 2
webview-ui/src/i18n/locales/hi/settings.json

@@ -769,8 +769,6 @@
 	"modelInfo": {
 		"supportsImages": "छवियों का समर्थन करता है",
 		"noImages": "छवियों का समर्थन नहीं करता है",
-		"supportsComputerUse": "कंप्यूटर उपयोग का समर्थन करता है",
-		"noComputerUse": "कंप्यूटर उपयोग का समर्थन नहीं करता है",
 		"supportsPromptCache": "प्रॉम्प्ट कैशिंग का समर्थन करता है",
 		"noPromptCache": "प्रॉम्प्ट कैशिंग का समर्थन नहीं करता है",
 		"contextWindow": "संदर्भ विंडो:",

+ 0 - 2
webview-ui/src/i18n/locales/id/settings.json

@@ -798,8 +798,6 @@
 	"modelInfo": {
 		"supportsImages": "Mendukung gambar",
 		"noImages": "Tidak mendukung gambar",
-		"supportsComputerUse": "Mendukung computer use",
-		"noComputerUse": "Tidak mendukung computer use",
 		"supportsPromptCache": "Mendukung prompt caching",
 		"noPromptCache": "Tidak mendukung prompt caching",
 		"contextWindow": "Jendela Konteks:",

+ 0 - 2
webview-ui/src/i18n/locales/it/settings.json

@@ -769,8 +769,6 @@
 	"modelInfo": {
 		"supportsImages": "Supporta immagini",
 		"noImages": "Non supporta immagini",
-		"supportsComputerUse": "Supporta uso del computer",
-		"noComputerUse": "Non supporta uso del computer",
 		"supportsPromptCache": "Supporta cache dei prompt",
 		"noPromptCache": "Non supporta cache dei prompt",
 		"contextWindow": "Finestra di contesto:",

+ 0 - 2
webview-ui/src/i18n/locales/ja/settings.json

@@ -769,8 +769,6 @@
 	"modelInfo": {
 		"supportsImages": "画像をサポート",
 		"noImages": "画像をサポートしていません",
-		"supportsComputerUse": "コンピュータ使用をサポート",
-		"noComputerUse": "コンピュータ使用をサポートしていません",
 		"supportsPromptCache": "プロンプトキャッシュをサポート",
 		"noPromptCache": "プロンプトキャッシュをサポートしていません",
 		"contextWindow": "コンテキストウィンドウ:",

+ 0 - 2
webview-ui/src/i18n/locales/ko/settings.json

@@ -769,8 +769,6 @@
 	"modelInfo": {
 		"supportsImages": "이미지 지원",
 		"noImages": "이미지 지원 안 함",
-		"supportsComputerUse": "컴퓨터 사용 지원",
-		"noComputerUse": "컴퓨터 사용 지원 안 함",
 		"supportsPromptCache": "프롬프트 캐시 지원",
 		"noPromptCache": "프롬프트 캐시 지원 안 함",
 		"contextWindow": "컨텍스트 창:",

+ 0 - 2
webview-ui/src/i18n/locales/nl/settings.json

@@ -769,8 +769,6 @@
 	"modelInfo": {
 		"supportsImages": "Ondersteunt afbeeldingen",
 		"noImages": "Ondersteunt geen afbeeldingen",
-		"supportsComputerUse": "Ondersteunt computergebruik",
-		"noComputerUse": "Ondersteunt geen computergebruik",
 		"supportsPromptCache": "Ondersteunt prompt caching",
 		"noPromptCache": "Ondersteunt geen prompt caching",
 		"contextWindow": "Contextvenster:",

+ 0 - 2
webview-ui/src/i18n/locales/pl/settings.json

@@ -769,8 +769,6 @@
 	"modelInfo": {
 		"supportsImages": "Obsługuje obrazy",
 		"noImages": "Nie obsługuje obrazów",
-		"supportsComputerUse": "Obsługuje użycie komputera",
-		"noComputerUse": "Nie obsługuje użycia komputera",
 		"supportsPromptCache": "Obsługuje buforowanie podpowiedzi",
 		"noPromptCache": "Nie obsługuje buforowania podpowiedzi",
 		"contextWindow": "Okno kontekstowe:",

+ 0 - 2
webview-ui/src/i18n/locales/pt-BR/settings.json

@@ -769,8 +769,6 @@
 	"modelInfo": {
 		"supportsImages": "Suporta imagens",
 		"noImages": "Não suporta imagens",
-		"supportsComputerUse": "Suporta uso do computador",
-		"noComputerUse": "Não suporta uso do computador",
 		"supportsPromptCache": "Suporta cache de prompts",
 		"noPromptCache": "Não suporta cache de prompts",
 		"contextWindow": "Janela de Contexto:",

+ 0 - 2
webview-ui/src/i18n/locales/ru/settings.json

@@ -769,8 +769,6 @@
 	"modelInfo": {
 		"supportsImages": "Поддерживает изображения",
 		"noImages": "Не поддерживает изображения",
-		"supportsComputerUse": "Поддерживает использование компьютера",
-		"noComputerUse": "Не поддерживает использование компьютера",
 		"supportsPromptCache": "Поддерживает кэширование подсказок",
 		"noPromptCache": "Не поддерживает кэширование подсказок",
 		"contextWindow": "Контекстное окно:",

+ 0 - 2
webview-ui/src/i18n/locales/tr/settings.json

@@ -769,8 +769,6 @@
 	"modelInfo": {
 		"supportsImages": "Görüntüleri destekler",
 		"noImages": "Görüntüleri desteklemez",
-		"supportsComputerUse": "Bilgisayar kullanımını destekler",
-		"noComputerUse": "Bilgisayar kullanımını desteklemez",
 		"supportsPromptCache": "İstem önbelleğini destekler",
 		"noPromptCache": "İstem önbelleğini desteklemez",
 		"contextWindow": "Bağlam Penceresi:",

+ 0 - 2
webview-ui/src/i18n/locales/vi/settings.json

@@ -769,8 +769,6 @@
 	"modelInfo": {
 		"supportsImages": "Hỗ trợ hình ảnh",
 		"noImages": "Không hỗ trợ hình ảnh",
-		"supportsComputerUse": "Hỗ trợ sử dụng máy tính",
-		"noComputerUse": "Không hỗ trợ sử dụng máy tính",
 		"supportsPromptCache": "Hỗ trợ bộ nhớ đệm lời nhắc",
 		"noPromptCache": "Không hỗ trợ bộ nhớ đệm lời nhắc",
 		"contextWindow": "Cửa sổ ngữ cảnh:",

+ 0 - 2
webview-ui/src/i18n/locales/zh-CN/settings.json

@@ -769,8 +769,6 @@
 	"modelInfo": {
 		"supportsImages": "支持图像",
 		"noImages": "不支持图像",
-		"supportsComputerUse": "支持计算机功能调用",
-		"noComputerUse": "不支持计算机功能调用",
 		"supportsPromptCache": "支持提示缓存",
 		"noPromptCache": "不支持提示缓存",
 		"contextWindow": "上下文窗口:",

+ 0 - 2
webview-ui/src/i18n/locales/zh-TW/settings.json

@@ -769,8 +769,6 @@
 	"modelInfo": {
 		"supportsImages": "支援影像",
 		"noImages": "不支援影像",
-		"supportsComputerUse": "支援電腦使用",
-		"noComputerUse": "不支援電腦使用",
 		"supportsPromptCache": "支援提示快取",
 		"noPromptCache": "不支援提示快取",
 		"contextWindow": "上下文視窗:",