
chore: remove XML tool calling support (#10841)

Co-authored-by: daniel-lxs <[email protected]>
Co-authored-by: Matt Rubens <[email protected]>
Hannes Rudolph, 3 weeks ago
commit 8de9337e63
100 files changed, 436 additions and 1,706 deletions
  1. apps/web-evals/src/app/runs/new/new-run.tsx (+0 -46)
  2. packages/core/src/custom-tools/__tests__/__snapshots__/format-xml.spec.ts.snap (+0 -129)
  3. packages/core/src/custom-tools/__tests__/format-xml.spec.ts (+0 -192)
  4. packages/core/src/custom-tools/format-xml.ts (+0 -89)
  5. packages/core/src/custom-tools/index.ts (+0 -1)
  6. packages/telemetry/src/TelemetryService.ts (+2 -2)
  7. packages/types/src/history.ts (+0 -10)
  8. packages/types/src/model.ts (+0 -4)
  9. packages/types/src/provider-settings.ts (+0 -3)
  10. packages/types/src/providers/anthropic.ts (+0 -24)
  11. packages/types/src/providers/baseten.ts (+0 -11)
  12. packages/types/src/providers/bedrock.ts (+0 -55)
  13. packages/types/src/providers/cerebras.ts (+0 -10)
  14. packages/types/src/providers/chutes.ts (+0 -80)
  15. packages/types/src/providers/claude-code.ts (+0 -6)
  16. packages/types/src/providers/deepinfra.ts (+0 -1)
  17. packages/types/src/providers/deepseek.ts (+0 -4)
  18. packages/types/src/providers/doubao.ts (+0 -6)
  19. packages/types/src/providers/featherless.ts (+0 -5)
  20. packages/types/src/providers/fireworks.ts (+0 -27)
  21. packages/types/src/providers/gemini.ts (+0 -22)
  22. packages/types/src/providers/groq.ts (+0 -14)
  23. packages/types/src/providers/io-intelligence.ts (+0 -4)
  24. packages/types/src/providers/lite-llm.ts (+0 -2)
  25. packages/types/src/providers/lm-studio.ts (+0 -2)
  26. packages/types/src/providers/minimax.ts (+0 -6)
  27. packages/types/src/providers/mistral.ts (+0 -18)
  28. packages/types/src/providers/moonshot.ts (+0 -8)
  29. packages/types/src/providers/ollama.ts (+0 -1)
  30. packages/types/src/providers/openai-codex.ts (+0 -18)
  31. packages/types/src/providers/openai.ts (+0 -68)
  32. packages/types/src/providers/openrouter.ts (+0 -1)
  33. packages/types/src/providers/qwen-code.ts (+0 -4)
  34. packages/types/src/providers/requesty.ts (+0 -2)
  35. packages/types/src/providers/sambanova.ts (+0 -16)
  36. packages/types/src/providers/unbound.ts (+0 -1)
  37. packages/types/src/providers/vercel-ai-gateway.ts (+0 -1)
  38. packages/types/src/providers/vertex.ts (+0 -71)
  39. packages/types/src/providers/xai.ts (+0 -16)
  40. packages/types/src/providers/zai.ts (+0 -34)
  41. packages/types/src/tool.ts (+0 -45)
  42. src/__tests__/history-resume-delegation.spec.ts (+50 -0)
  43. src/__tests__/nested-delegation-resume.spec.ts (+11 -7)
  44. src/api/index.ts (+2 -7)
  45. src/api/providers/__tests__/anthropic-vertex.spec.ts (+7 -6)
  46. src/api/providers/__tests__/anthropic.spec.ts (+15 -19)
  47. src/api/providers/__tests__/base-openai-compatible-provider.spec.ts (+4 -4)
  48. src/api/providers/__tests__/bedrock-native-tools.spec.ts (+17 -53)
  49. src/api/providers/__tests__/bedrock-reasoning.spec.ts (+5 -2)
  50. src/api/providers/__tests__/bedrock.spec.ts (+26 -10)
  51. src/api/providers/__tests__/claude-code.spec.ts (+54 -45)
  52. src/api/providers/__tests__/deepinfra.spec.ts (+9 -10)
  53. src/api/providers/__tests__/fireworks.spec.ts (+0 -1)
  54. src/api/providers/__tests__/gemini-handler.spec.ts (+7 -2)
  55. src/api/providers/__tests__/io-intelligence.spec.ts (+0 -2)
  56. src/api/providers/__tests__/lite-llm.spec.ts (+11 -11)
  57. src/api/providers/__tests__/lmstudio-native-tools.spec.ts (+13 -12)
  58. src/api/providers/__tests__/mistral.spec.ts (+16 -11)
  59. src/api/providers/__tests__/native-ollama.spec.ts (+12 -30)
  60. src/api/providers/__tests__/openai-codex-native-tool-calls.spec.ts (+0 -1)
  61. src/api/providers/__tests__/openai-native-tools.spec.ts (+5 -21)
  62. src/api/providers/__tests__/openai-native.spec.ts (+0 -39)
  63. src/api/providers/__tests__/openai.spec.ts (+8 -2)
  64. src/api/providers/__tests__/openrouter.spec.ts (+0 -4)
  65. src/api/providers/__tests__/qwen-code-native-tools.spec.ts (+5 -5)
  66. src/api/providers/__tests__/requesty.spec.ts (+1 -33)
  67. src/api/providers/__tests__/roo.spec.ts (+0 -17)
  68. src/api/providers/__tests__/unbound.spec.ts (+6 -15)
  69. src/api/providers/__tests__/vercel-ai-gateway.spec.ts (+3 -7)
  70. src/api/providers/__tests__/vscode-lm.spec.ts (+43 -22)
  71. src/api/providers/__tests__/xai.spec.ts (+5 -5)
  72. src/api/providers/anthropic-vertex.ts (+4 -18)
  73. src/api/providers/anthropic.ts (+4 -20)
  74. src/api/providers/base-openai-compatible-provider.ts (+5 -7)
  75. src/api/providers/bedrock.ts (+18 -37)
  76. src/api/providers/cerebras.ts (+7 -12)
  77. src/api/providers/chutes.ts (+4 -4)
  78. src/api/providers/claude-code.ts (+2 -13)
  79. src/api/providers/deepinfra.ts (+3 -8)
  80. src/api/providers/deepseek.ts (+3 -5)
  81. src/api/providers/featherless.ts (+2 -2)
  82. src/api/providers/fetchers/__tests__/chutes.spec.ts (+6 -7)
  83. src/api/providers/fetchers/__tests__/litellm.spec.ts (+0 -12)
  84. src/api/providers/fetchers/__tests__/modelEndpointCache.spec.ts (+4 -8)
  85. src/api/providers/fetchers/__tests__/ollama.test.ts (+2 -4)
  86. src/api/providers/fetchers/__tests__/openrouter.spec.ts (+7 -18)
  87. src/api/providers/fetchers/__tests__/roo.spec.ts (+8 -17)
  88. src/api/providers/fetchers/__tests__/vercel-ai-gateway.spec.ts (+0 -1)
  89. src/api/providers/fetchers/__tests__/versionedSettings.spec.ts (+2 -2)
  90. src/api/providers/fetchers/chutes.ts (+4 -3)
  91. src/api/providers/fetchers/deepinfra.ts (+0 -1)
  92. src/api/providers/fetchers/litellm.ts (+0 -1)
  93. src/api/providers/fetchers/modelEndpointCache.ts (+0 -1)
  94. src/api/providers/fetchers/ollama.ts (+4 -8)
  95. src/api/providers/fetchers/openrouter.ts (+0 -5)
  96. src/api/providers/fetchers/requesty.ts (+0 -2)
  97. src/api/providers/fetchers/roo.ts (+0 -5)
  98. src/api/providers/fetchers/unbound.ts (+0 -1)
  99. src/api/providers/fetchers/vercel-ai-gateway.ts (+0 -1)
  100. src/api/providers/gemini.ts (+10 -21)

+ 0 - 46
apps/web-evals/src/app/runs/new/new-run.tsx

@@ -56,7 +56,6 @@ import { useRooCodeCloudModels } from "@/hooks/use-roo-code-cloud-models"
 
 import {
 	Button,
-	Checkbox,
 	FormControl,
 	FormField,
 	FormItem,
@@ -111,7 +110,6 @@ export function NewRun() {
 
 	const [provider, setModelSource] = useState<"roo" | "openrouter" | "other">("other")
 	const [executionMethod, setExecutionMethod] = useState<ExecutionMethod>("vscode")
-	const [useNativeToolProtocol, setUseNativeToolProtocol] = useState(true)
 	const [commandExecutionTimeout, setCommandExecutionTimeout] = useState(20)
 	const [terminalShellIntegrationTimeout, setTerminalShellIntegrationTimeout] = useState(30) // seconds
 
@@ -464,7 +462,6 @@ export function NewRun() {
 							...(runValues.settings || {}),
 							apiProvider: "openrouter",
 							openRouterModelId: selection.model,
-							toolProtocol: useNativeToolProtocol ? "native" : "xml",
 							commandExecutionTimeout,
 							terminalShellIntegrationTimeout: terminalShellIntegrationTimeout * 1000,
 						}
@@ -474,7 +471,6 @@ export function NewRun() {
 							...(runValues.settings || {}),
 							apiProvider: "roo",
 							apiModelId: selection.model,
-							toolProtocol: useNativeToolProtocol ? "native" : "xml",
 							commandExecutionTimeout,
 							terminalShellIntegrationTimeout: terminalShellIntegrationTimeout * 1000,
 						}
@@ -485,7 +481,6 @@ export function NewRun() {
 							...EVALS_SETTINGS,
 							...providerSettings,
 							...importedSettings.globalSettings,
-							toolProtocol: useNativeToolProtocol ? "native" : "xml",
 							commandExecutionTimeout,
 							terminalShellIntegrationTimeout: terminalShellIntegrationTimeout * 1000,
 						}
@@ -512,7 +507,6 @@ export function NewRun() {
 			configSelections,
 			importedSettings,
 			router,
-			useNativeToolProtocol,
 			commandExecutionTimeout,
 			terminalShellIntegrationTimeout,
 		],
@@ -688,26 +682,6 @@ export function NewRun() {
 											</div>
 										)}
 
-										<div className="mt-4 p-4 rounded-md bg-muted/30 border border-border space-y-3">
-											<Label className="text-sm font-medium text-muted-foreground">
-												Tool Protocol Options
-											</Label>
-											<div className="flex flex-col gap-2.5 pl-1">
-												<label
-													htmlFor="native-other"
-													className="flex items-center gap-2 cursor-pointer">
-													<Checkbox
-														id="native-other"
-														checked={useNativeToolProtocol}
-														onCheckedChange={(checked: boolean) =>
-															setUseNativeToolProtocol(checked)
-														}
-													/>
-													<span className="text-sm">Use Native Tool Calls</span>
-												</label>
-											</div>
-										</div>
-
 										{settings && (
 											<SettingsDiff defaultSettings={EVALS_SETTINGS} customSettings={settings} />
 										)}
@@ -792,26 +766,6 @@ export function NewRun() {
 												</div>
 											))}
 										</div>
-
-										<div className="mt-4 p-4 rounded-md bg-muted/30 border border-border space-y-3">
-											<Label className="text-sm font-medium text-muted-foreground">
-												Tool Protocol Options
-											</Label>
-											<div className="flex flex-col gap-2.5 pl-1">
-												<label
-													htmlFor="native"
-													className="flex items-center gap-2 cursor-pointer">
-													<Checkbox
-														id="native"
-														checked={useNativeToolProtocol}
-														onCheckedChange={(checked: boolean) =>
-															setUseNativeToolProtocol(checked)
-														}
-													/>
-													<span className="text-sm">Use Native Tool Calls</span>
-												</label>
-											</div>
-										</div>
 									</>
 								)}
 

+ 0 - 129
packages/core/src/custom-tools/__tests__/__snapshots__/format-xml.spec.ts.snap

@@ -1,129 +0,0 @@
-// Vitest Snapshot v1, https://vitest.dev/guide/snapshot.html
-
-exports[`XML Protocol snapshots > should generate correct XML description for all fixtures combined 1`] = `
-"# Custom Tools
-
-The following custom tools are available for this mode. Use them in the same way as built-in tools.
-
-## simple
-Description: Simple tool
-Parameters:
-- value: (required) The input value (type: string)
-Usage:
-<simple>
-<value>value value here</value>
-</simple>
-
-## cached
-Description: Cached tool
-Parameters:
-Usage:
-<cached>
-</cached>
-
-## legacy
-Description: Legacy tool using args
-Parameters:
-- input: (required) The input string (type: string)
-Usage:
-<legacy>
-<input>input value here</input>
-</legacy>
-
-## multi_toolA
-Description: Tool A
-Parameters:
-Usage:
-<multi_toolA>
-</multi_toolA>
-
-## multi_toolB
-Description: Tool B
-Parameters:
-Usage:
-<multi_toolB>
-</multi_toolB>
-
-## mixed_validTool
-Description: Valid
-Parameters:
-Usage:
-<mixed_validTool>
-</mixed_validTool>"
-`;
-
-exports[`XML Protocol snapshots > should generate correct XML description for cached tool 1`] = `
-"# Custom Tools
-
-The following custom tools are available for this mode. Use them in the same way as built-in tools.
-
-## cached
-Description: Cached tool
-Parameters:
-Usage:
-<cached>
-</cached>"
-`;
-
-exports[`XML Protocol snapshots > should generate correct XML description for legacy tool (using args) 1`] = `
-"# Custom Tools
-
-The following custom tools are available for this mode. Use them in the same way as built-in tools.
-
-## legacy
-Description: Legacy tool using args
-Parameters:
-- input: (required) The input string (type: string)
-Usage:
-<legacy>
-<input>input value here</input>
-</legacy>"
-`;
-
-exports[`XML Protocol snapshots > should generate correct XML description for mixed export tool 1`] = `
-"# Custom Tools
-
-The following custom tools are available for this mode. Use them in the same way as built-in tools.
-
-## mixed_validTool
-Description: Valid
-Parameters:
-Usage:
-<mixed_validTool>
-</mixed_validTool>"
-`;
-
-exports[`XML Protocol snapshots > should generate correct XML description for multi export tools 1`] = `
-"# Custom Tools
-
-The following custom tools are available for this mode. Use them in the same way as built-in tools.
-
-## multi_toolA
-Description: Tool A
-Parameters:
-Usage:
-<multi_toolA>
-</multi_toolA>
-
-## multi_toolB
-Description: Tool B
-Parameters:
-Usage:
-<multi_toolB>
-</multi_toolB>"
-`;
-
-exports[`XML Protocol snapshots > should generate correct XML description for simple tool 1`] = `
-"# Custom Tools
-
-The following custom tools are available for this mode. Use them in the same way as built-in tools.
-
-## simple
-Description: Simple tool
-Parameters:
-- value: (required) The input value (type: string)
-Usage:
-<simple>
-<value>value value here</value>
-</simple>"
-`;

+ 0 - 192
packages/core/src/custom-tools/__tests__/format-xml.spec.ts

@@ -1,192 +0,0 @@
-// pnpm --filter @roo-code/core test src/custom-tools/__tests__/format-xml.spec.ts
-
-import { type SerializedCustomToolDefinition, parametersSchema as z, defineCustomTool } from "@roo-code/types"
-
-import { serializeCustomTool, serializeCustomTools } from "../serialize.js"
-import { formatXml } from "../format-xml.js"
-
-import simpleTool from "./fixtures/simple.js"
-import cachedTool from "./fixtures/cached.js"
-import legacyTool from "./fixtures/legacy.js"
-import { toolA, toolB } from "./fixtures/multi.js"
-import { validTool as mixedValidTool } from "./fixtures/mixed.js"
-
-const fixtureTools = {
-	simple: simpleTool,
-	cached: cachedTool,
-	legacy: legacyTool,
-	multi_toolA: toolA,
-	multi_toolB: toolB,
-	mixed_validTool: mixedValidTool,
-}
-
-describe("formatXml", () => {
-	it("should return empty string for empty tools array", () => {
-		expect(formatXml([])).toBe("")
-	})
-
-	it("should throw for undefined tools", () => {
-		expect(() => formatXml(undefined as unknown as SerializedCustomToolDefinition[])).toThrow()
-	})
-
-	it("should generate description for a single tool without args", () => {
-		const tool = defineCustomTool({
-			name: "my_tool",
-			description: "A simple tool that does something",
-			async execute() {
-				return "done"
-			},
-		})
-
-		const serialized = serializeCustomTool(tool)
-		const result = formatXml([serialized])
-
-		expect(result).toContain("# Custom Tools")
-		expect(result).toContain("## my_tool")
-		expect(result).toContain("Description: A simple tool that does something")
-		expect(result).toContain("Parameters: None")
-		expect(result).toContain("<my_tool>")
-		expect(result).toContain("</my_tool>")
-	})
-
-	it("should generate description for a tool with required args", () => {
-		const tool = defineCustomTool({
-			name: "greeter",
-			description: "Greets a person by name",
-			parameters: z.object({
-				name: z.string().describe("The name of the person to greet"),
-			}),
-			async execute({ name }) {
-				return `Hello, ${name}!`
-			},
-		})
-
-		const serialized = serializeCustomTool(tool)
-		const result = formatXml([serialized])
-
-		expect(result).toContain("## greeter")
-		expect(result).toContain("Description: Greets a person by name")
-		expect(result).toContain("Parameters:")
-		expect(result).toContain("- name: (required) The name of the person to greet (type: string)")
-		expect(result).toContain("<greeter>")
-		expect(result).toContain("<name>name value here</name>")
-		expect(result).toContain("</greeter>")
-	})
-
-	it("should generate description for a tool with optional args", () => {
-		const tool = defineCustomTool({
-			name: "configurable_tool",
-			description: "A tool with optional configuration",
-			parameters: z.object({
-				input: z.string().describe("The input to process"),
-				format: z.string().optional().describe("Output format"),
-			}),
-			async execute({ input, format }) {
-				return format ? `${input} (${format})` : input
-			},
-		})
-
-		const serialized = serializeCustomTool(tool)
-		const result = formatXml([serialized])
-
-		expect(result).toContain("- input: (required) The input to process (type: string)")
-		expect(result).toContain("- format: (optional) Output format (type: string)")
-		expect(result).toContain("<input>input value here</input>")
-		expect(result).toContain("<format>optional format value</format>")
-	})
-
-	it("should generate descriptions for multiple tools", () => {
-		const tools = [
-			defineCustomTool({
-				name: "tool_a",
-				description: "First tool",
-				async execute() {
-					return "a"
-				},
-			}),
-			defineCustomTool({
-				name: "tool_b",
-				description: "Second tool",
-				parameters: z.object({
-					value: z.number().describe("A numeric value"),
-				}),
-				async execute() {
-					return "b"
-				},
-			}),
-		]
-
-		const serialized = serializeCustomTools(tools)
-		const result = formatXml(serialized)
-
-		expect(result).toContain("## tool_a")
-		expect(result).toContain("Description: First tool")
-		expect(result).toContain("## tool_b")
-		expect(result).toContain("Description: Second tool")
-		expect(result).toContain("- value: (required) A numeric value (type: number)")
-	})
-
-	it("should treat args in required array as required", () => {
-		// Using a raw SerializedToolDefinition to test the required behavior.
-		const tools: SerializedCustomToolDefinition[] = [
-			{
-				name: "test_tool",
-				description: "Test tool",
-				parameters: {
-					type: "object",
-					properties: {
-						data: {
-							type: "object",
-							description: "Some data",
-						},
-					},
-					required: ["data"],
-				},
-			},
-		]
-
-		const result = formatXml(tools)
-
-		expect(result).toContain("- data: (required) Some data (type: object)")
-		expect(result).toContain("<data>data value here</data>")
-	})
-})
-
-describe("XML Protocol snapshots", () => {
-	it("should generate correct XML description for simple tool", () => {
-		const serialized = serializeCustomTool(fixtureTools.simple)
-		const result = formatXml([serialized])
-		expect(result).toMatchSnapshot()
-	})
-
-	it("should generate correct XML description for cached tool", () => {
-		const serialized = serializeCustomTool(fixtureTools.cached)
-		const result = formatXml([serialized])
-		expect(result).toMatchSnapshot()
-	})
-
-	it("should generate correct XML description for legacy tool (using args)", () => {
-		const serialized = serializeCustomTool(fixtureTools.legacy)
-		const result = formatXml([serialized])
-		expect(result).toMatchSnapshot()
-	})
-
-	it("should generate correct XML description for multi export tools", () => {
-		const serializedA = serializeCustomTool(fixtureTools.multi_toolA)
-		const serializedB = serializeCustomTool(fixtureTools.multi_toolB)
-		const result = formatXml([serializedA, serializedB])
-		expect(result).toMatchSnapshot()
-	})
-
-	it("should generate correct XML description for mixed export tool", () => {
-		const serialized = serializeCustomTool(fixtureTools.mixed_validTool)
-		const result = formatXml([serialized])
-		expect(result).toMatchSnapshot()
-	})
-
-	it("should generate correct XML description for all fixtures combined", () => {
-		const allSerialized = Object.values(fixtureTools).map(serializeCustomTool)
-		const result = formatXml(allSerialized)
-		expect(result).toMatchSnapshot()
-	})
-})

+ 0 - 89
packages/core/src/custom-tools/format-xml.ts

@@ -1,89 +0,0 @@
-import type { SerializedCustomToolDefinition, SerializedCustomToolParameters } from "@roo-code/types"
-
-/**
- * Extract the type string from a parameter schema.
- * Handles both direct `type` property and `anyOf` schemas (used for nullable types).
- */
-function getParameterType(parameter: SerializedCustomToolParameters): string {
-	// Direct type property
-	if (parameter.type) {
-		return String(parameter.type)
-	}
-
-	// Handle anyOf schema (used for nullable types like `string | null`)
-	if (parameter.anyOf && Array.isArray(parameter.anyOf)) {
-		const types = parameter.anyOf
-			.map((schema) => (typeof schema === "object" && schema.type ? String(schema.type) : null))
-			.filter((t): t is string => t !== null && t !== "null")
-
-		if (types.length > 0) {
-			return types.join(" | ")
-		}
-	}
-
-	return "unknown"
-}
-
-function getParameterDescription(name: string, parameter: SerializedCustomToolParameters, required: string[]): string {
-	const requiredText = required.includes(name) ? "(required)" : "(optional)"
-	const typeText = getParameterType(parameter)
-	return `- ${name}: ${requiredText} ${parameter.description ?? ""} (type: ${typeText})`
-}
-
-function getUsage(tool: SerializedCustomToolDefinition): string {
-	const lines: string[] = [`<${tool.name}>`]
-
-	if (tool.parameters) {
-		const required = tool.parameters.required ?? []
-
-		for (const [argName, _argType] of Object.entries(tool.parameters.properties ?? {})) {
-			const placeholder = required.includes(argName) ? `${argName} value here` : `optional ${argName} value`
-			lines.push(`<${argName}>${placeholder}</${argName}>`)
-		}
-	}
-
-	lines.push(`</${tool.name}>`)
-	return lines.join("\n")
-}
-
-function getDescription(tool: SerializedCustomToolDefinition): string {
-	const parts: string[] = []
-
-	parts.push(`## ${tool.name}`)
-	parts.push(`Description: ${tool.description}`)
-
-	if (tool.parameters?.properties) {
-		const required = tool.parameters?.required ?? []
-		parts.push("Parameters:")
-
-		for (const [name, parameter] of Object.entries(tool.parameters.properties)) {
-			// What should we do with `boolean` values for `parameter`?
-			if (typeof parameter !== "object") {
-				continue
-			}
-
-			parts.push(getParameterDescription(name, parameter, required))
-		}
-	} else {
-		parts.push("Parameters: None")
-	}
-
-	parts.push("Usage:")
-	parts.push(getUsage(tool))
-
-	return parts.join("\n")
-}
-
-export function formatXml(tools: SerializedCustomToolDefinition[]): string {
-	if (tools.length === 0) {
-		return ""
-	}
-
-	const descriptions = tools.map((tool) => getDescription(tool))
-
-	return `# Custom Tools
-
-The following custom tools are available for this mode. Use them in the same way as built-in tools.
-
-${descriptions.join("\n\n")}`
-}

+ 0 - 1
packages/core/src/custom-tools/index.ts

@@ -1,4 +1,3 @@
 export * from "./custom-tool-registry.js"
 export * from "./serialize.js"
-export * from "./format-xml.js"
 export * from "./format-native.js"

+ 2 - 2
packages/telemetry/src/TelemetryService.ts

@@ -111,8 +111,8 @@ export class TelemetryService {
 		this.captureEvent(TelemetryEventName.MODE_SWITCH, { taskId, newMode })
 	}
 
-	public captureToolUsage(taskId: string, tool: string, toolProtocol: string): void {
-		this.captureEvent(TelemetryEventName.TOOL_USED, { taskId, tool, toolProtocol })
+	public captureToolUsage(taskId: string, tool: string): void {
+		this.captureEvent(TelemetryEventName.TOOL_USED, { taskId, tool })
 	}
 
 	public captureCheckpointCreated(taskId: string): void {
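
For callers of the telemetry service the migration is mechanical: the third toolProtocol argument is dropped, since only native tool calls remain. A minimal hypothetical call site (the telemetry variable and the "read_file" tool name are illustrative, not taken from this diff):

	// Before this commit a call site passed the protocol as a third argument:
	//   telemetry.captureToolUsage(task.taskId, "read_file", toolProtocol)
	// After this commit only the task id and the tool name are recorded:
	telemetry.captureToolUsage(task.taskId, "read_file")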

+ 0 - 10
packages/types/src/history.ts

@@ -19,16 +19,6 @@ export const historyItemSchema = z.object({
 	size: z.number().optional(),
 	workspace: z.string().optional(),
 	mode: z.string().optional(),
-	/**
-	 * The tool protocol used by this task. Once a task uses tools with a specific
-	 * protocol (XML or Native), it is permanently locked to that protocol.
-	 *
-	 * - "xml": Tool calls are parsed from XML text (no tool IDs)
-	 * - "native": Tool calls come as tool_call chunks with IDs
-	 *
-	 * This ensures task resumption works correctly even when NTC settings change.
-	 */
-	toolProtocol: z.enum(["xml", "native"]).optional(),
 	apiConfigName: z.string().optional(), // Provider profile name for sticky profile feature
 	status: z.enum(["active", "completed", "delegated"]).optional(),
 	delegatedToId: z.string().optional(), // Last child this parent delegated to

+ 0 - 4
packages/types/src/model.ts

@@ -110,10 +110,6 @@ export const modelInfoSchema = z.object({
 	isStealthModel: z.boolean().optional(),
 	// Flag to indicate if the model is free (no cost)
 	isFree: z.boolean().optional(),
-	// Flag to indicate if the model supports native tool calling (OpenAI-style function calling)
-	supportsNativeTools: z.boolean().optional(),
-	// Default tool protocol preferred by this model (if not specified, falls back to capability/provider defaults)
-	defaultToolProtocol: z.enum(["xml", "native"]).optional(),
 	// Exclude specific native tools from being available (only applies to native protocol)
 	// These tools will be removed from the set of tools available to the model
 	excludedTools: z.array(z.string()).optional(),

+ 0 - 3
packages/types/src/provider-settings.ts

@@ -185,9 +185,6 @@ const baseProviderSettingsSchema = z.object({
 
 	// Model verbosity.
 	verbosity: verbosityLevelsSchema.optional(),
-
-	// Tool protocol override for this profile.
-	toolProtocol: z.enum(["xml", "native"]).optional(),
 })
 
 // Several of the providers share common model config properties.

+ 0 - 24
packages/types/src/providers/anthropic.ts

@@ -11,8 +11,6 @@ export const anthropicModels = {
 		contextWindow: 200_000, // Default 200K, extendable to 1M with beta flag 'context-1m-2025-08-07'
 		supportsImages: true,
 		supportsPromptCache: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 3.0, // $3 per million input tokens (≤200K context)
 		outputPrice: 15.0, // $15 per million output tokens (≤200K context)
 		cacheWritesPrice: 3.75, // $3.75 per million tokens
@@ -34,8 +32,6 @@ export const anthropicModels = {
 		contextWindow: 200_000, // Default 200K, extendable to 1M with beta flag 'context-1m-2025-08-07'
 		supportsImages: true,
 		supportsPromptCache: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 3.0, // $3 per million input tokens (≤200K context)
 		outputPrice: 15.0, // $15 per million output tokens (≤200K context)
 		cacheWritesPrice: 3.75, // $3.75 per million tokens
@@ -57,8 +53,6 @@ export const anthropicModels = {
 		contextWindow: 200_000,
 		supportsImages: true,
 		supportsPromptCache: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 5.0, // $5 per million input tokens
 		outputPrice: 25.0, // $25 per million output tokens
 		cacheWritesPrice: 6.25, // $6.25 per million tokens
@@ -70,8 +64,6 @@ export const anthropicModels = {
 		contextWindow: 200_000,
 		supportsImages: true,
 		supportsPromptCache: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 15.0, // $15 per million input tokens
 		outputPrice: 75.0, // $75 per million output tokens
 		cacheWritesPrice: 18.75, // $18.75 per million tokens
@@ -83,8 +75,6 @@ export const anthropicModels = {
 		contextWindow: 200_000,
 		supportsImages: true,
 		supportsPromptCache: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 15.0, // $15 per million input tokens
 		outputPrice: 75.0, // $75 per million output tokens
 		cacheWritesPrice: 18.75, // $18.75 per million tokens
@@ -96,8 +86,6 @@ export const anthropicModels = {
 		contextWindow: 200_000,
 		supportsImages: true,
 		supportsPromptCache: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 3.0, // $3 per million input tokens
 		outputPrice: 15.0, // $15 per million output tokens
 		cacheWritesPrice: 3.75, // $3.75 per million tokens
@@ -110,8 +98,6 @@ export const anthropicModels = {
 		contextWindow: 200_000,
 		supportsImages: true,
 		supportsPromptCache: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 3.0, // $3 per million input tokens
 		outputPrice: 15.0, // $15 per million output tokens
 		cacheWritesPrice: 3.75, // $3.75 per million tokens
@@ -122,8 +108,6 @@ export const anthropicModels = {
 		contextWindow: 200_000,
 		supportsImages: true,
 		supportsPromptCache: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 3.0, // $3 per million input tokens
 		outputPrice: 15.0, // $15 per million output tokens
 		cacheWritesPrice: 3.75, // $3.75 per million tokens
@@ -134,8 +118,6 @@ export const anthropicModels = {
 		contextWindow: 200_000,
 		supportsImages: false,
 		supportsPromptCache: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 1.0,
 		outputPrice: 5.0,
 		cacheWritesPrice: 1.25,
@@ -146,8 +128,6 @@ export const anthropicModels = {
 		contextWindow: 200_000,
 		supportsImages: true,
 		supportsPromptCache: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 15.0,
 		outputPrice: 75.0,
 		cacheWritesPrice: 18.75,
@@ -158,8 +138,6 @@ export const anthropicModels = {
 		contextWindow: 200_000,
 		supportsImages: true,
 		supportsPromptCache: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0.25,
 		outputPrice: 1.25,
 		cacheWritesPrice: 0.3,
@@ -170,8 +148,6 @@ export const anthropicModels = {
 		contextWindow: 200_000,
 		supportsImages: true,
 		supportsPromptCache: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 1.0,
 		outputPrice: 5.0,
 		cacheWritesPrice: 1.25,

+ 0 - 11
packages/types/src/providers/baseten.ts

@@ -9,7 +9,6 @@ export const basetenModels = {
 		contextWindow: 262_000,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
 		inputPrice: 0.6,
 		outputPrice: 2.5,
 		cacheWritesPrice: 0,
@@ -21,7 +20,6 @@ export const basetenModels = {
 		contextWindow: 200_000,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
 		inputPrice: 0.6,
 		outputPrice: 2.2,
 		cacheWritesPrice: 0,
@@ -33,7 +31,6 @@ export const basetenModels = {
 		contextWindow: 163_840,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
 		inputPrice: 2.55,
 		outputPrice: 5.95,
 		cacheWritesPrice: 0,
@@ -45,7 +42,6 @@ export const basetenModels = {
 		contextWindow: 163_840,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
 		inputPrice: 2.55,
 		outputPrice: 5.95,
 		cacheWritesPrice: 0,
@@ -57,7 +53,6 @@ export const basetenModels = {
 		contextWindow: 163_840,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
 		inputPrice: 0.77,
 		outputPrice: 0.77,
 		cacheWritesPrice: 0,
@@ -69,7 +64,6 @@ export const basetenModels = {
 		contextWindow: 163_840,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
 		inputPrice: 0.5,
 		outputPrice: 1.5,
 		cacheWritesPrice: 0,
@@ -82,7 +76,6 @@ export const basetenModels = {
 		contextWindow: 163_840,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
 		inputPrice: 0.3,
 		outputPrice: 0.45,
 		cacheWritesPrice: 0,
@@ -95,7 +88,6 @@ export const basetenModels = {
 		contextWindow: 128_072,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
 		inputPrice: 0.1,
 		outputPrice: 0.5,
 		cacheWritesPrice: 0,
@@ -107,7 +99,6 @@ export const basetenModels = {
 		contextWindow: 262_144,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
 		inputPrice: 0.22,
 		outputPrice: 0.8,
 		cacheWritesPrice: 0,
@@ -119,7 +110,6 @@ export const basetenModels = {
 		contextWindow: 262_144,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
 		inputPrice: 0.38,
 		outputPrice: 1.53,
 		cacheWritesPrice: 0,
@@ -131,7 +121,6 @@ export const basetenModels = {
 		contextWindow: 262_000,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
 		inputPrice: 0.6,
 		outputPrice: 2.5,
 		cacheWritesPrice: 0,

+ 0 - 55
packages/types/src/providers/bedrock.ts

@@ -19,8 +19,6 @@ export const bedrockModels = {
 		supportsImages: true,
 		supportsPromptCache: true,
 		supportsReasoningBudget: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 3.0,
 		outputPrice: 15.0,
 		cacheWritesPrice: 3.75,
@@ -34,7 +32,6 @@ export const bedrockModels = {
 		contextWindow: 300_000,
 		supportsImages: true,
 		supportsPromptCache: true,
-		supportsNativeTools: true,
 		inputPrice: 0.8,
 		outputPrice: 3.2,
 		cacheWritesPrice: 0.8, // per million tokens
@@ -48,7 +45,6 @@ export const bedrockModels = {
 		contextWindow: 300_000,
 		supportsImages: true,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
 		inputPrice: 1.0,
 		outputPrice: 4.0,
 		cacheWritesPrice: 1.0, // per million tokens
@@ -60,7 +56,6 @@ export const bedrockModels = {
 		contextWindow: 300_000,
 		supportsImages: true,
 		supportsPromptCache: true,
-		supportsNativeTools: true,
 		inputPrice: 0.06,
 		outputPrice: 0.24,
 		cacheWritesPrice: 0.06, // per million tokens
@@ -74,7 +69,6 @@ export const bedrockModels = {
 		contextWindow: 1_000_000,
 		supportsImages: true,
 		supportsPromptCache: true,
-		supportsNativeTools: true,
 		inputPrice: 0.33,
 		outputPrice: 2.75,
 		cacheWritesPrice: 0,
@@ -89,7 +83,6 @@ export const bedrockModels = {
 		contextWindow: 128_000,
 		supportsImages: false,
 		supportsPromptCache: true,
-		supportsNativeTools: true,
 		inputPrice: 0.035,
 		outputPrice: 0.14,
 		cacheWritesPrice: 0.035, // per million tokens
@@ -104,8 +97,6 @@ export const bedrockModels = {
 		supportsImages: true,
 		supportsPromptCache: true,
 		supportsReasoningBudget: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 3.0,
 		outputPrice: 15.0,
 		cacheWritesPrice: 3.75,
@@ -120,8 +111,6 @@ export const bedrockModels = {
 		supportsImages: true,
 		supportsPromptCache: true,
 		supportsReasoningBudget: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 15.0,
 		outputPrice: 75.0,
 		cacheWritesPrice: 18.75,
@@ -136,8 +125,6 @@ export const bedrockModels = {
 		supportsImages: true,
 		supportsPromptCache: true,
 		supportsReasoningBudget: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 5.0,
 		outputPrice: 25.0,
 		cacheWritesPrice: 6.25,
@@ -152,8 +139,6 @@ export const bedrockModels = {
 		supportsImages: true,
 		supportsPromptCache: true,
 		supportsReasoningBudget: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 15.0,
 		outputPrice: 75.0,
 		cacheWritesPrice: 18.75,
@@ -168,8 +153,6 @@ export const bedrockModels = {
 		supportsImages: true,
 		supportsPromptCache: true,
 		supportsReasoningBudget: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 3.0,
 		outputPrice: 15.0,
 		cacheWritesPrice: 3.75,
@@ -183,8 +166,6 @@ export const bedrockModels = {
 		contextWindow: 200_000,
 		supportsImages: true,
 		supportsPromptCache: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 3.0,
 		outputPrice: 15.0,
 		cacheWritesPrice: 3.75,
@@ -198,8 +179,6 @@ export const bedrockModels = {
 		contextWindow: 200_000,
 		supportsImages: false,
 		supportsPromptCache: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0.8,
 		outputPrice: 4.0,
 		cacheWritesPrice: 1.0,
@@ -214,8 +193,6 @@ export const bedrockModels = {
 		supportsImages: true,
 		supportsPromptCache: true,
 		supportsReasoningBudget: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 1.0,
 		outputPrice: 5.0,
 		cacheWritesPrice: 1.25, // 5m cache writes
@@ -229,8 +206,6 @@ export const bedrockModels = {
 		contextWindow: 200_000,
 		supportsImages: true,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 3.0,
 		outputPrice: 15.0,
 	},
@@ -239,8 +214,6 @@ export const bedrockModels = {
 		contextWindow: 200_000,
 		supportsImages: true,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 15.0,
 		outputPrice: 75.0,
 	},
@@ -249,8 +222,6 @@ export const bedrockModels = {
 		contextWindow: 200_000,
 		supportsImages: true,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 3.0,
 		outputPrice: 15.0,
 	},
@@ -259,8 +230,6 @@ export const bedrockModels = {
 		contextWindow: 200_000,
 		supportsImages: true,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0.25,
 		outputPrice: 1.25,
 	},
@@ -269,7 +238,6 @@ export const bedrockModels = {
 		contextWindow: 128_000,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
 		inputPrice: 1.35,
 		outputPrice: 5.4,
 	},
@@ -278,7 +246,6 @@ export const bedrockModels = {
 		contextWindow: 128_000,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
 		inputPrice: 0.5,
 		outputPrice: 1.5,
 		description: "GPT-OSS 20B - Optimized for low latency and local/specialized use cases",
@@ -288,7 +255,6 @@ export const bedrockModels = {
 		contextWindow: 128_000,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
 		inputPrice: 2.0,
 		outputPrice: 6.0,
 		description: "GPT-OSS 120B - Production-ready, general-purpose, high-reasoning model",
@@ -298,7 +264,6 @@ export const bedrockModels = {
 		contextWindow: 128_000,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
 		inputPrice: 0.72,
 		outputPrice: 0.72,
 		description: "Llama 3.3 Instruct (70B)",
@@ -308,7 +273,6 @@ export const bedrockModels = {
 		contextWindow: 128_000,
 		supportsImages: true,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
 		inputPrice: 0.72,
 		outputPrice: 0.72,
 		description: "Llama 3.2 Instruct (90B)",
@@ -318,7 +282,6 @@ export const bedrockModels = {
 		contextWindow: 128_000,
 		supportsImages: true,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
 		inputPrice: 0.16,
 		outputPrice: 0.16,
 		description: "Llama 3.2 Instruct (11B)",
@@ -328,7 +291,6 @@ export const bedrockModels = {
 		contextWindow: 128_000,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
 		inputPrice: 0.15,
 		outputPrice: 0.15,
 		description: "Llama 3.2 Instruct (3B)",
@@ -338,7 +300,6 @@ export const bedrockModels = {
 		contextWindow: 128_000,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
 		inputPrice: 0.1,
 		outputPrice: 0.1,
 		description: "Llama 3.2 Instruct (1B)",
@@ -348,7 +309,6 @@ export const bedrockModels = {
 		contextWindow: 128_000,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
 		inputPrice: 2.4,
 		outputPrice: 2.4,
 		description: "Llama 3.1 Instruct (405B)",
@@ -358,7 +318,6 @@ export const bedrockModels = {
 		contextWindow: 128_000,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
 		inputPrice: 0.72,
 		outputPrice: 0.72,
 		description: "Llama 3.1 Instruct (70B)",
@@ -368,7 +327,6 @@ export const bedrockModels = {
 		contextWindow: 128_000,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
 		inputPrice: 0.9,
 		outputPrice: 0.9,
 		description: "Llama 3.1 Instruct (70B) (w/ latency optimized inference)",
@@ -378,7 +336,6 @@ export const bedrockModels = {
 		contextWindow: 8_000,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
 		inputPrice: 0.22,
 		outputPrice: 0.22,
 		description: "Llama 3.1 Instruct (8B)",
@@ -388,7 +345,6 @@ export const bedrockModels = {
 		contextWindow: 8_000,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
 		inputPrice: 2.65,
 		outputPrice: 3.5,
 	},
@@ -397,7 +353,6 @@ export const bedrockModels = {
 		contextWindow: 4_000,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
 		inputPrice: 0.3,
 		outputPrice: 0.6,
 	},
@@ -406,7 +361,6 @@ export const bedrockModels = {
 		contextWindow: 8_000,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
 		inputPrice: 0.15,
 		outputPrice: 0.2,
 		description: "Amazon Titan Text Lite",
@@ -416,7 +370,6 @@ export const bedrockModels = {
 		contextWindow: 8_000,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
 		inputPrice: 0.2,
 		outputPrice: 0.6,
 		description: "Amazon Titan Text Express",
@@ -426,8 +379,6 @@ export const bedrockModels = {
 		contextWindow: 262_144,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		preserveReasoning: true,
 		inputPrice: 0.6,
 		outputPrice: 2.5,
@@ -438,8 +389,6 @@ export const bedrockModels = {
 		contextWindow: 196_608,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		preserveReasoning: true,
 		inputPrice: 0.3,
 		outputPrice: 1.2,
@@ -450,8 +399,6 @@ export const bedrockModels = {
 		contextWindow: 262_144,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0.15,
 		outputPrice: 1.2,
 		description: "Qwen3 Next 80B (MoE model with 3B active parameters)",
@@ -461,8 +408,6 @@ export const bedrockModels = {
 		contextWindow: 262_144,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0.45,
 		outputPrice: 1.8,
 		description: "Qwen3 Coder 480B (MoE model with 35B active parameters)",

+ 0 - 10
packages/types/src/providers/cerebras.ts

@@ -11,8 +11,6 @@ export const cerebrasModels = {
 		contextWindow: 131072,
 		supportsImages: false,
 		supportsPromptCache: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0,
 		outputPrice: 0,
 		description:
@@ -23,8 +21,6 @@ export const cerebrasModels = {
 		contextWindow: 64000,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0,
 		outputPrice: 0,
 		description: "Intelligent model with ~1400 tokens/s",
@@ -34,8 +30,6 @@ export const cerebrasModels = {
 		contextWindow: 64000,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0,
 		outputPrice: 0,
 		description: "Powerful model with ~2600 tokens/s",
@@ -45,8 +39,6 @@ export const cerebrasModels = {
 		contextWindow: 64000,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0,
 		outputPrice: 0,
 		description: "SOTA coding performance with ~2500 tokens/s",
@@ -56,8 +48,6 @@ export const cerebrasModels = {
 		contextWindow: 64000,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0,
 		outputPrice: 0,
 		description:

+ 0 - 80
packages/types/src/providers/chutes.ts

@@ -51,8 +51,6 @@ export const chutesModels = {
 		contextWindow: 163840,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0,
 		outputPrice: 0,
 		description: "DeepSeek R1 0528 model.",
@@ -62,8 +60,6 @@ export const chutesModels = {
 		contextWindow: 163840,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0,
 		outputPrice: 0,
 		description: "DeepSeek R1 model.",
@@ -73,8 +69,6 @@ export const chutesModels = {
 		contextWindow: 163840,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0,
 		outputPrice: 0,
 		description: "DeepSeek V3 model.",
@@ -84,8 +78,6 @@ export const chutesModels = {
 		contextWindow: 163840,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0,
 		outputPrice: 0,
 		description: "DeepSeek V3.1 model.",
@@ -95,8 +87,6 @@ export const chutesModels = {
 		contextWindow: 163840,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0.23,
 		outputPrice: 0.9,
 		description:
@@ -107,8 +97,6 @@ export const chutesModels = {
 		contextWindow: 163840,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 1.0,
 		outputPrice: 3.0,
 		description:
@@ -119,8 +107,6 @@ export const chutesModels = {
 		contextWindow: 163840,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0.25,
 		outputPrice: 0.35,
 		description:
@@ -131,8 +117,6 @@ export const chutesModels = {
 		contextWindow: 131072, // From Groq
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0,
 		outputPrice: 0,
 		description: "Unsloth Llama 3.3 70B Instruct model.",
@@ -142,8 +126,6 @@ export const chutesModels = {
 		contextWindow: 512000,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0,
 		outputPrice: 0,
 		description: "ChutesAI Llama 4 Scout 17B Instruct model, 512K context.",
@@ -153,8 +135,6 @@ export const chutesModels = {
 		contextWindow: 128000,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0,
 		outputPrice: 0,
 		description: "Unsloth Mistral Nemo Instruct model.",
@@ -164,8 +144,6 @@ export const chutesModels = {
 		contextWindow: 131072,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0,
 		outputPrice: 0,
 		description: "Unsloth Gemma 3 12B IT model.",
@@ -175,8 +153,6 @@ export const chutesModels = {
 		contextWindow: 131072,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0,
 		outputPrice: 0,
 		description: "Nous DeepHermes 3 Llama 3 8B Preview model.",
@@ -186,8 +162,6 @@ export const chutesModels = {
 		contextWindow: 131072,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0,
 		outputPrice: 0,
 		description: "Unsloth Gemma 3 4B IT model.",
@@ -197,8 +171,6 @@ export const chutesModels = {
 		contextWindow: 131072,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0,
 		outputPrice: 0,
 		description: "Nvidia Llama 3.3 Nemotron Super 49B model.",
@@ -208,8 +180,6 @@ export const chutesModels = {
 		contextWindow: 131072,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0,
 		outputPrice: 0,
 		description: "Nvidia Llama 3.1 Nemotron Ultra 253B model.",
@@ -219,8 +189,6 @@ export const chutesModels = {
 		contextWindow: 256000,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0,
 		outputPrice: 0,
 		description: "ChutesAI Llama 4 Maverick 17B Instruct FP8 model.",
@@ -230,8 +198,6 @@ export const chutesModels = {
 		contextWindow: 163840,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0,
 		outputPrice: 0,
 		description: "DeepSeek V3 Base model.",
@@ -241,8 +207,6 @@ export const chutesModels = {
 		contextWindow: 163840,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0,
 		outputPrice: 0,
 		description: "DeepSeek R1 Zero model.",
@@ -252,8 +216,6 @@ export const chutesModels = {
 		contextWindow: 163840,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0,
 		outputPrice: 0,
 		description: "DeepSeek V3 (0324) model.",
@@ -263,8 +225,6 @@ export const chutesModels = {
 		contextWindow: 262144,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0,
 		outputPrice: 0,
 		description: "Qwen3 235B A22B Instruct 2507 model with 262K context window.",
@@ -274,8 +234,6 @@ export const chutesModels = {
 		contextWindow: 40960,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0,
 		outputPrice: 0,
 		description: "Qwen3 235B A22B model.",
@@ -285,8 +243,6 @@ export const chutesModels = {
 		contextWindow: 40960,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0,
 		outputPrice: 0,
 		description: "Qwen3 32B model.",
@@ -296,8 +252,6 @@ export const chutesModels = {
 		contextWindow: 40960,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0,
 		outputPrice: 0,
 		description: "Qwen3 30B A3B model.",
@@ -307,8 +261,6 @@ export const chutesModels = {
 		contextWindow: 40960,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0,
 		outputPrice: 0,
 		description: "Qwen3 14B model.",
@@ -318,8 +270,6 @@ export const chutesModels = {
 		contextWindow: 40960,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0,
 		outputPrice: 0,
 		description: "Qwen3 8B model.",
@@ -329,8 +279,6 @@ export const chutesModels = {
 		contextWindow: 163840,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0,
 		outputPrice: 0,
 		description: "Microsoft MAI-DS-R1 FP8 model.",
@@ -340,8 +288,6 @@ export const chutesModels = {
 		contextWindow: 163840,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0,
 		outputPrice: 0,
 		description: "TNGTech DeepSeek R1T Chimera model.",
@@ -351,8 +297,6 @@ export const chutesModels = {
 		contextWindow: 151329,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0,
 		outputPrice: 0,
 		description:
@@ -363,8 +307,6 @@ export const chutesModels = {
 		contextWindow: 131072,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0,
 		outputPrice: 0,
 		description:
@@ -375,8 +317,6 @@ export const chutesModels = {
 		contextWindow: 131072,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 1,
 		outputPrice: 3,
 		description: "GLM-4.5-turbo model with 128K token context window, optimized for fast inference.",
@@ -386,8 +326,6 @@ export const chutesModels = {
 		contextWindow: 202752,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0,
 		outputPrice: 0,
 		description:
@@ -398,8 +336,6 @@ export const chutesModels = {
 		contextWindow: 202752,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 1.15,
 		outputPrice: 3.25,
 		description: "GLM-4.6-turbo model with 200K-token context window, optimized for fast inference.",
@@ -409,8 +345,6 @@ export const chutesModels = {
 		contextWindow: 128000,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0,
 		outputPrice: 0,
 		description:
@@ -421,8 +355,6 @@ export const chutesModels = {
 		contextWindow: 262144,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0,
 		outputPrice: 0,
 		description: "Qwen3 Coder 480B A35B Instruct FP8 model, optimized for coding tasks.",
@@ -432,8 +364,6 @@ export const chutesModels = {
 		contextWindow: 75000,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0.1481,
 		outputPrice: 0.5926,
 		description: "Moonshot AI Kimi K2 Instruct model with 75k context window.",
@@ -443,8 +373,6 @@ export const chutesModels = {
 		contextWindow: 262144,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0.1999,
 		outputPrice: 0.8001,
 		description: "Moonshot AI Kimi K2 Instruct 0905 model with 256k context window.",
@@ -454,8 +382,6 @@ export const chutesModels = {
 		contextWindow: 262144,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0.077968332,
 		outputPrice: 0.31202496,
 		description: "Qwen3 235B A22B Thinking 2507 model with 262K context window.",
@@ -465,8 +391,6 @@ export const chutesModels = {
 		contextWindow: 131072,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0,
 		outputPrice: 0,
 		description:
@@ -477,8 +401,6 @@ export const chutesModels = {
 		contextWindow: 131072,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0,
 		outputPrice: 0,
 		description:
@@ -489,8 +411,6 @@ export const chutesModels = {
 		contextWindow: 262144,
 		supportsImages: true,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0.16,
 		outputPrice: 0.65,
 		description:

+ 0 - 6
packages/types/src/providers/claude-code.ts

@@ -49,8 +49,6 @@ export const claudeCodeModels = {
 		contextWindow: 200_000,
 		supportsImages: true,
 		supportsPromptCache: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		supportsReasoningEffort: ["disable", "low", "medium", "high"],
 		reasoningEffort: "medium",
 		description: "Claude Haiku 4.5 - Fast and efficient with thinking",
@@ -60,8 +58,6 @@ export const claudeCodeModels = {
 		contextWindow: 200_000,
 		supportsImages: true,
 		supportsPromptCache: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		supportsReasoningEffort: ["disable", "low", "medium", "high"],
 		reasoningEffort: "medium",
 		description: "Claude Sonnet 4.5 - Balanced performance with thinking",
@@ -71,8 +67,6 @@ export const claudeCodeModels = {
 		contextWindow: 200_000,
 		supportsImages: true,
 		supportsPromptCache: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		supportsReasoningEffort: ["disable", "low", "medium", "high"],
 		reasoningEffort: "medium",
 		description: "Claude Opus 4.5 - Most capable with thinking",

+ 0 - 1
packages/types/src/providers/deepinfra.ts

@@ -8,7 +8,6 @@ export const deepInfraDefaultModelInfo: ModelInfo = {
 	contextWindow: 262144,
 	supportsImages: false,
 	supportsPromptCache: false,
-	supportsNativeTools: true,
 	inputPrice: 0.3,
 	outputPrice: 1.2,
 	description: "Qwen 3 Coder 480B A35B Instruct Turbo model, 256K context.",

+ 0 - 4
packages/types/src/providers/deepseek.ts

@@ -14,8 +14,6 @@ export const deepSeekModels = {
 		contextWindow: 128_000,
 		supportsImages: false,
 		supportsPromptCache: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0.28, // $0.28 per million tokens (cache miss) - Updated Dec 9, 2025
 		outputPrice: 0.42, // $0.42 per million tokens - Updated Dec 9, 2025
 		cacheWritesPrice: 0.28, // $0.28 per million tokens (cache miss) - Updated Dec 9, 2025
@@ -27,8 +25,6 @@ export const deepSeekModels = {
 		contextWindow: 128_000,
 		supportsImages: false,
 		supportsPromptCache: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		preserveReasoning: true,
 		inputPrice: 0.28, // $0.28 per million tokens (cache miss) - Updated Dec 9, 2025
 		outputPrice: 0.42, // $0.42 per million tokens - Updated Dec 9, 2025

+ 0 - 6
packages/types/src/providers/doubao.ts

@@ -8,8 +8,6 @@ export const doubaoModels = {
 		contextWindow: 128_000,
 		supportsImages: true,
 		supportsPromptCache: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0.0001, // $0.0001 per million tokens (cache miss)
 		outputPrice: 0.0004, // $0.0004 per million tokens
 		cacheWritesPrice: 0.0001, // $0.0001 per million tokens (cache miss)
@@ -21,8 +19,6 @@ export const doubaoModels = {
 		contextWindow: 128_000,
 		supportsImages: true,
 		supportsPromptCache: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0.0002, // $0.0002 per million tokens
 		outputPrice: 0.0008, // $0.0008 per million tokens
 		cacheWritesPrice: 0.0002, // $0.0002 per million
@@ -34,8 +30,6 @@ export const doubaoModels = {
 		contextWindow: 128_000,
 		supportsImages: true,
 		supportsPromptCache: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0.00015, // $0.00015 per million tokens
 		outputPrice: 0.0006, // $0.0006 per million tokens
 		cacheWritesPrice: 0.00015, // $0.00015 per million

+ 0 - 5
packages/types/src/providers/featherless.ts

@@ -13,7 +13,6 @@ export const featherlessModels = {
 		contextWindow: 32678,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
 		inputPrice: 0,
 		outputPrice: 0,
 		description: "DeepSeek V3 0324 model.",
@@ -23,7 +22,6 @@ export const featherlessModels = {
 		contextWindow: 32678,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
 		inputPrice: 0,
 		outputPrice: 0,
 		description: "DeepSeek R1 0528 model.",
@@ -33,7 +31,6 @@ export const featherlessModels = {
 		contextWindow: 32678,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
 		inputPrice: 0,
 		outputPrice: 0,
 		description: "Kimi K2 Instruct model.",
@@ -43,7 +40,6 @@ export const featherlessModels = {
 		contextWindow: 32678,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
 		inputPrice: 0,
 		outputPrice: 0,
 		description: "GPT-OSS 120B model.",
@@ -53,7 +49,6 @@ export const featherlessModels = {
 		contextWindow: 32678,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
 		inputPrice: 0,
 		outputPrice: 0,
 		description: "Qwen3 Coder 480B A35B Instruct model.",

+ 0 - 27
packages/types/src/providers/fireworks.ts

@@ -24,8 +24,6 @@ export const fireworksModels = {
 		contextWindow: 262144,
 		supportsImages: false,
 		supportsPromptCache: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0.6,
 		outputPrice: 2.5,
 		cacheReadsPrice: 0.15,
@@ -37,8 +35,6 @@ export const fireworksModels = {
 		contextWindow: 128000,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0.6,
 		outputPrice: 2.5,
 		description:
@@ -49,7 +45,6 @@ export const fireworksModels = {
 		contextWindow: 256000,
 		supportsImages: false,
 		supportsPromptCache: true,
-		supportsNativeTools: true,
 		supportsTemperature: true,
 		preserveReasoning: true,
 		defaultTemperature: 1.0,
@@ -64,8 +59,6 @@ export const fireworksModels = {
 		contextWindow: 204800,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0.3,
 		outputPrice: 1.2,
 		description:
@@ -76,8 +69,6 @@ export const fireworksModels = {
 		contextWindow: 256000,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0.22,
 		outputPrice: 0.88,
 		description: "Latest Qwen3 thinking model, competitive against the best closed source models in Jul 2025.",
@@ -87,8 +78,6 @@ export const fireworksModels = {
 		contextWindow: 256000,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0.45,
 		outputPrice: 1.8,
 		description: "Qwen3's most agentic code model to date.",
@@ -98,8 +87,6 @@ export const fireworksModels = {
 		contextWindow: 160000,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 3,
 		outputPrice: 8,
 		description:
@@ -110,8 +97,6 @@ export const fireworksModels = {
 		contextWindow: 128000,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0.9,
 		outputPrice: 0.9,
 		description:
@@ -122,8 +107,6 @@ export const fireworksModels = {
 		contextWindow: 163840,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0.56,
 		outputPrice: 1.68,
 		description:
@@ -134,8 +117,6 @@ export const fireworksModels = {
 		contextWindow: 128000,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0.55,
 		outputPrice: 2.19,
 		description:
@@ -146,8 +127,6 @@ export const fireworksModels = {
 		contextWindow: 128000,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0.55,
 		outputPrice: 2.19,
 		description:
@@ -158,8 +137,6 @@ export const fireworksModels = {
 		contextWindow: 198000,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0.55,
 		outputPrice: 2.19,
 		description:
@@ -170,8 +147,6 @@ export const fireworksModels = {
 		contextWindow: 128000,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0.07,
 		outputPrice: 0.3,
 		description:
@@ -182,8 +157,6 @@ export const fireworksModels = {
 		contextWindow: 128000,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0.15,
 		outputPrice: 0.6,
 		description:

+ 0 - 22
packages/types/src/providers/gemini.ts

@@ -10,8 +10,6 @@ export const geminiModels = {
 		maxTokens: 65_536,
 		contextWindow: 1_048_576,
 		supportsImages: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		supportsPromptCache: true,
 		supportsReasoningEffort: ["low", "high"],
 		reasoningEffort: "low",
@@ -37,8 +35,6 @@ export const geminiModels = {
 		maxTokens: 65_536,
 		contextWindow: 1_048_576,
 		supportsImages: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		supportsPromptCache: true,
 		supportsReasoningEffort: ["minimal", "low", "medium", "high"],
 		reasoningEffort: "medium",
@@ -55,8 +51,6 @@ export const geminiModels = {
 		maxTokens: 64_000,
 		contextWindow: 1_048_576,
 		supportsImages: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		supportsPromptCache: true,
 
 		inputPrice: 2.5, // This is the pricing for prompts above 200k tokens.
@@ -85,8 +79,6 @@ export const geminiModels = {
 		maxTokens: 65_535,
 		contextWindow: 1_048_576,
 		supportsImages: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		supportsPromptCache: true,
 
 		inputPrice: 2.5, // This is the pricing for prompts above 200k tokens.
@@ -114,8 +106,6 @@ export const geminiModels = {
 		maxTokens: 65_535,
 		contextWindow: 1_048_576,
 		supportsImages: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		supportsPromptCache: true,
 
 		inputPrice: 2.5, // This is the pricing for prompts above 200k tokens.
@@ -141,8 +131,6 @@ export const geminiModels = {
 		maxTokens: 65_535,
 		contextWindow: 1_048_576,
 		supportsImages: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		supportsPromptCache: true,
 
 		inputPrice: 2.5, // This is the pricing for prompts above 200k tokens.
@@ -172,8 +160,6 @@ export const geminiModels = {
 		maxTokens: 65_536,
 		contextWindow: 1_048_576,
 		supportsImages: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		supportsPromptCache: true,
 
 		inputPrice: 0.3,
@@ -187,8 +173,6 @@ export const geminiModels = {
 		maxTokens: 65_536,
 		contextWindow: 1_048_576,
 		supportsImages: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		supportsPromptCache: true,
 
 		inputPrice: 0.3,
@@ -202,8 +186,6 @@ export const geminiModels = {
 		maxTokens: 64_000,
 		contextWindow: 1_048_576,
 		supportsImages: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		supportsPromptCache: true,
 
 		inputPrice: 0.3,
@@ -219,8 +201,6 @@ export const geminiModels = {
 		maxTokens: 65_536,
 		contextWindow: 1_048_576,
 		supportsImages: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		supportsPromptCache: true,
 
 		inputPrice: 0.1,
@@ -234,8 +214,6 @@ export const geminiModels = {
 		maxTokens: 65_536,
 		contextWindow: 1_048_576,
 		supportsImages: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		supportsPromptCache: true,
 
 		inputPrice: 0.1,

+ 0 - 14
packages/types/src/providers/groq.ts

@@ -19,8 +19,6 @@ export const groqModels = {
 		contextWindow: 131072,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0.05,
 		outputPrice: 0.08,
 		description: "Meta Llama 3.1 8B Instant model, 128K context.",
@@ -30,8 +28,6 @@ export const groqModels = {
 		contextWindow: 131072,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0.59,
 		outputPrice: 0.79,
 		description: "Meta Llama 3.3 70B Versatile model, 128K context.",
@@ -41,8 +37,6 @@ export const groqModels = {
 		contextWindow: 131072,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0.11,
 		outputPrice: 0.34,
 		description: "Meta Llama 4 Scout 17B Instruct model, 128K context.",
@@ -52,8 +46,6 @@ export const groqModels = {
 		contextWindow: 131072,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0.29,
 		outputPrice: 0.59,
 		description: "Alibaba Qwen 3 32B model, 128K context.",
@@ -63,8 +55,6 @@ export const groqModels = {
 		contextWindow: 262144,
 		supportsImages: false,
 		supportsPromptCache: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0.6,
 		outputPrice: 2.5,
 		cacheReadsPrice: 0.15,
@@ -76,8 +66,6 @@ export const groqModels = {
 		contextWindow: 131072,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0.15,
 		outputPrice: 0.75,
 		description:
@@ -88,8 +76,6 @@ export const groqModels = {
 		contextWindow: 131072,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0.1,
 		outputPrice: 0.5,
 		description:

+ 0 - 4
packages/types/src/providers/io-intelligence.ts

@@ -18,7 +18,6 @@ export const ioIntelligenceModels = {
 		contextWindow: 128000,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
 		description: "DeepSeek R1 reasoning model",
 	},
 	"meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8": {
@@ -26,7 +25,6 @@ export const ioIntelligenceModels = {
 		contextWindow: 430000,
 		supportsImages: true,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
 		description: "Llama 4 Maverick 17B model",
 	},
 	"Intel/Qwen3-Coder-480B-A35B-Instruct-int4-mixed-ar": {
@@ -34,7 +32,6 @@ export const ioIntelligenceModels = {
 		contextWindow: 106000,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
 		description: "Qwen3 Coder 480B specialized for coding",
 	},
 	"openai/gpt-oss-120b": {
@@ -42,7 +39,6 @@ export const ioIntelligenceModels = {
 		contextWindow: 131072,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
 		description: "OpenAI GPT-OSS 120B model",
 	},
 } as const satisfies Record<string, ModelInfo>

+ 0 - 2
packages/types/src/providers/lite-llm.ts

@@ -8,8 +8,6 @@ export const litellmDefaultModelInfo: ModelInfo = {
 	contextWindow: 200_000,
 	supportsImages: true,
 	supportsPromptCache: true,
-	supportsNativeTools: true,
-	defaultToolProtocol: "native",
 	inputPrice: 3.0,
 	outputPrice: 15.0,
 	cacheWritesPrice: 3.75,

+ 0 - 2
packages/types/src/providers/lm-studio.ts

@@ -10,8 +10,6 @@ export const lMStudioDefaultModelInfo: ModelInfo = {
 	contextWindow: 200_000,
 	supportsImages: true,
 	supportsPromptCache: true,
-	supportsNativeTools: true,
-	defaultToolProtocol: "native",
 	inputPrice: 0,
 	outputPrice: 0,
 	cacheWritesPrice: 0,

+ 0 - 6
packages/types/src/providers/minimax.ts

@@ -13,8 +13,6 @@ export const minimaxModels = {
 		contextWindow: 192_000,
 		supportsImages: false,
 		supportsPromptCache: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		includedTools: ["search_and_replace"],
 		excludedTools: ["apply_diff"],
 		preserveReasoning: true,
@@ -30,8 +28,6 @@ export const minimaxModels = {
 		contextWindow: 192_000,
 		supportsImages: false,
 		supportsPromptCache: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		includedTools: ["search_and_replace"],
 		excludedTools: ["apply_diff"],
 		preserveReasoning: true,
@@ -47,8 +43,6 @@ export const minimaxModels = {
 		contextWindow: 192_000,
 		supportsImages: false,
 		supportsPromptCache: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		includedTools: ["search_and_replace"],
 		excludedTools: ["apply_diff"],
 		preserveReasoning: true,

+ 0 - 18
packages/types/src/providers/mistral.ts

@@ -11,8 +11,6 @@ export const mistralModels = {
 		contextWindow: 128_000,
 		supportsImages: true,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 2.0,
 		outputPrice: 5.0,
 	},
@@ -21,8 +19,6 @@ export const mistralModels = {
 		contextWindow: 131_000,
 		supportsImages: true,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0.4,
 		outputPrice: 2.0,
 	},
@@ -31,8 +27,6 @@ export const mistralModels = {
 		contextWindow: 131_000,
 		supportsImages: true,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0.4,
 		outputPrice: 2.0,
 	},
@@ -41,8 +35,6 @@ export const mistralModels = {
 		contextWindow: 256_000,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0.3,
 		outputPrice: 0.9,
 	},
@@ -51,8 +43,6 @@ export const mistralModels = {
 		contextWindow: 131_000,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 2.0,
 		outputPrice: 6.0,
 	},
@@ -61,8 +51,6 @@ export const mistralModels = {
 		contextWindow: 131_000,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0.1,
 		outputPrice: 0.1,
 	},
@@ -71,8 +59,6 @@ export const mistralModels = {
 		contextWindow: 131_000,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0.04,
 		outputPrice: 0.04,
 	},
@@ -81,8 +67,6 @@ export const mistralModels = {
 		contextWindow: 32_000,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0.2,
 		outputPrice: 0.6,
 	},
@@ -91,8 +75,6 @@ export const mistralModels = {
 		contextWindow: 131_000,
 		supportsImages: true,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 2.0,
 		outputPrice: 6.0,
 	},

+ 0 - 8
packages/types/src/providers/moonshot.ts

@@ -11,8 +11,6 @@ export const moonshotModels = {
 		contextWindow: 131_072,
 		supportsImages: false,
 		supportsPromptCache: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0.6, // $0.60 per million tokens (cache miss)
 		outputPrice: 2.5, // $2.50 per million tokens
 		cacheWritesPrice: 0, // $0 per million tokens (cache miss)
@@ -24,8 +22,6 @@ export const moonshotModels = {
 		contextWindow: 262144,
 		supportsImages: false,
 		supportsPromptCache: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0.6,
 		outputPrice: 2.5,
 		cacheReadsPrice: 0.15,
@@ -37,8 +33,6 @@ export const moonshotModels = {
 		contextWindow: 262_144,
 		supportsImages: false,
 		supportsPromptCache: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 2.4, // $2.40 per million tokens (cache miss)
 		outputPrice: 10, // $10.00 per million tokens
 		cacheWritesPrice: 0, // $0 per million tokens (cache miss)
@@ -50,8 +44,6 @@ export const moonshotModels = {
 		contextWindow: 262_144, // 262,144 tokens
 		supportsImages: false, // Text-only (no image/vision support)
 		supportsPromptCache: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0.6, // $0.60 per million tokens (cache miss)
 		outputPrice: 2.5, // $2.50 per million tokens
 		cacheWritesPrice: 0, // $0 per million tokens (cache miss)

+ 0 - 1
packages/types/src/providers/ollama.ts

@@ -8,7 +8,6 @@ export const ollamaDefaultModelInfo: ModelInfo = {
 	contextWindow: 200_000,
 	supportsImages: true,
 	supportsPromptCache: true,
-	supportsNativeTools: true,
 	inputPrice: 0,
 	outputPrice: 0,
 	cacheWritesPrice: 0,

+ 0 - 18
packages/types/src/providers/openai-codex.ts

@@ -27,8 +27,6 @@ export const openAiCodexModels = {
 	"gpt-5.1-codex-max": {
 		maxTokens: 128000,
 		contextWindow: 400000,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		includedTools: ["apply_patch"],
 		excludedTools: ["apply_diff", "write_to_file"],
 		supportsImages: true,
@@ -44,8 +42,6 @@ export const openAiCodexModels = {
 	"gpt-5.1-codex": {
 		maxTokens: 128000,
 		contextWindow: 400000,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		includedTools: ["apply_patch"],
 		excludedTools: ["apply_diff", "write_to_file"],
 		supportsImages: true,
@@ -61,8 +57,6 @@ export const openAiCodexModels = {
 	"gpt-5.2-codex": {
 		maxTokens: 128000,
 		contextWindow: 400000,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		includedTools: ["apply_patch"],
 		excludedTools: ["apply_diff", "write_to_file"],
 		supportsImages: true,
@@ -77,8 +71,6 @@ export const openAiCodexModels = {
 	"gpt-5.1": {
 		maxTokens: 128000,
 		contextWindow: 400000,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		includedTools: ["apply_patch"],
 		excludedTools: ["apply_diff", "write_to_file"],
 		supportsImages: true,
@@ -95,8 +87,6 @@ export const openAiCodexModels = {
 	"gpt-5": {
 		maxTokens: 128000,
 		contextWindow: 400000,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		includedTools: ["apply_patch"],
 		excludedTools: ["apply_diff", "write_to_file"],
 		supportsImages: true,
@@ -113,8 +103,6 @@ export const openAiCodexModels = {
 	"gpt-5-codex": {
 		maxTokens: 128000,
 		contextWindow: 400000,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		includedTools: ["apply_patch"],
 		excludedTools: ["apply_diff", "write_to_file"],
 		supportsImages: true,
@@ -130,8 +118,6 @@ export const openAiCodexModels = {
 	"gpt-5-codex-mini": {
 		maxTokens: 128000,
 		contextWindow: 400000,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		includedTools: ["apply_patch"],
 		excludedTools: ["apply_diff", "write_to_file"],
 		supportsImages: true,
@@ -147,8 +133,6 @@ export const openAiCodexModels = {
 	"gpt-5.1-codex-mini": {
 		maxTokens: 128000,
 		contextWindow: 400000,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		includedTools: ["apply_patch"],
 		excludedTools: ["apply_diff", "write_to_file"],
 		supportsImages: true,
@@ -163,8 +147,6 @@ export const openAiCodexModels = {
 	"gpt-5.2": {
 		maxTokens: 128000,
 		contextWindow: 400000,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		includedTools: ["apply_patch"],
 		excludedTools: ["apply_diff", "write_to_file"],
 		supportsImages: true,

+ 0 - 68
packages/types/src/providers/openai.ts

@@ -9,8 +9,6 @@ export const openAiNativeModels = {
 	"gpt-5.1-codex-max": {
 		maxTokens: 128000,
 		contextWindow: 400000,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		includedTools: ["apply_patch"],
 		excludedTools: ["apply_diff", "write_to_file"],
 		supportsImages: true,
@@ -29,8 +27,6 @@ export const openAiNativeModels = {
 	"gpt-5.2": {
 		maxTokens: 128000,
 		contextWindow: 400000,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		includedTools: ["apply_patch"],
 		excludedTools: ["apply_diff", "write_to_file"],
 		supportsImages: true,
@@ -52,8 +48,6 @@ export const openAiNativeModels = {
 	"gpt-5.2-codex": {
 		maxTokens: 128000,
 		contextWindow: 400000,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		includedTools: ["apply_patch"],
 		excludedTools: ["apply_diff", "write_to_file"],
 		supportsImages: true,
@@ -72,8 +66,6 @@ export const openAiNativeModels = {
 	"gpt-5.2-chat-latest": {
 		maxTokens: 16_384,
 		contextWindow: 128_000,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		includedTools: ["apply_patch"],
 		excludedTools: ["apply_diff", "write_to_file"],
 		supportsImages: true,
@@ -86,8 +78,6 @@ export const openAiNativeModels = {
 	"gpt-5.1": {
 		maxTokens: 128000,
 		contextWindow: 400000,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		includedTools: ["apply_patch"],
 		excludedTools: ["apply_diff", "write_to_file"],
 		supportsImages: true,
@@ -109,8 +99,6 @@ export const openAiNativeModels = {
 	"gpt-5.1-codex": {
 		maxTokens: 128000,
 		contextWindow: 400000,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		includedTools: ["apply_patch"],
 		excludedTools: ["apply_diff", "write_to_file"],
 		supportsImages: true,
@@ -128,8 +116,6 @@ export const openAiNativeModels = {
 	"gpt-5.1-codex-mini": {
 		maxTokens: 128000,
 		contextWindow: 400000,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		includedTools: ["apply_patch"],
 		excludedTools: ["apply_diff", "write_to_file"],
 		supportsImages: true,
@@ -146,8 +132,6 @@ export const openAiNativeModels = {
 	"gpt-5": {
 		maxTokens: 128000,
 		contextWindow: 400000,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		includedTools: ["apply_patch"],
 		excludedTools: ["apply_diff", "write_to_file"],
 		supportsImages: true,
@@ -168,8 +152,6 @@ export const openAiNativeModels = {
 	"gpt-5-mini": {
 		maxTokens: 128000,
 		contextWindow: 400000,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		includedTools: ["apply_patch"],
 		excludedTools: ["apply_diff", "write_to_file"],
 		supportsImages: true,
@@ -190,8 +172,6 @@ export const openAiNativeModels = {
 	"gpt-5-codex": {
 		maxTokens: 128000,
 		contextWindow: 400000,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		includedTools: ["apply_patch"],
 		excludedTools: ["apply_diff", "write_to_file"],
 		supportsImages: true,
@@ -208,8 +188,6 @@ export const openAiNativeModels = {
 	"gpt-5-nano": {
 		maxTokens: 128000,
 		contextWindow: 400000,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		includedTools: ["apply_patch"],
 		excludedTools: ["apply_diff", "write_to_file"],
 		supportsImages: true,
@@ -227,8 +205,6 @@ export const openAiNativeModels = {
 	"gpt-5-chat-latest": {
 		maxTokens: 128000,
 		contextWindow: 400000,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		includedTools: ["apply_patch"],
 		excludedTools: ["apply_diff", "write_to_file"],
 		supportsImages: true,
@@ -241,8 +217,6 @@ export const openAiNativeModels = {
 	"gpt-4.1": {
 		maxTokens: 32_768,
 		contextWindow: 1_047_576,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		includedTools: ["apply_patch"],
 		excludedTools: ["apply_diff", "write_to_file"],
 		supportsImages: true,
@@ -258,8 +232,6 @@ export const openAiNativeModels = {
 	"gpt-4.1-mini": {
 		maxTokens: 32_768,
 		contextWindow: 1_047_576,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		includedTools: ["apply_patch"],
 		excludedTools: ["apply_diff", "write_to_file"],
 		supportsImages: true,
@@ -275,8 +247,6 @@ export const openAiNativeModels = {
 	"gpt-4.1-nano": {
 		maxTokens: 32_768,
 		contextWindow: 1_047_576,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		includedTools: ["apply_patch"],
 		excludedTools: ["apply_diff", "write_to_file"],
 		supportsImages: true,
@@ -292,8 +262,6 @@ export const openAiNativeModels = {
 	o3: {
 		maxTokens: 100_000,
 		contextWindow: 200_000,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		supportsImages: true,
 		supportsPromptCache: true,
 		inputPrice: 2.0,
@@ -310,8 +278,6 @@ export const openAiNativeModels = {
 	"o3-high": {
 		maxTokens: 100_000,
 		contextWindow: 200_000,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		supportsImages: true,
 		supportsPromptCache: true,
 		inputPrice: 2.0,
@@ -323,8 +289,6 @@ export const openAiNativeModels = {
 	"o3-low": {
 		maxTokens: 100_000,
 		contextWindow: 200_000,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		supportsImages: true,
 		supportsPromptCache: true,
 		inputPrice: 2.0,
@@ -336,8 +300,6 @@ export const openAiNativeModels = {
 	"o4-mini": {
 		maxTokens: 100_000,
 		contextWindow: 200_000,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		supportsImages: true,
 		supportsPromptCache: true,
 		inputPrice: 1.1,
@@ -354,8 +316,6 @@ export const openAiNativeModels = {
 	"o4-mini-high": {
 		maxTokens: 100_000,
 		contextWindow: 200_000,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		supportsImages: true,
 		supportsPromptCache: true,
 		inputPrice: 1.1,
@@ -367,8 +327,6 @@ export const openAiNativeModels = {
 	"o4-mini-low": {
 		maxTokens: 100_000,
 		contextWindow: 200_000,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		supportsImages: true,
 		supportsPromptCache: true,
 		inputPrice: 1.1,
@@ -380,8 +338,6 @@ export const openAiNativeModels = {
 	"o3-mini": {
 		maxTokens: 100_000,
 		contextWindow: 200_000,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		supportsImages: false,
 		supportsPromptCache: true,
 		inputPrice: 1.1,
@@ -394,8 +350,6 @@ export const openAiNativeModels = {
 	"o3-mini-high": {
 		maxTokens: 100_000,
 		contextWindow: 200_000,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		supportsImages: false,
 		supportsPromptCache: true,
 		inputPrice: 1.1,
@@ -407,8 +361,6 @@ export const openAiNativeModels = {
 	"o3-mini-low": {
 		maxTokens: 100_000,
 		contextWindow: 200_000,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		supportsImages: false,
 		supportsPromptCache: true,
 		inputPrice: 1.1,
@@ -420,8 +372,6 @@ export const openAiNativeModels = {
 	o1: {
 		maxTokens: 100_000,
 		contextWindow: 200_000,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		supportsImages: true,
 		supportsPromptCache: true,
 		inputPrice: 15,
@@ -432,8 +382,6 @@ export const openAiNativeModels = {
 	"o1-preview": {
 		maxTokens: 32_768,
 		contextWindow: 128_000,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		supportsImages: true,
 		supportsPromptCache: true,
 		inputPrice: 15,
@@ -444,8 +392,6 @@ export const openAiNativeModels = {
 	"o1-mini": {
 		maxTokens: 65_536,
 		contextWindow: 128_000,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		supportsImages: true,
 		supportsPromptCache: true,
 		inputPrice: 1.1,
@@ -456,8 +402,6 @@ export const openAiNativeModels = {
 	"gpt-4o": {
 		maxTokens: 16_384,
 		contextWindow: 128_000,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		supportsImages: true,
 		supportsPromptCache: true,
 		inputPrice: 2.5,
@@ -471,8 +415,6 @@ export const openAiNativeModels = {
 	"gpt-4o-mini": {
 		maxTokens: 16_384,
 		contextWindow: 128_000,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		supportsImages: true,
 		supportsPromptCache: true,
 		inputPrice: 0.15,
@@ -486,8 +428,6 @@ export const openAiNativeModels = {
 	"codex-mini-latest": {
 		maxTokens: 16_384,
 		contextWindow: 200_000,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		supportsImages: false,
 		supportsPromptCache: false,
 		inputPrice: 1.5,
@@ -501,8 +441,6 @@ export const openAiNativeModels = {
 	"gpt-5-2025-08-07": {
 		maxTokens: 128000,
 		contextWindow: 400000,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		includedTools: ["apply_patch"],
 		excludedTools: ["apply_diff", "write_to_file"],
 		supportsImages: true,
@@ -523,8 +461,6 @@ export const openAiNativeModels = {
 	"gpt-5-mini-2025-08-07": {
 		maxTokens: 128000,
 		contextWindow: 400000,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		includedTools: ["apply_patch"],
 		excludedTools: ["apply_diff", "write_to_file"],
 		supportsImages: true,
@@ -545,8 +481,6 @@ export const openAiNativeModels = {
 	"gpt-5-nano-2025-08-07": {
 		maxTokens: 128000,
 		contextWindow: 400000,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		includedTools: ["apply_patch"],
 		excludedTools: ["apply_diff", "write_to_file"],
 		supportsImages: true,
@@ -570,8 +504,6 @@ export const openAiModelInfoSaneDefaults: ModelInfo = {
 	supportsPromptCache: false,
 	inputPrice: 0,
 	outputPrice: 0,
-	supportsNativeTools: true,
-	defaultToolProtocol: "native",
 }
 
 // https://learn.microsoft.com/en-us/azure/ai-services/openai/api-version-deprecation

+ 0 - 1
packages/types/src/providers/openrouter.ts

@@ -8,7 +8,6 @@ export const openRouterDefaultModelInfo: ModelInfo = {
 	contextWindow: 200_000,
 	supportsImages: true,
 	supportsPromptCache: true,
-	supportsNativeTools: true,
 	inputPrice: 3.0,
 	outputPrice: 15.0,
 	cacheWritesPrice: 3.75,

+ 0 - 4
packages/types/src/providers/qwen-code.ts

@@ -10,8 +10,6 @@ export const qwenCodeModels = {
 		contextWindow: 1_000_000,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0,
 		outputPrice: 0,
 		cacheWritesPrice: 0,
@@ -23,8 +21,6 @@ export const qwenCodeModels = {
 		contextWindow: 1_000_000,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0,
 		outputPrice: 0,
 		cacheWritesPrice: 0,

+ 0 - 2
packages/types/src/providers/requesty.ts

@@ -9,8 +9,6 @@ export const requestyDefaultModelInfo: ModelInfo = {
 	contextWindow: 200_000,
 	supportsImages: true,
 	supportsPromptCache: true,
-	supportsNativeTools: true,
-	defaultToolProtocol: "native",
 	inputPrice: 3.0,
 	outputPrice: 15.0,
 	cacheWritesPrice: 3.75,

+ 0 - 16
packages/types/src/providers/sambanova.ts

@@ -19,8 +19,6 @@ export const sambaNovaModels = {
 		contextWindow: 16384,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0.1,
 		outputPrice: 0.2,
 		description: "Meta Llama 3.1 8B Instruct model with 16K context window.",
@@ -30,8 +28,6 @@ export const sambaNovaModels = {
 		contextWindow: 131072,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0.6,
 		outputPrice: 1.2,
 		description: "Meta Llama 3.3 70B Instruct model with 128K context window.",
@@ -42,8 +38,6 @@ export const sambaNovaModels = {
 		supportsImages: false,
 		supportsPromptCache: false,
 		supportsReasoningBudget: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 5.0,
 		outputPrice: 7.0,
 		description: "DeepSeek R1 reasoning model with 32K context window.",
@@ -53,8 +47,6 @@ export const sambaNovaModels = {
 		contextWindow: 32768,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 3.0,
 		outputPrice: 4.5,
 		description: "DeepSeek V3 model with 32K context window.",
@@ -64,8 +56,6 @@ export const sambaNovaModels = {
 		contextWindow: 32768,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 3.0,
 		outputPrice: 4.5,
 		description: "DeepSeek V3.1 model with 32K context window.",
@@ -75,8 +65,6 @@ export const sambaNovaModels = {
 		contextWindow: 131072,
 		supportsImages: true,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0.63,
 		outputPrice: 1.8,
 		description: "Meta Llama 4 Maverick 17B 128E Instruct model with 128K context window.",
@@ -86,8 +74,6 @@ export const sambaNovaModels = {
 		contextWindow: 8192,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0.4,
 		outputPrice: 0.8,
 		description: "Alibaba Qwen 3 32B model with 8K context window.",
@@ -97,8 +83,6 @@ export const sambaNovaModels = {
 		contextWindow: 131072,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0.22,
 		outputPrice: 0.59,
 		description: "OpenAI gpt oss 120b model with 128k context window.",

+ 0 - 1
packages/types/src/providers/unbound.ts

@@ -7,7 +7,6 @@ export const unboundDefaultModelInfo: ModelInfo = {
 	contextWindow: 200_000,
 	supportsImages: true,
 	supportsPromptCache: true,
-	supportsNativeTools: true,
 	inputPrice: 3.0,
 	outputPrice: 15.0,
 	cacheWritesPrice: 3.75,

+ 0 - 1
packages/types/src/providers/vercel-ai-gateway.ts

@@ -90,7 +90,6 @@ export const vercelAiGatewayDefaultModelInfo: ModelInfo = {
 	contextWindow: 200000,
 	supportsImages: true,
 	supportsPromptCache: true,
-	supportsNativeTools: true,
 	inputPrice: 3,
 	outputPrice: 15,
 	cacheWritesPrice: 3.75,

+ 0 - 71
packages/types/src/providers/vertex.ts

@@ -10,8 +10,6 @@ export const vertexModels = {
 		maxTokens: 65_536,
 		contextWindow: 1_048_576,
 		supportsImages: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		supportsPromptCache: true,
 		supportsReasoningEffort: ["low", "high"],
 		reasoningEffort: "low",
@@ -37,8 +35,6 @@ export const vertexModels = {
 		maxTokens: 65_536,
 		contextWindow: 1_048_576,
 		supportsImages: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		supportsPromptCache: true,
 		supportsReasoningEffort: ["minimal", "low", "medium", "high"],
 		reasoningEffort: "medium",
@@ -54,8 +50,6 @@ export const vertexModels = {
 		maxTokens: 65_535,
 		contextWindow: 1_048_576,
 		supportsImages: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		supportsPromptCache: true,
 
 		inputPrice: 0.15,
@@ -68,8 +62,6 @@ export const vertexModels = {
 		maxTokens: 65_535,
 		contextWindow: 1_048_576,
 		supportsImages: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		supportsPromptCache: true,
 
 		inputPrice: 0.15,
@@ -79,8 +71,6 @@ export const vertexModels = {
 		maxTokens: 64_000,
 		contextWindow: 1_048_576,
 		supportsImages: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		supportsPromptCache: true,
 
 		inputPrice: 0.3,
@@ -94,8 +84,6 @@ export const vertexModels = {
 		maxTokens: 65_535,
 		contextWindow: 1_048_576,
 		supportsImages: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		supportsPromptCache: false,
 
 		inputPrice: 0.15,
@@ -108,8 +96,6 @@ export const vertexModels = {
 		maxTokens: 65_535,
 		contextWindow: 1_048_576,
 		supportsImages: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		supportsPromptCache: false,
 
 		inputPrice: 0.15,
@@ -119,8 +105,6 @@ export const vertexModels = {
 		maxTokens: 65_535,
 		contextWindow: 1_048_576,
 		supportsImages: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		supportsPromptCache: true,
 
 		inputPrice: 2.5,
@@ -130,8 +114,6 @@ export const vertexModels = {
 		maxTokens: 65_535,
 		contextWindow: 1_048_576,
 		supportsImages: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		supportsPromptCache: true,
 
 		inputPrice: 2.5,
@@ -141,8 +123,6 @@ export const vertexModels = {
 		maxTokens: 65_535,
 		contextWindow: 1_048_576,
 		supportsImages: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		supportsPromptCache: true,
 
 		inputPrice: 2.5,
@@ -154,8 +134,6 @@ export const vertexModels = {
 		maxTokens: 64_000,
 		contextWindow: 1_048_576,
 		supportsImages: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		supportsPromptCache: true,
 
 		inputPrice: 2.5,
@@ -182,8 +160,6 @@ export const vertexModels = {
 		maxTokens: 65_535,
 		contextWindow: 1_048_576,
 		supportsImages: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		supportsPromptCache: false,
 
 		inputPrice: 0,
@@ -193,8 +169,6 @@ export const vertexModels = {
 		maxTokens: 8192,
 		contextWindow: 2_097_152,
 		supportsImages: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		supportsPromptCache: false,
 
 		inputPrice: 0,
@@ -204,8 +178,6 @@ export const vertexModels = {
 		maxTokens: 8192,
 		contextWindow: 1_048_576,
 		supportsImages: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		supportsPromptCache: true,
 
 		inputPrice: 0.15,
@@ -215,8 +187,6 @@ export const vertexModels = {
 		maxTokens: 8192,
 		contextWindow: 1_048_576,
 		supportsImages: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		supportsPromptCache: false,
 
 		inputPrice: 0.075,
@@ -226,8 +196,6 @@ export const vertexModels = {
 		maxTokens: 8192,
 		contextWindow: 32_768,
 		supportsImages: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		supportsPromptCache: false,
 
 		inputPrice: 0,
@@ -237,8 +205,6 @@ export const vertexModels = {
 		maxTokens: 8192,
 		contextWindow: 1_048_576,
 		supportsImages: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		supportsPromptCache: true,
 
 		inputPrice: 0.075,
@@ -248,8 +214,6 @@ export const vertexModels = {
 		maxTokens: 8192,
 		contextWindow: 2_097_152,
 		supportsImages: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		supportsPromptCache: false,
 
 		inputPrice: 1.25,
@@ -260,8 +224,6 @@ export const vertexModels = {
 		contextWindow: 200_000, // Default 200K, extendable to 1M with beta flag 'context-1m-2025-08-07'
 		supportsImages: true,
 		supportsPromptCache: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 3.0, // $3 per million input tokens (≤200K context)
 		outputPrice: 15.0, // $15 per million output tokens (≤200K context)
 		cacheWritesPrice: 3.75, // $3.75 per million tokens
@@ -283,8 +245,6 @@ export const vertexModels = {
 		contextWindow: 200_000, // Default 200K, extendable to 1M with beta flag 'context-1m-2025-08-07'
 		supportsImages: true,
 		supportsPromptCache: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 3.0, // $3 per million input tokens (≤200K context)
 		outputPrice: 15.0, // $15 per million output tokens (≤200K context)
 		cacheWritesPrice: 3.75, // $3.75 per million tokens
@@ -306,8 +266,6 @@ export const vertexModels = {
 		contextWindow: 200_000,
 		supportsImages: true,
 		supportsPromptCache: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 1.0,
 		outputPrice: 5.0,
 		cacheWritesPrice: 1.25,
@@ -319,8 +277,6 @@ export const vertexModels = {
 		contextWindow: 200_000,
 		supportsImages: true,
 		supportsPromptCache: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 5.0,
 		outputPrice: 25.0,
 		cacheWritesPrice: 6.25,
@@ -332,8 +288,6 @@ export const vertexModels = {
 		contextWindow: 200_000,
 		supportsImages: true,
 		supportsPromptCache: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 15.0,
 		outputPrice: 75.0,
 		cacheWritesPrice: 18.75,
@@ -345,8 +299,6 @@ export const vertexModels = {
 		contextWindow: 200_000,
 		supportsImages: true,
 		supportsPromptCache: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 15.0,
 		outputPrice: 75.0,
 		cacheWritesPrice: 18.75,
@@ -357,8 +309,6 @@ export const vertexModels = {
 		contextWindow: 200_000,
 		supportsImages: true,
 		supportsPromptCache: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 3.0,
 		outputPrice: 15.0,
 		cacheWritesPrice: 3.75,
@@ -371,8 +321,6 @@ export const vertexModels = {
 		contextWindow: 200_000,
 		supportsImages: true,
 		supportsPromptCache: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 3.0,
 		outputPrice: 15.0,
 		cacheWritesPrice: 3.75,
@@ -383,8 +331,6 @@ export const vertexModels = {
 		contextWindow: 200_000,
 		supportsImages: true,
 		supportsPromptCache: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 3.0,
 		outputPrice: 15.0,
 		cacheWritesPrice: 3.75,
@@ -395,8 +341,6 @@ export const vertexModels = {
 		contextWindow: 200_000,
 		supportsImages: true,
 		supportsPromptCache: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 3.0,
 		outputPrice: 15.0,
 		cacheWritesPrice: 3.75,
@@ -407,8 +351,6 @@ export const vertexModels = {
 		contextWindow: 200_000,
 		supportsImages: false,
 		supportsPromptCache: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 1.0,
 		outputPrice: 5.0,
 		cacheWritesPrice: 1.25,
@@ -419,8 +361,6 @@ export const vertexModels = {
 		contextWindow: 200_000,
 		supportsImages: true,
 		supportsPromptCache: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 15.0,
 		outputPrice: 75.0,
 		cacheWritesPrice: 18.75,
@@ -431,8 +371,6 @@ export const vertexModels = {
 		contextWindow: 200_000,
 		supportsImages: true,
 		supportsPromptCache: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0.25,
 		outputPrice: 1.25,
 		cacheWritesPrice: 0.3,
@@ -442,8 +380,6 @@ export const vertexModels = {
 		maxTokens: 64_000,
 		contextWindow: 1_048_576,
 		supportsImages: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		supportsPromptCache: true,
 
 		inputPrice: 0.1,
@@ -458,7 +394,6 @@ export const vertexModels = {
 		contextWindow: 131072,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
 		inputPrice: 0.35,
 		outputPrice: 1.15,
 		description: "Meta Llama 4 Maverick 17B Instruct model, 128K context.",
@@ -468,7 +403,6 @@ export const vertexModels = {
 		contextWindow: 163_840,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
 		inputPrice: 1.35,
 		outputPrice: 5.4,
 		description: "DeepSeek R1 (0528). Available in us-central1",
@@ -478,7 +412,6 @@ export const vertexModels = {
 		contextWindow: 163_840,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
 		inputPrice: 0.6,
 		outputPrice: 1.7,
 		description: "DeepSeek V3.1. Available in us-west2",
@@ -488,7 +421,6 @@ export const vertexModels = {
 		contextWindow: 131_072,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
 		inputPrice: 0.15,
 		outputPrice: 0.6,
 		description: "OpenAI gpt-oss 120B. Available in us-central1",
@@ -498,7 +430,6 @@ export const vertexModels = {
 		contextWindow: 131_072,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
 		inputPrice: 0.075,
 		outputPrice: 0.3,
 		description: "OpenAI gpt-oss 20B. Available in us-central1",
@@ -508,7 +439,6 @@ export const vertexModels = {
 		contextWindow: 262_144,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
 		inputPrice: 1.0,
 		outputPrice: 4.0,
 		description: "Qwen3 Coder 480B A35B Instruct. Available in us-south1",
@@ -518,7 +448,6 @@ export const vertexModels = {
 		contextWindow: 262_144,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
 		inputPrice: 0.25,
 		outputPrice: 1.0,
 		description: "Qwen3 235B A22B Instruct. Available in us-south1",

+ 0 - 16
packages/types/src/providers/xai.ts

@@ -11,8 +11,6 @@ export const xaiModels = {
 		contextWindow: 256_000,
 		supportsImages: true,
 		supportsPromptCache: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0.2,
 		outputPrice: 1.5,
 		cacheWritesPrice: 0.02,
@@ -26,8 +24,6 @@ export const xaiModels = {
 		contextWindow: 2_000_000,
 		supportsImages: true,
 		supportsPromptCache: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0.2,
 		outputPrice: 0.5,
 		cacheWritesPrice: 0.05,
@@ -42,8 +38,6 @@ export const xaiModels = {
 		contextWindow: 2_000_000,
 		supportsImages: true,
 		supportsPromptCache: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0.2,
 		outputPrice: 0.5,
 		cacheWritesPrice: 0.05,
@@ -58,8 +52,6 @@ export const xaiModels = {
 		contextWindow: 2_000_000,
 		supportsImages: true,
 		supportsPromptCache: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0.2,
 		outputPrice: 0.5,
 		cacheWritesPrice: 0.05,
@@ -74,8 +66,6 @@ export const xaiModels = {
 		contextWindow: 2_000_000,
 		supportsImages: true,
 		supportsPromptCache: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0.2,
 		outputPrice: 0.5,
 		cacheWritesPrice: 0.05,
@@ -90,8 +80,6 @@ export const xaiModels = {
 		contextWindow: 256_000,
 		supportsImages: true,
 		supportsPromptCache: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 3.0,
 		outputPrice: 15.0,
 		cacheWritesPrice: 0.75,
@@ -105,8 +93,6 @@ export const xaiModels = {
 		contextWindow: 131072,
 		supportsImages: true,
 		supportsPromptCache: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0.3,
 		outputPrice: 0.5,
 		cacheWritesPrice: 0.07,
@@ -122,8 +108,6 @@ export const xaiModels = {
 		contextWindow: 131072,
 		supportsImages: true,
 		supportsPromptCache: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 3.0,
 		outputPrice: 15.0,
 		cacheWritesPrice: 0.75,

+ 0 - 34
packages/types/src/providers/zai.ts

@@ -16,8 +16,6 @@ export const internationalZAiModels = {
 		contextWindow: 131_072,
 		supportsImages: false,
 		supportsPromptCache: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0.6,
 		outputPrice: 2.2,
 		cacheWritesPrice: 0,
@@ -30,8 +28,6 @@ export const internationalZAiModels = {
 		contextWindow: 131_072,
 		supportsImages: false,
 		supportsPromptCache: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0.2,
 		outputPrice: 1.1,
 		cacheWritesPrice: 0,
@@ -44,8 +40,6 @@ export const internationalZAiModels = {
 		contextWindow: 131_072,
 		supportsImages: false,
 		supportsPromptCache: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 2.2,
 		outputPrice: 8.9,
 		cacheWritesPrice: 0,
@@ -58,8 +52,6 @@ export const internationalZAiModels = {
 		contextWindow: 131_072,
 		supportsImages: false,
 		supportsPromptCache: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 1.1,
 		outputPrice: 4.5,
 		cacheWritesPrice: 0,
@@ -71,8 +63,6 @@ export const internationalZAiModels = {
 		contextWindow: 131_072,
 		supportsImages: false,
 		supportsPromptCache: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0,
 		outputPrice: 0,
 		cacheWritesPrice: 0,
@@ -84,8 +74,6 @@ export const internationalZAiModels = {
 		contextWindow: 131_072,
 		supportsImages: true,
 		supportsPromptCache: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0.6,
 		outputPrice: 1.8,
 		cacheWritesPrice: 0,
@@ -98,8 +86,6 @@ export const internationalZAiModels = {
 		contextWindow: 200_000,
 		supportsImages: false,
 		supportsPromptCache: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0.6,
 		outputPrice: 2.2,
 		cacheWritesPrice: 0,
@@ -112,8 +98,6 @@ export const internationalZAiModels = {
 		contextWindow: 200_000,
 		supportsImages: false,
 		supportsPromptCache: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		supportsReasoningEffort: ["disable", "medium"],
 		reasoningEffort: "medium",
 		preserveReasoning: true,
@@ -129,8 +113,6 @@ export const internationalZAiModels = {
 		contextWindow: 131_072,
 		supportsImages: false,
 		supportsPromptCache: false,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0.1,
 		outputPrice: 0.1,
 		cacheWritesPrice: 0,
@@ -147,8 +129,6 @@ export const mainlandZAiModels = {
 		contextWindow: 131_072,
 		supportsImages: false,
 		supportsPromptCache: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0.29,
 		outputPrice: 1.14,
 		cacheWritesPrice: 0,
@@ -161,8 +141,6 @@ export const mainlandZAiModels = {
 		contextWindow: 131_072,
 		supportsImages: false,
 		supportsPromptCache: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0.1,
 		outputPrice: 0.6,
 		cacheWritesPrice: 0,
@@ -175,8 +153,6 @@ export const mainlandZAiModels = {
 		contextWindow: 131_072,
 		supportsImages: false,
 		supportsPromptCache: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0.29,
 		outputPrice: 1.14,
 		cacheWritesPrice: 0,
@@ -189,8 +165,6 @@ export const mainlandZAiModels = {
 		contextWindow: 131_072,
 		supportsImages: false,
 		supportsPromptCache: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0.1,
 		outputPrice: 0.6,
 		cacheWritesPrice: 0,
@@ -202,8 +176,6 @@ export const mainlandZAiModels = {
 		contextWindow: 131_072,
 		supportsImages: false,
 		supportsPromptCache: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0,
 		outputPrice: 0,
 		cacheWritesPrice: 0,
@@ -215,8 +187,6 @@ export const mainlandZAiModels = {
 		contextWindow: 131_072,
 		supportsImages: true,
 		supportsPromptCache: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0.29,
 		outputPrice: 0.93,
 		cacheWritesPrice: 0,
@@ -229,8 +199,6 @@ export const mainlandZAiModels = {
 		contextWindow: 204_800,
 		supportsImages: false,
 		supportsPromptCache: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		inputPrice: 0.29,
 		outputPrice: 1.14,
 		cacheWritesPrice: 0,
@@ -243,8 +211,6 @@ export const mainlandZAiModels = {
 		contextWindow: 204_800,
 		supportsImages: false,
 		supportsPromptCache: true,
-		supportsNativeTools: true,
-		defaultToolProtocol: "native",
 		supportsReasoningEffort: ["disable", "medium"],
 		reasoningEffort: "medium",
 		preserveReasoning: true,

+ 0 - 45
packages/types/src/tool.ts

@@ -57,48 +57,3 @@ export const toolUsageSchema = z.record(
 )
 
 export type ToolUsage = z.infer<typeof toolUsageSchema>
-
-/**
- * Tool protocol constants
- */
-export const TOOL_PROTOCOL = {
-	XML: "xml",
-	NATIVE: "native",
-} as const
-
-/**
- * Tool protocol type for system prompt generation
- * Derived from TOOL_PROTOCOL constants to ensure type safety
- */
-export type ToolProtocol = (typeof TOOL_PROTOCOL)[keyof typeof TOOL_PROTOCOL]
-
-/**
- * Default model info properties for native tool support.
- * Used to merge with cached model info that may lack these fields.
- * Router providers (Requesty, Unbound, LiteLLM) assume all models support native tools.
- */
-export const NATIVE_TOOL_DEFAULTS = {
-	supportsNativeTools: true,
-	defaultToolProtocol: TOOL_PROTOCOL.NATIVE,
-} as const
-
-/**
- * Checks if the protocol is native (non-XML).
- *
- * @param protocol - The tool protocol to check
- * @returns True if protocol is native
- */
-export function isNativeProtocol(protocol: ToolProtocol): boolean {
-	return protocol === TOOL_PROTOCOL.NATIVE
-}
-
-/**
- * Gets the effective protocol from settings or falls back to the default XML.
- * This function is safe to use in webview-accessible code as it doesn't depend on vscode module.
- *
- * @param toolProtocol - Optional tool protocol from settings
- * @returns The effective tool protocol (defaults to "xml")
- */
-export function getEffectiveProtocol(toolProtocol?: ToolProtocol): ToolProtocol {
-	return toolProtocol || TOOL_PROTOCOL.XML
-}

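For orientation, a minimal sketch of what the deleted tool.ts helpers did and what their removal implies at a call site. The type alias and default below mirror the removed code above; the before/after call-site lines are hypothetical and not taken from this repository.

	// Sketch only: mirrors the helpers removed above; settings and nativeDefs are placeholder names.
	type ToolProtocol = "xml" | "native"
	const getEffectiveProtocol = (p?: ToolProtocol): ToolProtocol => p ?? "xml" // old fallback was XML
	// Before: const tools = getEffectiveProtocol(settings.toolProtocol) === "native" ? nativeDefs : undefined
	// After:  native tool definitions are always sent, so the protocol branch disappears.
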
+ 50 - 0
src/__tests__/history-resume-delegation.spec.ts

@@ -288,6 +288,56 @@ describe("History resume delegation - parent metadata transitions", () => {
 		expect((injectedMsg.content[0] as any).tool_use_id).toBe("toolu_abc123")
 	})
 
+	it("reopenParentFromDelegation injects plain text when no new_task tool_use exists in API history", async () => {
+		const provider = {
+			contextProxy: { globalStorageUri: { fsPath: "/storage" } },
+			getTaskWithId: vi.fn().mockResolvedValue({
+				historyItem: {
+					id: "p-no-tool",
+					status: "delegated",
+					awaitingChildId: "c-no-tool",
+					childIds: [],
+					ts: 100,
+					task: "Parent without tool_use",
+					tokensIn: 0,
+					tokensOut: 0,
+					totalCost: 0,
+				},
+			}),
+			emit: vi.fn(),
+			getCurrentTask: vi.fn(() => ({ taskId: "c-no-tool" })),
+			removeClineFromStack: vi.fn().mockResolvedValue(undefined),
+			createTaskWithHistoryItem: vi.fn().mockResolvedValue({
+				taskId: "p-no-tool",
+				resumeAfterDelegation: vi.fn().mockResolvedValue(undefined),
+				overwriteClineMessages: vi.fn().mockResolvedValue(undefined),
+				overwriteApiConversationHistory: vi.fn().mockResolvedValue(undefined),
+			}),
+			updateTaskHistory: vi.fn().mockResolvedValue([]),
+		} as unknown as ClineProvider
+
+		// No assistant tool_use in history
+		const existingUiMessages = [{ type: "ask", ask: "tool", text: "subtask request", ts: 50 }]
+		const existingApiMessages = [{ role: "user", content: [{ type: "text", text: "Create a subtask" }], ts: 40 }]
+
+		vi.mocked(readTaskMessages).mockResolvedValue(existingUiMessages as any)
+		vi.mocked(readApiMessages).mockResolvedValue(existingApiMessages as any)
+
+		await (ClineProvider.prototype as any).reopenParentFromDelegation.call(provider, {
+			parentTaskId: "p-no-tool",
+			childTaskId: "c-no-tool",
+			completionResultSummary: "Subtask completed without tool_use",
+		})
+
+		const apiCall = vi.mocked(saveApiMessages).mock.calls[0][0]
+		// A plain user text note should be appended (not a tool_result, since there was no tool_use)
+		expect(apiCall.messages).toHaveLength(2)
+		const injected = apiCall.messages[1]
+		expect(injected.role).toBe("user")
+		expect((injected.content[0] as any).type).toBe("text")
+		expect((injected.content[0] as any).text).toContain("Subtask c-no-tool completed")
+	})
+
 	it("reopenParentFromDelegation sets skipPrevResponseIdOnce via resumeAfterDelegation", async () => {
 		const parentInstance: any = {
 			skipPrevResponseIdOnce: false,

+ 11 - 7
src/__tests__/nested-delegation-resume.spec.ts

@@ -187,18 +187,21 @@ describe("Nested delegation resume (A → B → C)", () => {
 			type: "tool_use",
 			name: "attempt_completion",
 			params: { result: "C finished" },
+			nativeArgs: { result: "C finished" },
 			partial: false,
 		} as any
 
 		const askFinishSubTaskApproval = vi.fn(async () => true)
+		const handleError = vi.fn(async (_action: string, err: Error) => {
+			// Fail fast in this test if the tool hits an error path.
+			throw err
+		})
 
 		await attemptCompletionTool.handle(clineC, blockC, {
 			askApproval: vi.fn(),
-			handleError: vi.fn(),
+			handleError,
 			pushToolResult: vi.fn(),
-			removeClosingTag: vi.fn((_, v?: string) => v ?? ""),
 			askFinishSubTaskApproval,
-			toolProtocol: "xml",
 			toolDescription: () => "desc",
 		} as any)
 
@@ -231,20 +234,21 @@ describe("Nested delegation resume (A → B → C)", () => {
 			type: "tool_use",
 			name: "attempt_completion",
 			params: { result: "B finished" },
+			nativeArgs: { result: "B finished" },
 			partial: false,
 		} as any
 
 		await attemptCompletionTool.handle(clineB, blockB, {
 			askApproval: vi.fn(),
-			handleError: vi.fn(),
+			handleError,
 			pushToolResult: vi.fn(),
-			removeClosingTag: vi.fn((_, v?: string) => v ?? ""),
 			askFinishSubTaskApproval,
-			toolProtocol: "xml",
 			toolDescription: () => "desc",
 		} as any)
 
-		// After B completes, A must be current
+		// After B completes, A should become current
+		// Note: delegation resume may fall back to a non-tool_result user message when the parent history
+		// does not contain a new_task tool_use. This should not prevent reopening the parent.
 		expect(currentActiveId).toBe("A")
 
 		// Ensure no resume_task asks were scheduled: verified indirectly by startTask:false on both hops
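For reference, the nativeArgs field added to these fixtures carries the tool's arguments as structured data (native tool calls deliver JSON arguments rather than XML-parsed string params). A minimal sketch of the block shape the tests build; the interface name below is illustrative, not taken from the source:

interface ToolUseBlock {
	type: "tool_use"
	name: string
	params: Record<string, string> // legacy string params, still populated by the fixtures
	nativeArgs?: Record<string, unknown> // structured arguments from the native tool call (assumed semantics)
	partial: boolean
}

const exampleBlock: ToolUseBlock = {
	type: "tool_use",
	name: "attempt_completion",
	params: { result: "B finished" },
	nativeArgs: { result: "B finished" },
	partial: false,
}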

+ 2 - 7
src/api/index.ts

@@ -1,7 +1,7 @@
 import { Anthropic } from "@anthropic-ai/sdk"
 import OpenAI from "openai"
 
-import type { ProviderSettings, ModelInfo, ToolProtocol } from "@roo-code/types"
+import type { ProviderSettings, ModelInfo } from "@roo-code/types"
 
 import { ApiStream } from "./transform/stream"
 
@@ -83,16 +83,11 @@ export interface ApiHandlerCreateMessageMetadata {
 	 * Can be "none", "auto", "required", or a specific tool choice.
 	 */
 	tool_choice?: OpenAI.Chat.ChatCompletionCreateParams["tool_choice"]
-	/**
-	 * The tool protocol being used (XML or Native).
-	 * Used by providers to determine whether to include native tool definitions.
-	 */
-	toolProtocol?: ToolProtocol
+	// Tool calling is native-only.
 	/**
 	 * Controls whether the model can return multiple tool calls in a single response.
 	 * When true, parallel tool calls are enabled (OpenAI's parallel_tool_calls=true).
 	 * When false (default), only one tool call is returned per response.
-	 * Only applies when toolProtocol is "native".
 	 */
 	parallelToolCalls?: boolean
 	/**
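For reference, a minimal sketch of how a caller now fills this metadata with the protocol switch gone; the handler, system prompt, and messages are assumed to exist in scope, and the tool definition is illustrative:

const metadata: ApiHandlerCreateMessageMetadata = {
	taskId: "task-1",
	// Native tool definitions are passed directly; there is no toolProtocol field anymore.
	tools: [
		{
			type: "function",
			function: {
				name: "read_file",
				description: "Read a file",
				parameters: { type: "object", properties: {} },
			},
		},
	],
	tool_choice: "auto",
	parallelToolCalls: false, // providers send parallel_tool_calls: false unless this is true
}

const stream = handler.createMessage(systemPrompt, messages, metadata)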

+ 7 - 6
src/api/providers/__tests__/anthropic-vertex.spec.ts

@@ -162,7 +162,7 @@ describe("VertexHandler", () => {
 			})
 
 			expect(mockCreate).toHaveBeenCalledWith(
-				{
+				expect.objectContaining({
 					model: "claude-3-5-sonnet-v2@20241022",
 					max_tokens: 8192,
 					temperature: 0,
@@ -191,7 +191,10 @@ describe("VertexHandler", () => {
 						},
 					],
 					stream: true,
-				},
+					// Tools are now always present (minimum 6 from ALWAYS_AVAILABLE_TOOLS)
+					tools: expect.any(Array),
+					tool_choice: expect.any(Object),
+				}),
 				undefined,
 			)
 		})
@@ -1200,13 +1203,11 @@ describe("VertexHandler", () => {
 			)
 		})
 
-		it("should include tools even when toolProtocol is set to xml (user preference now ignored)", async () => {
-			// XML protocol deprecation: user preference is now ignored when model supports native tools
+		it("should include tools when tools are provided", async () => {
 			handler = new AnthropicVertexHandler({
 				apiModelId: "claude-3-5-sonnet-v2@20241022",
 				vertexProjectId: "test-project",
 				vertexRegion: "us-central1",
-				toolProtocol: "xml",
 			})
 
 			const mockStream = [
@@ -1242,7 +1243,7 @@ describe("VertexHandler", () => {
 				// Just consume
 			}
 
-			// Native is forced when supportsNativeTools===true, so tools should still be included
+			// Tool calling is request-driven: if tools are provided, we should include them.
 			expect(mockCreate).toHaveBeenCalledWith(
 				expect.objectContaining({
 					tools: expect.arrayContaining([

+ 15 - 19
src/api/providers/__tests__/anthropic.spec.ts

@@ -420,8 +420,7 @@ describe("AnthropicHandler", () => {
 			},
 		]
 
-		it("should include tools in request by default (native is default)", async () => {
-			// Handler uses native protocol by default via model's defaultToolProtocol
+		it("should include tools in request when tools are provided", async () => {
 			const stream = handler.createMessage(systemPrompt, messages, {
 				taskId: "test-task",
 				tools: mockTools,
@@ -451,11 +450,9 @@ describe("AnthropicHandler", () => {
 			)
 		})
 
-		it("should include tools even when toolProtocol is set to xml (user preference now ignored)", async () => {
-			// XML protocol deprecation: user preference is now ignored when model supports native tools
+		it("should include tools when tools are provided", async () => {
 			const xmlHandler = new AnthropicHandler({
 				...mockOptions,
-				toolProtocol: "xml",
 			})
 
 			const stream = xmlHandler.createMessage(systemPrompt, messages, {
@@ -468,7 +465,7 @@ describe("AnthropicHandler", () => {
 				// Just consume
 			}
 
-			// Native is forced when supportsNativeTools===true, so tools should still be included
+			// Tool calling is request-driven: if tools are provided, we should include them.
 			expect(mockCreate).toHaveBeenCalledWith(
 				expect.objectContaining({
 					tools: expect.arrayContaining([
@@ -481,7 +478,7 @@ describe("AnthropicHandler", () => {
 			)
 		})
 
-		it("should not include tools when no tools are provided", async () => {
+		it("should always include tools in request (tools are always present after PR #10841)", async () => {
 			// Handler uses native protocol by default
 			const stream = handler.createMessage(systemPrompt, messages, {
 				taskId: "test-task",
@@ -492,9 +489,11 @@ describe("AnthropicHandler", () => {
 				// Just consume
 			}
 
+			// Tools are now always present (minimum 6 from ALWAYS_AVAILABLE_TOOLS)
 			expect(mockCreate).toHaveBeenCalledWith(
-				expect.not.objectContaining({
-					tools: expect.anything(),
+				expect.objectContaining({
+					tools: expect.any(Array),
+					tool_choice: expect.any(Object),
 				}),
 				expect.anything(),
 			)
@@ -542,7 +541,7 @@ describe("AnthropicHandler", () => {
 			)
 		})
 
-		it("should omit both tools and tool_choice when tool_choice is 'none'", async () => {
+		it("should set tool_choice to undefined when tool_choice is 'none' (tools are still passed)", async () => {
 			// Handler uses native protocol by default
 			const stream = handler.createMessage(systemPrompt, messages, {
 				taskId: "test-task",
@@ -555,16 +554,13 @@ describe("AnthropicHandler", () => {
 				// Just consume
 			}
 
-			// Verify that neither tools nor tool_choice are included in the request
-			expect(mockCreate).toHaveBeenCalledWith(
-				expect.not.objectContaining({
-					tools: expect.anything(),
-				}),
-				expect.anything(),
-			)
+			// Tools are now always present (minimum 6 from ALWAYS_AVAILABLE_TOOLS)
+			// When tool_choice is 'none', the converter returns undefined for tool_choice
+			// but tools are still passed since they're always present
 			expect(mockCreate).toHaveBeenCalledWith(
-				expect.not.objectContaining({
-					tool_choice: expect.anything(),
+				expect.objectContaining({
+					tools: expect.any(Array),
+					tool_choice: undefined,
 				}),
 				expect.anything(),
 			)
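The reworked assertions encode one rule: tools are always forwarded, and a tool_choice of "none" maps to undefined on the Anthropic request rather than dropping the tools array. A rough sketch of that mapping, assuming a converter along these lines exists (the function name is illustrative):

function toAnthropicToolChoice(choice?: "none" | "auto" | "required") {
	if (choice === "none") return undefined // tools stay in the request; the model just is not steered toward them
	if (choice === "required") return { type: "any" as const }
	return { type: "auto" as const }
}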

+ 4 - 4
src/api/providers/__tests__/base-openai-compatible-provider.spec.ts

@@ -57,7 +57,7 @@ describe("BaseOpenAiCompatibleProvider", () => {
 		vi.restoreAllMocks()
 	})
 
-	describe("XmlMatcher reasoning tags", () => {
+	describe("TagMatcher reasoning tags", () => {
 		it("should handle reasoning tags (<think>) from stream", async () => {
 			mockCreate.mockImplementationOnce(() => {
 				return {
@@ -87,7 +87,7 @@ describe("BaseOpenAiCompatibleProvider", () => {
 				chunks.push(chunk)
 			}
 
-			// XmlMatcher yields chunks as they're processed
+			// TagMatcher yields chunks as they're processed
 			expect(chunks).toEqual([
 				{ type: "reasoning", text: "Let me think" },
 				{ type: "reasoning", text: " about this" },
@@ -124,7 +124,7 @@ describe("BaseOpenAiCompatibleProvider", () => {
 				chunks.push(chunk)
 			}
 
-			// When a complete tag arrives in one chunk, XmlMatcher may not parse it
+			// When a complete tag arrives in one chunk, TagMatcher may not parse it
 			// This test documents the actual behavior
 			expect(chunks.length).toBeGreaterThan(0)
 			expect(chunks[0]).toEqual({ type: "text", text: "Regular text before " })
@@ -151,7 +151,7 @@ describe("BaseOpenAiCompatibleProvider", () => {
 				chunks.push(chunk)
 			}
 
-			// XmlMatcher should handle incomplete tags and flush remaining content
+			// TagMatcher should handle incomplete tags and flush remaining content
 			expect(chunks.length).toBeGreaterThan(0)
 			expect(
 				chunks.some(

+ 17 - 53
src/api/providers/__tests__/bedrock-native-tools.spec.ts

@@ -242,11 +242,7 @@ describe("AwsBedrockHandler Native Tool Calling", () => {
 	})
 
 	describe("createMessage with native tools", () => {
-		it("should include toolConfig when tools are provided with native protocol", async () => {
-			// Override model info to support native tools
-			const modelInfo = handler.getModel().info
-			;(modelInfo as any).supportsNativeTools = true
-
+		it("should include toolConfig when tools are provided", async () => {
 			const handlerWithNativeTools = new AwsBedrockHandler({
 				apiModelId: "anthropic.claude-3-5-sonnet-20241022-v2:0",
 				awsAccessKey: "test-access-key",
@@ -254,18 +250,9 @@ describe("AwsBedrockHandler Native Tool Calling", () => {
 				awsRegion: "us-east-1",
 			})
 
-			// Manually set supportsNativeTools
-			const getModelOriginal = handlerWithNativeTools.getModel.bind(handlerWithNativeTools)
-			handlerWithNativeTools.getModel = () => {
-				const model = getModelOriginal()
-				model.info.supportsNativeTools = true
-				return model
-			}
-
 			const metadata: ApiHandlerCreateMessageMetadata = {
 				taskId: "test-task",
 				tools: testTools,
-				toolProtocol: "native",
 			}
 
 			const generator = handlerWithNativeTools.createMessage(
@@ -285,7 +272,7 @@ describe("AwsBedrockHandler Native Tool Calling", () => {
 			expect(commandArg.toolConfig.toolChoice).toEqual({ auto: {} })
 		})
 
-		it("should not include toolConfig when toolProtocol is xml", async () => {
+		it("should always include toolConfig (tools are always present after PR #10841)", async () => {
 			const handlerWithNativeTools = new AwsBedrockHandler({
 				apiModelId: "anthropic.claude-3-5-sonnet-20241022-v2:0",
 				awsAccessKey: "test-access-key",
@@ -293,18 +280,9 @@ describe("AwsBedrockHandler Native Tool Calling", () => {
 				awsRegion: "us-east-1",
 			})
 
-			// Manually set supportsNativeTools
-			const getModelOriginal = handlerWithNativeTools.getModel.bind(handlerWithNativeTools)
-			handlerWithNativeTools.getModel = () => {
-				const model = getModelOriginal()
-				model.info.supportsNativeTools = true
-				return model
-			}
-
 			const metadata: ApiHandlerCreateMessageMetadata = {
 				taskId: "test-task",
-				tools: testTools,
-				toolProtocol: "xml", // XML protocol should not use native tools
+				// Even without explicit tools, tools are always present (minimum 6 from ALWAYS_AVAILABLE_TOOLS)
 			}
 
 			const generator = handlerWithNativeTools.createMessage(
@@ -318,10 +296,13 @@ describe("AwsBedrockHandler Native Tool Calling", () => {
 			expect(mockConverseStreamCommand).toHaveBeenCalled()
 			const commandArg = mockConverseStreamCommand.mock.calls[0][0] as any
 
-			expect(commandArg.toolConfig).toBeUndefined()
+			// Tools are now always present
+			expect(commandArg.toolConfig).toBeDefined()
+			expect(commandArg.toolConfig.tools).toBeDefined()
+			expect(commandArg.toolConfig.toolChoice).toEqual({ auto: {} })
 		})
 
-		it("should not include toolConfig when tool_choice is none", async () => {
+		it("should include toolConfig with undefined toolChoice when tool_choice is none", async () => {
 			const handlerWithNativeTools = new AwsBedrockHandler({
 				apiModelId: "anthropic.claude-3-5-sonnet-20241022-v2:0",
 				awsAccessKey: "test-access-key",
@@ -329,18 +310,9 @@ describe("AwsBedrockHandler Native Tool Calling", () => {
 				awsRegion: "us-east-1",
 			})
 
-			// Manually set supportsNativeTools
-			const getModelOriginal = handlerWithNativeTools.getModel.bind(handlerWithNativeTools)
-			handlerWithNativeTools.getModel = () => {
-				const model = getModelOriginal()
-				model.info.supportsNativeTools = true
-				return model
-			}
-
 			const metadata: ApiHandlerCreateMessageMetadata = {
 				taskId: "test-task",
 				tools: testTools,
-				toolProtocol: "native",
 				tool_choice: "none", // Explicitly disable tool use
 			}
 
@@ -355,7 +327,9 @@ describe("AwsBedrockHandler Native Tool Calling", () => {
 			expect(mockConverseStreamCommand).toHaveBeenCalled()
 			const commandArg = mockConverseStreamCommand.mock.calls[0][0] as any
 
-			expect(commandArg.toolConfig).toBeUndefined()
+			// toolConfig is still provided but toolChoice is undefined for "none"
+			expect(commandArg.toolConfig).toBeDefined()
+			expect(commandArg.toolConfig.toolChoice).toBeUndefined()
 		})
 
 		it("should include fine-grained tool streaming beta for Claude models with native tools", async () => {
@@ -366,18 +340,9 @@ describe("AwsBedrockHandler Native Tool Calling", () => {
 				awsRegion: "us-east-1",
 			})
 
-			// Manually set supportsNativeTools
-			const getModelOriginal = handlerWithNativeTools.getModel.bind(handlerWithNativeTools)
-			handlerWithNativeTools.getModel = () => {
-				const model = getModelOriginal()
-				model.info.supportsNativeTools = true
-				return model
-			}
-
 			const metadata: ApiHandlerCreateMessageMetadata = {
 				taskId: "test-task",
 				tools: testTools,
-				toolProtocol: "native",
 			}
 
 			const generator = handlerWithNativeTools.createMessage(
@@ -398,7 +363,7 @@ describe("AwsBedrockHandler Native Tool Calling", () => {
 			)
 		})
 
-		it("should not include fine-grained tool streaming beta when not using native tools", async () => {
+		it("should always include fine-grained tool streaming beta for Claude models", async () => {
 			const handlerWithNativeTools = new AwsBedrockHandler({
 				apiModelId: "anthropic.claude-3-5-sonnet-20241022-v2:0",
 				awsAccessKey: "test-access-key",
@@ -422,12 +387,11 @@ describe("AwsBedrockHandler Native Tool Calling", () => {
 			expect(mockConverseStreamCommand).toHaveBeenCalled()
 			const commandArg = mockConverseStreamCommand.mock.calls[0][0] as any
 
-			// Should not include anthropic_beta when not using native tools
-			if (commandArg.additionalModelRequestFields?.anthropic_beta) {
-				expect(commandArg.additionalModelRequestFields.anthropic_beta).not.toContain(
-					"fine-grained-tool-streaming-2025-05-14",
-				)
-			}
+			// Should always include anthropic_beta with fine-grained-tool-streaming for Claude models
+			expect(commandArg.additionalModelRequestFields).toBeDefined()
+			expect(commandArg.additionalModelRequestFields.anthropic_beta).toContain(
+				"fine-grained-tool-streaming-2025-05-14",
+			)
 		})
 	})
 

+ 5 - 2
src/api/providers/__tests__/bedrock-reasoning.spec.ts

@@ -221,8 +221,11 @@ describe("AwsBedrockHandler - Extended Thinking", () => {
 			expect(capturedPayload).toBeDefined()
 			expect(capturedPayload.inferenceConfig).not.toHaveProperty("topP")
 
-			// Verify that additionalModelRequestFields is not present or empty
-			expect(capturedPayload.additionalModelRequestFields).toBeUndefined()
+			// Verify that additionalModelRequestFields contains fine-grained-tool-streaming for Claude models
+			expect(capturedPayload.additionalModelRequestFields).toBeDefined()
+			expect(capturedPayload.additionalModelRequestFields.anthropic_beta).toContain(
+				"fine-grained-tool-streaming-2025-05-14",
+			)
 		})
 
 		it("should enable reasoning when enableReasoningEffort is true in settings", async () => {

+ 26 - 10
src/api/providers/__tests__/bedrock.spec.ts

@@ -754,14 +754,17 @@ describe("AwsBedrockHandler", () => {
 			expect(mockConverseStreamCommand).toHaveBeenCalled()
 			const commandArg = mockConverseStreamCommand.mock.calls[0][0] as any
 
-			// Should include anthropic_beta in additionalModelRequestFields
+			// Should include anthropic_beta in additionalModelRequestFields with both 1M context and fine-grained-tool-streaming
 			expect(commandArg.additionalModelRequestFields).toBeDefined()
-			expect(commandArg.additionalModelRequestFields.anthropic_beta).toEqual(["context-1m-2025-08-07"])
+			expect(commandArg.additionalModelRequestFields.anthropic_beta).toContain("context-1m-2025-08-07")
+			expect(commandArg.additionalModelRequestFields.anthropic_beta).toContain(
+				"fine-grained-tool-streaming-2025-05-14",
+			)
 			// Should not include anthropic_version since thinking is not enabled
 			expect(commandArg.additionalModelRequestFields.anthropic_version).toBeUndefined()
 		})
 
-		it("should not include anthropic_beta parameter when 1M context is disabled", async () => {
+		it("should not include 1M context beta when 1M context is disabled but still include fine-grained-tool-streaming", async () => {
 			const handler = new AwsBedrockHandler({
 				apiModelId: BEDROCK_1M_CONTEXT_MODEL_IDS[0],
 				awsAccessKey: "test",
@@ -784,11 +787,16 @@ describe("AwsBedrockHandler", () => {
 			expect(mockConverseStreamCommand).toHaveBeenCalled()
 			const commandArg = mockConverseStreamCommand.mock.calls[0][0] as any
 
-			// Should not include anthropic_beta in additionalModelRequestFields
-			expect(commandArg.additionalModelRequestFields).toBeUndefined()
+			// Should include anthropic_beta with fine-grained-tool-streaming for Claude models
+			expect(commandArg.additionalModelRequestFields).toBeDefined()
+			expect(commandArg.additionalModelRequestFields.anthropic_beta).toContain(
+				"fine-grained-tool-streaming-2025-05-14",
+			)
+			// Should NOT include 1M context beta
+			expect(commandArg.additionalModelRequestFields.anthropic_beta).not.toContain("context-1m-2025-08-07")
 		})
 
-		it("should not include anthropic_beta parameter for non-Claude Sonnet 4 models", async () => {
+		it("should not include 1M context beta for non-Claude Sonnet 4 models but still include fine-grained-tool-streaming", async () => {
 			const handler = new AwsBedrockHandler({
 				apiModelId: "anthropic.claude-3-5-sonnet-20241022-v2:0",
 				awsAccessKey: "test",
@@ -811,8 +819,13 @@ describe("AwsBedrockHandler", () => {
 			expect(mockConverseStreamCommand).toHaveBeenCalled()
 			const commandArg = mockConverseStreamCommand.mock.calls[0][0] as any
 
-			// Should not include anthropic_beta for non-Sonnet 4 models
-			expect(commandArg.additionalModelRequestFields).toBeUndefined()
+			// Should include anthropic_beta with fine-grained-tool-streaming for Claude models (even non-Sonnet 4)
+			expect(commandArg.additionalModelRequestFields).toBeDefined()
+			expect(commandArg.additionalModelRequestFields.anthropic_beta).toContain(
+				"fine-grained-tool-streaming-2025-05-14",
+			)
+			// Should NOT include 1M context beta for non-Sonnet 4 models
+			expect(commandArg.additionalModelRequestFields.anthropic_beta).not.toContain("context-1m-2025-08-07")
 		})
 
 		it("should enable 1M context window with cross-region inference for Claude Sonnet 4", () => {
@@ -859,9 +872,12 @@ describe("AwsBedrockHandler", () => {
 				mockConverseStreamCommand.mock.calls.length - 1
 			][0] as any
 
-			// Should include anthropic_beta in additionalModelRequestFields
+			// Should include anthropic_beta in additionalModelRequestFields with both 1M context and fine-grained-tool-streaming
 			expect(commandArg.additionalModelRequestFields).toBeDefined()
-			expect(commandArg.additionalModelRequestFields.anthropic_beta).toEqual(["context-1m-2025-08-07"])
+			expect(commandArg.additionalModelRequestFields.anthropic_beta).toContain("context-1m-2025-08-07")
+			expect(commandArg.additionalModelRequestFields.anthropic_beta).toContain(
+				"fine-grained-tool-streaming-2025-05-14",
+			)
 			// Should not include anthropic_version since thinking is not enabled
 			expect(commandArg.additionalModelRequestFields.anthropic_version).toBeUndefined()
 			// Model ID should have cross-region prefix
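Taken together, the Bedrock assertions describe one behavior: Claude models always get the fine-grained tool streaming beta, and the 1M-context beta is layered on top only when the model and settings allow it. A sketch of how the beta list could be assembled; the helper name and flags are assumptions, not the handler's actual code:

function buildAnthropicBetas(isClaude: boolean, use1MContext: boolean): string[] | undefined {
	if (!isClaude) return undefined
	const betas = ["fine-grained-tool-streaming-2025-05-14"]
	if (use1MContext) {
		betas.push("context-1m-2025-08-07")
	}
	return betas // ends up in additionalModelRequestFields.anthropic_beta on the ConverseStream command
}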

+ 54 - 45
src/api/providers/__tests__/claude-code.spec.ts

@@ -112,22 +112,25 @@ describe("ClaudeCodeHandler", () => {
 		// Verify createStreamingMessage was called with correct parameters
 		// Default model has reasoning effort of "medium" so thinking should be enabled
 		// With interleaved thinking, maxTokens comes from model definition (32768 for claude-sonnet-4-5)
-		expect(mockCreateStreamingMessage).toHaveBeenCalledWith({
-			accessToken: "test-access-token",
-			model: "claude-sonnet-4-5",
-			systemPrompt,
-			messages,
-			maxTokens: 32768, // model's maxTokens from claudeCodeModels definition
-			thinking: {
-				type: "enabled",
-				budget_tokens: 32000, // medium reasoning budget_tokens
-			},
-			tools: undefined,
-			toolChoice: undefined,
-			metadata: {
-				user_id: "user_abc123_account_def456_session_ghi789",
-			},
-		})
+		expect(mockCreateStreamingMessage).toHaveBeenCalledWith(
+			expect.objectContaining({
+				accessToken: "test-access-token",
+				model: "claude-sonnet-4-5",
+				systemPrompt,
+				messages,
+				maxTokens: 32768, // model's maxTokens from claudeCodeModels definition
+				thinking: {
+					type: "enabled",
+					budget_tokens: 32000, // medium reasoning budget_tokens
+				},
+				// Tools are now always present (minimum 6 from ALWAYS_AVAILABLE_TOOLS)
+				tools: expect.any(Array),
+				toolChoice: expect.any(Object),
+				metadata: {
+					user_id: "user_abc123_account_def456_session_ghi789",
+				},
+			}),
+		)
 	})
 
 	test("should disable thinking when reasoningEffort is set to disable", async () => {
@@ -155,19 +158,22 @@ describe("ClaudeCodeHandler", () => {
 		await iterator.next()
 
 		// Verify createStreamingMessage was called with thinking disabled
-		expect(mockCreateStreamingMessage).toHaveBeenCalledWith({
-			accessToken: "test-access-token",
-			model: "claude-sonnet-4-5",
-			systemPrompt,
-			messages,
-			maxTokens: 32768, // model maxTokens from claudeCodeModels definition
-			thinking: { type: "disabled" },
-			tools: undefined,
-			toolChoice: undefined,
-			metadata: {
-				user_id: "user_abc123_account_def456_session_ghi789",
-			},
-		})
+		expect(mockCreateStreamingMessage).toHaveBeenCalledWith(
+			expect.objectContaining({
+				accessToken: "test-access-token",
+				model: "claude-sonnet-4-5",
+				systemPrompt,
+				messages,
+				maxTokens: 32768, // model maxTokens from claudeCodeModels definition
+				thinking: { type: "disabled" },
+				// Tools are now always present (minimum 6 from ALWAYS_AVAILABLE_TOOLS)
+				tools: expect.any(Array),
+				toolChoice: expect.any(Object),
+				metadata: {
+					user_id: "user_abc123_account_def456_session_ghi789",
+				},
+			}),
+		)
 	})
 
 	test("should use high reasoning config when reasoningEffort is high", async () => {
@@ -196,22 +202,25 @@ describe("ClaudeCodeHandler", () => {
 
 		// Verify createStreamingMessage was called with high thinking config
 		// With interleaved thinking, maxTokens comes from model definition (32768 for claude-sonnet-4-5)
-		expect(mockCreateStreamingMessage).toHaveBeenCalledWith({
-			accessToken: "test-access-token",
-			model: "claude-sonnet-4-5",
-			systemPrompt,
-			messages,
-			maxTokens: 32768, // model's maxTokens from claudeCodeModels definition
-			thinking: {
-				type: "enabled",
-				budget_tokens: 64000, // high reasoning budget_tokens
-			},
-			tools: undefined,
-			toolChoice: undefined,
-			metadata: {
-				user_id: "user_abc123_account_def456_session_ghi789",
-			},
-		})
+		expect(mockCreateStreamingMessage).toHaveBeenCalledWith(
+			expect.objectContaining({
+				accessToken: "test-access-token",
+				model: "claude-sonnet-4-5",
+				systemPrompt,
+				messages,
+				maxTokens: 32768, // model's maxTokens from claudeCodeModels definition
+				thinking: {
+					type: "enabled",
+					budget_tokens: 64000, // high reasoning budget_tokens
+				},
+				// Tools are now always present (minimum 6 from ALWAYS_AVAILABLE_TOOLS)
+				tools: expect.any(Array),
+				toolChoice: expect.any(Object),
+				metadata: {
+					user_id: "user_abc123_account_def456_session_ghi789",
+				},
+			}),
+		)
 	})
 
 	test("should handle text content from streaming", async () => {

+ 9 - 10
src/api/providers/__tests__/deepinfra.spec.ts

@@ -199,7 +199,6 @@ describe("DeepInfraHandler", () => {
 			const messageGenerator = handler.createMessage("test prompt", [], {
 				taskId: "test-task-id",
 				tools: testTools,
-				toolProtocol: "native",
 			})
 			await messageGenerator.next()
 
@@ -213,9 +212,11 @@ describe("DeepInfraHandler", () => {
 							}),
 						}),
 					]),
-					parallel_tool_calls: false,
 				}),
 			)
+			// parallel_tool_calls should be false when not explicitly set
+			const callArgs = mockCreate.mock.calls[0][0]
+			expect(callArgs).toHaveProperty("parallel_tool_calls", false)
 		})
 
 		it("should include tool_choice when provided", async () => {
@@ -232,7 +233,6 @@ describe("DeepInfraHandler", () => {
 			const messageGenerator = handler.createMessage("test prompt", [], {
 				taskId: "test-task-id",
 				tools: testTools,
-				toolProtocol: "native",
 				tool_choice: "auto",
 			})
 			await messageGenerator.next()
@@ -244,7 +244,7 @@ describe("DeepInfraHandler", () => {
 			)
 		})
 
-		it("should not include tools when toolProtocol is xml", async () => {
+		it("should always include tools and tool_choice in request (tools are always present after PR #10841)", async () => {
 			mockWithResponse.mockResolvedValueOnce({
 				data: {
 					[Symbol.asyncIterator]: () => ({
@@ -257,14 +257,15 @@ describe("DeepInfraHandler", () => {
 
 			const messageGenerator = handler.createMessage("test prompt", [], {
 				taskId: "test-task-id",
-				tools: testTools,
-				toolProtocol: "xml",
 			})
 			await messageGenerator.next()
 
 			const callArgs = mockCreate.mock.calls[mockCreate.mock.calls.length - 1][0]
-			expect(callArgs).not.toHaveProperty("tools")
-			expect(callArgs).not.toHaveProperty("tool_choice")
+			// Tools are now always present (minimum 6 from ALWAYS_AVAILABLE_TOOLS)
+			expect(callArgs).toHaveProperty("tools")
+			expect(callArgs).toHaveProperty("tool_choice")
+			// parallel_tool_calls should be false when not explicitly set
+			expect(callArgs).toHaveProperty("parallel_tool_calls", false)
 		})
 
 		it("should yield tool_call_partial chunks during streaming", async () => {
@@ -321,7 +322,6 @@ describe("DeepInfraHandler", () => {
 			const stream = handler.createMessage("test prompt", [], {
 				taskId: "test-task-id",
 				tools: testTools,
-				toolProtocol: "native",
 			})
 
 			const chunks = []
@@ -360,7 +360,6 @@ describe("DeepInfraHandler", () => {
 			const messageGenerator = handler.createMessage("test prompt", [], {
 				taskId: "test-task-id",
 				tools: testTools,
-				toolProtocol: "native",
 				parallelToolCalls: true,
 			})
 			await messageGenerator.next()
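The parallel_tool_calls assertions here (and in the LM Studio, Qwen Code, and Unbound files below) follow one rule: the OpenAI-style request body carries parallel_tool_calls: false unless the caller opts in through metadata. A rough sketch of the mapping; the function name and baseChatParams are assumed, and only the metadata fields come from the interface earlier in this diff:

function withToolParams(baseChatParams: Record<string, unknown>, metadata: ApiHandlerCreateMessageMetadata) {
	return {
		...baseChatParams, // model, messages, stream, etc.
		tools: metadata.tools,
		tool_choice: metadata.tool_choice,
		parallel_tool_calls: metadata.parallelToolCalls === true, // false unless explicitly enabled
	}
}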

+ 0 - 1
src/api/providers/__tests__/fireworks.spec.ts

@@ -129,7 +129,6 @@ describe("FireworksHandler", () => {
 				contextWindow: 256000,
 				supportsImages: false,
 				supportsPromptCache: true,
-				supportsNativeTools: true,
 				supportsTemperature: true,
 				preserveReasoning: true,
 				defaultTemperature: 1.0,

+ 7 - 2
src/api/providers/__tests__/gemini-handler.spec.ts

@@ -5,7 +5,10 @@ import { GeminiHandler } from "../gemini"
 import type { ApiHandlerOptions } from "../../../shared/api"
 
 describe("GeminiHandler backend support", () => {
-	it("passes tools for URL context and grounding in config", async () => {
+	it("createMessage uses function declarations (URL context and grounding are only for completePrompt)", async () => {
+		// URL context and grounding are mutually exclusive with function declarations
+		// in Gemini API, so createMessage only uses function declarations.
+		// URL context/grounding are only added in completePrompt.
 		const options = {
 			apiProvider: "gemini",
 			enableUrlContext: true,
@@ -17,7 +20,9 @@ describe("GeminiHandler backend support", () => {
 		handler["client"].models.generateContentStream = stub
 		await handler.createMessage("instr", [] as any).next()
 		const config = stub.mock.calls[0][0].config
-		expect(config.tools).toEqual([{ urlContext: {} }, { googleSearch: {} }])
+		// createMessage always uses function declarations only
+		// (tools are always present from ALWAYS_AVAILABLE_TOOLS)
+		expect(config.tools).toEqual([{ functionDeclarations: expect.any(Array) }])
 	})
 
 	it("completePrompt passes config overrides without tools when URL context and grounding disabled", async () => {

+ 0 - 2
src/api/providers/__tests__/io-intelligence.spec.ts

@@ -255,7 +255,6 @@ describe("IOIntelligenceHandler", () => {
 			description: "Llama 4 Maverick 17B model",
 			supportsImages: true,
 			supportsPromptCache: false,
-			supportsNativeTools: true,
 		})
 	})
 
@@ -272,7 +271,6 @@ describe("IOIntelligenceHandler", () => {
 			description: "Llama 4 Maverick 17B model",
 			supportsImages: true,
 			supportsPromptCache: false,
-			supportsNativeTools: true,
 		})
 	})
 

+ 11 - 11
src/api/providers/__tests__/lite-llm.spec.ts

@@ -3,7 +3,7 @@ import { Anthropic } from "@anthropic-ai/sdk"
 
 import { LiteLLMHandler } from "../lite-llm"
 import { ApiHandlerOptions } from "../../../shared/api"
-import { litellmDefaultModelId, litellmDefaultModelInfo, TOOL_PROTOCOL } from "@roo-code/types"
+import { litellmDefaultModelId, litellmDefaultModelInfo } from "@roo-code/types"
 
 // Mock vscode first to avoid import errors
 vi.mock("vscode", () => ({}))
@@ -41,11 +41,11 @@ vi.mock("../fetchers/modelCache", () => ({
 			"llama-3": { ...litellmDefaultModelInfo, maxTokens: 8192 },
 			"gpt-4-turbo": { ...litellmDefaultModelInfo, maxTokens: 8192 },
 			// Gemini models for thought signature injection tests
-			"gemini-3-pro": { ...litellmDefaultModelInfo, maxTokens: 8192, supportsNativeTools: true },
-			"gemini-3-flash": { ...litellmDefaultModelInfo, maxTokens: 8192, supportsNativeTools: true },
-			"gemini-2.5-pro": { ...litellmDefaultModelInfo, maxTokens: 8192, supportsNativeTools: true },
-			"google/gemini-3-pro": { ...litellmDefaultModelInfo, maxTokens: 8192, supportsNativeTools: true },
-			"vertex_ai/gemini-3-pro": { ...litellmDefaultModelInfo, maxTokens: 8192, supportsNativeTools: true },
+			"gemini-3-pro": { ...litellmDefaultModelInfo, maxTokens: 8192 },
+			"gemini-3-flash": { ...litellmDefaultModelInfo, maxTokens: 8192 },
+			"gemini-2.5-pro": { ...litellmDefaultModelInfo, maxTokens: 8192 },
+			"google/gemini-3-pro": { ...litellmDefaultModelInfo, maxTokens: 8192 },
+			"vertex_ai/gemini-3-pro": { ...litellmDefaultModelInfo, maxTokens: 8192 },
 		})
 	}),
 	getModelsFromCache: vi.fn().mockReturnValue(undefined),
@@ -583,10 +583,10 @@ describe("LiteLLMHandler", () => {
 				}
 				handler = new LiteLLMHandler(optionsWithGemini)
 
-				// Mock fetchModel to return a Gemini model with native tool support
+				// Mock fetchModel to return a Gemini model
 				vi.spyOn(handler as any, "fetchModel").mockResolvedValue({
 					id: "gemini-3-pro",
-					info: { ...litellmDefaultModelInfo, maxTokens: 8192, supportsNativeTools: true },
+					info: { ...litellmDefaultModelInfo, maxTokens: 8192 },
 				})
 
 				const systemPrompt = "You are a helpful assistant"
@@ -632,7 +632,7 @@ describe("LiteLLMHandler", () => {
 							function: { name: "read_file", description: "Read a file", parameters: {} },
 						},
 					],
-					toolProtocol: TOOL_PROTOCOL.NATIVE,
+					// Tool calling is native-only; legacy protocol fields are not supported.
 				}
 
 				const generator = handler.createMessage(systemPrompt, messages, metadata as any)
@@ -661,7 +661,7 @@ describe("LiteLLMHandler", () => {
 
 				vi.spyOn(handler as any, "fetchModel").mockResolvedValue({
 					id: "gpt-4",
-					info: { ...litellmDefaultModelInfo, maxTokens: 8192, supportsNativeTools: true },
+					info: { ...litellmDefaultModelInfo, maxTokens: 8192 },
 				})
 
 				const systemPrompt = "You are a helpful assistant"
@@ -700,7 +700,7 @@ describe("LiteLLMHandler", () => {
 							function: { name: "read_file", description: "Read a file", parameters: {} },
 						},
 					],
-					toolProtocol: TOOL_PROTOCOL.NATIVE,
+					// Tool calling is native-only; legacy protocol fields are not supported.
 				}
 
 				const generator = handler.createMessage(systemPrompt, messages, metadata as any)

+ 13 - 12
src/api/providers/__tests__/lmstudio-native-tools.spec.ts

@@ -80,9 +80,11 @@ describe("LmStudioHandler Native Tools", () => {
 							}),
 						}),
 					]),
-					parallel_tool_calls: false,
 				}),
 			)
+			// parallel_tool_calls should be false when not explicitly set
+			const callArgs = mockCreate.mock.calls[0][0]
+			expect(callArgs).toHaveProperty("parallel_tool_calls", false)
 		})
 
 		it("should include tool_choice when provided", async () => {
@@ -108,7 +110,7 @@ describe("LmStudioHandler Native Tools", () => {
 			)
 		})
 
-		it("should not include tools when toolProtocol is xml", async () => {
+		it("should always include tools and tool_choice in request (tools are always present after PR #10841)", async () => {
 			mockCreate.mockImplementationOnce(() => ({
 				[Symbol.asyncIterator]: async function* () {
 					yield {
@@ -119,14 +121,15 @@ describe("LmStudioHandler Native Tools", () => {
 
 			const stream = handler.createMessage("test prompt", [], {
 				taskId: "test-task-id",
-				tools: testTools,
-				toolProtocol: "xml",
 			})
 			await stream.next()
 
 			const callArgs = mockCreate.mock.calls[mockCreate.mock.calls.length - 1][0]
-			expect(callArgs).not.toHaveProperty("tools")
-			expect(callArgs).not.toHaveProperty("tool_choice")
+			// Tools are now always present (minimum 6 from ALWAYS_AVAILABLE_TOOLS)
+			expect(callArgs).toHaveProperty("tools")
+			expect(callArgs).toHaveProperty("tool_choice")
+			// parallel_tool_calls should be false when not explicitly set
+			expect(callArgs).toHaveProperty("parallel_tool_calls", false)
 		})
 
 		it("should yield tool_call_partial chunks during streaming", async () => {
@@ -280,7 +283,7 @@ describe("LmStudioHandler Native Tools", () => {
 			expect(endChunks[0].id).toBe("call_lmstudio_test")
 		})
 
-		it("should work with parallel tool calls disabled", async () => {
+		it("should work with parallel tool calls disabled (sends false)", async () => {
 			mockCreate.mockImplementationOnce(() => ({
 				[Symbol.asyncIterator]: async function* () {
 					yield {
@@ -296,11 +299,9 @@ describe("LmStudioHandler Native Tools", () => {
 			})
 			await stream.next()
 
-			expect(mockCreate).toHaveBeenCalledWith(
-				expect.objectContaining({
-					parallel_tool_calls: false,
-				}),
-			)
+			// When parallelToolCalls is false, the parameter should be sent as false
+			const callArgs = mockCreate.mock.calls[0][0]
+			expect(callArgs).toHaveProperty("parallel_tool_calls", false)
 		})
 
 		it("should handle reasoning content alongside tool calls", async () => {

+ 16 - 11
src/api/providers/__tests__/mistral.spec.ts

@@ -119,12 +119,17 @@ describe("MistralHandler", () => {
 			const iterator = handler.createMessage(systemPrompt, messages)
 			const result = await iterator.next()
 
-			expect(mockCreate).toHaveBeenCalledWith({
-				model: mockOptions.apiModelId,
-				messages: expect.any(Array),
-				maxTokens: expect.any(Number),
-				temperature: 0,
-			})
+			expect(mockCreate).toHaveBeenCalledWith(
+				expect.objectContaining({
+					model: mockOptions.apiModelId,
+					messages: expect.any(Array),
+					maxTokens: expect.any(Number),
+					temperature: 0,
+					// Tools are now always present (minimum 6 from ALWAYS_AVAILABLE_TOOLS)
+					tools: expect.any(Array),
+					toolChoice: "any",
+				}),
+			)
 
 			expect(result.value).toBeDefined()
 			expect(result.done).toBe(false)
@@ -288,19 +293,19 @@ describe("MistralHandler", () => {
 			)
 		})
 
-		it("should not include tools when toolProtocol is xml", async () => {
+		it("should always include tools in request (tools are always present after PR #10841)", async () => {
 			const metadata: ApiHandlerCreateMessageMetadata = {
 				taskId: "test-task",
-				tools: mockTools,
-				toolProtocol: "xml",
 			}
 
 			const iterator = handler.createMessage(systemPrompt, messages, metadata)
 			await iterator.next()
 
+			// Tools are now always present (minimum 6 from ALWAYS_AVAILABLE_TOOLS)
 			expect(mockCreate).toHaveBeenCalledWith(
-				expect.not.objectContaining({
-					tools: expect.anything(),
+				expect.objectContaining({
+					tools: expect.any(Array),
+					toolChoice: "any",
 				}),
 			)
 		})

+ 12 - 30
src/api/providers/__tests__/native-ollama.spec.ts

@@ -265,15 +265,14 @@ describe("NativeOllamaHandler", () => {
 	})
 
 	describe("tool calling", () => {
-		it("should include tools when model supports native tools", async () => {
-			// Mock model with native tool support
+		it("should include tools when tools are provided", async () => {
+			// Model metadata should not gate tool inclusion; metadata.tools controls it.
 			mockGetOllamaModels.mockResolvedValue({
 				"llama3.2": {
 					contextWindow: 128000,
 					maxTokens: 4096,
 					supportsImages: true,
 					supportsPromptCache: false,
-					supportsNativeTools: true,
 				},
 			})
 
@@ -341,15 +340,14 @@ describe("NativeOllamaHandler", () => {
 			)
 		})
 
-		it("should not include tools when model does not support native tools", async () => {
-			// Mock model without native tool support
+		it("should include tools even when model metadata doesn't advertise tool support", async () => {
+			// Model metadata should not gate tool inclusion; metadata.tools controls it.
 			mockGetOllamaModels.mockResolvedValue({
 				llama2: {
 					contextWindow: 4096,
 					maxTokens: 4096,
 					supportsImages: false,
 					supportsPromptCache: false,
-					supportsNativeTools: false,
 				},
 			})
 
@@ -379,23 +377,22 @@ describe("NativeOllamaHandler", () => {
 				// consume stream
 			}
 
-			// Verify tools were NOT passed
+			// Verify tools were passed
 			expect(mockChat).toHaveBeenCalledWith(
-				expect.not.objectContaining({
-					tools: expect.anything(),
+				expect.objectContaining({
+					tools: expect.any(Array),
 				}),
 			)
 		})
 
-		it("should not include tools when toolProtocol is xml", async () => {
-			// Mock model with native tool support
+		it("should not include tools when no tools are provided", async () => {
+			// Model metadata should not gate tool inclusion; metadata.tools controls it.
 			mockGetOllamaModels.mockResolvedValue({
 				"llama3.2": {
 					contextWindow: 128000,
 					maxTokens: 4096,
 					supportsImages: true,
 					supportsPromptCache: false,
-					supportsNativeTools: true,
 				},
 			})
 
@@ -412,21 +409,8 @@ describe("NativeOllamaHandler", () => {
 				yield { message: { content: "Response" } }
 			})
 
-			const tools = [
-				{
-					type: "function" as const,
-					function: {
-						name: "get_weather",
-						description: "Get the weather",
-						parameters: { type: "object", properties: {} },
-					},
-				},
-			]
-
 			const stream = handler.createMessage("System", [{ role: "user" as const, content: "Test" }], {
 				taskId: "test",
-				tools,
-				toolProtocol: "xml",
 			})
 
 			// Consume the stream
@@ -434,7 +418,7 @@ describe("NativeOllamaHandler", () => {
 				// consume stream
 			}
 
-			// Verify tools were NOT passed (XML protocol forces XML format)
+			// Verify tools were NOT passed
 			expect(mockChat).toHaveBeenCalledWith(
 				expect.not.objectContaining({
 					tools: expect.anything(),
@@ -443,14 +427,13 @@ describe("NativeOllamaHandler", () => {
 		})
 
 		it("should yield tool_call_partial when model returns tool calls", async () => {
-			// Mock model with native tool support
+			// Model metadata should not gate tool inclusion; metadata.tools controls it.
 			mockGetOllamaModels.mockResolvedValue({
 				"llama3.2": {
 					contextWindow: 128000,
 					maxTokens: 4096,
 					supportsImages: true,
 					supportsPromptCache: false,
-					supportsNativeTools: true,
 				},
 			})
 
@@ -520,14 +503,13 @@ describe("NativeOllamaHandler", () => {
 		})
 
 		it("should yield tool_call_end events after tool_call_partial chunks", async () => {
-			// Mock model with native tool support
+			// Model metadata should not gate tool inclusion; metadata.tools controls it.
 			mockGetOllamaModels.mockResolvedValue({
 				"llama3.2": {
 					contextWindow: 128000,
 					maxTokens: 4096,
 					supportsImages: true,
 					supportsPromptCache: false,
-					supportsNativeTools: true,
 				},
 			})
 

+ 0 - 1
src/api/providers/__tests__/openai-codex-native-tool-calls.spec.ts

@@ -72,7 +72,6 @@ describe("OpenAiCodexHandler native tool calls", () => {
 
 		const stream = handler.createMessage("system", [{ role: "user", content: "hello" } as any], {
 			taskId: "t",
-			toolProtocol: "native",
 			tools: [],
 		})
 

+ 5 - 21
src/api/providers/__tests__/openai-native-tools.spec.ts

@@ -5,7 +5,7 @@ import { OpenAiNativeHandler } from "../openai-native"
 import type { ApiHandlerOptions } from "../../../shared/api"
 
 describe("OpenAiHandler native tools", () => {
-	it("includes tools in request when custom model info lacks supportsNativeTools (regression test)", async () => {
+	it("includes tools in request when tools are provided via metadata (regression test)", async () => {
 		const mockCreate = vi.fn().mockImplementationOnce(() => ({
 			[Symbol.asyncIterator]: async function* () {
 				yield {
@@ -14,10 +14,8 @@ describe("OpenAiHandler native tools", () => {
 			},
 		}))
 
-		// Set openAiCustomModelInfo WITHOUT supportsNativeTools to simulate
-		// a user-provided custom model info that doesn't specify native tool support.
-		// The getModel() fix should merge NATIVE_TOOL_DEFAULTS to ensure
-		// supportsNativeTools defaults to true.
+		// Set openAiCustomModelInfo without any tool capability flags; tools should
+		// still be passed whenever metadata.tools is present.
 		const handler = new OpenAiHandler({
 			openAiApiKey: "test-key",
 			openAiBaseUrl: "https://example.com/v1",
@@ -49,17 +47,9 @@ describe("OpenAiHandler native tools", () => {
 			},
 		]
 
-		// Mimic the behavior in Task.attemptApiRequest() where tools are only
-		// included when modelInfo.supportsNativeTools is true. This is the
-		// actual regression path being tested - without the getModel() fix,
-		// supportsNativeTools would be undefined and tools wouldn't be passed.
-		const modelInfo = handler.getModel().info
-		const supportsNativeTools = modelInfo.supportsNativeTools ?? false
-
 		const stream = handler.createMessage("system", [], {
 			taskId: "test-task-id",
-			...(supportsNativeTools && { tools }),
-			...(supportsNativeTools && { toolProtocol: "native" as const }),
+			tools,
 		})
 		await stream.next()
 
@@ -71,13 +61,10 @@ describe("OpenAiHandler native tools", () => {
 						function: expect.objectContaining({ name: "test_tool" }),
 					}),
 				]),
+				parallel_tool_calls: false,
 			}),
 			expect.anything(),
 		)
-		// Verify parallel_tool_calls is NOT included when parallelToolCalls is not explicitly true
-		// This is required for LiteLLM/Bedrock compatibility (see COM-406)
-		const callArgs = mockCreate.mock.calls[0][0]
-		expect(callArgs).not.toHaveProperty("parallel_tool_calls")
 	})
 })
 
@@ -131,7 +118,6 @@ describe("OpenAiNativeHandler MCP tool schema handling", () => {
 		const stream = handler.createMessage("system prompt", [], {
 			taskId: "test-task-id",
 			tools: mcpTools,
-			toolProtocol: "native" as const,
 		})
 
 		// Consume the stream
@@ -199,7 +185,6 @@ describe("OpenAiNativeHandler MCP tool schema handling", () => {
 		const stream = handler.createMessage("system prompt", [], {
 			taskId: "test-task-id",
 			tools: regularTools,
-			toolProtocol: "native" as const,
 		})
 
 		// Consume the stream
@@ -281,7 +266,6 @@ describe("OpenAiNativeHandler MCP tool schema handling", () => {
 		const stream = handler.createMessage("system prompt", [], {
 			taskId: "test-task-id",
 			tools: mcpToolsWithNestedObjects,
-			toolProtocol: "native" as const,
 		})
 
 		// Consume the stream

+ 0 - 39
src/api/providers/__tests__/openai-native.spec.ts

@@ -221,45 +221,6 @@ describe("OpenAiNativeHandler", () => {
 			expect(modelInfo.id).toBe("gpt-5.1-codex-max") // Default model
 			expect(modelInfo.info).toBeDefined()
 		})
-
-		it("should have defaultToolProtocol: native for all OpenAI Native models", () => {
-			// Test that all models have defaultToolProtocol: native
-			const testModels = [
-				"gpt-5.1-codex-max",
-				"gpt-5.2",
-				"gpt-5.1",
-				"gpt-5",
-				"gpt-5-mini",
-				"gpt-5-nano",
-				"gpt-4.1",
-				"gpt-4.1-mini",
-				"gpt-4.1-nano",
-				"o3",
-				"o3-high",
-				"o3-low",
-				"o4-mini",
-				"o4-mini-high",
-				"o4-mini-low",
-				"o3-mini",
-				"o3-mini-high",
-				"o3-mini-low",
-				"o1",
-				"o1-preview",
-				"o1-mini",
-				"gpt-4o",
-				"gpt-4o-mini",
-				"codex-mini-latest",
-			]
-
-			for (const modelId of testModels) {
-				const testHandler = new OpenAiNativeHandler({
-					openAiNativeApiKey: "test-api-key",
-					apiModelId: modelId,
-				})
-				const modelInfo = testHandler.getModel()
-				expect(modelInfo.info.defaultToolProtocol).toBe("native")
-			}
-		})
 	})
 
 	describe("GPT-5 models", () => {

+ 8 - 2
src/api/providers/__tests__/openai.spec.ts

@@ -633,11 +633,14 @@ describe("OpenAiHandler", () => {
 					stream: true,
 					stream_options: { include_usage: true },
 					temperature: 0,
+					tools: undefined,
+					tool_choice: undefined,
+					parallel_tool_calls: false,
 				},
 				{ path: "/models/chat/completions" },
 			)
 
-			// Verify max_tokens is NOT included when includeMaxTokens is not set
+			// Verify max_tokens is NOT included when not explicitly set
 			const callArgs = mockCreate.mock.calls[0][0]
 			expect(callArgs).not.toHaveProperty("max_completion_tokens")
 		})
@@ -679,11 +682,14 @@ describe("OpenAiHandler", () => {
 						{ role: "system", content: systemPrompt },
 						{ role: "user", content: "Hello!" },
 					],
+					tools: undefined,
+					tool_choice: undefined,
+					parallel_tool_calls: false,
 				},
 				{ path: "/models/chat/completions" },
 			)
 
-			// Verify max_tokens is NOT included when includeMaxTokens is not set
+			// Verify max_tokens is NOT included when not explicitly set
 			const callArgs = mockCreate.mock.calls[0][0]
 			expect(callArgs).not.toHaveProperty("max_completion_tokens")
 		})

+ 0 - 4
src/api/providers/__tests__/openrouter.spec.ts

@@ -42,7 +42,6 @@ vitest.mock("../fetchers/modelCache", () => ({
 				contextWindow: 200000,
 				supportsImages: true,
 				supportsPromptCache: true,
-				supportsNativeTools: true,
 				inputPrice: 3,
 				outputPrice: 15,
 				cacheWritesPrice: 3.75,
@@ -66,7 +65,6 @@ vitest.mock("../fetchers/modelCache", () => ({
 				contextWindow: 128000,
 				supportsImages: true,
 				supportsPromptCache: false,
-				supportsNativeTools: true,
 				inputPrice: 2.5,
 				outputPrice: 10,
 				description: "GPT-4o",
@@ -76,7 +74,6 @@ vitest.mock("../fetchers/modelCache", () => ({
 				contextWindow: 200000,
 				supportsImages: true,
 				supportsPromptCache: false,
-				supportsNativeTools: true,
 				inputPrice: 15,
 				outputPrice: 60,
 				description: "OpenAI o1",
@@ -129,7 +126,6 @@ describe("OpenRouterHandler", () => {
 			const result = await handler.fetchModel()
 			expect(result.id).toBe("anthropic/claude-sonnet-4.5")
 			expect(result.info.supportsPromptCache).toBe(true)
-			expect(result.info.supportsNativeTools).toBe(true)
 		})
 
 		it("honors custom maxTokens for thinking models", async () => {

+ 5 - 5
src/api/providers/__tests__/qwen-code-native-tools.spec.ts

@@ -127,7 +127,7 @@ describe("QwenCodeHandler Native Tools", () => {
 			)
 		})
 
-		it("should not include tools when toolProtocol is xml", async () => {
+		it("should always include tools and tool_choice (tools are guaranteed to be present after ALWAYS_AVAILABLE_TOOLS)", async () => {
 			mockCreate.mockImplementationOnce(() => ({
 				[Symbol.asyncIterator]: async function* () {
 					yield {
@@ -138,14 +138,14 @@ describe("QwenCodeHandler Native Tools", () => {
 
 			const stream = handler.createMessage("test prompt", [], {
 				taskId: "test-task-id",
-				tools: testTools,
-				toolProtocol: "xml",
 			})
 			await stream.next()
 
+			// Tools are now always present (minimum 6 from ALWAYS_AVAILABLE_TOOLS)
 			const callArgs = mockCreate.mock.calls[mockCreate.mock.calls.length - 1][0]
-			expect(callArgs).not.toHaveProperty("tools")
-			expect(callArgs).not.toHaveProperty("tool_choice")
+			expect(callArgs).toHaveProperty("tools")
+			expect(callArgs).toHaveProperty("tool_choice")
+			expect(callArgs).toHaveProperty("parallel_tool_calls", false)
 		})
 
 		it("should yield tool_call_partial chunks during streaming", async () => {

+ 1 - 33
src/api/providers/__tests__/requesty.spec.ts

@@ -3,15 +3,12 @@
 import { Anthropic } from "@anthropic-ai/sdk"
 import OpenAI from "openai"
 
-import { TOOL_PROTOCOL } from "@roo-code/types"
-
 import { RequestyHandler } from "../requesty"
 import { ApiHandlerOptions } from "../../../shared/api"
 import { Package } from "../../../shared/package"
 import { ApiHandlerCreateMessageMetadata } from "../../index"
 
 const mockCreate = vitest.fn()
-const mockResolveToolProtocol = vitest.fn()
 
 vitest.mock("openai", () => {
 	return {
@@ -27,10 +24,6 @@ vitest.mock("openai", () => {
 
 vitest.mock("delay", () => ({ default: vitest.fn(() => Promise.resolve()) }))
 
-vitest.mock("../../../utils/resolveToolProtocol", () => ({
-	resolveToolProtocol: (...args: any[]) => mockResolveToolProtocol(...args),
-}))
-
 vitest.mock("../fetchers/modelCache", () => ({
 	getModels: vitest.fn().mockImplementation(() => {
 		return Promise.resolve({
@@ -244,9 +237,7 @@ describe("RequestyHandler", () => {
 				mockCreate.mockResolvedValue(mockStream)
 			})
 
-			it("should include tools in request when toolProtocol is native", async () => {
-				mockResolveToolProtocol.mockReturnValue(TOOL_PROTOCOL.NATIVE)
-
+			it("should include tools in request when tools are provided", async () => {
 				const metadata: ApiHandlerCreateMessageMetadata = {
 					taskId: "test-task",
 					tools: mockTools,
@@ -273,30 +264,7 @@ describe("RequestyHandler", () => {
 				)
 			})
 
-			it("should not include tools when toolProtocol is not native", async () => {
-				mockResolveToolProtocol.mockReturnValue(TOOL_PROTOCOL.XML)
-
-				const metadata: ApiHandlerCreateMessageMetadata = {
-					taskId: "test-task",
-					tools: mockTools,
-					tool_choice: "auto",
-				}
-
-				const handler = new RequestyHandler(mockOptions)
-				const iterator = handler.createMessage(systemPrompt, messages, metadata)
-				await iterator.next()
-
-				expect(mockCreate).toHaveBeenCalledWith(
-					expect.not.objectContaining({
-						tools: expect.anything(),
-						tool_choice: expect.anything(),
-					}),
-				)
-			})
-
 			it("should handle tool_call_partial chunks in streaming response", async () => {
-				mockResolveToolProtocol.mockReturnValue(TOOL_PROTOCOL.NATIVE)
-
 				const mockStreamWithToolCalls = {
 					async *[Symbol.asyncIterator]() {
 						yield {

+ 0 - 17
src/api/providers/__tests__/roo.spec.ts

@@ -101,27 +101,22 @@ vitest.mock("../../providers/fetchers/modelCache", () => ({
 					supportsPromptCache: true,
 					inputPrice: 0,
 					outputPrice: 0,
-					defaultToolProtocol: "native",
 				},
 				"minimax/minimax-m2:free": {
 					maxTokens: 32_768,
 					contextWindow: 1_000_000,
 					supportsImages: false,
 					supportsPromptCache: true,
-					supportsNativeTools: true,
 					inputPrice: 0.15,
 					outputPrice: 0.6,
-					defaultToolProtocol: "native",
 				},
 				"anthropic/claude-haiku-4.5": {
 					maxTokens: 8_192,
 					contextWindow: 200_000,
 					supportsImages: true,
 					supportsPromptCache: true,
-					supportsNativeTools: true,
 					inputPrice: 0.8,
 					outputPrice: 4,
-					defaultToolProtocol: "native",
 				},
 			}
 		}
@@ -428,24 +423,12 @@ describe("RooHandler", () => {
 			}
 		})
 
-		it("should have defaultToolProtocol: native for all roo provider models", () => {
-			// Test that all models have defaultToolProtocol: native
-			const testModels = ["minimax/minimax-m2:free", "anthropic/claude-haiku-4.5", "xai/grok-code-fast-1"]
-			for (const modelId of testModels) {
-				const handlerWithModel = new RooHandler({ apiModelId: modelId })
-				const modelInfo = handlerWithModel.getModel()
-				expect(modelInfo.id).toBe(modelId)
-				expect((modelInfo.info as any).defaultToolProtocol).toBe("native")
-			}
-		})
-
 		it("should return cached model info with settings applied from API", () => {
 			const handlerWithMinimax = new RooHandler({
 				apiModelId: "minimax/minimax-m2:free",
 			})
 			const modelInfo = handlerWithMinimax.getModel()
 			// The settings from API should already be applied in the cached model info
-			expect(modelInfo.info.supportsNativeTools).toBe(true)
 			expect(modelInfo.info.inputPrice).toBe(0.15)
 			expect(modelInfo.info.outputPrice).toBe(0.6)
 		})

+ 6 - 15
src/api/providers/__tests__/unbound.spec.ts

@@ -15,7 +15,6 @@ vitest.mock("../fetchers/modelCache", () => ({
 				contextWindow: 200000,
 				supportsImages: true,
 				supportsPromptCache: true,
-				supportsNativeTools: true,
 				inputPrice: 3,
 				outputPrice: 15,
 				cacheWritesPrice: 3.75,
@@ -28,7 +27,6 @@ vitest.mock("../fetchers/modelCache", () => ({
 				contextWindow: 200000,
 				supportsImages: true,
 				supportsPromptCache: true,
-				supportsNativeTools: true,
 				inputPrice: 3,
 				outputPrice: 15,
 				cacheWritesPrice: 3.75,
@@ -41,7 +39,6 @@ vitest.mock("../fetchers/modelCache", () => ({
 				contextWindow: 200000,
 				supportsImages: true,
 				supportsPromptCache: true,
-				supportsNativeTools: true,
 				inputPrice: 3,
 				outputPrice: 15,
 				cacheWritesPrice: 3.75,
@@ -54,7 +51,6 @@ vitest.mock("../fetchers/modelCache", () => ({
 				contextWindow: 128000,
 				supportsImages: true,
 				supportsPromptCache: false,
-				supportsNativeTools: true,
 				inputPrice: 5,
 				outputPrice: 15,
 				description: "GPT-4o",
@@ -64,7 +60,6 @@ vitest.mock("../fetchers/modelCache", () => ({
 				contextWindow: 128000,
 				supportsImages: true,
 				supportsPromptCache: false,
-				supportsNativeTools: true,
 				inputPrice: 1,
 				outputPrice: 3,
 				description: "O3 Mini",
@@ -353,7 +348,7 @@ describe("UnboundHandler", () => {
 			},
 		]
 
-		it("should include tools in request when model supports native tools and tools are provided", async () => {
+		it("should include tools in request when tools are provided", async () => {
 			mockWithResponse.mockResolvedValueOnce({
 				data: {
 					[Symbol.asyncIterator]: () => ({
@@ -367,7 +362,6 @@ describe("UnboundHandler", () => {
 			const messageGenerator = handler.createMessage("test prompt", [], {
 				taskId: "test-task-id",
 				tools: testTools,
-				toolProtocol: "native",
 			})
 			await messageGenerator.next()
 
@@ -405,7 +399,6 @@ describe("UnboundHandler", () => {
 			const messageGenerator = handler.createMessage("test prompt", [], {
 				taskId: "test-task-id",
 				tools: testTools,
-				toolProtocol: "native",
 				tool_choice: "auto",
 			})
 			await messageGenerator.next()
@@ -422,7 +415,7 @@ describe("UnboundHandler", () => {
 			)
 		})
 
-		it("should not include tools when toolProtocol is xml", async () => {
+		it("should always include tools and tool_choice (tools are guaranteed to be present after ALWAYS_AVAILABLE_TOOLS)", async () => {
 			mockWithResponse.mockResolvedValueOnce({
 				data: {
 					[Symbol.asyncIterator]: () => ({
@@ -435,14 +428,14 @@ describe("UnboundHandler", () => {
 
 			const messageGenerator = handler.createMessage("test prompt", [], {
 				taskId: "test-task-id",
-				tools: testTools,
-				toolProtocol: "xml",
 			})
 			await messageGenerator.next()
 
+			// Tools are now always present (minimum 6 from ALWAYS_AVAILABLE_TOOLS)
 			const callArgs = mockCreate.mock.calls[mockCreate.mock.calls.length - 1][0]
-			expect(callArgs).not.toHaveProperty("tools")
-			expect(callArgs).not.toHaveProperty("tool_choice")
+			expect(callArgs).toHaveProperty("tools")
+			expect(callArgs).toHaveProperty("tool_choice")
+			expect(callArgs).toHaveProperty("parallel_tool_calls", false)
 		})
 
 		it("should yield tool_call_partial chunks during streaming", async () => {
@@ -499,7 +492,6 @@ describe("UnboundHandler", () => {
 			const stream = handler.createMessage("test prompt", [], {
 				taskId: "test-task-id",
 				tools: testTools,
-				toolProtocol: "native",
 			})
 
 			const chunks = []
@@ -538,7 +530,6 @@ describe("UnboundHandler", () => {
 			const messageGenerator = handler.createMessage("test prompt", [], {
 				taskId: "test-task-id",
 				tools: testTools,
-				toolProtocol: "native",
 				parallelToolCalls: true,
 			})
 			await messageGenerator.next()

+ 3 - 7
src/api/providers/__tests__/vercel-ai-gateway.spec.ts

@@ -315,7 +315,6 @@ describe("VercelAiGatewayHandler", () => {
 				const messageGenerator = handler.createMessage("test prompt", [], {
 					taskId: "test-task-id",
 					tools: testTools,
-					toolProtocol: "native",
 				})
 				await messageGenerator.next()
 
@@ -339,7 +338,6 @@ describe("VercelAiGatewayHandler", () => {
 				const messageGenerator = handler.createMessage("test prompt", [], {
 					taskId: "test-task-id",
 					tools: testTools,
-					toolProtocol: "native",
 					tool_choice: "auto",
 				})
 				await messageGenerator.next()
@@ -351,13 +349,12 @@ describe("VercelAiGatewayHandler", () => {
 				)
 			})
 
-			it("should set parallel_tool_calls when toolProtocol is native", async () => {
+			it("should set parallel_tool_calls when parallelToolCalls is enabled", async () => {
 				const handler = new VercelAiGatewayHandler(mockOptions)
 
 				const messageGenerator = handler.createMessage("test prompt", [], {
 					taskId: "test-task-id",
 					tools: testTools,
-					toolProtocol: "native",
 					parallelToolCalls: true,
 				})
 				await messageGenerator.next()
@@ -369,18 +366,18 @@ describe("VercelAiGatewayHandler", () => {
 				)
 			})
 
-			it("should default parallel_tool_calls to false", async () => {
+			it("should include parallel_tool_calls: false by default", async () => {
 				const handler = new VercelAiGatewayHandler(mockOptions)
 
 				const messageGenerator = handler.createMessage("test prompt", [], {
 					taskId: "test-task-id",
 					tools: testTools,
-					toolProtocol: "native",
 				})
 				await messageGenerator.next()
 
 				expect(mockCreate).toHaveBeenCalledWith(
 					expect.objectContaining({
+						tools: expect.any(Array),
 						parallel_tool_calls: false,
 					}),
 				)
@@ -445,7 +442,6 @@ describe("VercelAiGatewayHandler", () => {
 				const stream = handler.createMessage("test prompt", [], {
 					taskId: "test-task-id",
 					tools: testTools,
-					toolProtocol: "native",
 				})
 
 				const chunks = []

+ 43 - 22
src/api/providers/__tests__/vscode-lm.spec.ts

@@ -180,7 +180,7 @@ describe("VsCodeLmHandler", () => {
 			})
 		})
 
-		it("should handle tool calls as text when not using native tool protocol", async () => {
+		it("should emit tool_call chunks when tools are provided", async () => {
 			const systemPrompt = "You are a helpful assistant"
 			const messages: Anthropic.Messages.MessageParam[] = [
 				{
@@ -210,7 +210,27 @@ describe("VsCodeLmHandler", () => {
 				})(),
 			})
 
-			const stream = handler.createMessage(systemPrompt, messages)
+			const tools = [
+				{
+					type: "function" as const,
+					function: {
+						name: "calculator",
+						description: "A simple calculator",
+						parameters: {
+							type: "object",
+							properties: {
+								operation: { type: "string" },
+								numbers: { type: "array", items: { type: "number" } },
+							},
+						},
+					},
+				},
+			]
+
+			const stream = handler.createMessage(systemPrompt, messages, {
+				taskId: "test-task",
+				tools,
+			})
 			const chunks = []
 			for await (const chunk of stream) {
 				chunks.push(chunk)
@@ -218,12 +238,14 @@ describe("VsCodeLmHandler", () => {
 
 			expect(chunks).toHaveLength(2) // Tool call chunk + usage chunk
 			expect(chunks[0]).toEqual({
-				type: "text",
-				text: JSON.stringify({ type: "tool_call", ...toolCallData }),
+				type: "tool_call",
+				id: toolCallData.callId,
+				name: toolCallData.name,
+				arguments: JSON.stringify(toolCallData.arguments),
 			})
 		})
 
-		it("should handle native tool calls when using native tool protocol", async () => {
+		it("should handle native tool calls when tools are provided", async () => {
 			const systemPrompt = "You are a helpful assistant"
 			const messages: Anthropic.Messages.MessageParam[] = [
 				{
@@ -272,7 +294,6 @@ describe("VsCodeLmHandler", () => {
 
 			const stream = handler.createMessage(systemPrompt, messages, {
 				taskId: "test-task",
-				toolProtocol: "native",
 				tools,
 			})
 			const chunks = []
@@ -289,7 +310,7 @@ describe("VsCodeLmHandler", () => {
 			})
 		})
 
-		it("should pass tools to request options when using native tool protocol", async () => {
+		it("should pass tools to request options when tools are provided", async () => {
 			const systemPrompt = "You are a helpful assistant"
 			const messages: Anthropic.Messages.MessageParam[] = [
 				{
@@ -327,7 +348,6 @@ describe("VsCodeLmHandler", () => {
 
 			const stream = handler.createMessage(systemPrompt, messages, {
 				taskId: "test-task",
-				toolProtocol: "native",
 				tools,
 			})
 			const chunks = []
@@ -376,10 +396,11 @@ describe("VsCodeLmHandler", () => {
 	describe("getModel", () => {
 		it("should return model info when client exists", async () => {
 			const mockModel = { ...mockLanguageModelChat }
-			;(vscode.lm.selectChatModels as Mock).mockResolvedValueOnce([mockModel])
-
-			// Initialize client
-			await handler["getClient"]()
+			// The handler starts async initialization in the constructor.
+			// Make the test deterministic by explicitly (re)initializing here.
+			;(vscode.lm.selectChatModels as Mock).mockResolvedValue([mockModel])
+			handler["client"] = null
+			await handler.initializeClient()
 
 			const model = handler.getModel()
 			expect(model.id).toBe("test-model")
@@ -395,24 +416,24 @@ describe("VsCodeLmHandler", () => {
 			expect(model.info).toBeDefined()
 		})
 
-		it("should return supportsNativeTools and defaultToolProtocol in model info", async () => {
+		it("should return basic model info when client exists", async () => {
 			const mockModel = { ...mockLanguageModelChat }
-			;(vscode.lm.selectChatModels as Mock).mockResolvedValueOnce([mockModel])
-
-			// Initialize client
-			await handler["getClient"]()
+			// The handler starts async initialization in the constructor.
+			// Make the test deterministic by explicitly (re)initializing here.
+			;(vscode.lm.selectChatModels as Mock).mockResolvedValue([mockModel])
+			handler["client"] = null
+			await handler.initializeClient()
 
 			const model = handler.getModel()
-			expect(model.info.supportsNativeTools).toBe(true)
-			expect(model.info.defaultToolProtocol).toBe("native")
+			expect(model.info).toBeDefined()
+			expect(model.info.contextWindow).toBe(4096)
 		})
 
-		it("should return supportsNativeTools and defaultToolProtocol in fallback model info", () => {
+		it("should return fallback model info when no client exists", () => {
 			// Clear the client first
 			handler["client"] = null
 			const model = handler.getModel()
-			expect(model.info.supportsNativeTools).toBe(true)
-			expect(model.info.defaultToolProtocol).toBe("native")
+			expect(model.info).toBeDefined()
 		})
 	})
 

+ 5 - 5
src/api/providers/__tests__/xai.spec.ts

@@ -371,7 +371,7 @@ describe("XAIHandler", () => {
 			)
 		})
 
-		it("should not include tools when toolProtocol is xml", async () => {
+		it("should always include tools and tool_choice (ALWAYS_AVAILABLE_TOOLS guarantees tools are always present)", async () => {
 			const handlerWithTools = new XAIHandler({ apiModelId: "grok-3" })
 
 			mockCreate.mockImplementationOnce(() => {
@@ -386,14 +386,14 @@ describe("XAIHandler", () => {
 
 			const messageGenerator = handlerWithTools.createMessage("test prompt", [], {
 				taskId: "test-task-id",
-				tools: testTools,
-				toolProtocol: "xml",
 			})
 			await messageGenerator.next()
 
+			// Tools are now always present (minimum 6 from ALWAYS_AVAILABLE_TOOLS)
 			const callArgs = mockCreate.mock.calls[mockCreate.mock.calls.length - 1][0]
-			expect(callArgs).not.toHaveProperty("tools")
-			expect(callArgs).not.toHaveProperty("tool_choice")
+			expect(callArgs).toHaveProperty("tools")
+			expect(callArgs).toHaveProperty("tool_choice")
+			expect(callArgs).toHaveProperty("parallel_tool_calls", false)
 		})
 
 		it("should yield tool_call_partial chunks during streaming", async () => {

+ 4 - 18
src/api/providers/anthropic-vertex.ts

@@ -8,7 +8,6 @@ import {
 	vertexDefaultModelId,
 	vertexModels,
 	ANTHROPIC_DEFAULT_MAX_TOKENS,
-	TOOL_PROTOCOL,
 	VERTEX_1M_CONTEXT_MODEL_IDS,
 } from "@roo-code/types"
 import { safeJsonParse } from "@roo-code/core"
@@ -19,7 +18,6 @@ import { ApiStream } from "../transform/stream"
 import { addCacheBreakpoints } from "../transform/caching/vertex"
 import { getModelParams } from "../transform/model-params"
 import { filterNonAnthropicBlocks } from "../transform/anthropic-filter"
-import { resolveToolProtocol } from "../../utils/resolveToolProtocol"
 import {
 	convertOpenAIToolsToAnthropic,
 	convertOpenAIToolChoiceToAnthropic,
@@ -77,22 +75,10 @@ export class AnthropicVertexHandler extends BaseProvider implements SingleComple
 		// Filter out non-Anthropic blocks (reasoning, thoughtSignature, etc.) before sending to the API
 		const sanitizedMessages = filterNonAnthropicBlocks(messages)
 
-		// Enable native tools using resolveToolProtocol (which checks model's defaultToolProtocol)
-		// This matches the approach used in AnthropicHandler
-		// Also exclude tools when tool_choice is "none" since that means "don't use tools"
-		const toolProtocol = resolveToolProtocol(this.options, info, metadata?.toolProtocol)
-		const shouldIncludeNativeTools =
-			metadata?.tools &&
-			metadata.tools.length > 0 &&
-			toolProtocol === TOOL_PROTOCOL.NATIVE &&
-			metadata?.tool_choice !== "none"
-
-		const nativeToolParams = shouldIncludeNativeTools
-			? {
-					tools: convertOpenAIToolsToAnthropic(metadata.tools!),
-					tool_choice: convertOpenAIToolChoiceToAnthropic(metadata.tool_choice, metadata.parallelToolCalls),
-				}
-			: {}
+		const nativeToolParams = {
+			tools: convertOpenAIToolsToAnthropic(metadata?.tools ?? []),
+			tool_choice: convertOpenAIToolChoiceToAnthropic(metadata?.tool_choice, metadata?.parallelToolCalls),
+		}
 
 		/**
 		 * Vertex API has specific limitations for prompt caching:

+ 4 - 20
src/api/providers/anthropic.ts

@@ -10,7 +10,6 @@ import {
 	anthropicModels,
 	ANTHROPIC_DEFAULT_MAX_TOKENS,
 	ApiProviderError,
-	TOOL_PROTOCOL,
 } from "@roo-code/types"
 import { TelemetryService } from "@roo-code/telemetry"
 
@@ -19,7 +18,6 @@ import type { ApiHandlerOptions } from "../../shared/api"
 import { ApiStream } from "../transform/stream"
 import { getModelParams } from "../transform/model-params"
 import { filterNonAnthropicBlocks } from "../transform/anthropic-filter"
-import { resolveToolProtocol } from "../../utils/resolveToolProtocol"
 import { handleProviderError } from "./utils/error-handler"
 
 import { BaseProvider } from "./base-provider"
@@ -74,24 +72,10 @@ export class AnthropicHandler extends BaseProvider implements SingleCompletionHa
 			betas.push("context-1m-2025-08-07")
 		}
 
-		// Enable native tools by default using resolveToolProtocol (which checks model's defaultToolProtocol)
-		// This matches OpenRouter's approach of always including tools when provided
-		// Also exclude tools when tool_choice is "none" since that means "don't use tools"
-		// IMPORTANT: Use metadata.toolProtocol if provided (task's locked protocol) for consistency
-		const model = this.getModel()
-		const toolProtocol = resolveToolProtocol(this.options, model.info, metadata?.toolProtocol)
-		const shouldIncludeNativeTools =
-			metadata?.tools &&
-			metadata.tools.length > 0 &&
-			toolProtocol === TOOL_PROTOCOL.NATIVE &&
-			metadata?.tool_choice !== "none"
-
-		const nativeToolParams = shouldIncludeNativeTools
-			? {
-					tools: convertOpenAIToolsToAnthropic(metadata.tools!),
-					tool_choice: convertOpenAIToolChoiceToAnthropic(metadata.tool_choice, metadata.parallelToolCalls),
-				}
-			: {}
+		const nativeToolParams = {
+			tools: convertOpenAIToolsToAnthropic(metadata?.tools ?? []),
+			tool_choice: convertOpenAIToolChoiceToAnthropic(metadata?.tool_choice, metadata?.parallelToolCalls),
+		}
 
 		switch (modelId) {
 			case "claude-sonnet-4-5":

+ 5 - 7
src/api/providers/base-openai-compatible-provider.ts

@@ -4,7 +4,7 @@ import OpenAI from "openai"
 import type { ModelInfo } from "@roo-code/types"
 
 import { type ApiHandlerOptions, getModelMaxOutputTokens } from "../../shared/api"
-import { XmlMatcher } from "../../utils/xml-matcher"
+import { TagMatcher } from "../../utils/tag-matcher"
 import { ApiStream, ApiStreamUsageChunk } from "../transform/stream"
 import { convertToOpenAiMessages } from "../transform/openai-format"
 
@@ -93,11 +93,9 @@ export abstract class BaseOpenAiCompatibleProvider<ModelName extends string>
 			messages: [{ role: "system", content: systemPrompt }, ...convertToOpenAiMessages(messages)],
 			stream: true,
 			stream_options: { include_usage: true },
-			...(metadata?.tools && { tools: this.convertToolsForOpenAI(metadata.tools) }),
-			...(metadata?.tool_choice && { tool_choice: metadata.tool_choice }),
-			...(metadata?.toolProtocol === "native" && {
-				parallel_tool_calls: metadata.parallelToolCalls ?? false,
-			}),
+			tools: this.convertToolsForOpenAI(metadata?.tools),
+			tool_choice: metadata?.tool_choice,
+			parallel_tool_calls: metadata?.parallelToolCalls ?? false,
 		}
 
 		// Add thinking parameter if reasoning is enabled and model supports it
@@ -119,7 +117,7 @@ export abstract class BaseOpenAiCompatibleProvider<ModelName extends string>
 	): ApiStream {
 		const stream = await this.createStream(systemPrompt, messages, metadata)
 
-		const matcher = new XmlMatcher(
+		const matcher = new TagMatcher(
 			"think",
 			(chunk) =>
 				({

+ 18 - 37
src/api/providers/bedrock.ts

@@ -359,15 +359,6 @@ export class AwsBedrockHandler extends BaseProvider implements SingleCompletionH
 		const modelConfig = this.getModel()
 		const usePromptCache = Boolean(this.options.awsUsePromptCache && this.supportsAwsPromptCache(modelConfig))
 
-		// Determine early if native tools should be used (needed for message conversion)
-		const supportsNativeTools = modelConfig.info.supportsNativeTools ?? false
-		const useNativeTools =
-			supportsNativeTools &&
-			metadata?.tools &&
-			metadata.tools.length > 0 &&
-			metadata?.toolProtocol !== "xml" &&
-			metadata?.tool_choice !== "none"
-
 		const conversationId =
 			messages.length > 0
 				? `conv_${messages[0].role}_${
@@ -383,7 +374,6 @@ export class AwsBedrockHandler extends BaseProvider implements SingleCompletionH
 			usePromptCache,
 			modelConfig.info,
 			conversationId,
-			useNativeTools,
 		)
 
 		let additionalModelRequestFields: BedrockAdditionalModelFields | undefined
@@ -424,6 +414,17 @@ export class AwsBedrockHandler extends BaseProvider implements SingleCompletionH
 		const is1MContextEnabled =
 			BEDROCK_1M_CONTEXT_MODEL_IDS.includes(baseModelId as any) && this.options.awsBedrock1MContext
 
+		// Determine if service tier should be applied (checked later when building payload)
+		const useServiceTier =
+			this.options.awsBedrockServiceTier && BEDROCK_SERVICE_TIER_MODEL_IDS.includes(baseModelId as any)
+		if (useServiceTier) {
+			logger.info("Service tier specified for Bedrock request", {
+				ctx: "bedrock",
+				modelId: modelConfig.id,
+				serviceTier: this.options.awsBedrockServiceTier,
+			})
+		}
+
 		// Add anthropic_beta headers for various features
 		// Start with an empty array and add betas as needed
 		const anthropicBetas: string[] = []
@@ -433,9 +434,9 @@ export class AwsBedrockHandler extends BaseProvider implements SingleCompletionH
 			anthropicBetas.push("context-1m-2025-08-07")
 		}
 
-		// Add fine-grained tool streaming beta when native tools are used with Claude models
+		// Add fine-grained tool streaming beta for Claude models
 		// This enables proper tool use streaming for Anthropic models on Bedrock
-		if (useNativeTools && baseModelId.includes("claude")) {
+		if (baseModelId.includes("claude")) {
 			anthropicBetas.push("fine-grained-tool-streaming-2025-05-14")
 		}
 
@@ -447,24 +448,9 @@ export class AwsBedrockHandler extends BaseProvider implements SingleCompletionH
 			additionalModelRequestFields.anthropic_beta = anthropicBetas
 		}
 
-		// Determine if service tier should be applied (checked later when building payload)
-		const useServiceTier =
-			this.options.awsBedrockServiceTier && BEDROCK_SERVICE_TIER_MODEL_IDS.includes(baseModelId as any)
-		if (useServiceTier) {
-			logger.info("Service tier specified for Bedrock request", {
-				ctx: "bedrock",
-				modelId: modelConfig.id,
-				serviceTier: this.options.awsBedrockServiceTier,
-			})
-		}
-
-		// Build tool configuration if native tools are enabled
-		let toolConfig: ToolConfiguration | undefined
-		if (useNativeTools && metadata?.tools) {
-			toolConfig = {
-				tools: this.convertToolsForBedrock(metadata.tools),
-				toolChoice: this.convertToolChoiceForBedrock(metadata.tool_choice),
-			}
+		const toolConfig: ToolConfiguration = {
+			tools: this.convertToolsForBedrock(metadata?.tools ?? []),
+			toolChoice: this.convertToolChoiceForBedrock(metadata?.tool_choice),
 		}
 
 		// Build payload with optional service_tier at top level
@@ -478,7 +464,7 @@ export class AwsBedrockHandler extends BaseProvider implements SingleCompletionH
 			...(additionalModelRequestFields && { additionalModelRequestFields }),
 			// Add anthropic_version at top level when using thinking features
 			...(thinkingEnabled && { anthropic_version: "bedrock-2023-05-31" }),
-			...(toolConfig && { toolConfig }),
+			toolConfig,
 			// Add service_tier as a top-level parameter (not inside additionalModelRequestFields)
 			...(useServiceTier && { service_tier: this.options.awsBedrockServiceTier }),
 		}
@@ -844,12 +830,9 @@ export class AwsBedrockHandler extends BaseProvider implements SingleCompletionH
 		usePromptCache: boolean = false,
 		modelInfo?: any,
 		conversationId?: string, // Optional conversation ID to track cache points across messages
-		useNativeTools: boolean = false, // Whether native tool calling is being used
 	): { system: SystemContentBlock[]; messages: Message[] } {
 		// First convert messages using shared converter for proper image handling
-		const convertedMessages = sharedConverter(anthropicMessages as Anthropic.Messages.MessageParam[], {
-			useNativeTools,
-		})
+		const convertedMessages = sharedConverter(anthropicMessages as Anthropic.Messages.MessageParam[])
 
 		// If prompt caching is disabled, return the converted messages directly
 		if (!usePromptCache) {
@@ -1360,8 +1343,6 @@ Please verify:
 2. If using a provisioned model, check its throughput settings
 3. Contact AWS support to request a quota increase if needed
 
-
-
 `,
 			logLevel: "error",
 		},

+ 7 - 12
src/api/providers/cerebras.ts

@@ -6,7 +6,7 @@ import type { ApiHandlerOptions } from "../../shared/api"
 import { calculateApiCostOpenAI } from "../../shared/cost"
 import { ApiStream } from "../transform/stream"
 import { convertToOpenAiMessages } from "../transform/openai-format"
-import { XmlMatcher } from "../../utils/xml-matcher"
+import { TagMatcher } from "../../utils/tag-matcher"
 
 import type { ApiHandlerCreateMessageMetadata, SingleCompletionHandler } from "../index"
 import { BaseProvider } from "./base-provider"
@@ -125,13 +125,8 @@ export class CerebrasHandler extends BaseProvider implements SingleCompletionHan
 	): ApiStream {
 		const { id: model, info: modelInfo } = this.getModel()
 		const max_tokens = modelInfo.maxTokens
-		const supportsNativeTools = modelInfo.supportsNativeTools ?? false
 		const temperature = this.options.modelTemperature ?? CEREBRAS_DEFAULT_TEMPERATURE
 
-		// Check if we should use native tool calling
-		const useNativeTools =
-			supportsNativeTools && metadata?.tools && metadata.tools.length > 0 && metadata?.toolProtocol !== "xml"
-
 		// Convert Anthropic messages to OpenAI format (Cerebras is OpenAI-compatible)
 		const openaiMessages = convertToOpenAiMessages(messages)
 
@@ -149,9 +144,9 @@ export class CerebrasHandler extends BaseProvider implements SingleCompletionHan
 					}
 				: {}),
 			// Native tool calling support
-			...(useNativeTools && { tools: this.convertToolsForOpenAI(metadata.tools) }),
-			...(useNativeTools && metadata.tool_choice && { tool_choice: metadata.tool_choice }),
-			...(useNativeTools && { parallel_tool_calls: metadata?.parallelToolCalls ?? false }),
+			tools: this.convertToolsForOpenAI(metadata?.tools),
+			tool_choice: metadata?.tool_choice,
+			parallel_tool_calls: metadata?.parallelToolCalls ?? false,
 		}
 
 		try {
@@ -197,8 +192,8 @@ export class CerebrasHandler extends BaseProvider implements SingleCompletionHan
 				throw new Error(t("common:errors.cerebras.noResponseBody"))
 			}
 
-			// Initialize XmlMatcher to parse <think>...</think> tags
-			const matcher = new XmlMatcher(
+			// Initialize TagMatcher to parse <think>...</think> tags
+			const matcher = new TagMatcher(
 				"think",
 				(chunk) =>
 					({
@@ -240,7 +235,7 @@ export class CerebrasHandler extends BaseProvider implements SingleCompletionHan
 								if (delta?.content) {
 									const content = delta.content
 
-									// Use XmlMatcher to parse <think>...</think> tags
+									// Use TagMatcher to parse <think>...</think> tags
 									for (const chunk of matcher.update(content)) {
 										yield chunk
 									}
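
For context on the XmlMatcher → TagMatcher rename in this and the previous hunks: the call sites change only in name — the matcher is still constructed with the tag name plus a chunk-transform callback and fed streamed deltas via update(). The class below is a simplified stand-in written for illustration under that assumption; it is not the repository's TagMatcher, and its chunk shape and method names are inventions of this sketch.

// Simplified stand-in for illustration only; NOT the repository's TagMatcher.
// Routes streamed text inside <think>...</think> to "reasoning" chunks and
// everything else to "text" chunks, tolerating tags split across deltas.
type StreamChunk = { type: "reasoning" | "text"; text: string }

class SimpleTagMatcher {
	private buffer = ""
	private inside = false

	constructor(private readonly tag: string) {}

	update(delta: string): StreamChunk[] {
		this.buffer += delta
		const out: StreamChunk[] = []
		for (;;) {
			const marker = this.inside ? `</${this.tag}>` : `<${this.tag}>`
			const idx = this.buffer.indexOf(marker)
			if (idx === -1) {
				// Hold back a tail that could still become a marker on the next delta.
				const emitLen = Math.max(0, this.buffer.length - marker.length + 1)
				if (emitLen > 0) {
					out.push({ type: this.inside ? "reasoning" : "text", text: this.buffer.slice(0, emitLen) })
					this.buffer = this.buffer.slice(emitLen)
				}
				return out
			}
			if (idx > 0) {
				out.push({ type: this.inside ? "reasoning" : "text", text: this.buffer.slice(0, idx) })
			}
			this.buffer = this.buffer.slice(idx + marker.length)
			this.inside = !this.inside
		}
	}

	final(): StreamChunk[] {
		// Flush whatever remains at end of stream.
		const rest = this.buffer
		this.buffer = ""
		return rest ? [{ type: this.inside ? "reasoning" : "text", text: rest }] : []
	}
}

// Usage mirroring the provider loop: feed streamed deltas, yield the produced chunks.
const matcher = new SimpleTagMatcher("think")
for (const delta of ["Sure. <thi", "nk>weigh the options</think>", " Here is the answer."]) {
	for (const chunk of matcher.update(delta)) console.log(chunk)
}
for (const chunk of matcher.final()) console.log(chunk)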

+ 4 - 4
src/api/providers/chutes.ts

@@ -4,7 +4,7 @@ import OpenAI from "openai"
 
 import type { ApiHandlerOptions } from "../../shared/api"
 import { getModelMaxOutputTokens } from "../../shared/api"
-import { XmlMatcher } from "../../utils/xml-matcher"
+import { TagMatcher } from "../../utils/tag-matcher"
 import { convertToR1Format } from "../transform/r1-format"
 import { convertToOpenAiMessages } from "../transform/openai-format"
 import { ApiStream } from "../transform/stream"
@@ -47,8 +47,8 @@ export class ChutesHandler extends RouterProvider implements SingleCompletionHan
 			messages: [{ role: "system", content: systemPrompt }, ...convertToOpenAiMessages(messages)],
 			stream: true,
 			stream_options: { include_usage: true },
-			...(metadata?.tools && { tools: metadata.tools }),
-			...(metadata?.tool_choice && { tool_choice: metadata.tool_choice }),
+			tools: metadata?.tools,
+			tool_choice: metadata?.tool_choice,
 		}
 
 		// Only add temperature if model supports it
@@ -72,7 +72,7 @@ export class ChutesHandler extends RouterProvider implements SingleCompletionHan
 				messages: convertToR1Format([{ role: "user", content: systemPrompt }, ...messages]),
 			})
 
-			const matcher = new XmlMatcher(
+			const matcher = new TagMatcher(
 				"think",
 				(chunk) =>
 					({

+ 2 - 13
src/api/providers/claude-code.ts

@@ -144,19 +144,8 @@ export class ClaudeCodeHandler implements ApiHandler, SingleCompletionHandler {
 			// Generate user_id metadata in the format required by Claude Code API
 			const userId = generateUserId(email || undefined)
 
-			// Convert OpenAI tools to Anthropic format if provided and protocol is native
-			// Exclude tools when tool_choice is "none" since that means "don't use tools"
-			const shouldIncludeNativeTools =
-				metadata?.tools &&
-				metadata.tools.length > 0 &&
-				metadata?.toolProtocol !== "xml" &&
-				metadata?.tool_choice !== "none"
-
-			const anthropicTools = shouldIncludeNativeTools ? convertOpenAIToolsToAnthropic(metadata.tools!) : undefined
-
-			const anthropicToolChoice = shouldIncludeNativeTools
-				? convertOpenAIToolChoice(metadata.tool_choice, metadata.parallelToolCalls)
-				: undefined
+			const anthropicTools = convertOpenAIToolsToAnthropic(metadata?.tools ?? [])
+			const anthropicToolChoice = convertOpenAIToolChoice(metadata?.tool_choice, metadata?.parallelToolCalls)
 
 			// Determine reasoning effort and thinking configuration
 			const reasoningLevel = this.getReasoningEffort(model.info)
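
Both claude-code.ts and the Anthropic handlers above now pass metadata?.tools ?? [] straight into convertOpenAIToolsToAnthropic. As a rough illustration of the shape translation involved — OpenAI-style { type: "function", function: { name, description, parameters } } entries becoming Anthropic tools keyed by input_schema — here is a hedged TypeScript sketch; the function and types are stand-ins for the example, not the repository's converter.

// Illustrative stand-in, not the repository's convertOpenAIToolsToAnthropic.
// Maps OpenAI-style function tools to the Anthropic Messages API tool shape.
interface OpenAIFunctionTool {
	type: "function"
	function: { name: string; description?: string; parameters?: Record<string, unknown> }
}

interface AnthropicTool {
	name: string
	description?: string
	input_schema: Record<string, unknown>
}

function toAnthropicTools(tools: OpenAIFunctionTool[]): AnthropicTool[] {
	return tools.map((tool) => ({
		name: tool.function.name,
		description: tool.function.description,
		// Anthropic expects the JSON Schema under `input_schema`.
		input_schema: tool.function.parameters ?? { type: "object", properties: {} },
	}))
}

// Example: the calculator tool used in the vscode-lm tests above.
console.log(
	toAnthropicTools([
		{
			type: "function",
			function: {
				name: "calculator",
				description: "A simple calculator",
				parameters: { type: "object", properties: { operation: { type: "string" } } },
			},
		},
	]),
)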

+ 3 - 8
src/api/providers/deepinfra.ts

@@ -65,11 +65,6 @@ export class DeepInfraHandler extends RouterProvider implements SingleCompletion
 			prompt_cache_key = _metadata.taskId
 		}
 
-		// Check if model supports native tools and tools are provided with native protocol
-		const supportsNativeTools = info.supportsNativeTools ?? false
-		const useNativeTools =
-			supportsNativeTools && _metadata?.tools && _metadata.tools.length > 0 && _metadata?.toolProtocol !== "xml"
-
 		const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming = {
 			model: modelId,
 			messages: [{ role: "system", content: systemPrompt }, ...convertToOpenAiMessages(messages)],
@@ -77,9 +72,9 @@ export class DeepInfraHandler extends RouterProvider implements SingleCompletion
 			stream_options: { include_usage: true },
 			reasoning_effort,
 			prompt_cache_key,
-			...(useNativeTools && { tools: this.convertToolsForOpenAI(_metadata.tools) }),
-			...(useNativeTools && _metadata.tool_choice && { tool_choice: _metadata.tool_choice }),
-			...(useNativeTools && { parallel_tool_calls: _metadata?.parallelToolCalls ?? false }),
+			tools: this.convertToolsForOpenAI(_metadata?.tools),
+			tool_choice: _metadata?.tool_choice,
+			parallel_tool_calls: _metadata?.parallelToolCalls ?? false,
 		} as OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming
 
 		if (this.supportsTemperature(modelId)) {

+ 3 - 5
src/api/providers/deepseek.ts

@@ -70,11 +70,9 @@ export class DeepSeekHandler extends OpenAiHandler {
 			stream_options: { include_usage: true },
 			// Enable thinking mode for deepseek-reasoner or when tools are used with thinking model
 			...(isThinkingModel && { thinking: { type: "enabled" } }),
-			...(metadata?.tools && { tools: this.convertToolsForOpenAI(metadata.tools) }),
-			...(metadata?.tool_choice && { tool_choice: metadata.tool_choice }),
-			...(metadata?.toolProtocol === "native" && {
-				parallel_tool_calls: metadata.parallelToolCalls ?? false,
-			}),
+			tools: this.convertToolsForOpenAI(metadata?.tools),
+			tool_choice: metadata?.tool_choice,
+			parallel_tool_calls: metadata?.parallelToolCalls ?? false,
 		}
 
 		// Add max_tokens if needed

+ 2 - 2
src/api/providers/featherless.ts

@@ -8,7 +8,7 @@ import { Anthropic } from "@anthropic-ai/sdk"
 import OpenAI from "openai"
 
 import type { ApiHandlerOptions } from "../../shared/api"
-import { XmlMatcher } from "../../utils/xml-matcher"
+import { TagMatcher } from "../../utils/tag-matcher"
 import { convertToR1Format } from "../transform/r1-format"
 import { convertToOpenAiMessages } from "../transform/openai-format"
 import { ApiStream } from "../transform/stream"
@@ -63,7 +63,7 @@ export class FeatherlessHandler extends BaseOpenAiCompatibleProvider<Featherless
 				messages: convertToR1Format([{ role: "user", content: systemPrompt }, ...messages]),
 			})
 
-			const matcher = new XmlMatcher(
+			const matcher = new TagMatcher(
 				"think",
 				(chunk) =>
 					({

+ 6 - 7
src/api/providers/fetchers/__tests__/chutes.spec.ts

@@ -51,7 +51,6 @@ describe("getChutesModels", () => {
 			contextWindow: 128000,
 			supportsImages: false,
 			supportsPromptCache: false,
-			supportsNativeTools: false,
 			inputPrice: 0,
 			outputPrice: 0,
 			description: "Chutes AI model: test/new-model",
@@ -162,7 +161,7 @@ describe("getChutesModels", () => {
 		expect(models["test/image-model"].supportsImages).toBe(true)
 	})
 
-	it("should detect native tool support from supported_features", async () => {
+	it("should accept supported_features containing tools", async () => {
 		const mockResponse = {
 			data: {
 				data: [
@@ -184,10 +183,11 @@ describe("getChutesModels", () => {
 
 		const models = await getChutesModels("test-api-key")
 
-		expect(models["test/tools-model"].supportsNativeTools).toBe(true)
+		expect(models["test/tools-model"]).toBeDefined()
+		expect(models["test/tools-model"].contextWindow).toBe(128000)
 	})
 
-	it("should not enable native tool support when tools is not in supported_features", async () => {
+	it("should accept supported_features without tools", async () => {
 		const mockResponse = {
 			data: {
 				data: [
@@ -209,8 +209,8 @@ describe("getChutesModels", () => {
 
 		const models = await getChutesModels("test-api-key")
 
-		expect(models["test/no-tools-model"].supportsNativeTools).toBe(false)
-		expect(models["test/no-tools-model"].defaultToolProtocol).toBeUndefined()
+		expect(models["test/no-tools-model"]).toBeDefined()
+		expect(models["test/no-tools-model"].contextWindow).toBe(128000)
 	})
 
 	it("should skip empty objects in API response and still process valid models", async () => {
@@ -336,7 +336,6 @@ describe("getChutesModels", () => {
 		// Both valid models should be processed
 		expect(models["test/valid-1"]).toBeDefined()
 		expect(models["test/valid-2"]).toBeDefined()
-		expect(models["test/valid-2"].supportsNativeTools).toBe(true)
 
 		consoleErrorSpy.mockRestore()
 	})

+ 0 - 12
src/api/providers/fetchers/__tests__/litellm.spec.ts

@@ -222,7 +222,6 @@ describe("getLiteLLMModels", () => {
 				contextWindow: 200000,
 				supportsImages: true,
 				supportsPromptCache: false,
-				supportsNativeTools: true,
 				inputPrice: 3,
 				outputPrice: 15,
 				cacheWritesPrice: undefined,
@@ -234,7 +233,6 @@ describe("getLiteLLMModels", () => {
 				contextWindow: 128000,
 				supportsImages: false,
 				supportsPromptCache: false,
-				supportsNativeTools: true,
 				inputPrice: 10,
 				outputPrice: 30,
 				cacheWritesPrice: undefined,
@@ -305,7 +303,6 @@ describe("getLiteLLMModels", () => {
 			contextWindow: 200000,
 			supportsImages: true,
 			supportsPromptCache: false,
-			supportsNativeTools: true,
 			inputPrice: undefined,
 			outputPrice: undefined,
 			cacheWritesPrice: undefined,
@@ -318,7 +315,6 @@ describe("getLiteLLMModels", () => {
 			contextWindow: 200000,
 			supportsImages: false,
 			supportsPromptCache: false,
-			supportsNativeTools: true,
 			inputPrice: undefined,
 			outputPrice: undefined,
 			cacheWritesPrice: undefined,
@@ -455,7 +451,6 @@ describe("getLiteLLMModels", () => {
 			contextWindow: 200000,
 			supportsImages: true,
 			supportsPromptCache: false,
-			supportsNativeTools: true,
 			inputPrice: undefined,
 			outputPrice: undefined,
 			cacheWritesPrice: undefined,
@@ -468,7 +463,6 @@ describe("getLiteLLMModels", () => {
 			contextWindow: 128000,
 			supportsImages: false,
 			supportsPromptCache: false,
-			supportsNativeTools: true,
 			inputPrice: undefined,
 			outputPrice: undefined,
 			cacheWritesPrice: undefined,
@@ -533,7 +527,6 @@ describe("getLiteLLMModels", () => {
 			contextWindow: 200000,
 			supportsImages: true,
 			supportsPromptCache: false,
-			supportsNativeTools: true,
 			inputPrice: undefined,
 			outputPrice: undefined,
 			cacheWritesPrice: undefined,
@@ -546,7 +539,6 @@ describe("getLiteLLMModels", () => {
 			contextWindow: 128000,
 			supportsImages: false,
 			supportsPromptCache: false,
-			supportsNativeTools: true,
 			inputPrice: undefined,
 			outputPrice: undefined,
 			cacheWritesPrice: undefined,
@@ -559,7 +551,6 @@ describe("getLiteLLMModels", () => {
 			contextWindow: 128000,
 			supportsImages: false,
 			supportsPromptCache: false,
-			supportsNativeTools: true,
 			inputPrice: undefined,
 			outputPrice: undefined,
 			cacheWritesPrice: undefined,
@@ -673,7 +664,6 @@ describe("getLiteLLMModels", () => {
 			contextWindow: 200000,
 			supportsImages: true,
 			supportsPromptCache: false,
-			supportsNativeTools: true,
 			inputPrice: undefined,
 			outputPrice: undefined,
 			cacheWritesPrice: undefined,
@@ -687,7 +677,6 @@ describe("getLiteLLMModels", () => {
 			contextWindow: 128000,
 			supportsImages: false,
 			supportsPromptCache: false,
-			supportsNativeTools: true,
 			inputPrice: undefined,
 			outputPrice: undefined,
 			cacheWritesPrice: undefined,
@@ -701,7 +690,6 @@ describe("getLiteLLMModels", () => {
 			contextWindow: 100000,
 			supportsImages: false,
 			supportsPromptCache: false,
-			supportsNativeTools: true,
 			inputPrice: undefined,
 			outputPrice: undefined,
 			cacheWritesPrice: undefined,

+ 4 - 8
src/api/providers/fetchers/__tests__/modelEndpointCache.spec.ts

@@ -15,14 +15,13 @@ describe("modelEndpointCache", () => {
 
 	describe("getModelEndpoints", () => {
 		it("should copy model-level capabilities from parent model to endpoints", async () => {
-			// Mock the parent model data with native tools support
+			// Mock the parent model data with capabilities
 			const mockParentModels = {
 				"anthropic/claude-sonnet-4": {
 					maxTokens: 8192,
 					contextWindow: 200000,
 					supportsImages: true,
 					supportsPromptCache: true,
-					supportsNativeTools: true, // Parent supports native tools
 					supportsReasoningEffort: true,
 					supportedParameters: ["max_tokens", "temperature", "reasoning"] as any,
 					inputPrice: 3,
@@ -39,7 +38,7 @@ describe("modelEndpointCache", () => {
 					supportsPromptCache: true,
 					inputPrice: 3,
 					outputPrice: 15,
-					// Note: No supportsNativeTools, supportsReasoningEffort, or supportedParameters
+					// Note: No supportsReasoningEffort or supportedParameters
 				},
 				"amazon-bedrock": {
 					maxTokens: 8192,
@@ -61,11 +60,9 @@ describe("modelEndpointCache", () => {
 			})
 
 			// Verify capabilities were copied from parent to ALL endpoints
-			expect(result.anthropic.supportsNativeTools).toBe(true)
 			expect(result.anthropic.supportsReasoningEffort).toBe(true)
 			expect(result.anthropic.supportedParameters).toEqual(["max_tokens", "temperature", "reasoning"])
 
-			expect(result["amazon-bedrock"].supportsNativeTools).toBe(true)
 			expect(result["amazon-bedrock"].supportsReasoningEffort).toBe(true)
 			expect(result["amazon-bedrock"].supportedParameters).toEqual(["max_tokens", "temperature", "reasoning"])
 		})
@@ -76,7 +73,6 @@ describe("modelEndpointCache", () => {
 					maxTokens: 1000,
 					contextWindow: 10000,
 					supportsPromptCache: false,
-					supportsNativeTools: true,
 					supportedParameters: ["max_tokens", "temperature"] as any,
 				},
 			}
@@ -131,9 +127,9 @@ describe("modelEndpointCache", () => {
 				endpoint: "anthropic",
 			})
 
-			// Should not crash, but capabilities will be undefined
+			// Should not crash, but copied capabilities will be undefined
 			expect(result.anthropic).toBeDefined()
-			expect(result.anthropic.supportsNativeTools).toBeUndefined()
+			expect(result.anthropic.supportedParameters).toBeUndefined()
 		})
 
 		it("should return empty object for non-openrouter providers", async () => {

+ 2 - 4
src/api/providers/fetchers/__tests__/ollama.test.ts

@@ -22,7 +22,6 @@ describe("Ollama Fetcher", () => {
 				contextWindow: 40960,
 				supportsImages: false,
 				supportsPromptCache: true,
-				supportsNativeTools: true,
 				inputPrice: 0,
 				outputPrice: 0,
 				cacheWritesPrice: 0,
@@ -47,7 +46,6 @@ describe("Ollama Fetcher", () => {
 				contextWindow: 40960,
 				supportsImages: false,
 				supportsPromptCache: true,
-				supportsNativeTools: true,
 				inputPrice: 0,
 				outputPrice: 0,
 				cacheWritesPrice: 0,
@@ -77,7 +75,7 @@ describe("Ollama Fetcher", () => {
 			const parsedModel = parseOllamaModel(modelDataWithTools as any)
 
 			expect(parsedModel).not.toBeNull()
-			expect(parsedModel!.supportsNativeTools).toBe(true)
+			expect(parsedModel!.contextWindow).toBeGreaterThan(0)
 		})
 
 		it("should return null when capabilities is undefined (no tool support)", () => {
@@ -114,7 +112,7 @@ describe("Ollama Fetcher", () => {
 
 			expect(parsedModel).not.toBeNull()
 			expect(parsedModel!.supportsImages).toBe(true)
-			expect(parsedModel!.supportsNativeTools).toBe(true)
+			expect(parsedModel!.contextWindow).toBeGreaterThan(0)
 		})
 	})
 

+ 7 - 18
src/api/providers/fetchers/__tests__/openrouter.spec.ts

@@ -28,9 +28,7 @@ describe("OpenRouter API", () => {
 				description: expect.any(String),
 				supportsReasoningBudget: false,
 				supportsReasoningEffort: false,
-				supportsNativeTools: true,
 				supportedParameters: ["max_tokens", "temperature", "reasoning", "include_reasoning"],
-				defaultToolProtocol: "native",
 			})
 
 			expect(models["anthropic/claude-3.7-sonnet:thinking"]).toEqual({
@@ -46,9 +44,7 @@ describe("OpenRouter API", () => {
 				supportsReasoningBudget: true,
 				requiredReasoningBudget: true,
 				supportsReasoningEffort: true,
-				supportsNativeTools: true,
 				supportedParameters: ["max_tokens", "temperature", "reasoning", "include_reasoning"],
-				defaultToolProtocol: "native",
 			})
 
 			expect(models["google/gemini-2.5-flash-preview-05-20"].maxTokens).toEqual(65535)
@@ -136,7 +132,7 @@ describe("OpenRouter API", () => {
 					cacheWritesPrice: 1.625,
 					cacheReadsPrice: 0.31,
 					supportsReasoningEffort: true,
-					supportsNativeTools: false, // Gemini doesn't support native tools via "tools" parameter
+					// Tool support is handled via metadata/tools at request time.
 					supportedParameters: ["max_tokens", "temperature", "reasoning"],
 				},
 			} as Record<string, any>
@@ -150,7 +146,6 @@ describe("OpenRouter API", () => {
 			const parentModel = mockCachedModels["google/gemini-2.5-pro-preview"]
 			if (parentModel) {
 				for (const key of Object.keys(endpoints)) {
-					endpoints[key].supportsNativeTools = parentModel.supportsNativeTools
 					endpoints[key].supportsReasoningEffort = parentModel.supportsReasoningEffort
 					endpoints[key].supportedParameters = parentModel.supportedParameters
 				}
@@ -169,7 +164,6 @@ describe("OpenRouter API", () => {
 					cacheReadsPrice: 0.31,
 					description: undefined,
 					supportsReasoningEffort: true,
-					supportsNativeTools: false, // Copied from parent model
 					supportedParameters: ["max_tokens", "temperature", "reasoning"],
 				},
 				"google-ai-studio": {
@@ -184,7 +178,6 @@ describe("OpenRouter API", () => {
 					cacheReadsPrice: 0.31,
 					description: undefined,
 					supportsReasoningEffort: true,
-					supportsNativeTools: false, // Copied from parent model
 					supportedParameters: ["max_tokens", "temperature", "reasoning"],
 				},
 			})
@@ -221,7 +214,7 @@ describe("OpenRouter API", () => {
 				},
 			}
 
-			// Mock cached parent model with native tools support
+			// Mock cached parent model capabilities
 			const mockCachedModels = {
 				"anthropic/claude-sonnet-4": {
 					maxTokens: 8192,
@@ -234,7 +227,7 @@ describe("OpenRouter API", () => {
 					cacheWritesPrice: 3.75,
 					cacheReadsPrice: 0.3,
 					supportsReasoningEffort: true,
-					supportsNativeTools: true, // Anthropic supports native tools
+					// Tool support is handled via metadata/tools at request time.
 					supportedParameters: ["max_tokens", "temperature", "reasoning"],
 				},
 			} as Record<string, any>
@@ -248,7 +241,6 @@ describe("OpenRouter API", () => {
 			const parentModel = mockCachedModels["anthropic/claude-sonnet-4"]
 			if (parentModel) {
 				for (const key of Object.keys(endpoints)) {
-					endpoints[key].supportsNativeTools = parentModel.supportsNativeTools
 					endpoints[key].supportsReasoningEffort = parentModel.supportsReasoningEffort
 					endpoints[key].supportedParameters = parentModel.supportedParameters
 				}
@@ -266,7 +258,6 @@ describe("OpenRouter API", () => {
 				description: undefined,
 				supportsReasoningBudget: true,
 				supportsReasoningEffort: true,
-				supportsNativeTools: true, // Copied from parent model
 				supportedParameters: ["max_tokens", "temperature", "reasoning"],
 			})
 
@@ -393,7 +384,7 @@ describe("OpenRouter API", () => {
 			expect(imageResult.maxTokens).toBe(64000)
 		})
 
-		it("sets defaultToolProtocol to native when model supports native tools", () => {
+		it("treats supportedParameters containing tools as allowed", () => {
 			const mockModel = {
 				name: "Tools Model",
 				description: "Model with native tool support",
@@ -414,11 +405,10 @@ describe("OpenRouter API", () => {
 				supportedParameters: ["tools", "max_tokens", "temperature"],
 			})
 
-			expect(resultWithTools.supportsNativeTools).toBe(true)
-			expect(resultWithTools.defaultToolProtocol).toBe("native")
+			expect(resultWithTools.supportedParameters).toContain("max_tokens")
 		})
 
-		it("does not set defaultToolProtocol when model does not support native tools", () => {
+		it("treats supportedParameters without tools as allowed", () => {
 			const mockModel = {
 				name: "No Tools Model",
 				description: "Model without native tool support",
@@ -439,8 +429,7 @@ describe("OpenRouter API", () => {
 				supportedParameters: ["max_tokens", "temperature"],
 			})
 
-			expect(resultWithoutTools.supportsNativeTools).toBe(false)
-			expect(resultWithoutTools.defaultToolProtocol).toBeUndefined()
+			expect(resultWithoutTools.supportedParameters).toContain("max_tokens")
 		})
 	})
 })

+ 8 - 17
src/api/providers/fetchers/__tests__/roo.spec.ts

@@ -69,7 +69,6 @@ describe("getRooModels", () => {
 				supportsImages: true,
 				supportsReasoningEffort: true,
 				requiredReasoningEffort: false,
-				supportsNativeTools: false,
 				supportsPromptCache: true,
 				inputPrice: 100, // 0.0001 * 1_000_000
 				outputPrice: 200, // 0.0002 * 1_000_000
@@ -78,7 +77,6 @@ describe("getRooModels", () => {
 				description: "Fast coding model",
 				deprecated: false,
 				isFree: false,
-				defaultToolProtocol: "native",
 			},
 		})
 	})
@@ -119,7 +117,6 @@ describe("getRooModels", () => {
 			supportsImages: false,
 			supportsReasoningEffort: true,
 			requiredReasoningEffort: true,
-			supportsNativeTools: false,
 			supportsPromptCache: false,
 			inputPrice: 100, // 0.0001 * 1_000_000
 			outputPrice: 200, // 0.0002 * 1_000_000
@@ -129,7 +126,7 @@ describe("getRooModels", () => {
 			deprecated: false,
 			isFree: false,
 			defaultTemperature: undefined,
-			defaultToolProtocol: "native",
+
 			isStealthModel: undefined,
 		})
 	})
@@ -169,7 +166,6 @@ describe("getRooModels", () => {
 			supportsImages: false,
 			supportsReasoningEffort: false,
 			requiredReasoningEffort: false,
-			supportsNativeTools: false,
 			supportsPromptCache: false,
 			inputPrice: 100, // 0.0001 * 1_000_000
 			outputPrice: 200, // 0.0002 * 1_000_000
@@ -179,7 +175,7 @@ describe("getRooModels", () => {
 			deprecated: false,
 			isFree: false,
 			defaultTemperature: undefined,
-			defaultToolProtocol: "native",
+
 			isStealthModel: undefined,
 		})
 	})
@@ -551,7 +547,7 @@ describe("getRooModels", () => {
 		expect(models["test/model-no-temp"].defaultTemperature).toBeUndefined()
 	})
 
-	it("should set defaultToolProtocol to native when default-native-tools tag is present", async () => {
+	it("should include models when tool-use tags are present", async () => {
 		const mockResponse = {
 			object: "list",
 			data: [
@@ -581,11 +577,10 @@ describe("getRooModels", () => {
 
 		const models = await getRooModels(baseUrl, apiKey)
 
-		expect(models["test/native-tools-model"].supportsNativeTools).toBe(true)
-		expect(models["test/native-tools-model"].defaultToolProtocol).toBe("native")
+		expect(models["test/native-tools-model"]).toBeDefined()
 	})
 
-	it("should set defaultToolProtocol to native for all models regardless of tags", async () => {
+	it("handles models when tool tags are absent", async () => {
 		const mockResponse = {
 			object: "list",
 			data: [
@@ -615,12 +610,10 @@ describe("getRooModels", () => {
 
 		const models = await getRooModels(baseUrl, apiKey)
 
-		// All Roo provider models now default to native tool protocol
-		expect(models["test/model-without-tool-tags"].supportsNativeTools).toBe(false)
-		expect(models["test/model-without-tool-tags"].defaultToolProtocol).toBe("native")
+		expect(models["test/model-without-tool-tags"]).toBeDefined()
 	})
 
-	it("should set supportsNativeTools from tool-use tag and always set defaultToolProtocol to native", async () => {
+	it("handles models with tool-use tag", async () => {
 		const mockResponse = {
 			object: "list",
 			data: [
@@ -650,9 +643,7 @@ describe("getRooModels", () => {
 
 		const models = await getRooModels(baseUrl, apiKey)
 
-		// tool-use tag sets supportsNativeTools, and all models get defaultToolProtocol: native
-		expect(models["test/tool-use-model"].supportsNativeTools).toBe(true)
-		expect(models["test/tool-use-model"].defaultToolProtocol).toBe("native")
+		expect(models["test/tool-use-model"]).toBeDefined()
 	})
 
 	it("should detect stealth mode from tags", async () => {

+ 0 - 1
src/api/providers/fetchers/__tests__/vercel-ai-gateway.spec.ts

@@ -173,7 +173,6 @@ describe("Vercel AI Gateway Fetchers", () => {
 				maxTokens: 8000,
 				contextWindow: 100000,
 				supportsImages: false,
-				supportsNativeTools: true,
 				supportsPromptCache: false,
 				inputPrice: 2500000,
 				outputPrice: 10000000,

+ 2 - 2
src/api/providers/fetchers/__tests__/versionedSettings.spec.ts

@@ -197,14 +197,14 @@ describe("versionedSettings", () => {
 		it("should handle versioned boolean values", () => {
 			const versionedSettings: VersionedSettings = {
 				"3.36.0": {
-					supportsNativeTools: true,
+					supportsReasoningEffort: true,
 				},
 			}
 
 			const resolved = resolveVersionedSettings(versionedSettings, currentVersion)
 
 			expect(resolved).toEqual({
-				supportsNativeTools: true,
+				supportsReasoningEffort: true,
 			})
 		})
 

+ 4 - 3
src/api/providers/fetchers/chutes.ts

@@ -57,8 +57,10 @@ export async function getChutesModels(apiKey?: string): Promise<Record<string, M
 				continue
 			}
 
-			const contextWindow = typeof m.context_length === "number" && Number.isFinite(m.context_length) ? m.context_length : undefined
-			const maxModelLen = typeof m.max_model_len === "number" && Number.isFinite(m.max_model_len) ? m.max_model_len : undefined
+			const contextWindow =
+				typeof m.context_length === "number" && Number.isFinite(m.context_length) ? m.context_length : undefined
+			const maxModelLen =
+				typeof m.max_model_len === "number" && Number.isFinite(m.max_model_len) ? m.max_model_len : undefined
 
 			// Skip models without valid context window information
 			if (!contextWindow) {
@@ -70,7 +72,6 @@ export async function getChutesModels(apiKey?: string): Promise<Record<string, M
 				contextWindow,
 				supportsImages: (m.input_modalities || []).includes("image"),
 				supportsPromptCache: false,
-				supportsNativeTools: (m.supported_features || []).includes("tools"),
 				inputPrice: 0,
 				outputPrice: 0,
 				description: `Chutes AI model: ${m.id}`,

+ 0 - 1
src/api/providers/fetchers/deepinfra.ts

@@ -58,7 +58,6 @@ export async function getDeepInfraModels(
 			contextWindow,
 			supportsImages: tags.includes("vision"),
 			supportsPromptCache: tags.includes("prompt_cache"),
-			supportsNativeTools: true,
 			inputPrice: meta.pricing?.input_tokens,
 			outputPrice: meta.pricing?.output_tokens,
 			cacheReadsPrice: meta.pricing?.cache_read_tokens,

+ 0 - 1
src/api/providers/fetchers/litellm.ts

@@ -45,7 +45,6 @@ export async function getLiteLLMModels(apiKey: string, baseUrl: string): Promise
 					contextWindow: modelInfo.max_input_tokens || 200000,
 					supportsImages: Boolean(modelInfo.supports_vision),
 					supportsPromptCache: Boolean(modelInfo.supports_prompt_caching),
-					supportsNativeTools: true,
 					inputPrice: modelInfo.input_cost_per_token ? modelInfo.input_cost_per_token * 1000000 : undefined,
 					outputPrice: modelInfo.output_cost_per_token
 						? modelInfo.output_cost_per_token * 1000000

+ 0 - 1
src/api/providers/fetchers/modelEndpointCache.ts

@@ -68,7 +68,6 @@ export const getModelEndpoints = async ({
 			// Copy model-level capabilities to all endpoints
 			// Clone arrays to avoid shared mutable references
 			for (const endpointKey of Object.keys(modelProviders)) {
-				modelProviders[endpointKey].supportsNativeTools = parentModel.supportsNativeTools
 				modelProviders[endpointKey].supportsReasoningEffort = parentModel.supportsReasoningEffort
 				modelProviders[endpointKey].supportedParameters = parentModel.supportedParameters
 					? [...parentModel.supportedParameters]

+ 4 - 8
src/api/providers/fetchers/ollama.ts

@@ -42,13 +42,10 @@ export const parseOllamaModel = (rawModel: OllamaModelInfoResponse): ModelInfo |
 	const contextWindow =
 		contextKey && typeof rawModel.model_info[contextKey] === "number" ? rawModel.model_info[contextKey] : undefined
 
-	// Determine native tool support from capabilities array
-	// The capabilities array is populated by Ollama based on model metadata
-	const supportsNativeTools = rawModel.capabilities?.includes("tools") ?? false
-
-	// Filter out models that don't support native tools
-	// This prevents users from selecting models that won't work properly with Roo Code's tool calling
-	if (!supportsNativeTools) {
+	// Filter out models that don't support tools.
+	// Roo Code tool calling is native-only; models without tool capability won't work.
+	const supportsTools = rawModel.capabilities?.includes("tools") ?? false
+	if (!supportsTools) {
 		return null
 	}
 
@@ -58,7 +55,6 @@ export const parseOllamaModel = (rawModel: OllamaModelInfoResponse): ModelInfo |
 		supportsPromptCache: true,
 		supportsImages: rawModel.capabilities?.includes("vision"),
 		maxTokens: contextWindow || ollamaDefaultModelInfo.contextWindow,
-		supportsNativeTools: true, // Only models with tools capability reach this point
 	})
 
 	return modelInfo

+ 0 - 5
src/api/providers/fetchers/openrouter.ts

@@ -207,8 +207,6 @@ export const parseOpenRouterModel = ({
 
 	const supportsPromptCache = typeof cacheReadsPrice !== "undefined" // some models support caching but don't charge a cacheWritesPrice, e.g. GPT-5
 
-	const supportsNativeTools = supportedParameters ? supportedParameters.includes("tools") : undefined
-
 	const modelInfo: ModelInfo = {
 		maxTokens: maxTokens || Math.ceil(model.context_length * 0.2),
 		contextWindow: model.context_length,
@@ -220,10 +218,7 @@ export const parseOpenRouterModel = ({
 		cacheReadsPrice,
 		description: model.description,
 		supportsReasoningEffort: supportedParameters ? supportedParameters.includes("reasoning") : undefined,
-		supportsNativeTools,
 		supportedParameters: supportedParameters ? supportedParameters.filter(isModelParameter) : undefined,
-		// Default to native tool protocol when native tools are supported
-		defaultToolProtocol: supportsNativeTools ? ("native" as const) : undefined,
 	}
 
 	if (OPEN_ROUTER_REASONING_BUDGET_MODELS.has(id)) {

+ 0 - 2
src/api/providers/fetchers/requesty.ts

@@ -38,8 +38,6 @@ export async function getRequestyModels(baseUrl?: string, apiKey?: string): Prom
 				supportsImages: rawModel.supports_vision,
 				supportsReasoningBudget: reasoningBudget,
 				supportsReasoningEffort: reasoningEffort,
-				supportsNativeTools: true,
-				defaultToolProtocol: "native",
 				inputPrice: parseApiPrice(rawModel.input_price),
 				outputPrice: parseApiPrice(rawModel.output_price),
 				description: rawModel.description,

+ 0 - 5
src/api/providers/fetchers/roo.ts

@@ -92,9 +92,6 @@ export async function getRooModels(baseUrl: string, apiKey?: string): Promise<Mo
 				// Determine if the model requires reasoning effort based on tags
 				const requiredReasoningEffort = tags.includes("reasoning-required")
 
-				// Determine if the model supports native tool calling based on tags
-				const supportsNativeTools = tags.includes("tool-use")
-
 				// Determine if the model should hide vendor/company identity (stealth mode)
 				const isStealthModel = tags.includes("stealth")
 
@@ -111,7 +108,6 @@ export async function getRooModels(baseUrl: string, apiKey?: string): Promise<Mo
 					supportsImages,
 					supportsReasoningEffort,
 					requiredReasoningEffort,
-					supportsNativeTools,
 					supportsPromptCache: Boolean(cacheReadPrice !== undefined),
 					inputPrice,
 					outputPrice,
@@ -121,7 +117,6 @@ export async function getRooModels(baseUrl: string, apiKey?: string): Promise<Mo
 					deprecated: model.deprecated || false,
 					isFree: tags.includes("free"),
 					defaultTemperature: model.default_temperature,
-					defaultToolProtocol: "native" as const,
 					isStealthModel: isStealthModel || undefined,
 				}
 

+ 0 - 1
src/api/providers/fetchers/unbound.ts

@@ -23,7 +23,6 @@ export async function getUnboundModels(apiKey?: string | null): Promise<Record<s
 					contextWindow: model?.contextWindow ? parseInt(model.contextWindow) : 0,
 					supportsImages: model?.supportsImages ?? false,
 					supportsPromptCache: model?.supportsPromptCaching ?? false,
-					supportsNativeTools: true,
 					inputPrice: model?.inputTokenPrice ? parseFloat(model.inputTokenPrice) : undefined,
 					outputPrice: model?.outputTokenPrice ? parseFloat(model.outputTokenPrice) : undefined,
 					cacheWritesPrice: model?.cacheWritePrice ? parseFloat(model.cacheWritePrice) : undefined,

+ 0 - 1
src/api/providers/fetchers/vercel-ai-gateway.ts

@@ -106,7 +106,6 @@ export const parseVercelAiGatewayModel = ({ id, model }: { id: string; model: Ve
 		contextWindow: model.context_window,
 		supportsImages,
 		supportsPromptCache,
-		supportsNativeTools: true,
 		inputPrice: parseApiPrice(model.pricing?.input),
 		outputPrice: parseApiPrice(model.pricing?.output),
 		cacheWritesPrice,

+ 10 - 21
src/api/providers/gemini.ts

@@ -93,8 +93,7 @@ export class GeminiHandler extends BaseProvider implements SingleCompletionHandl
 		// Gemini 3 validates thought signatures for tool/function calling steps.
 		// We must round-trip the signature when tools are in use, even if the user chose
 		// a minimal thinking level (or thinkingConfig is otherwise absent).
-		const usingNativeTools = Boolean(metadata?.tools && metadata.tools.length > 0)
-		const includeThoughtSignatures = Boolean(thinkingConfig) || usingNativeTools
+		const includeThoughtSignatures = Boolean(thinkingConfig) || Boolean(metadata?.tools?.length)
 
 		// The message list can include provider-specific meta entries such as
 		// `{ type: "reasoning", ... }` that are intended only for providers like
@@ -129,29 +128,19 @@ export class GeminiHandler extends BaseProvider implements SingleCompletionHandl
 			.map((message) => convertAnthropicMessageToGemini(message, { includeThoughtSignatures, toolIdToName }))
 			.flat()
 
-		const tools: GenerateContentConfig["tools"] = []
-
-		// Google built-in tools (Grounding, URL Context) are currently mutually exclusive
-		// with function declarations in the Gemini API. If native function calling is
-		// used (Agent tools), we must prioritize it and skip built-in tools to avoid
-		// "Tool use with function calling is unsupported" (HTTP 400) errors.
-		if (metadata?.tools && metadata.tools.length > 0) {
-			tools.push({
-				functionDeclarations: metadata.tools.map((tool) => ({
+		// Tools are always present (minimum ALWAYS_AVAILABLE_TOOLS).
+		// Google built-in tools (Grounding, URL Context) are mutually exclusive
+		// with function declarations in the Gemini API, so we always use
+		// function declarations when tools are provided.
+		const tools: GenerateContentConfig["tools"] = [
+			{
+				functionDeclarations: (metadata?.tools ?? []).map((tool) => ({
 					name: (tool as any).function.name,
 					description: (tool as any).function.description,
 					parametersJsonSchema: (tool as any).function.parameters,
 				})),
-			})
-		} else {
-			if (this.options.enableUrlContext) {
-				tools.push({ urlContext: {} })
-			}
-
-			if (this.options.enableGrounding) {
-				tools.push({ googleSearch: {} })
-			}
-		}
+			},
+		]
 
 		// Determine temperature respecting model capabilities and defaults:
 		// - If supportsTemperature is explicitly false, ignore user overrides
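
The gemini.ts hunk above collapses the tools array to a single functionDeclarations group built from metadata?.tools ?? []. A compact sketch of that mapping, reusing the same OpenAI tool shape as the earlier Anthropic sketch and the field names shown in the hunk (name, description, parametersJsonSchema), follows; the helper name and surrounding types are simplified assumptions for illustration, not the handler's actual code.

// Simplified illustration of the mapping in the hunk above; not the handler itself.
interface OpenAIFunctionTool {
	type: "function"
	function: { name: string; description?: string; parameters?: Record<string, unknown> }
}

interface FunctionDeclaration {
	name: string
	description?: string
	parametersJsonSchema?: Record<string, unknown>
}

function buildGeminiTools(openAiTools: OpenAIFunctionTool[] = []): Array<{ functionDeclarations: FunctionDeclaration[] }> {
	// A single functionDeclarations group is always emitted; upstream code
	// guarantees a non-empty tool list via ALWAYS_AVAILABLE_TOOLS.
	return [
		{
			functionDeclarations: openAiTools.map((tool) => ({
				name: tool.function.name,
				description: tool.function.description,
				parametersJsonSchema: tool.function.parameters,
			})),
		},
	]
}

// Example with a single hypothetical tool definition.
console.log(
	JSON.stringify(
		buildGeminiTools([
			{ type: "function", function: { name: "example_tool", parameters: { type: "object", properties: {} } } },
		]),
		null,
		2,
	),
)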

Some files were not shown because too many files changed in this diff