feat: enable native tools by default for multiple providers (#10059)

Daniel 2 weeks ago
parent commit a9a15b37fd
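
This commit adds a `defaultToolProtocol: "native"` field alongside the existing `supportsNativeTools` flag in each model definition, so models that support native tool calling use it by default. As a rough illustration only (not code from this commit), a consumer of these two fields might resolve the active protocol as in the TypeScript sketch below; the `resolveToolProtocol` helper, the `ToolProtocol` union, and the `"xml"` fallback are assumptions made for the sketch.

// Sketch only: assumes a resolver that prefers an explicit user setting,
// then the model's declared default, then falls back to XML-style prompting.
type ToolProtocol = "native" | "xml"

interface ToolProtocolFields {
	supportsNativeTools?: boolean
	defaultToolProtocol?: ToolProtocol
}

function resolveToolProtocol(model: ToolProtocolFields, userSetting?: ToolProtocol): ToolProtocol {
	if (userSetting) return userSetting // an explicit user choice wins
	if (model.supportsNativeTools && model.defaultToolProtocol) return model.defaultToolProtocol
	return "xml" // assumed fallback when no default is declared
}

// After this change, a Bedrock Claude entry would resolve to "native":
resolveToolProtocol({ supportsNativeTools: true, defaultToolProtocol: "native" }) // -> "native"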

+ 16 - 0
packages/types/src/providers/bedrock.ts

@@ -20,6 +20,7 @@ export const bedrockModels = {
 		supportsPromptCache: true,
 		supportsReasoningBudget: true,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		inputPrice: 3.0,
 		outputPrice: 15.0,
 		cacheWritesPrice: 3.75,
@@ -104,6 +105,7 @@ export const bedrockModels = {
 		supportsPromptCache: true,
 		supportsReasoningBudget: true,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		inputPrice: 3.0,
 		outputPrice: 15.0,
 		cacheWritesPrice: 3.75,
@@ -119,6 +121,7 @@ export const bedrockModels = {
 		supportsPromptCache: true,
 		supportsReasoningBudget: true,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		inputPrice: 15.0,
 		outputPrice: 75.0,
 		cacheWritesPrice: 18.75,
@@ -134,6 +137,7 @@ export const bedrockModels = {
 		supportsPromptCache: true,
 		supportsReasoningBudget: true,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		inputPrice: 5.0,
 		outputPrice: 25.0,
 		cacheWritesPrice: 6.25,
@@ -149,6 +153,7 @@ export const bedrockModels = {
 		supportsPromptCache: true,
 		supportsReasoningBudget: true,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		inputPrice: 15.0,
 		outputPrice: 75.0,
 		cacheWritesPrice: 18.75,
@@ -164,6 +169,7 @@ export const bedrockModels = {
 		supportsPromptCache: true,
 		supportsReasoningBudget: true,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		inputPrice: 3.0,
 		outputPrice: 15.0,
 		cacheWritesPrice: 3.75,
@@ -178,6 +184,7 @@ export const bedrockModels = {
 		supportsImages: true,
 		supportsPromptCache: true,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		inputPrice: 3.0,
 		outputPrice: 15.0,
 		cacheWritesPrice: 3.75,
@@ -192,6 +199,7 @@ export const bedrockModels = {
 		supportsImages: false,
 		supportsPromptCache: true,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		inputPrice: 0.8,
 		outputPrice: 4.0,
 		cacheWritesPrice: 1.0,
@@ -207,6 +215,7 @@ export const bedrockModels = {
 		supportsPromptCache: true,
 		supportsReasoningBudget: true,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		inputPrice: 1.0,
 		outputPrice: 5.0,
 		cacheWritesPrice: 1.25, // 5m cache writes
@@ -221,6 +230,7 @@ export const bedrockModels = {
 		supportsImages: true,
 		supportsPromptCache: false,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		inputPrice: 3.0,
 		outputPrice: 15.0,
 	},
@@ -230,6 +240,7 @@ export const bedrockModels = {
 		supportsImages: true,
 		supportsPromptCache: false,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		inputPrice: 15.0,
 		outputPrice: 75.0,
 	},
@@ -239,6 +250,7 @@ export const bedrockModels = {
 		supportsImages: true,
 		supportsPromptCache: false,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		inputPrice: 3.0,
 		outputPrice: 15.0,
 	},
@@ -248,6 +260,7 @@ export const bedrockModels = {
 		supportsImages: true,
 		supportsPromptCache: false,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		inputPrice: 0.25,
 		outputPrice: 1.25,
 	},
@@ -257,6 +270,7 @@ export const bedrockModels = {
 		supportsImages: false,
 		supportsPromptCache: false,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		inputPrice: 8.0,
 		outputPrice: 24.0,
 		description: "Claude 2.1",
@@ -267,6 +281,7 @@ export const bedrockModels = {
 		supportsImages: false,
 		supportsPromptCache: false,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		inputPrice: 8.0,
 		outputPrice: 24.0,
 		description: "Claude 2.0",
@@ -277,6 +292,7 @@ export const bedrockModels = {
 		supportsImages: false,
 		supportsPromptCache: false,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		inputPrice: 0.8,
 		outputPrice: 2.4,
 		description: "Claude Instant",

+ 5 - 0
packages/types/src/providers/cerebras.ts

@@ -12,6 +12,7 @@ export const cerebrasModels = {
 		supportsImages: false,
 		supportsPromptCache: false,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		inputPrice: 0,
 		outputPrice: 0,
 		description: "Highly intelligent general purpose model with up to 1,000 tokens/s",
@@ -22,6 +23,7 @@ export const cerebrasModels = {
 		supportsImages: false,
 		supportsPromptCache: false,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		inputPrice: 0,
 		outputPrice: 0,
 		description: "Intelligent model with ~1400 tokens/s",
@@ -32,6 +34,7 @@ export const cerebrasModels = {
 		supportsImages: false,
 		supportsPromptCache: false,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		inputPrice: 0,
 		outputPrice: 0,
 		description: "Powerful model with ~2600 tokens/s",
@@ -42,6 +45,7 @@ export const cerebrasModels = {
 		supportsImages: false,
 		supportsPromptCache: false,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		inputPrice: 0,
 		outputPrice: 0,
 		description: "SOTA coding performance with ~2500 tokens/s",
@@ -52,6 +56,7 @@ export const cerebrasModels = {
 		supportsImages: false,
 		supportsPromptCache: false,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		inputPrice: 0,
 		outputPrice: 0,
 		description:

+ 3 - 0
packages/types/src/providers/doubao.ts

@@ -9,6 +9,7 @@ export const doubaoModels = {
 		supportsImages: true,
 		supportsPromptCache: true,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		inputPrice: 0.0001, // $0.0001 per million tokens (cache miss)
 		outputPrice: 0.0004, // $0.0004 per million tokens
 		cacheWritesPrice: 0.0001, // $0.0001 per million tokens (cache miss)
@@ -21,6 +22,7 @@ export const doubaoModels = {
 		supportsImages: true,
 		supportsPromptCache: true,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		inputPrice: 0.0002, // $0.0002 per million tokens
 		outputPrice: 0.0008, // $0.0008 per million tokens
 		cacheWritesPrice: 0.0002, // $0.0002 per million
@@ -33,6 +35,7 @@ export const doubaoModels = {
 		supportsImages: true,
 		supportsPromptCache: true,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		inputPrice: 0.00015, // $0.00015 per million tokens
 		outputPrice: 0.0006, // $0.0006 per million tokens
 		cacheWritesPrice: 0.00015, // $0.00015 per million

+ 13 - 0
packages/types/src/providers/fireworks.ts

@@ -24,6 +24,7 @@ export const fireworksModels = {
 		supportsImages: false,
 		supportsPromptCache: true,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		inputPrice: 0.6,
 		outputPrice: 2.5,
 		cacheReadsPrice: 0.15,
@@ -36,6 +37,7 @@ export const fireworksModels = {
 		supportsImages: false,
 		supportsPromptCache: false,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		inputPrice: 0.6,
 		outputPrice: 2.5,
 		description:
@@ -47,6 +49,7 @@ export const fireworksModels = {
 		supportsImages: false,
 		supportsPromptCache: false,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		inputPrice: 0.3,
 		outputPrice: 1.2,
 		description:
@@ -58,6 +61,7 @@ export const fireworksModels = {
 		supportsImages: false,
 		supportsPromptCache: false,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		inputPrice: 0.22,
 		outputPrice: 0.88,
 		description: "Latest Qwen3 thinking model, competitive against the best closed source models in Jul 2025.",
@@ -68,6 +72,7 @@ export const fireworksModels = {
 		supportsImages: false,
 		supportsPromptCache: false,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		inputPrice: 0.45,
 		outputPrice: 1.8,
 		description: "Qwen3's most agentic code model to date.",
@@ -78,6 +83,7 @@ export const fireworksModels = {
 		supportsImages: false,
 		supportsPromptCache: false,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		inputPrice: 3,
 		outputPrice: 8,
 		description:
@@ -89,6 +95,7 @@ export const fireworksModels = {
 		supportsImages: false,
 		supportsPromptCache: false,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		inputPrice: 0.9,
 		outputPrice: 0.9,
 		description:
@@ -100,6 +107,7 @@ export const fireworksModels = {
 		supportsImages: false,
 		supportsPromptCache: false,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		inputPrice: 0.56,
 		outputPrice: 1.68,
 		description:
@@ -111,6 +119,7 @@ export const fireworksModels = {
 		supportsImages: false,
 		supportsPromptCache: false,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		inputPrice: 0.55,
 		outputPrice: 2.19,
 		description:
@@ -122,6 +131,7 @@ export const fireworksModels = {
 		supportsImages: false,
 		supportsPromptCache: false,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		inputPrice: 0.55,
 		outputPrice: 2.19,
 		description:
@@ -133,6 +143,7 @@ export const fireworksModels = {
 		supportsImages: false,
 		supportsPromptCache: false,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		inputPrice: 0.55,
 		outputPrice: 2.19,
 		description:
@@ -144,6 +155,7 @@ export const fireworksModels = {
 		supportsImages: false,
 		supportsPromptCache: false,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		inputPrice: 0.07,
 		outputPrice: 0.3,
 		description:
@@ -155,6 +167,7 @@ export const fireworksModels = {
 		supportsImages: false,
 		supportsPromptCache: false,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		inputPrice: 0.15,
 		outputPrice: 0.6,
 		description:

+ 10 - 0
packages/types/src/providers/gemini.ts

@@ -11,6 +11,7 @@ export const geminiModels = {
 		contextWindow: 1_048_576,
 		supportsImages: true,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		supportsPromptCache: true,
 		supportsReasoningEffort: ["low", "high"],
 		reasoningEffort: "low",
@@ -37,6 +38,7 @@ export const geminiModels = {
 		contextWindow: 1_048_576,
 		supportsImages: true,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		supportsPromptCache: true,
 		inputPrice: 2.5, // This is the pricing for prompts above 200k tokens.
 		outputPrice: 15,
@@ -65,6 +67,7 @@ export const geminiModels = {
 		contextWindow: 1_048_576,
 		supportsImages: true,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		supportsPromptCache: true,
 		inputPrice: 2.5, // This is the pricing for prompts above 200k tokens.
 		outputPrice: 15,
@@ -92,6 +95,7 @@ export const geminiModels = {
 		contextWindow: 1_048_576,
 		supportsImages: true,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		supportsPromptCache: true,
 		inputPrice: 2.5, // This is the pricing for prompts above 200k tokens.
 		outputPrice: 15,
@@ -117,6 +121,7 @@ export const geminiModels = {
 		contextWindow: 1_048_576,
 		supportsImages: true,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		supportsPromptCache: true,
 		inputPrice: 2.5, // This is the pricing for prompts above 200k tokens.
 		outputPrice: 15,
@@ -146,6 +151,7 @@ export const geminiModels = {
 		contextWindow: 1_048_576,
 		supportsImages: true,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		supportsPromptCache: true,
 		inputPrice: 0.3,
 		outputPrice: 2.5,
@@ -159,6 +165,7 @@ export const geminiModels = {
 		contextWindow: 1_048_576,
 		supportsImages: true,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		supportsPromptCache: true,
 		inputPrice: 0.3,
 		outputPrice: 2.5,
@@ -172,6 +179,7 @@ export const geminiModels = {
 		contextWindow: 1_048_576,
 		supportsImages: true,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		supportsPromptCache: true,
 		inputPrice: 0.3,
 		outputPrice: 2.5,
@@ -187,6 +195,7 @@ export const geminiModels = {
 		contextWindow: 1_048_576,
 		supportsImages: true,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		supportsPromptCache: true,
 		inputPrice: 0.1,
 		outputPrice: 0.4,
@@ -200,6 +209,7 @@ export const geminiModels = {
 		contextWindow: 1_048_576,
 		supportsImages: true,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		supportsPromptCache: true,
 		inputPrice: 0.1,
 		outputPrice: 0.4,

+ 7 - 0
packages/types/src/providers/groq.ts

@@ -25,6 +25,7 @@ export const groqModels = {
 		supportsImages: false,
 		supportsPromptCache: false,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		inputPrice: 0.05,
 		outputPrice: 0.08,
 		description: "Meta Llama 3.1 8B Instant model, 128K context.",
@@ -35,6 +36,7 @@ export const groqModels = {
 		supportsImages: false,
 		supportsPromptCache: false,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		inputPrice: 0.59,
 		outputPrice: 0.79,
 		description: "Meta Llama 3.3 70B Versatile model, 128K context.",
@@ -45,6 +47,7 @@ export const groqModels = {
 		supportsImages: false,
 		supportsPromptCache: false,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		inputPrice: 0.11,
 		outputPrice: 0.34,
 		description: "Meta Llama 4 Scout 17B Instruct model, 128K context.",
@@ -82,6 +85,7 @@ export const groqModels = {
 		supportsImages: false,
 		supportsPromptCache: false,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		inputPrice: 0.29,
 		outputPrice: 0.59,
 		description: "Alibaba Qwen 3 32B model, 128K context.",
@@ -111,6 +115,7 @@ export const groqModels = {
 		supportsImages: false,
 		supportsPromptCache: true,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		inputPrice: 0.6,
 		outputPrice: 2.5,
 		cacheReadsPrice: 0.15,
@@ -123,6 +128,7 @@ export const groqModels = {
 		supportsImages: false,
 		supportsPromptCache: false,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		inputPrice: 0.15,
 		outputPrice: 0.75,
 		description:
@@ -134,6 +140,7 @@ export const groqModels = {
 		supportsImages: false,
 		supportsPromptCache: false,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		inputPrice: 0.1,
 		outputPrice: 0.5,
 		description:

+ 9 - 0
packages/types/src/providers/mistral.ts

@@ -12,6 +12,7 @@ export const mistralModels = {
 		supportsImages: true,
 		supportsPromptCache: false,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		inputPrice: 2.0,
 		outputPrice: 5.0,
 	},
@@ -21,6 +22,7 @@ export const mistralModels = {
 		supportsImages: true,
 		supportsPromptCache: false,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		inputPrice: 0.4,
 		outputPrice: 2.0,
 	},
@@ -30,6 +32,7 @@ export const mistralModels = {
 		supportsImages: true,
 		supportsPromptCache: false,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		inputPrice: 0.4,
 		outputPrice: 2.0,
 	},
@@ -39,6 +42,7 @@ export const mistralModels = {
 		supportsImages: false,
 		supportsPromptCache: false,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		inputPrice: 0.3,
 		outputPrice: 0.9,
 	},
@@ -48,6 +52,7 @@ export const mistralModels = {
 		supportsImages: false,
 		supportsPromptCache: false,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		inputPrice: 2.0,
 		outputPrice: 6.0,
 	},
@@ -57,6 +62,7 @@ export const mistralModels = {
 		supportsImages: false,
 		supportsPromptCache: false,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		inputPrice: 0.1,
 		outputPrice: 0.1,
 	},
@@ -66,6 +72,7 @@ export const mistralModels = {
 		supportsImages: false,
 		supportsPromptCache: false,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		inputPrice: 0.04,
 		outputPrice: 0.04,
 	},
@@ -75,6 +82,7 @@ export const mistralModels = {
 		supportsImages: false,
 		supportsPromptCache: false,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		inputPrice: 0.2,
 		outputPrice: 0.6,
 	},
@@ -84,6 +92,7 @@ export const mistralModels = {
 		supportsImages: true,
 		supportsPromptCache: false,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		inputPrice: 2.0,
 		outputPrice: 6.0,
 	},

+ 32 - 0
packages/types/src/providers/openai.ts

@@ -10,6 +10,7 @@ export const openAiNativeModels = {
 		maxTokens: 128000,
 		contextWindow: 400000,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		includedTools: ["apply_patch"],
 		excludedTools: ["apply_diff", "write_to_file"],
 		supportsImages: true,
@@ -29,6 +30,7 @@ export const openAiNativeModels = {
 		maxTokens: 128000,
 		contextWindow: 400000,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		includedTools: ["apply_patch"],
 		excludedTools: ["apply_diff", "write_to_file"],
 		supportsImages: true,
@@ -51,6 +53,7 @@ export const openAiNativeModels = {
 		maxTokens: 16_384,
 		contextWindow: 128_000,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		includedTools: ["apply_patch"],
 		excludedTools: ["apply_diff", "write_to_file"],
 		supportsImages: true,
@@ -64,6 +67,7 @@ export const openAiNativeModels = {
 		maxTokens: 128000,
 		contextWindow: 400000,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		includedTools: ["apply_patch"],
 		excludedTools: ["apply_diff", "write_to_file"],
 		supportsImages: true,
@@ -86,6 +90,7 @@ export const openAiNativeModels = {
 		maxTokens: 128000,
 		contextWindow: 400000,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		includedTools: ["apply_patch"],
 		excludedTools: ["apply_diff", "write_to_file"],
 		supportsImages: true,
@@ -104,6 +109,7 @@ export const openAiNativeModels = {
 		maxTokens: 128000,
 		contextWindow: 400000,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		includedTools: ["apply_patch"],
 		excludedTools: ["apply_diff", "write_to_file"],
 		supportsImages: true,
@@ -121,6 +127,7 @@ export const openAiNativeModels = {
 		maxTokens: 128000,
 		contextWindow: 400000,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		includedTools: ["apply_patch"],
 		excludedTools: ["apply_diff", "write_to_file"],
 		supportsImages: true,
@@ -142,6 +149,7 @@ export const openAiNativeModels = {
 		maxTokens: 128000,
 		contextWindow: 400000,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		includedTools: ["apply_patch"],
 		excludedTools: ["apply_diff", "write_to_file"],
 		supportsImages: true,
@@ -163,6 +171,7 @@ export const openAiNativeModels = {
 		maxTokens: 128000,
 		contextWindow: 400000,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		includedTools: ["apply_patch"],
 		excludedTools: ["apply_diff", "write_to_file"],
 		supportsImages: true,
@@ -180,6 +189,7 @@ export const openAiNativeModels = {
 		maxTokens: 128000,
 		contextWindow: 400000,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		includedTools: ["apply_patch"],
 		excludedTools: ["apply_diff", "write_to_file"],
 		supportsImages: true,
@@ -198,6 +208,7 @@ export const openAiNativeModels = {
 		maxTokens: 128000,
 		contextWindow: 400000,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		includedTools: ["apply_patch"],
 		excludedTools: ["apply_diff", "write_to_file"],
 		supportsImages: true,
@@ -211,6 +222,7 @@ export const openAiNativeModels = {
 		maxTokens: 32_768,
 		contextWindow: 1_047_576,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		includedTools: ["apply_patch"],
 		excludedTools: ["apply_diff", "write_to_file"],
 		supportsImages: true,
@@ -227,6 +239,7 @@ export const openAiNativeModels = {
 		maxTokens: 32_768,
 		contextWindow: 1_047_576,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		includedTools: ["apply_patch"],
 		excludedTools: ["apply_diff", "write_to_file"],
 		supportsImages: true,
@@ -243,6 +256,7 @@ export const openAiNativeModels = {
 		maxTokens: 32_768,
 		contextWindow: 1_047_576,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		includedTools: ["apply_patch"],
 		excludedTools: ["apply_diff", "write_to_file"],
 		supportsImages: true,
@@ -259,6 +273,7 @@ export const openAiNativeModels = {
 		maxTokens: 100_000,
 		contextWindow: 200_000,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		supportsImages: true,
 		supportsPromptCache: true,
 		inputPrice: 2.0,
@@ -276,6 +291,7 @@ export const openAiNativeModels = {
 		maxTokens: 100_000,
 		contextWindow: 200_000,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		supportsImages: true,
 		supportsPromptCache: true,
 		inputPrice: 2.0,
@@ -288,6 +304,7 @@ export const openAiNativeModels = {
 		maxTokens: 100_000,
 		contextWindow: 200_000,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		supportsImages: true,
 		supportsPromptCache: true,
 		inputPrice: 2.0,
@@ -300,6 +317,7 @@ export const openAiNativeModels = {
 		maxTokens: 100_000,
 		contextWindow: 200_000,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		supportsImages: true,
 		supportsPromptCache: true,
 		inputPrice: 1.1,
@@ -317,6 +335,7 @@ export const openAiNativeModels = {
 		maxTokens: 100_000,
 		contextWindow: 200_000,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		supportsImages: true,
 		supportsPromptCache: true,
 		inputPrice: 1.1,
@@ -329,6 +348,7 @@ export const openAiNativeModels = {
 		maxTokens: 100_000,
 		contextWindow: 200_000,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		supportsImages: true,
 		supportsPromptCache: true,
 		inputPrice: 1.1,
@@ -341,6 +361,7 @@ export const openAiNativeModels = {
 		maxTokens: 100_000,
 		contextWindow: 200_000,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		supportsImages: false,
 		supportsPromptCache: true,
 		inputPrice: 1.1,
@@ -354,6 +375,7 @@ export const openAiNativeModels = {
 		maxTokens: 100_000,
 		contextWindow: 200_000,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		supportsImages: false,
 		supportsPromptCache: true,
 		inputPrice: 1.1,
@@ -366,6 +388,7 @@ export const openAiNativeModels = {
 		maxTokens: 100_000,
 		contextWindow: 200_000,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		supportsImages: false,
 		supportsPromptCache: true,
 		inputPrice: 1.1,
@@ -378,6 +401,7 @@ export const openAiNativeModels = {
 		maxTokens: 100_000,
 		contextWindow: 200_000,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		supportsImages: true,
 		supportsPromptCache: true,
 		inputPrice: 15,
@@ -389,6 +413,7 @@ export const openAiNativeModels = {
 		maxTokens: 32_768,
 		contextWindow: 128_000,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		supportsImages: true,
 		supportsPromptCache: true,
 		inputPrice: 15,
@@ -400,6 +425,7 @@ export const openAiNativeModels = {
 		maxTokens: 65_536,
 		contextWindow: 128_000,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		supportsImages: true,
 		supportsPromptCache: true,
 		inputPrice: 1.1,
@@ -411,6 +437,7 @@ export const openAiNativeModels = {
 		maxTokens: 16_384,
 		contextWindow: 128_000,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		supportsImages: true,
 		supportsPromptCache: true,
 		inputPrice: 2.5,
@@ -425,6 +452,7 @@ export const openAiNativeModels = {
 		maxTokens: 16_384,
 		contextWindow: 128_000,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		supportsImages: true,
 		supportsPromptCache: true,
 		inputPrice: 0.15,
@@ -439,6 +467,7 @@ export const openAiNativeModels = {
 		maxTokens: 16_384,
 		contextWindow: 200_000,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		supportsImages: false,
 		supportsPromptCache: false,
 		inputPrice: 1.5,
@@ -453,6 +482,7 @@ export const openAiNativeModels = {
 		maxTokens: 128000,
 		contextWindow: 400000,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		includedTools: ["apply_patch"],
 		excludedTools: ["apply_diff", "write_to_file"],
 		supportsImages: true,
@@ -474,6 +504,7 @@ export const openAiNativeModels = {
 		maxTokens: 128000,
 		contextWindow: 400000,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		includedTools: ["apply_patch"],
 		excludedTools: ["apply_diff", "write_to_file"],
 		supportsImages: true,
@@ -495,6 +526,7 @@ export const openAiNativeModels = {
 		maxTokens: 128000,
 		contextWindow: 400000,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		includedTools: ["apply_patch"],
 		excludedTools: ["apply_diff", "write_to_file"],
 		supportsImages: true,

+ 2 - 0
packages/types/src/providers/requesty.ts

@@ -9,6 +9,8 @@ export const requestyDefaultModelInfo: ModelInfo = {
 	contextWindow: 200_000,
 	supportsImages: true,
 	supportsPromptCache: true,
+	supportsNativeTools: true,
+	defaultToolProtocol: "native",
 	inputPrice: 3.0,
 	outputPrice: 15.0,
 	cacheWritesPrice: 3.75,

+ 8 - 0
packages/types/src/providers/sambanova.ts

@@ -22,6 +22,7 @@ export const sambaNovaModels = {
 		supportsImages: false,
 		supportsPromptCache: false,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		inputPrice: 0.1,
 		outputPrice: 0.2,
 		description: "Meta Llama 3.1 8B Instruct model with 16K context window.",
@@ -32,6 +33,7 @@ export const sambaNovaModels = {
 		supportsImages: false,
 		supportsPromptCache: false,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		inputPrice: 0.6,
 		outputPrice: 1.2,
 		description: "Meta Llama 3.3 70B Instruct model with 128K context window.",
@@ -43,6 +45,7 @@ export const sambaNovaModels = {
 		supportsPromptCache: false,
 		supportsReasoningBudget: true,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		inputPrice: 5.0,
 		outputPrice: 7.0,
 		description: "DeepSeek R1 reasoning model with 32K context window.",
@@ -53,6 +56,7 @@ export const sambaNovaModels = {
 		supportsImages: false,
 		supportsPromptCache: false,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		inputPrice: 3.0,
 		outputPrice: 4.5,
 		description: "DeepSeek V3 model with 32K context window.",
@@ -63,6 +67,7 @@ export const sambaNovaModels = {
 		supportsImages: false,
 		supportsPromptCache: false,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		inputPrice: 3.0,
 		outputPrice: 4.5,
 		description: "DeepSeek V3.1 model with 32K context window.",
@@ -82,6 +87,7 @@ export const sambaNovaModels = {
 		supportsImages: true,
 		supportsPromptCache: false,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		inputPrice: 0.63,
 		outputPrice: 1.8,
 		description: "Meta Llama 4 Maverick 17B 128E Instruct model with 128K context window.",
@@ -101,6 +107,7 @@ export const sambaNovaModels = {
 		supportsImages: false,
 		supportsPromptCache: false,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		inputPrice: 0.4,
 		outputPrice: 0.8,
 		description: "Alibaba Qwen 3 32B model with 8K context window.",
@@ -111,6 +118,7 @@ export const sambaNovaModels = {
 		supportsImages: false,
 		supportsPromptCache: false,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		inputPrice: 0.22,
 		outputPrice: 0.59,
 		description: "OpenAI gpt oss 120b model with 128k context window.",

+ 18 - 0
packages/types/src/providers/vertex.ts

@@ -11,6 +11,7 @@ export const vertexModels = {
 		contextWindow: 1_048_576,
 		supportsImages: true,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		supportsPromptCache: true,
 		supportsReasoningEffort: ["low", "high"],
 		reasoningEffort: "low",
@@ -36,6 +37,7 @@ export const vertexModels = {
 		contextWindow: 1_048_576,
 		supportsImages: true,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		supportsPromptCache: true,
 		inputPrice: 0.15,
 		outputPrice: 3.5,
@@ -48,6 +50,7 @@ export const vertexModels = {
 		contextWindow: 1_048_576,
 		supportsImages: true,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		supportsPromptCache: true,
 		inputPrice: 0.15,
 		outputPrice: 0.6,
@@ -57,6 +60,7 @@ export const vertexModels = {
 		contextWindow: 1_048_576,
 		supportsImages: true,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		supportsPromptCache: true,
 		inputPrice: 0.3,
 		outputPrice: 2.5,
@@ -70,6 +74,7 @@ export const vertexModels = {
 		contextWindow: 1_048_576,
 		supportsImages: true,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		supportsPromptCache: false,
 		inputPrice: 0.15,
 		outputPrice: 3.5,
@@ -82,6 +87,7 @@ export const vertexModels = {
 		contextWindow: 1_048_576,
 		supportsImages: true,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		supportsPromptCache: false,
 		inputPrice: 0.15,
 		outputPrice: 0.6,
@@ -91,6 +97,7 @@ export const vertexModels = {
 		contextWindow: 1_048_576,
 		supportsImages: true,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		supportsPromptCache: true,
 		inputPrice: 2.5,
 		outputPrice: 15,
@@ -100,6 +107,7 @@ export const vertexModels = {
 		contextWindow: 1_048_576,
 		supportsImages: true,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		supportsPromptCache: true,
 		inputPrice: 2.5,
 		outputPrice: 15,
@@ -109,6 +117,7 @@ export const vertexModels = {
 		contextWindow: 1_048_576,
 		supportsImages: true,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		supportsPromptCache: true,
 		inputPrice: 2.5,
 		outputPrice: 15,
@@ -120,6 +129,7 @@ export const vertexModels = {
 		contextWindow: 1_048_576,
 		supportsImages: true,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		supportsPromptCache: true,
 		inputPrice: 2.5,
 		outputPrice: 15,
@@ -146,6 +156,7 @@ export const vertexModels = {
 		contextWindow: 1_048_576,
 		supportsImages: true,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		supportsPromptCache: false,
 		inputPrice: 0,
 		outputPrice: 0,
@@ -155,6 +166,7 @@ export const vertexModels = {
 		contextWindow: 2_097_152,
 		supportsImages: true,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		supportsPromptCache: false,
 		inputPrice: 0,
 		outputPrice: 0,
@@ -164,6 +176,7 @@ export const vertexModels = {
 		contextWindow: 1_048_576,
 		supportsImages: true,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		supportsPromptCache: true,
 		inputPrice: 0.15,
 		outputPrice: 0.6,
@@ -173,6 +186,7 @@ export const vertexModels = {
 		contextWindow: 1_048_576,
 		supportsImages: true,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		supportsPromptCache: false,
 		inputPrice: 0.075,
 		outputPrice: 0.3,
@@ -182,6 +196,7 @@ export const vertexModels = {
 		contextWindow: 32_768,
 		supportsImages: true,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		supportsPromptCache: false,
 		inputPrice: 0,
 		outputPrice: 0,
@@ -191,6 +206,7 @@ export const vertexModels = {
 		contextWindow: 1_048_576,
 		supportsImages: true,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		supportsPromptCache: true,
 		inputPrice: 0.075,
 		outputPrice: 0.3,
@@ -200,6 +216,7 @@ export const vertexModels = {
 		contextWindow: 2_097_152,
 		supportsImages: true,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		supportsPromptCache: false,
 		inputPrice: 1.25,
 		outputPrice: 5,
@@ -346,6 +363,7 @@ export const vertexModels = {
 		contextWindow: 1_048_576,
 		supportsImages: true,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		supportsPromptCache: true,
 		inputPrice: 0.1,
 		outputPrice: 0.4,

+ 8 - 0
packages/types/src/providers/xai.ts

@@ -12,6 +12,7 @@ export const xaiModels = {
 		supportsImages: true,
 		supportsPromptCache: true,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		inputPrice: 0.2,
 		outputPrice: 1.5,
 		cacheWritesPrice: 0.02,
@@ -26,6 +27,7 @@ export const xaiModels = {
 		supportsImages: true,
 		supportsPromptCache: true,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		inputPrice: 0.2,
 		outputPrice: 0.5,
 		cacheWritesPrice: 0.05,
@@ -41,6 +43,7 @@ export const xaiModels = {
 		supportsImages: true,
 		supportsPromptCache: true,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		inputPrice: 0.2,
 		outputPrice: 0.5,
 		cacheWritesPrice: 0.05,
@@ -56,6 +59,7 @@ export const xaiModels = {
 		supportsImages: true,
 		supportsPromptCache: true,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		inputPrice: 0.2,
 		outputPrice: 0.5,
 		cacheWritesPrice: 0.05,
@@ -71,6 +75,7 @@ export const xaiModels = {
 		supportsImages: true,
 		supportsPromptCache: true,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		inputPrice: 0.2,
 		outputPrice: 0.5,
 		cacheWritesPrice: 0.05,
@@ -86,6 +91,7 @@ export const xaiModels = {
 		supportsImages: true,
 		supportsPromptCache: true,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		inputPrice: 3.0,
 		outputPrice: 15.0,
 		cacheWritesPrice: 0.75,
@@ -100,6 +106,7 @@ export const xaiModels = {
 		supportsImages: true,
 		supportsPromptCache: true,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		inputPrice: 0.3,
 		outputPrice: 0.5,
 		cacheWritesPrice: 0.07,
@@ -116,6 +123,7 @@ export const xaiModels = {
 		supportsImages: true,
 		supportsPromptCache: true,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		inputPrice: 3.0,
 		outputPrice: 15.0,
 		cacheWritesPrice: 0.75,

+ 11 - 0
src/api/providers/__tests__/bedrock-error-handling.spec.ts

@@ -1,3 +1,13 @@
+// Mock TelemetryService - must come before other imports
+const mockCaptureException = vi.hoisted(() => vi.fn())
+vi.mock("@roo-code/telemetry", () => ({
+	TelemetryService: {
+		instance: {
+			captureException: mockCaptureException,
+		},
+	},
+}))
+
 // Mock BedrockRuntimeClient and commands
 const mockSend = vi.fn()
 
@@ -27,6 +37,7 @@ describe("AwsBedrockHandler Error Handling", () => {
 
 	beforeEach(() => {
 		vi.clearAllMocks()
+		mockCaptureException.mockClear()
 		handler = new AwsBedrockHandler({
 			apiModelId: "anthropic.claude-3-5-sonnet-20241022-v2:0",
 			awsAccessKey: "test-access-key",

+ 148 - 2
src/api/providers/__tests__/bedrock.spec.ts

@@ -1,3 +1,14 @@
+// Mock TelemetryService before other imports
+const mockCaptureException = vi.fn()
+
+vi.mock("@roo-code/telemetry", () => ({
+	TelemetryService: {
+		instance: {
+			captureException: (...args: unknown[]) => mockCaptureException(...args),
+		},
+	},
+}))
+
 // Mock AWS SDK credential providers
 vi.mock("@aws-sdk/credential-providers", () => {
 	const mockFromIni = vi.fn().mockReturnValue({
@@ -24,8 +35,8 @@ vi.mock("@aws-sdk/client-bedrock-runtime", () => {
 })
 
 import { AwsBedrockHandler } from "../bedrock"
-import { ConverseStreamCommand, BedrockRuntimeClient } from "@aws-sdk/client-bedrock-runtime"
-import { BEDROCK_1M_CONTEXT_MODEL_IDS, BEDROCK_SERVICE_TIER_MODEL_IDS, bedrockModels } from "@roo-code/types"
+import { ConverseStreamCommand, BedrockRuntimeClient, ConverseCommand } from "@aws-sdk/client-bedrock-runtime"
+import { BEDROCK_1M_CONTEXT_MODEL_IDS, BEDROCK_SERVICE_TIER_MODEL_IDS, bedrockModels, ApiProviderError } from "@roo-code/types"
 
 import type { Anthropic } from "@anthropic-ai/sdk"
 
@@ -996,4 +1007,139 @@ describe("AwsBedrockHandler", () => {
 			})
 		})
 	})
+
+	describe("error telemetry", () => {
+		let mockSend: ReturnType<typeof vi.fn>
+
+		beforeEach(() => {
+			mockCaptureException.mockClear()
+			// Get access to the mock send function from the mocked client
+			mockSend = vi.mocked(BedrockRuntimeClient).mock.results[0]?.value?.send
+		})
+
+		it("should capture telemetry on createMessage error", async () => {
+			// Create a handler with a fresh mock
+			const errorHandler = new AwsBedrockHandler({
+				apiModelId: "anthropic.claude-3-5-sonnet-20241022-v2:0",
+				awsAccessKey: "test-access-key",
+				awsSecretKey: "test-secret-key",
+				awsRegion: "us-east-1",
+			})
+
+			// Get the mock send from the new handler instance
+			const clientInstance =
+				vi.mocked(BedrockRuntimeClient).mock.results[vi.mocked(BedrockRuntimeClient).mock.results.length - 1]
+					?.value
+			const mockSendFn = clientInstance?.send as ReturnType<typeof vi.fn>
+
+			// Mock the send to throw an error
+			mockSendFn.mockRejectedValueOnce(new Error("Bedrock API error"))
+
+			const messages: Anthropic.Messages.MessageParam[] = [
+				{
+					role: "user",
+					content: "Hello",
+				},
+			]
+
+			const generator = errorHandler.createMessage("You are a helpful assistant", messages)
+
+			// Consume the generator - it should throw
+			await expect(async () => {
+				for await (const _chunk of generator) {
+					// Should throw before or during iteration
+				}
+			}).rejects.toThrow()
+
+			// Verify telemetry was captured
+			expect(mockCaptureException).toHaveBeenCalledTimes(1)
+			expect(mockCaptureException).toHaveBeenCalledWith(
+				expect.objectContaining({
+					message: "Bedrock API error",
+					provider: "Bedrock",
+					modelId: "anthropic.claude-3-5-sonnet-20241022-v2:0",
+					operation: "createMessage",
+				}),
+			)
+
+			// Verify it's an ApiProviderError
+			const capturedError = mockCaptureException.mock.calls[0][0]
+			expect(capturedError).toBeInstanceOf(ApiProviderError)
+		})
+
+		it("should capture telemetry on completePrompt error", async () => {
+			// Create a handler with a fresh mock
+			const errorHandler = new AwsBedrockHandler({
+				apiModelId: "anthropic.claude-3-5-sonnet-20241022-v2:0",
+				awsAccessKey: "test-access-key",
+				awsSecretKey: "test-secret-key",
+				awsRegion: "us-east-1",
+			})
+
+			// Get the mock send from the new handler instance
+			const clientInstance =
+				vi.mocked(BedrockRuntimeClient).mock.results[vi.mocked(BedrockRuntimeClient).mock.results.length - 1]
+					?.value
+			const mockSendFn = clientInstance?.send as ReturnType<typeof vi.fn>
+
+			// Mock the send to throw an error for ConverseCommand
+			mockSendFn.mockRejectedValueOnce(new Error("Bedrock completion error"))
+
+			// Call completePrompt - it should throw
+			await expect(errorHandler.completePrompt("Test prompt")).rejects.toThrow()
+
+			// Verify telemetry was captured
+			expect(mockCaptureException).toHaveBeenCalledTimes(1)
+			expect(mockCaptureException).toHaveBeenCalledWith(
+				expect.objectContaining({
+					message: "Bedrock completion error",
+					provider: "Bedrock",
+					modelId: "anthropic.claude-3-5-sonnet-20241022-v2:0",
+					operation: "completePrompt",
+				}),
+			)
+
+			// Verify it's an ApiProviderError
+			const capturedError = mockCaptureException.mock.calls[0][0]
+			expect(capturedError).toBeInstanceOf(ApiProviderError)
+		})
+
+		it("should still throw the error after capturing telemetry", async () => {
+			// Create a handler with a fresh mock
+			const errorHandler = new AwsBedrockHandler({
+				apiModelId: "anthropic.claude-3-5-sonnet-20241022-v2:0",
+				awsAccessKey: "test-access-key",
+				awsSecretKey: "test-secret-key",
+				awsRegion: "us-east-1",
+			})
+
+			// Get the mock send from the new handler instance
+			const clientInstance =
+				vi.mocked(BedrockRuntimeClient).mock.results[vi.mocked(BedrockRuntimeClient).mock.results.length - 1]
+					?.value
+			const mockSendFn = clientInstance?.send as ReturnType<typeof vi.fn>
+
+			// Mock the send to throw an error
+			mockSendFn.mockRejectedValueOnce(new Error("Test error for throw verification"))
+
+			const messages: Anthropic.Messages.MessageParam[] = [
+				{
+					role: "user",
+					content: "Hello",
+				},
+			]
+
+			const generator = errorHandler.createMessage("You are a helpful assistant", messages)
+
+			// Verify the error is still thrown after telemetry capture
+			await expect(async () => {
+				for await (const _chunk of generator) {
+					// Should throw
+				}
+			}).rejects.toThrow()
+
+			// Telemetry should have been captured before the error was thrown
+			expect(mockCaptureException).toHaveBeenCalled()
+		})
+	})
 })

+ 92 - 1
src/api/providers/__tests__/gemini.spec.ts

@@ -1,8 +1,18 @@
 // npx vitest run src/api/providers/__tests__/gemini.spec.ts
 
+const mockCaptureException = vitest.fn()
+
+vitest.mock("@roo-code/telemetry", () => ({
+	TelemetryService: {
+		instance: {
+			captureException: (...args: unknown[]) => mockCaptureException(...args),
+		},
+	},
+}))
+
 import { Anthropic } from "@anthropic-ai/sdk"
 
-import { type ModelInfo, geminiDefaultModelId } from "@roo-code/types"
+import { type ModelInfo, geminiDefaultModelId, ApiProviderError } from "@roo-code/types"
 
 import { t } from "i18next"
 import { GeminiHandler } from "../gemini"
@@ -13,6 +23,9 @@ describe("GeminiHandler", () => {
 	let handler: GeminiHandler
 
 	beforeEach(() => {
+		// Reset mocks
+		mockCaptureException.mockClear()
+
 		// Create mock functions
 		const mockGenerateContentStream = vitest.fn()
 		const mockGenerateContent = vitest.fn()
@@ -229,4 +242,82 @@ describe("GeminiHandler", () => {
 			expect(cost).toBeUndefined()
 		})
 	})
+
+	describe("error telemetry", () => {
+		const mockMessages: Anthropic.Messages.MessageParam[] = [
+			{
+				role: "user",
+				content: "Hello",
+			},
+		]
+
+		const systemPrompt = "You are a helpful assistant"
+
+		it("should capture telemetry on createMessage error", async () => {
+			const mockError = new Error("Gemini API error")
+			;(handler["client"].models.generateContentStream as any).mockRejectedValue(mockError)
+
+			const stream = handler.createMessage(systemPrompt, mockMessages)
+
+			await expect(async () => {
+				for await (const _chunk of stream) {
+					// Should throw before yielding any chunks
+				}
+			}).rejects.toThrow()
+
+			// Verify telemetry was captured
+			expect(mockCaptureException).toHaveBeenCalledTimes(1)
+			expect(mockCaptureException).toHaveBeenCalledWith(
+				expect.objectContaining({
+					message: "Gemini API error",
+					provider: "Gemini",
+					modelId: GEMINI_MODEL_NAME,
+					operation: "createMessage",
+				}),
+			)
+
+			// Verify it's an ApiProviderError
+			const capturedError = mockCaptureException.mock.calls[0][0]
+			expect(capturedError).toBeInstanceOf(ApiProviderError)
+		})
+
+		it("should capture telemetry on completePrompt error", async () => {
+			const mockError = new Error("Gemini completion error")
+			;(handler["client"].models.generateContent as any).mockRejectedValue(mockError)
+
+			await expect(handler.completePrompt("Test prompt")).rejects.toThrow()
+
+			// Verify telemetry was captured
+			expect(mockCaptureException).toHaveBeenCalledTimes(1)
+			expect(mockCaptureException).toHaveBeenCalledWith(
+				expect.objectContaining({
+					message: "Gemini completion error",
+					provider: "Gemini",
+					modelId: GEMINI_MODEL_NAME,
+					operation: "completePrompt",
+				}),
+			)
+
+			// Verify it's an ApiProviderError
+			const capturedError = mockCaptureException.mock.calls[0][0]
+			expect(capturedError).toBeInstanceOf(ApiProviderError)
+		})
+
+		it("should still throw the error after capturing telemetry", async () => {
+			const mockError = new Error("Gemini API error")
+			;(handler["client"].models.generateContentStream as any).mockRejectedValue(mockError)
+
+			const stream = handler.createMessage(systemPrompt, mockMessages)
+
+			// Verify the error is still thrown
+			await expect(async () => {
+				for await (const _chunk of stream) {
+					// Should throw
+				}
+			}).rejects.toThrow()
+
+			// Telemetry should have been captured before the error was thrown
+			expect(mockCaptureException).toHaveBeenCalled()
+		})
+	})
 })

+ 12 - 5
src/api/providers/__tests__/mistral.spec.ts

@@ -1,3 +1,13 @@
+// Mock TelemetryService - must come before other imports
+const mockCaptureException = vi.hoisted(() => vi.fn())
+vi.mock("@roo-code/telemetry", () => ({
+	TelemetryService: {
+		instance: {
+			captureException: mockCaptureException,
+		},
+	},
+}))
+
 // Mock Mistral client - must come before other imports
 const mockCreate = vi.fn()
 const mockComplete = vi.fn()
@@ -59,6 +69,7 @@ describe("MistralHandler", () => {
 		handler = new MistralHandler(mockOptions)
 		mockCreate.mockClear()
 		mockComplete.mockClear()
+		mockCaptureException.mockClear()
 	})
 
 	describe("constructor", () => {
@@ -251,11 +262,10 @@ describe("MistralHandler", () => {
 			},
 		]
 
-		it("should include tools in request when toolProtocol is native", async () => {
+		it("should include tools in request by default (native is default)", async () => {
 			const metadata: ApiHandlerCreateMessageMetadata = {
 				taskId: "test-task",
 				tools: mockTools,
-				toolProtocol: "native",
 			}
 
 			const iterator = handler.createMessage(systemPrompt, messages, metadata)
@@ -329,7 +339,6 @@ describe("MistralHandler", () => {
 			const metadata: ApiHandlerCreateMessageMetadata = {
 				taskId: "test-task",
 				tools: mockTools,
-				toolProtocol: "native",
 			}
 
 			const iterator = handler.createMessage(systemPrompt, messages, metadata)
@@ -393,7 +402,6 @@ describe("MistralHandler", () => {
 			const metadata: ApiHandlerCreateMessageMetadata = {
 				taskId: "test-task",
 				tools: mockTools,
-				toolProtocol: "native",
 			}
 
 			const iterator = handler.createMessage(systemPrompt, messages, metadata)
@@ -427,7 +435,6 @@ describe("MistralHandler", () => {
 			const metadata: ApiHandlerCreateMessageMetadata = {
 				taskId: "test-task",
 				tools: mockTools,
-				toolProtocol: "native",
 				tool_choice: "auto", // This should be ignored
 			}
 

+ 196 - 0
src/api/providers/__tests__/openai-native.spec.ts

@@ -1,7 +1,19 @@
 // npx vitest run api/providers/__tests__/openai-native.spec.ts
 
+const mockCaptureException = vitest.fn()
+
+vitest.mock("@roo-code/telemetry", () => ({
+	TelemetryService: {
+		instance: {
+			captureException: (...args: unknown[]) => mockCaptureException(...args),
+		},
+	},
+}))
+
 import { Anthropic } from "@anthropic-ai/sdk"
 
+import { ApiProviderError } from "@roo-code/types"
+
 import { OpenAiNativeHandler } from "../openai-native"
 import { ApiHandlerOptions } from "../../../shared/api"
 
@@ -37,6 +49,7 @@ describe("OpenAiNativeHandler", () => {
 		}
 		handler = new OpenAiNativeHandler(mockOptions)
 		mockResponsesCreate.mockClear()
+		mockCaptureException.mockClear()
 		// Clear fetch mock if it exists
 		if ((global as any).fetch) {
 			delete (global as any).fetch
@@ -208,6 +221,45 @@ describe("OpenAiNativeHandler", () => {
 			expect(modelInfo.id).toBe("gpt-5.1-codex-max") // Default model
 			expect(modelInfo.info).toBeDefined()
 		})
+
+		it("should have defaultToolProtocol: native for all OpenAI Native models", () => {
+			// Test that all models have defaultToolProtocol: native
+			const testModels = [
+				"gpt-5.1-codex-max",
+				"gpt-5.2",
+				"gpt-5.1",
+				"gpt-5",
+				"gpt-5-mini",
+				"gpt-5-nano",
+				"gpt-4.1",
+				"gpt-4.1-mini",
+				"gpt-4.1-nano",
+				"o3",
+				"o3-high",
+				"o3-low",
+				"o4-mini",
+				"o4-mini-high",
+				"o4-mini-low",
+				"o3-mini",
+				"o3-mini-high",
+				"o3-mini-low",
+				"o1",
+				"o1-preview",
+				"o1-mini",
+				"gpt-4o",
+				"gpt-4o-mini",
+				"codex-mini-latest",
+			]
+
+			for (const modelId of testModels) {
+				const testHandler = new OpenAiNativeHandler({
+					openAiNativeApiKey: "test-api-key",
+					apiModelId: modelId,
+				})
+				const modelInfo = testHandler.getModel()
+				expect(modelInfo.info.defaultToolProtocol).toBe("native")
+			}
+		})
 	})
 
 	describe("GPT-5 models", () => {
@@ -897,6 +949,150 @@ describe("OpenAiNativeHandler", () => {
 			}
 		})
 	})
+
+	describe("error telemetry", () => {
+		const errorMessages: Anthropic.Messages.MessageParam[] = [
+			{
+				role: "user",
+				content: "Hello",
+			},
+		]
+
+		const errorSystemPrompt = "You are a helpful assistant"
+
+		beforeEach(() => {
+			mockCaptureException.mockClear()
+		})
+
+		it("should capture telemetry on createMessage error", async () => {
+			// Mock fetch to return error
+			const mockFetch = vitest.fn().mockResolvedValue({
+				ok: false,
+				status: 500,
+				text: async () => "Internal Server Error",
+			})
+			global.fetch = mockFetch as any
+
+			// Mock SDK to fail so it falls back to fetch
+			mockResponsesCreate.mockRejectedValue(new Error("SDK not available"))
+
+			const stream = handler.createMessage(errorSystemPrompt, errorMessages)
+
+			await expect(async () => {
+				for await (const _chunk of stream) {
+					// Should throw before yielding any chunks
+				}
+			}).rejects.toThrow()
+
+			// Verify telemetry was captured
+			expect(mockCaptureException).toHaveBeenCalledTimes(1)
+			expect(mockCaptureException).toHaveBeenCalledWith(
+				expect.objectContaining({
+					message: expect.stringContaining("OpenAI service error"),
+					provider: "OpenAI Native",
+					modelId: "gpt-4.1",
+					operation: "createMessage",
+				}),
+			)
+
+			// Verify it's an ApiProviderError
+			const capturedError = mockCaptureException.mock.calls[0][0]
+			expect(capturedError).toBeInstanceOf(ApiProviderError)
+		})
+
+		it("should capture telemetry on stream processing error", async () => {
+			// Mock fetch to return a stream with an error event
+			const mockFetch = vitest.fn().mockResolvedValue({
+				ok: true,
+				body: new ReadableStream({
+					start(controller) {
+						controller.enqueue(
+							new TextEncoder().encode(
+								'data: {"type":"response.error","error":{"message":"Model overloaded"}}\n\n',
+							),
+						)
+						controller.close()
+					},
+				}),
+			})
+			global.fetch = mockFetch as any
+
+			// Mock SDK to fail so it falls back to fetch
+			mockResponsesCreate.mockRejectedValue(new Error("SDK not available"))
+
+			const stream = handler.createMessage(errorSystemPrompt, errorMessages)
+
+			await expect(async () => {
+				for await (const _chunk of stream) {
+					// Should throw when encountering error event
+				}
+			}).rejects.toThrow()
+
+			// Verify telemetry was captured (may be called multiple times due to error propagation)
+			expect(mockCaptureException).toHaveBeenCalled()
+
+			// Find the call with the stream error message
+			const streamErrorCall = mockCaptureException.mock.calls.find((call: any[]) =>
+				call[0]?.message?.includes("Model overloaded"),
+			)
+			expect(streamErrorCall).toBeDefined()
+			expect(streamErrorCall![0]).toMatchObject({
+				provider: "OpenAI Native",
+				modelId: "gpt-4.1",
+				operation: "createMessage",
+			})
+
+			// Verify it's an ApiProviderError
+			expect(streamErrorCall![0]).toBeInstanceOf(ApiProviderError)
+		})
+
+		it("should capture telemetry on completePrompt error", async () => {
+			// Mock SDK to throw an error
+			mockResponsesCreate.mockRejectedValue(new Error("API Error"))
+
+			await expect(handler.completePrompt("Test prompt")).rejects.toThrow()
+
+			// Verify telemetry was captured
+			expect(mockCaptureException).toHaveBeenCalledTimes(1)
+			expect(mockCaptureException).toHaveBeenCalledWith(
+				expect.objectContaining({
+					message: "API Error",
+					provider: "OpenAI Native",
+					modelId: "gpt-4.1",
+					operation: "completePrompt",
+				}),
+			)
+
+			// Verify it's an ApiProviderError
+			const capturedError = mockCaptureException.mock.calls[0][0]
+			expect(capturedError).toBeInstanceOf(ApiProviderError)
+		})
+
+		it("should still throw the error after capturing telemetry", async () => {
+			// Mock fetch to return error
+			const mockFetch = vitest.fn().mockResolvedValue({
+				ok: false,
+				status: 500,
+				text: async () => "Internal Server Error",
+			})
+			global.fetch = mockFetch as any
+
+			// Mock SDK to fail
+			mockResponsesCreate.mockRejectedValue(new Error("SDK not available"))
+
+			const stream = handler.createMessage(errorSystemPrompt, errorMessages)
+
+			// Verify the error is still thrown
+			await expect(async () => {
+				for await (const _chunk of stream) {
+					// Should throw
+				}
+			}).rejects.toThrow()
+
+			// Telemetry should have been captured before the error was thrown
+			expect(mockCaptureException).toHaveBeenCalled()
+		})
+	})
 })
 
 // Additional tests for GPT-5 streaming event coverage

+ 12 - 6
src/api/providers/__tests__/xai.spec.ts

@@ -1,5 +1,15 @@
 // npx vitest api/providers/__tests__/xai.spec.ts
 
+// Mock TelemetryService - must come before other imports
+const mockCaptureException = vitest.hoisted(() => vitest.fn())
+vitest.mock("@roo-code/telemetry", () => ({
+	TelemetryService: {
+		instance: {
+			captureException: mockCaptureException,
+		},
+	},
+}))
+
 const mockCreate = vitest.fn()
 
 vitest.mock("openai", () => {
@@ -25,6 +35,7 @@ describe("XAIHandler", () => {
 		// Reset all mocks
 		vi.clearAllMocks()
 		mockCreate.mockClear()
+		mockCaptureException.mockClear()
 
 		// Create handler with mock
 		handler = new XAIHandler({})
@@ -299,7 +310,7 @@ describe("XAIHandler", () => {
 			},
 		]
 
-		it("should include tools in request when model supports native tools and tools are provided", async () => {
+		it("should include tools in request when model supports native tools and tools are provided (native is default)", async () => {
 			const handlerWithTools = new XAIHandler({ apiModelId: "grok-3" })
 
 			mockCreate.mockImplementationOnce(() => {
@@ -315,7 +326,6 @@ describe("XAIHandler", () => {
 			const messageGenerator = handlerWithTools.createMessage("test prompt", [], {
 				taskId: "test-task-id",
 				tools: testTools,
-				toolProtocol: "native",
 			})
 			await messageGenerator.next()
 
@@ -350,7 +360,6 @@ describe("XAIHandler", () => {
 			const messageGenerator = handlerWithTools.createMessage("test prompt", [], {
 				taskId: "test-task-id",
 				tools: testTools,
-				toolProtocol: "native",
 				tool_choice: "auto",
 			})
 			await messageGenerator.next()
@@ -443,7 +452,6 @@ describe("XAIHandler", () => {
 			const stream = handlerWithTools.createMessage("test prompt", [], {
 				taskId: "test-task-id",
 				tools: testTools,
-				toolProtocol: "native",
 			})
 
 			const chunks = []
@@ -484,7 +492,6 @@ describe("XAIHandler", () => {
 			const messageGenerator = handlerWithTools.createMessage("test prompt", [], {
 				taskId: "test-task-id",
 				tools: testTools,
-				toolProtocol: "native",
 				parallelToolCalls: true,
 			})
 			await messageGenerator.next()
@@ -551,7 +558,6 @@ describe("XAIHandler", () => {
 			const stream = handlerWithTools.createMessage("test prompt", [], {
 				taskId: "test-task-id",
 				tools: testTools,
-				toolProtocol: "native",
 			})
 
 			const chunks = []

+ 14 - 0
src/api/providers/bedrock.ts

@@ -30,7 +30,9 @@ import {
 	BEDROCK_GLOBAL_INFERENCE_MODEL_IDS,
 	BEDROCK_SERVICE_TIER_MODEL_IDS,
 	BEDROCK_SERVICE_TIER_PRICING,
+	ApiProviderError,
 } from "@roo-code/types"
+import { TelemetryService } from "@roo-code/telemetry"
 
 import { ApiStream } from "../transform/stream"
 import { BaseProvider } from "./base-provider"
@@ -197,6 +199,7 @@ export class AwsBedrockHandler extends BaseProvider implements SingleCompletionH
 	protected options: ProviderSettings
 	private client: BedrockRuntimeClient
 	private arnInfo: any
+	private readonly providerName = "Bedrock"
 
 	constructor(options: ProviderSettings) {
 		super()
@@ -690,6 +693,11 @@ export class AwsBedrockHandler extends BaseProvider implements SingleCompletionH
 			// Clear timeout on error
 			clearTimeout(timeoutId)
 
+			// Capture error in telemetry before processing
+			const errorMessage = error instanceof Error ? error.message : String(error)
+			const apiError = new ApiProviderError(errorMessage, this.providerName, modelConfig.id, "createMessage")
+			TelemetryService.instance.captureException(apiError)
+
 			// Check if this is a throttling error that should trigger retry logic
 			const errorType = this.getErrorType(error)
 
@@ -793,6 +801,12 @@ export class AwsBedrockHandler extends BaseProvider implements SingleCompletionH
 			}
 			return ""
 		} catch (error) {
+			// Capture error in telemetry
+			const model = this.getModel()
+			const telemetryErrorMessage = error instanceof Error ? error.message : String(error)
+			const apiError = new ApiProviderError(telemetryErrorMessage, this.providerName, model.id, "completePrompt")
+			TelemetryService.instance.captureException(apiError)
+
 			// Use the extracted error handling method for all errors
 			const errorResult = this.handleBedrockError(error, false) // false for non-streaming context
 			// Since we're in a non-streaming context, we know the result is a string

+ 19 - 3
src/api/providers/gemini.ts

@@ -9,7 +9,14 @@ import {
 } from "@google/genai"
 } from "@google/genai"
 import type { JWTInput } from "google-auth-library"
 import type { JWTInput } from "google-auth-library"
 
 
-import { type ModelInfo, type GeminiModelId, geminiDefaultModelId, geminiModels } from "@roo-code/types"
+import {
+	type ModelInfo,
+	type GeminiModelId,
+	geminiDefaultModelId,
+	geminiModels,
+	ApiProviderError,
+} from "@roo-code/types"
+import { TelemetryService } from "@roo-code/telemetry"
 
 
 import type { ApiHandlerOptions } from "../../shared/api"
 import type { ApiHandlerOptions } from "../../shared/api"
 import { safeJsonParse } from "../../shared/safeJsonParse"
 import { safeJsonParse } from "../../shared/safeJsonParse"
@@ -32,6 +39,7 @@ export class GeminiHandler extends BaseProvider implements SingleCompletionHandl
 	private client: GoogleGenAI
 	private client: GoogleGenAI
 	private lastThoughtSignature?: string
 	private lastThoughtSignature?: string
 	private lastResponseId?: string
 	private lastResponseId?: string
+	private readonly providerName = "Gemini"
 
 
 	constructor({ isVertex, ...options }: GeminiHandlerOptions) {
 	constructor({ isVertex, ...options }: GeminiHandlerOptions) {
 		super()
 		super()
@@ -338,6 +346,10 @@ export class GeminiHandler extends BaseProvider implements SingleCompletionHandl
 				}
 				}
 			}
 			}
 		} catch (error) {
 		} catch (error) {
+			const errorMessage = error instanceof Error ? error.message : String(error)
+			const apiError = new ApiProviderError(errorMessage, this.providerName, model, "createMessage")
+			TelemetryService.instance.captureException(apiError)
+
 			if (error instanceof Error) {
 			if (error instanceof Error) {
 				throw new Error(t("common:errors.gemini.generate_stream", { error: error.message }))
 				throw new Error(t("common:errors.gemini.generate_stream", { error: error.message }))
 			}
 			}
@@ -401,9 +413,9 @@ export class GeminiHandler extends BaseProvider implements SingleCompletionHandl
 	}
 	}
 
 
 	async completePrompt(prompt: string): Promise<string> {
 	async completePrompt(prompt: string): Promise<string> {
-		try {
-			const { id: model, info } = this.getModel()
+		const { id: model, info } = this.getModel()
 
 
+		try {
 			const tools: GenerateContentConfig["tools"] = []
 			const tools: GenerateContentConfig["tools"] = []
 			if (this.options.enableUrlContext) {
 			if (this.options.enableUrlContext) {
 				tools.push({ urlContext: {} })
 				tools.push({ urlContext: {} })
@@ -445,6 +457,10 @@ export class GeminiHandler extends BaseProvider implements SingleCompletionHandl
 
 
 			return text
 			return text
 		} catch (error) {
 		} catch (error) {
+			const errorMessage = error instanceof Error ? error.message : String(error)
+			const apiError = new ApiProviderError(errorMessage, this.providerName, model, "completePrompt")
+			TelemetryService.instance.captureException(apiError)
+
 			if (error instanceof Error) {
 			if (error instanceof Error) {
 				throw new Error(t("common:errors.gemini.generate_complete_prompt", { error: error.message }))
 				throw new Error(t("common:errors.gemini.generate_complete_prompt", { error: error.message }))
 			}
 			}

+ 24 - 9
src/api/providers/mistral.ts

@@ -2,7 +2,14 @@ import { Anthropic } from "@anthropic-ai/sdk"
 import { Mistral } from "@mistralai/mistralai"
 import OpenAI from "openai"
 
-import { type MistralModelId, mistralDefaultModelId, mistralModels, MISTRAL_DEFAULT_TEMPERATURE } from "@roo-code/types"
+import {
+	type MistralModelId,
+	mistralDefaultModelId,
+	mistralModels,
+	MISTRAL_DEFAULT_TEMPERATURE,
+	ApiProviderError,
+} from "@roo-code/types"
+import { TelemetryService } from "@roo-code/telemetry"
 
 import { ApiHandlerOptions } from "../../shared/api"
 
@@ -43,6 +50,7 @@ type MistralTool = {
 export class MistralHandler extends BaseProvider implements SingleCompletionHandler {
 	protected options: ApiHandlerOptions
 	private client: Mistral
+	private readonly providerName = "Mistral"
 
 	constructor(options: ApiHandlerOptions) {
 		super()
@@ -96,7 +104,15 @@ export class MistralHandler extends BaseProvider implements SingleCompletionHand
 		// Temporary debug log for QA
 		// console.log("[MISTRAL DEBUG] Raw API request body:", requestOptions)
 
-		const response = await this.client.chat.stream(requestOptions)
+		let response
+		try {
+			response = await this.client.chat.stream(requestOptions)
+		} catch (error) {
+			const errorMessage = error instanceof Error ? error.message : String(error)
+			const apiError = new ApiProviderError(errorMessage, this.providerName, model, "createMessage")
+			TelemetryService.instance.captureException(apiError)
+			throw new Error(`Mistral completion error: ${errorMessage}`)
+		}
 
 		for await (const event of response) {
 			const delta = event.data.choices[0]?.delta
@@ -181,9 +197,9 @@ export class MistralHandler extends BaseProvider implements SingleCompletionHand
 	}
 
 	async completePrompt(prompt: string): Promise<string> {
-		try {
-			const { id: model, temperature } = this.getModel()
+		const { id: model, temperature } = this.getModel()
 
+		try {
 			const response = await this.client.chat.complete({
 				model,
 				messages: [{ role: "user", content: prompt }],
@@ -202,11 +218,10 @@ export class MistralHandler extends BaseProvider implements SingleCompletionHand
 
 			return content || ""
 		} catch (error) {
-			if (error instanceof Error) {
-				throw new Error(`Mistral completion error: ${error.message}`)
-			}
-
-			throw error
+			const errorMessage = error instanceof Error ? error.message : String(error)
+			const apiError = new ApiProviderError(errorMessage, this.providerName, model, "completePrompt")
+			TelemetryService.instance.captureException(apiError)
+			throw new Error(`Mistral completion error: ${errorMessage}`)
 		}
 	}
 }

+ 17 - 0
src/api/providers/openai-native.ts

@@ -11,7 +11,9 @@ import {
 	type VerbosityLevel,
 	type ReasoningEffortExtended,
 	type ServiceTier,
+	ApiProviderError,
 } from "@roo-code/types"
+import { TelemetryService } from "@roo-code/telemetry"
 
 import type { ApiHandlerOptions } from "../../shared/api"
 
@@ -28,6 +30,7 @@ export type OpenAiNativeModel = ReturnType<OpenAiNativeHandler["getModel"]>
 export class OpenAiNativeHandler extends BaseProvider implements SingleCompletionHandler {
 	protected options: ApiHandlerOptions
 	private client: OpenAI
+	private readonly providerName = "OpenAI Native"
 	// Resolved service tier from Responses API (actual tier used by OpenAI)
 	private lastServiceTier: ServiceTier | undefined
 	// Complete response output array (includes reasoning items with encrypted_content)
@@ -536,6 +539,11 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletio
 			// Handle streaming response
 			yield* this.handleStreamResponse(response.body, model)
 		} catch (error) {
+			const model = this.getModel()
+			const errorMessage = error instanceof Error ? error.message : String(error)
+			const apiError = new ApiProviderError(errorMessage, this.providerName, model.id, "createMessage")
+			TelemetryService.instance.captureException(apiError)
+
 			if (error instanceof Error) {
 				// Re-throw with the original error message if it's already formatted
 				if (error.message.includes("Responses API")) {
@@ -1013,6 +1021,10 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletio
 			// If we didn't get any content, don't throw - the API might have returned an empty response
 			// This can happen in certain edge cases and shouldn't break the flow
 		} catch (error) {
+			const errorMessage = error instanceof Error ? error.message : String(error)
+			const apiError = new ApiProviderError(errorMessage, this.providerName, model.id, "createMessage")
+			TelemetryService.instance.captureException(apiError)
+
 			if (error instanceof Error) {
 				throw new Error(`Error processing response stream: ${error.message}`)
 			}
@@ -1339,6 +1351,11 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletio
 
 			return ""
 		} catch (error) {
+			const errorModel = this.getModel()
+			const errorMessage = error instanceof Error ? error.message : String(error)
+			const apiError = new ApiProviderError(errorMessage, this.providerName, errorModel.id, "completePrompt")
+			TelemetryService.instance.captureException(apiError)
+
 			if (error instanceof Error) {
 				throw new Error(`OpenAI Native completion error: ${error.message}`)
 			}

+ 8 - 1
src/api/providers/xai.ts

@@ -1,7 +1,8 @@
 import { Anthropic } from "@anthropic-ai/sdk"
 import OpenAI from "openai"
 
-import { type XAIModelId, xaiDefaultModelId, xaiModels } from "@roo-code/types"
+import { type XAIModelId, xaiDefaultModelId, xaiModels, ApiProviderError } from "@roo-code/types"
+import { TelemetryService } from "@roo-code/telemetry"
 
 import { NativeToolCallParser } from "../../core/assistant-message/NativeToolCallParser"
 import type { ApiHandlerOptions } from "../../shared/api"
@@ -79,6 +80,9 @@ export class XAIHandler extends BaseProvider implements SingleCompletionHandler
 		try {
 			stream = await this.client.chat.completions.create(requestOptions)
 		} catch (error) {
+			const errorMessage = error instanceof Error ? error.message : String(error)
+			const apiError = new ApiProviderError(errorMessage, this.providerName, modelId, "createMessage")
+			TelemetryService.instance.captureException(apiError)
 			throw handleOpenAIError(error, this.providerName)
 		}
 
@@ -158,6 +162,9 @@ export class XAIHandler extends BaseProvider implements SingleCompletionHandler
 
 			return response.choices[0]?.message.content || ""
 		} catch (error) {
+			const errorMessage = error instanceof Error ? error.message : String(error)
+			const apiError = new ApiProviderError(errorMessage, this.providerName, modelId, "completePrompt")
+			TelemetryService.instance.captureException(apiError)
 			throw handleOpenAIError(error, this.providerName)
 		}
 	}
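
For reference, a minimal sketch of the error-capture pattern the handlers above now share. The ApiProviderError constructor arguments and TelemetryService.instance.captureException call are taken from the diff; the ExampleHandler class, its provider name, model id, and callApi helper are hypothetical stand-ins, not part of the change.

// Sketch only: wrap a provider failure in ApiProviderError, report it, then re-throw.
import { ApiProviderError } from "@roo-code/types"
import { TelemetryService } from "@roo-code/telemetry"

class ExampleHandler {
	private readonly providerName = "Example" // hypothetical provider name
	private readonly modelId = "example-model" // hypothetical model id

	async completePrompt(prompt: string): Promise<string> {
		try {
			return await this.callApi(prompt)
		} catch (error) {
			// Normalize the message, attach provider/model/operation context, capture it,
			// and then surface the failure to the caller, mirroring the handlers above.
			const errorMessage = error instanceof Error ? error.message : String(error)
			const apiError = new ApiProviderError(errorMessage, this.providerName, this.modelId, "completePrompt")
			TelemetryService.instance.captureException(apiError)
			throw new Error(`${this.providerName} completion error: ${errorMessage}`)
		}
	}

	// Hypothetical stand-in for the real provider SDK call.
	private async callApi(_prompt: string): Promise<string> {
		return ""
	}
}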