فهرست منبع

PR 11144 (#11315)

* Latest main branch snapshot from API

* feat: add dedicated Azure OpenAI provider using @ai-sdk/azure package

* feat: add Azure provider UI component and translations

* feat: add Azure provider translations for all locales

* chore: add missing Azure placeholder translations

* Delete .changeset/azure-ai-sdk-migration.md

* fix: add Azure provider validation for onboarding workflow

- Add azureApiKey to SECRET_STATE_KEYS for proper configuration detection
- Add Azure validation case in validateModelsAndKeysProvided
- Add validation translations for azureResourceName and azureDeploymentName across all 18 locales

This fixes the issue where the Finish button does nothing when setting up Azure provider in the onboarding workflow.

* feat(azure): add model metadata, model picker, rename to Azure AI Foundry

- Add static model metadata for 29 Azure models (from models.dev)
  with Roo-specific flags (reasoning, tools, verbosity) matching
  openAiNativeModels
- Add model picker dropdown to Azure provider settings for model
  capability detection (context window, max tokens, pricing)
- Rename provider label from 'Azure OpenAI' to 'Azure AI Foundry'
  across all 18 locales
- Make API key optional (supports Azure managed identity / Entra ID)
- Update default API version from 2024-08-01-preview to 2025-04-01-preview
- Fix maxOutputTokens validation (filter invalid values <= 0)
- Handler separates deployment name (API calls) from model ID
  (capability lookup) with azureDefaultModelInfo (gpt-4o) fallback
- Remove unhelpful 'Get Azure AI Foundry Access' button
- Prevent stale model IDs from other providers carrying over
- Suppress validation errors on fresh provider selection

* fix(azure): add missing isAiSdkProvider() override for reasoning block preservation

* Azure Fixes for Hannes

* Quick Fix for Responses API Only (for Hannes)

* fix: use explicit azureOpenAiDefaultApiVersion fallback when apiVersion is empty

Addresses review feedback: the UI placeholder shows '2025-04-01-preview' via
azureOpenAiDefaultApiVersion, so the handler should use the same constant as
fallback instead of silently deferring to the SDK's internal default.

* fix: remove stale Cerebras references (retired provider)

* fix: add missing retiredProviderMessage translations for all locales

* fix: do not map promptCacheMissTokens to cacheWriteTokens for Azure

Azure uses OpenAI-compatible caching which does not report cache write
tokens separately. promptCacheMissTokens represents tokens NOT found in
cache (processed from scratch), not tokens written to cache. This aligns
the Azure handler with the OpenAI native handler behavior.

---------

Co-authored-by: Hannes Rudolph <[email protected]>
Co-authored-by: Roo Code <[email protected]>
Co-authored-by: daniel-lxs <[email protected]>
Co-authored-by: Matt Rubens <[email protected]>
Danger Mouse 2 روز پیش
والد
کامیت
571be71005
41فایلهای تغییر یافته به همراه2008 افزوده شده و 333 حذف شده
  1. 0 6
      .env.sample
  2. 0 74
      apps/web-evals/src/app/api/runs/[id]/logs/[taskId]/route.ts
  3. 0 147
      apps/web-evals/src/app/api/runs/[id]/logs/failed/route.ts
  4. 1 0
      packages/types/src/global-settings.ts
  5. 17 0
      packages/types/src/provider-settings.ts
  6. 403 0
      packages/types/src/providers/azure.ts
  7. 4 0
      packages/types/src/providers/index.ts
  8. 2 3
      packages/types/src/providers/openai.ts
  9. 50 1
      pnpm-lock.yaml
  10. 3 0
      src/api/index.ts
  11. 431 0
      src/api/providers/__tests__/azure.spec.ts
  12. 201 0
      src/api/providers/azure.ts
  13. 1 0
      src/api/providers/index.ts
  14. 0 66
      src/integrations/terminal/README.md
  15. 1 0
      src/package.json
  16. 7 1
      src/shared/checkExistApiConfig.ts
  17. 9 0
      webview-ui/src/components/settings/ApiOptions.tsx
  18. 3 0
      webview-ui/src/components/settings/constants.ts
  19. 75 0
      webview-ui/src/components/settings/providers/Azure.tsx
  20. 1 0
      webview-ui/src/components/settings/providers/index.ts
  21. 6 0
      webview-ui/src/components/settings/utils/providerModelConfig.ts
  22. 11 0
      webview-ui/src/components/ui/hooks/useSelectedModel.ts
  23. 44 2
      webview-ui/src/i18n/locales/ca/settings.json
  24. 44 2
      webview-ui/src/i18n/locales/de/settings.json
  25. 17 1
      webview-ui/src/i18n/locales/en/settings.json
  26. 44 2
      webview-ui/src/i18n/locales/es/settings.json
  27. 44 2
      webview-ui/src/i18n/locales/fr/settings.json
  28. 44 2
      webview-ui/src/i18n/locales/hi/settings.json
  29. 44 2
      webview-ui/src/i18n/locales/id/settings.json
  30. 44 2
      webview-ui/src/i18n/locales/it/settings.json
  31. 44 2
      webview-ui/src/i18n/locales/ja/settings.json
  32. 44 2
      webview-ui/src/i18n/locales/ko/settings.json
  33. 44 2
      webview-ui/src/i18n/locales/nl/settings.json
  34. 44 2
      webview-ui/src/i18n/locales/pl/settings.json
  35. 44 2
      webview-ui/src/i18n/locales/pt-BR/settings.json
  36. 44 2
      webview-ui/src/i18n/locales/ru/settings.json
  37. 44 2
      webview-ui/src/i18n/locales/tr/settings.json
  38. 44 2
      webview-ui/src/i18n/locales/vi/settings.json
  39. 44 2
      webview-ui/src/i18n/locales/zh-CN/settings.json
  40. 44 2
      webview-ui/src/i18n/locales/zh-TW/settings.json
  41. 17 0
      webview-ui/src/utils/validate.ts

+ 0 - 6
.env.sample

@@ -1,6 +0,0 @@
-POSTHOG_API_KEY=key-goes-here
-
-# Roo Code Cloud / Local Development
-CLERK_BASE_URL=https://epic-chamois-85.clerk.accounts.dev
-ROO_CODE_API_URL=http://localhost:3000
-ROO_CODE_PROVIDER_URL=http://localhost:8080/proxy/v1

+ 0 - 74
apps/web-evals/src/app/api/runs/[id]/logs/[taskId]/route.ts

@@ -1,74 +0,0 @@
-import { NextResponse } from "next/server"
-import type { NextRequest } from "next/server"
-import * as fs from "node:fs/promises"
-import * as path from "node:path"
-
-import { findTask, findRun } from "@roo-code/evals"
-
-export const dynamic = "force-dynamic"
-
-const LOG_BASE_PATH = "/tmp/evals/runs"
-
-// Sanitize path components to prevent path traversal attacks
-function sanitizePathComponent(component: string): string {
-	// Remove any path separators, null bytes, and other dangerous characters
-	return component.replace(/[/\\:\0*?"<>|]/g, "_")
-}
-
-export async function GET(request: NextRequest, { params }: { params: Promise<{ id: string; taskId: string }> }) {
-	const { id, taskId } = await params
-
-	try {
-		const runId = Number(id)
-		const taskIdNum = Number(taskId)
-
-		if (isNaN(runId) || isNaN(taskIdNum)) {
-			return NextResponse.json({ error: "Invalid run ID or task ID" }, { status: 400 })
-		}
-
-		// Verify the run exists
-		await findRun(runId)
-
-		// Get the task to find its language and exercise
-		const task = await findTask(taskIdNum)
-
-		// Verify the task belongs to this run
-		if (task.runId !== runId) {
-			return NextResponse.json({ error: "Task does not belong to this run" }, { status: 404 })
-		}
-
-		// Sanitize language and exercise to prevent path traversal
-		const safeLanguage = sanitizePathComponent(task.language)
-		const safeExercise = sanitizePathComponent(task.exercise)
-
-		// Construct the log file path
-		const logFileName = `${safeLanguage}-${safeExercise}.log`
-		const logFilePath = path.join(LOG_BASE_PATH, String(runId), logFileName)
-
-		// Verify the resolved path is within the expected directory (defense in depth)
-		const resolvedPath = path.resolve(logFilePath)
-		const expectedBase = path.resolve(LOG_BASE_PATH)
-		if (!resolvedPath.startsWith(expectedBase)) {
-			return NextResponse.json({ error: "Invalid log path" }, { status: 400 })
-		}
-
-		// Check if the log file exists and read it (async)
-		try {
-			const logContent = await fs.readFile(logFilePath, "utf-8")
-			return NextResponse.json({ logContent })
-		} catch (err) {
-			if ((err as NodeJS.ErrnoException).code === "ENOENT") {
-				return NextResponse.json({ error: "Log file not found", logContent: null }, { status: 200 })
-			}
-			throw err
-		}
-	} catch (error) {
-		console.error("Error reading task log:", error)
-
-		if (error instanceof Error && error.name === "RecordNotFoundError") {
-			return NextResponse.json({ error: "Task or run not found" }, { status: 404 })
-		}
-
-		return NextResponse.json({ error: "Failed to read log file" }, { status: 500 })
-	}
-}

+ 0 - 147
apps/web-evals/src/app/api/runs/[id]/logs/failed/route.ts

@@ -1,147 +0,0 @@
-import { NextResponse } from "next/server"
-import type { NextRequest } from "next/server"
-import * as fs from "node:fs"
-import * as path from "node:path"
-import archiver from "archiver"
-
-import { findRun, getTasks } from "@roo-code/evals"
-
-export const dynamic = "force-dynamic"
-
-const LOG_BASE_PATH = "/tmp/evals/runs"
-
-// Sanitize path components to prevent path traversal attacks
-function sanitizePathComponent(component: string): string {
-	// Remove any path separators, null bytes, and other dangerous characters
-	return component.replace(/[/\\:\0*?"<>|]/g, "_")
-}
-
-export async function GET(request: NextRequest, { params }: { params: Promise<{ id: string }> }) {
-	const { id } = await params
-
-	try {
-		const runId = Number(id)
-
-		if (isNaN(runId)) {
-			return NextResponse.json({ error: "Invalid run ID" }, { status: 400 })
-		}
-
-		// Verify the run exists
-		await findRun(runId)
-
-		// Get all tasks for this run
-		const tasks = await getTasks(runId)
-
-		// Filter for failed tasks only
-		const failedTasks = tasks.filter((task) => task.passed === false)
-
-		if (failedTasks.length === 0) {
-			return NextResponse.json({ error: "No failed tasks to export" }, { status: 400 })
-		}
-
-		// Create a zip archive
-		const archive = archiver("zip", { zlib: { level: 9 } })
-
-		// Collect chunks to build the response
-		const chunks: Buffer[] = []
-
-		archive.on("data", (chunk: Buffer) => {
-			chunks.push(chunk)
-		})
-
-		// Track archive errors
-		let archiveError: Error | null = null
-		archive.on("error", (err: Error) => {
-			archiveError = err
-		})
-
-		// Set up the end promise before finalizing (proper event listener ordering)
-		const archiveEndPromise = new Promise<void>((resolve, reject) => {
-			archive.on("end", resolve)
-			archive.on("error", reject)
-		})
-
-		// Add each failed task's log file and history files to the archive
-		const logDir = path.join(LOG_BASE_PATH, String(runId))
-		let filesAdded = 0
-
-		for (const task of failedTasks) {
-			// Sanitize language and exercise to prevent path traversal
-			const safeLanguage = sanitizePathComponent(task.language)
-			const safeExercise = sanitizePathComponent(task.exercise)
-			const expectedBase = path.resolve(LOG_BASE_PATH)
-
-			// Add the log file
-			const logFileName = `${safeLanguage}-${safeExercise}.log`
-			const logFilePath = path.join(logDir, logFileName)
-
-			// Verify the resolved path is within the expected directory (defense in depth)
-			const resolvedLogPath = path.resolve(logFilePath)
-			if (resolvedLogPath.startsWith(expectedBase) && fs.existsSync(logFilePath)) {
-				archive.file(logFilePath, { name: logFileName })
-				filesAdded++
-			}
-
-			// Add the API conversation history file
-			// Format: {language}-{exercise}.{iteration}_api_conversation_history.json
-			const apiHistoryFileName = `${safeLanguage}-${safeExercise}.${task.iteration}_api_conversation_history.json`
-			const apiHistoryFilePath = path.join(logDir, apiHistoryFileName)
-			const resolvedApiHistoryPath = path.resolve(apiHistoryFilePath)
-			if (resolvedApiHistoryPath.startsWith(expectedBase) && fs.existsSync(apiHistoryFilePath)) {
-				archive.file(apiHistoryFilePath, { name: apiHistoryFileName })
-				filesAdded++
-			}
-
-			// Add the UI messages file
-			// Format: {language}-{exercise}.{iteration}_ui_messages.json
-			const uiMessagesFileName = `${safeLanguage}-${safeExercise}.${task.iteration}_ui_messages.json`
-			const uiMessagesFilePath = path.join(logDir, uiMessagesFileName)
-			const resolvedUiMessagesPath = path.resolve(uiMessagesFilePath)
-			if (resolvedUiMessagesPath.startsWith(expectedBase) && fs.existsSync(uiMessagesFilePath)) {
-				archive.file(uiMessagesFilePath, { name: uiMessagesFileName })
-				filesAdded++
-			}
-		}
-
-		// Check if any files were actually added
-		if (filesAdded === 0) {
-			archive.abort()
-			return NextResponse.json(
-				{ error: "No log files found - they may have been cleared from disk" },
-				{ status: 404 },
-			)
-		}
-
-		// Finalize the archive
-		await archive.finalize()
-
-		// Wait for all data to be collected
-		await archiveEndPromise
-
-		// Check for archive errors
-		if (archiveError) {
-			throw archiveError
-		}
-
-		// Combine all chunks into a single buffer
-		const zipBuffer = Buffer.concat(chunks)
-
-		// Return the zip file
-		return new NextResponse(zipBuffer, {
-			status: 200,
-			headers: {
-				"Content-Type": "application/zip",
-				"Content-Disposition": `attachment; filename="run-${runId}-failed-logs.zip"`,
-				"Content-Length": String(zipBuffer.length),
-			},
-		})
-	} catch (error) {
-		console.error("Error exporting failed logs:", error)
-
-		if (error instanceof Error && error.name === "RecordNotFoundError") {
-			return NextResponse.json({ error: "Run not found" }, { status: 404 })
-		}
-
-		return NextResponse.json({ error: "Failed to export logs" }, { status: 500 })
-	}
-}

+ 1 - 0
packages/types/src/global-settings.ts

@@ -287,6 +287,7 @@ export const SECRET_STATE_KEYS = [
 	"fireworksApiKey",
 	"vercelAiGatewayApiKey",
 	"basetenApiKey",
+	"azureApiKey",
 ] as const
 
 // Global secrets that are part of GlobalSettings (not ProviderSettings)

+ 17 - 0
packages/types/src/provider-settings.ts

@@ -103,6 +103,7 @@ export const providerNames = [
 	...customProviders,
 	...fauxProviders,
 	"anthropic",
+	"azure",
 	"bedrock",
 	"baseten",
 	"deepseek",
@@ -377,12 +378,20 @@ const basetenSchema = apiModelIdProviderModelSchema.extend({
 	basetenApiKey: z.string().optional(),
 })
 
+const azureSchema = apiModelIdProviderModelSchema.extend({
+	azureApiKey: z.string().optional(),
+	azureResourceName: z.string().optional(),
+	azureDeploymentName: z.string().optional(),
+	azureApiVersion: z.string().optional(),
+})
+
 const defaultSchema = z.object({
 	apiProvider: z.undefined(),
 })
 
 export const providerSettingsSchemaDiscriminated = z.discriminatedUnion("apiProvider", [
 	anthropicSchema.merge(z.object({ apiProvider: z.literal("anthropic") })),
+	azureSchema.merge(z.object({ apiProvider: z.literal("azure") })),
 	openRouterSchema.merge(z.object({ apiProvider: z.literal("openrouter") })),
 	bedrockSchema.merge(z.object({ apiProvider: z.literal("bedrock") })),
 	vertexSchema.merge(z.object({ apiProvider: z.literal("vertex") })),
@@ -415,6 +424,7 @@ export const providerSettingsSchemaDiscriminated = z.discriminatedUnion("apiProv
 export const providerSettingsSchema = z.object({
 	apiProvider: providerNamesWithRetiredSchema.optional(),
 	...anthropicSchema.shape,
+	...azureSchema.shape,
 	...openRouterSchema.shape,
 	...bedrockSchema.shape,
 	...vertexSchema.shape,
@@ -490,6 +500,7 @@ export const isTypicalProvider = (key: unknown): key is TypicalProvider =>
 
 export const modelIdKeysByProvider: Record<TypicalProvider, ModelIdKey> = {
 	anthropic: "apiModelId",
+	azure: "apiModelId",
 	openrouter: "openRouterModelId",
 	bedrock: "apiModelId",
 	vertex: "apiModelId",
@@ -557,6 +568,12 @@ export const MODELS_BY_PROVIDER: Record<
 		label: "Anthropic",
 		models: Object.keys(anthropicModels),
 	},
+	azure: {
+		id: "azure",
+		label: "Azure AI Foundry",
+		// Azure uses deployment names configured by the user (not a fixed upstream model ID list)
+		models: [],
+	},
 	bedrock: {
 		id: "bedrock",
 		label: "Amazon Bedrock",

+ 403 - 0
packages/types/src/providers/azure.ts

@@ -0,0 +1,403 @@
+import type { ModelInfo } from "../model.js"
+
+/**
+ * Azure AI Foundry model metadata.
+ *
+ * NOTE:
+ * - Azure AI Foundry uses *deployment names* at runtime, but Roo still needs underlying model
+ *   capabilities (maxTokens/contextWindow/etc.) for validation and parameter shaping.
+ * - This list is derived from https://models.dev/api.json (provider: "azure") and intentionally
+ *   restricted to OpenAI/Azure OpenAI-style IDs (gpt-*, o*, codex-*).
+ */
+export const azureModels = {
+	"codex-mini": {
+		maxTokens: 100_000,
+		contextWindow: 200_000,
+		supportsImages: false,
+		supportsPromptCache: true,
+		inputPrice: 1.5,
+		outputPrice: 6,
+		cacheReadsPrice: 0.375,
+		supportsTemperature: false,
+		description:
+			"Codex Mini: Cloud-based software engineering agent powered by codex-1, a version of o3 optimized for coding tasks",
+	},
+	"gpt-4": {
+		maxTokens: 8_192,
+		contextWindow: 8_192,
+		supportsImages: false,
+		supportsPromptCache: false,
+		inputPrice: 60,
+		outputPrice: 120,
+		supportsTemperature: true,
+		description: "GPT-4",
+	},
+	"gpt-4-32k": {
+		maxTokens: 32_768,
+		contextWindow: 32_768,
+		supportsImages: false,
+		supportsPromptCache: false,
+		inputPrice: 60,
+		outputPrice: 120,
+		supportsTemperature: true,
+		description: "GPT-4 32K",
+	},
+	"gpt-4-turbo": {
+		maxTokens: 4_096,
+		contextWindow: 128_000,
+		supportsImages: true,
+		supportsPromptCache: false,
+		inputPrice: 10,
+		outputPrice: 30,
+		supportsTemperature: true,
+		description: "GPT-4 Turbo",
+	},
+	"gpt-4-turbo-vision": {
+		maxTokens: 4_096,
+		contextWindow: 128_000,
+		supportsImages: true,
+		supportsPromptCache: false,
+		inputPrice: 10,
+		outputPrice: 30,
+		supportsTemperature: true,
+		description: "GPT-4 Turbo Vision",
+	},
+	"gpt-4.1": {
+		maxTokens: 32_768,
+		contextWindow: 1_047_576,
+		supportsImages: true,
+		supportsPromptCache: true,
+		inputPrice: 2,
+		outputPrice: 8,
+		cacheReadsPrice: 0.5,
+		supportsTemperature: true,
+		description: "GPT-4.1",
+	},
+	"gpt-4.1-mini": {
+		maxTokens: 32_768,
+		contextWindow: 1_047_576,
+		supportsImages: true,
+		supportsPromptCache: true,
+		inputPrice: 0.4,
+		outputPrice: 1.6,
+		cacheReadsPrice: 0.1,
+		supportsTemperature: true,
+		description: "GPT-4.1 mini",
+	},
+	"gpt-4.1-nano": {
+		maxTokens: 32_768,
+		contextWindow: 1_047_576,
+		supportsImages: true,
+		supportsPromptCache: true,
+		inputPrice: 0.1,
+		outputPrice: 0.4,
+		cacheReadsPrice: 0.03,
+		supportsTemperature: true,
+		description: "GPT-4.1 nano",
+	},
+	"gpt-4o": {
+		maxTokens: 16_384,
+		contextWindow: 128_000,
+		supportsImages: true,
+		supportsPromptCache: true,
+		inputPrice: 2.5,
+		outputPrice: 10,
+		cacheReadsPrice: 1.25,
+		supportsTemperature: true,
+		description: "GPT-4o",
+	},
+	"gpt-4o-mini": {
+		maxTokens: 16_384,
+		contextWindow: 128_000,
+		supportsImages: true,
+		supportsPromptCache: true,
+		inputPrice: 0.15,
+		outputPrice: 0.6,
+		cacheReadsPrice: 0.08,
+		supportsTemperature: true,
+		description: "GPT-4o mini",
+	},
+	"gpt-5": {
+		maxTokens: 128_000,
+		contextWindow: 272_000,
+		includedTools: ["apply_patch"],
+		excludedTools: ["apply_diff", "write_to_file"],
+		supportsImages: true,
+		supportsPromptCache: true,
+		supportsReasoningEffort: ["minimal", "low", "medium", "high"],
+		reasoningEffort: "medium",
+		inputPrice: 1.25,
+		outputPrice: 10,
+		cacheReadsPrice: 0.13,
+		supportsVerbosity: true,
+		supportsTemperature: false,
+		description: "GPT-5: The best model for coding and agentic tasks across domains",
+	},
+	"gpt-5-codex": {
+		maxTokens: 128_000,
+		contextWindow: 400_000,
+		includedTools: ["apply_patch"],
+		excludedTools: ["apply_diff", "write_to_file"],
+		supportsImages: true,
+		supportsPromptCache: true,
+		supportsReasoningEffort: ["low", "medium", "high"],
+		reasoningEffort: "medium",
+		inputPrice: 1.25,
+		outputPrice: 10,
+		cacheReadsPrice: 0.13,
+		supportsTemperature: false,
+		description: "GPT-5-Codex: A version of GPT-5 optimized for agentic coding in Codex",
+	},
+	"gpt-5-mini": {
+		maxTokens: 128_000,
+		contextWindow: 272_000,
+		includedTools: ["apply_patch"],
+		excludedTools: ["apply_diff", "write_to_file"],
+		supportsImages: true,
+		supportsPromptCache: true,
+		supportsReasoningEffort: ["minimal", "low", "medium", "high"],
+		reasoningEffort: "medium",
+		inputPrice: 0.25,
+		outputPrice: 2,
+		cacheReadsPrice: 0.03,
+		supportsVerbosity: true,
+		supportsTemperature: false,
+		description: "GPT-5 Mini: A faster, more cost-efficient version of GPT-5 for well-defined tasks",
+	},
+	"gpt-5-nano": {
+		maxTokens: 128_000,
+		contextWindow: 272_000,
+		includedTools: ["apply_patch"],
+		excludedTools: ["apply_diff", "write_to_file"],
+		supportsImages: true,
+		supportsPromptCache: true,
+		supportsReasoningEffort: ["minimal", "low", "medium", "high"],
+		reasoningEffort: "medium",
+		inputPrice: 0.05,
+		outputPrice: 0.4,
+		cacheReadsPrice: 0.01,
+		supportsVerbosity: true,
+		supportsTemperature: false,
+		description: "GPT-5 Nano: Fastest, most cost-efficient version of GPT-5",
+	},
+	"gpt-5-pro": {
+		maxTokens: 272_000,
+		contextWindow: 400_000,
+		includedTools: ["apply_patch"],
+		excludedTools: ["apply_diff", "write_to_file"],
+		supportsImages: true,
+		supportsPromptCache: false,
+		supportsReasoningEffort: ["minimal", "low", "medium", "high"],
+		reasoningEffort: "medium",
+		inputPrice: 15,
+		outputPrice: 120,
+		supportsVerbosity: true,
+		supportsTemperature: false,
+		description: "GPT-5 Pro",
+	},
+	"gpt-5.1": {
+		maxTokens: 128_000,
+		contextWindow: 272_000,
+		includedTools: ["apply_patch"],
+		excludedTools: ["apply_diff", "write_to_file"],
+		supportsImages: true,
+		supportsPromptCache: true,
+		promptCacheRetention: "24h",
+		supportsReasoningEffort: ["none", "low", "medium", "high"],
+		reasoningEffort: "medium",
+		inputPrice: 1.25,
+		outputPrice: 10,
+		cacheReadsPrice: 0.125,
+		supportsVerbosity: true,
+		supportsTemperature: false,
+		description: "GPT-5.1: The best model for coding and agentic tasks across domains",
+	},
+	"gpt-5.1-chat": {
+		maxTokens: 16_384,
+		contextWindow: 128_000,
+		includedTools: ["apply_patch"],
+		excludedTools: ["apply_diff", "write_to_file"],
+		supportsImages: true,
+		supportsPromptCache: true,
+		promptCacheRetention: "24h",
+		inputPrice: 1.25,
+		outputPrice: 10,
+		cacheReadsPrice: 0.125,
+		supportsTemperature: false,
+		description: "GPT-5.1 Chat: Optimized for conversational AI and chat use cases",
+	},
+	"gpt-5.1-codex": {
+		maxTokens: 128_000,
+		contextWindow: 400_000,
+		includedTools: ["apply_patch"],
+		excludedTools: ["apply_diff", "write_to_file"],
+		supportsImages: true,
+		supportsPromptCache: true,
+		promptCacheRetention: "24h",
+		supportsReasoningEffort: ["low", "medium", "high"],
+		reasoningEffort: "medium",
+		inputPrice: 1.25,
+		outputPrice: 10,
+		cacheReadsPrice: 0.125,
+		supportsTemperature: false,
+		description: "GPT-5.1 Codex: A version of GPT-5.1 optimized for agentic coding in Codex",
+	},
+	"gpt-5.1-codex-max": {
+		maxTokens: 128_000,
+		contextWindow: 400_000,
+		includedTools: ["apply_patch"],
+		excludedTools: ["apply_diff", "write_to_file"],
+		supportsImages: true,
+		supportsPromptCache: true,
+		promptCacheRetention: "24h",
+		supportsReasoningEffort: ["low", "medium", "high", "xhigh"],
+		reasoningEffort: "medium",
+		inputPrice: 1.25,
+		outputPrice: 10,
+		cacheReadsPrice: 0.125,
+		supportsTemperature: false,
+		description:
+			"GPT-5.1 Codex Max: Our most intelligent coding model optimized for long-horizon, agentic coding tasks",
+	},
+	"gpt-5.1-codex-mini": {
+		maxTokens: 128_000,
+		contextWindow: 400_000,
+		includedTools: ["apply_patch"],
+		excludedTools: ["apply_diff", "write_to_file"],
+		supportsImages: true,
+		supportsPromptCache: true,
+		promptCacheRetention: "24h",
+		supportsReasoningEffort: ["low", "medium", "high"],
+		reasoningEffort: "medium",
+		inputPrice: 0.25,
+		outputPrice: 2,
+		cacheReadsPrice: 0.025,
+		supportsTemperature: false,
+		description: "GPT-5.1 Codex mini: A version of GPT-5.1 optimized for agentic coding in Codex",
+	},
+	"gpt-5.2": {
+		maxTokens: 128_000,
+		contextWindow: 400_000,
+		includedTools: ["apply_patch"],
+		excludedTools: ["apply_diff", "write_to_file"],
+		supportsImages: true,
+		supportsPromptCache: true,
+		promptCacheRetention: "24h",
+		supportsReasoningEffort: ["none", "low", "medium", "high", "xhigh"],
+		reasoningEffort: "medium",
+		inputPrice: 1.75,
+		outputPrice: 14,
+		cacheReadsPrice: 0.125,
+		supportsVerbosity: true,
+		supportsTemperature: false,
+		description: "GPT-5.2: Our flagship model for coding and agentic tasks across industries",
+	},
+	"gpt-5.2-chat": {
+		maxTokens: 16_384,
+		contextWindow: 128_000,
+		includedTools: ["apply_patch"],
+		excludedTools: ["apply_diff", "write_to_file"],
+		supportsImages: true,
+		supportsPromptCache: true,
+		inputPrice: 1.75,
+		outputPrice: 14,
+		cacheReadsPrice: 0.175,
+		supportsTemperature: false,
+		description: "GPT-5.2 Chat: Optimized for conversational AI and chat use cases",
+	},
+	"gpt-5.2-codex": {
+		maxTokens: 128_000,
+		contextWindow: 400_000,
+		includedTools: ["apply_patch"],
+		excludedTools: ["apply_diff", "write_to_file"],
+		supportsImages: true,
+		supportsPromptCache: true,
+		promptCacheRetention: "24h",
+		supportsReasoningEffort: ["low", "medium", "high", "xhigh"],
+		reasoningEffort: "medium",
+		inputPrice: 1.75,
+		outputPrice: 14,
+		cacheReadsPrice: 0.175,
+		supportsTemperature: false,
+		description:
+			"GPT-5.2 Codex: Our most intelligent coding model optimized for long-horizon, agentic coding tasks",
+	},
+	o1: {
+		maxTokens: 100_000,
+		contextWindow: 200_000,
+		supportsImages: true,
+		supportsPromptCache: true,
+		inputPrice: 15,
+		outputPrice: 60,
+		cacheReadsPrice: 7.5,
+		supportsTemperature: false,
+		description: "o1",
+	},
+	"o1-mini": {
+		maxTokens: 65_536,
+		contextWindow: 128_000,
+		supportsImages: true,
+		supportsPromptCache: true,
+		inputPrice: 1.1,
+		outputPrice: 4.4,
+		cacheReadsPrice: 0.55,
+		supportsTemperature: false,
+		description: "o1-mini",
+	},
+	"o1-preview": {
+		maxTokens: 32_768,
+		contextWindow: 128_000,
+		supportsImages: true,
+		supportsPromptCache: true,
+		inputPrice: 16.5,
+		outputPrice: 66,
+		cacheReadsPrice: 8.25,
+		supportsTemperature: false,
+		description: "o1-preview",
+	},
+	o3: {
+		maxTokens: 100_000,
+		contextWindow: 200_000,
+		supportsImages: true,
+		supportsPromptCache: true,
+		supportsReasoningEffort: ["low", "medium", "high"],
+		reasoningEffort: "medium",
+		inputPrice: 2,
+		outputPrice: 8,
+		cacheReadsPrice: 0.5,
+		supportsTemperature: false,
+		description: "o3",
+	},
+	"o3-mini": {
+		maxTokens: 100_000,
+		contextWindow: 200_000,
+		supportsImages: false,
+		supportsPromptCache: true,
+		supportsReasoningEffort: ["low", "medium", "high"],
+		reasoningEffort: "medium",
+		inputPrice: 1.1,
+		outputPrice: 4.4,
+		cacheReadsPrice: 0.55,
+		supportsTemperature: false,
+		description: "o3-mini",
+	},
+	"o4-mini": {
+		maxTokens: 100_000,
+		contextWindow: 200_000,
+		supportsImages: true,
+		supportsPromptCache: true,
+		supportsReasoningEffort: ["low", "medium", "high"],
+		reasoningEffort: "medium",
+		inputPrice: 1.1,
+		outputPrice: 4.4,
+		cacheReadsPrice: 0.28,
+		supportsTemperature: false,
+		description: "o4-mini",
+	},
+} as const satisfies Record<string, ModelInfo>
+
+export type AzureModelId = keyof typeof azureModels
+
+export const azureDefaultModelId: AzureModelId = "gpt-4o"
+
+export const azureDefaultModelInfo: ModelInfo = azureModels[azureDefaultModelId]

+ 4 - 0
packages/types/src/providers/index.ts

@@ -1,4 +1,5 @@
 export * from "./anthropic.js"
+export * from "./azure.js"
 export * from "./baseten.js"
 export * from "./bedrock.js"
 export * from "./deepseek.js"
@@ -25,6 +26,7 @@ export * from "./zai.js"
 export * from "./minimax.js"
 
 import { anthropicDefaultModelId } from "./anthropic.js"
+import { azureDefaultModelId } from "./azure.js"
 import { basetenDefaultModelId } from "./baseten.js"
 import { bedrockDefaultModelId } from "./bedrock.js"
 import { deepSeekDefaultModelId } from "./deepseek.js"
@@ -107,6 +109,8 @@ export function getProviderDefaultModelId(
 			return qwenCodeDefaultModelId
 		case "vercel-ai-gateway":
 			return vercelAiGatewayDefaultModelId
+		case "azure":
+			return azureDefaultModelId
 		case "anthropic":
 		case "gemini-cli":
 		case "fake-ai":

+ 2 - 3
packages/types/src/providers/openai.ts

@@ -506,9 +506,8 @@ export const openAiModelInfoSaneDefaults: ModelInfo = {
 	outputPrice: 0,
 }
 
-// https://learn.microsoft.com/en-us/azure/ai-services/openai/api-version-deprecation
-// https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs
-export const azureOpenAiDefaultApiVersion = "2024-08-01-preview"
+// https://learn.microsoft.com/en-us/azure/ai-foundry/openai/api-version-lifecycle
+export const azureOpenAiDefaultApiVersion = "2025-04-01-preview"
 
 export const OPENAI_NATIVE_DEFAULT_TEMPERATURE = 0
 

+ 50 - 1
pnpm-lock.yaml

@@ -752,6 +752,9 @@ importers:
       '@ai-sdk/anthropic':
         specifier: ^3.0.38
         version: 3.0.38([email protected])
+      '@ai-sdk/azure':
+        specifier: ^2.0.6
+        version: 2.0.91([email protected])
       '@ai-sdk/baseten':
         specifier: ^1.0.31
         version: 1.0.31([email protected])
@@ -1435,6 +1438,12 @@ packages:
     peerDependencies:
       zod: 3.25.76
 
+  '@ai-sdk/[email protected]':
+    resolution: {integrity: sha512-9tznVSs6LGQNKKxb8pKd7CkBV9yk+a/ENpFicHCj2CmBUKefxzwJ9JbUqrlK3VF6dGZw3LXq0dWxt7/Yekaj1w==}
+    engines: {node: '>=18'}
+    peerDependencies:
+      zod: 3.25.76
+
   '@ai-sdk/[email protected]':
     resolution: {integrity: sha512-tGbV96WBb5nnfyUYFrPyBxrhw53YlKSJbMC+rH3HhQlUaIs8+m/Bm4M0isrek9owIIf4MmmSDZ5VZL08zz7eFQ==}
     engines: {node: '>=18'}
@@ -1489,12 +1498,24 @@ packages:
     peerDependencies:
       zod: 3.25.76
 
+  '@ai-sdk/[email protected]':
+    resolution: {integrity: sha512-4+qWkBCbL9HPKbgrUO/F2uXZ8GqrYxHa8SWEYIzxEJ9zvWw3ISr3t1/27O1i8MGSym+PzEyHBT48EV4LAwWaEw==}
+    engines: {node: '>=18'}
+    peerDependencies:
+      zod: 3.25.76
+
   '@ai-sdk/[email protected]':
     resolution: {integrity: sha512-W/hiwxIfG29IO0Fob1HwWpFssMsNrxWoX8A7DwNGOtKArDBmJNuGzQeU/k0Fnh8WyvZEnfxkjO4oXkSXfVBayg==}
     engines: {node: '>=18'}
     peerDependencies:
       zod: 3.25.76
 
+  '@ai-sdk/[email protected]':
+    resolution: {integrity: sha512-iXHVe0apM2zUEzauqJwqmpC37A5rihrStAih5Ks+JE32iTe4LZ58y17UGBjpQQTCRw9YxMeo2UFLxLpBluyvLQ==}
+    engines: {node: '>=18'}
+    peerDependencies:
+      zod: 3.25.76
+
   '@ai-sdk/[email protected]':
     resolution: {integrity: sha512-HliwB/yzufw3iwczbFVE2Fiwf1XqROB/I6ng8EKUsPM5+2wnIa8f4VbljZcDx+grhFrPV+PnRZH7zBqi8WZM7Q==}
     engines: {node: '>=18'}
@@ -1511,6 +1532,10 @@ packages:
     resolution: {integrity: sha512-6o7Y2SeO9vFKB8lArHXehNuusnpddKPk7xqL7T2/b+OvXMRIXUO1rR4wcv1hAFUAT9avGZshty3Wlua/XA7TvA==}
     engines: {node: '>=18'}
 
+  '@ai-sdk/[email protected]':
+    resolution: {integrity: sha512-KCUwswvsC5VsW2PWFqF8eJgSCu5Ysj7m1TxiHTVA6g7k360bk0RNQENT8KTMAYEs+8fWPD3Uu4dEmzGHc+jGng==}
+    engines: {node: '>=18'}
+
   '@ai-sdk/[email protected]':
     resolution: {integrity: sha512-oGMAgGoQdBXbZqNG0Ze56CHjDZ1IDYOwGYxYjO5KLSlz5HiNQ9udIXsPZ61VWaHGZ5XW/jyjmr6t2xz2jGVwbQ==}
     engines: {node: '>=18'}
@@ -11053,6 +11078,13 @@ snapshots:
       '@ai-sdk/provider-utils': 4.0.14([email protected])
       zod: 3.25.76
 
+  '@ai-sdk/[email protected]([email protected])':
+    dependencies:
+      '@ai-sdk/openai': 2.0.89([email protected])
+      '@ai-sdk/provider': 2.0.1
+      '@ai-sdk/provider-utils': 3.0.20([email protected])
+      zod: 3.25.76
+
   '@ai-sdk/[email protected]([email protected])':
     dependencies:
       '@ai-sdk/openai-compatible': 2.0.28([email protected])
@@ -11116,12 +11148,25 @@ snapshots:
       '@ai-sdk/provider-utils': 4.0.14([email protected])
       zod: 3.25.76
 
+  '@ai-sdk/[email protected]([email protected])':
+    dependencies:
+      '@ai-sdk/provider': 2.0.1
+      '@ai-sdk/provider-utils': 3.0.20([email protected])
+      zod: 3.25.76
+
   '@ai-sdk/[email protected]([email protected])':
     dependencies:
       '@ai-sdk/provider': 3.0.8
       '@ai-sdk/provider-utils': 4.0.14([email protected])
       zod: 3.25.76
 
+  '@ai-sdk/[email protected]([email protected])':
+    dependencies:
+      '@ai-sdk/provider': 2.0.1
+      '@standard-schema/spec': 1.1.0
+      eventsource-parser: 3.0.6
+      zod: 3.25.76
+
   '@ai-sdk/[email protected]([email protected])':
     dependencies:
       '@ai-sdk/provider': 2.0.0
@@ -11141,6 +11186,10 @@ snapshots:
     dependencies:
       json-schema: 0.4.0
 
+  '@ai-sdk/[email protected]':
+    dependencies:
+      json-schema: 0.4.0
+
   '@ai-sdk/[email protected]':
     dependencies:
       json-schema: 0.4.0
@@ -14945,7 +14994,7 @@ snapshots:
       sirv: 3.0.1
       tinyglobby: 0.2.14
       tinyrainbow: 2.0.0
-      vitest: 3.2.4(@types/[email protected])(@types/node@20.17.50)(@vitest/[email protected])([email protected])([email protected])([email protected])([email protected])([email protected])
+      vitest: 3.2.4(@types/[email protected])(@types/node@24.2.1)(@vitest/[email protected])([email protected])([email protected])([email protected])([email protected])([email protected])
 
   '@vitest/[email protected]':
     dependencies:

+ 3 - 0
src/api/index.ts

@@ -8,6 +8,7 @@ import { ApiStream } from "./transform/stream"
 import {
 	AnthropicHandler,
 	AwsBedrockHandler,
+	AzureHandler,
 	OpenRouterHandler,
 	VertexHandler,
 	AnthropicVertexHandler,
@@ -128,6 +129,8 @@ export function buildApiHandler(configuration: ProviderSettings): ApiHandler {
 	switch (apiProvider) {
 		case "anthropic":
 			return new AnthropicHandler(options)
+		case "azure":
+			return new AzureHandler(options)
 		case "openrouter":
 			return new OpenRouterHandler(options)
 		case "bedrock":

+ 431 - 0
src/api/providers/__tests__/azure.spec.ts

@@ -0,0 +1,431 @@
+// Use vi.hoisted to define mock functions that can be referenced in hoisted vi.mock() calls
+const { mockStreamText, mockGenerateText, mockCreateAzure } = vi.hoisted(() => ({
+	mockStreamText: vi.fn(),
+	mockGenerateText: vi.fn(),
+	mockCreateAzure: vi.fn(() => {
+		// Return a provider function that supports Responses API model creation
+		const mockProvider = vi.fn(() => ({
+			modelId: "gpt-4o",
+			provider: "azure",
+		}))
+		;(mockProvider as any).responses = vi.fn(() => ({
+			modelId: "gpt-4o",
+			provider: "azure.responses",
+		}))
+		return mockProvider
+	}),
+}))
+
+vi.mock("ai", async (importOriginal) => {
+	const actual = await importOriginal<typeof import("ai")>()
+	return {
+		...actual,
+		streamText: mockStreamText,
+		generateText: mockGenerateText,
+	}
+})
+
+vi.mock("@ai-sdk/azure", () => ({
+	createAzure: mockCreateAzure,
+}))
+
+import type { Anthropic } from "@anthropic-ai/sdk"
+
+import type { ApiHandlerOptions } from "../../../shared/api"
+
+import { AzureHandler } from "../azure"
+
+describe("AzureHandler", () => {
+	let handler: AzureHandler
+	let mockOptions: ApiHandlerOptions
+
+	beforeEach(() => {
+		vi.clearAllMocks()
+		mockOptions = {
+			azureApiKey: "test-api-key",
+			azureResourceName: "test-resource",
+			azureDeploymentName: "gpt-4o",
+			azureApiVersion: "2024-08-01-preview",
+		}
+		handler = new AzureHandler(mockOptions)
+	})
+
+	describe("constructor", () => {
+		it("should initialize with provided options", () => {
+			expect(handler).toBeInstanceOf(AzureHandler)
+			expect(handler.getModel().id).toBe(mockOptions.azureDeploymentName)
+		})
+
+		it("should use apiModelId if azureDeploymentName is not provided", () => {
+			const handlerWithModelId = new AzureHandler({
+				...mockOptions,
+				azureDeploymentName: undefined,
+				apiModelId: "gpt-35-turbo",
+			})
+			expect(handlerWithModelId.getModel().id).toBe("gpt-35-turbo")
+		})
+
+		it("should use empty string if neither azureDeploymentName nor apiModelId is provided", () => {
+			const handlerWithoutModel = new AzureHandler({
+				...mockOptions,
+				azureDeploymentName: undefined,
+				apiModelId: undefined,
+			})
+			expect(handlerWithoutModel.getModel().id).toBe("")
+		})
+
+		it("should use default API version if not provided", () => {
+			const handlerWithoutVersion = new AzureHandler({
+				...mockOptions,
+				azureApiVersion: undefined,
+			})
+			expect(handlerWithoutVersion).toBeInstanceOf(AzureHandler)
+			expect(mockCreateAzure).toHaveBeenLastCalledWith(
+				expect.objectContaining({ apiVersion: "2025-04-01-preview" }),
+			)
+		})
+
+		it("should normalize query-style API version input", () => {
+			new AzureHandler({
+				...mockOptions,
+				azureApiVersion: " ?api-version=2024-10-21&foo=bar ",
+			})
+
+			expect(mockCreateAzure).toHaveBeenLastCalledWith(
+				expect.objectContaining({
+					apiVersion: "2024-10-21",
+				}),
+			)
+		})
+
+		it("should use default API version when configured value is blank", () => {
+			new AzureHandler({
+				...mockOptions,
+				azureApiVersion: "   ",
+			})
+
+			expect(mockCreateAzure).toHaveBeenLastCalledWith(
+				expect.objectContaining({ apiVersion: "2025-04-01-preview" }),
+			)
+		})
+	})
+
+	describe("getModel", () => {
+		it("should return model info with deployment name as ID", () => {
+			const model = handler.getModel()
+			expect(model.id).toBe(mockOptions.azureDeploymentName)
+			expect(model.info).toBeDefined()
+		})
+
+		it("should include model parameters from getModelParams", () => {
+			const model = handler.getModel()
+			expect(model).toHaveProperty("temperature")
+			expect(model).toHaveProperty("maxTokens")
+		})
+	})
+
+	describe("isAiSdkProvider", () => {
+		it("should return true", () => {
+			expect(handler.isAiSdkProvider()).toBe(true)
+		})
+	})
+
+	describe("createMessage", () => {
+		const systemPrompt = "You are a helpful assistant."
+		const messages: Anthropic.Messages.MessageParam[] = [
+			{
+				role: "user",
+				content: [
+					{
+						type: "text" as const,
+						text: "Hello!",
+					},
+				],
+			},
+		]
+
+		it("should use the Responses API language model", async () => {
+			async function* mockFullStream() {
+				yield { type: "text-delta", text: "Test response" }
+			}
+
+			mockStreamText.mockReturnValue({
+				fullStream: mockFullStream(),
+				usage: Promise.resolve({ inputTokens: 1, outputTokens: 1 }),
+				providerMetadata: Promise.resolve({}),
+			})
+
+			const stream = handler.createMessage(systemPrompt, messages)
+			for await (const _chunk of stream) {
+				// exhaust stream
+			}
+
+			expect(mockStreamText).toHaveBeenCalled()
+			const requestOptions = mockStreamText.mock.calls[0][0]
+			expect((requestOptions.model as any).provider).toBe("azure.responses")
+		})
+
+		it("should handle streaming responses", async () => {
+			// Mock the fullStream async generator
+			async function* mockFullStream() {
+				yield { type: "text-delta", text: "Test response" }
+			}
+
+			// Mock usage and providerMetadata promises
+			const mockUsage = Promise.resolve({
+				inputTokens: 10,
+				outputTokens: 5,
+			})
+
+			const mockProviderMetadata = Promise.resolve({
+				azure: {
+					promptCacheHitTokens: 2,
+					promptCacheMissTokens: 8,
+				},
+			})
+
+			mockStreamText.mockReturnValue({
+				fullStream: mockFullStream(),
+				usage: mockUsage,
+				providerMetadata: mockProviderMetadata,
+			})
+
+			const stream = handler.createMessage(systemPrompt, messages)
+			const chunks: any[] = []
+			for await (const chunk of stream) {
+				chunks.push(chunk)
+			}
+
+			expect(chunks.length).toBeGreaterThan(0)
+			const textChunks = chunks.filter((chunk) => chunk.type === "text")
+			expect(textChunks).toHaveLength(1)
+			expect(textChunks[0].text).toBe("Test response")
+		})
+
+		it("should include usage information", async () => {
+			async function* mockFullStream() {
+				yield { type: "text-delta", text: "Test response" }
+			}
+
+			const mockUsage = Promise.resolve({
+				inputTokens: 10,
+				outputTokens: 5,
+			})
+
+			const mockProviderMetadata = Promise.resolve({
+				azure: {
+					promptCacheHitTokens: 2,
+					promptCacheMissTokens: 8,
+				},
+			})
+
+			mockStreamText.mockReturnValue({
+				fullStream: mockFullStream(),
+				usage: mockUsage,
+				providerMetadata: mockProviderMetadata,
+			})
+
+			const stream = handler.createMessage(systemPrompt, messages)
+			const chunks: any[] = []
+			for await (const chunk of stream) {
+				chunks.push(chunk)
+			}
+
+			const usageChunks = chunks.filter((chunk) => chunk.type === "usage")
+			expect(usageChunks.length).toBeGreaterThan(0)
+			expect(usageChunks[0].inputTokens).toBe(10)
+			expect(usageChunks[0].outputTokens).toBe(5)
+		})
+
+		it("should include cache metrics in usage information from providerMetadata", async () => {
+			async function* mockFullStream() {
+				yield { type: "text-delta", text: "Test response" }
+			}
+
+			const mockUsage = Promise.resolve({
+				inputTokens: 10,
+				outputTokens: 5,
+			})
+
+			// Azure provides cache metrics via providerMetadata
+			const mockProviderMetadata = Promise.resolve({
+				azure: {
+					promptCacheHitTokens: 2,
+					promptCacheMissTokens: 8,
+				},
+			})
+
+			mockStreamText.mockReturnValue({
+				fullStream: mockFullStream(),
+				usage: mockUsage,
+				providerMetadata: mockProviderMetadata,
+			})
+
+			const stream = handler.createMessage(systemPrompt, messages)
+			const chunks: any[] = []
+			for await (const chunk of stream) {
+				chunks.push(chunk)
+			}
+
+			const usageChunks = chunks.filter((chunk) => chunk.type === "usage")
+			expect(usageChunks.length).toBeGreaterThan(0)
+			expect(usageChunks[0].cacheWriteTokens).toBeUndefined()
+			expect(usageChunks[0].cacheReadTokens).toBe(2) // promptCacheHitTokens
+		})
+
+		it("should handle tool calls via tool-input-start/delta/end events", async () => {
+			async function* mockFullStream() {
+				yield { type: "tool-input-start", id: "tool-1", toolName: "test_tool" }
+				yield { type: "tool-input-delta", id: "tool-1", delta: '{"arg":' }
+				yield { type: "tool-input-delta", id: "tool-1", delta: '"value"}' }
+				yield { type: "tool-input-end", id: "tool-1" }
+			}
+
+			const mockUsage = Promise.resolve({
+				inputTokens: 10,
+				outputTokens: 5,
+			})
+
+			const mockProviderMetadata = Promise.resolve({})
+
+			mockStreamText.mockReturnValue({
+				fullStream: mockFullStream(),
+				usage: mockUsage,
+				providerMetadata: mockProviderMetadata,
+			})
+
+			const stream = handler.createMessage(systemPrompt, messages)
+			const chunks: any[] = []
+			for await (const chunk of stream) {
+				chunks.push(chunk)
+			}
+
+			const toolStartChunks = chunks.filter((chunk) => chunk.type === "tool_call_start")
+			expect(toolStartChunks).toHaveLength(1)
+			expect(toolStartChunks[0].id).toBe("tool-1")
+			expect(toolStartChunks[0].name).toBe("test_tool")
+
+			const toolDeltaChunks = chunks.filter((chunk) => chunk.type === "tool_call_delta")
+			expect(toolDeltaChunks).toHaveLength(2)
+
+			const toolEndChunks = chunks.filter((chunk) => chunk.type === "tool_call_end")
+			expect(toolEndChunks).toHaveLength(1)
+		})
+
+		it("should handle errors from AI SDK", async () => {
+			const mockError = new Error("API Error")
+			;(mockError as any).name = "AI_APICallError"
+			;(mockError as any).status = 500
+
+			async function* mockFullStream(): AsyncGenerator<any> {
+				yield { type: "text-delta", text: "" }
+				throw mockError
+			}
+
+			mockStreamText.mockReturnValue({
+				fullStream: mockFullStream(),
+				usage: Promise.resolve({}),
+				providerMetadata: Promise.resolve({}),
+			})
+
+			const stream = handler.createMessage(systemPrompt, messages)
+			await expect(async () => {
+				const chunks: any[] = []
+				for await (const chunk of stream) {
+					chunks.push(chunk)
+				}
+			}).rejects.toThrow("Azure AI Foundry")
+		})
+	})
+
+	describe("completePrompt", () => {
+		it("should complete a prompt using generateText", async () => {
+			mockGenerateText.mockResolvedValue({
+				text: "Test completion",
+			})
+
+			const result = await handler.completePrompt("Test prompt")
+
+			expect(result).toBe("Test completion")
+			expect(mockGenerateText).toHaveBeenCalledWith(
+				expect.objectContaining({
+					prompt: "Test prompt",
+				}),
+			)
+		})
+
+		it("should use configured temperature", async () => {
+			const handlerWithTemp = new AzureHandler({
+				...mockOptions,
+				modelTemperature: 0.7,
+			})
+
+			mockGenerateText.mockResolvedValue({
+				text: "Test completion",
+			})
+
+			await handlerWithTemp.completePrompt("Test prompt")
+
+			expect(mockGenerateText).toHaveBeenCalledWith(
+				expect.objectContaining({
+					temperature: 0.7,
+				}),
+			)
+		})
+	})
+
+	describe("tools", () => {
+		const systemPrompt = "You are a helpful assistant."
+		const messages: Anthropic.Messages.MessageParam[] = [
+			{
+				role: "user",
+				content: [{ type: "text" as const, text: "Use a tool" }],
+			},
+		]
+
+		it("should pass tools to streamText", async () => {
+			async function* mockFullStream() {
+				yield { type: "text-delta", text: "Using tool" }
+			}
+
+			mockStreamText.mockReturnValue({
+				fullStream: mockFullStream(),
+				usage: Promise.resolve({ inputTokens: 10, outputTokens: 5 }),
+				providerMetadata: Promise.resolve({}),
+			})
+
+			const tools = [
+				{
+					type: "function" as const,
+					function: {
+						name: "test_tool",
+						description: "A test tool",
+						parameters: {
+							type: "object",
+							properties: {
+								arg: { type: "string" },
+							},
+							required: ["arg"],
+						},
+					},
+				},
+			]
+
+			const stream = handler.createMessage(systemPrompt, messages, {
+				taskId: "test-task",
+				tools,
+			})
+
+			const chunks: any[] = []
+			for await (const chunk of stream) {
+				chunks.push(chunk)
+			}
+
+			expect(mockStreamText).toHaveBeenCalledWith(
+				expect.objectContaining({
+					tools: expect.any(Object),
+				}),
+			)
+		})
+	})
+})

+ 201 - 0
src/api/providers/azure.ts

@@ -0,0 +1,201 @@
+import { Anthropic } from "@anthropic-ai/sdk"
+import { createAzure } from "@ai-sdk/azure"
+import { streamText, generateText, ToolSet } from "ai"
+
+import { azureModels, azureDefaultModelInfo, azureOpenAiDefaultApiVersion, type ModelInfo } from "@roo-code/types"
+
+import type { ApiHandlerOptions } from "../../shared/api"
+
+import {
+	convertToAiSdkMessages,
+	convertToolsForAiSdk,
+	processAiSdkStreamPart,
+	mapToolChoice,
+	handleAiSdkError,
+} from "../transform/ai-sdk"
+import { ApiStream, ApiStreamUsageChunk } from "../transform/stream"
+import { getModelParams } from "../transform/model-params"
+
+import { DEFAULT_HEADERS } from "./constants"
+import { BaseProvider } from "./base-provider"
+import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
+
+const AZURE_DEFAULT_TEMPERATURE = 0
+
+/**
+ * Azure AI Foundry provider using the dedicated @ai-sdk/azure package.
+ * Provides native support for Azure OpenAI deployments with proper resource-based routing.
+ */
+export class AzureHandler extends BaseProvider implements SingleCompletionHandler {
+	protected options: ApiHandlerOptions
+	protected provider: ReturnType<typeof createAzure>
+
+	constructor(options: ApiHandlerOptions) {
+		super()
+		this.options = options
+
+		const rawApiVersion = (options.azureApiVersion ?? "").trim()
+		const queryLikeApiVersion = rawApiVersion.replace(/^\?/, "").trim()
+		const normalizedApiVersion = queryLikeApiVersion.toLowerCase().includes("api-version=")
+			? (new URLSearchParams(queryLikeApiVersion).get("api-version") ?? "")
+			: queryLikeApiVersion
+		const apiVersion = normalizedApiVersion.replace(/^api-version=/i, "").trim()
+
+		// Create the Azure provider using AI SDK
+		// The @ai-sdk/azure package uses resourceName-based routing
+		this.provider = createAzure({
+			resourceName: options.azureResourceName ?? "",
+			apiKey: options.azureApiKey, // Optional — Azure supports managed identity / Entra ID auth
+			...(apiVersion ? { apiVersion } : { apiVersion: azureOpenAiDefaultApiVersion }),
+			headers: DEFAULT_HEADERS,
+		})
+	}
+
+	override getModel(): { id: string; info: ModelInfo; maxTokens?: number; temperature?: number } {
+		// Azure uses deployment names for API calls, but apiModelId for model capabilities.
+		// deploymentId is sent to the Azure API; modelId is used for capability lookup.
+		const deploymentId = this.options.azureDeploymentName ?? this.options.apiModelId ?? ""
+		const modelId = this.options.apiModelId ?? deploymentId
+		const info: ModelInfo =
+			(azureModels as Record<string, ModelInfo>)[modelId] ??
+			(azureModels as Record<string, ModelInfo>)[deploymentId] ??
+			azureDefaultModelInfo
+		const params = getModelParams({
+			format: "openai",
+			modelId: deploymentId, // deployment name for the API
+			model: info,
+			settings: this.options,
+			defaultTemperature: AZURE_DEFAULT_TEMPERATURE,
+		})
+		return { id: deploymentId, info, ...params }
+	}
+
+	/**
+	 * Get the language model for the configured deployment name.
+	 * Azure provider is wired to use the Responses API endpoint.
+	 */
+	protected getLanguageModel() {
+		const { id } = this.getModel()
+		return this.provider.responses(id)
+	}
+
+	/**
+	 * Process usage metrics from the AI SDK response.
+	 * Azure AI Foundry provides standard OpenAI-compatible usage metrics.
+	 */
+	protected processUsageMetrics(
+		usage: {
+			inputTokens?: number
+			outputTokens?: number
+			details?: {
+				cachedInputTokens?: number
+				reasoningTokens?: number
+			}
+		},
+		providerMetadata?: {
+			azure?: {
+				promptCacheHitTokens?: number
+				promptCacheMissTokens?: number
+			}
+		},
+	): ApiStreamUsageChunk {
+		// Extract cache metrics from Azure's providerMetadata if available
+		const cacheReadTokens = providerMetadata?.azure?.promptCacheHitTokens ?? usage.details?.cachedInputTokens
+		// Azure uses OpenAI-compatible caching which does not report cache write tokens separately;
+		// promptCacheMissTokens represents tokens NOT found in cache (processed from scratch), not tokens written to cache.
+		const cacheWriteTokens = undefined
+
+		return {
+			type: "usage",
+			inputTokens: usage.inputTokens || 0,
+			outputTokens: usage.outputTokens || 0,
+			cacheReadTokens,
+			cacheWriteTokens,
+			reasoningTokens: usage.details?.reasoningTokens,
+		}
+	}
+
+	/**
+	 * Get the max tokens parameter to include in the request.
+	 * Returns undefined if no valid maxTokens is configured to let the API use its default.
+	 */
+	protected getMaxOutputTokens(): number | undefined {
+		const { info } = this.getModel()
+		const maxTokens = this.options.modelMaxTokens || info.maxTokens
+		// Azure AI Foundry API requires maxOutputTokens >= 1, so filter out invalid values
+		return maxTokens && maxTokens > 0 ? maxTokens : undefined
+	}
+
+	/**
+	 * Create a message stream using the AI SDK.
+	 */
+	override async *createMessage(
+		systemPrompt: string,
+		messages: Anthropic.Messages.MessageParam[],
+		metadata?: ApiHandlerCreateMessageMetadata,
+	): ApiStream {
+		const { temperature } = this.getModel()
+		const languageModel = this.getLanguageModel()
+
+		// Convert messages to AI SDK format
+		const aiSdkMessages = convertToAiSdkMessages(messages)
+
+		// Convert tools to OpenAI format first, then to AI SDK format
+		const openAiTools = this.convertToolsForOpenAI(metadata?.tools)
+		const aiSdkTools = convertToolsForAiSdk(openAiTools) as ToolSet | undefined
+
+		// Build the request options
+		const requestOptions: Parameters<typeof streamText>[0] = {
+			model: languageModel,
+			system: systemPrompt,
+			messages: aiSdkMessages,
+			temperature: this.options.modelTemperature ?? temperature ?? AZURE_DEFAULT_TEMPERATURE,
+			maxOutputTokens: this.getMaxOutputTokens(),
+			tools: aiSdkTools,
+			toolChoice: mapToolChoice(metadata?.tool_choice),
+		}
+
+		// Use streamText for streaming responses
+		const result = streamText(requestOptions)
+
+		try {
+			// Process the full stream to get all events including reasoning
+			for await (const part of result.fullStream) {
+				for (const chunk of processAiSdkStreamPart(part)) {
+					yield chunk
+				}
+			}
+
+			// Yield usage metrics at the end, including cache metrics from providerMetadata
+			const usage = await result.usage
+			const providerMetadata = await result.providerMetadata
+			if (usage) {
+				yield this.processUsageMetrics(usage, providerMetadata as any)
+			}
+		} catch (error) {
+			// Handle AI SDK errors (AI_RetryError, AI_APICallError, etc.)
+			throw handleAiSdkError(error, "Azure AI Foundry")
+		}
+	}
+
+	/**
+	 * Complete a prompt using the AI SDK generateText.
+	 */
+	async completePrompt(prompt: string): Promise<string> {
+		const { temperature } = this.getModel()
+		const languageModel = this.getLanguageModel()
+
+		const { text } = await generateText({
+			model: languageModel,
+			prompt,
+			maxOutputTokens: this.getMaxOutputTokens(),
+			temperature: this.options.modelTemperature ?? temperature ?? AZURE_DEFAULT_TEMPERATURE,
+		})
+
+		return text
+	}
+
+	override isAiSdkProvider(): boolean {
+		return true
+	}
+}

+ 1 - 0
src/api/providers/index.ts

@@ -1,5 +1,6 @@
 export { AnthropicVertexHandler } from "./anthropic-vertex"
 export { AnthropicHandler } from "./anthropic"
+export { AzureHandler } from "./azure"
 export { AwsBedrockHandler } from "./bedrock"
 export { DeepSeekHandler } from "./deepseek"
 export { MoonshotHandler } from "./moonshot"

+ 0 - 66
src/integrations/terminal/README.md

@@ -1,66 +0,0 @@
-NOTICE TO DEVELOPERS:
-
-The Terminal classes are very sensitive to change, partially because of
-the complicated way that shell integration works with VSCE, and
-partially because of the way that Cline interacts with the Terminal\*
-class abstractions that make VSCE shell integration easier to work with.
-
-At the point that PR #1365 is merged, it is unlikely that any Terminal\*
-classes will need to be modified substantially. Generally speaking, we
-should think of this as a stable interface and minimize changes.
-
-`TerminalProcess` class is particularly critical because it
-provides all input handling and event notifications related to terminal
-output to send it to the rest of the program. User interfaces for working
-with data from terminals should only be as follows:
-
-1. By listening to the events:
-
-    - this.on("completed", fullOutput) - provides full output upon completion
-    - this.on("line") - provides new lines, probably more than one
-
-2. By calling `this.getUnretrievedOutput()`
-
-This implementation intentionally returns all terminal output to the user
-interfaces listed above. Any throttling or other stream modification _must_
-be implemented outside of this class.
-
-All other interfaces are private.
-
-Warning: Modifying the `TerminalProcess` class without fully understanding VSCE shell integration architecture may affect the reliability or performance of reading terminal output.
-
-`TerminalProcess` was carefully designed for performance and accuracy:
-
-Performance is obtained by: - Throttling event output on 100ms intervals - Using only indexes to access the output array - Maintaining a zero-copy implementation with a fullOutput string for storage - The fullOutput array is never split on carriage returns
-as this was found to be very slow - Allowing multi-line chunks - Minimizing regular expression calls, as they have been tested to be
-500x slower than the use of string parsing functions for large outputs
-in this implementation
-
-Accuracy is obtained by: - Using only indexes against fullOutput - Paying close attention to off-by-one errors when indexing any content - Always returning exactly the content that was printed by the terminal,
-including all carriage returns which may (or may not) have been in the
-input stream
-
-Additional resources:
-
-- This implementation was rigorously tested using:
-
-    - https://github.com/KJ7LNW/vsce-test-terminal-integration
-
-- There was a serious upstream bug that may not be fully solved,
-  or that may resurface in future VSCE releases, simply due to
-  the complexity of reliably handling terminal-provided escape
-  sequences across multiple shell implementations. This implementation
-  attempts to work around the problems and provide backwards
-  compatibility for VSCE releases that may not have the fix in
-  upstream bug #237208, but there still may be some unhandled
-  corner cases. See this ticket for more detail:
-
-    - https://github.com/microsoft/vscode/issues/237208
-
-- The original Cline PR has quite a bit of information:
-    - https://github.com/cline/cline/pull/1089
-
-Contact me if you have any questions: - GitHub: KJ7LNW - Discord: kj7lnw - [roo-cline at z.ewheeler.org]
-
-Cheers,
--Eric, KJ7LNW

+ 1 - 0
src/package.json

@@ -452,6 +452,7 @@
 	"dependencies": {
 		"@ai-sdk/amazon-bedrock": "^4.0.51",
 		"@ai-sdk/anthropic": "^3.0.38",
+		"@ai-sdk/azure": "^2.0.6",
 		"@ai-sdk/baseten": "^1.0.31",
 		"@ai-sdk/deepseek": "^2.0.18",
 		"@ai-sdk/fireworks": "^2.0.32",

+ 7 - 1
src/shared/checkExistApiConfig.ts

@@ -5,11 +5,17 @@ export function checkExistKey(config: ProviderSettings | undefined) {
 		return false
 	}
 
-	// Special case for fake-ai, openai-codex, qwen-code, and roo providers which don't need any configuration.
+	// Special case for providers which don't need standard API key configuration.
 	if (config.apiProvider && ["fake-ai", "openai-codex", "qwen-code", "roo"].includes(config.apiProvider)) {
 		return true
 	}
 
+	// Azure supports managed identity / Entra ID auth (no API key needed).
+	// Consider it configured if a resource name, deployment name, or API key is set.
+	if (config.apiProvider === "azure") {
+		return !!(config.azureResourceName || config.azureDeploymentName || config.azureApiKey)
+	}
+
 	// Check all secret keys from the centralized SECRET_STATE_KEYS array.
 	// Filter out keys that are not part of ProviderSettings (global secrets are stored separately)
 	const providerSecretKeys = SECRET_STATE_KEYS.filter((key) => !GLOBAL_SECRET_KEYS.includes(key as any))

+ 9 - 0
webview-ui/src/components/settings/ApiOptions.tsx

@@ -66,6 +66,7 @@ import {
 
 import {
 	Anthropic,
+	Azure,
 	Baseten,
 	Bedrock,
 	DeepSeek,
@@ -526,6 +527,14 @@ const ApiOptions = ({
 						/>
 					)}
 
+					{selectedProvider === "azure" && (
+						<Azure
+							apiConfiguration={apiConfiguration}
+							setApiConfigurationField={setApiConfigurationField}
+							simplifySettings={fromWelcomeView}
+						/>
+					)}
+
 					{selectedProvider === "openai-codex" && (
 						<OpenAICodex
 							apiConfiguration={apiConfiguration}

+ 3 - 0
webview-ui/src/components/settings/constants.ts

@@ -2,6 +2,7 @@ import {
 	type ProviderName,
 	type ModelInfo,
 	anthropicModels,
+	azureModels,
 	bedrockModels,
 	deepSeekModels,
 	moonshotModels,
@@ -21,6 +22,7 @@ import {
 
 export const MODELS_BY_PROVIDER: Partial<Record<ProviderName, Record<string, ModelInfo>>> = {
 	anthropic: anthropicModels,
+	azure: azureModels,
 	bedrock: bedrockModels,
 	deepseek: deepSeekModels,
 	moonshot: moonshotModels,
@@ -41,6 +43,7 @@ export const MODELS_BY_PROVIDER: Partial<Record<ProviderName, Record<string, Mod
 export const PROVIDERS = [
 	{ value: "openrouter", label: "OpenRouter", proxy: false },
 	{ value: "anthropic", label: "Anthropic", proxy: false },
+	{ value: "azure", label: "Azure AI Foundry", proxy: false },
 	{ value: "gemini", label: "Google Gemini", proxy: false },
 	{ value: "deepseek", label: "DeepSeek", proxy: false },
 	{ value: "moonshot", label: "Moonshot", proxy: false },

+ 75 - 0
webview-ui/src/components/settings/providers/Azure.tsx

@@ -0,0 +1,75 @@
+import { useCallback } from "react"
+import { VSCodeTextField } from "@vscode/webview-ui-toolkit/react"
+
+import { type ProviderSettings, azureOpenAiDefaultApiVersion } from "@roo-code/types"
+
+import { useAppTranslation } from "@src/i18n/TranslationContext"
+
+import { inputEventTransform } from "../transforms"
+
+type AzureProps = {
+	apiConfiguration: ProviderSettings
+	setApiConfigurationField: (field: keyof ProviderSettings, value: ProviderSettings[keyof ProviderSettings]) => void
+	simplifySettings?: boolean
+}
+
+export const Azure = ({ apiConfiguration, setApiConfigurationField }: AzureProps) => {
+	const { t } = useAppTranslation()
+
+	const handleInputChange = useCallback(
+		<K extends keyof ProviderSettings, E>(
+			field: K,
+			transform: (event: E) => ProviderSettings[K] = inputEventTransform,
+		) =>
+			(event: E | Event) => {
+				setApiConfigurationField(field, transform(event as E))
+			},
+		[setApiConfigurationField],
+	)
+
+	return (
+		<>
+			<VSCodeTextField
+				value={apiConfiguration?.azureResourceName || ""}
+				onInput={handleInputChange("azureResourceName")}
+				placeholder={t("settings:placeholders.azureResourceName")}
+				className="w-full">
+				<label className="block font-medium mb-1">{t("settings:providers.azureResourceName")}</label>
+			</VSCodeTextField>
+			<div className="text-sm text-vscode-descriptionForeground -mt-2">
+				{t("settings:providers.azureResourceNameDescription")}
+			</div>
+			<VSCodeTextField
+				value={apiConfiguration?.azureDeploymentName || ""}
+				onInput={handleInputChange("azureDeploymentName")}
+				placeholder={t("settings:placeholders.azureDeploymentName")}
+				className="w-full">
+				<label className="block font-medium mb-1">{t("settings:providers.azureDeploymentName")}</label>
+			</VSCodeTextField>
+			<div className="text-sm text-vscode-descriptionForeground -mt-2">
+				{t("settings:providers.azureDeploymentNameDescription")}
+			</div>
+			<VSCodeTextField
+				value={apiConfiguration?.azureApiKey || ""}
+				type="password"
+				onInput={handleInputChange("azureApiKey")}
+				placeholder={t("settings:placeholders.apiKey")}
+				className="w-full">
+				<label className="block font-medium mb-1">{t("settings:providers.azureApiKey")}</label>
+			</VSCodeTextField>
+			<div className="text-sm text-vscode-descriptionForeground -mt-2">
+				{t("settings:providers.apiKeyStorageNotice")}
+			</div>
+			<VSCodeTextField
+				value={apiConfiguration?.azureApiVersion || ""}
+				onInput={handleInputChange("azureApiVersion")}
+				placeholder={`Default: ${azureOpenAiDefaultApiVersion}`}
+				className="w-full">
+				<label className="block font-medium mb-1">{t("settings:providers.azureApiVersion")}</label>
+			</VSCodeTextField>
+			<div className="text-sm text-vscode-descriptionForeground -mt-2">
+				{t("settings:providers.azureApiVersionDescription")}
+			</div>
+		</>
+	)
+}

+ 1 - 0
webview-ui/src/components/settings/providers/index.ts

@@ -1,4 +1,5 @@
 export { Anthropic } from "./Anthropic"
+export { Azure } from "./Azure"
 export { Bedrock } from "./Bedrock"
 export { DeepSeek } from "./DeepSeek"
 export { Gemini } from "./Gemini"

+ 6 - 0
webview-ui/src/components/settings/utils/providerModelConfig.ts

@@ -1,6 +1,7 @@
 import type { ProviderName, ModelInfo, ProviderSettings } from "@roo-code/types"
 import {
 	anthropicDefaultModelId,
+	azureDefaultModelId,
 	bedrockDefaultModelId,
 	deepSeekDefaultModelId,
 	moonshotDefaultModelId,
@@ -27,6 +28,10 @@ export interface ProviderServiceConfig {
 
 export const PROVIDER_SERVICE_CONFIG: Partial<Record<ProviderName, ProviderServiceConfig>> = {
 	anthropic: { serviceName: "Anthropic", serviceUrl: "https://console.anthropic.com" },
+	azure: {
+		serviceName: "Azure AI Foundry",
+		serviceUrl: "https://azure.microsoft.com/en-us/products/ai-foundry/models/openai",
+	},
 	bedrock: { serviceName: "Amazon Bedrock", serviceUrl: "https://aws.amazon.com/bedrock" },
 	deepseek: { serviceName: "DeepSeek", serviceUrl: "https://platform.deepseek.com" },
 	moonshot: { serviceName: "Moonshot", serviceUrl: "https://platform.moonshot.cn" },
@@ -51,6 +56,7 @@ export const PROVIDER_SERVICE_CONFIG: Partial<Record<ProviderName, ProviderServi
 
 export const PROVIDER_DEFAULT_MODEL_IDS: Partial<Record<ProviderName, string>> = {
 	anthropic: anthropicDefaultModelId,
+	azure: azureDefaultModelId,
 	bedrock: bedrockDefaultModelId,
 	deepseek: deepSeekDefaultModelId,
 	moonshot: moonshotDefaultModelId,

+ 11 - 0
webview-ui/src/components/ui/hooks/useSelectedModel.ts

@@ -23,6 +23,7 @@ import {
 	mainlandZAiModels,
 	fireworksModels,
 	basetenModels,
+	azureModels,
 	qwenCodeModels,
 	litellmDefaultModelInfo,
 	lMStudioDefaultModelInfo,
@@ -331,6 +332,16 @@ function getSelectedModel({
 			const info = routerModels["vercel-ai-gateway"]?.[id]
 			return { id, info }
 		}
+		case "azure": {
+			// apiModelId holds the base model selection (from model picker).
+			// azureDeploymentName is the deployment name sent to the Azure API.
+			// Only use apiModelId if it matches a known Azure model (prevents stale values from other providers).
+			const explicitModelId = apiConfiguration.apiModelId
+			const matchesAzureModel = explicitModelId && azureModels[explicitModelId as keyof typeof azureModels]
+			const id = matchesAzureModel ? explicitModelId : defaultModelId
+			const info = azureModels[id as keyof typeof azureModels]
+			return { id, info: info || undefined }
+		}
 		// case "anthropic":
 		// case "fake-ai":
 		default: {

+ 44 - 2
webview-ui/src/i18n/locales/ca/settings.json

@@ -309,7 +309,7 @@
 		"searchProviderPlaceholder": "Cerca proveïdors",
 		"noProviderMatchFound": "No s'han trobat proveïdors",
 		"noMatchFound": "No s'han trobat perfils coincidents",
-		"retiredProviderMessage": "Ho sentim, aquest proveïdor ja no és compatible. Hem vist molt pocs usuaris de Roo que realment l'utilitzaven i necessitem reduir l'abast del nostre codi per poder seguir avançant ràpidament i servir bé la nostra comunitat en aquest espai. Va ser una decisió molt difícil, però ens permet centrar-nos en el que més t'importa. Ho sabem, és una llàstima.",
+		"retiredProviderMessage": "Ho sentim, aquest proveïdor ja no és compatible. Hem vist que molt pocs usuaris de Roo l'utilitzaven realment i necessitem reduir la superfície del nostre codi perquè puguem seguir avançant ràpidament i servir bé a la nostra comunitat. Ha estat una decisió molt difícil, però ens permet centrar-nos en el que més t'importa. Ho sabem, és una llàstima.",
 		"vscodeLmDescription": "L'API del model de llenguatge de VS Code us permet executar models proporcionats per altres extensions de VS Code (incloent-hi, però no limitat a, GitHub Copilot). La manera més senzilla de començar és instal·lar les extensions Copilot i Copilot Chat des del VS Code Marketplace.",
 		"awsCustomArnUse": "Introduïu un ARN vàlid d'Amazon Bedrock per al model que voleu utilitzar. Exemples de format:",
 		"awsCustomArnDesc": "Assegureu-vos que la regió a l'ARN coincideix amb la regió d'AWS seleccionada anteriorment.",
@@ -378,10 +378,29 @@
 		"vertex1MContextBetaDescription": "Amplia la finestra de context a 1 milió de tokens per a Claude Sonnet 4 / 4.5 / Claude Opus 4.6",
 		"basetenApiKey": "Clau API de Baseten",
 		"getBasetenApiKey": "Obtenir clau API de Baseten",
+		"cerebrasApiKey": "Clau API de Cerebras",
+		"getCerebrasApiKey": "Obtenir clau API de Cerebras",
+		"azureResourceName": "Nom del recurs d'Azure",
+		"azureResourceNameDescription": "El nom del teu recurs Azure AI Foundry (p. ex. 'el-meu-recurs-openai').",
+		"azureDeploymentName": "Nom del desplegament d'Azure",
+		"azureDeploymentNameDescription": "El nom del desplegament del model dins del recurs.",
+		"azureApiKey": "Clau API d'Azure",
+		"getAzureApiKey": "Obtenir accés a Azure AI Foundry",
+		"azureApiVersion": "Versió de l'API d'Azure",
+		"azureApiVersionDescription": "La versió de l'API a utilitzar (p. ex. '2024-10-21'). Deixa buit per al valor per defecte.",
+		"chutesApiKey": "Clau API de Chutes",
+		"getChutesApiKey": "Obtenir clau API de Chutes",
 		"fireworksApiKey": "Clau API de Fireworks",
 		"getFireworksApiKey": "Obtenir clau API de Fireworks",
+		"featherlessApiKey": "Clau API de Featherless",
+		"getFeatherlessApiKey": "Obtenir clau API de Featherless",
+		"ioIntelligenceApiKey": "Clau API d'IO Intelligence",
+		"ioIntelligenceApiKeyPlaceholder": "Introdueix la teva clau API d'IO Intelligence",
+		"getIoIntelligenceApiKey": "Obtenir clau API d'IO Intelligence",
 		"deepSeekApiKey": "Clau API de DeepSeek",
 		"getDeepSeekApiKey": "Obtenir clau API de DeepSeek",
+		"doubaoApiKey": "Clau API de Doubao",
+		"getDoubaoApiKey": "Obtenir clau API de Doubao",
 		"moonshotApiKey": "Clau API de Moonshot",
 		"getMoonshotApiKey": "Obtenir clau API de Moonshot",
 		"moonshotBaseUrl": "Punt d'entrada de Moonshot",
@@ -393,8 +412,23 @@
 		"getMiniMaxApiKey": "Obtenir clau API de MiniMax",
 		"minimaxBaseUrl": "Punt d'entrada de MiniMax",
 		"geminiApiKey": "Clau API de Gemini",
+		"getGroqApiKey": "Obtenir clau API de Groq",
+		"groqApiKey": "Clau API de Groq",
 		"getSambaNovaApiKey": "Obtenir clau API de SambaNova",
 		"sambaNovaApiKey": "Clau API de SambaNova",
+		"getHuggingFaceApiKey": "Obtenir clau API de Hugging Face",
+		"huggingFaceApiKey": "Clau API de Hugging Face",
+		"huggingFaceModelId": "ID del model",
+		"huggingFaceLoading": "Carregant...",
+		"huggingFaceModelsCount": "({{count}} models)",
+		"huggingFaceSelectModel": "Selecciona un model...",
+		"huggingFaceSearchModels": "Cerca models...",
+		"huggingFaceNoModelsFound": "No s'han trobat models",
+		"huggingFaceProvider": "Proveïdor",
+		"huggingFaceProviderAuto": "Automàtic",
+		"huggingFaceSelectProvider": "Selecciona un proveïdor...",
+		"huggingFaceSearchProviders": "Cerca proveïdors...",
+		"huggingFaceNoProvidersFound": "No s'han trobat proveïdors",
 		"getGeminiApiKey": "Obtenir clau API de Gemini",
 		"openAiApiKey": "Clau API d'OpenAI",
 		"apiKey": "Clau API",
@@ -466,6 +500,10 @@
 			"description": "Ollama permet executar models localment al vostre ordinador. Per a instruccions sobre com començar, consulteu la Guia d'inici ràpid.",
 			"warning": "Nota: Roo Code utilitza prompts complexos i funciona millor amb models Claude. Els models menys capaços poden no funcionar com s'espera."
 		},
+		"unboundApiKey": "Clau API d'Unbound",
+		"getUnboundApiKey": "Obtenir clau API d'Unbound",
+		"unboundRefreshModelsSuccess": "Llista de models actualitzada! Ara podeu seleccionar entre els últims models.",
+		"unboundInvalidApiKey": "Clau API no vàlida. Si us plau, comproveu la vostra clau API i torneu-ho a provar.",
 		"roo": {
 			"authenticatedMessage": "Autenticat de forma segura a través del teu compte de Roo Code Cloud.",
 			"connectButton": "Connecta amb Roo Code Cloud"
@@ -921,7 +959,9 @@
 		"providerNotAllowed": "El proveïdor '{{provider}}' no està permès per la vostra organització",
 		"modelNotAllowed": "El model '{{model}}' no està permès per al proveïdor '{{provider}}' per la vostra organització",
 		"profileInvalid": "Aquest perfil conté un proveïdor o model que no està permès per la vostra organització",
-		"qwenCodeOauthPath": "Has de proporcionar una ruta vàlida de credencials OAuth"
+		"qwenCodeOauthPath": "Has de proporcionar una ruta vàlida de credencials OAuth",
+		"azureResourceName": "Has de proporcionar un nom de recurs d'Azure.",
+		"azureDeploymentName": "Has de proporcionar un nom de desplegament d'Azure."
 	},
 	"placeholders": {
 		"apiKey": "Introduïu la clau API...",
@@ -934,6 +974,8 @@
 		"projectId": "Introduïu l'ID del projecte...",
 		"customArn": "Introduïu l'ARN (p. ex. arn:aws:bedrock:us-east-1:123456789012:foundation-model/my-model)",
 		"baseUrl": "Introduïu l'URL base...",
+		"azureResourceName": "p. ex. my-openai-resource",
+		"azureDeploymentName": "p. ex. gpt-4o",
 		"modelId": {
 			"lmStudio": "p. ex. meta-llama-3.1-8b-instruct",
 			"lmStudioDraft": "p. ex. lmstudio-community/llama-3.2-1b-instruct",

+ 44 - 2
webview-ui/src/i18n/locales/de/settings.json

@@ -309,7 +309,7 @@
 		"searchProviderPlaceholder": "Suchanbieter durchsuchen",
 		"noProviderMatchFound": "Keine Anbieter gefunden",
 		"noMatchFound": "Keine passenden Profile gefunden",
-		"retiredProviderMessage": "Leider wird dieser Anbieter nicht mehr unterstützt. Wir haben festgestellt, dass nur sehr wenige Roo-Nutzer ihn tatsächlich verwendet haben, und wir müssen den Umfang unserer Codebasis reduzieren, damit wir weiterhin schnell liefern und unserer Community in diesem Bereich gut dienen können. Es war eine wirklich schwere Entscheidung, aber sie ermöglicht uns, uns auf das zu konzentrieren, was dir am wichtigsten ist. Es ist ärgerlich, das wissen wir.",
+		"retiredProviderMessage": "Tut uns leid, dieser Anbieter wird nicht mehr unterstützt. Wir haben gesehen, dass nur sehr wenige Roo-Nutzer ihn tatsächlich verwendet haben, und wir müssen den Umfang unserer Codebasis reduzieren, damit wir weiterhin schnell liefern und unserer Community in diesem Bereich gut dienen können. Es war eine wirklich schwere Entscheidung, aber sie lässt uns auf das konzentrieren, was dir am wichtigsten ist. Es ist ärgerlich, das wissen wir.",
 		"vscodeLmDescription": "Die VS Code Language Model API ermöglicht das Ausführen von Modellen, die von anderen VS Code-Erweiterungen bereitgestellt werden (einschließlich, aber nicht beschränkt auf GitHub Copilot). Der einfachste Weg, um zu starten, besteht darin, die Erweiterungen Copilot und Copilot Chat aus dem VS Code Marketplace zu installieren.",
 		"awsCustomArnUse": "Geben Sie eine gültige Amazon Bedrock ARN für das Modell ein, das Sie verwenden möchten. Formatbeispiele:",
 		"awsCustomArnDesc": "Stellen Sie sicher, dass die Region in der ARN mit Ihrer oben ausgewählten AWS-Region übereinstimmt.",
@@ -317,6 +317,8 @@
 		"getOpenRouterApiKey": "OpenRouter API-Schlüssel erhalten",
 		"vercelAiGatewayApiKey": "Vercel AI Gateway API-Schlüssel",
 		"getVercelAiGatewayApiKey": "Vercel AI Gateway API-Schlüssel erhalten",
+		"doubaoApiKey": "Doubao API-Schlüssel",
+		"getDoubaoApiKey": "Doubao API-Schlüssel erhalten",
 		"apiKeyStorageNotice": "API-Schlüssel werden sicher im VSCode Secret Storage gespeichert",
 		"openAiCodexRateLimits": {
 			"title": "Usage Limits for Codex{{planLabel}}",
@@ -378,8 +380,25 @@
 		"vertex1MContextBetaDescription": "Erweitert das Kontextfenster für Claude Sonnet 4 / 4.5 / Claude Opus 4.6 auf 1 Million Token",
 		"basetenApiKey": "Baseten API-Schlüssel",
 		"getBasetenApiKey": "Baseten API-Schlüssel erhalten",
+		"cerebrasApiKey": "Cerebras API-Schlüssel",
+		"getCerebrasApiKey": "Cerebras API-Schlüssel erhalten",
+		"azureResourceName": "Azure Ressourcenname",
+		"azureResourceNameDescription": "Der Name Ihrer Azure AI Foundry-Ressource (z.B. 'meine-openai-ressource').",
+		"azureDeploymentName": "Azure Bereitstellungsname",
+		"azureDeploymentNameDescription": "Der Name Ihrer Modellbereitstellung innerhalb der Ressource.",
+		"azureApiKey": "Azure API-Schlüssel",
+		"getAzureApiKey": "Azure AI Foundry-Zugang erhalten",
+		"azureApiVersion": "Azure API-Version",
+		"azureApiVersionDescription": "Die zu verwendende API-Version (z.B. '2024-10-21'). Leer lassen für Standardwert.",
+		"chutesApiKey": "Chutes API-Schlüssel",
+		"getChutesApiKey": "Chutes API-Schlüssel erhalten",
 		"fireworksApiKey": "Fireworks API-Schlüssel",
 		"getFireworksApiKey": "Fireworks API-Schlüssel erhalten",
+		"featherlessApiKey": "Featherless API-Schlüssel",
+		"getFeatherlessApiKey": "Featherless API-Schlüssel erhalten",
+		"ioIntelligenceApiKey": "IO Intelligence API-Schlüssel",
+		"ioIntelligenceApiKeyPlaceholder": "Gib deinen IO Intelligence API-Schlüssel ein",
+		"getIoIntelligenceApiKey": "IO Intelligence API-Schlüssel erhalten",
 		"deepSeekApiKey": "DeepSeek API-Schlüssel",
 		"getDeepSeekApiKey": "DeepSeek API-Schlüssel erhalten",
 		"moonshotApiKey": "Moonshot API-Schlüssel",
@@ -393,8 +412,23 @@
 		"getMiniMaxApiKey": "MiniMax API-Schlüssel erhalten",
 		"minimaxBaseUrl": "MiniMax-Einstiegspunkt",
 		"geminiApiKey": "Gemini API-Schlüssel",
+		"getGroqApiKey": "Groq API-Schlüssel erhalten",
+		"groqApiKey": "Groq API-Schlüssel",
 		"getSambaNovaApiKey": "SambaNova API-Schlüssel erhalten",
 		"sambaNovaApiKey": "SambaNova API-Schlüssel",
+		"getHuggingFaceApiKey": "Hugging Face API-Schlüssel erhalten",
+		"huggingFaceApiKey": "Hugging Face API-Schlüssel",
+		"huggingFaceModelId": "Modell-ID",
+		"huggingFaceLoading": "Lädt...",
+		"huggingFaceModelsCount": "({{count}} Modelle)",
+		"huggingFaceSelectModel": "Modell auswählen...",
+		"huggingFaceSearchModels": "Modelle durchsuchen...",
+		"huggingFaceNoModelsFound": "Keine Modelle gefunden",
+		"huggingFaceProvider": "Anbieter",
+		"huggingFaceProviderAuto": "Automatisch",
+		"huggingFaceSelectProvider": "Anbieter auswählen...",
+		"huggingFaceSearchProviders": "Anbieter durchsuchen...",
+		"huggingFaceNoProvidersFound": "Keine Anbieter gefunden",
 		"getGeminiApiKey": "Gemini API-Schlüssel erhalten",
 		"openAiApiKey": "OpenAI API-Schlüssel",
 		"apiKey": "API-Schlüssel",
@@ -466,6 +500,10 @@
 			"description": "Ollama ermöglicht es dir, Modelle lokal auf deinem Computer auszuführen. Eine Anleitung zum Einstieg findest du im Schnellstart-Guide.",
 			"warning": "Hinweis: Roo Code verwendet komplexe Prompts und funktioniert am besten mit Claude-Modellen. Weniger leistungsfähige Modelle funktionieren möglicherweise nicht wie erwartet."
 		},
+		"unboundApiKey": "Unbound API-Schlüssel",
+		"getUnboundApiKey": "Unbound API-Schlüssel erhalten",
+		"unboundRefreshModelsSuccess": "Modellliste aktualisiert! Sie können jetzt aus den neuesten Modellen auswählen.",
+		"unboundInvalidApiKey": "Ungültiger API-Schlüssel. Bitte überprüfen Sie Ihren API-Schlüssel und versuchen Sie es erneut.",
 		"roo": {
 			"authenticatedMessage": "Sicher authentifiziert über dein Roo Code Cloud-Konto.",
 			"connectButton": "Mit Roo Code Cloud verbinden"
@@ -921,7 +959,9 @@
 		"providerNotAllowed": "Anbieter '{{provider}}' ist von deiner Organisation nicht erlaubt",
 		"modelNotAllowed": "Modell '{{model}}' ist für Anbieter '{{provider}}' von deiner Organisation nicht erlaubt",
 		"profileInvalid": "Dieses Profil enthält einen Anbieter oder ein Modell, das von deiner Organisation nicht erlaubt ist",
-		"qwenCodeOauthPath": "Du musst einen gültigen OAuth-Anmeldedaten-Pfad angeben"
+		"qwenCodeOauthPath": "Du musst einen gültigen OAuth-Anmeldedaten-Pfad angeben",
+		"azureResourceName": "Du musst einen Azure-Ressourcennamen angeben.",
+		"azureDeploymentName": "Du musst einen Azure-Bereitstellungsnamen angeben."
 	},
 	"placeholders": {
 		"apiKey": "API-Schlüssel eingeben...",
@@ -934,6 +974,8 @@
 		"projectId": "Projekt-ID eingeben...",
 		"customArn": "ARN eingeben (z.B. arn:aws:bedrock:us-east-1:123456789012:foundation-model/my-model)",
 		"baseUrl": "Basis-URL eingeben...",
+		"azureResourceName": "z.B. my-openai-resource",
+		"azureDeploymentName": "z.B. gpt-4o",
 		"modelId": {
 			"lmStudio": "z.B. meta-llama-3.1-8b-instruct",
 			"lmStudioDraft": "z.B. lmstudio-community/llama-3.2-1b-instruct",

+ 17 - 1
webview-ui/src/i18n/locales/en/settings.json

@@ -445,6 +445,18 @@
 		"vertex1MContextBetaDescription": "Extends context window to 1 million tokens for Claude Sonnet 4 / 4.5 / Claude Opus 4.6",
 		"basetenApiKey": "Baseten API Key",
 		"getBasetenApiKey": "Get Baseten API Key",
+		"cerebrasApiKey": "Cerebras API Key",
+		"getCerebrasApiKey": "Get Cerebras API Key",
+		"azureResourceName": "Azure Resource Name",
+		"azureResourceNameDescription": "The name of your Azure AI Foundry resource (e.g., 'my-openai-resource').",
+		"azureDeploymentName": "Azure Deployment Name",
+		"azureDeploymentNameDescription": "The name of your model deployment within the resource.",
+		"azureApiKey": "Azure API Key",
+		"getAzureApiKey": "Get Azure AI Foundry Access",
+		"azureApiVersion": "Azure API Version",
+		"azureApiVersionDescription": "The API version to use (e.g., '2024-10-21'). Leave empty for the default.",
+		"chutesApiKey": "Chutes API Key",
+		"getChutesApiKey": "Get Chutes API Key",
 		"fireworksApiKey": "Fireworks API Key",
 		"getFireworksApiKey": "Get Fireworks API Key",
 		"deepSeekApiKey": "DeepSeek API Key",
@@ -988,7 +1000,9 @@
 		"providerNotAllowed": "Provider '{{provider}}' is not allowed by your organization",
 		"modelNotAllowed": "Model '{{model}}' is not allowed for provider '{{provider}}' by your organization",
 		"profileInvalid": "This profile contains a provider or model that is not allowed by your organization",
-		"qwenCodeOauthPath": "You must provide a valid OAuth credentials path."
+		"qwenCodeOauthPath": "You must provide a valid OAuth credentials path.",
+		"azureResourceName": "You must provide an Azure resource name.",
+		"azureDeploymentName": "You must provide an Azure deployment name."
 	},
 	"placeholders": {
 		"apiKey": "Enter API Key...",
@@ -1001,6 +1015,8 @@
 		"projectId": "Enter Project ID...",
 		"customArn": "Enter ARN (e.g. arn:aws:bedrock:us-east-1:123456789012:foundation-model/my-model)",
 		"baseUrl": "Enter base URL...",
+		"azureResourceName": "e.g. my-openai-resource",
+		"azureDeploymentName": "e.g. gpt-4o",
 		"modelId": {
 			"lmStudio": "e.g. meta-llama-3.1-8b-instruct",
 			"lmStudioDraft": "e.g. lmstudio-community/llama-3.2-1b-instruct",

+ 44 - 2
webview-ui/src/i18n/locales/es/settings.json

@@ -309,7 +309,7 @@
 		"searchProviderPlaceholder": "Buscar proveedores",
 		"noProviderMatchFound": "No se encontraron proveedores",
 		"noMatchFound": "No se encontraron perfiles coincidentes",
-		"retiredProviderMessage": "Lo sentimos, este proveedor ya no es compatible. Vimos que muy pocos usuarios de Roo lo usaban realmente y necesitamos reducir el alcance de nuestro código para poder seguir avanzando rápido y servir bien a nuestra comunidad en este espacio. Fue una decisión muy difícil, pero nos permite enfocarnos en lo que más te importa. Lo sabemos, es una lástima.",
+		"retiredProviderMessage": "Lo sentimos, este proveedor ya no es compatible. Vimos que muy pocos usuarios de Roo lo usaban realmente y necesitamos reducir el alcance de nuestro código para poder seguir avanzando rápido y servir bien a nuestra comunidad. Fue una decisión muy difícil, pero nos permite centrarnos en lo que más te importa. Lo sabemos, es una lástima.",
 		"vscodeLmDescription": "La API del Modelo de Lenguaje de VS Code le permite ejecutar modelos proporcionados por otras extensiones de VS Code (incluido, entre otros, GitHub Copilot). La forma más sencilla de empezar es instalar las extensiones Copilot y Copilot Chat desde el VS Code Marketplace.",
 		"awsCustomArnUse": "Ingrese un ARN de Amazon Bedrock válido para el modelo que desea utilizar. Ejemplos de formato:",
 		"awsCustomArnDesc": "Asegúrese de que la región en el ARN coincida con la región de AWS seleccionada anteriormente.",
@@ -378,10 +378,29 @@
 		"vertex1MContextBetaDescription": "Amplía la ventana de contexto a 1 millón de tokens para Claude Sonnet 4 / 4.5 / Claude Opus 4.6",
 		"basetenApiKey": "Clave API de Baseten",
 		"getBasetenApiKey": "Obtener clave API de Baseten",
+		"cerebrasApiKey": "Clave API de Cerebras",
+		"getCerebrasApiKey": "Obtener clave API de Cerebras",
+		"azureResourceName": "Nombre del recurso Azure",
+		"azureResourceNameDescription": "El nombre de tu recurso Azure AI Foundry (ej. 'mi-recurso-openai').",
+		"azureDeploymentName": "Nombre del despliegue Azure",
+		"azureDeploymentNameDescription": "El nombre del despliegue de tu modelo dentro del recurso.",
+		"azureApiKey": "Clave API de Azure",
+		"getAzureApiKey": "Obtener acceso a Azure AI Foundry",
+		"azureApiVersion": "Versión de API de Azure",
+		"azureApiVersionDescription": "La versión de la API a usar (ej. '2024-10-21'). Dejar vacío para usar el valor predeterminado.",
+		"chutesApiKey": "Clave API de Chutes",
+		"getChutesApiKey": "Obtener clave API de Chutes",
 		"fireworksApiKey": "Clave API de Fireworks",
 		"getFireworksApiKey": "Obtener clave API de Fireworks",
+		"featherlessApiKey": "Clave API de Featherless",
+		"getFeatherlessApiKey": "Obtener clave API de Featherless",
+		"ioIntelligenceApiKey": "Clave API de IO Intelligence",
+		"ioIntelligenceApiKeyPlaceholder": "Introduce tu clave de API de IO Intelligence",
+		"getIoIntelligenceApiKey": "Obtener clave API de IO Intelligence",
 		"deepSeekApiKey": "Clave API de DeepSeek",
 		"getDeepSeekApiKey": "Obtener clave API de DeepSeek",
+		"doubaoApiKey": "Clave API de Doubao",
+		"getDoubaoApiKey": "Obtener clave API de Doubao",
 		"moonshotApiKey": "Clave API de Moonshot",
 		"getMoonshotApiKey": "Obtener clave API de Moonshot",
 		"moonshotBaseUrl": "Punto de entrada de Moonshot",
@@ -393,8 +412,23 @@
 		"getMiniMaxApiKey": "Obtener clave API de MiniMax",
 		"minimaxBaseUrl": "Punto de entrada de MiniMax",
 		"geminiApiKey": "Clave API de Gemini",
+		"getGroqApiKey": "Obtener clave API de Groq",
+		"groqApiKey": "Clave API de Groq",
 		"getSambaNovaApiKey": "Obtener clave API de SambaNova",
 		"sambaNovaApiKey": "Clave API de SambaNova",
+		"getHuggingFaceApiKey": "Obtener clave API de Hugging Face",
+		"huggingFaceApiKey": "Clave API de Hugging Face",
+		"huggingFaceModelId": "ID del modelo",
+		"huggingFaceLoading": "Cargando...",
+		"huggingFaceModelsCount": "({{count}} modelos)",
+		"huggingFaceSelectModel": "Seleccionar un modelo...",
+		"huggingFaceSearchModels": "Buscar modelos...",
+		"huggingFaceNoModelsFound": "No se encontraron modelos",
+		"huggingFaceProvider": "Proveedor",
+		"huggingFaceProviderAuto": "Automático",
+		"huggingFaceSelectProvider": "Seleccionar un proveedor...",
+		"huggingFaceSearchProviders": "Buscar proveedores...",
+		"huggingFaceNoProvidersFound": "No se encontraron proveedores",
 		"getGeminiApiKey": "Obtener clave API de Gemini",
 		"openAiApiKey": "Clave API de OpenAI",
 		"apiKey": "Clave API",
@@ -466,6 +500,10 @@
 			"description": "Ollama le permite ejecutar modelos localmente en su computadora. Para obtener instrucciones sobre cómo comenzar, consulte la guía de inicio rápido.",
 			"warning": "Nota: Roo Code utiliza prompts complejos y funciona mejor con modelos Claude. Los modelos menos capaces pueden no funcionar como se espera."
 		},
+		"unboundApiKey": "Clave API de Unbound",
+		"getUnboundApiKey": "Obtener clave API de Unbound",
+		"unboundRefreshModelsSuccess": "¡Lista de modelos actualizada! Ahora puede seleccionar entre los últimos modelos.",
+		"unboundInvalidApiKey": "Clave API inválida. Por favor, verifique su clave API e inténtelo de nuevo.",
 		"roo": {
 			"authenticatedMessage": "Autenticado de forma segura a través de tu cuenta de Roo Code Cloud.",
 			"connectButton": "Conectar a Roo Code Cloud"
@@ -921,7 +959,9 @@
 		"providerNotAllowed": "El proveedor '{{provider}}' no está permitido por su organización",
 		"modelNotAllowed": "El modelo '{{model}}' no está permitido para el proveedor '{{provider}}' por su organización",
 		"profileInvalid": "Este perfil contiene un proveedor o modelo que no está permitido por su organización",
-		"qwenCodeOauthPath": "Debes proporcionar una ruta válida de credenciales OAuth"
+		"qwenCodeOauthPath": "Debes proporcionar una ruta válida de credenciales OAuth",
+		"azureResourceName": "Debes proporcionar un nombre de recurso de Azure.",
+		"azureDeploymentName": "Debes proporcionar un nombre de despliegue de Azure."
 	},
 	"placeholders": {
 		"apiKey": "Ingrese clave API...",
@@ -934,6 +974,8 @@
 		"projectId": "Ingrese ID del proyecto...",
 		"customArn": "Ingrese ARN (ej. arn:aws:bedrock:us-east-1:123456789012:foundation-model/my-model)",
 		"baseUrl": "Ingrese URL base...",
+		"azureResourceName": "ej. my-openai-resource",
+		"azureDeploymentName": "ej. gpt-4o",
 		"modelId": {
 			"lmStudio": "ej. meta-llama-3.1-8b-instruct",
 			"lmStudioDraft": "ej. lmstudio-community/llama-3.2-1b-instruct",

+ 44 - 2
webview-ui/src/i18n/locales/fr/settings.json

@@ -309,7 +309,7 @@
 		"searchProviderPlaceholder": "Rechercher des fournisseurs",
 		"noProviderMatchFound": "Aucun fournisseur trouvé",
 		"noMatchFound": "Aucun profil correspondant trouvé",
-		"retiredProviderMessage": "Désolé, ce fournisseur n'est plus pris en charge. Nous avons constaté que très peu d'utilisateurs de Roo l'utilisaient réellement et nous devons réduire la portée de notre code pour continuer à avancer rapidement et bien servir notre communauté. C'était une décision vraiment difficile, mais elle nous permet de nous concentrer sur ce qui compte le plus pour toi. C'est rageant, on le sait.",
+		"retiredProviderMessage": "Désolé, ce fournisseur n'est plus pris en charge. Nous avons constaté que très peu d'utilisateurs de Roo l'utilisaient réellement et nous devons réduire la surface de notre base de code pour continuer à avancer rapidement et bien servir notre communauté. C'était une décision vraiment difficile, mais elle nous permet de nous concentrer sur ce qui compte le plus pour toi. C'est nul, on sait.",
 		"vscodeLmDescription": "L'API du modèle de langage VS Code vous permet d'exécuter des modèles fournis par d'autres extensions VS Code (y compris, mais sans s'y limiter, GitHub Copilot). Le moyen le plus simple de commencer est d'installer les extensions Copilot et Copilot Chat depuis le VS Code Marketplace.",
 		"awsCustomArnUse": "Entrez un ARN Amazon Bedrock valide pour le modèle que vous souhaitez utiliser. Exemples de format :",
 		"awsCustomArnDesc": "Assurez-vous que la région dans l'ARN correspond à la région AWS sélectionnée ci-dessus.",
@@ -378,10 +378,29 @@
 		"vertex1MContextBetaDescription": "Étend la fenêtre de contexte à 1 million de tokens pour Claude Sonnet 4 / 4.5 / Claude Opus 4.6",
 		"basetenApiKey": "Clé API Baseten",
 		"getBasetenApiKey": "Obtenir la clé API Baseten",
+		"cerebrasApiKey": "Clé API Cerebras",
+		"getCerebrasApiKey": "Obtenir la clé API Cerebras",
+		"azureResourceName": "Nom de la ressource Azure",
+		"azureResourceNameDescription": "Le nom de votre ressource Azure AI Foundry (ex. 'ma-ressource-openai').",
+		"azureDeploymentName": "Nom du déploiement Azure",
+		"azureDeploymentNameDescription": "Le nom du déploiement de votre modèle dans la ressource.",
+		"azureApiKey": "Clé API Azure",
+		"getAzureApiKey": "Obtenir l'accès Azure AI Foundry",
+		"azureApiVersion": "Version de l'API Azure",
+		"azureApiVersionDescription": "La version de l'API à utiliser (ex. '2024-10-21'). Laisser vide pour la valeur par défaut.",
+		"chutesApiKey": "Clé API Chutes",
+		"getChutesApiKey": "Obtenir la clé API Chutes",
 		"fireworksApiKey": "Clé API Fireworks",
 		"getFireworksApiKey": "Obtenir la clé API Fireworks",
+		"featherlessApiKey": "Clé API Featherless",
+		"getFeatherlessApiKey": "Obtenir la clé API Featherless",
+		"ioIntelligenceApiKey": "Clé API IO Intelligence",
+		"ioIntelligenceApiKeyPlaceholder": "Saisissez votre clé d'API IO Intelligence",
+		"getIoIntelligenceApiKey": "Obtenir la clé API IO Intelligence",
 		"deepSeekApiKey": "Clé API DeepSeek",
 		"getDeepSeekApiKey": "Obtenir la clé API DeepSeek",
+		"doubaoApiKey": "Clé API Doubao",
+		"getDoubaoApiKey": "Obtenir la clé API Doubao",
 		"moonshotApiKey": "Clé API Moonshot",
 		"getMoonshotApiKey": "Obtenir la clé API Moonshot",
 		"moonshotBaseUrl": "Point d'entrée Moonshot",
@@ -393,8 +412,23 @@
 		"getMiniMaxApiKey": "Obtenir la clé API MiniMax",
 		"minimaxBaseUrl": "Point d'entrée MiniMax",
 		"geminiApiKey": "Clé API Gemini",
+		"getGroqApiKey": "Obtenir la clé API Groq",
+		"groqApiKey": "Clé API Groq",
 		"getSambaNovaApiKey": "Obtenir la clé API SambaNova",
 		"sambaNovaApiKey": "Clé API SambaNova",
+		"getHuggingFaceApiKey": "Obtenir la clé API Hugging Face",
+		"huggingFaceApiKey": "Clé API Hugging Face",
+		"huggingFaceModelId": "ID du modèle",
+		"huggingFaceLoading": "Chargement...",
+		"huggingFaceModelsCount": "({{count}} modèles)",
+		"huggingFaceSelectModel": "Sélectionner un modèle...",
+		"huggingFaceSearchModels": "Rechercher des modèles...",
+		"huggingFaceNoModelsFound": "Aucun modèle trouvé",
+		"huggingFaceProvider": "Fournisseur",
+		"huggingFaceProviderAuto": "Automatique",
+		"huggingFaceSelectProvider": "Sélectionner un fournisseur...",
+		"huggingFaceSearchProviders": "Rechercher des fournisseurs...",
+		"huggingFaceNoProvidersFound": "Aucun fournisseur trouvé",
 		"getGeminiApiKey": "Obtenir la clé API Gemini",
 		"openAiApiKey": "Clé API OpenAI",
 		"apiKey": "Clé API",
@@ -466,6 +500,10 @@
 			"description": "Ollama vous permet d'exécuter des modèles localement sur votre ordinateur. Pour obtenir des instructions sur la mise en route, consultez le guide de démarrage rapide.",
 			"warning": "Remarque : Roo Code utilise des prompts complexes et fonctionne mieux avec les modèles Claude. Les modèles moins performants peuvent ne pas fonctionner comme prévu."
 		},
+		"unboundApiKey": "Clé API Unbound",
+		"getUnboundApiKey": "Obtenir la clé API Unbound",
+		"unboundRefreshModelsSuccess": "Liste des modèles mise à jour ! Vous pouvez maintenant sélectionner parmi les derniers modèles.",
+		"unboundInvalidApiKey": "Clé API invalide. Veuillez vérifier votre clé API et réessayer.",
 		"roo": {
 			"authenticatedMessage": "Authentifié de manière sécurisée via ton compte Roo Code Cloud.",
 			"connectButton": "Se connecter à Roo Code Cloud"
@@ -921,7 +959,9 @@
 		"providerNotAllowed": "Le fournisseur '{{provider}}' n'est pas autorisé par votre organisation",
 		"modelNotAllowed": "Le modèle '{{model}}' n'est pas autorisé pour le fournisseur '{{provider}}' par votre organisation",
 		"profileInvalid": "Ce profil contient un fournisseur ou un modèle qui n'est pas autorisé par votre organisation",
-		"qwenCodeOauthPath": "Tu dois fournir un chemin valide pour les identifiants OAuth"
+		"qwenCodeOauthPath": "Tu dois fournir un chemin valide pour les identifiants OAuth",
+		"azureResourceName": "Tu dois fournir un nom de ressource Azure.",
+		"azureDeploymentName": "Tu dois fournir un nom de déploiement Azure."
 	},
 	"placeholders": {
 		"apiKey": "Saisissez la clé API...",
@@ -934,6 +974,8 @@
 		"projectId": "Saisissez l'ID du projet...",
 		"customArn": "Saisissez l'ARN (ex. arn:aws:bedrock:us-east-1:123456789012:foundation-model/my-model)",
 		"baseUrl": "Saisissez l'URL de base...",
+		"azureResourceName": "ex. my-openai-resource",
+		"azureDeploymentName": "ex. gpt-4o",
 		"modelId": {
 			"lmStudio": "ex. meta-llama-3.1-8b-instruct",
 			"lmStudioDraft": "ex. lmstudio-community/llama-3.2-1b-instruct",

+ 44 - 2
webview-ui/src/i18n/locales/hi/settings.json

@@ -309,7 +309,7 @@
 		"searchProviderPlaceholder": "प्रदाता खोजें",
 		"noProviderMatchFound": "कोई प्रदाता नहीं मिला",
 		"noMatchFound": "कोई मिलान प्रोफ़ाइल नहीं मिला",
-		"retiredProviderMessage": "क्षमा करें, यह प्रदाता अब समर्थित नहीं है। हमने देखा कि बहुत कम Roo उपयोगकर्ता वास्तव में इसका उपयोग कर रहे थे और हमें अपने कोडबेस का दायरा कम करने की आवश्यकता है ताकि हम तेज़ी से काम करना और अपने समुदाय की अच्छी सेवा करना जारी रख सकें। यह वाकई एक कठिन निर्णय था, लेकिन इससे हम उस पर ध्यान केंद्रित कर सकते हैं जो तुम्हारे लिए सबसे ज़्यादा मायने रखता है। हम जानते हैं, यह बुरा लगता है।",
+		"retiredProviderMessage": "क्षमा करें, यह प्रदाता अब समर्थित नहीं है। हमने देखा कि बहुत कम Roo उपयोगकर्ता वास्तव में इसका उपयोग कर रहे थे और हमें अपने कोडबेस के दायरे को कम करने की जरूरत है ताकि हम तेजी से आगे बढ़ सकें और अपने समुदाय की बेहतर सेवा कर सकें। यह वाकई एक कठिन निर्णय था लेकिन यह हमें उन चीजों पर ध्यान केंद्रित करने देता है जो तुम्हारे लिए सबसे ज्यादा मायने रखती हैं। हम जानते हैं, यह बुरा लगता है।",
 		"vscodeLmDescription": "VS कोड भाषा मॉडल API आपको अन्य VS कोड एक्सटेंशन (जैसे GitHub Copilot) द्वारा प्रदान किए गए मॉडल चलाने की अनुमति देता है। शुरू करने का सबसे आसान तरीका VS कोड मार्केटप्लेस से Copilot और Copilot चैट एक्सटेंशन इंस्टॉल करना है।",
 		"awsCustomArnUse": "आप जिस मॉडल का उपयोग करना चाहते हैं, उसके लिए एक वैध Amazon बेडरॉक ARN दर्ज करें। प्रारूप उदाहरण:",
 		"awsCustomArnDesc": "सुनिश्चित करें कि ARN में क्षेत्र ऊपर चयनित AWS क्षेत्र से मेल खाता है।",
@@ -378,10 +378,29 @@
 		"vertex1MContextBetaDescription": "Claude Sonnet 4 / 4.5 / Claude Opus 4.6 के लिए संदर्भ विंडो को 1 मिलियन टोकन तक बढ़ाता है",
 		"basetenApiKey": "Baseten API कुंजी",
 		"getBasetenApiKey": "Baseten API कुंजी प्राप्त करें",
+		"cerebrasApiKey": "Cerebras API कुंजी",
+		"getCerebrasApiKey": "Cerebras API कुंजी प्राप्त करें",
+		"azureResourceName": "Azure रिसोर्स नाम",
+		"azureResourceNameDescription": "आपके Azure AI Foundry रिसोर्स का नाम (उदा. 'my-openai-resource')।",
+		"azureDeploymentName": "Azure डिप्लॉयमेंट नाम",
+		"azureDeploymentNameDescription": "रिसोर्स के अंदर आपके मॉडल डिप्लॉयमेंट का नाम।",
+		"azureApiKey": "Azure API कुंजी",
+		"getAzureApiKey": "Azure AI Foundry एक्सेस प्राप्त करें",
+		"azureApiVersion": "Azure API संस्करण",
+		"azureApiVersionDescription": "उपयोग करने के लिए API संस्करण (उदा. '2024-10-21')। डिफ़ॉल्ट के लिए खाली छोड़ें।",
+		"chutesApiKey": "Chutes API कुंजी",
+		"getChutesApiKey": "Chutes API कुंजी प्राप्त करें",
 		"fireworksApiKey": "Fireworks API कुंजी",
 		"getFireworksApiKey": "Fireworks API कुंजी प्राप्त करें",
+		"featherlessApiKey": "Featherless API कुंजी",
+		"getFeatherlessApiKey": "Featherless API कुंजी प्राप्त करें",
+		"ioIntelligenceApiKey": "IO Intelligence API कुंजी",
+		"ioIntelligenceApiKeyPlaceholder": "अपनी IO Intelligence API कुंजी दर्ज करें",
+		"getIoIntelligenceApiKey": "IO Intelligence API कुंजी प्राप्त करें",
 		"deepSeekApiKey": "DeepSeek API कुंजी",
 		"getDeepSeekApiKey": "DeepSeek API कुंजी प्राप्त करें",
+		"doubaoApiKey": "डौबाओ API कुंजी",
+		"getDoubaoApiKey": "डौबाओ API कुंजी प्राप्त करें",
 		"moonshotApiKey": "Moonshot API कुंजी",
 		"getMoonshotApiKey": "Moonshot API कुंजी प्राप्त करें",
 		"moonshotBaseUrl": "Moonshot प्रवेश बिंदु",
@@ -393,8 +412,23 @@
 		"getMiniMaxApiKey": "MiniMax API कुंजी प्राप्त करें",
 		"minimaxBaseUrl": "MiniMax प्रवेश बिंदु",
 		"geminiApiKey": "Gemini API कुंजी",
+		"getGroqApiKey": "Groq API कुंजी प्राप्त करें",
+		"groqApiKey": "Groq API कुंजी",
 		"getSambaNovaApiKey": "SambaNova API कुंजी प्राप्त करें",
 		"sambaNovaApiKey": "SambaNova API कुंजी",
+		"getHuggingFaceApiKey": "Hugging Face API कुंजी प्राप्त करें",
+		"huggingFaceApiKey": "Hugging Face API कुंजी",
+		"huggingFaceModelId": "मॉडल ID",
+		"huggingFaceLoading": "लोड हो रहा है...",
+		"huggingFaceModelsCount": "({{count}} मॉडल)",
+		"huggingFaceSelectModel": "एक मॉडल चुनें...",
+		"huggingFaceSearchModels": "मॉडल खोजें...",
+		"huggingFaceNoModelsFound": "कोई मॉडल नहीं मिला",
+		"huggingFaceProvider": "प्रदाता",
+		"huggingFaceProviderAuto": "स्वचालित",
+		"huggingFaceSelectProvider": "एक प्रदाता चुनें...",
+		"huggingFaceSearchProviders": "प्रदाता खोजें...",
+		"huggingFaceNoProvidersFound": "कोई प्रदाता नहीं मिला",
 		"getGeminiApiKey": "Gemini API कुंजी प्राप्त करें",
 		"openAiApiKey": "OpenAI API कुंजी",
 		"apiKey": "API कुंजी",
@@ -466,6 +500,10 @@
 			"description": "Ollama आपको अपने कंप्यूटर पर स्थानीय रूप से मॉडल चलाने की अनुमति देता है। आरंभ करने के निर्देशों के लिए, उनकी क्विकस्टार्ट गाइड देखें।",
 			"warning": "नोट: Roo Code जटिल प्रॉम्प्ट्स का उपयोग करता है और Claude मॉडल के साथ सबसे अच्छा काम करता है। कम क्षमता वाले मॉडल अपेक्षित रूप से काम नहीं कर सकते हैं।"
 		},
+		"unboundApiKey": "Unbound API कुंजी",
+		"getUnboundApiKey": "Unbound API कुंजी प्राप्त करें",
+		"unboundRefreshModelsSuccess": "मॉडल सूची अपडेट हो गई है! अब आप नवीनतम मॉडलों में से चुन सकते हैं।",
+		"unboundInvalidApiKey": "अमान्य API कुंजी। कृपया अपनी API कुंजी की जांच करें और पुनः प्रयास करें।",
 		"roo": {
 			"authenticatedMessage": "आपके Roo Code Cloud खाते के माध्यम से सुरक्षित रूप से प्रमाणित।",
 			"connectButton": "Roo Code Cloud से कनेक्ट करें"
@@ -921,7 +959,9 @@
 		"providerNotAllowed": "प्रदाता '{{provider}}' आपके संगठन द्वारा अनुमत नहीं है",
 		"modelNotAllowed": "मॉडल '{{model}}' प्रदाता '{{provider}}' के लिए आपके संगठन द्वारा अनुमत नहीं है",
 		"profileInvalid": "इस प्रोफ़ाइल में एक प्रदाता या मॉडल शामिल है जो आपके संगठन द्वारा अनुमत नहीं है",
-		"qwenCodeOauthPath": "आपको एक वैध OAuth क्रेडेंशियल पथ प्रदान करना होगा"
+		"qwenCodeOauthPath": "आपको एक वैध OAuth क्रेडेंशियल पथ प्रदान करना होगा",
+		"azureResourceName": "आपको एक Azure संसाधन नाम प्रदान करना होगा।",
+		"azureDeploymentName": "आपको एक Azure परिनियोजन नाम प्रदान करना होगा।"
 	},
 	"placeholders": {
 		"apiKey": "API कुंजी दर्ज करें...",
@@ -934,6 +974,8 @@
 		"projectId": "प्रोजेक्ट ID दर्ज करें...",
 		"customArn": "ARN दर्ज करें (उदा. arn:aws:bedrock:us-east-1:123456789012:foundation-model/my-model)",
 		"baseUrl": "बेस URL दर्ज करें...",
+		"azureResourceName": "उदा. my-openai-resource",
+		"azureDeploymentName": "उदा. gpt-4o",
 		"modelId": {
 			"lmStudio": "उदा. meta-llama-3.1-8b-instruct",
 			"lmStudioDraft": "उदा. lmstudio-community/llama-3.2-1b-instruct",

+ 44 - 2
webview-ui/src/i18n/locales/id/settings.json

@@ -309,7 +309,7 @@
 		"searchProviderPlaceholder": "Cari penyedia",
 		"noProviderMatchFound": "Tidak ada penyedia ditemukan",
 		"noMatchFound": "Tidak ada profil yang cocok ditemukan",
-		"retiredProviderMessage": "Maaf, penyedia ini tidak lagi didukung. Kami melihat sangat sedikit pengguna Roo yang benar-benar menggunakannya dan kami perlu mengurangi cakupan kode kami agar bisa terus bergerak cepat dan melayani komunitas kami dengan baik. Ini adalah keputusan yang sangat sulit, tapi ini memungkinkan kami fokus pada apa yang paling penting bagimu. Memang menyebalkan, kami tahu.",
+		"retiredProviderMessage": "Maaf, penyedia ini tidak lagi didukung. Kami melihat sangat sedikit pengguna Roo yang benar-benar menggunakannya dan kami perlu mengurangi cakupan kode kami agar bisa terus bergerak cepat dan melayani komunitas kami dengan baik. Ini adalah keputusan yang sangat sulit, tetapi memungkinkan kami fokus pada hal yang paling penting bagimu. Kami tahu ini menyebalkan.",
 		"vscodeLmDescription": " API Model Bahasa VS Code memungkinkan kamu menjalankan model yang disediakan oleh ekstensi VS Code lainnya (termasuk namun tidak terbatas pada GitHub Copilot). Cara termudah untuk memulai adalah menginstal ekstensi Copilot dan Copilot Chat dari VS Code Marketplace.",
 		"awsCustomArnUse": "Masukkan ARN Amazon Bedrock yang valid untuk model yang ingin kamu gunakan. Contoh format:",
 		"awsCustomArnDesc": "Pastikan region di ARN cocok dengan AWS Region yang kamu pilih di atas.",
@@ -378,10 +378,29 @@
 		"vertex1MContextBetaDescription": "Memperluas jendela konteks menjadi 1 juta token untuk Claude Sonnet 4 / 4.5 / Claude Opus 4.6",
 		"basetenApiKey": "Baseten API Key",
 		"getBasetenApiKey": "Dapatkan Baseten API Key",
+		"cerebrasApiKey": "Cerebras API Key",
+		"getCerebrasApiKey": "Dapatkan Cerebras API Key",
+		"azureResourceName": "Nama Resource Azure",
+		"azureResourceNameDescription": "Nama resource Azure AI Foundry Anda (contoh: 'my-openai-resource').",
+		"azureDeploymentName": "Nama Deployment Azure",
+		"azureDeploymentNameDescription": "Nama deployment model dalam resource.",
+		"azureApiKey": "Azure API Key",
+		"getAzureApiKey": "Dapatkan Akses Azure AI Foundry",
+		"azureApiVersion": "Versi API Azure",
+		"azureApiVersionDescription": "Versi API yang akan digunakan (contoh: '2024-10-21'). Biarkan kosong untuk nilai default.",
+		"chutesApiKey": "Chutes API Key",
+		"getChutesApiKey": "Dapatkan Chutes API Key",
 		"fireworksApiKey": "Fireworks API Key",
 		"getFireworksApiKey": "Dapatkan Fireworks API Key",
+		"featherlessApiKey": "Featherless API Key",
+		"getFeatherlessApiKey": "Dapatkan Featherless API Key",
+		"ioIntelligenceApiKey": "IO Intelligence API Key",
+		"ioIntelligenceApiKeyPlaceholder": "Masukkan kunci API IO Intelligence Anda",
+		"getIoIntelligenceApiKey": "Dapatkan IO Intelligence API Key",
 		"deepSeekApiKey": "DeepSeek API Key",
 		"getDeepSeekApiKey": "Dapatkan DeepSeek API Key",
+		"doubaoApiKey": "Kunci API Doubao",
+		"getDoubaoApiKey": "Dapatkan Kunci API Doubao",
 		"moonshotApiKey": "Kunci API Moonshot",
 		"getMoonshotApiKey": "Dapatkan Kunci API Moonshot",
 		"moonshotBaseUrl": "Titik Masuk Moonshot",
@@ -393,9 +412,24 @@
 		"getMiniMaxApiKey": "Dapatkan Kunci API MiniMax",
 		"minimaxBaseUrl": "Titik Masuk MiniMax",
 		"geminiApiKey": "Gemini API Key",
+		"getGroqApiKey": "Dapatkan Groq API Key",
+		"groqApiKey": "Groq API Key",
 		"getSambaNovaApiKey": "Dapatkan SambaNova API Key",
 		"sambaNovaApiKey": "SambaNova API Key",
+		"getHuggingFaceApiKey": "Dapatkan Kunci API Hugging Face",
+		"huggingFaceApiKey": "Kunci API Hugging Face",
+		"huggingFaceModelId": "ID Model",
 		"getGeminiApiKey": "Dapatkan Gemini API Key",
+		"huggingFaceLoading": "Memuat...",
+		"huggingFaceModelsCount": "({{count}} model)",
+		"huggingFaceSelectModel": "Pilih model...",
+		"huggingFaceSearchModels": "Cari model...",
+		"huggingFaceNoModelsFound": "Tidak ada model ditemukan",
+		"huggingFaceProvider": "Penyedia",
+		"huggingFaceProviderAuto": "Otomatis",
+		"huggingFaceSelectProvider": "Pilih penyedia...",
+		"huggingFaceSearchProviders": "Cari penyedia...",
+		"huggingFaceNoProvidersFound": "Tidak ada penyedia ditemukan",
 		"openAiApiKey": "OpenAI API Key",
 		"apiKey": "API Key",
 		"openAiBaseUrl": "Base URL",
@@ -466,6 +500,10 @@
 			"description": "Ollama memungkinkan kamu menjalankan model secara lokal di komputer. Untuk instruksi cara memulai, lihat panduan quickstart mereka.",
 			"warning": "Catatan: Roo Code menggunakan prompt kompleks dan bekerja terbaik dengan model Claude. Model yang kurang mampu mungkin tidak bekerja seperti yang diharapkan."
 		},
+		"unboundApiKey": "Unbound API Key",
+		"getUnboundApiKey": "Dapatkan Unbound API Key",
+		"unboundRefreshModelsSuccess": "Daftar model diperbarui! Kamu sekarang dapat memilih dari model terbaru.",
+		"unboundInvalidApiKey": "API key tidak valid. Silakan periksa API key kamu dan coba lagi.",
 		"roo": {
 			"authenticatedMessage": "Terautentikasi dengan aman melalui akun Roo Code Cloud Anda.",
 			"connectButton": "Hubungkan ke Roo Code Cloud"
@@ -921,7 +959,9 @@
 		"providerNotAllowed": "Provider '{{provider}}' tidak diizinkan oleh organisasi kamu",
 		"modelNotAllowed": "Model '{{model}}' tidak diizinkan untuk provider '{{provider}}' oleh organisasi kamu",
 		"profileInvalid": "Profil ini berisi provider atau model yang tidak diizinkan oleh organisasi kamu",
-		"qwenCodeOauthPath": "Kamu harus memberikan jalur kredensial OAuth yang valid"
+		"qwenCodeOauthPath": "Kamu harus memberikan jalur kredensial OAuth yang valid",
+		"azureResourceName": "Kamu harus memberikan nama sumber daya Azure.",
+		"azureDeploymentName": "Kamu harus memberikan nama deployment Azure."
 	},
 	"placeholders": {
 		"apiKey": "Masukkan API Key...",
@@ -934,6 +974,8 @@
 		"projectId": "Masukkan Project ID...",
 		"customArn": "Masukkan ARN (misalnya arn:aws:bedrock:us-east-1:123456789012:foundation-model/my-model)",
 		"baseUrl": "Masukkan base URL...",
+		"azureResourceName": "misalnya my-openai-resource",
+		"azureDeploymentName": "misalnya gpt-4o",
 		"modelId": {
 			"lmStudio": "misalnya meta-llama-3.1-8b-instruct",
 			"lmStudioDraft": "misalnya lmstudio-community/llama-3.2-1b-instruct",

+ 44 - 2
webview-ui/src/i18n/locales/it/settings.json

@@ -309,7 +309,7 @@
 		"noMatchFound": "Nessun profilo corrispondente trovato",
 		"searchProviderPlaceholder": "Cerca fornitori",
 		"noProviderMatchFound": "Nessun fornitore trovato",
-		"retiredProviderMessage": "Ci dispiace, questo provider non è più supportato. Abbiamo visto che pochissimi utenti di Roo lo utilizzavano effettivamente e dobbiamo ridurre la portata del nostro codice per continuare a procedere velocemente e servire bene la nostra community. È stata una decisione davvero difficile, ma ci permette di concentrarci su ciò che conta di più per te. Lo sappiamo, è una seccatura.",
+		"retiredProviderMessage": "Ci dispiace, questo fornitore non è più supportato. Abbiamo notato che pochissimi utenti Roo lo utilizzavano effettivamente e dobbiamo ridurre la superficie del nostro codice per continuare a sviluppare rapidamente e servire bene la nostra comunità. È stata una decisione davvero difficile, ma ci permette di concentrarci su ciò che conta di più per te. Lo sappiamo, è frustrante.",
 		"vscodeLmDescription": "L'API del Modello di Linguaggio di VS Code consente di eseguire modelli forniti da altre estensioni di VS Code (incluso, ma non limitato a, GitHub Copilot). Il modo più semplice per iniziare è installare le estensioni Copilot e Copilot Chat dal VS Code Marketplace.",
 		"awsCustomArnUse": "Inserisci un ARN Amazon Bedrock valido per il modello che desideri utilizzare. Esempi di formato:",
 		"awsCustomArnDesc": "Assicurati che la regione nell'ARN corrisponda alla regione AWS selezionata sopra.",
@@ -378,10 +378,29 @@
 		"vertex1MContextBetaDescription": "Estende la finestra di contesto a 1 milione di token per Claude Sonnet 4 / 4.5 / Claude Opus 4.6",
 		"basetenApiKey": "Chiave API Baseten",
 		"getBasetenApiKey": "Ottieni chiave API Baseten",
+		"cerebrasApiKey": "Chiave API Cerebras",
+		"getCerebrasApiKey": "Ottieni chiave API Cerebras",
+		"azureResourceName": "Nome risorsa Azure",
+		"azureResourceNameDescription": "Il nome della tua risorsa Azure AI Foundry (es. 'mia-risorsa-openai').",
+		"azureDeploymentName": "Nome distribuzione Azure",
+		"azureDeploymentNameDescription": "Il nome della distribuzione del modello all'interno della risorsa.",
+		"azureApiKey": "Chiave API Azure",
+		"getAzureApiKey": "Ottieni accesso Azure AI Foundry",
+		"azureApiVersion": "Versione API Azure",
+		"azureApiVersionDescription": "La versione dell'API da utilizzare (es. '2024-10-21'). Lascia vuoto per il valore predefinito.",
+		"chutesApiKey": "Chiave API Chutes",
+		"getChutesApiKey": "Ottieni chiave API Chutes",
 		"fireworksApiKey": "Chiave API Fireworks",
 		"getFireworksApiKey": "Ottieni chiave API Fireworks",
+		"featherlessApiKey": "Chiave API Featherless",
+		"getFeatherlessApiKey": "Ottieni chiave API Featherless",
+		"ioIntelligenceApiKey": "Chiave API IO Intelligence",
+		"ioIntelligenceApiKeyPlaceholder": "Inserisci la tua chiave API IO Intelligence",
+		"getIoIntelligenceApiKey": "Ottieni chiave API IO Intelligence",
 		"deepSeekApiKey": "Chiave API DeepSeek",
 		"getDeepSeekApiKey": "Ottieni chiave API DeepSeek",
+		"doubaoApiKey": "Chiave API Doubao",
+		"getDoubaoApiKey": "Ottieni chiave API Doubao",
 		"moonshotApiKey": "Chiave API Moonshot",
 		"getMoonshotApiKey": "Ottieni chiave API Moonshot",
 		"moonshotBaseUrl": "Punto di ingresso Moonshot",
@@ -393,8 +412,23 @@
 		"getMiniMaxApiKey": "Ottieni chiave API MiniMax",
 		"minimaxBaseUrl": "Punto di ingresso MiniMax",
 		"geminiApiKey": "Chiave API Gemini",
+		"getGroqApiKey": "Ottieni chiave API Groq",
+		"groqApiKey": "Chiave API Groq",
 		"getSambaNovaApiKey": "Ottieni chiave API SambaNova",
 		"sambaNovaApiKey": "Chiave API SambaNova",
+		"getHuggingFaceApiKey": "Ottieni chiave API Hugging Face",
+		"huggingFaceApiKey": "Chiave API Hugging Face",
+		"huggingFaceModelId": "ID modello",
+		"huggingFaceLoading": "Caricamento...",
+		"huggingFaceModelsCount": "({{count}} modelli)",
+		"huggingFaceSelectModel": "Seleziona un modello...",
+		"huggingFaceSearchModels": "Cerca modelli...",
+		"huggingFaceNoModelsFound": "Nessun modello trovato",
+		"huggingFaceProvider": "Provider",
+		"huggingFaceProviderAuto": "Automatico",
+		"huggingFaceSelectProvider": "Seleziona un provider...",
+		"huggingFaceSearchProviders": "Cerca provider...",
+		"huggingFaceNoProvidersFound": "Nessun provider trovato",
 		"getGeminiApiKey": "Ottieni chiave API Gemini",
 		"openAiApiKey": "Chiave API OpenAI",
 		"apiKey": "Chiave API",
@@ -466,6 +500,10 @@
 			"description": "Ollama ti permette di eseguire modelli localmente sul tuo computer. Per iniziare, consulta la guida rapida.",
 			"warning": "Nota: Roo Code utiliza prompt complessi e funziona meglio con i modelli Claude. I modelli con capacità inferiori potrebbero non funzionare come previsto."
 		},
+		"unboundApiKey": "Chiave API Unbound",
+		"getUnboundApiKey": "Ottieni chiave API Unbound",
+		"unboundRefreshModelsSuccess": "Lista dei modelli aggiornata! Ora puoi selezionare tra gli ultimi modelli.",
+		"unboundInvalidApiKey": "Chiave API non valida. Controlla la tua chiave API e riprova.",
 		"roo": {
 			"authenticatedMessage": "Autenticato in modo sicuro tramite il tuo account Roo Code Cloud.",
 			"connectButton": "Connetti a Roo Code Cloud"
@@ -921,7 +959,9 @@
 		"providerNotAllowed": "Il fornitore '{{provider}}' non è consentito dalla tua organizzazione",
 		"modelNotAllowed": "Il modello '{{model}}' non è consentito per il fornitore '{{provider}}' dalla tua organizzazione.",
 		"profileInvalid": "Questo profilo contiene un fornitore o un modello non consentito dalla tua organizzazione.",
-		"qwenCodeOauthPath": "Devi fornire un percorso valido per le credenziali OAuth"
+		"qwenCodeOauthPath": "Devi fornire un percorso valido per le credenziali OAuth",
+		"azureResourceName": "Devi fornire un nome risorsa Azure.",
+		"azureDeploymentName": "Devi fornire un nome distribuzione Azure."
 	},
 	"placeholders": {
 		"apiKey": "Inserisci chiave API...",
@@ -934,6 +974,8 @@
 		"projectId": "Inserisci ID progetto...",
 		"customArn": "Inserisci ARN (es. arn:aws:bedrock:us-east-1:123456789012:foundation-model/my-model)",
 		"baseUrl": "Inserisci URL base...",
+		"azureResourceName": "es. my-openai-resource",
+		"azureDeploymentName": "es. gpt-4o",
 		"modelId": {
 			"lmStudio": "es. meta-llama-3.1-8b-instruct",
 			"lmStudioDraft": "es. lmstudio-community/llama-3.2-1b-instruct",

+ 44 - 2
webview-ui/src/i18n/locales/ja/settings.json

@@ -309,7 +309,7 @@
 		"searchProviderPlaceholder": "プロバイダーを検索",
 		"noProviderMatchFound": "プロバイダーが見つかりません",
 		"noMatchFound": "一致するプロファイルが見つかりません",
-		"retiredProviderMessage": "申し訳ございませんが、このプロバイダーはサポートを終了しました。実際に利用しているRooユーザーが非常に少なく、コードベースの範囲を縮小して、迅速な開発とコミュニティへの貢献を続ける必要がありました。本当に難しい決断でしたが、あなたにとって最も重要なことに集中するためです。残念ですが、ご理解ください。",
+		"retiredProviderMessage": "申し訳ありませんが、このプロバイダーはサポートされなくなりました。実際に使用しているRooユーザーが非常に少ないことがわかり、コードベースの範囲を縮小して、迅速な開発を続け、コミュニティにより良いサービスを提供する必要がありました。本当に難しい決断でしたが、あなたにとって最も重要なことに集中することができます。残念なことだと分かっています。",
 		"vscodeLmDescription": "VS Code言語モデルAPIを使用すると、他のVS Code拡張機能(GitHub Copilotなど)が提供するモデルを実行できます。最も簡単な方法は、VS Code MarketplaceからCopilotおよびCopilot Chat拡張機能をインストールすることです。",
 		"awsCustomArnUse": "使用したいモデルの有効なAmazon Bedrock ARNを入力してください。形式の例:",
 		"awsCustomArnDesc": "ARN内のリージョンが上で選択したAWSリージョンと一致していることを確認してください。",
@@ -378,10 +378,29 @@
 		"vertex1MContextBetaDescription": "Claude Sonnet 4 / 4.5 / Claude Opus 4.6のコンテキストウィンドウを100万トークンに拡張します",
 		"basetenApiKey": "Baseten APIキー",
 		"getBasetenApiKey": "Baseten APIキーを取得",
+		"cerebrasApiKey": "Cerebras APIキー",
+		"getCerebrasApiKey": "Cerebras APIキーを取得",
+		"azureResourceName": "Azureリソース名",
+		"azureResourceNameDescription": "Azure AI Foundryリソースの名前(例:'my-openai-resource')。",
+		"azureDeploymentName": "Azureデプロイメント名",
+		"azureDeploymentNameDescription": "リソース内のモデルデプロイメントの名前。",
+		"azureApiKey": "Azure APIキー",
+		"getAzureApiKey": "Azure AI Foundryアクセスを取得",
+		"azureApiVersion": "Azure APIバージョン",
+		"azureApiVersionDescription": "使用するAPIバージョン(例:'2024-10-21')。デフォルトを使用する場合は空にしてください。",
+		"chutesApiKey": "Chutes APIキー",
+		"getChutesApiKey": "Chutes APIキーを取得",
 		"fireworksApiKey": "Fireworks APIキー",
 		"getFireworksApiKey": "Fireworks APIキーを取得",
+		"featherlessApiKey": "Featherless APIキー",
+		"getFeatherlessApiKey": "Featherless APIキーを取得",
+		"ioIntelligenceApiKey": "IO Intelligence APIキー",
+		"ioIntelligenceApiKeyPlaceholder": "IO Intelligence APIキーを入力してください",
+		"getIoIntelligenceApiKey": "IO Intelligence APIキーを取得",
 		"deepSeekApiKey": "DeepSeek APIキー",
 		"getDeepSeekApiKey": "DeepSeek APIキーを取得",
+		"doubaoApiKey": "Doubao APIキー",
+		"getDoubaoApiKey": "Doubao APIキーを取得",
 		"moonshotApiKey": "Moonshot APIキー",
 		"getMoonshotApiKey": "Moonshot APIキーを取得",
 		"moonshotBaseUrl": "Moonshot エントリーポイント",
@@ -393,8 +412,23 @@
 		"getMiniMaxApiKey": "MiniMax APIキーを取得",
 		"minimaxBaseUrl": "MiniMax エントリーポイント",
 		"geminiApiKey": "Gemini APIキー",
+		"getGroqApiKey": "Groq APIキーを取得",
+		"groqApiKey": "Groq APIキー",
 		"getSambaNovaApiKey": "SambaNova APIキーを取得",
 		"sambaNovaApiKey": "SambaNova APIキー",
+		"getHuggingFaceApiKey": "Hugging Face APIキーを取得",
+		"huggingFaceApiKey": "Hugging Face APIキー",
+		"huggingFaceModelId": "モデルID",
+		"huggingFaceLoading": "読み込み中...",
+		"huggingFaceModelsCount": "({{count}}個のモデル)",
+		"huggingFaceSelectModel": "モデルを選択...",
+		"huggingFaceSearchModels": "モデルを検索...",
+		"huggingFaceNoModelsFound": "モデルが見つかりません",
+		"huggingFaceProvider": "プロバイダー",
+		"huggingFaceProviderAuto": "自動",
+		"huggingFaceSelectProvider": "プロバイダーを選択...",
+		"huggingFaceSearchProviders": "プロバイダーを検索...",
+		"huggingFaceNoProvidersFound": "プロバイダーが見つかりません",
 		"getGeminiApiKey": "Gemini APIキーを取得",
 		"openAiApiKey": "OpenAI APIキー",
 		"apiKey": "APIキー",
@@ -466,6 +500,10 @@
 			"description": "Ollamaを使用すると、ローカルコンピューターでモデルを実行できます。始め方については、クイックスタートガイドをご覧ください。",
 			"warning": "注意:Roo Codeは複雑なプロンプトを使用し、Claudeモデルで最適に動作します。能力の低いモデルは期待通りに動作しない場合があります。"
 		},
+		"unboundApiKey": "Unbound APIキー",
+		"getUnboundApiKey": "Unbound APIキーを取得",
+		"unboundRefreshModelsSuccess": "モデルリストが更新されました!最新のモデルから選択できます。",
+		"unboundInvalidApiKey": "無効なAPIキーです。APIキーを確認して、もう一度お試しください。",
 		"roo": {
 			"authenticatedMessage": "Roo Code Cloudアカウントを通じて安全に認証されています。",
 			"connectButton": "Roo Code Cloudに接続"
@@ -921,7 +959,9 @@
 		"providerNotAllowed": "プロバイダー「{{provider}}」は組織によって許可されていません",
 		"modelNotAllowed": "モデル「{{model}}」はプロバイダー「{{provider}}」に対して組織によって許可されていません",
 		"profileInvalid": "このプロファイルには、組織によって許可されていないプロバイダーまたはモデルが含まれています",
-		"qwenCodeOauthPath": "有効なOAuth認証情報のパスを提供する必要があります"
+		"qwenCodeOauthPath": "有効なOAuth認証情報のパスを提供する必要があります",
+		"azureResourceName": "Azureリソース名を入力してください。",
+		"azureDeploymentName": "Azureデプロイメント名を入力してください。"
 	},
 	"placeholders": {
 		"apiKey": "API キーを入力...",
@@ -934,6 +974,8 @@
 		"projectId": "プロジェクト ID を入力...",
 		"customArn": "ARN を入力(例:arn:aws:bedrock:us-east-1:123456789012:foundation-model/my-model)",
 		"baseUrl": "ベース URL を入力...",
+		"azureResourceName": "例:my-openai-resource",
+		"azureDeploymentName": "例:gpt-4o",
 		"modelId": {
 			"lmStudio": "例:meta-llama-3.1-8b-instruct",
 			"lmStudioDraft": "例:lmstudio-community/llama-3.2-1b-instruct",

+ 44 - 2
webview-ui/src/i18n/locales/ko/settings.json

@@ -309,7 +309,7 @@
 		"searchProviderPlaceholder": "공급자 검색",
 		"noProviderMatchFound": "공급자를 찾을 수 없습니다",
 		"noMatchFound": "일치하는 프로필이 없습니다",
-		"retiredProviderMessage": "죄송합니다. 이 공급자는 더 이상 지원되지 않습니다. 실제로 사용하는 Roo 사용자가 매우 적었고, 빠르게 개발하고 커뮤니티에 잘 봉사하기 위해 코드베이스의 범위를 줄여야 했습니다. 정말 어려운 결정이었지만, 당신에게 가장 중요한 것에 집중할 수 있게 해줍니다. 불편을 드려 죄송합니다.",
+		"retiredProviderMessage": "죄송합니다. 이 공급자는 더 이상 지원되지 않습니다. 실제로 이를 사용하는 Roo 사용자가 매우 적었고, 빠르게 개발하고 커뮤니티에 더 나은 서비스를 제공하기 위해 코드베이스의 범위를 줄여야 했습니다. 정말 어려운 결정이었지만, 여러분에게 가장 중요한 것에 집중할 수 있게 해줍니다. 아쉽다는 거, 저희도 알아요.",
 		"vscodeLmDescription": "VS Code 언어 모델 API를 사용하면 GitHub Copilot을 포함한 기타 VS Code 확장 프로그램이 제공하는 모델을 실행할 수 있습니다. 시작하려면 VS Code 마켓플레이스에서 Copilot 및 Copilot Chat 확장 프로그램을 설치하는 것이 가장 쉽습니다.",
 		"awsCustomArnUse": "사용하려는 모델의 유효한 Amazon Bedrock ARN을 입력하세요. 형식 예시:",
 		"awsCustomArnDesc": "ARN의 리전이 위에서 선택한 AWS 리전과 일치하는지 확인하세요.",
@@ -378,10 +378,29 @@
 		"vertex1MContextBetaDescription": "Claude Sonnet 4 / 4.5 / Claude Opus 4.6의 컨텍스트 창을 100만 토큰으로 확장",
 		"basetenApiKey": "Baseten API 키",
 		"getBasetenApiKey": "Baseten API 키 가져오기",
+		"cerebrasApiKey": "Cerebras API 키",
+		"getCerebrasApiKey": "Cerebras API 키 가져오기",
+		"azureResourceName": "Azure 리소스 이름",
+		"azureResourceNameDescription": "Azure AI Foundry 리소스 이름 (예: 'my-openai-resource').",
+		"azureDeploymentName": "Azure 배포 이름",
+		"azureDeploymentNameDescription": "리소스 내 모델 배포 이름.",
+		"azureApiKey": "Azure API 키",
+		"getAzureApiKey": "Azure AI Foundry 액세스 받기",
+		"azureApiVersion": "Azure API 버전",
+		"azureApiVersionDescription": "사용할 API 버전 (예: '2024-10-21'). 기본값을 사용하려면 비워두세요.",
+		"chutesApiKey": "Chutes API 키",
+		"getChutesApiKey": "Chutes API 키 받기",
 		"fireworksApiKey": "Fireworks API 키",
 		"getFireworksApiKey": "Fireworks API 키 받기",
+		"featherlessApiKey": "Featherless API 키",
+		"getFeatherlessApiKey": "Featherless API 키 받기",
+		"ioIntelligenceApiKey": "IO Intelligence API 키",
+		"ioIntelligenceApiKeyPlaceholder": "IO Intelligence API 키를 입력하세요",
+		"getIoIntelligenceApiKey": "IO Intelligence API 키 받기",
 		"deepSeekApiKey": "DeepSeek API 키",
 		"getDeepSeekApiKey": "DeepSeek API 키 받기",
+		"doubaoApiKey": "Doubao API 키",
+		"getDoubaoApiKey": "Doubao API 키 받기",
 		"moonshotApiKey": "Moonshot API 키",
 		"getMoonshotApiKey": "Moonshot API 키 받기",
 		"moonshotBaseUrl": "Moonshot 엔트리포인트",
@@ -393,9 +412,24 @@
 		"getMiniMaxApiKey": "MiniMax API 키 받기",
 		"minimaxBaseUrl": "MiniMax 엔트리포인트",
 		"geminiApiKey": "Gemini API 키",
+		"getGroqApiKey": "Groq API 키 받기",
+		"groqApiKey": "Groq API 키",
 		"getSambaNovaApiKey": "SambaNova API 키 받기",
 		"sambaNovaApiKey": "SambaNova API 키",
 		"getGeminiApiKey": "Gemini API 키 받기",
+		"getHuggingFaceApiKey": "Hugging Face API 키 받기",
+		"huggingFaceApiKey": "Hugging Face API 키",
+		"huggingFaceModelId": "모델 ID",
+		"huggingFaceLoading": "로딩 중...",
+		"huggingFaceModelsCount": "({{count}}개 모델)",
+		"huggingFaceSelectModel": "모델 선택...",
+		"huggingFaceSearchModels": "모델 검색...",
+		"huggingFaceNoModelsFound": "모델을 찾을 수 없음",
+		"huggingFaceProvider": "제공자",
+		"huggingFaceProviderAuto": "자동",
+		"huggingFaceSelectProvider": "제공자 선택...",
+		"huggingFaceSearchProviders": "제공자 검색...",
+		"huggingFaceNoProvidersFound": "제공자를 찾을 수 없음",
 		"apiKey": "API 키",
 		"openAiApiKey": "OpenAI API 키",
 		"openAiBaseUrl": "기본 URL",
@@ -466,6 +500,10 @@
 			"description": "Ollama를 사용하면 컴퓨터에서 로컬로 모델을 실행할 수 있습니다. 시작하는 방법은 빠른 시작 가이드를 참조하세요.",
 			"warning": "참고: Roo Code는 복잡한 프롬프트를 사용하며 Claude 모델에서 가장 잘 작동합니다. 덜 강력한 모델은 예상대로 작동하지 않을 수 있습니다."
 		},
+		"unboundApiKey": "Unbound API 키",
+		"getUnboundApiKey": "Unbound API 키 받기",
+		"unboundRefreshModelsSuccess": "모델 목록이 업데이트되었습니다! 이제 최신 모델에서 선택할 수 있습니다.",
+		"unboundInvalidApiKey": "잘못된 API 키입니다. API 키를 확인하고 다시 시도해 주세요.",
 		"roo": {
 			"authenticatedMessage": "Roo Code Cloud 계정을 통해 안전하게 인증되었습니다.",
 			"connectButton": "Roo Code Cloud에 연결"
@@ -921,7 +959,9 @@
 		"providerNotAllowed": "제공자 '{{provider}}'는 조직에서 허용되지 않습니다",
 		"modelNotAllowed": "모델 '{{model}}'은 제공자 '{{provider}}'에 대해 조직에서 허용되지 않습니다",
 		"profileInvalid": "이 프로필에는 조직에서 허용되지 않는 제공자 또는 모델이 포함되어 있습니다",
-		"qwenCodeOauthPath": "유효한 OAuth 자격 증명 경로를 제공해야 합니다"
+		"qwenCodeOauthPath": "유효한 OAuth 자격 증명 경로를 제공해야 합니다",
+		"azureResourceName": "Azure 리소스 이름을 입력해야 합니다.",
+		"azureDeploymentName": "Azure 배포 이름을 입력해야 합니다."
 	},
 	"placeholders": {
 		"apiKey": "API 키 입력...",
@@ -934,6 +974,8 @@
 		"projectId": "프로젝트 ID 입력...",
 		"customArn": "ARN 입력 (예: arn:aws:bedrock:us-east-1:123456789012:foundation-model/my-model)",
 		"baseUrl": "기본 URL 입력...",
+		"azureResourceName": "예: my-openai-resource",
+		"azureDeploymentName": "예: gpt-4o",
 		"modelId": {
 			"lmStudio": "예: meta-llama-3.1-8b-instruct",
 			"lmStudioDraft": "예: lmstudio-community/llama-3.2-1b-instruct",

+ 44 - 2
webview-ui/src/i18n/locales/nl/settings.json

@@ -309,7 +309,7 @@
 		"searchProviderPlaceholder": "Zoek providers",
 		"noProviderMatchFound": "Geen providers gevonden",
 		"noMatchFound": "Geen overeenkomende profielen gevonden",
-		"retiredProviderMessage": "Sorry, deze provider wordt niet meer ondersteund. We zagen dat heel weinig Roo-gebruikers het daadwerkelijk gebruikten en we moeten de omvang van onze codebase verkleinen zodat we snel kunnen blijven leveren en onze community goed kunnen blijven bedienen. Het was een heel moeilijke beslissing, maar het stelt ons in staat om ons te richten op wat het belangrijkst voor je is. Het is vervelend, dat weten we.",
+		"retiredProviderMessage": "Sorry, deze provider wordt niet meer ondersteund. We zagen dat heel weinig Roo-gebruikers deze daadwerkelijk gebruikten en we moeten de omvang van onze codebase verkleinen zodat we snel kunnen blijven leveren en onze community goed kunnen bedienen. Het was een heel moeilijke beslissing, maar het laat ons focussen op wat het belangrijkst voor je is. Het is vervelend, dat weten we.",
 		"vscodeLmDescription": "De VS Code Language Model API stelt je in staat modellen te draaien die door andere VS Code-extensies worden geleverd (waaronder GitHub Copilot). De eenvoudigste manier om te beginnen is door de Copilot- en Copilot Chat-extensies te installeren vanuit de VS Code Marketplace.",
 		"awsCustomArnUse": "Voer een geldige Amazon Bedrock ARN in voor het model dat je wilt gebruiken. Voorbeeldformaten:",
 		"awsCustomArnDesc": "Zorg ervoor dat de regio in de ARN overeenkomt met je geselecteerde AWS-regio hierboven.",
@@ -378,10 +378,29 @@
 		"vertex1MContextBetaDescription": "Breidt het contextvenster uit tot 1 miljoen tokens voor Claude Sonnet 4 / 4.5 / Claude Opus 4.6",
 		"basetenApiKey": "Baseten API-sleutel",
 		"getBasetenApiKey": "Baseten API-sleutel verkrijgen",
+		"cerebrasApiKey": "Cerebras API-sleutel",
+		"getCerebrasApiKey": "Cerebras API-sleutel verkrijgen",
+		"azureResourceName": "Azure Resource-naam",
+		"azureResourceNameDescription": "De naam van je Azure AI Foundry resource (bijv. 'mijn-openai-resource').",
+		"azureDeploymentName": "Azure Deployment-naam",
+		"azureDeploymentNameDescription": "De naam van je model deployment binnen de resource.",
+		"azureApiKey": "Azure API-sleutel",
+		"getAzureApiKey": "Azure AI Foundry-toegang verkrijgen",
+		"azureApiVersion": "Azure API-versie",
+		"azureApiVersionDescription": "De API-versie om te gebruiken (bijv. '2024-10-21'). Laat leeg voor standaardwaarde.",
+		"chutesApiKey": "Chutes API-sleutel",
+		"getChutesApiKey": "Chutes API-sleutel ophalen",
 		"fireworksApiKey": "Fireworks API-sleutel",
 		"getFireworksApiKey": "Fireworks API-sleutel ophalen",
+		"featherlessApiKey": "Featherless API-sleutel",
+		"getFeatherlessApiKey": "Featherless API-sleutel ophalen",
+		"ioIntelligenceApiKey": "IO Intelligence API-sleutel",
+		"ioIntelligenceApiKeyPlaceholder": "Voer je IO Intelligence API-sleutel in",
+		"getIoIntelligenceApiKey": "IO Intelligence API-sleutel ophalen",
 		"deepSeekApiKey": "DeepSeek API-sleutel",
 		"getDeepSeekApiKey": "DeepSeek API-sleutel ophalen",
+		"doubaoApiKey": "Doubao API-sleutel",
+		"getDoubaoApiKey": "Doubao API-sleutel ophalen",
 		"moonshotApiKey": "Moonshot API-sleutel",
 		"getMoonshotApiKey": "Moonshot API-sleutel ophalen",
 		"moonshotBaseUrl": "Moonshot-ingangspunt",
@@ -393,9 +412,24 @@
 		"getMiniMaxApiKey": "MiniMax API-sleutel ophalen",
 		"minimaxBaseUrl": "MiniMax-ingangspunt",
 		"geminiApiKey": "Gemini API-sleutel",
+		"getGroqApiKey": "Groq API-sleutel ophalen",
+		"groqApiKey": "Groq API-sleutel",
 		"getSambaNovaApiKey": "SambaNova API-sleutel ophalen",
 		"sambaNovaApiKey": "SambaNova API-sleutel",
 		"getGeminiApiKey": "Gemini API-sleutel ophalen",
+		"getHuggingFaceApiKey": "Hugging Face API-sleutel ophalen",
+		"huggingFaceApiKey": "Hugging Face API-sleutel",
+		"huggingFaceModelId": "Model ID",
+		"huggingFaceLoading": "Laden...",
+		"huggingFaceModelsCount": "({{count}} modellen)",
+		"huggingFaceSelectModel": "Selecteer een model...",
+		"huggingFaceSearchModels": "Zoek modellen...",
+		"huggingFaceNoModelsFound": "Geen modellen gevonden",
+		"huggingFaceProvider": "Provider",
+		"huggingFaceProviderAuto": "Automatisch",
+		"huggingFaceSelectProvider": "Selecteer een provider...",
+		"huggingFaceSearchProviders": "Zoek providers...",
+		"huggingFaceNoProvidersFound": "Geen providers gevonden",
 		"apiKey": "API-sleutel",
 		"openAiApiKey": "OpenAI API-sleutel",
 		"openAiBaseUrl": "Basis-URL",
@@ -466,6 +500,10 @@
 			"description": "Ollama laat je modellen lokaal op je computer draaien. Zie hun quickstart-gids voor instructies.",
 			"warning": "Let op: Roo Code gebruikt complexe prompts en werkt het beste met Claude-modellen. Minder krachtige modellen werken mogelijk niet zoals verwacht."
 		},
+		"unboundApiKey": "Unbound API-sleutel",
+		"getUnboundApiKey": "Unbound API-sleutel ophalen",
+		"unboundRefreshModelsSuccess": "Modellenlijst bijgewerkt! Je kunt nu kiezen uit de nieuwste modellen.",
+		"unboundInvalidApiKey": "Ongeldige API-sleutel. Controleer je API-sleutel en probeer het opnieuw.",
 		"roo": {
 			"authenticatedMessage": "Veilig geauthenticeerd via je Roo Code Cloud-account.",
 			"connectButton": "Verbinden met Roo Code Cloud"
@@ -921,7 +959,9 @@
 		"providerNotAllowed": "Provider '{{provider}}' is niet toegestaan door je organisatie",
 		"modelNotAllowed": "Model '{{model}}' is niet toegestaan voor provider '{{provider}}' door je organisatie",
 		"profileInvalid": "Dit profiel bevat een provider of model dat niet is toegestaan door je organisatie",
-		"qwenCodeOauthPath": "Je moet een geldig OAuth-referentiepad opgeven"
+		"qwenCodeOauthPath": "Je moet een geldig OAuth-referentiepad opgeven",
+		"azureResourceName": "Je moet een Azure-resourcenaam opgeven.",
+		"azureDeploymentName": "Je moet een Azure-implementatienaam opgeven."
 	},
 	"placeholders": {
 		"apiKey": "Voer API-sleutel in...",
@@ -934,6 +974,8 @@
 		"projectId": "Voer project-ID in...",
 		"customArn": "Voer ARN in (bijv. arn:aws:bedrock:us-east-1:123456789012:foundation-model/my-model)",
 		"baseUrl": "Voer basis-URL in...",
+		"azureResourceName": "bijv. my-openai-resource",
+		"azureDeploymentName": "bijv. gpt-4o",
 		"modelId": {
 			"lmStudio": "bijv. meta-llama-3.1-8b-instruct",
 			"lmStudioDraft": "bijv. lmstudio-community/llama-3.2-1b-instruct",

+ 44 - 2
webview-ui/src/i18n/locales/pl/settings.json

@@ -309,7 +309,7 @@
 		"searchProviderPlaceholder": "Szukaj dostawców",
 		"noProviderMatchFound": "Nie znaleziono dostawców",
 		"noMatchFound": "Nie znaleziono pasujących profili",
-		"retiredProviderMessage": "Przepraszamy, ten dostawca nie jest już obsługiwany. Zauważyliśmy, że bardzo niewielu użytkowników Roo faktycznie z niego korzystało, a my musimy zmniejszyć zakres naszego kodu, aby móc dalej szybko dostarczać i dobrze służyć naszej społeczności. To była naprawdę trudna decyzja, ale pozwala nam skupić się na tym, co jest dla ciebie najważniejsze. Wiemy, że to frustrujące.",
+		"retiredProviderMessage": "Przepraszamy, ten dostawca nie jest już obsługiwany. Zauważyliśmy, że bardzo niewielu użytkowników Roo faktycznie z niego korzystało i musimy zmniejszyć zakres naszego kodu, aby móc dalej szybko się rozwijać i dobrze służyć naszej społeczności. To była naprawdę trudna decyzja, ale pozwala nam skupić się na tym, co jest dla ciebie najważniejsze. Wiemy, że to frustrujące.",
 		"vscodeLmDescription": "Interfejs API modelu językowego VS Code umożliwia uruchamianie modeli dostarczanych przez inne rozszerzenia VS Code (w tym, ale nie tylko, GitHub Copilot). Najłatwiejszym sposobem na rozpoczęcie jest zainstalowanie rozszerzeń Copilot i Copilot Chat z VS Code Marketplace.",
 		"awsCustomArnUse": "Wprowadź prawidłowy Amazon Bedrock ARN dla modelu, którego chcesz użyć. Przykłady formatu:",
 		"awsCustomArnDesc": "Upewnij się, że region w ARN odpowiada wybranemu powyżej regionowi AWS.",
@@ -378,10 +378,29 @@
 		"vertex1MContextBetaDescription": "Rozszerza okno kontekstowe do 1 miliona tokenów dla Claude Sonnet 4 / 4.5 / Claude Opus 4.6",
 		"basetenApiKey": "Klucz API Baseten",
 		"getBasetenApiKey": "Uzyskaj klucz API Baseten",
+		"cerebrasApiKey": "Klucz API Cerebras",
+		"getCerebrasApiKey": "Uzyskaj klucz API Cerebras",
+		"azureResourceName": "Nazwa zasobu Azure",
+		"azureResourceNameDescription": "Nazwa Twojego zasobu Azure AI Foundry (np. 'moj-zasob-openai').",
+		"azureDeploymentName": "Nazwa wdrożenia Azure",
+		"azureDeploymentNameDescription": "Nazwa wdrożenia modelu w zasobie.",
+		"azureApiKey": "Klucz API Azure",
+		"getAzureApiKey": "Uzyskaj dostęp do Azure AI Foundry",
+		"azureApiVersion": "Wersja API Azure",
+		"azureApiVersionDescription": "Wersja API do użycia (np. '2024-10-21'). Pozostaw puste dla wartości domyślnej.",
+		"chutesApiKey": "Klucz API Chutes",
+		"getChutesApiKey": "Uzyskaj klucz API Chutes",
 		"fireworksApiKey": "Klucz API Fireworks",
 		"getFireworksApiKey": "Uzyskaj klucz API Fireworks",
+		"featherlessApiKey": "Klucz API Featherless",
+		"getFeatherlessApiKey": "Uzyskaj klucz API Featherless",
+		"ioIntelligenceApiKey": "Klucz API IO Intelligence",
+		"ioIntelligenceApiKeyPlaceholder": "Wprowadź swój klucz API IO Intelligence",
+		"getIoIntelligenceApiKey": "Uzyskaj klucz API IO Intelligence",
 		"deepSeekApiKey": "Klucz API DeepSeek",
 		"getDeepSeekApiKey": "Uzyskaj klucz API DeepSeek",
+		"doubaoApiKey": "Klucz API Doubao",
+		"getDoubaoApiKey": "Uzyskaj klucz API Doubao",
 		"moonshotApiKey": "Klucz API Moonshot",
 		"getMoonshotApiKey": "Uzyskaj klucz API Moonshot",
 		"moonshotBaseUrl": "Punkt wejścia Moonshot",
@@ -393,9 +412,24 @@
 		"getMiniMaxApiKey": "Uzyskaj klucz API MiniMax",
 		"minimaxBaseUrl": "Punkt wejścia MiniMax",
 		"geminiApiKey": "Klucz API Gemini",
+		"getGroqApiKey": "Uzyskaj klucz API Groq",
+		"groqApiKey": "Klucz API Groq",
 		"getSambaNovaApiKey": "Uzyskaj klucz API SambaNova",
 		"sambaNovaApiKey": "Klucz API SambaNova",
 		"getGeminiApiKey": "Uzyskaj klucz API Gemini",
+		"getHuggingFaceApiKey": "Uzyskaj klucz API Hugging Face",
+		"huggingFaceApiKey": "Klucz API Hugging Face",
+		"huggingFaceModelId": "ID modelu",
+		"huggingFaceLoading": "Ładowanie...",
+		"huggingFaceModelsCount": "({{count}} modeli)",
+		"huggingFaceSelectModel": "Wybierz model...",
+		"huggingFaceSearchModels": "Szukaj modeli...",
+		"huggingFaceNoModelsFound": "Nie znaleziono modeli",
+		"huggingFaceProvider": "Dostawca",
+		"huggingFaceProviderAuto": "Automatyczny",
+		"huggingFaceSelectProvider": "Wybierz dostawcę...",
+		"huggingFaceSearchProviders": "Szukaj dostawców...",
+		"huggingFaceNoProvidersFound": "Nie znaleziono dostawców",
 		"apiKey": "Klucz API",
 		"openAiApiKey": "Klucz API OpenAI",
 		"openAiBaseUrl": "URL bazowy",
@@ -466,6 +500,10 @@
 			"description": "Ollama pozwala na lokalne uruchamianie modeli na twoim komputerze. Aby rozpocząć, zapoznaj się z przewodnikiem szybkiego startu.",
 			"warning": "Uwaga: Roo Code używa złożonych podpowiedzi i działa najlepiej z modelami Claude. Modele o niższych możliwościach mogą nie działać zgodnie z oczekiwaniami."
 		},
+		"unboundApiKey": "Klucz API Unbound",
+		"getUnboundApiKey": "Uzyskaj klucz API Unbound",
+		"unboundRefreshModelsSuccess": "Lista modeli zaktualizowana! Możesz teraz wybierać spośród najnowszych modeli.",
+		"unboundInvalidApiKey": "Nieprawidłowy klucz API. Sprawdź swój klucz API i spróbuj ponownie.",
 		"roo": {
 			"authenticatedMessage": "Bezpiecznie uwierzytelniony przez twoje konto Roo Code Cloud.",
 			"connectButton": "Połącz z Roo Code Cloud"
@@ -921,7 +959,9 @@
 		"providerNotAllowed": "Dostawca '{{provider}}' nie jest dozwolony przez Twoją organizację",
 		"modelNotAllowed": "Model '{{model}}' nie jest dozwolony dla dostawcy '{{provider}}' przez Twoją organizację",
 		"profileInvalid": "Ten profil zawiera dostawcę lub model, który nie jest dozwolony przez Twoją organizację",
-		"qwenCodeOauthPath": "Musisz podać prawidłową ścieżkę do poświadczeń OAuth"
+		"qwenCodeOauthPath": "Musisz podać prawidłową ścieżkę do poświadczeń OAuth",
+		"azureResourceName": "Musisz podać nazwę zasobu Azure.",
+		"azureDeploymentName": "Musisz podać nazwę wdrożenia Azure."
 	},
 	"placeholders": {
 		"apiKey": "Wprowadź klucz API...",
@@ -934,6 +974,8 @@
 		"projectId": "Wprowadź ID projektu...",
 		"customArn": "Wprowadź ARN (np. arn:aws:bedrock:us-east-1:123456789012:foundation-model/my-model)",
 		"baseUrl": "Wprowadź podstawowy URL...",
+		"azureResourceName": "np. my-openai-resource",
+		"azureDeploymentName": "np. gpt-4o",
 		"modelId": {
 			"lmStudio": "np. meta-llama-3.1-8b-instruct",
 			"lmStudioDraft": "np. lmstudio-community/llama-3.2-1b-instruct",

+ 44 - 2
webview-ui/src/i18n/locales/pt-BR/settings.json

@@ -309,7 +309,7 @@
 		"searchProviderPlaceholder": "Pesquisar provedores",
 		"noProviderMatchFound": "Nenhum provedor encontrado",
 		"noMatchFound": "Nenhum perfil correspondente encontrado",
-		"retiredProviderMessage": "Desculpe, este provedor não é mais suportado. Percebemos que muito poucos usuários do Roo realmente o utilizavam e precisamos reduzir o escopo do nosso código para continuar entregando rápido e servindo bem nossa comunidade. Foi uma decisão muito difícil, mas nos permite focar no que mais importa para você. Sabemos que é chato.",
+		"retiredProviderMessage": "Desculpe, este provedor não é mais suportado. Vimos que muito poucos usuários do Roo realmente o utilizavam e precisamos reduzir o escopo do nosso código para continuar avançando rapidamente e servir bem nossa comunidade. Foi uma decisão muito difícil, mas nos permite focar no que mais importa para você. Sabemos que é chato.",
 		"vscodeLmDescription": "A API do Modelo de Linguagem do VS Code permite executar modelos fornecidos por outras extensões do VS Code (incluindo, mas não se limitando, ao GitHub Copilot). A maneira mais fácil de começar é instalar as extensões Copilot e Copilot Chat no VS Code Marketplace.",
 		"awsCustomArnUse": "Insira um ARN Amazon Bedrock válido para o modelo que deseja usar. Exemplos de formato:",
 		"awsCustomArnDesc": "Certifique-se de que a região no ARN corresponde à região AWS selecionada acima.",
@@ -378,10 +378,29 @@
 		"vertex1MContextBetaDescription": "Estende a janela de contexto para 1 milhão de tokens para o Claude Sonnet 4 / 4.5 / Claude Opus 4.6",
 		"basetenApiKey": "Chave de API Baseten",
 		"getBasetenApiKey": "Obter chave de API Baseten",
+		"cerebrasApiKey": "Chave de API Cerebras",
+		"getCerebrasApiKey": "Obter chave de API Cerebras",
+		"azureResourceName": "Nome do recurso Azure",
+		"azureResourceNameDescription": "O nome do seu recurso Azure AI Foundry (ex: 'meu-recurso-openai').",
+		"azureDeploymentName": "Nome da implantação Azure",
+		"azureDeploymentNameDescription": "O nome da implantação do modelo dentro do recurso.",
+		"azureApiKey": "Chave de API Azure",
+		"getAzureApiKey": "Obter acesso ao Azure AI Foundry",
+		"azureApiVersion": "Versão da API Azure",
+		"azureApiVersionDescription": "A versão da API a ser usada (ex: '2024-10-21'). Deixe vazio para o padrão.",
+		"chutesApiKey": "Chave de API Chutes",
+		"getChutesApiKey": "Obter chave de API Chutes",
 		"fireworksApiKey": "Chave de API Fireworks",
 		"getFireworksApiKey": "Obter chave de API Fireworks",
+		"featherlessApiKey": "Chave de API Featherless",
+		"getFeatherlessApiKey": "Obter chave de API Featherless",
+		"ioIntelligenceApiKey": "Chave de API IO Intelligence",
+		"ioIntelligenceApiKeyPlaceholder": "Insira sua chave de API da IO Intelligence",
+		"getIoIntelligenceApiKey": "Obter chave de API IO Intelligence",
 		"deepSeekApiKey": "Chave de API DeepSeek",
 		"getDeepSeekApiKey": "Obter chave de API DeepSeek",
+		"doubaoApiKey": "Chave de API Doubao",
+		"getDoubaoApiKey": "Obter chave de API Doubao",
 		"moonshotApiKey": "Chave de API Moonshot",
 		"getMoonshotApiKey": "Obter chave de API Moonshot",
 		"moonshotBaseUrl": "Ponto de entrada Moonshot",
@@ -393,9 +412,24 @@
 		"getMiniMaxApiKey": "Obter chave de API MiniMax",
 		"minimaxBaseUrl": "Ponto de entrada MiniMax",
 		"geminiApiKey": "Chave de API Gemini",
+		"getGroqApiKey": "Obter chave de API Groq",
+		"groqApiKey": "Chave de API Groq",
 		"getSambaNovaApiKey": "Obter chave de API SambaNova",
 		"sambaNovaApiKey": "Chave de API SambaNova",
 		"getGeminiApiKey": "Obter chave de API Gemini",
+		"getHuggingFaceApiKey": "Obter chave de API Hugging Face",
+		"huggingFaceApiKey": "Chave de API Hugging Face",
+		"huggingFaceModelId": "ID do modelo",
+		"huggingFaceLoading": "Carregando...",
+		"huggingFaceModelsCount": "({{count}} modelos)",
+		"huggingFaceSelectModel": "Selecionar um modelo...",
+		"huggingFaceSearchModels": "Buscar modelos...",
+		"huggingFaceNoModelsFound": "Nenhum modelo encontrado",
+		"huggingFaceProvider": "Provedor",
+		"huggingFaceProviderAuto": "Automático",
+		"huggingFaceSelectProvider": "Selecionar um provedor...",
+		"huggingFaceSearchProviders": "Buscar provedores...",
+		"huggingFaceNoProvidersFound": "Nenhum provedor encontrado",
 		"apiKey": "Chave de API",
 		"openAiApiKey": "Chave de API OpenAI",
 		"openAiBaseUrl": "URL Base",
@@ -466,6 +500,10 @@
 			"description": "O Ollama permite que você execute modelos localmente em seu computador. Para instruções sobre como começar, veja o guia de início rápido deles.",
 			"warning": "Nota: O Roo Code usa prompts complexos e funciona melhor com modelos Claude. Modelos menos capazes podem não funcionar como esperado."
 		},
+		"unboundApiKey": "Chave de API Unbound",
+		"getUnboundApiKey": "Obter chave de API Unbound",
+		"unboundRefreshModelsSuccess": "Lista de modelos atualizada! Agora você pode selecionar entre os modelos mais recentes.",
+		"unboundInvalidApiKey": "Chave API inválida. Por favor, verifique sua chave API e tente novamente.",
 		"roo": {
 			"authenticatedMessage": "Autenticado com segurança através da sua conta Roo Code Cloud.",
 			"connectButton": "Conectar ao Roo Code Cloud"
@@ -921,7 +959,9 @@
 		"providerNotAllowed": "O provedor '{{provider}}' não é permitido pela sua organização",
 		"modelNotAllowed": "O modelo '{{model}}' não é permitido para o provedor '{{provider}}' pela sua organização",
 		"profileInvalid": "Este perfil contém um provedor ou modelo que não é permitido pela sua organização",
-		"qwenCodeOauthPath": "Você deve fornecer um caminho válido de credenciais OAuth"
+		"qwenCodeOauthPath": "Você deve fornecer um caminho válido de credenciais OAuth",
+		"azureResourceName": "Você deve fornecer um nome de recurso do Azure.",
+		"azureDeploymentName": "Você deve fornecer um nome de implantação do Azure."
 	},
 	"placeholders": {
 		"apiKey": "Digite a chave API...",
@@ -934,6 +974,8 @@
 		"projectId": "Digite o ID do projeto...",
 		"customArn": "Digite o ARN (ex: arn:aws:bedrock:us-east-1:123456789012:foundation-model/my-model)",
 		"baseUrl": "Digite a URL base...",
+		"azureResourceName": "ex: my-openai-resource",
+		"azureDeploymentName": "ex: gpt-4o",
 		"modelId": {
 			"lmStudio": "ex: meta-llama-3.1-8b-instruct",
 			"lmStudioDraft": "ex: lmstudio-community/llama-3.2-1b-instruct",

+ 44 - 2
webview-ui/src/i18n/locales/ru/settings.json

@@ -309,7 +309,7 @@
 		"searchProviderPlaceholder": "Поиск провайдеров",
 		"noProviderMatchFound": "Провайдеры не найдены",
 		"noMatchFound": "Совпадений не найдено",
-		"retiredProviderMessage": "К сожалению, этот провайдер больше не поддерживается. Мы заметили, что очень немногие пользователи Roo действительно им пользовались, и нам нужно сократить объём кодовой базы, чтобы продолжать быстро развиваться и хорошо служить нашему сообществу. Это было очень непростое решение, но оно позволяет нам сосредоточиться на том, что для тебя важнее всего. Мы знаем, что это неприятно.",
+		"retiredProviderMessage": "К сожалению, этот провайдер больше не поддерживается. Мы увидели, что очень немногие пользователи Roo фактически использовали его, и нам нужно сократить объём нашей кодовой базы, чтобы продолжать быстро развиваться и хорошо обслуживать наше сообщество. Это было очень непростое решение, но оно позволяет нам сосредоточиться на том, что важнее всего для тебя. Мы знаем, что это обидно.",
 		"vscodeLmDescription": "API языковой модели VS Code позволяет запускать модели, предоставляемые другими расширениями VS Code (включая, но не ограничиваясь GitHub Copilot). Для начала установите расширения Copilot и Copilot Chat из VS Code Marketplace.",
 		"awsCustomArnUse": "Введите действительный Amazon Bedrock ARN для используемой модели. Примеры формата:",
 		"awsCustomArnDesc": "Убедитесь, что регион в ARN совпадает с выбранным выше регионом AWS.",
@@ -378,10 +378,29 @@
 		"vertex1MContextBetaDescription": "Расширяет контекстное окно до 1 миллиона токенов для Claude Sonnet 4 / 4.5 / Claude Opus 4.6",
 		"basetenApiKey": "Baseten API-ключ",
 		"getBasetenApiKey": "Получить Baseten API-ключ",
+		"cerebrasApiKey": "Cerebras API-ключ",
+		"getCerebrasApiKey": "Получить Cerebras API-ключ",
+		"azureResourceName": "Имя ресурса Azure",
+		"azureResourceNameDescription": "Имя вашего ресурса Azure AI Foundry (напр. 'my-openai-resource').",
+		"azureDeploymentName": "Имя развертывания Azure",
+		"azureDeploymentNameDescription": "Имя развертывания модели внутри ресурса.",
+		"azureApiKey": "API-ключ Azure",
+		"getAzureApiKey": "Получить доступ к Azure AI Foundry",
+		"azureApiVersion": "Версия API Azure",
+		"azureApiVersionDescription": "Версия API для использования (напр. '2024-10-21'). Оставьте пустым для значения по умолчанию.",
+		"chutesApiKey": "Chutes API-ключ",
+		"getChutesApiKey": "Получить Chutes API-ключ",
 		"fireworksApiKey": "Fireworks API-ключ",
 		"getFireworksApiKey": "Получить Fireworks API-ключ",
+		"featherlessApiKey": "Featherless API-ключ",
+		"getFeatherlessApiKey": "Получить Featherless API-ключ",
+		"ioIntelligenceApiKey": "IO Intelligence API-ключ",
+		"ioIntelligenceApiKeyPlaceholder": "Введите свой ключ API IO Intelligence",
+		"getIoIntelligenceApiKey": "Получить IO Intelligence API-ключ",
 		"deepSeekApiKey": "DeepSeek API-ключ",
 		"getDeepSeekApiKey": "Получить DeepSeek API-ключ",
+		"doubaoApiKey": "Doubao API-ключ",
+		"getDoubaoApiKey": "Получить Doubao API-ключ",
 		"moonshotApiKey": "Moonshot API-ключ",
 		"getMoonshotApiKey": "Получить Moonshot API-ключ",
 		"moonshotBaseUrl": "Точка входа Moonshot",
@@ -393,9 +412,24 @@
 		"getMiniMaxApiKey": "Получить MiniMax API-ключ",
 		"minimaxBaseUrl": "Точка входа MiniMax",
 		"geminiApiKey": "Gemini API-ключ",
+		"getGroqApiKey": "Получить Groq API-ключ",
+		"groqApiKey": "Groq API-ключ",
 		"getSambaNovaApiKey": "Получить SambaNova API-ключ",
 		"sambaNovaApiKey": "SambaNova API-ключ",
 		"getGeminiApiKey": "Получить Gemini API-ключ",
+		"getHuggingFaceApiKey": "Получить Hugging Face API-ключ",
+		"huggingFaceApiKey": "Hugging Face API-ключ",
+		"huggingFaceModelId": "ID модели",
+		"huggingFaceLoading": "Загрузка...",
+		"huggingFaceModelsCount": "({{count}} моделей)",
+		"huggingFaceSelectModel": "Выберите модель...",
+		"huggingFaceSearchModels": "Поиск моделей...",
+		"huggingFaceNoModelsFound": "Модели не найдены",
+		"huggingFaceProvider": "Провайдер",
+		"huggingFaceProviderAuto": "Автоматически",
+		"huggingFaceSelectProvider": "Выберите провайдера...",
+		"huggingFaceSearchProviders": "Поиск провайдеров...",
+		"huggingFaceNoProvidersFound": "Провайдеры не найдены",
 		"apiKey": "API-ключ",
 		"openAiApiKey": "OpenAI API-ключ",
 		"openAiBaseUrl": "Базовый URL",
@@ -466,6 +500,10 @@
 			"description": "Ollama позволяет запускать модели локально на вашем компьютере. Для начала ознакомьтесь с кратким руководством.",
 			"warning": "Примечание: Roo Code использует сложные подсказки и лучше всего работает с моделями Claude. Менее мощные модели могут работать некорректно."
 		},
+		"unboundApiKey": "Unbound API-ключ",
+		"getUnboundApiKey": "Получить Unbound API-ключ",
+		"unboundRefreshModelsSuccess": "Список моделей обновлен! Теперь вы можете выбрать из последних моделей.",
+		"unboundInvalidApiKey": "Недействительный API-ключ. Пожалуйста, проверьте ваш API-ключ и попробуйте снова.",
 		"roo": {
 			"authenticatedMessage": "Безопасно аутентифицирован через твой аккаунт Roo Code Cloud.",
 			"connectButton": "Подключиться к Roo Code Cloud"
@@ -921,7 +959,9 @@
 		"providerNotAllowed": "Провайдер '{{provider}}' не разрешен вашей организацией",
 		"modelNotAllowed": "Модель '{{model}}' не разрешена для провайдера '{{provider}}' вашей организацией",
 		"profileInvalid": "Этот профиль содержит провайдера или модель, которые не разрешены вашей организацией",
-		"qwenCodeOauthPath": "Вы должны указать допустимый путь к учетным данным OAuth"
+		"qwenCodeOauthPath": "Вы должны указать допустимый путь к учетным данным OAuth",
+		"azureResourceName": "Вы должны указать имя ресурса Azure.",
+		"azureDeploymentName": "Вы должны указать имя развертывания Azure."
 	},
 	"placeholders": {
 		"apiKey": "Введите API-ключ...",
@@ -934,6 +974,8 @@
 		"projectId": "Введите Project ID...",
 		"customArn": "Введите ARN (например, arn:aws:bedrock:us-east-1:123456789012:foundation-model/my-model)",
 		"baseUrl": "Введите базовый URL...",
+		"azureResourceName": "например, my-openai-resource",
+		"azureDeploymentName": "например, gpt-4o",
 		"modelId": {
 			"lmStudio": "например, meta-llama-3.1-8b-instruct",
 			"lmStudioDraft": "например, lmstudio-community/llama-3.2-1b-instruct",

+ 44 - 2
webview-ui/src/i18n/locales/tr/settings.json

@@ -309,7 +309,7 @@
 		"searchProviderPlaceholder": "Sağlayıcıları ara",
 		"noProviderMatchFound": "Eşleşen sağlayıcı bulunamadı",
 		"noMatchFound": "Eşleşen profil bulunamadı",
-		"retiredProviderMessage": "Üzgünüz, bu sağlayıcı artık desteklenmiyor. Çok az Roo kullanıcısının bunu gerçekten kullandığını gördük ve hızlı gelişmeye devam edip topluluğumuza iyi hizmet verebilmek için kod tabanımızın kapsamını daraltmamız gerekiyor. Gerçekten zor bir karardı ama senin için en önemli olana odaklanmamızı sağlıyor. Bunun can sıkıcı olduğunu biliyoruz.",
+		"retiredProviderMessage": "Üzgünüz, bu sağlayıcı artık desteklenmiyor. Çok az Roo kullanıcısının bunu gerçekten kullandığını gördük ve kod tabanımızın kapsamını azaltmamız gerekiyor, böylece hızlı gelişmeye devam edebilir ve topluluğumuza iyi hizmet verebiliriz. Gerçekten zor bir karardı ama sana en önemli olan şeylere odaklanmamızı sağlıyor. Biliyoruz, bu sinir bozucu.",
 		"vscodeLmDescription": "VS Code Dil Modeli API'si, diğer VS Code uzantıları tarafından sağlanan modelleri çalıştırmanıza olanak tanır (GitHub Copilot dahil ancak bunlarla sınırlı değildir). Başlamanın en kolay yolu, VS Code Marketplace'ten Copilot ve Copilot Chat uzantılarını yüklemektir.",
 		"awsCustomArnUse": "Kullanmak istediğiniz model için geçerli bir Amazon Bedrock ARN'si girin. Format örnekleri:",
 		"awsCustomArnDesc": "ARN içindeki bölgenin yukarıda seçilen AWS Bölgesiyle eşleştiğinden emin olun.",
@@ -378,10 +378,29 @@
 		"vertex1MContextBetaDescription": "Claude Sonnet 4 / 4.5 / Claude Opus 4.6 için bağlam penceresini 1 milyon token'a genişletir",
 		"basetenApiKey": "Baseten API Anahtarı",
 		"getBasetenApiKey": "Baseten API Anahtarı Al",
+		"cerebrasApiKey": "Cerebras API Anahtarı",
+		"getCerebrasApiKey": "Cerebras API Anahtarını Al",
+		"azureResourceName": "Azure Kaynak Adı",
+		"azureResourceNameDescription": "Azure AI Foundry kaynak adınız (örn. 'benim-openai-kaynagim').",
+		"azureDeploymentName": "Azure Dağıtım Adı",
+		"azureDeploymentNameDescription": "Kaynak içindeki model dağıtım adınız.",
+		"azureApiKey": "Azure API Anahtarı",
+		"getAzureApiKey": "Azure AI Foundry Erişimi Al",
+		"azureApiVersion": "Azure API Sürümü",
+		"azureApiVersionDescription": "Kullanılacak API sürümü (örn. '2024-10-21'). Varsayılan için boş bırakın.",
+		"chutesApiKey": "Chutes API Anahtarı",
+		"getChutesApiKey": "Chutes API Anahtarı Al",
 		"fireworksApiKey": "Fireworks API Anahtarı",
 		"getFireworksApiKey": "Fireworks API Anahtarı Al",
+		"featherlessApiKey": "Featherless API Anahtarı",
+		"getFeatherlessApiKey": "Featherless API Anahtarı Al",
+		"ioIntelligenceApiKey": "IO Intelligence API Anahtarı",
+		"ioIntelligenceApiKeyPlaceholder": "IO Intelligence API anahtarınızı girin",
+		"getIoIntelligenceApiKey": "IO Intelligence API Anahtarı Al",
 		"deepSeekApiKey": "DeepSeek API Anahtarı",
 		"getDeepSeekApiKey": "DeepSeek API Anahtarı Al",
+		"doubaoApiKey": "Doubao API Anahtarı",
+		"getDoubaoApiKey": "Doubao API Anahtarı Al",
 		"moonshotApiKey": "Moonshot API Anahtarı",
 		"getMoonshotApiKey": "Moonshot API Anahtarı Al",
 		"moonshotBaseUrl": "Moonshot Giriş Noktası",
@@ -393,8 +412,23 @@
 		"getMiniMaxApiKey": "MiniMax API Anahtarı Al",
 		"minimaxBaseUrl": "MiniMax Giriş Noktası",
 		"geminiApiKey": "Gemini API Anahtarı",
+		"getGroqApiKey": "Groq API Anahtarı Al",
+		"groqApiKey": "Groq API Anahtarı",
 		"getSambaNovaApiKey": "SambaNova API Anahtarı Al",
 		"sambaNovaApiKey": "SambaNova API Anahtarı",
+		"getHuggingFaceApiKey": "Hugging Face API Anahtarı Al",
+		"huggingFaceApiKey": "Hugging Face API Anahtarı",
+		"huggingFaceModelId": "Model ID",
+		"huggingFaceLoading": "Yükleniyor...",
+		"huggingFaceModelsCount": "({{count}} model)",
+		"huggingFaceSelectModel": "Bir model seç...",
+		"huggingFaceSearchModels": "Modelleri ara...",
+		"huggingFaceNoModelsFound": "Model bulunamadı",
+		"huggingFaceProvider": "Sağlayıcı",
+		"huggingFaceProviderAuto": "Otomatik",
+		"huggingFaceSelectProvider": "Bir sağlayıcı seç...",
+		"huggingFaceSearchProviders": "Sağlayıcıları ara...",
+		"huggingFaceNoProvidersFound": "Sağlayıcı bulunamadı",
 		"getGeminiApiKey": "Gemini API Anahtarı Al",
 		"openAiApiKey": "OpenAI API Anahtarı",
 		"apiKey": "API Anahtarı",
@@ -466,6 +500,10 @@
 			"description": "Ollama, modelleri bilgisayarınızda yerel olarak çalıştırmanıza olanak tanır. Başlamak için hızlı başlangıç kılavuzlarına bakın.",
 			"warning": "Not: Roo Code karmaşık istemler kullanır ve Claude modelleriyle en iyi şekilde çalışır. Daha az yetenekli modeller beklendiği gibi çalışmayabilir."
 		},
+		"unboundApiKey": "Unbound API Anahtarı",
+		"getUnboundApiKey": "Unbound API Anahtarı Al",
+		"unboundRefreshModelsSuccess": "Model listesi güncellendi! Artık en son modeller arasından seçim yapabilirsiniz.",
+		"unboundInvalidApiKey": "Geçersiz API anahtarı. Lütfen API anahtarınızı kontrol edin ve tekrar deneyin.",
 		"roo": {
 			"authenticatedMessage": "Roo Code Cloud hesabın üzerinden güvenli bir şekilde kimlik doğrulandı.",
 			"connectButton": "Roo Code Cloud'a Bağlan"
@@ -921,7 +959,9 @@
 		"providerNotAllowed": "Sağlayıcı '{{provider}}' kuruluşunuz tarafından izin verilmiyor",
 		"modelNotAllowed": "Model '{{model}}' sağlayıcı '{{provider}}' için kuruluşunuz tarafından izin verilmiyor",
 		"profileInvalid": "Bu profil, kuruluşunuz tarafından izin verilmeyen bir sağlayıcı veya model içeriyor",
-		"qwenCodeOauthPath": "Geçerli bir OAuth kimlik bilgileri yolu sağlamalısın"
+		"qwenCodeOauthPath": "Geçerli bir OAuth kimlik bilgileri yolu sağlamalısın",
+		"azureResourceName": "Bir Azure kaynak adı sağlamalısın.",
+		"azureDeploymentName": "Bir Azure dağıtım adı sağlamalısın."
 	},
 	"placeholders": {
 		"apiKey": "API anahtarını girin...",
@@ -934,6 +974,8 @@
 		"projectId": "Proje ID'sini girin...",
 		"customArn": "ARN girin (örn. arn:aws:bedrock:us-east-1:123456789012:foundation-model/my-model)",
 		"baseUrl": "Temel URL'yi girin...",
+		"azureResourceName": "örn. my-openai-resource",
+		"azureDeploymentName": "örn. gpt-4o",
 		"modelId": {
 			"lmStudio": "örn. meta-llama-3.1-8b-instruct",
 			"lmStudioDraft": "örn. lmstudio-community/llama-3.2-1b-instruct",

+ 44 - 2
webview-ui/src/i18n/locales/vi/settings.json

@@ -309,7 +309,7 @@
 		"searchProviderPlaceholder": "Tìm kiếm nhà cung cấp",
 		"noProviderMatchFound": "Không tìm thấy nhà cung cấp",
 		"noMatchFound": "Không tìm thấy hồ sơ phù hợp",
-		"retiredProviderMessage": "Xin lỗi, nhà cung cấp này không còn được hỗ trợ. Chúng tôi nhận thấy rất ít người dùng Roo thực sự sử dụng nó và chúng tôi cần thu hẹp phạm vi mã nguồn để tiếp tục phát triển nhanh và phục vụ tốt cộng đồng. Đây là một quyết định thực sự khó khăn nhưng nó cho phép chúng tôi tập trung vào điều quan trọng nhất với bạn. Chúng tôi biết điều này thật phiền, xin thông cảm.",
+		"retiredProviderMessage": "Xin lỗi, nhà cung cấp này không còn được hỗ trợ nữa. Chúng tôi thấy rất ít người dùng Roo thực sự sử dụng nó và chúng tôi cần giảm phạm vi mã nguồn để có thể tiếp tục phát triển nhanh và phục vụ cộng đồng tốt hơn. Đây là một quyết định rất khó khăn nhưng nó cho phép chúng tôi tập trung vào những gì quan trọng nhất với bạn. Chúng tôi biết điều này thật phiền.",
 		"vscodeLmDescription": "API Mô hình Ngôn ngữ VS Code cho phép bạn chạy các mô hình được cung cấp bởi các tiện ích mở rộng khác của VS Code (bao gồm nhưng không giới hạn ở GitHub Copilot). Cách dễ nhất để bắt đầu là cài đặt các tiện ích mở rộng Copilot và Copilot Chat từ VS Code Marketplace.",
 		"awsCustomArnUse": "Nhập một ARN Amazon Bedrock hợp lệ cho mô hình bạn muốn sử dụng. Ví dụ về định dạng:",
 		"awsCustomArnDesc": "Đảm bảo rằng vùng trong ARN khớp với vùng AWS đã chọn ở trên.",
@@ -378,10 +378,29 @@
 		"vertex1MContextBetaDescription": "Mở rộng cửa sổ ngữ cảnh lên 1 triệu token cho Claude Sonnet 4 / 4.5 / Claude Opus 4.6",
 		"basetenApiKey": "Khóa API Baseten",
 		"getBasetenApiKey": "Lấy khóa API Baseten",
+		"cerebrasApiKey": "Khóa API Cerebras",
+		"getCerebrasApiKey": "Lấy khóa API Cerebras",
+		"azureResourceName": "Tên tài nguyên Azure",
+		"azureResourceNameDescription": "Tên tài nguyên Azure AI Foundry của bạn (ví dụ: 'my-openai-resource').",
+		"azureDeploymentName": "Tên triển khai Azure",
+		"azureDeploymentNameDescription": "Tên triển khai mô hình trong tài nguyên.",
+		"azureApiKey": "Khóa API Azure",
+		"getAzureApiKey": "Lấy quyền truy cập Azure AI Foundry",
+		"azureApiVersion": "Phiên bản API Azure",
+		"azureApiVersionDescription": "Phiên bản API để sử dụng (ví dụ: '2024-10-21'). Để trống để dùng giá trị mặc định.",
+		"chutesApiKey": "Khóa API Chutes",
+		"getChutesApiKey": "Lấy khóa API Chutes",
 		"fireworksApiKey": "Khóa API Fireworks",
 		"getFireworksApiKey": "Lấy khóa API Fireworks",
+		"featherlessApiKey": "Khóa API Featherless",
+		"getFeatherlessApiKey": "Lấy khóa API Featherless",
+		"ioIntelligenceApiKey": "Khóa API IO Intelligence",
+		"ioIntelligenceApiKeyPlaceholder": "Nhập khóa API IO Intelligence của bạn",
+		"getIoIntelligenceApiKey": "Lấy khóa API IO Intelligence",
 		"deepSeekApiKey": "Khóa API DeepSeek",
 		"getDeepSeekApiKey": "Lấy khóa API DeepSeek",
+		"doubaoApiKey": "Khóa API Doubao",
+		"getDoubaoApiKey": "Lấy khóa API Doubao",
 		"moonshotApiKey": "Khóa API Moonshot",
 		"getMoonshotApiKey": "Lấy khóa API Moonshot",
 		"moonshotBaseUrl": "Điểm vào Moonshot",
@@ -393,8 +412,23 @@
 		"getMiniMaxApiKey": "Lấy khóa API MiniMax",
 		"minimaxBaseUrl": "Điểm vào MiniMax",
 		"geminiApiKey": "Khóa API Gemini",
+		"getGroqApiKey": "Lấy khóa API Groq",
+		"groqApiKey": "Khóa API Groq",
 		"getSambaNovaApiKey": "Lấy khóa API SambaNova",
 		"sambaNovaApiKey": "Khóa API SambaNova",
+		"getHuggingFaceApiKey": "Lấy Khóa API Hugging Face",
+		"huggingFaceApiKey": "Khóa API Hugging Face",
+		"huggingFaceModelId": "ID Mô hình",
+		"huggingFaceLoading": "Đang tải...",
+		"huggingFaceModelsCount": "({{count}} mô hình)",
+		"huggingFaceSelectModel": "Chọn một mô hình...",
+		"huggingFaceSearchModels": "Tìm kiếm mô hình...",
+		"huggingFaceNoModelsFound": "Không tìm thấy mô hình",
+		"huggingFaceProvider": "Nhà cung cấp",
+		"huggingFaceProviderAuto": "Tự động",
+		"huggingFaceSelectProvider": "Chọn một nhà cung cấp...",
+		"huggingFaceSearchProviders": "Tìm kiếm nhà cung cấp...",
+		"huggingFaceNoProvidersFound": "Không tìm thấy nhà cung cấp",
 		"getGeminiApiKey": "Lấy khóa API Gemini",
 		"openAiApiKey": "Khóa API OpenAI",
 		"apiKey": "Khóa API",
@@ -466,6 +500,10 @@
 			"description": "Ollama cho phép bạn chạy các mô hình cục bộ trên máy tính của bạn. Để biết hướng dẫn về cách bắt đầu, xem hướng dẫn nhanh của họ.",
 			"warning": "Lưu ý: Roo Code sử dụng các lời nhắc phức tạp và hoạt động tốt nhất với các mô hình Claude. Các mô hình kém mạnh hơn có thể không hoạt động như mong đợi."
 		},
+		"unboundApiKey": "Khóa API Unbound",
+		"getUnboundApiKey": "Lấy khóa API Unbound",
+		"unboundRefreshModelsSuccess": "Đã cập nhật danh sách mô hình! Bây giờ bạn có thể chọn từ các mô hình mới nhất.",
+		"unboundInvalidApiKey": "Khóa API không hợp lệ. Vui lòng kiểm tra khóa API của bạn và thử lại.",
 		"roo": {
 			"authenticatedMessage": "Đã xác thực an toàn thông qua tài khoản Roo Code Cloud của bạn.",
 			"connectButton": "Kết nối với Roo Code Cloud"
@@ -921,7 +959,9 @@
 		"providerNotAllowed": "Nhà cung cấp '{{provider}}' không được phép bởi tổ chức của bạn",
 		"modelNotAllowed": "Mô hình '{{model}}' không được phép cho nhà cung cấp '{{provider}}' bởi tổ chức của bạn",
 		"profileInvalid": "Hồ sơ này chứa một nhà cung cấp hoặc mô hình không được phép bởi tổ chức của bạn",
-		"qwenCodeOauthPath": "Bạn phải cung cấp đường dẫn thông tin xác thực OAuth hợp lệ"
+		"qwenCodeOauthPath": "Bạn phải cung cấp đường dẫn thông tin xác thực OAuth hợp lệ",
+		"azureResourceName": "Bạn phải cung cấp tên tài nguyên Azure.",
+		"azureDeploymentName": "Bạn phải cung cấp tên triển khai Azure."
 	},
 	"placeholders": {
 		"apiKey": "Nhập khóa API...",
@@ -934,6 +974,8 @@
 		"projectId": "Nhập ID dự án...",
 		"customArn": "Nhập ARN (vd: arn:aws:bedrock:us-east-1:123456789012:foundation-model/my-model)",
 		"baseUrl": "Nhập URL cơ sở...",
+		"azureResourceName": "vd: my-openai-resource",
+		"azureDeploymentName": "vd: gpt-4o",
 		"modelId": {
 			"lmStudio": "vd: meta-llama-3.1-8b-instruct",
 			"lmStudioDraft": "vd: lmstudio-community/llama-3.2-1b-instruct",

+ 44 - 2
webview-ui/src/i18n/locales/zh-CN/settings.json

@@ -309,7 +309,7 @@
 		"searchProviderPlaceholder": "搜索提供商",
 		"noProviderMatchFound": "未找到提供商",
 		"noMatchFound": "未找到匹配的配置文件",
-		"retiredProviderMessage": "抱歉,此供应商已不再受支持。我们发现实际使用它的 Roo 用户非常少,我们需要缩减代码库的范围,以便继续快速交付并更好地服务社区。这是一个非常艰难的决定,但它让我们专注于对你最重要的事情。我们知道这很遗憾。",
+		"retiredProviderMessage": "抱歉,该提供商已不再受支持。我们发现实际使用该提供商的 Roo 用户非常少,我们需要缩减代码库的覆盖范围,以便继续快速开发并更好地服务社区。这是一个非常艰难的决定,但它让我们专注于对你最重要的事情。我们知道这很遗憾。",
 		"vscodeLmDescription": "VS Code 语言模型 API 允许您运行由其他 VS Code 扩展(包括但不限于 GitHub Copilot)提供的模型。最简单的方法是从 VS Code 市场安装 Copilot 和 Copilot Chat 扩展。",
 		"awsCustomArnUse": "请输入有效的 Amazon Bedrock ARN(Amazon资源名称),格式示例:",
 		"awsCustomArnDesc": "请确保ARN中的区域与上方选择的AWS区域一致。",
@@ -378,10 +378,29 @@
 		"vertex1MContextBetaDescription": "为 Claude Sonnet 4 / 4.5 / Claude Opus 4.6 将上下文窗口扩展至 100 万个 token",
 		"basetenApiKey": "Baseten API 密钥",
 		"getBasetenApiKey": "获取 Baseten API 密钥",
+		"cerebrasApiKey": "Cerebras API 密钥",
+		"getCerebrasApiKey": "获取 Cerebras API 密钥",
+		"azureResourceName": "Azure 资源名称",
+		"azureResourceNameDescription": "您的 Azure AI Foundry 资源名称(例如:'my-openai-resource')。",
+		"azureDeploymentName": "Azure 部署名称",
+		"azureDeploymentNameDescription": "资源中的模型部署名称。",
+		"azureApiKey": "Azure API 密钥",
+		"getAzureApiKey": "获取 Azure AI Foundry 访问权限",
+		"azureApiVersion": "Azure API 版本",
+		"azureApiVersionDescription": "要使用的 API 版本(例如:'2024-10-21')。留空使用默认值。",
+		"chutesApiKey": "Chutes API 密钥",
+		"getChutesApiKey": "获取 Chutes API 密钥",
 		"fireworksApiKey": "Fireworks API 密钥",
 		"getFireworksApiKey": "获取 Fireworks API 密钥",
+		"featherlessApiKey": "Featherless API 密钥",
+		"getFeatherlessApiKey": "获取 Featherless API 密钥",
+		"ioIntelligenceApiKey": "IO Intelligence API 密钥",
+		"ioIntelligenceApiKeyPlaceholder": "输入您的 IO Intelligence API 密钥",
+		"getIoIntelligenceApiKey": "获取 IO Intelligence API 密钥",
 		"deepSeekApiKey": "DeepSeek API 密钥",
 		"getDeepSeekApiKey": "获取 DeepSeek API 密钥",
+		"doubaoApiKey": "豆包 API 密钥",
+		"getDoubaoApiKey": "获取豆包 API 密钥",
 		"moonshotApiKey": "Moonshot API 密钥",
 		"getMoonshotApiKey": "获取 Moonshot API 密钥",
 		"moonshotBaseUrl": "Moonshot 服务站点",
@@ -393,8 +412,23 @@
 		"zaiEntrypoint": "Z AI 服务站点",
 		"zaiEntrypointDescription": "请根据您的位置选择适当的 API 服务站点。如果您在中国,请选择 open.bigmodel.cn。否则,请选择 api.z.ai。",
 		"geminiApiKey": "Gemini API 密钥",
+		"getGroqApiKey": "获取 Groq API 密钥",
+		"groqApiKey": "Groq API 密钥",
 		"getSambaNovaApiKey": "获取 SambaNova API 密钥",
 		"sambaNovaApiKey": "SambaNova API 密钥",
+		"getHuggingFaceApiKey": "获取 Hugging Face API 密钥",
+		"huggingFaceApiKey": "Hugging Face API 密钥",
+		"huggingFaceModelId": "模型 ID",
+		"huggingFaceLoading": "加载中...",
+		"huggingFaceModelsCount": "({{count}} 个模型)",
+		"huggingFaceSelectModel": "选择模型...",
+		"huggingFaceSearchModels": "搜索模型...",
+		"huggingFaceNoModelsFound": "未找到模型",
+		"huggingFaceProvider": "提供商",
+		"huggingFaceProviderAuto": "自动",
+		"huggingFaceSelectProvider": "选择提供商...",
+		"huggingFaceSearchProviders": "搜索提供商...",
+		"huggingFaceNoProvidersFound": "未找到提供商",
 		"getGeminiApiKey": "获取 Gemini API 密钥",
 		"openAiApiKey": "OpenAI API 密钥",
 		"apiKey": "API 密钥",
@@ -466,6 +500,10 @@
 			"description": "Ollama 允许您在本地计算机上运行模型。有关如何开始使用的说明,请参阅其快速入门指南。",
 			"warning": "注意:Roo Code 使用复杂的提示,与 Claude 模型配合最佳。功能较弱的模型可能无法按预期工作。"
 		},
+		"unboundApiKey": "Unbound API 密钥",
+		"getUnboundApiKey": "获取 Unbound API 密钥",
+		"unboundRefreshModelsSuccess": "模型列表已更新!您现在可以从最新模型中选择。",
+		"unboundInvalidApiKey": "无效的 API 密钥。请检查您的 API 密钥并重试。",
 		"roo": {
 			"authenticatedMessage": "已通过 Roo Code Cloud 账户安全认证。",
 			"connectButton": "连接到 Roo Code Cloud"
@@ -921,7 +959,9 @@
 		"providerNotAllowed": "提供商 '{{provider}}' 不允许用于您的组织",
 		"modelNotAllowed": "模型 '{{model}}' 不允许用于提供商 '{{provider}}',您的组织不允许",
 		"profileInvalid": "此配置文件包含您的组织不允许的提供商或模型",
-		"qwenCodeOauthPath": "您必须提供有效的 OAuth 凭证路径"
+		"qwenCodeOauthPath": "您必须提供有效的 OAuth 凭证路径",
+		"azureResourceName": "您必须提供 Azure 资源名称。",
+		"azureDeploymentName": "您必须提供 Azure 部署名称。"
 	},
 	"placeholders": {
 		"apiKey": "请输入 API 密钥...",
@@ -934,6 +974,8 @@
 		"projectId": "请输入项目 ID...",
 		"customArn": "请输入 ARN(例:arn:aws:bedrock:us-east-1:123456789012:foundation-model/my-model)",
 		"baseUrl": "请输入基础 URL...",
+		"azureResourceName": "例:my-openai-resource",
+		"azureDeploymentName": "例:gpt-4o",
 		"modelId": {
 			"lmStudio": "例:meta-llama-3.1-8b-instruct",
 			"lmStudioDraft": "例:lmstudio-community/llama-3.2-1b-instruct",

+ 44 - 2
webview-ui/src/i18n/locales/zh-TW/settings.json

@@ -323,7 +323,7 @@
 		"searchProviderPlaceholder": "搜尋供應商",
 		"noProviderMatchFound": "找不到供應商",
 		"noMatchFound": "找不到符合的設定檔",
-		"retiredProviderMessage": "抱歉,此供應商已不再受支援。我們發現實際使用它的 Roo 使用者非常少,我們需要縮減程式碼庫的範圍,以便繼續快速交付並更好地服務社群。這是一個非常艱難的決定,但它讓我們專注於對你最重要的事情。我們知道這很遺憾。",
+		"retiredProviderMessage": "抱歉,此供應商已不再受支援。我們發現實際使用此供應商的 Roo 使用者非常少,我們需要縮減程式碼庫的範圍,以便持續快速開發並更好地服務社群。這是一個非常艱難的決定,但它讓我們專注於對你最重要的事情。我們知道這很遺憾。",
 		"vscodeLmDescription": "VS Code 語言模型 API 可以讓您使用其他擴充功能(如 GitHub Copilot)提供的模型。最簡單的方式是從 VS Code Marketplace 安裝 Copilot 和 Copilot Chat 擴充套件。",
 		"awsCustomArnUse": "輸入您要使用的模型的有效 Amazon Bedrock ARN。格式範例:",
 		"awsCustomArnDesc": "確保 ARN 中的區域與您上面選擇的 AWS 區域相符。",
@@ -392,10 +392,29 @@
 		"vertex1MContextBetaDescription": "為 Claude Sonnet 4 / 4.5 / Claude Opus 4.6 將上下文視窗擴展至 100 萬個 token",
 		"basetenApiKey": "Baseten API 金鑰",
 		"getBasetenApiKey": "取得 Baseten API 金鑰",
+		"cerebrasApiKey": "Cerebras API 金鑰",
+		"getCerebrasApiKey": "取得 Cerebras API 金鑰",
+		"azureResourceName": "Azure 資源名稱",
+		"azureResourceNameDescription": "您的 Azure AI Foundry 資源名稱(例如:'my-openai-resource')。",
+		"azureDeploymentName": "Azure 部署名稱",
+		"azureDeploymentNameDescription": "資源中的模型部署名稱。",
+		"azureApiKey": "Azure API 金鑰",
+		"getAzureApiKey": "取得 Azure AI Foundry 存取權限",
+		"azureApiVersion": "Azure API 版本",
+		"azureApiVersionDescription": "要使用的 API 版本(例如:'2024-10-21')。留空使用預設值。",
+		"chutesApiKey": "Chutes API 金鑰",
+		"getChutesApiKey": "取得 Chutes API 金鑰",
 		"fireworksApiKey": "Fireworks API 金鑰",
 		"getFireworksApiKey": "取得 Fireworks API 金鑰",
+		"featherlessApiKey": "Featherless API 金鑰",
+		"getFeatherlessApiKey": "取得 Featherless API 金鑰",
+		"ioIntelligenceApiKey": "IO Intelligence API 金鑰",
+		"ioIntelligenceApiKeyPlaceholder": "輸入您的 IO Intelligence API 金鑰",
+		"getIoIntelligenceApiKey": "取得 IO Intelligence API 金鑰",
 		"deepSeekApiKey": "DeepSeek API 金鑰",
 		"getDeepSeekApiKey": "取得 DeepSeek API 金鑰",
+		"doubaoApiKey": "豆包 API 金鑰",
+		"getDoubaoApiKey": "取得豆包 API 金鑰",
 		"moonshotApiKey": "Moonshot API 金鑰",
 		"getMoonshotApiKey": "取得 Moonshot API 金鑰",
 		"moonshotBaseUrl": "Moonshot 服務端點",
@@ -407,8 +426,23 @@
 		"zaiEntrypoint": "Z AI 服務端點",
 		"zaiEntrypointDescription": "請根據您的位置選擇適當的 API 服務端點。如果您在中國,請選擇 open.bigmodel.cn。否則,請選擇 api.z.ai。",
 		"geminiApiKey": "Gemini API 金鑰",
+		"getGroqApiKey": "取得 Groq API 金鑰",
+		"groqApiKey": "Groq API 金鑰",
 		"getSambaNovaApiKey": "取得 SambaNova API 金鑰",
 		"sambaNovaApiKey": "SambaNova API 金鑰",
+		"getHuggingFaceApiKey": "取得 Hugging Face API 金鑰",
+		"huggingFaceApiKey": "Hugging Face API 金鑰",
+		"huggingFaceModelId": "模型 ID",
+		"huggingFaceLoading": "載入中...",
+		"huggingFaceModelsCount": "({{count}} 個模型)",
+		"huggingFaceSelectModel": "選擇模型...",
+		"huggingFaceSearchModels": "搜尋模型...",
+		"huggingFaceNoModelsFound": "找不到模型",
+		"huggingFaceProvider": "供應商",
+		"huggingFaceProviderAuto": "自動",
+		"huggingFaceSelectProvider": "選擇供應商...",
+		"huggingFaceSearchProviders": "搜尋供應商...",
+		"huggingFaceNoProvidersFound": "找不到供應商",
 		"getGeminiApiKey": "取得 Gemini API 金鑰",
 		"openAiApiKey": "OpenAI API 金鑰",
 		"apiKey": "API 金鑰",
@@ -480,6 +514,10 @@
 			"description": "Ollama 允許您在本機電腦執行模型。請參閱快速入門指南。",
 			"warning": "注意:Roo Code 使用複雜提示詞,與 Claude 模型搭配最佳。功能較弱的模型可能無法正常運作。"
 		},
+		"unboundApiKey": "Unbound API 金鑰",
+		"getUnboundApiKey": "取得 Unbound API 金鑰",
+		"unboundRefreshModelsSuccess": "模型列表已更新!您現在可以從最新模型中選擇。",
+		"unboundInvalidApiKey": "無效的 API 金鑰。請檢查您的 API 金鑰並重試。",
 		"roo": {
 			"authenticatedMessage": "已透過 Roo Code Cloud 帳戶安全認證。",
 			"connectButton": "連線到 Roo Code Cloud"
@@ -935,7 +973,9 @@
 		"providerNotAllowed": "供應商 '{{provider}}' 不允許用於您的組織。",
 		"modelNotAllowed": "模型 '{{model}}' 不允許用於供應商 '{{provider}}',此設定已被組織禁止",
 		"profileInvalid": "此設定檔包含您的組織不允許的供應商或模型",
-		"qwenCodeOauthPath": "您必須提供有效的 OAuth 憑證路徑"
+		"qwenCodeOauthPath": "您必須提供有效的 OAuth 憑證路徑",
+		"azureResourceName": "您必須提供 Azure 資源名稱。",
+		"azureDeploymentName": "您必須提供 Azure 部署名稱。"
 	},
 	"placeholders": {
 		"apiKey": "請輸入 API 金鑰...",
@@ -948,6 +988,8 @@
 		"projectId": "請輸入專案 ID...",
 		"customArn": "請輸入 ARN(例:arn:aws:bedrock:us-east-1:123456789012:foundation-model/my-model)",
 		"baseUrl": "請輸入基礎 URL...",
+		"azureResourceName": "例:my-openai-resource",
+		"azureDeploymentName": "例:gpt-4o",
 		"modelId": {
 			"lmStudio": "例:meta-llama-3.1-8b-instruct",
 			"lmStudioDraft": "例:lmstudio-community/llama-3.2-1b-instruct",

+ 17 - 0
webview-ui/src/utils/validate.ts

@@ -123,6 +123,23 @@ function validateModelsAndKeysProvided(apiConfiguration: ProviderSettings): stri
 				return i18next.t("settings:validation.apiKey")
 			}
 			break
+		case "azure":
+			// Don't show validation errors when provider is freshly selected (all fields empty)
+			if (
+				!apiConfiguration.azureApiKey &&
+				!apiConfiguration.azureResourceName &&
+				!apiConfiguration.azureDeploymentName
+			) {
+				break
+			}
+			// API key is optional — Azure supports managed identity / Entra ID auth
+			if (!apiConfiguration.azureResourceName) {
+				return i18next.t("settings:validation.azureResourceName")
+			}
+			if (!apiConfiguration.azureDeploymentName) {
+				return i18next.t("settings:validation.azureDeploymentName")
+			}
+			break
 	}
 
 	return undefined