Просмотр исходного кода

Merge pull request #1723 from feifei325/i18n/fix_missing_settings_and_api

Fix internationalization issues on settings and prompt pages, and resolve rules file accessibility
Matt Rubens 9 месяцев назад
Родитель
Commit
e2251a3453
40 измененных файлов с 1734 добавлено и 214 удалено
  1. 83 8
      webview-ui/src/components/prompts/PromptsView.tsx
  2. 122 131
      webview-ui/src/components/settings/ApiOptions.tsx
  3. 0 0
      webview-ui/src/i18n/locales/ar/prompts.json
  4. 0 0
      webview-ui/src/i18n/locales/ar/settings.json
  5. 3 3
      webview-ui/src/i18n/locales/ca/prompts.json
  6. 106 2
      webview-ui/src/i18n/locales/ca/settings.json
  7. 0 0
      webview-ui/src/i18n/locales/cs/prompts.json
  8. 0 0
      webview-ui/src/i18n/locales/cs/settings.json
  9. 3 3
      webview-ui/src/i18n/locales/de/prompts.json
  10. 105 1
      webview-ui/src/i18n/locales/de/settings.json
  11. 3 3
      webview-ui/src/i18n/locales/en/prompts.json
  12. 105 1
      webview-ui/src/i18n/locales/en/settings.json
  13. 3 3
      webview-ui/src/i18n/locales/es/prompts.json
  14. 105 1
      webview-ui/src/i18n/locales/es/settings.json
  15. 3 3
      webview-ui/src/i18n/locales/fr/prompts.json
  16. 105 1
      webview-ui/src/i18n/locales/fr/settings.json
  17. 3 3
      webview-ui/src/i18n/locales/hi/prompts.json
  18. 105 1
      webview-ui/src/i18n/locales/hi/settings.json
  19. 0 0
      webview-ui/src/i18n/locales/hu/prompts.json
  20. 0 0
      webview-ui/src/i18n/locales/hu/settings.json
  21. 3 3
      webview-ui/src/i18n/locales/it/prompts.json
  22. 105 1
      webview-ui/src/i18n/locales/it/settings.json
  23. 3 3
      webview-ui/src/i18n/locales/ja/prompts.json
  24. 108 5
      webview-ui/src/i18n/locales/ja/settings.json
  25. 3 3
      webview-ui/src/i18n/locales/ko/prompts.json
  26. 109 6
      webview-ui/src/i18n/locales/ko/settings.json
  27. 5 5
      webview-ui/src/i18n/locales/pl/prompts.json
  28. 105 1
      webview-ui/src/i18n/locales/pl/settings.json
  29. 10 10
      webview-ui/src/i18n/locales/pt-BR/prompts.json
  30. 105 1
      webview-ui/src/i18n/locales/pt-BR/settings.json
  31. 0 0
      webview-ui/src/i18n/locales/pt/prompts.json
  32. 0 0
      webview-ui/src/i18n/locales/pt/settings.json
  33. 0 0
      webview-ui/src/i18n/locales/ru/prompts.json
  34. 0 0
      webview-ui/src/i18n/locales/ru/settings.json
  35. 3 3
      webview-ui/src/i18n/locales/tr/prompts.json
  36. 105 1
      webview-ui/src/i18n/locales/tr/settings.json
  37. 3 3
      webview-ui/src/i18n/locales/zh-CN/prompts.json
  38. 105 1
      webview-ui/src/i18n/locales/zh-CN/settings.json
  39. 3 3
      webview-ui/src/i18n/locales/zh-TW/prompts.json
  40. 105 1
      webview-ui/src/i18n/locales/zh-TW/settings.json

+ 83 - 8
webview-ui/src/components/prompts/PromptsView.tsx

@@ -27,6 +27,7 @@ import { vscode } from "../../utils/vscode"
 import { Tab, TabContent, TabHeader } from "../common/Tab"
 import i18next from "i18next"
 import { useAppTranslation } from "../../i18n/TranslationContext"
+import { Trans } from "react-i18next"
 
 // Get all available groups that should show in prompts view
 const availableGroups = (Object.keys(TOOL_GROUPS) as ToolGroup[]).filter((group) => !TOOL_GROUPS[group].alwaysAvailable)
@@ -781,10 +782,38 @@ const PromptsView = ({ onDone }: PromptsViewProps) => {
 								color: "var(--vscode-descriptionForeground)",
 								marginTop: "5px",
 							}}>
-							{t("prompts:customInstructions.loadFromFile", {
-								modeName: getCurrentMode()?.name || "Code",
-								modeSlug: getCurrentMode()?.slug || "code",
-							})}
+							<Trans
+								i18nKey="prompts:customInstructions.loadFromFile"
+								values={{
+									mode: getCurrentMode()?.name || "Code",
+									slug: getCurrentMode()?.slug || "code",
+								}}
+								components={{
+									span: (
+										<span
+											style={{
+												color: "var(--vscode-textLink-foreground)",
+												cursor: "pointer",
+												textDecoration: "underline",
+											}}
+											onClick={() => {
+												const currentMode = getCurrentMode()
+												if (!currentMode) return
+
+												// Open or create an empty file
+												vscode.postMessage({
+													type: "openFile",
+													text: `./.clinerules-${currentMode.slug}`,
+													values: {
+														create: true,
+														content: "",
+													},
+												})
+											}}
+										/>
+									),
+								}}
+							/>
 						</div>
 					</div>
 				</div>
@@ -866,9 +895,32 @@ const PromptsView = ({ onDone }: PromptsViewProps) => {
 
 						{isSystemPromptDisclosureOpen && (
 							<div className="text-xs text-vscode-descriptionForeground mt-2 ml-5">
-								{t("prompts:advancedSystemPrompt.description", {
-									modeSlug: getCurrentMode()?.slug || "code",
-								})}
+								<Trans
+									i18nKey="prompts:advancedSystemPrompt.description"
+									values={{
+										slug: getCurrentMode()?.slug || "code",
+									}}
+									components={{
+										span: (
+											<span
+												className="text-vscode-textLink-foreground cursor-pointer underline"
+												onClick={() => {
+													const currentMode = getCurrentMode()
+													if (!currentMode) return
+
+													vscode.postMessage({
+														type: "openFile",
+														text: `./.roo/system-prompt-${currentMode.slug}`,
+														values: {
+															create: true,
+															content: "",
+														},
+													})
+												}}
+											/>
+										),
+									}}
+								/>
 							</div>
 						)}
 					</div>
@@ -900,7 +952,30 @@ const PromptsView = ({ onDone }: PromptsViewProps) => {
 						data-testid="global-custom-instructions-textarea"
 					/>
 					<div className="text-xs text-vscode-descriptionForeground mt-1.5 mb-10">
-						{t("prompts:globalCustomInstructions.loadFromFile")}
+						<Trans
+							i18nKey="prompts:globalCustomInstructions.loadFromFile"
+							components={{
+								span: (
+									<span
+										style={{
+											color: "var(--vscode-textLink-foreground)",
+											cursor: "pointer",
+											textDecoration: "underline",
+										}}
+										onClick={() =>
+											vscode.postMessage({
+												type: "openFile",
+												text: "./.clinerules",
+												values: {
+													create: true,
+													content: "",
+												},
+											})
+										}
+									/>
+								),
+							}}
+						/>
 					</div>
 				</div>
 

+ 122 - 131
webview-ui/src/components/settings/ApiOptions.tsx

@@ -412,12 +412,12 @@ const ApiOptions = ({
 						value={apiConfiguration?.requestyApiKey || ""}
 						type="password"
 						onInput={handleInputChange("requestyApiKey")}
-						placeholder="Enter API Key..."
+						placeholder={t("settings:providers.getRequestyApiKey")}
 						className="w-full">
-						<span className="font-medium">Requesty API Key</span>
+						<span className="font-medium">{t("settings:providers.requestyApiKey")}</span>
 					</VSCodeTextField>
 					<div className="text-sm text-vscode-descriptionForeground -mt-2">
-						This key is stored locally and only used to make API requests from this extension.
+						{t("settings:providers.apiKeyStorageNotice")}
 					</div>
 				</>
 			)}
@@ -433,11 +433,11 @@ const ApiOptions = ({
 						<span className="font-medium">OpenAI API Key</span>
 					</VSCodeTextField>
 					<div className="text-sm text-vscode-descriptionForeground -mt-2">
-						This key is stored locally and only used to make API requests from this extension.
+						{t("settings:providers.apiKeyStorageNotice")}
 					</div>
 					{!apiConfiguration?.openAiNativeApiKey && (
 						<VSCodeButtonLink href="https://platform.openai.com/api-keys" appearance="secondary">
-							Get OpenAI API Key
+							{t("settings:providers.getOpenAiApiKey")}
 						</VSCodeButtonLink>
 					)}
 				</>
@@ -451,14 +451,14 @@ const ApiOptions = ({
 						onInput={handleInputChange("mistralApiKey")}
 						placeholder="Enter API Key..."
 						className="w-full">
-						<span className="font-medium">Mistral API Key</span>
+						<span className="font-medium">{t("settings:providers.mistralApiKey")}</span>
 					</VSCodeTextField>
 					<div className="text-sm text-vscode-descriptionForeground -mt-2">
-						This key is stored locally and only used to make API requests from this extension.
+						{t("settings:providers.apiKeyStorageNotice")}
 					</div>
 					{!apiConfiguration?.mistralApiKey && (
 						<VSCodeButtonLink href="https://console.mistral.ai/" appearance="secondary">
-							Get Mistral / Codestral API Key
+							{t("settings:providers.getMistralApiKey")}
 						</VSCodeButtonLink>
 					)}
 					{(apiConfiguration?.apiModelId?.startsWith("codestral-") ||
@@ -470,10 +470,10 @@ const ApiOptions = ({
 								onInput={handleInputChange("mistralCodestralUrl")}
 								placeholder="https://codestral.mistral.ai"
 								className="w-full">
-								<span className="font-medium">Codestral Base URL (Optional)</span>
+								<span className="font-medium">{t("settings:providers.codestralBaseUrl")}</span>
 							</VSCodeTextField>
 							<div className="text-sm text-vscode-descriptionForeground -mt-2">
-								Set an alternative URL for the Codestral model.
+								{t("settings:providers.codestralBaseUrlDesc")}
 							</div>
 						</>
 					)}
@@ -488,13 +488,11 @@ const ApiOptions = ({
 							"awsUseProfile",
 							(e) => (e.target as HTMLInputElement).value === "profile",
 						)}>
-						<VSCodeRadio value="credentials">AWS Credentials</VSCodeRadio>
-						<VSCodeRadio value="profile">AWS Profile</VSCodeRadio>
+						<VSCodeRadio value="credentials">{t("settings:providers.awsCredentials")}</VSCodeRadio>
+						<VSCodeRadio value="profile">{t("settings:providers.awsProfile")}</VSCodeRadio>
 					</VSCodeRadioGroup>
 					<div className="text-sm text-vscode-descriptionForeground -mt-3">
-						Authenticate by providing an access key and secret or use the default AWS credential providers,
-						i.e. ~/.aws/credentials or environment variables. These credentials are only used locally to
-						make API requests from this extension.
+						{t("settings:providers.apiKeyStorageNotice")}
 					</div>
 					{apiConfiguration?.awsUseProfile ? (
 						<VSCodeTextField
@@ -502,7 +500,7 @@ const ApiOptions = ({
 							onInput={handleInputChange("awsProfile")}
 							placeholder="Enter profile name"
 							className="w-full">
-							<span className="font-medium">AWS Profile Name</span>
+							<span className="font-medium">{t("settings:providers.awsProfileName")}</span>
 						</VSCodeTextField>
 					) : (
 						<>
@@ -512,7 +510,7 @@ const ApiOptions = ({
 								onInput={handleInputChange("awsAccessKey")}
 								placeholder="Enter Access Key..."
 								className="w-full">
-								<span className="font-medium">AWS Access Key</span>
+								<span className="font-medium">{t("settings:providers.awsAccessKey")}</span>
 							</VSCodeTextField>
 							<VSCodeTextField
 								value={apiConfiguration?.awsSecretKey || ""}
@@ -520,7 +518,7 @@ const ApiOptions = ({
 								onInput={handleInputChange("awsSecretKey")}
 								placeholder="Enter Secret Key..."
 								className="w-full">
-								<span className="font-medium">AWS Secret Key</span>
+								<span className="font-medium">{t("settings:providers.awsSecretKey")}</span>
 							</VSCodeTextField>
 							<VSCodeTextField
 								value={apiConfiguration?.awsSessionToken || ""}
@@ -528,13 +526,13 @@ const ApiOptions = ({
 								onInput={handleInputChange("awsSessionToken")}
 								placeholder="Enter Session Token..."
 								className="w-full">
-								<span className="font-medium">AWS Session Token</span>
+								<span className="font-medium">{t("settings:providers.awsSessionToken")}</span>
 							</VSCodeTextField>
 						</>
 					)}
 					<div className="dropdown-container">
 						<label htmlFor="aws-region-dropdown" className="font-medium">
-							AWS Region
+							{t("settings:providers.awsRegion")}
 						</label>
 						<Dropdown
 							id="aws-region-dropdown"
@@ -564,7 +562,7 @@ const ApiOptions = ({
 					<Checkbox
 						checked={apiConfiguration?.awsUseCrossRegionInference || false}
 						onChange={handleInputChange("awsUseCrossRegionInference", noTransform)}>
-						Use cross-region inference
+						{t("settings:providers.awsCrossRegion")}
 					</Checkbox>
 				</>
 			)}
@@ -572,27 +570,26 @@ const ApiOptions = ({
 			{selectedProvider === "vertex" && (
 				<>
 					<div className="text-sm text-vscode-descriptionForeground">
-						<div>To use Google Cloud Vertex AI, you need to:</div>
+						<div>{t("settings:providers.googleCloudSetup.title")}</div>
 						<div>
 							<VSCodeLink
 								href="https://cloud.google.com/vertex-ai/generative-ai/docs/partner-models/use-claude#before_you_begin"
 								className="text-sm">
-								1. Create a Google Cloud account, enable the Vertex AI API & enable the desired Claude
-								models.
+								{t("settings:providers.googleCloudSetup.step1")}
 							</VSCodeLink>
 						</div>
 						<div>
 							<VSCodeLink
 								href="https://cloud.google.com/docs/authentication/provide-credentials-adc#google-idp"
 								className="text-sm">
-								2. Install the Google Cloud CLI & configure application default credentials.
+								{t("settings:providers.googleCloudSetup.step2")}
 							</VSCodeLink>
 						</div>
 						<div>
 							<VSCodeLink
 								href="https://developers.google.com/workspace/guides/create-credentials?hl=en#service-account"
 								className="text-sm">
-								3. Or create a service account with credentials.
+								{t("settings:providers.googleCloudSetup.step3")}
 							</VSCodeLink>
 						</div>
 					</div>
@@ -601,25 +598,25 @@ const ApiOptions = ({
 						onInput={handleInputChange("vertexJsonCredentials")}
 						placeholder="Enter Credentials JSON..."
 						className="w-full">
-						<span className="font-medium">Google Cloud Credentials</span>
+						<span className="font-medium">{t("settings:providers.googleCloudCredentials")}</span>
 					</VSCodeTextField>
 					<VSCodeTextField
 						value={apiConfiguration?.vertexKeyFile || ""}
 						onInput={handleInputChange("vertexKeyFile")}
 						placeholder="Enter Key File Path..."
 						className="w-full">
-						<span className="font-medium">Google Cloud Key File Path</span>
+						<span className="font-medium">{t("settings:providers.googleCloudKeyFile")}</span>
 					</VSCodeTextField>
 					<VSCodeTextField
 						value={apiConfiguration?.vertexProjectId || ""}
 						onInput={handleInputChange("vertexProjectId")}
 						placeholder="Enter Project ID..."
 						className="w-full">
-						<span className="font-medium">Google Cloud Project ID</span>
+						<span className="font-medium">{t("settings:providers.googleCloudProjectId")}</span>
 					</VSCodeTextField>
 					<div className="dropdown-container">
 						<label htmlFor="vertex-region-dropdown" className="font-medium">
-							Google Cloud Region
+							{t("settings:providers.googleCloudRegion")}
 						</label>
 						<Dropdown
 							id="vertex-region-dropdown"
@@ -647,14 +644,14 @@ const ApiOptions = ({
 						onInput={handleInputChange("geminiApiKey")}
 						placeholder="Enter API Key..."
 						className="w-full">
-						<span className="font-medium">Gemini API Key</span>
+						<span className="font-medium">{t("settings:providers.geminiApiKey")}</span>
 					</VSCodeTextField>
 					<div className="text-sm text-vscode-descriptionForeground -mt-2">
-						This key is stored locally and only used to make API requests from this extension.
+						{t("settings:providers.apiKeyStorageNotice")}
 					</div>
 					{!apiConfiguration?.geminiApiKey && (
 						<VSCodeButtonLink href="https://ai.google.dev/" appearance="secondary">
-							Get Gemini API Key
+							{t("settings:providers.getGeminiApiKey")}
 						</VSCodeButtonLink>
 					)}
 					<div>
@@ -667,7 +664,7 @@ const ApiOptions = ({
 									setApiConfigurationField("googleGeminiBaseUrl", "")
 								}
 							}}>
-							Use custom base URL
+							{t("settings:providers.useCustomBaseUrl")}
 						</Checkbox>
 						{googleGeminiBaseUrlSelected && (
 							<VSCodeTextField
@@ -690,7 +687,7 @@ const ApiOptions = ({
 						onInput={handleInputChange("openAiBaseUrl")}
 						placeholder={"Enter base URL..."}
 						className="w-full">
-						<span className="font-medium">Base URL</span>
+						<span className="font-medium">{t("settings:providers.openAiBaseUrl")}</span>
 					</VSCodeTextField>
 					<VSCodeTextField
 						value={apiConfiguration?.openAiApiKey || ""}
@@ -698,7 +695,7 @@ const ApiOptions = ({
 						onInput={handleInputChange("openAiApiKey")}
 						placeholder="Enter API Key..."
 						className="w-full">
-						<span className="font-medium">API Key</span>
+						<span className="font-medium">{t("settings:providers.openAiApiKey")}</span>
 					</VSCodeTextField>
 					<ModelPicker
 						apiConfiguration={apiConfiguration}
@@ -714,12 +711,12 @@ const ApiOptions = ({
 					<Checkbox
 						checked={apiConfiguration?.openAiStreamingEnabled ?? true}
 						onChange={handleInputChange("openAiStreamingEnabled", noTransform)}>
-						Enable streaming
+						{t("settings:modelInfo.enableStreaming")}
 					</Checkbox>
 					<Checkbox
 						checked={apiConfiguration?.openAiUseAzure ?? false}
 						onChange={handleInputChange("openAiUseAzure", noTransform)}>
-						Use Azure
+						{t("settings:modelInfo.useAzure")}
 					</Checkbox>
 					<div>
 						<Checkbox
@@ -731,7 +728,7 @@ const ApiOptions = ({
 									setApiConfigurationField("azureApiVersion", "")
 								}
 							}}>
-							Set Azure API version
+							{t("settings:modelInfo.azureApiVersion")}
 						</Checkbox>
 						{azureApiVersionSelected && (
 							<VSCodeTextField
@@ -745,8 +742,7 @@ const ApiOptions = ({
 
 					<div className="flex flex-col gap-3">
 						<div className="text-sm text-vscode-descriptionForeground">
-							Configure the capabilities and pricing for your custom OpenAI-compatible model. Be careful
-							when specifying the model capabilities, as they can affect how Roo Code performs.
+							{t("settings:providers.customModel.capabilities")}
 						</div>
 
 						<div>
@@ -770,7 +766,7 @@ const ApiOptions = ({
 											: "var(--vscode-errorForeground)"
 									})(),
 								}}
-								title="Maximum number of tokens the model can generate in a single response"
+								title={t("settings:providers.customModel.maxTokens.description")}
 								onInput={handleInputChange("openAiCustomModelInfo", (e) => {
 									const value = parseInt((e.target as HTMLInputElement).value)
 
@@ -781,11 +777,12 @@ const ApiOptions = ({
 								})}
 								placeholder="e.g. 4096"
 								className="w-full">
-								<span className="font-medium">Max Output Tokens</span>
+								<span className="font-medium">
+									{t("settings:providers.customModel.maxTokens.label")}
+								</span>
 							</VSCodeTextField>
 							<div className="text-sm text-vscode-descriptionForeground">
-								Maximum number of tokens the model can generate in a response. (Specify -1 to allow the
-								server to set the max tokens.)
+								{t("settings:providers.customModel.maxTokens.description")}
 							</div>
 						</div>
 
@@ -810,7 +807,7 @@ const ApiOptions = ({
 											: "var(--vscode-errorForeground)"
 									})(),
 								}}
-								title="Total number of tokens (input + output) the model can process in a single request"
+								title={t("settings:providers.customModel.contextWindow.description")}
 								onInput={handleInputChange("openAiCustomModelInfo", (e) => {
 									const value = (e.target as HTMLInputElement).value
 									const parsed = parseInt(value)
@@ -824,10 +821,12 @@ const ApiOptions = ({
 								})}
 								placeholder="e.g. 128000"
 								className="w-full">
-								<span className="font-medium">Context Window Size</span>
+								<span className="font-medium">
+									{t("settings:providers.customModel.contextWindow.label")}
+								</span>
 							</VSCodeTextField>
 							<div className="text-sm text-vscode-descriptionForeground">
-								Total tokens (input + output) the model can process.
+								{t("settings:providers.customModel.contextWindow.description")}
 							</div>
 						</div>
 
@@ -844,16 +843,18 @@ const ApiOptions = ({
 											supportsImages: checked,
 										}
 									})}>
-									<span className="font-medium">Image Support</span>
+									<span className="font-medium">
+										{t("settings:providers.customModel.imageSupport.label")}
+									</span>
 								</Checkbox>
 								<i
 									className="codicon codicon-info text-vscode-descriptionForeground"
-									title="Enable if the model can process and understand images in the input. Required for image-based assistance and visual code understanding."
+									title={t("settings:providers.customModel.imageSupport.description")}
 									style={{ fontSize: "12px" }}
 								/>
 							</div>
 							<div className="text-sm text-vscode-descriptionForeground pt-1">
-								Is this model capable of processing and understanding images?
+								{t("settings:providers.customModel.imageSupport.description")}
 							</div>
 						</div>
 
@@ -867,16 +868,18 @@ const ApiOptions = ({
 											supportsComputerUse: checked,
 										}
 									})}>
-									<span className="font-medium">Computer Use</span>
+									<span className="font-medium">
+										{t("settings:providers.customModel.computerUse.label")}
+									</span>
 								</Checkbox>
 								<i
 									className="codicon codicon-info text-vscode-descriptionForeground"
-									title="Enable if the model can interact with your computer through commands and file operations. Required for automated tasks and file modifications."
+									title={t("settings:providers.customModel.computerUse.description")}
 									style={{ fontSize: "12px" }}
 								/>
 							</div>
 							<div className="text-sm text-vscode-descriptionForeground pt-1">
-								Is this model capable of interacting with a browser? (e.g. Claude 3.7 Sonnet).
+								{t("settings:providers.customModel.computerUse.description")}
 							</div>
 						</div>
 
@@ -890,16 +893,18 @@ const ApiOptions = ({
 											supportsPromptCache: checked,
 										}
 									})}>
-									<span className="font-medium">Prompt Caching</span>
+									<span className="font-medium">
+										{t("settings:providers.customModel.promptCache.label")}
+									</span>
 								</Checkbox>
 								<i
 									className="codicon codicon-info text-vscode-descriptionForeground"
-									title="Enable if the model supports prompt caching. This can improve performance and reduce costs."
+									title={t("settings:providers.customModel.promptCache.description")}
 									style={{ fontSize: "12px" }}
 								/>
 							</div>
 							<div className="text-sm text-vscode-descriptionForeground pt-1">
-								Is this model capable of caching prompts?
+								{t("settings:providers.customModel.promptCache.description")}
 							</div>
 						</div>
 
@@ -936,10 +941,12 @@ const ApiOptions = ({
 								placeholder="e.g. 0.0001"
 								className="w-full">
 								<div className="flex items-center gap-1">
-									<span className="font-medium">Input Price</span>
+									<span className="font-medium">
+										{t("settings:providers.customModel.pricing.input.label")}
+									</span>
 									<i
 										className="codicon codicon-info text-vscode-descriptionForeground"
-										title="Cost per million tokens in the input/prompt. This affects the cost of sending context and instructions to the model."
+										title={t("settings:providers.customModel.pricing.input.description")}
 										style={{ fontSize: "12px" }}
 									/>
 								</div>
@@ -979,10 +986,12 @@ const ApiOptions = ({
 								placeholder="e.g. 0.0002"
 								className="w-full">
 								<div className="flex items-center gap-1">
-									<span className="font-medium">Output Price</span>
+									<span className="font-medium">
+										{t("settings:providers.customModel.pricing.output.label")}
+									</span>
 									<i
 										className="codicon codicon-info text-vscode-descriptionForeground"
-										title="Cost per million tokens in the model's response. This affects the cost of generated content and completions."
+										title={t("settings:providers.customModel.pricing.output.description")}
 										style={{ fontSize: "12px" }}
 									/>
 								</div>
@@ -1023,10 +1032,14 @@ const ApiOptions = ({
 										placeholder="e.g. 0.0001"
 										className="w-full">
 										<div className="flex items-center gap-1">
-											<span className="font-medium">Cache Reads Price</span>
+											<span className="font-medium">
+												{t("settings:providers.customModel.pricing.cacheReads.label")}
+											</span>
 											<i
 												className="codicon codicon-info text-vscode-descriptionForeground"
-												title="Cost per million tokens for reading from the cache. This is the price charged when a cached response is retrieved."
+												title={t(
+													"settings:providers.customModel.pricing.cacheReads.description",
+												)}
 												style={{ fontSize: "12px" }}
 											/>
 										</div>
@@ -1064,10 +1077,14 @@ const ApiOptions = ({
 										placeholder="e.g. 0.00005"
 										className="w-full">
 										<div className="flex items-center gap-1">
-											<span className="font-medium">Cache Writes Price</span>
+											<span className="font-medium">
+												{t("settings:providers.customModel.pricing.cacheWrites.label")}
+											</span>
 											<i
 												className="codicon codicon-info text-vscode-descriptionForeground"
-												title="Cost per million tokens for writing to the cache. This is the price charged when a prompt is cached for the first time."
+												title={t(
+													"settings:providers.customModel.pricing.cacheWrites.description",
+												)}
 												style={{ fontSize: "12px" }}
 											/>
 										</div>
@@ -1081,7 +1098,7 @@ const ApiOptions = ({
 							onClick={() =>
 								setApiConfigurationField("openAiCustomModelInfo", openAiModelInfoSaneDefaults)
 							}>
-							Reset to Defaults
+							{t("settings:providers.customModel.resetDefaults")}
 						</Button>
 					</div>
 				</>
@@ -1095,14 +1112,14 @@ const ApiOptions = ({
 						onInput={handleInputChange("lmStudioBaseUrl")}
 						placeholder={"Default: http://localhost:1234"}
 						className="w-full">
-						<span className="font-medium">Base URL (optional)</span>
+						<span className="font-medium">{t("settings:providers.lmStudio.baseUrl")}</span>
 					</VSCodeTextField>
 					<VSCodeTextField
 						value={apiConfiguration?.lmStudioModelId || ""}
 						onInput={handleInputChange("lmStudioModelId")}
 						placeholder={"e.g. meta-llama-3.1-8b-instruct"}
 						className="w-full">
-						<span className="font-medium">Model ID</span>
+						<span className="font-medium">{t("settings:providers.lmStudio.modelId")}</span>
 					</VSCodeTextField>
 					{lmStudioModels.length > 0 && (
 						<VSCodeRadioGroup
@@ -1125,10 +1142,9 @@ const ApiOptions = ({
 					<Checkbox
 						checked={apiConfiguration?.lmStudioSpeculativeDecodingEnabled === true}
 						onChange={(checked) => {
-							// Explicitly set the boolean value using direct method.
 							setApiConfigurationField("lmStudioSpeculativeDecodingEnabled", checked)
 						}}>
-						Enable Speculative Decoding
+						{t("settings:providers.lmStudio.speculativeDecoding")}
 					</Checkbox>
 					{apiConfiguration?.lmStudioSpeculativeDecodingEnabled && (
 						<>
@@ -1138,16 +1154,17 @@ const ApiOptions = ({
 									onInput={handleInputChange("lmStudioDraftModelId")}
 									placeholder={"e.g. lmstudio-community/llama-3.2-1b-instruct"}
 									className="w-full">
-									<span className="font-medium">Draft Model ID</span>
+									<span className="font-medium">{t("settings:providers.lmStudio.draftModelId")}</span>
 								</VSCodeTextField>
 								<div className="text-sm text-vscode-descriptionForeground">
-									Draft model must be from the same model family for speculative decoding to work
-									correctly.
+									{t("settings:providers.lmStudio.draftModelDesc")}
 								</div>
 							</div>
 							{lmStudioModels.length > 0 && (
 								<>
-									<div className="font-medium">Select Draft Model</div>
+									<div className="font-medium">
+										{t("settings:providers.lmStudio.selectDraftModel")}
+									</div>
 									<VSCodeRadioGroup
 										value={
 											lmStudioModels.includes(apiConfiguration?.lmStudioDraftModelId || "")
@@ -1169,8 +1186,7 @@ const ApiOptions = ({
 												border: "1px solid var(--vscode-inputValidation-infoBorder)",
 												color: "var(--vscode-inputValidation-infoForeground)",
 											}}>
-											No draft models found. Please ensure LM Studio is running with Server Mode
-											enabled.
+											{t("settings:providers.lmStudio.noModelsFound")}
 										</div>
 									)}
 								</>
@@ -1178,15 +1194,18 @@ const ApiOptions = ({
 						</>
 					)}
 					<div className="text-sm text-vscode-descriptionForeground">
-						LM Studio allows you to run models locally on your computer. For instructions on how to get
-						started, see their <VSCodeLink href="https://lmstudio.ai/docs">quickstart guide</VSCodeLink>.
-						You will also need to start LM Studio's{" "}
-						<VSCodeLink href="https://lmstudio.ai/docs/basics/server">local server</VSCodeLink> feature to
-						use it with this extension.
-						<span className="text-vscode-errorForeground ml-1">
-							<span className="font-medium">Note:</span> Roo Code uses complex prompts and works best with
-							Claude models. Less capable models may not work as expected.
-						</span>
+						<Trans
+							i18nKey="settings:providers.lmStudio.description"
+							components={{
+								a: <VSCodeLink href="https://lmstudio.ai/docs" />,
+								b: <VSCodeLink href="https://lmstudio.ai/docs/basics/server" />,
+								span: (
+									<span className="text-vscode-errorForeground ml-1">
+										<span className="font-medium">Note:</span>
+									</span>
+								),
+							}}
+						/>
 					</div>
 				</>
 			)}
@@ -1199,14 +1218,14 @@ const ApiOptions = ({
 						onInput={handleInputChange("deepSeekApiKey")}
 						placeholder="Enter API Key..."
 						className="w-full">
-						<span className="font-medium">DeepSeek API Key</span>
+						<span className="font-medium">{t("settings:providers.deepSeekApiKey")}</span>
 					</VSCodeTextField>
 					<div className="text-sm text-vscode-descriptionForeground -mt-2">
-						This key is stored locally and only used to make API requests from this extension.
+						{t("settings:providers.apiKeyStorageNotice")}
 					</div>
 					{!apiConfiguration?.deepSeekApiKey && (
 						<VSCodeButtonLink href="https://platform.deepseek.com/" appearance="secondary">
-							Get DeepSeek API Key
+							{t("settings:providers.getDeepSeekApiKey")}
 						</VSCodeButtonLink>
 					)}
 				</>
@@ -1216,7 +1235,7 @@ const ApiOptions = ({
 				<>
 					<div className="dropdown-container">
 						<label htmlFor="vscode-lm-model" className="font-medium">
-							Language Model
+							{t("settings:providers.vscodeLmModel")}
 						</label>
 						{vsCodeLmModels.length > 0 ? (
 							<Select
@@ -1246,16 +1265,11 @@ const ApiOptions = ({
 							</Select>
 						) : (
 							<div className="text-sm text-vscode-descriptionForeground">
-								The VS Code Language Model API allows you to run models provided by other VS Code
-								extensions (including but not limited to GitHub Copilot). The easiest way to get started
-								is to install the Copilot and Copilot Chat extensions from the VS Code Marketplace.
+								{t("settings:providers.vscodeLmDescription")}
 							</div>
 						)}
 					</div>
-					<div className="text-sm text-vscode-errorForeground">
-						Note: This is a very experimental integration and provider support will vary. If you get an
-						error about a model not being supported, that's an issue on the provider's end.
-					</div>
+					<div className="text-sm text-vscode-errorForeground">{t("settings:providers.vscodeLmWarning")}</div>
 				</>
 			)}
 
@@ -1267,14 +1281,14 @@ const ApiOptions = ({
 						onInput={handleInputChange("ollamaBaseUrl")}
 						placeholder={"Default: http://localhost:11434"}
 						className="w-full">
-						<span className="font-medium">Base URL (optional)</span>
+						<span className="font-medium">{t("settings:providers.ollama.baseUrl")}</span>
 					</VSCodeTextField>
 					<VSCodeTextField
 						value={apiConfiguration?.ollamaModelId || ""}
 						onInput={handleInputChange("ollamaModelId")}
 						placeholder={"e.g. llama3.1"}
 						className="w-full">
-						<span className="font-medium">Model ID</span>
+						<span className="font-medium">{t("settings:providers.ollama.modelId")}</span>
 					</VSCodeTextField>
 					{ollamaModels.length > 0 && (
 						<VSCodeRadioGroup
@@ -1295,15 +1309,9 @@ const ApiOptions = ({
 						</VSCodeRadioGroup>
 					)}
 					<div className="text-sm text-vscode-descriptionForeground">
-						Ollama allows you to run models locally on your computer. For instructions on how to get
-						started, see their
-						<VSCodeLink href="https://github.com/ollama/ollama/blob/main/README.md">
-							quickstart guide
-						</VSCodeLink>
-						.
+						{t("settings:providers.ollama.description")}
 						<span className="text-vscode-errorForeground ml-1">
-							<span className="font-medium">Note:</span> Roo Code uses complex prompts and works best with
-							Claude models. Less capable models may not work as expected.
+							{t("settings:providers.ollama.warning")}
 						</span>
 					</div>
 				</>
@@ -1317,14 +1325,14 @@ const ApiOptions = ({
 						onInput={handleInputChange("unboundApiKey")}
 						placeholder="Enter API Key..."
 						className="w-full">
-						<span className="font-medium">Unbound API Key</span>
+						<span className="font-medium">{t("settings:providers.unboundApiKey")}</span>
 					</VSCodeTextField>
 					<div className="text-sm text-vscode-descriptionForeground -mt-2">
-						This key is stored locally and only used to make API requests from this extension.
+						{t("settings:providers.apiKeyStorageNotice")}
 					</div>
 					{!apiConfiguration?.unboundApiKey && (
 						<VSCodeButtonLink href="https://gateway.getunbound.ai" appearance="secondary">
-							Get Unbound API Key
+							{t("settings:providers.getUnboundApiKey")}
 						</VSCodeButtonLink>
 					)}
 				</>
@@ -1333,13 +1341,10 @@ const ApiOptions = ({
 			{selectedProvider === "human-relay" && (
 				<>
 					<div className="text-sm text-vscode-descriptionForeground">
-						No API key is required, but the user needs to help copy and paste the information to the web
-						chat AI.
+						{t("settings:providers.humanRelay.description")}
 					</div>
 					<div className="text-sm text-vscode-descriptionForeground">
-						During use, a dialog box will pop up and the current message will be copied to the clipboard
-						automatically. You need to paste these to web versions of AI (such as ChatGPT or Claude), then
-						copy the AI's reply back to the dialog box and click the confirm button.
+						{t("settings:providers.humanRelay.instructions")}
 					</div>
 				</>
 			)}
@@ -1406,7 +1411,7 @@ const ApiOptions = ({
 				<>
 					<div className="dropdown-container">
 						<label htmlFor="model-id" className="font-medium">
-							Model
+							{t("settings:providers.model")}
 						</label>
 						<Dropdown
 							id="model-id"
@@ -1443,19 +1448,7 @@ const ApiOptions = ({
 								<span className="font-medium">Custom ARN</span>
 							</VSCodeTextField>
 							<div className="text-sm text-vscode-descriptionForeground -mt-2">
-								Enter a valid AWS Bedrock ARN for the model you want to use. Format examples:
-								<ul className="list-disc pl-5 mt-1">
-									<li>
-										arn:aws:bedrock:us-east-1:123456789012:foundation-model/anthropic.claude-3-sonnet-20240229-v1:0
-									</li>
-									<li>
-										arn:aws:bedrock:us-west-2:123456789012:provisioned-model/my-provisioned-model
-									</li>
-									<li>
-										arn:aws:bedrock:us-east-1:123456789012:default-prompt-router/anthropic.claude:1
-									</li>
-								</ul>
-								Make sure the region in the ARN matches your selected AWS Region above.
+								{t("settings:providers.awsCustomArnDesc")}
 							</div>
 							{apiConfiguration?.awsCustomArn &&
 								(() => {
@@ -1467,8 +1460,7 @@ const ApiOptions = ({
 									if (!validation.isValid) {
 										return (
 											<div className="text-sm text-vscode-errorForeground mt-2">
-												{validation.errorMessage ||
-													"Invalid ARN format. Please check the examples above."}
+												{validation.errorMessage || t("settings:providers.invalidArnFormat")}
 											</div>
 										)
 									}
@@ -1483,7 +1475,6 @@ const ApiOptions = ({
 
 									return null
 								})()}
-							=======
 						</>
 					)}
 					<ModelInfoView

+ 0 - 0
webview-ui/src/i18n/locales/ar/prompts.json


+ 0 - 0
webview-ui/src/i18n/locales/ar/settings.json


+ 3 - 3
webview-ui/src/i18n/locales/ca/prompts.json

@@ -36,12 +36,12 @@
 		"title": "Instruccions personalitzades específiques del mode (opcional)",
 		"resetToDefault": "Restablir a valors predeterminats",
 		"description": "Afegiu directrius de comportament específiques per al mode {{modeName}}.",
-		"loadFromFile": "Les instruccions personalitzades específiques per al mode {{modeName}} també es poden carregar des de .clinerules-{{modeSlug}} al vostre espai de treball."
+		"loadFromFile": "Les instruccions personalitzades específiques per al mode {{mode}} també es poden carregar des de <span>.clinerules-{{slug}}</span> al vostre espai de treball."
 	},
 	"globalCustomInstructions": {
 		"title": "Instruccions personalitzades per a tots els modes",
 		"description": "Aquestes instruccions s'apliquen a tots els modes. Proporcionen un conjunt bàsic de comportaments que es poden millorar amb instruccions específiques de cada mode a continuació.\nSi voleu que Roo pensi i parli en un idioma diferent al de la visualització del vostre editor ({{language}}), podeu especificar-ho aquí.",
-		"loadFromFile": "Les instruccions també es poden carregar des de .clinerules al vostre espai de treball."
+		"loadFromFile": "Les instruccions també es poden carregar des de <span>.clinerules</span> al vostre espai de treball."
 	},
 	"systemPrompt": {
 		"preview": "Previsualització del prompt del sistema",
@@ -100,7 +100,7 @@
 	},
 	"advancedSystemPrompt": {
 		"title": "Avançat: Sobreescriure prompt del sistema",
-		"description": "Podeu reemplaçar completament el prompt del sistema per a aquest mode (a part de la definició de rol i instruccions personalitzades) creant un fitxer a .roo/system-prompt-{{modeSlug}} al vostre espai de treball. Aquesta és una funcionalitat molt avançada que eludeix les salvaguardes integrades i les comprovacions de consistència (especialment al voltant de l'ús d'eines), així que aneu amb compte!"
+		"description": "Podeu reemplaçar completament el prompt del sistema per a aquest mode (a part de la definició de rol i instruccions personalitzades) creant un fitxer a <span>.roo/system-prompt-{{slug}}</span> al vostre espai de treball. Aquesta és una funcionalitat molt avançada que eludeix les salvaguardes integrades i les comprovacions de consistència (especialment al voltant de l'ús d'eines), així que aneu amb compte!"
 	},
 	"createModeDialog": {
 		"title": "Crear nou mode",

+ 106 - 2
webview-ui/src/i18n/locales/ca/settings.json

@@ -76,7 +76,108 @@
 		"openRouterApiKey": "Clau API d'OpenRouter",
 		"apiKeyStorageNotice": "Les claus API s'emmagatzemen de forma segura a l'Emmagatzematge Secret de VSCode",
 		"useCustomBaseUrl": "Utilitzar URL base personalitzada",
-		"openRouterTransformsText": "Comprimir prompts i cadenes de missatges a la mida del context (<a>Transformacions d'OpenRouter</a>)"
+		"openRouterTransformsText": "Comprimir prompts i cadenes de missatges a la mida del context (<a>Transformacions d'OpenRouter</a>)",
+		"model": "Model",
+		"getOpenRouterApiKey": "Obtenir clau API d'OpenRouter",
+		"glamaApiKey": "Clau API de Glama",
+		"getGlamaApiKey": "Obtenir clau API de Glama",
+		"requestyApiKey": "Clau API de Requesty",
+		"getRequestyApiKey": "Obtenir clau API de Requesty",
+		"anthropicApiKey": "Clau API d'Anthropic",
+		"getAnthropicApiKey": "Obtenir clau API d'Anthropic",
+		"deepSeekApiKey": "Clau API de DeepSeek",
+		"getDeepSeekApiKey": "Obtenir clau API de DeepSeek",
+		"geminiApiKey": "Clau API de Gemini",
+		"getGeminiApiKey": "Obtenir clau API de Gemini",
+		"openAiApiKey": "Clau API d'OpenAI",
+		"openAiBaseUrl": "URL base",
+		"getOpenAiApiKey": "Obtenir clau API d'OpenAI",
+		"mistralApiKey": "Clau API de Mistral",
+		"getMistralApiKey": "Obtenir clau API de Mistral / Codestral",
+		"codestralBaseUrl": "URL base de Codestral (opcional)",
+		"codestralBaseUrlDesc": "Establir una URL alternativa per al model Codestral.",
+		"awsCredentials": "Credencials d'AWS",
+		"awsProfile": "Perfil d'AWS",
+		"awsProfileName": "Nom del perfil d'AWS",
+		"awsAccessKey": "Clau d'accés d'AWS",
+		"awsSecretKey": "Clau secreta d'AWS",
+		"awsSessionToken": "Token de sessió d'AWS",
+		"awsRegion": "Regió d'AWS",
+		"awsCrossRegion": "Utilitzar inferència entre regions",
+		"googleCloudSetup": {
+			"title": "Per utilitzar Google Cloud Vertex AI, necessiteu:",
+			"step1": "1. Crear un compte de Google Cloud, habilitar l'API de Vertex AI i habilitar els models Claude necessaris.",
+			"step2": "2. Instal·lar Google Cloud CLI i configurar les credencials d'aplicació per defecte.",
+			"step3": "3. O crear un compte de servei amb credencials."
+		},
+		"googleCloudCredentials": "Credencials de Google Cloud",
+		"googleCloudKeyFile": "Ruta del fitxer de clau de Google Cloud",
+		"googleCloudProjectId": "ID del projecte de Google Cloud",
+		"googleCloudRegion": "Regió de Google Cloud",
+		"lmStudio": {
+			"baseUrl": "URL base (opcional)",
+			"modelId": "ID del model",
+			"speculativeDecoding": "Habilitar descodificació especulativa",
+			"draftModelId": "ID del model d'esborrany",
+			"draftModelDesc": "El model d'esborrany ha de ser de la mateixa família de models perquè la descodificació especulativa funcioni correctament.",
+			"selectDraftModel": "Seleccionar model d'esborrany",
+			"noModelsFound": "No s'han trobat models d'esborrany. Assegureu-vos que LM Studio s'està executant amb el mode servidor habilitat.",
+			"description": "LM Studio permet executar models localment al vostre ordinador. Per a instruccions sobre com començar, consulteu la seva <a>Guia d'inici ràpid</a>. També necessitareu iniciar la funció de <b>Servidor Local</b> de LM Studio per utilitzar-la amb aquesta extensió. <span>Nota:</span> Roo Code utilitza prompts complexos i funciona millor amb models Claude. Els models menys capaços poden no funcionar com s'espera."
+		},
+		"ollama": {
+			"baseUrl": "URL base (opcional)",
+			"modelId": "ID del model",
+			"description": "Ollama permet executar models localment al vostre ordinador. Per a instruccions sobre com començar, consulteu la Guia d'inici ràpid.",
+			"warning": "Nota: Roo Code utilitza prompts complexos i funciona millor amb models Claude. Els models menys capaços poden no funcionar com s'espera."
+		},
+		"unboundApiKey": "Clau API d'Unbound",
+		"getUnboundApiKey": "Obtenir clau API d'Unbound",
+		"humanRelay": {
+			"description": "No es requereix clau API, però l'usuari necessita ajuda per copiar i enganxar informació al xat d'IA web.",
+			"instructions": "Durant l'ús, apareixerà un diàleg i el missatge actual es copiarà automàticament al porta-retalls. Necessiteu enganxar-lo a les versions web d'IA (com ChatGPT o Claude), després copiar la resposta de l'IA de nou al diàleg i fer clic al botó de confirmació."
+		},
+		"customModel": {
+			"capabilities": "Configureu les capacitats i preus per al vostre model personalitzat compatible amb OpenAI. Tingueu cura en especificar les capacitats del model, ja que poden afectar com funciona Roo Code.",
+			"maxTokens": {
+				"label": "Màxim de tokens de sortida",
+				"description": "El nombre màxim de tokens que el model pot generar en una resposta. (Establiu -1 per permetre que el servidor estableixi el màxim de tokens.)"
+			},
+			"contextWindow": {
+				"label": "Mida de la finestra de context",
+				"description": "Total de tokens (entrada + sortida) que el model pot processar."
+			},
+			"imageSupport": {
+				"label": "Suport d'imatges",
+				"description": "Aquest model és capaç de processar i entendre imatges?"
+			},
+			"computerUse": {
+				"label": "Ús de l'ordinador",
+				"description": "Aquest model és capaç d'interactuar amb un navegador? (com Claude 3.7 Sonnet)"
+			},
+			"promptCache": {
+				"label": "Emmagatzematge en caché de prompts",
+				"description": "Aquest model és capaç d'emmagatzemar prompts en caché?"
+			},
+			"pricing": {
+				"input": {
+					"label": "Preu d'entrada",
+					"description": "Cost per milió de tokens en l'entrada/prompt. Això afecta el cost d'enviar context i instruccions al model."
+				},
+				"output": {
+					"label": "Preu de sortida",
+					"description": "Cost per milió de tokens en la resposta del model. Això afecta el cost del contingut generat i les completions."
+				},
+				"cacheReads": {
+					"label": "Preu de lectures de caché",
+					"description": "Cost per milió de tokens per llegir de la caché. Aquest és el preu cobrat quan es recupera una resposta emmagatzemada en caché."
+				},
+				"cacheWrites": {
+					"label": "Preu d'escriptures de caché",
+					"description": "Cost per milió de tokens per escriure a la caché. Aquest és el preu cobrat quan s'emmagatzema un prompt per primera vegada."
+				}
+			},
+			"resetDefaults": "Restablir als valors per defecte"
+		}
 	},
 	"browser": {
 		"enable": {
@@ -208,7 +309,10 @@
 		"gemini": {
 			"freeRequests": "* Gratuït fins a {{count}} sol·licituds per minut. Després d'això, la facturació depèn de la mida del prompt.",
 			"pricingDetails": "Per a més informació, consulteu els detalls de preus."
-		}
+		},
+		"enableStreaming": "Habilitar streaming",
+		"useAzure": "Utilitzar Azure",
+		"azureApiVersion": "Establir versió de l'API d'Azure"
 	},
 	"modelPicker": {
 		"automaticFetch": "L'extensió obté automàticament la llista més recent de models disponibles a <serviceLink>{{serviceName}}</serviceLink>. Si no esteu segur de quin model triar, Roo Code funciona millor amb <defaultModelLink>{{defaultModelId}}</defaultModelLink>. També podeu cercar \"free\" per a opcions gratuïtes actualment disponibles."

+ 0 - 0
webview-ui/src/i18n/locales/cs/prompts.json


+ 0 - 0
webview-ui/src/i18n/locales/cs/settings.json


+ 3 - 3
webview-ui/src/i18n/locales/de/prompts.json

@@ -36,12 +36,12 @@
 		"title": "Modusspezifische benutzerdefinierte Anweisungen (optional)",
 		"resetToDefault": "Auf Standardwerte zurücksetzen",
 		"description": "Fügen Sie verhaltensspezifische Richtlinien für den Modus {{modeName}} hinzu.",
-		"loadFromFile": "Benutzerdefinierte Anweisungen für den Modus {{modeName}} können auch aus .clinerules-{{modeSlug}} in Ihrem Arbeitsbereich geladen werden."
+		"loadFromFile": "Benutzerdefinierte Anweisungen für den Modus {{mode}} können auch aus <span>.clinerules-{{slug}}</span> in Ihrem Arbeitsbereich geladen werden."
 	},
 	"globalCustomInstructions": {
 		"title": "Benutzerdefinierte Anweisungen für alle Modi",
 		"description": "Diese Anweisungen gelten für alle Modi. Sie bieten einen grundlegenden Satz von Verhaltensweisen, die durch modusspezifische Anweisungen unten erweitert werden können.\nWenn Sie möchten, dass Roo in einer anderen Sprache als Ihrer Editor-Anzeigesprache ({{language}}) denkt und spricht, können Sie das hier angeben.",
-		"loadFromFile": "Anweisungen können auch aus .clinerules in Ihrem Arbeitsbereich geladen werden."
+		"loadFromFile": "Anweisungen können auch aus <span>.clinerules</span> in Ihrem Arbeitsbereich geladen werden."
 	},
 	"systemPrompt": {
 		"preview": "System-Prompt Vorschau",
@@ -100,7 +100,7 @@
 	},
 	"advancedSystemPrompt": {
 		"title": "Erweitert: System-Prompt überschreiben",
-		"description": "Sie können den System-Prompt für diesen Modus vollständig ersetzen (abgesehen von der Rollendefinition und benutzerdefinierten Anweisungen), indem Sie eine Datei unter .roo/system-prompt-{{modeSlug}} in Ihrem Arbeitsbereich erstellen. Dies ist eine sehr fortgeschrittene Funktion, die eingebaute Schutzmaßnahmen und Konsistenzprüfungen umgeht (besonders bei der Werkzeugnutzung), also seien Sie vorsichtig!"
+		"description": "Sie können den System-Prompt für diesen Modus vollständig ersetzen (abgesehen von der Rollendefinition und benutzerdefinierten Anweisungen), indem Sie eine Datei unter <span>.roo/system-prompt-{{slug}}</span> in Ihrem Arbeitsbereich erstellen. Dies ist eine sehr fortgeschrittene Funktion, die eingebaute Schutzmaßnahmen und Konsistenzprüfungen umgeht (besonders bei der Werkzeugnutzung), also seien Sie vorsichtig!"
 	},
 	"createModeDialog": {
 		"title": "Neuen Modus erstellen",

+ 105 - 1
webview-ui/src/i18n/locales/de/settings.json

@@ -73,10 +73,111 @@
 		"configProfile": "Konfigurationsprofil",
 		"description": "Beschreibung",
 		"apiProvider": "API-Anbieter",
+		"model": "Modell",
 		"openRouterApiKey": "OpenRouter API-Schlüssel",
+		"getOpenRouterApiKey": "OpenRouter API-Schlüssel erhalten",
 		"apiKeyStorageNotice": "API-Schlüssel werden sicher im VSCode Secret Storage gespeichert",
+		"glamaApiKey": "Glama API-Schlüssel",
+		"getGlamaApiKey": "Glama API-Schlüssel erhalten",
 		"useCustomBaseUrl": "Benutzerdefinierte Basis-URL verwenden",
-		"openRouterTransformsText": "Prompts und Nachrichtenketten auf Kontextgröße komprimieren (<a>OpenRouter Transformationen</a>)"
+		"requestyApiKey": "Requesty API-Schlüssel",
+		"getRequestyApiKey": "Requesty API-Schlüssel erhalten",
+		"openRouterTransformsText": "Prompts und Nachrichtenketten auf Kontextgröße komprimieren (<a>OpenRouter Transformationen</a>)",
+		"anthropicApiKey": "Anthropic API-Schlüssel",
+		"getAnthropicApiKey": "Anthropic API-Schlüssel erhalten",
+		"deepSeekApiKey": "DeepSeek API-Schlüssel",
+		"getDeepSeekApiKey": "DeepSeek API-Schlüssel erhalten",
+		"geminiApiKey": "Gemini API-Schlüssel",
+		"getGeminiApiKey": "Gemini API-Schlüssel erhalten",
+		"openAiApiKey": "OpenAI API-Schlüssel",
+		"openAiBaseUrl": "Basis-URL",
+		"getOpenAiApiKey": "OpenAI API-Schlüssel erhalten",
+		"mistralApiKey": "Mistral API-Schlüssel",
+		"getMistralApiKey": "Mistral / Codestral API-Schlüssel erhalten",
+		"codestralBaseUrl": "Codestral Basis-URL (Optional)",
+		"codestralBaseUrlDesc": "Legen Sie eine alternative URL für das Codestral-Modell fest.",
+		"awsCredentials": "AWS Anmeldedaten",
+		"awsProfile": "AWS Profil",
+		"awsProfileName": "AWS Profilname",
+		"awsAccessKey": "AWS Zugangsschlüssel",
+		"awsSecretKey": "AWS Geheimschlüssel",
+		"awsSessionToken": "AWS Sitzungstoken",
+		"awsRegion": "AWS Region",
+		"awsCrossRegion": "Regionsübergreifende Inferenz verwenden",
+		"googleCloudSetup": {
+			"title": "Um Google Cloud Vertex AI zu verwenden, müssen Sie:",
+			"step1": "1. Ein Google Cloud-Konto erstellen, die Vertex AI API aktivieren & die gewünschten Claude-Modelle aktivieren.",
+			"step2": "2. Die Google Cloud CLI installieren & Standardanmeldeinformationen für die Anwendung konfigurieren.",
+			"step3": "3. Oder ein Servicekonto mit Anmeldeinformationen erstellen."
+		},
+		"googleCloudCredentials": "Google Cloud Anmeldedaten",
+		"googleCloudKeyFile": "Google Cloud Schlüsseldateipfad",
+		"googleCloudProjectId": "Google Cloud Projekt-ID",
+		"googleCloudRegion": "Google Cloud Region",
+		"lmStudio": {
+			"baseUrl": "Basis-URL (optional)",
+			"modelId": "Modell-ID",
+			"speculativeDecoding": "Spekulatives Dekodieren aktivieren",
+			"draftModelId": "Entwurfsmodell-ID",
+			"draftModelDesc": "Das Entwurfsmodell muss aus derselben Modellfamilie stammen, damit das spekulative Dekodieren korrekt funktioniert.",
+			"selectDraftModel": "Entwurfsmodell auswählen",
+			"noModelsFound": "Keine Entwurfsmodelle gefunden. Bitte stellen Sie sicher, dass LM Studio mit aktiviertem Servermodus läuft.",
+			"description": "LM Studio ermöglicht es Ihnen, Modelle lokal auf Ihrem Computer auszuführen. Eine Anleitung zum Einstieg finden Sie in ihrem <a>Schnellstart-Guide</a>. Sie müssen auch die <b>lokale Server</b>-Funktion von LM Studio starten, um es mit dieser Erweiterung zu verwenden. <span>Hinweis:</span> Roo Code verwendet komplexe Prompts und funktioniert am besten mit Claude-Modellen. Weniger leistungsfähige Modelle funktionieren möglicherweise nicht wie erwartet."
+		},
+		"ollama": {
+			"baseUrl": "Basis-URL (optional)",
+			"modelId": "Modell-ID",
+			"description": "Ollama ermöglicht es Ihnen, Modelle lokal auf Ihrem Computer auszuführen. Eine Anleitung zum Einstieg finden Sie im Schnellstart-Guide.",
+			"warning": "Hinweis: Roo Code verwendet komplexe Prompts und funktioniert am besten mit Claude-Modellen. Weniger leistungsfähige Modelle funktionieren möglicherweise nicht wie erwartet."
+		},
+		"unboundApiKey": "Unbound API-Schlüssel",
+		"getUnboundApiKey": "Unbound API-Schlüssel erhalten",
+		"humanRelay": {
+			"description": "Es ist kein API-Schlüssel erforderlich, aber der Benutzer muss beim Kopieren und Einfügen der Informationen in den Web-Chat-KI helfen.",
+			"instructions": "Während der Verwendung wird ein Dialogfeld angezeigt und die aktuelle Nachricht wird automatisch in die Zwischenablage kopiert. Sie müssen diese in Web-Versionen von KI (wie ChatGPT oder Claude) einfügen, dann die Antwort der KI zurück in das Dialogfeld kopieren und auf die Bestätigungsschaltfläche klicken."
+		},
+		"customModel": {
+			"capabilities": "Konfigurieren Sie die Fähigkeiten und Preise für Ihr benutzerdefiniertes OpenAI-kompatibles Modell. Seien Sie vorsichtig bei der Angabe der Modellfähigkeiten, da diese beeinflussen können, wie Roo Code funktioniert.",
+			"maxTokens": {
+				"label": "Maximale Ausgabe-Tokens",
+				"description": "Maximale Anzahl von Tokens, die das Modell in einer Antwort generieren kann. (Geben Sie -1 an, damit der Server die maximalen Tokens festlegt.)"
+			},
+			"contextWindow": {
+				"label": "Kontextfenstergröße",
+				"description": "Gesamte Tokens (Eingabe + Ausgabe), die das Modell verarbeiten kann."
+			},
+			"imageSupport": {
+				"label": "Bildunterstützung",
+				"description": "Ist dieses Modell in der Lage, Bilder zu verarbeiten und zu verstehen?"
+			},
+			"computerUse": {
+				"label": "Computer-Nutzung",
+				"description": "Ist dieses Modell in der Lage, mit einem Browser zu interagieren? (z.B. Claude 3.7 Sonnet)"
+			},
+			"promptCache": {
+				"label": "Prompt-Caching",
+				"description": "Ist dieses Modell in der Lage, Prompts zu cachen?"
+			},
+			"pricing": {
+				"input": {
+					"label": "Eingabepreis",
+					"description": "Kosten pro Million Tokens in der Eingabe/Prompt. Dies beeinflusst die Kosten für das Senden von Kontext und Anweisungen an das Modell."
+				},
+				"output": {
+					"label": "Ausgabepreis",
+					"description": "Kosten pro Million Tokens in der Modellantwort. Dies beeinflusst die Kosten für generierte Inhalte und Vervollständigungen."
+				},
+				"cacheReads": {
+					"label": "Cache-Lesepreis",
+					"description": "Kosten pro Million Tokens für das Lesen aus dem Cache. Dies ist der Preis, der beim Abrufen einer gecachten Antwort berechnet wird."
+				},
+				"cacheWrites": {
+					"label": "Cache-Schreibpreis",
+					"description": "Kosten pro Million Tokens für das Schreiben in den Cache. Dies ist der Preis, der beim ersten Cachen eines Prompts berechnet wird."
+				}
+			},
+			"resetDefaults": "Auf Standardwerte zurücksetzen"
+		}
 	},
 	"browser": {
 		"enable": {
@@ -205,6 +306,9 @@
 		"outputPrice": "Ausgabepreis",
 		"cacheReadsPrice": "Cache-Lesepreis",
 		"cacheWritesPrice": "Cache-Schreibpreis",
+		"enableStreaming": "Streaming aktivieren",
+		"useAzure": "Azure verwenden",
+		"azureApiVersion": "Azure API-Version festlegen",
 		"gemini": {
 			"freeRequests": "* Kostenlos bis zu {{count}} Anfragen pro Minute. Danach hängt die Abrechnung von der Prompt-Größe ab.",
 			"pricingDetails": "Weitere Informationen finden Sie in den Preisdetails."

+ 3 - 3
webview-ui/src/i18n/locales/en/prompts.json

@@ -36,12 +36,12 @@
 		"title": "Mode-specific Custom Instructions (optional)",
 		"resetToDefault": "Reset to default",
 		"description": "Add behavioral guidelines specific to {{modeName}} mode.",
-		"loadFromFile": "Custom instructions specific to {{modeName}} mode can also be loaded from .clinerules-{{modeSlug}} in your workspace."
+		"loadFromFile": "Custom instructions specific to {{mode}} mode can also be loaded from <span>.clinerules-{{slug}}</span> in your workspace."
 	},
 	"globalCustomInstructions": {
 		"title": "Custom Instructions for All Modes",
 		"description": "These instructions apply to all modes. They provide a base set of behaviors that can be enhanced by mode-specific instructions below.\nIf you would like Roo to think and speak in a different language than your editor display language ({{language}}), you can specify it here.",
-		"loadFromFile": "Instructions can also be loaded from .clinerules in your workspace."
+		"loadFromFile": "Instructions can also be loaded from <span>.clinerules</span> in your workspace."
 	},
 	"systemPrompt": {
 		"preview": "Preview System Prompt",
@@ -100,7 +100,7 @@
 	},
 	"advancedSystemPrompt": {
 		"title": "Advanced: Override System Prompt",
-		"description": "You can completely replace the system prompt for this mode (aside from the role definition and custom instructions) by creating a file at .roo/system-prompt-{{modeSlug}} in your workspace. This is a very advanced feature that bypasses built-in safeguards and consistency checks (especially around tool usage), so be careful!"
+		"description": "You can completely replace the system prompt for this mode (aside from the role definition and custom instructions) by creating a file at <span>.roo/system-prompt-{{slug}}</span> in your workspace. This is a very advanced feature that bypasses built-in safeguards and consistency checks (especially around tool usage), so be careful!"
 	},
 	"createModeDialog": {
 		"title": "Create New Mode",

+ 105 - 1
webview-ui/src/i18n/locales/en/settings.json

@@ -73,10 +73,111 @@
 		"configProfile": "Configuration Profile",
 		"description": "Description",
 		"apiProvider": "API Provider",
+		"model": "Model",
 		"openRouterApiKey": "OpenRouter API Key",
+		"getOpenRouterApiKey": "Get OpenRouter API Key",
 		"apiKeyStorageNotice": "API keys are stored securely in VSCode's Secret Storage",
+		"glamaApiKey": "Glama API Key",
+		"getGlamaApiKey": "Get Glama API Key",
 		"useCustomBaseUrl": "Use custom base URL",
-		"openRouterTransformsText": "Compress prompts and message chains to the context size (<a>OpenRouter Transforms</a>)"
+		"requestyApiKey": "Requesty API Key",
+		"getRequestyApiKey": "Get Requesty API Key",
+		"openRouterTransformsText": "Compress prompts and message chains to the context size (<a>OpenRouter Transforms</a>)",
+		"anthropicApiKey": "Anthropic API Key",
+		"getAnthropicApiKey": "Get Anthropic API Key",
+		"deepSeekApiKey": "DeepSeek API Key",
+		"getDeepSeekApiKey": "Get DeepSeek API Key",
+		"geminiApiKey": "Gemini API Key",
+		"getGeminiApiKey": "Get Gemini API Key",
+		"openAiApiKey": "OpenAI API Key",
+		"openAiBaseUrl": "Base URL",
+		"getOpenAiApiKey": "Get OpenAI API Key",
+		"mistralApiKey": "Mistral API Key",
+		"getMistralApiKey": "Get Mistral / Codestral API Key",
+		"codestralBaseUrl": "Codestral Base URL (Optional)",
+		"codestralBaseUrlDesc": "Set an alternative URL for the Codestral model.",
+		"awsCredentials": "AWS Credentials",
+		"awsProfile": "AWS Profile",
+		"awsProfileName": "AWS Profile Name",
+		"awsAccessKey": "AWS Access Key",
+		"awsSecretKey": "AWS Secret Key",
+		"awsSessionToken": "AWS Session Token",
+		"awsRegion": "AWS Region",
+		"awsCrossRegion": "Use cross-region inference",
+		"googleCloudSetup": {
+			"title": "To use Google Cloud Vertex AI, you need to:",
+			"step1": "1. Create a Google Cloud account, enable the Vertex AI API & enable the desired Claude models.",
+			"step2": "2. Install the Google Cloud CLI & configure application default credentials.",
+			"step3": "3. Or create a service account with credentials."
+		},
+		"googleCloudCredentials": "Google Cloud Credentials",
+		"googleCloudKeyFile": "Google Cloud Key File Path",
+		"googleCloudProjectId": "Google Cloud Project ID",
+		"googleCloudRegion": "Google Cloud Region",
+		"lmStudio": {
+			"baseUrl": "Base URL (optional)",
+			"modelId": "Model ID",
+			"speculativeDecoding": "Enable Speculative Decoding",
+			"draftModelId": "Draft Model ID",
+			"draftModelDesc": "Draft model must be from the same model family for speculative decoding to work correctly.",
+			"selectDraftModel": "Select Draft Model",
+			"noModelsFound": "No draft models found. Please ensure LM Studio is running with Server Mode enabled.",
+			"description": "LM Studio allows you to run models locally on your computer. For instructions on how to get started, see their <a>quickstart guide</a>. You will also need to start LM Studio's <b>local server</b> feature to use it with this extension. <span>Note:</span> Roo Code uses complex prompts and works best with Claude models. Less capable models may not work as expected."
+		},
+		"ollama": {
+			"baseUrl": "Base URL (optional)",
+			"modelId": "Model ID",
+			"description": "Ollama allows you to run models locally on your computer. For instructions on how to get started, see their quickstart guide.",
+			"warning": "Note: Roo Code uses complex prompts and works best with Claude models. Less capable models may not work as expected."
+		},
+		"unboundApiKey": "Unbound API Key",
+		"getUnboundApiKey": "Get Unbound API Key",
+		"humanRelay": {
+			"description": "No API key is required, but the user needs to help copy and paste the information to the web chat AI.",
+			"instructions": "During use, a dialog box will pop up and the current message will be copied to the clipboard automatically. You need to paste these to web versions of AI (such as ChatGPT or Claude), then copy the AI's reply back to the dialog box and click the confirm button."
+		},
+		"customModel": {
+			"capabilities": "Configure the capabilities and pricing for your custom OpenAI-compatible model. Be careful when specifying the model capabilities, as they can affect how Roo Code performs.",
+			"maxTokens": {
+				"label": "Max Output Tokens",
+				"description": "Maximum number of tokens the model can generate in a response. (Specify -1 to allow the server to set the max tokens.)"
+			},
+			"contextWindow": {
+				"label": "Context Window Size",
+				"description": "Total tokens (input + output) the model can process."
+			},
+			"imageSupport": {
+				"label": "Image Support",
+				"description": "Is this model capable of processing and understanding images?"
+			},
+			"computerUse": {
+				"label": "Computer Use",
+				"description": "Is this model capable of interacting with a browser? (e.g. Claude 3.7 Sonnet)."
+			},
+			"promptCache": {
+				"label": "Prompt Caching",
+				"description": "Is this model capable of caching prompts?"
+			},
+			"pricing": {
+				"input": {
+					"label": "Input Price",
+					"description": "Cost per million tokens in the input/prompt. This affects the cost of sending context and instructions to the model."
+				},
+				"output": {
+					"label": "Output Price",
+					"description": "Cost per million tokens in the model's response. This affects the cost of generated content and completions."
+				},
+				"cacheReads": {
+					"label": "Cache Reads Price",
+					"description": "Cost per million tokens for reading from the cache. This is the price charged when a cached response is retrieved."
+				},
+				"cacheWrites": {
+					"label": "Cache Writes Price",
+					"description": "Cost per million tokens for writing to the cache. This is the price charged when a prompt is cached for the first time."
+				}
+			},
+			"resetDefaults": "Reset to Defaults"
+		}
 	},
 	"browser": {
 		"enable": {
@@ -205,6 +306,9 @@
 		"outputPrice": "Output price",
 		"cacheReadsPrice": "Cache reads price",
 		"cacheWritesPrice": "Cache writes price",
+		"enableStreaming": "Enable streaming",
+		"useAzure": "Use Azure",
+		"azureApiVersion": "Set Azure API version",
 		"gemini": {
 			"freeRequests": "* Free up to {{count}} requests per minute. After that, billing depends on prompt size.",
 			"pricingDetails": "For more info, see pricing details."

+ 3 - 3
webview-ui/src/i18n/locales/es/prompts.json

@@ -36,12 +36,12 @@
 		"title": "Instrucciones personalizadas para el modo (opcional)",
 		"resetToDefault": "Restablecer a valores predeterminados",
 		"description": "Agrega directrices de comportamiento específicas para el modo {{modeName}}.",
-		"loadFromFile": "Las instrucciones personalizadas para el modo {{modeName}} también se pueden cargar desde .clinerules-{{modeSlug}} en tu espacio de trabajo."
+		"loadFromFile": "Las instrucciones personalizadas para el modo {{mode}} también se pueden cargar desde <span>.clinerules-{{slug}}</span> en tu espacio de trabajo."
 	},
 	"globalCustomInstructions": {
 		"title": "Instrucciones personalizadas para todos los modos",
 		"description": "Estas instrucciones se aplican a todos los modos. Proporcionan un conjunto base de comportamientos que pueden ser mejorados por instrucciones específicas de cada modo.\nSi quieres que Roo piense y hable en un idioma diferente al idioma de visualización de tu editor ({{language}}), puedes especificarlo aquí.",
-		"loadFromFile": "Las instrucciones también se pueden cargar desde .clinerules en tu espacio de trabajo."
+		"loadFromFile": "Las instrucciones también se pueden cargar desde <span>.clinerules</span> en tu espacio de trabajo."
 	},
 	"systemPrompt": {
 		"preview": "Vista previa de la solicitud del sistema",
@@ -100,7 +100,7 @@
 	},
 	"advancedSystemPrompt": {
 		"title": "Avanzado: Anular solicitud del sistema",
-		"description": "Puedes reemplazar completamente la solicitud del sistema para este modo (aparte de la definición de rol e instrucciones personalizadas) creando un archivo en .roo/system-prompt-{{modeSlug}} en tu espacio de trabajo. ¡Esta es una función muy avanzada que omite las salvaguardas integradas y las verificaciones de consistencia (especialmente en torno al uso de herramientas), así que ten cuidado!"
+		"description": "Puedes reemplazar completamente la solicitud del sistema para este modo (aparte de la definición de rol e instrucciones personalizadas) creando un archivo en <span>.roo/system-prompt-{{slug}}</span> en tu espacio de trabajo. ¡Esta es una función muy avanzada que omite las salvaguardas integradas y las verificaciones de consistencia (especialmente en torno al uso de herramientas), así que ten cuidado!"
 	},
 	"createModeDialog": {
 		"title": "Crear nuevo modo",

+ 105 - 1
webview-ui/src/i18n/locales/es/settings.json

@@ -73,10 +73,111 @@
 		"configProfile": "Perfil de configuración",
 		"description": "Descripción",
 		"apiProvider": "Proveedor de API",
+		"model": "Modelo",
 		"openRouterApiKey": "Clave API de OpenRouter",
+		"getOpenRouterApiKey": "Obtener clave API de OpenRouter",
 		"apiKeyStorageNotice": "Las claves API se almacenan de forma segura en el Almacenamiento Secreto de VSCode",
+		"glamaApiKey": "Clave API de Glama",
+		"getGlamaApiKey": "Obtener clave API de Glama",
 		"useCustomBaseUrl": "Usar URL base personalizada",
-		"openRouterTransformsText": "Comprimir prompts y cadenas de mensajes al tamaño del contexto (<a>Transformaciones de OpenRouter</a>)"
+		"requestyApiKey": "Clave API de Requesty",
+		"getRequestyApiKey": "Obtener clave API de Requesty",
+		"openRouterTransformsText": "Comprimir prompts y cadenas de mensajes al tamaño del contexto (<a>Transformaciones de OpenRouter</a>)",
+		"anthropicApiKey": "Clave API de Anthropic",
+		"getAnthropicApiKey": "Obtener clave API de Anthropic",
+		"deepSeekApiKey": "Clave API de DeepSeek",
+		"getDeepSeekApiKey": "Obtener clave API de DeepSeek",
+		"geminiApiKey": "Clave API de Gemini",
+		"getGeminiApiKey": "Obtener clave API de Gemini",
+		"openAiApiKey": "Clave API de OpenAI",
+		"openAiBaseUrl": "URL base",
+		"getOpenAiApiKey": "Obtener clave API de OpenAI",
+		"mistralApiKey": "Clave API de Mistral",
+		"getMistralApiKey": "Obtener clave API de Mistral / Codestral",
+		"codestralBaseUrl": "URL base de Codestral (Opcional)",
+		"codestralBaseUrlDesc": "Establecer una URL alternativa para el modelo Codestral.",
+		"awsCredentials": "Credenciales de AWS",
+		"awsProfile": "Perfil de AWS",
+		"awsProfileName": "Nombre del perfil de AWS",
+		"awsAccessKey": "Clave de acceso de AWS",
+		"awsSecretKey": "Clave secreta de AWS",
+		"awsSessionToken": "Token de sesión de AWS",
+		"awsRegion": "Región de AWS",
+		"awsCrossRegion": "Usar inferencia entre regiones",
+		"googleCloudSetup": {
+			"title": "Para usar Google Cloud Vertex AI, necesita:",
+			"step1": "1. Crear una cuenta de Google Cloud, habilitar la API de Vertex AI y habilitar los modelos Claude deseados.",
+			"step2": "2. Instalar Google Cloud CLI y configurar las credenciales predeterminadas de la aplicación.",
+			"step3": "3. O crear una cuenta de servicio con credenciales."
+		},
+		"googleCloudCredentials": "Credenciales de Google Cloud",
+		"googleCloudKeyFile": "Ruta del archivo de clave de Google Cloud",
+		"googleCloudProjectId": "ID del proyecto de Google Cloud",
+		"googleCloudRegion": "Región de Google Cloud",
+		"lmStudio": {
+			"baseUrl": "URL base (opcional)",
+			"modelId": "ID del modelo",
+			"speculativeDecoding": "Habilitar decodificación especulativa",
+			"draftModelId": "ID del modelo borrador",
+			"draftModelDesc": "El modelo borrador debe ser de la misma familia de modelos para que la decodificación especulativa funcione correctamente.",
+			"selectDraftModel": "Seleccionar modelo borrador",
+			"noModelsFound": "No se encontraron modelos borrador. Asegúrese de que LM Studio esté ejecutándose con el Modo Servidor habilitado.",
+			"description": "LM Studio le permite ejecutar modelos localmente en su computadora. Para obtener instrucciones sobre cómo comenzar, consulte su <a>guía de inicio rápido</a>. También necesitará iniciar la función de <b>servidor local</b> de LM Studio para usarlo con esta extensión. <span>Nota:</span> Roo Code utiliza prompts complejos y funciona mejor con modelos Claude. Los modelos menos capaces pueden no funcionar como se espera."
+		},
+		"ollama": {
+			"baseUrl": "URL base (opcional)",
+			"modelId": "ID del modelo",
+			"description": "Ollama le permite ejecutar modelos localmente en su computadora. Para obtener instrucciones sobre cómo comenzar, consulte la guía de inicio rápido.",
+			"warning": "Nota: Roo Code utiliza prompts complejos y funciona mejor con modelos Claude. Los modelos menos capaces pueden no funcionar como se espera."
+		},
+		"unboundApiKey": "Clave API de Unbound",
+		"getUnboundApiKey": "Obtener clave API de Unbound",
+		"humanRelay": {
+			"description": "No se requiere clave API, pero el usuario necesita ayudar a copiar y pegar la información en el chat web de IA.",
+			"instructions": "Durante el uso, aparecerá un cuadro de diálogo y el mensaje actual se copiará automáticamente al portapapeles. Debe pegarlo en las versiones web de IA (como ChatGPT o Claude), luego copiar la respuesta de la IA de vuelta al cuadro de diálogo y hacer clic en el botón de confirmar."
+		},
+		"customModel": {
+			"capabilities": "Configure las capacidades y precios para su modelo personalizado compatible con OpenAI. Tenga cuidado al especificar las capacidades del modelo, ya que pueden afectar cómo funciona Roo Code.",
+			"maxTokens": {
+				"label": "Tokens máximos de salida",
+				"description": "Número máximo de tokens que el modelo puede generar en una respuesta. (Especifique -1 para permitir que el servidor establezca los tokens máximos.)"
+			},
+			"contextWindow": {
+				"label": "Tamaño de ventana de contexto",
+				"description": "Total de tokens (entrada + salida) que el modelo puede procesar."
+			},
+			"imageSupport": {
+				"label": "Soporte de imágenes",
+				"description": "¿Es este modelo capaz de procesar y entender imágenes?"
+			},
+			"computerUse": {
+				"label": "Uso del ordenador",
+				"description": "¿Es este modelo capaz de interactuar con un navegador? (ej. Claude 3.7 Sonnet)"
+			},
+			"promptCache": {
+				"label": "Caché de prompts",
+				"description": "¿Es este modelo capaz de almacenar prompts en caché?"
+			},
+			"pricing": {
+				"input": {
+					"label": "Precio de entrada",
+					"description": "Costo por millón de tokens en la entrada/prompt. Esto afecta el costo de enviar contexto e instrucciones al modelo."
+				},
+				"output": {
+					"label": "Precio de salida",
+					"description": "Costo por millón de tokens en la respuesta del modelo. Esto afecta el costo del contenido generado y las completaciones."
+				},
+				"cacheReads": {
+					"label": "Precio de lecturas de caché",
+					"description": "Costo por millón de tokens para leer del caché. Este es el precio que se cobra cuando se recupera una respuesta almacenada en caché."
+				},
+				"cacheWrites": {
+					"label": "Precio de escrituras de caché",
+					"description": "Costo por millón de tokens para escribir en el caché. Este es el precio que se cobra cuando se almacena un prompt en caché por primera vez."
+				}
+			},
+			"resetDefaults": "Restablecer valores predeterminados"
+		}
 	},
 	"browser": {
 		"enable": {
@@ -205,6 +306,9 @@
 		"outputPrice": "Precio de salida",
 		"cacheReadsPrice": "Precio de lecturas de caché",
 		"cacheWritesPrice": "Precio de escrituras de caché",
+		"enableStreaming": "Habilitar streaming",
+		"useAzure": "Usar Azure",
+		"azureApiVersion": "Establecer versión de API de Azure",
 		"gemini": {
 			"freeRequests": "* Gratis hasta {{count}} solicitudes por minuto. Después de eso, la facturación depende del tamaño del prompt.",
 			"pricingDetails": "Para más información, consulte los detalles de precios."

+ 3 - 3
webview-ui/src/i18n/locales/fr/prompts.json

@@ -36,12 +36,12 @@
 		"title": "Instructions personnalisées spécifiques au mode (optionnel)",
 		"resetToDefault": "Réinitialiser aux valeurs par défaut",
 		"description": "Ajoutez des directives comportementales spécifiques au mode {{modeName}}.",
-		"loadFromFile": "Les instructions personnalisées spécifiques au mode {{modeName}} peuvent également être chargées depuis .clinerules-{{modeSlug}} dans votre espace de travail."
+		"loadFromFile": "Les instructions personnalisées spécifiques au mode {{mode}} peuvent également être chargées depuis <span>.clinerules-{{slug}}</span> dans votre espace de travail."
 	},
 	"globalCustomInstructions": {
 		"title": "Instructions personnalisées pour tous les modes",
 		"description": "Ces instructions s'appliquent à tous les modes. Elles fournissent un ensemble de comportements de base qui peuvent être améliorés par des instructions spécifiques au mode ci-dessous.\nSi vous souhaitez que Roo pense et parle dans une langue différente de celle de votre éditeur ({{language}}), vous pouvez le spécifier ici.",
-		"loadFromFile": "Les instructions peuvent également être chargées depuis .clinerules dans votre espace de travail."
+		"loadFromFile": "Les instructions peuvent également être chargées depuis <span>.clinerules</span> dans votre espace de travail."
 	},
 	"systemPrompt": {
 		"preview": "Aperçu du prompt système",
@@ -100,7 +100,7 @@
 	},
 	"advancedSystemPrompt": {
 		"title": "Avancé : Remplacer le prompt système",
-		"description": "Vous pouvez complètement remplacer le prompt système pour ce mode (en dehors de la définition du rôle et des instructions personnalisées) en créant un fichier à .roo/system-prompt-{{modeSlug}} dans votre espace de travail. Il s'agit d'une fonctionnalité très avancée qui contourne les garanties intégrées et les vérifications de cohérence (notamment concernant l'utilisation des outils), alors soyez prudent !"
+		"description": "Vous pouvez complètement remplacer le prompt système pour ce mode (en dehors de la définition du rôle et des instructions personnalisées) en créant un fichier à <span>.roo/system-prompt-{{slug}}</span> dans votre espace de travail. Il s'agit d'une fonctionnalité très avancée qui contourne les garanties intégrées et les vérifications de cohérence (notamment concernant l'utilisation des outils), alors soyez prudent !"
 	},
 	"createModeDialog": {
 		"title": "Créer un nouveau mode",

+ 105 - 1
webview-ui/src/i18n/locales/fr/settings.json

@@ -73,10 +73,111 @@
 		"configProfile": "Profil de configuration",
 		"description": "Description",
 		"apiProvider": "Fournisseur d'API",
+		"model": "Modèle",
 		"openRouterApiKey": "Clé API OpenRouter",
+		"getOpenRouterApiKey": "Obtenir la clé API OpenRouter",
 		"apiKeyStorageNotice": "Les clés API sont stockées en toute sécurité dans le stockage sécurisé de VSCode",
+		"glamaApiKey": "Clé API Glama",
+		"getGlamaApiKey": "Obtenir la clé API Glama",
 		"useCustomBaseUrl": "Utiliser une URL de base personnalisée",
-		"openRouterTransformsText": "Compresser les prompts et chaînes de messages à la taille du contexte (<a>Transformations OpenRouter</a>)"
+		"requestyApiKey": "Clé API Requesty",
+		"getRequestyApiKey": "Obtenir la clé API Requesty",
+		"openRouterTransformsText": "Compresser les prompts et chaînes de messages à la taille du contexte (<a>Transformations OpenRouter</a>)",
+		"anthropicApiKey": "Clé API Anthropic",
+		"getAnthropicApiKey": "Obtenir la clé API Anthropic",
+		"deepSeekApiKey": "Clé API DeepSeek",
+		"getDeepSeekApiKey": "Obtenir la clé API DeepSeek",
+		"geminiApiKey": "Clé API Gemini",
+		"getGeminiApiKey": "Obtenir la clé API Gemini",
+		"openAiApiKey": "Clé API OpenAI",
+		"openAiBaseUrl": "URL de base",
+		"getOpenAiApiKey": "Obtenir la clé API OpenAI",
+		"mistralApiKey": "Clé API Mistral",
+		"getMistralApiKey": "Obtenir la clé API Mistral / Codestral",
+		"codestralBaseUrl": "URL de base Codestral (Optionnel)",
+		"codestralBaseUrlDesc": "Définir une URL alternative pour le modèle Codestral.",
+		"awsCredentials": "Identifiants AWS",
+		"awsProfile": "Profil AWS",
+		"awsProfileName": "Nom du profil AWS",
+		"awsAccessKey": "Clé d'accès AWS",
+		"awsSecretKey": "Clé secrète AWS",
+		"awsSessionToken": "Jeton de session AWS",
+		"awsRegion": "Région AWS",
+		"awsCrossRegion": "Utiliser l'inférence inter-régions",
+		"googleCloudSetup": {
+			"title": "Pour utiliser Google Cloud Vertex AI, vous devez :",
+			"step1": "1. Créer un compte Google Cloud, activer l'API Vertex AI et activer les modèles Claude souhaités.",
+			"step2": "2. Installer Google Cloud CLI et configurer les identifiants par défaut de l'application.",
+			"step3": "3. Ou créer un compte de service avec des identifiants."
+		},
+		"googleCloudCredentials": "Identifiants Google Cloud",
+		"googleCloudKeyFile": "Chemin du fichier de clé Google Cloud",
+		"googleCloudProjectId": "ID du projet Google Cloud",
+		"googleCloudRegion": "Région Google Cloud",
+		"lmStudio": {
+			"baseUrl": "URL de base (optionnel)",
+			"modelId": "ID du modèle",
+			"speculativeDecoding": "Activer le décodage spéculatif",
+			"draftModelId": "ID du modèle brouillon",
+			"draftModelDesc": "Le modèle brouillon doit être de la même famille de modèles pour que le décodage spéculatif fonctionne correctement.",
+			"selectDraftModel": "Sélectionner le modèle brouillon",
+			"noModelsFound": "Aucun modèle brouillon trouvé. Veuillez vous assurer que LM Studio est en cours d'exécution avec le mode serveur activé.",
+			"description": "LM Studio vous permet d'exécuter des modèles localement sur votre ordinateur. Pour obtenir des instructions sur la mise en route, consultez leur <a>guide de démarrage rapide</a>. Vous devrez également démarrer la fonction <b>serveur local</b> de LM Studio pour l'utiliser avec cette extension. <span>Remarque :</span> Roo Code utilise des prompts complexes et fonctionne mieux avec les modèles Claude. Les modèles moins performants peuvent ne pas fonctionner comme prévu."
+		},
+		"ollama": {
+			"baseUrl": "URL de base (optionnel)",
+			"modelId": "ID du modèle",
+			"description": "Ollama vous permet d'exécuter des modèles localement sur votre ordinateur. Pour obtenir des instructions sur la mise en route, consultez le guide de démarrage rapide.",
+			"warning": "Remarque : Roo Code utilise des prompts complexes et fonctionne mieux avec les modèles Claude. Les modèles moins performants peuvent ne pas fonctionner comme prévu."
+		},
+		"unboundApiKey": "Clé API Unbound",
+		"getUnboundApiKey": "Obtenir la clé API Unbound",
+		"humanRelay": {
+			"description": "Aucune clé API n'est requise, mais l'utilisateur doit aider à copier et coller les informations dans le chat web de l'IA.",
+			"instructions": "Pendant l'utilisation, une boîte de dialogue apparaîtra et le message actuel sera automatiquement copié dans le presse-papiers. Vous devez le coller dans les versions web de l'IA (comme ChatGPT ou Claude), puis copier la réponse de l'IA dans la boîte de dialogue et cliquer sur le bouton de confirmation."
+		},
+		"customModel": {
+			"capabilities": "Configurez les capacités et les prix pour votre modèle personnalisé compatible OpenAI. Soyez prudent lors de la spécification des capacités du modèle, car elles peuvent affecter le fonctionnement de Roo Code.",
+			"maxTokens": {
+				"label": "Tokens de sortie maximum",
+				"description": "Nombre maximum de tokens que le modèle peut générer dans une réponse. (Spécifiez -1 pour permettre au serveur de définir les tokens maximum.)"
+			},
+			"contextWindow": {
+				"label": "Taille de la fenêtre de contexte",
+				"description": "Total des tokens (entrée + sortie) que le modèle peut traiter."
+			},
+			"imageSupport": {
+				"label": "Support des images",
+				"description": "Ce modèle est-il capable de traiter et de comprendre les images ?"
+			},
+			"computerUse": {
+				"label": "Utilisation de l'ordinateur",
+				"description": "Ce modèle est-il capable d'interagir avec un navigateur ? (ex. Claude 3.7 Sonnet)"
+			},
+			"promptCache": {
+				"label": "Mise en cache des prompts",
+				"description": "Ce modèle est-il capable de mettre en cache les prompts ?"
+			},
+			"pricing": {
+				"input": {
+					"label": "Prix d'entrée",
+					"description": "Coût par million de tokens dans l'entrée/prompt. Cela affecte le coût d'envoi du contexte et des instructions au modèle."
+				},
+				"output": {
+					"label": "Prix de sortie",
+					"description": "Coût par million de tokens dans la réponse du modèle. Cela affecte le coût du contenu généré et des complétions."
+				},
+				"cacheReads": {
+					"label": "Prix des lectures de cache",
+					"description": "Coût par million de tokens pour la lecture depuis le cache. C'est le prix facturé lors de la récupération d'une réponse mise en cache."
+				},
+				"cacheWrites": {
+					"label": "Prix des écritures de cache",
+					"description": "Coût par million de tokens pour l'écriture dans le cache. C'est le prix facturé lors de la première mise en cache d'un prompt."
+				}
+			},
+			"resetDefaults": "Réinitialiser les valeurs par défaut"
+		}
 	},
 	"browser": {
 		"enable": {
@@ -205,6 +306,9 @@
 		"outputPrice": "Prix de sortie",
 		"cacheReadsPrice": "Prix des lectures de cache",
 		"cacheWritesPrice": "Prix des écritures de cache",
+		"enableStreaming": "Activer le streaming",
+		"useAzure": "Utiliser Azure",
+		"azureApiVersion": "Définir la version de l'API Azure",
 		"gemini": {
 			"freeRequests": "* Gratuit jusqu'à {{count}} requêtes par minute. Après cela, la facturation dépend de la taille du prompt.",
 			"pricingDetails": "Pour plus d'informations, voir les détails de tarification."

+ 3 - 3
webview-ui/src/i18n/locales/hi/prompts.json

@@ -36,12 +36,12 @@
 		"title": "मोड-विशिष्ट कस्टम निर्देश (वैकल्पिक)",
 		"resetToDefault": "डिफ़ॉल्ट पर रीसेट करें",
 		"description": "{{modeName}} मोड के लिए विशिष्ट व्यवहार दिशानिर्देश जोड़ें।",
-		"loadFromFile": "{{modeName}} मोड के लिए विशिष्ट कस्टम निर्देश आपके वर्कस्पेस में .clinerules-{{modeSlug}} से भी लोड किए जा सकते हैं।"
+		"loadFromFile": "{{mode}} मोड के लिए विशिष्ट कस्टम निर्देश आपके वर्कस्पेस में <span>.clinerules-{{slug}}</span> से भी लोड किए जा सकते हैं।"
 	},
 	"globalCustomInstructions": {
 		"title": "सभी मोड्स के लिए कस्टम निर्देश",
 		"description": "ये निर्देश सभी मोड्स पर लागू होते हैं। वे व्यवहारों का एक आधार सेट प्रदान करते हैं जिन्हें नीचे दिए गए मोड-विशिष्ट निर्देशों द्वारा बढ़ाया जा सकता है।\nयदि आप चाहते हैं कि Roo आपके एडिटर की प्रदर्शन भाषा ({{language}}) से अलग भाषा में सोचे और बोले, तो आप यहां इसे निर्दिष्ट कर सकते हैं।",
-		"loadFromFile": "निर्देश आपके वर्कस्पेस में .clinerules से भी लोड किए जा सकते हैं।"
+		"loadFromFile": "निर्देश आपके वर्कस्पेस में <span>.clinerules</span> से भी लोड किए जा सकते हैं।"
 	},
 	"systemPrompt": {
 		"preview": "सिस्टम प्रॉम्प्ट का पूर्वावलोकन",
@@ -100,7 +100,7 @@
 	},
 	"advancedSystemPrompt": {
 		"title": "उन्नत: सिस्टम प्रॉम्प्ट ओवरराइड करें",
-		"description": "आप अपने वर्कस्पेस में .roo/system-prompt-{{modeSlug}} पर एक फाइल बनाकर इस मोड के लिए सिस्टम प्रॉम्प्ट को पूरी तरह से बदल सकते हैं (भूमिका परिभाषा और कस्टम निर्देशों को छोड़कर)। यह एक बहुत उन्नत सुविधा है जो अंतर्निहित सुरक्षा उपायों और सामंजस्यता जांचों को बायपास करती है (विशेष रूप से टूल उपयोग के आसपास), इसलिए सावधान रहें!"
+		"description": "आप अपने वर्कस्पेस में <span>.roo/system-prompt-{{slug}}</span> पर एक फाइल बनाकर इस मोड के लिए सिस्टम प्रॉम्प्ट को पूरी तरह से बदल सकते हैं (भूमिका परिभाषा और कस्टम निर्देशों को छोड़कर)। यह एक बहुत उन्नत सुविधा है जो अंतर्निहित सुरक्षा उपायों और सामंजस्यता जांचों को बायपास करती है (विशेष रूप से टूल उपयोग के आसपास), इसलिए सावधान रहें!"
 	},
 	"createModeDialog": {
 		"title": "नया मोड बनाएँ",

+ 105 - 1
webview-ui/src/i18n/locales/hi/settings.json

@@ -73,10 +73,111 @@
 		"configProfile": "कॉन्फिगरेशन प्रोफाइल",
 		"description": "विवरण",
 		"apiProvider": "API प्रदाता",
+		"model": "मॉडल",
 		"openRouterApiKey": "OpenRouter API कुंजी",
+		"getOpenRouterApiKey": "OpenRouter API कुंजी प्राप्त करें",
 		"apiKeyStorageNotice": "API कुंजियाँ VSCode के सुरक्षित स्टोरेज में सुरक्षित रूप से संग्रहीत हैं",
+		"glamaApiKey": "Glama API कुंजी",
+		"getGlamaApiKey": "Glama API कुंजी प्राप्त करें",
 		"useCustomBaseUrl": "कस्टम बेस URL का उपयोग करें",
-		"openRouterTransformsText": "संदर्भ आकार के लिए प्रॉम्प्ट और संदेश श्रृंखलाओं को संपीड़ित करें (<a>OpenRouter ट्रांसफॉर्म</a>)"
+		"requestyApiKey": "Requesty API कुंजी",
+		"getRequestyApiKey": "Requesty API कुंजी प्राप्त करें",
+		"openRouterTransformsText": "संदर्भ आकार के लिए प्रॉम्प्ट और संदेश श्रृंखलाओं को संपीड़ित करें (<a>OpenRouter ट्रांसफॉर्म</a>)",
+		"anthropicApiKey": "Anthropic API कुंजी",
+		"getAnthropicApiKey": "Anthropic API कुंजी प्राप्त करें",
+		"deepSeekApiKey": "DeepSeek API कुंजी",
+		"getDeepSeekApiKey": "DeepSeek API कुंजी प्राप्त करें",
+		"geminiApiKey": "Gemini API कुंजी",
+		"getGeminiApiKey": "Gemini API कुंजी प्राप्त करें",
+		"openAiApiKey": "OpenAI API कुंजी",
+		"openAiBaseUrl": "बेस URL",
+		"getOpenAiApiKey": "OpenAI API कुंजी प्राप्त करें",
+		"mistralApiKey": "Mistral API कुंजी",
+		"getMistralApiKey": "Mistral / Codestral API कुंजी प्राप्त करें",
+		"codestralBaseUrl": "Codestral बेस URL (वैकल्पिक)",
+		"codestralBaseUrlDesc": "Codestral मॉडल के लिए वैकल्पिक URL सेट करें।",
+		"awsCredentials": "AWS क्रेडेंशियल्स",
+		"awsProfile": "AWS प्रोफाइल",
+		"awsProfileName": "AWS प्रोफाइल नाम",
+		"awsAccessKey": "AWS एक्सेस कुंजी",
+		"awsSecretKey": "AWS सीक्रेट कुंजी",
+		"awsSessionToken": "AWS सत्र टोकन",
+		"awsRegion": "AWS क्षेत्र",
+		"awsCrossRegion": "क्रॉस-क्षेत्र अनुमान का उपयोग करें",
+		"googleCloudSetup": {
+			"title": "Google Cloud Vertex AI का उपयोग करने के लिए, आपको आवश्यकता है:",
+			"step1": "1. Google Cloud खाता बनाएं, Vertex AI API सक्षम करें और वांछित Claude मॉडल सक्षम करें।",
+			"step2": "2. Google Cloud CLI इंस्टॉल करें और एप्लिकेशन डिफ़ॉल्ट क्रेडेंशियल्स कॉन्फ़िगर करें।",
+			"step3": "3. या क्रेडेंशियल्स के साथ एक सर्विस अकाउंट बनाएं।"
+		},
+		"googleCloudCredentials": "Google Cloud क्रेडेंशियल्स",
+		"googleCloudKeyFile": "Google Cloud कुंजी फ़ाइल पथ",
+		"googleCloudProjectId": "Google Cloud प्रोजेक्ट ID",
+		"googleCloudRegion": "Google Cloud क्षेत्र",
+		"lmStudio": {
+			"baseUrl": "बेस URL (वैकल्पिक)",
+			"modelId": "मॉडल ID",
+			"speculativeDecoding": "स्पेक्युलेटिव डिकोडिंग सक्षम करें",
+			"draftModelId": "ड्राफ्ट मॉडल ID",
+			"draftModelDesc": "स्पेक्युलेटिव डिकोडिंग के सही काम करने के लिए ड्राफ्ट मॉडल को समान मॉडल परिवार से होना चाहिए।",
+			"selectDraftModel": "ड्राफ्ट मॉडल चुनें",
+			"noModelsFound": "कोई ड्राफ्ट मॉडल नहीं मिला। कृपया सुनिश्चित करें कि LM Studio सर्वर मोड सक्षम के साथ चल रहा है।",
+			"description": "LM Studio आपको अपने कंप्यूटर पर स्थानीय रूप से मॉडल चलाने की अनुमति देता है। आरंभ करने के निर्देशों के लिए, उनकी <a>क्विकस्टार्ट गाइड</a> देखें। आपको इस एक्सटेंशन के साथ उपयोग करने के लिए LM Studio की <b>स्थानीय सर्वर</b> सुविधा भी शुरू करनी होगी। <span>नोट:</span> Roo Code जटिल प्रॉम्प्ट्स का उपयोग करता है और Claude मॉडल के साथ सबसे अच्छा काम करता है। कम क्षमता वाले मॉडल अपेक्षित रूप से काम नहीं कर सकते हैं।"
+		},
+		"ollama": {
+			"baseUrl": "बेस URL (वैकल्पिक)",
+			"modelId": "मॉडल ID",
+			"description": "Ollama आपको अपने कंप्यूटर पर स्थानीय रूप से मॉडल चलाने की अनुमति देता है। आरंभ करने के निर्देशों के लिए, उनकी क्विकस्टार्ट गाइड देखें।",
+			"warning": "नोट: Roo Code जटिल प्रॉम्प्ट्स का उपयोग करता है और Claude मॉडल के साथ सबसे अच्छा काम करता है। कम क्षमता वाले मॉडल अपेक्षित रूप से काम नहीं कर सकते हैं।"
+		},
+		"unboundApiKey": "Unbound API कुंजी",
+		"getUnboundApiKey": "Unbound API कुंजी प्राप्त करें",
+		"humanRelay": {
+			"description": "कोई API कुंजी आवश्यक नहीं है, लेकिन उपयोगकर्ता को वेब चैट AI में जानकारी कॉपी और पेस्ट करने में मदद करनी होगी।",
+			"instructions": "उपयोग के दौरान, एक डायलॉग बॉक्स पॉप अप होगा और वर्तमान संदेश स्वचालित रूप से क्लिपबोर्ड पर कॉपी हो जाएगा। आपको इन्हें AI के वेब संस्करणों (जैसे ChatGPT या Claude) में पेस्ट करना होगा, फिर AI की प्रतिक्रिया को डायलॉग बॉक्स में वापस कॉपी करें और पुष्टि बटन पर क्लिक करें।"
+		},
+		"customModel": {
+			"capabilities": "अपने कस्टम OpenAI-संगत मॉडल के लिए क्षमताओं और मूल्य निर्धारण को कॉन्फ़िगर करें। मॉडल क्षमताओं को निर्दिष्ट करते समय सावधान रहें, क्योंकि वे Roo Code के प्रदर्शन को प्रभावित कर सकती हैं।",
+			"maxTokens": {
+				"label": "अधिकतम आउटपुट टोकन",
+				"description": "मॉडल एक प्रतिक्रिया में अधिकतम कितने टोकन जनरेट कर सकता है। (सर्वर को अधिकतम टोकन सेट करने की अनुमति देने के लिए -1 निर्दिष्ट करें।)"
+			},
+			"contextWindow": {
+				"label": "संदर्भ विंडो आकार",
+				"description": "कुल टोकन (इनपुट + आउटपुट) जो मॉडल प्रोसेस कर सकता है।"
+			},
+			"imageSupport": {
+				"label": "छवि समर्थन",
+				"description": "क्या यह मॉडल छवियों को प्रोसेस और समझने में सक्षम है?"
+			},
+			"computerUse": {
+				"label": "कंप्यूटर उपयोग",
+				"description": "क्या यह मॉडल ब्राउज़र के साथ इंटरैक्ट करने में सक्षम है? (उदा. Claude 3.7 Sonnet)।"
+			},
+			"promptCache": {
+				"label": "प्रॉम्प्ट कैशिंग",
+				"description": "क्या यह मॉडल प्रॉम्प्ट्स को कैश करने में सक्षम है?"
+			},
+			"pricing": {
+				"input": {
+					"label": "इनपुट मूल्य",
+					"description": "इनपुट/प्रॉम्प्ट में प्रति मिलियन टोकन की लागत। यह मॉडल को संदर्भ और निर्देश भेजने की लागत को प्रभावित करता है।"
+				},
+				"output": {
+					"label": "आउटपुट मूल्य",
+					"description": "मॉडल की प्रतिक्रिया में प्रति मिलियन टोकन की लागत। यह जनरेट की गई सामग्री और पूर्णताओं की लागत को प्रभावित करता है।"
+				},
+				"cacheReads": {
+					"label": "कैश रीड्स मूल्य",
+					"description": "कैश से पढ़ने के लिए प्रति मिलियन टोकन की लागत। यह वह मूल्य है जो कैश की गई प्रतिक्रिया प्राप्त करने पर लगाया जाता है।"
+				},
+				"cacheWrites": {
+					"label": "कैश राइट्स मूल्य",
+					"description": "कैश में लिखने के लिए प्रति मिलियन टोकन की लागत। यह वह मूल्य है जो पहली बार प्रॉम्प्ट को कैश करने पर लगाया जाता है।"
+				}
+			},
+			"resetDefaults": "डिफ़ॉल्ट पर रीसेट करें"
+		}
 	},
 	"browser": {
 		"enable": {
@@ -205,6 +306,9 @@
 		"outputPrice": "आउटपुट मूल्य",
 		"cacheReadsPrice": "कैश रीड्स मूल्य",
 		"cacheWritesPrice": "कैश राइट्स मूल्य",
+		"enableStreaming": "स्ट्रीमिंग सक्षम करें",
+		"useAzure": "Azure का उपयोग करें",
+		"azureApiVersion": "Azure API संस्करण सेट करें",
 		"gemini": {
 			"freeRequests": "* प्रति मिनट {{count}} अनुरोधों तक मुफ्त। उसके बाद, बिलिंग प्रॉम्प्ट आकार पर निर्भर करती है।",
 			"pricingDetails": "अधिक जानकारी के लिए, मूल्य निर्धारण विवरण देखें।"

+ 0 - 0
webview-ui/src/i18n/locales/hu/prompts.json


+ 0 - 0
webview-ui/src/i18n/locales/hu/settings.json


+ 3 - 3
webview-ui/src/i18n/locales/it/prompts.json

@@ -36,12 +36,12 @@
 		"title": "Istruzioni personalizzate specifiche per la modalità (opzionale)",
 		"resetToDefault": "Ripristina predefiniti",
 		"description": "Aggiungi linee guida comportamentali specifiche per la modalità {{modeName}}.",
-		"loadFromFile": "Le istruzioni personalizzate specifiche per la modalità {{modeName}} possono essere caricate anche da .clinerules-{{modeSlug}} nel tuo spazio di lavoro."
+		"loadFromFile": "Le istruzioni personalizzate specifiche per la modalità {{mode}} possono essere caricate anche da <span>.clinerules-{{slug}}</span> nel tuo spazio di lavoro."
 	},
 	"globalCustomInstructions": {
 		"title": "Istruzioni personalizzate per tutte le modalità",
 		"description": "Queste istruzioni si applicano a tutte le modalità. Forniscono un insieme base di comportamenti che possono essere migliorati dalle istruzioni specifiche per modalità qui sotto.\nSe desideri che Roo pensi e parli in una lingua diversa dalla lingua di visualizzazione del tuo editor ({{language}}), puoi specificarlo qui.",
-		"loadFromFile": "Le istruzioni possono essere caricate anche da .clinerules nel tuo spazio di lavoro."
+		"loadFromFile": "Le istruzioni possono essere caricate anche da <span>.clinerules</span> nel tuo spazio di lavoro."
 	},
 	"systemPrompt": {
 		"preview": "Anteprima prompt di sistema",
@@ -100,7 +100,7 @@
 	},
 	"advancedSystemPrompt": {
 		"title": "Avanzato: Sovrascrivi prompt di sistema",
-		"description": "Puoi sostituire completamente il prompt di sistema per questa modalità (a parte la definizione del ruolo e le istruzioni personalizzate) creando un file in .roo/system-prompt-{{modeSlug}} nel tuo spazio di lavoro. Questa è una funzionalità molto avanzata che bypassa le protezioni integrate e i controlli di coerenza (specialmente riguardo all'uso degli strumenti), quindi fai attenzione!"
+		"description": "Puoi sostituire completamente il prompt di sistema per questa modalità (a parte la definizione del ruolo e le istruzioni personalizzate) creando un file in <span>.roo/system-prompt-{{slug}}</span> nel tuo spazio di lavoro. Questa è una funzionalità molto avanzata che bypassa le protezioni integrate e i controlli di coerenza (specialmente riguardo all'uso degli strumenti), quindi fai attenzione!"
 	},
 	"createModeDialog": {
 		"title": "Crea nuova modalità",

+ 105 - 1
webview-ui/src/i18n/locales/it/settings.json

@@ -73,10 +73,111 @@
 		"configProfile": "Profilo di configurazione",
 		"description": "Descrizione",
 		"apiProvider": "Fornitore API",
+		"model": "Modello",
 		"openRouterApiKey": "Chiave API OpenRouter",
+		"getOpenRouterApiKey": "Ottieni chiave API OpenRouter",
 		"apiKeyStorageNotice": "Le chiavi API sono memorizzate in modo sicuro nell'Archivio Segreto di VSCode",
+		"glamaApiKey": "Chiave API Glama",
+		"getGlamaApiKey": "Ottieni chiave API Glama",
 		"useCustomBaseUrl": "Usa URL base personalizzato",
-		"openRouterTransformsText": "Comprimi prompt e catene di messaggi alla dimensione del contesto (<a>Trasformazioni OpenRouter</a>)"
+		"requestyApiKey": "Chiave API Requesty",
+		"getRequestyApiKey": "Ottieni chiave API Requesty",
+		"openRouterTransformsText": "Comprimi prompt e catene di messaggi alla dimensione del contesto (<a>Trasformazioni OpenRouter</a>)",
+		"anthropicApiKey": "Chiave API Anthropic",
+		"getAnthropicApiKey": "Ottieni chiave API Anthropic",
+		"deepSeekApiKey": "Chiave API DeepSeek",
+		"getDeepSeekApiKey": "Ottieni chiave API DeepSeek",
+		"geminiApiKey": "Chiave API Gemini",
+		"getGeminiApiKey": "Ottieni chiave API Gemini",
+		"openAiApiKey": "Chiave API OpenAI",
+		"openAiBaseUrl": "URL base",
+		"getOpenAiApiKey": "Ottieni chiave API OpenAI",
+		"mistralApiKey": "Chiave API Mistral",
+		"getMistralApiKey": "Ottieni chiave API Mistral / Codestral",
+		"codestralBaseUrl": "URL base Codestral (opzionale)",
+		"codestralBaseUrlDesc": "Imposta un URL opzionale per i modelli Codestral.",
+		"awsCredentials": "Credenziali AWS",
+		"awsProfile": "Profilo AWS",
+		"awsProfileName": "Nome profilo AWS",
+		"awsAccessKey": "Chiave di accesso AWS",
+		"awsSecretKey": "Chiave segreta AWS",
+		"awsSessionToken": "Token di sessione AWS",
+		"awsRegion": "Regione AWS",
+		"awsCrossRegion": "Usa inferenza cross-regione",
+		"googleCloudSetup": {
+			"title": "Per utilizzare Google Cloud Vertex AI, è necessario:",
+			"step1": "1. Creare un account Google Cloud, abilitare l'API Vertex AI e abilitare i modelli Claude desiderati.",
+			"step2": "2. Installare Google Cloud CLI e configurare le credenziali predefinite dell'applicazione.",
+			"step3": "3. Oppure creare un account di servizio con credenziali."
+		},
+		"googleCloudCredentials": "Credenziali Google Cloud",
+		"googleCloudKeyFile": "Percorso file chiave Google Cloud",
+		"googleCloudProjectId": "ID progetto Google Cloud",
+		"googleCloudRegion": "Regione Google Cloud",
+		"lmStudio": {
+			"baseUrl": "URL base (opzionale)",
+			"modelId": "ID modello",
+			"speculativeDecoding": "Abilita decodifica speculativa",
+			"draftModelId": "ID modello bozza",
+			"draftModelDesc": "Per un corretto funzionamento della decodifica speculativa, il modello bozza deve provenire dalla stessa famiglia di modelli.",
+			"selectDraftModel": "Seleziona modello bozza",
+			"noModelsFound": "Nessun modello bozza trovato. Assicurati che LM Studio sia in esecuzione con la modalità server abilitata.",
+			"description": "LM Studio ti permette di eseguire modelli localmente sul tuo computer. Per iniziare, consulta la loro <a>guida rapida</a>. Dovrai anche avviare la funzionalità <b>server locale</b> di LM Studio per utilizzarlo con questa estensione. <span>Nota:</span> Roo Code utilizza prompt complessi e funziona meglio con i modelli Claude. I modelli con capacità inferiori potrebbero non funzionare come previsto."
+		},
+		"ollama": {
+			"baseUrl": "URL base (opzionale)",
+			"modelId": "ID modello",
+			"description": "Ollama ti permette di eseguire modelli localmente sul tuo computer. Per iniziare, consulta la guida rapida.",
+			"warning": "Nota: Roo Code utilizza prompt complessi e funziona meglio con i modelli Claude. I modelli con capacità inferiori potrebbero non funzionare come previsto."
+		},
+		"unboundApiKey": "Chiave API Unbound",
+		"getUnboundApiKey": "Ottieni chiave API Unbound",
+		"humanRelay": {
+			"description": "Non è richiesta alcuna chiave API, ma l'utente dovrà aiutare a copiare e incollare le informazioni nella chat web AI.",
+			"instructions": "Durante l'uso, apparirà una finestra di dialogo e il messaggio corrente verrà automaticamente copiato negli appunti. Dovrai incollarlo nelle versioni web dell'AI (come ChatGPT o Claude), quindi copiare la risposta dell'AI nella finestra di dialogo e fare clic sul pulsante di conferma."
+		},
+		"customModel": {
+			"capabilities": "Configura le capacità e i prezzi del tuo modello personalizzato compatibile con OpenAI. Fai attenzione quando specifichi le capacità del modello, poiché possono influenzare le prestazioni di Roo Code.",
+			"maxTokens": {
+				"label": "Token di output massimi",
+				"description": "Numero massimo di token che il modello può generare in una risposta. (Specifica -1 per lasciare che il server imposti il massimo token.)"
+			},
+			"contextWindow": {
+				"label": "Dimensione finestra di contesto",
+				"description": "Numero totale di token (input + output) che il modello può elaborare."
+			},
+			"imageSupport": {
+				"label": "Supporto immagini",
+				"description": "Il modello è in grado di elaborare e comprendere le immagini?"
+			},
+			"computerUse": {
+				"label": "Uso del computer",
+				"description": "Il modello è in grado di interagire con il browser? (es. Claude 3.7 Sonnet)."
+			},
+			"promptCache": {
+				"label": "Cache dei prompt",
+				"description": "Il modello è in grado di memorizzare in cache i prompt?"
+			},
+			"pricing": {
+				"input": {
+					"label": "Prezzo input",
+					"description": "Costo per milione di token di input/prompt. Questo influisce sul costo di invio di contesto e istruzioni al modello."
+				},
+				"output": {
+					"label": "Prezzo output",
+					"description": "Costo per milione di token della risposta del modello. Questo influisce sul costo del contenuto generato e dei completamenti."
+				},
+				"cacheReads": {
+					"label": "Prezzo letture cache",
+					"description": "Costo per milione di token per leggere dalla cache. Questo prezzo viene applicato quando si riceve una risposta memorizzata nella cache."
+				},
+				"cacheWrites": {
+					"label": "Prezzo scritture cache",
+					"description": "Costo per milione di token per scrivere nella cache. Questo prezzo viene applicato quando si memorizza un prompt nella cache per la prima volta."
+				}
+			},
+			"resetDefaults": "Ripristina valori predefiniti"
+		}
 	},
 	"browser": {
 		"enable": {
@@ -205,6 +306,9 @@
 		"outputPrice": "Prezzo output",
 		"cacheReadsPrice": "Prezzo letture cache",
 		"cacheWritesPrice": "Prezzo scritture cache",
+		"enableStreaming": "Abilita streaming",
+		"useAzure": "Usa Azure",
+		"azureApiVersion": "Imposta versione API Azure",
 		"gemini": {
 			"freeRequests": "* Gratuito fino a {{count}} richieste al minuto. Dopo, la fatturazione dipende dalla dimensione del prompt.",
 			"pricingDetails": "Per maggiori informazioni, vedi i dettagli sui prezzi."

+ 3 - 3
webview-ui/src/i18n/locales/ja/prompts.json

@@ -36,12 +36,12 @@
 		"title": "モード固有のカスタム指示(オプション)",
 		"resetToDefault": "デフォルトにリセット",
 		"description": "{{modeName}}モードに特化した行動ガイドラインを追加します。",
-		"loadFromFile": "{{modeName}}モード固有のカスタム指示は、ワークスペースの.clinerules-{{modeSlug}}からも読み込めます。"
+		"loadFromFile": "{{mode}}モード固有のカスタム指示は、ワークスペースの<span>.clinerules-{{slug}}</span>からも読み込めます。"
 	},
 	"globalCustomInstructions": {
 		"title": "すべてのモードのカスタム指示",
 		"description": "これらの指示はすべてのモードに適用されます。モード固有の指示で強化できる基本的な動作セットを提供します。\nRooにエディタの表示言語({{language}})とは異なる言語で考えたり話したりさせたい場合は、ここで指定できます。",
-		"loadFromFile": "指示はワークスペースの.clinerulesからも読み込めます。"
+		"loadFromFile": "指示はワークスペースの<span>.clinerules</span>からも読み込めます。"
 	},
 	"systemPrompt": {
 		"preview": "システムプロンプトのプレビュー",
@@ -100,7 +100,7 @@
 	},
 	"advancedSystemPrompt": {
 		"title": "詳細設定:システムプロンプトの上書き",
-		"description": "ワークスペースの.roo/system-prompt-{{modeSlug}}にファイルを作成することで、このモードのシステムプロンプト(役割定義とカスタム指示以外)を完全に置き換えることができます。これは組み込みの安全対策と一貫性チェック(特にツールの使用に関して)をバイパスする非常に高度な機能なので、注意して使用してください!"
+		"description": "ワークスペースの<span>.roo/system-prompt-{{slug}}</span>にファイルを作成することで、このモードのシステムプロンプト(役割定義とカスタム指示以外)を完全に置き換えることができます。これは組み込みの安全対策と一貫性チェック(特にツールの使用に関して)をバイパスする非常に高度な機能なので、注意して使用してください!"
 	},
 	"createModeDialog": {
 		"title": "新しいモードを作成",

+ 109 - 5
webview-ui/src/i18n/locales/ja/settings.json

@@ -73,10 +73,111 @@
 		"configProfile": "設定プロファイル",
 		"description": "説明",
 		"apiProvider": "APIプロバイダー",
+		"model": "モデル",
 		"openRouterApiKey": "OpenRouter APIキー",
+		"getOpenRouterApiKey": "OpenRouter APIキーを取得",
 		"apiKeyStorageNotice": "APIキーはVSCodeのシークレットストレージに安全に保存されます",
+		"glamaApiKey": "Glama APIキー",
+		"getGlamaApiKey": "Glama APIキーを取得",
 		"useCustomBaseUrl": "カスタムベースURLを使用",
-		"openRouterTransformsText": "プロンプトとメッセージチェーンをコンテキストサイズに圧縮(<a>OpenRouter変換</a>)"
+		"requestyApiKey": "Requesty APIキー",
+		"getRequestyApiKey": "Requesty APIキーを取得",
+		"openRouterTransformsText": "プロンプトとメッセージチェーンをコンテキストサイズに圧縮(<a>OpenRouter変換</a>)",
+		"anthropicApiKey": "Anthropic APIキー",
+		"getAnthropicApiKey": "Anthropic APIキーを取得",
+		"deepSeekApiKey": "DeepSeek APIキー",
+		"getDeepSeekApiKey": "DeepSeek APIキーを取得",
+		"geminiApiKey": "Gemini APIキー",
+		"getGeminiApiKey": "Gemini APIキーを取得",
+		"openAiApiKey": "OpenAI APIキー",
+		"openAiBaseUrl": "ベースURL",
+		"getOpenAiApiKey": "OpenAI APIキーを取得",
+		"mistralApiKey": "Mistral APIキー",
+		"getMistralApiKey": "Mistral / Codestral APIキーを取得",
+		"codestralBaseUrl": "Codestral ベースURL(オプション)",
+		"codestralBaseUrlDesc": "Codestralモデルの代替URLを設定します。",
+		"awsCredentials": "AWS認証情報",
+		"awsProfile": "AWSプロファイル",
+		"awsProfileName": "AWSプロファイル名",
+		"awsAccessKey": "AWSアクセスキー",
+		"awsSecretKey": "AWSシークレットキー",
+		"awsSessionToken": "AWSセッショントークン",
+		"awsRegion": "AWSリージョン",
+		"awsCrossRegion": "クロスリージョン推論を使用",
+		"googleCloudSetup": {
+			"title": "Google Cloud Vertex AIを使用するには:",
+			"step1": "1. Google Cloudアカウントを作成し、Vertex AI APIを有効にして、希望するClaudeモデルを有効にします。",
+			"step2": "2. Google Cloud CLIをインストールし、アプリケーションのデフォルト認証情報を設定します。",
+			"step3": "3. または、認証情報付きのサービスアカウントを作成します。"
+		},
+		"googleCloudCredentials": "Google Cloud認証情報",
+		"googleCloudKeyFile": "Google Cloudキーファイルパス",
+		"googleCloudProjectId": "Google CloudプロジェクトID",
+		"googleCloudRegion": "Google Cloudリージョン",
+		"lmStudio": {
+			"baseUrl": "ベースURL(オプション)",
+			"modelId": "モデルID",
+			"speculativeDecoding": "投機的デコーディングを有効化",
+			"draftModelId": "ドラフトモデルID",
+			"draftModelDesc": "投機的デコーディングが正しく機能するには、ドラフトモデルは同じモデルファミリーから選択する必要があります。",
+			"selectDraftModel": "ドラフトモデルを選択",
+			"noModelsFound": "ドラフトモデルが見つかりません。LM Studioがサーバーモードで実行されていることを確認してください。",
+			"description": "LM Studioを使用すると、ローカルコンピューターでモデルを実行できます。始め方については、<a>クイックスタートガイド</a>をご覧ください。また、この拡張機能で使用するには、LM Studioの<b>ローカルサーバー</b>機能を起動する必要があります。<span>注意:</span>Roo Codeは複雑なプロンプトを使用し、Claudeモデルで最適に動作します。能力の低いモデルは期待通りに動作しない場合があります。"
+		},
+		"ollama": {
+			"baseUrl": "ベースURL(オプション)",
+			"modelId": "モデルID",
+			"description": "Ollamaを使用すると、ローカルコンピューターでモデルを実行できます。始め方については、クイックスタートガイドをご覧ください。",
+			"warning": "注意:Roo Codeは複雑なプロンプトを使用し、Claudeモデルで最適に動作します。能力の低いモデルは期待通りに動作しない場合があります。"
+		},
+		"unboundApiKey": "Unbound APIキー",
+		"getUnboundApiKey": "Unbound APIキーを取得",
+		"humanRelay": {
+			"description": "APIキーは不要ですが、ユーザーはウェブチャットAIに情報をコピー&ペーストする必要があります。",
+			"instructions": "使用中にダイアログボックスが表示され、現在のメッセージが自動的にクリップボードにコピーされます。これらをウェブ版のAI(ChatGPTやClaudeなど)に貼り付け、AIの返答をダイアログボックスにコピーして確認ボタンをクリックする必要があります。"
+		},
+		"customModel": {
+			"capabilities": "カスタムOpenAI互換モデルの機能と価格を設定します。モデルの機能はRoo Codeのパフォーマンスに影響を与える可能性があるため、慎重に指定してください。",
+			"maxTokens": {
+				"label": "最大出力トークン",
+				"description": "モデルが生成できる応答の最大トークン数。(サーバーが最大トークンを設定できるようにするには-1を指定します。)"
+			},
+			"contextWindow": {
+				"label": "コンテキストウィンドウサイズ",
+				"description": "モデルが処理できる総トークン数(入力+出力)。"
+			},
+			"imageSupport": {
+				"label": "画像サポート",
+				"description": "このモデルは画像の処理と理解が可能ですか?"
+			},
+			"computerUse": {
+				"label": "コンピューター使用",
+				"description": "このモデルはブラウザとの対話が可能ですか?(例:Claude 3.7 Sonnet)"
+			},
+			"promptCache": {
+				"label": "プロンプトキャッシュ",
+				"description": "このモデルはプロンプトのキャッシュが可能ですか?"
+			},
+			"pricing": {
+				"input": {
+					"label": "入力価格",
+					"description": "入力/プロンプトの100万トークンあたりのコスト。これはモデルにコンテキストと指示を送信するコストに影響します。"
+				},
+				"output": {
+					"label": "出力価格",
+					"description": "モデルの応答の100万トークンあたりのコスト。これは生成されたコンテンツと補完のコストに影響します。"
+				},
+				"cacheReads": {
+					"label": "キャッシュ読み取り価格",
+					"description": "キャッシュからの読み取りの100万トークンあたりのコスト。これはキャッシュされた応答を取得する際に課金される価格です。"
+				},
+				"cacheWrites": {
+					"label": "キャッシュ書き込み価格",
+					"description": "キャッシュへの書き込みの100万トークンあたりのコスト。これはプロンプトが初めてキャッシュされる際に課金される価格です。"
+				}
+			},
+			"resetDefaults": "デフォルトにリセット"
+		}
 	},
 	"browser": {
 		"enable": {
@@ -196,8 +296,8 @@
 	"modelInfo": {
 		"supportsImages": "画像をサポート",
 		"noImages": "画像をサポートしていません",
-		"supportsComputerUse": "コンピュータ使用をサポート",
-		"noComputerUse": "コンピュータ使用をサポートしていません",
+		"supportsComputerUse": "コンピュータ使用をサポート",
+		"noComputerUse": "コンピュータ使用をサポートしていません",
 		"supportsPromptCache": "プロンプトキャッシュをサポート",
 		"noPromptCache": "プロンプトキャッシュをサポートしていません",
 		"maxOutput": "最大出力",
@@ -205,9 +305,12 @@
 		"outputPrice": "出力価格",
 		"cacheReadsPrice": "キャッシュ読み取り価格",
 		"cacheWritesPrice": "キャッシュ書き込み価格",
+		"enableStreaming": "ストリーミングを有効化",
+		"useAzure": "Azureを使用",
+		"azureApiVersion": "Azure APIバージョンを設定",
 		"gemini": {
-			"freeRequests": "* 毎分{{count}}リクエストまで無料。それ以降は、プロンプトサイズに応じて課金されます。",
-			"pricingDetails": "詳細については、価格詳細をご覧ください。"
+			"freeRequests": "* 1分間あたり{{count}}リクエストまで無料。それ以降は、プロンプトサイズに応じて課金されます。",
+			"pricingDetails": "詳細は価格情報をご覧ください。"
 		}
 	},
 	"modelPicker": {

+ 3 - 3
webview-ui/src/i18n/locales/ko/prompts.json

@@ -36,12 +36,12 @@
 		"title": "모드별 사용자 지정 지침 (선택 사항)",
 		"resetToDefault": "기본값으로 재설정",
 		"description": "{{modeName}} 모드에 대한 특정 행동 지침을 추가하세요.",
-		"loadFromFile": "{{modeName}} 모드에 대한 사용자 지정 지침은 작업 공간의 .clinerules-{{modeSlug}}에서도 로드할 수 있습니다."
+		"loadFromFile": "{{mode}} 모드에 대한 사용자 지정 지침은 작업 공간의 <span>.clinerules-{{slug}}</span>에서도 로드할 수 있습니다."
 	},
 	"globalCustomInstructions": {
 		"title": "모든 모드에 대한 사용자 지정 지침",
 		"description": "이 지침은 모든 모드에 적용됩니다. 아래의 모드별 지침으로 향상될 수 있는 기본 동작 세트를 제공합니다.\nRoo가 에디터 표시 언어({{language}})와 다른 언어로 생각하고 말하기를 원하시면, 여기에 지정할 수 있습니다.",
-		"loadFromFile": "지침은 작업 공간의 .clinerules에서도 로드할 수 있습니다."
+		"loadFromFile": "지침은 작업 공간의 <span>.clinerules</span>에서도 로드할 수 있습니다."
 	},
 	"systemPrompt": {
 		"preview": "시스템 프롬프트 미리보기",
@@ -100,7 +100,7 @@
 	},
 	"advancedSystemPrompt": {
 		"title": "고급: 시스템 프롬프트 재정의",
-		"description": "작업 공간의 .roo/system-prompt-{{modeSlug}}에 파일을 생성하여 이 모드의 시스템 프롬프트(역할 정의 및 사용자 지정 지침 제외)를 완전히 대체할 수 있습니다. 이는 내장된 안전 장치와 일관성 검사(특히 도구 사용 관련)를 우회하는 매우 고급 기능이므로 주의하세요!"
+		"description": "작업 공간의 <span>.roo/system-prompt-{{slug}}</span>에 파일을 생성하여 이 모드의 시스템 프롬프트(역할 정의 및 사용자 지정 지침 제외)를 완전히 대체할 수 있습니다. 이는 내장된 안전 장치와 일관성 검사(특히 도구 사용 관련)를 우회하는 매우 고급 기능이므로 주의하세요!"
 	},
 	"createModeDialog": {
 		"title": "새 모드 만들기",

+ 110 - 6
webview-ui/src/i18n/locales/ko/settings.json

@@ -72,11 +72,112 @@
 	"providers": {
 		"configProfile": "구성 프로필",
 		"description": "설명",
-		"apiProvider": "API 공급자",
+		"apiProvider": "API 제공자",
+		"model": "모델",
 		"openRouterApiKey": "OpenRouter API 키",
+		"getOpenRouterApiKey": "OpenRouter API 키 받기",
 		"apiKeyStorageNotice": "API 키는 VSCode의 보안 저장소에 안전하게 저장됩니다",
+		"glamaApiKey": "Glama API 키",
+		"getGlamaApiKey": "Glama API 키 받기",
 		"useCustomBaseUrl": "사용자 정의 기본 URL 사용",
-		"openRouterTransformsText": "프롬프트 및 메시지 체인을 컨텍스트 크기로 압축(<a>OpenRouter 변환</a>)"
+		"requestyApiKey": "Requesty API 키",
+		"getRequestyApiKey": "Requesty API 키 받기",
+		"openRouterTransformsText": "프롬프트 및 메시지 체인을 컨텍스트 크기로 압축(<a>OpenRouter 변환</a>)",
+		"anthropicApiKey": "Anthropic API 키",
+		"getAnthropicApiKey": "Anthropic API 키 받기",
+		"deepSeekApiKey": "DeepSeek API 키",
+		"getDeepSeekApiKey": "DeepSeek API 키 받기",
+		"geminiApiKey": "Gemini API 키",
+		"getGeminiApiKey": "Gemini API 키 받기",
+		"openAiApiKey": "OpenAI API 키",
+		"openAiBaseUrl": "기본 URL",
+		"getOpenAiApiKey": "OpenAI API 키 받기",
+		"mistralApiKey": "Mistral API 키",
+		"getMistralApiKey": "Mistral / Codestral API 키 받기",
+		"codestralBaseUrl": "Codestral 기본 URL (선택사항)",
+		"codestralBaseUrlDesc": "Codestral 모델의 대체 URL을 설정합니다.",
+		"awsCredentials": "AWS 자격 증명",
+		"awsProfile": "AWS 프로필",
+		"awsProfileName": "AWS 프로필 이름",
+		"awsAccessKey": "AWS 액세스 키",
+		"awsSecretKey": "AWS 시크릿 키",
+		"awsSessionToken": "AWS 세션 토큰",
+		"awsRegion": "AWS 리전",
+		"awsCrossRegion": "교차 리전 추론 사용",
+		"googleCloudSetup": {
+			"title": "Google Cloud Vertex AI를 사용하려면:",
+			"step1": "1. Google Cloud 계정을 만들고, Vertex AI API를 활성화하고, 원하는 Claude 모델을 활성화하세요.",
+			"step2": "2. Google Cloud CLI를 설치하고 애플리케이션 기본 자격 증명을 구성하세요.",
+			"step3": "3. 또는 자격 증명이 있는 서비스 계정을 만드세요."
+		},
+		"googleCloudCredentials": "Google Cloud 자격 증명",
+		"googleCloudKeyFile": "Google Cloud 키 파일 경로",
+		"googleCloudProjectId": "Google Cloud 프로젝트 ID",
+		"googleCloudRegion": "Google Cloud 리전",
+		"lmStudio": {
+			"baseUrl": "기본 URL (선택사항)",
+			"modelId": "모델 ID",
+			"speculativeDecoding": "추측 디코딩 활성화",
+			"draftModelId": "초안 모델 ID",
+			"draftModelDesc": "추측 디코딩이 올바르게 작동하려면 초안 모델이 동일한 모델 패밀리에서 와야 합니다.",
+			"selectDraftModel": "초안 모델 선택",
+			"noModelsFound": "초안 모델을 찾을 수 없습니다. LM Studio가 서버 모드로 실행 중인지 확인하세요.",
+			"description": "LM Studio를 사용하면 컴퓨터에서 로컬로 모델을 실행할 수 있습니다. 시작하는 방법은 <a>빠른 시작 가이드</a>를 참조하세요. 이 확장 프로그램과 함께 사용하려면 LM Studio의 <b>로컬 서버</b> 기능도 시작해야 합니다. <span>참고:</span> Roo Code는 복잡한 프롬프트를 사용하며 Claude 모델에서 가장 잘 작동합니다. 덜 강력한 모델은 예상대로 작동하지 않을 수 있습니다."
+		},
+		"ollama": {
+			"baseUrl": "기본 URL (선택사항)",
+			"modelId": "모델 ID",
+			"description": "Ollama를 사용하면 컴퓨터에서 로컬로 모델을 실행할 수 있습니다. 시작하는 방법은 빠른 시작 가이드를 참조하세요.",
+			"warning": "참고: Roo Code는 복잡한 프롬프트를 사용하며 Claude 모델에서 가장 잘 작동합니다. 덜 강력한 모델은 예상대로 작동하지 않을 수 있습니다."
+		},
+		"unboundApiKey": "Unbound API 키",
+		"getUnboundApiKey": "Unbound API 키 받기",
+		"humanRelay": {
+			"description": "API 키가 필요하지 않지만, 사용자가 웹 채팅 AI에 정보를 복사하여 붙여넣어야 합니다.",
+			"instructions": "사용 중에 대화 상자가 나타나고 현재 메시지가 자동으로 클립보드에 복사됩니다. 이를 웹 버전 AI(예: ChatGPT 또는 Claude)에 붙여넣은 다음, AI의 응답을 대화 상자에 복사하고 확인 버튼을 클릭해야 합니다."
+		},
+		"customModel": {
+			"capabilities": "사용자 정의 OpenAI 호환 모델의 기능과 가격을 구성하세요. 모델 기능이 Roo Code의 성능에 영향을 미칠 수 있으므로 신중하게 지정하세요.",
+			"maxTokens": {
+				"label": "최대 출력 토큰",
+				"description": "모델이 응답에서 생성할 수 있는 최대 토큰 수입니다. (서버가 최대 토큰을 설정하도록 하려면 -1을 지정하세요.)"
+			},
+			"contextWindow": {
+				"label": "컨텍스트 창 크기",
+				"description": "모델이 처리할 수 있는 총 토큰 수(입력 + 출력)입니다."
+			},
+			"imageSupport": {
+				"label": "이미지 지원",
+				"description": "이 모델이 이미지를 처리하고 이해할 수 있습니까?"
+			},
+			"computerUse": {
+				"label": "컴퓨터 사용",
+				"description": "이 모델이 브라우저와 상호 작용할 수 있습니까? (예: Claude 3.7 Sonnet)"
+			},
+			"promptCache": {
+				"label": "프롬프트 캐시",
+				"description": "이 모델이 프롬프트를 캐시할 수 있습니까?"
+			},
+			"pricing": {
+				"input": {
+					"label": "입력 가격",
+					"description": "입력/프롬프트의 백만 토큰당 비용입니다. 이는 모델에 컨텍스트와 지침을 보내는 비용에 영향을 미칩니다."
+				},
+				"output": {
+					"label": "출력 가격",
+					"description": "모델 응답의 백만 토큰당 비용입니다. 이는 생성된 콘텐츠와 완성의 비용에 영향을 미칩니다."
+				},
+				"cacheReads": {
+					"label": "캐시 읽기 가격",
+					"description": "캐시에서 읽기의 백만 토큰당 비용입니다. 이는 캐시된 응답을 검색할 때 청구되는 가격입니다."
+				},
+				"cacheWrites": {
+					"label": "캐시 쓰기 가격",
+					"description": "캐시에 쓰기의 백만 토큰당 비용입니다. 이는 프롬프트가 처음 캐시될 때 청구되는 가격입니다."
+				}
+			},
+			"resetDefaults": "기본값으로 재설정"
+		}
 	},
 	"browser": {
 		"enable": {
@@ -198,16 +298,19 @@
 		"noImages": "이미지 지원 안 함",
 		"supportsComputerUse": "컴퓨터 사용 지원",
 		"noComputerUse": "컴퓨터 사용 지원 안 함",
-		"supportsPromptCache": "프롬프트 캐 지원",
-		"noPromptCache": "프롬프트 캐 지원 안 함",
+		"supportsPromptCache": "프롬프트 캐시 지원",
+		"noPromptCache": "프롬프트 캐시 지원 안 함",
 		"maxOutput": "최대 출력",
 		"inputPrice": "입력 가격",
 		"outputPrice": "출력 가격",
 		"cacheReadsPrice": "캐시 읽기 가격",
 		"cacheWritesPrice": "캐시 쓰기 가격",
+		"enableStreaming": "스트리밍 활성화",
+		"useAzure": "Azure 사용",
+		"azureApiVersion": "Azure API 버전 설정",
 		"gemini": {
-			"freeRequests": "* 분당 {{count}}개 요청까지 무료. 이후에는 프롬프트 크기에 따라 요금이 부과됩니다.",
-			"pricingDetails": "자세한 내용은 가격 책정 세부 정보를 참조하세요."
+			"freeRequests": "* 분당 {{count}}개 요청까지 무료. 이후에는 프롬프트 크기에 따라 요금이 부과됩니다.",
+			"pricingDetails": "자세한 내용은 가격 정보를 참조하세요."
 		}
 	},
 	"modelPicker": {

+ 5 - 5
webview-ui/src/i18n/locales/pl/prompts.json

@@ -66,19 +66,19 @@
 			},
 			"EXPLAIN": {
 				"label": "Wyjaśnij kod",
-				"description": "Uzyskaj szczegółowe wyjaśnienia fragmentów kodu, funkcji lub całych plików. Przydatne do zrozumienia złożonego kodu lub nauki nowych wzorców. Dostępne w akcjach kodu (ikona żarówki w edytorze) i w menu kontekstowym edytora (prawy przycisk myszy na wybranym kodzie)."
+				"description": "Uzyskaj szczegółowe wyjaśnienia fragmentów kodu, funkcji lub całych plików. Przydatne do zrozumienia złożonego kodu lub nauki nowych wzorców. Dostępne w akcjach kodu (ikona żarówki w edytorze) i w menu kontekstowym edytora (prawy przycisk myszy na wybranym kodzie)."
 			},
 			"FIX": {
 				"label": "Napraw problemy",
-				"description": "Uzyskaj pomoc w identyfikowaniu i rozwiązywaniu błędów, usterek lub problemów z jakością kodu. Zapewnia krok po kroku wskazówki do naprawy problemów. Dostępne w akcjach kodu (ikona żarówki w edytorze) i w menu kontekstowym edytora (prawy przycisk myszy na wybranym kodzie)."
+				"description": "Uzyskaj pomoc w identyfikowaniu i rozwiązywaniu błędów, usterek lub problemów z jakością kodu. Zapewnia krok po kroku wskazówki do naprawy problemów. Dostępne w akcjach kodu (ikona żarówki w edytorze) i w menu kontekstowym edytora (prawy przycisk myszy na wybranym kodzie)."
 			},
 			"IMPROVE": {
 				"label": "Ulepsz kod",
-				"description": "Otrzymuj sugestie dotyczące optymalizacji kodu, lepszych praktyk i ulepszeń architektonicznych przy zachowaniu funkcjonalności. Dostępne w akcjach kodu (ikona żarówki w edytorze) i w menu kontekstowym edytora (prawy przycisk myszy na wybranym kodzie)."
+				"description": "Otrzymuj sugestie dotyczące optymalizacji kodu, lepszych praktyk i ulepszeń architektonicznych przy zachowaniu funkcjonalności. Dostępne w akcjach kodu (ikona żarówki w edytorze) i w menu kontekstowym edytora (prawy przycisk myszy na wybranym kodzie)."
 			},
 			"ADD_TO_CONTEXT": {
 				"label": "Dodaj do kontekstu",
-				"description": "Dodaj kontekst do bieżącego zadania lub rozmowy. Przydatne do dostarczania dodatkowych informacji lub wyjaśnień. Dostępne w akcjach kodu (ikona żarówki w edytorze) i w menu kontekstowym edytora (prawy przycisk myszy na wybranym kodzie)."
+				"description": "Dodaj kontekst do bieżącego zadania lub rozmowy. Przydatne do dostarczania dodatkowych informacji lub wyjaśnień. Dostępne w akcjach kodu (ikona żarówki w edytorze) i w menu kontekstowym edytora (prawy przycisk myszy na wybranym kodzie)."
 			},
 			"TERMINAL_ADD_TO_CONTEXT": {
 				"label": "Dodaj zawartość terminala do kontekstu",
@@ -111,7 +111,7 @@
 		},
 		"slug": {
 			"label": "Slug",
-			"description": "Slug jest używany w adresach URL i nazwach plików. Powinien być małymi literami i zawierać tylko litery, cyfry i myślniki."
+			"description": "Slug jest używany w adresach URL i nazwach plików. Powinien być małymi literami i zawierać tylko litery, cyfry i myślniki."
 		},
 		"saveLocation": {
 			"label": "Lokalizacja zapisu",

+ 105 - 1
webview-ui/src/i18n/locales/pl/settings.json

@@ -73,10 +73,111 @@
 		"configProfile": "Profil konfiguracji",
 		"description": "Opis",
 		"apiProvider": "Dostawca API",
+		"model": "Model",
 		"openRouterApiKey": "Klucz API OpenRouter",
+		"getOpenRouterApiKey": "Uzyskaj klucz API OpenRouter",
 		"apiKeyStorageNotice": "Klucze API są bezpiecznie przechowywane w Tajnym Magazynie VSCode",
+		"glamaApiKey": "Klucz API Glama",
+		"getGlamaApiKey": "Uzyskaj klucz API Glama",
 		"useCustomBaseUrl": "Użyj niestandardowego URL bazowego",
-		"openRouterTransformsText": "Kompresuj podpowiedzi i łańcuchy wiadomości do rozmiaru kontekstu (<a>Transformacje OpenRouter</a>)"
+		"requestyApiKey": "Klucz API Requesty",
+		"getRequestyApiKey": "Uzyskaj klucz API Requesty",
+		"openRouterTransformsText": "Kompresuj podpowiedzi i łańcuchy wiadomości do rozmiaru kontekstu (<a>Transformacje OpenRouter</a>)",
+		"anthropicApiKey": "Klucz API Anthropic",
+		"getAnthropicApiKey": "Uzyskaj klucz API Anthropic",
+		"deepSeekApiKey": "Klucz API DeepSeek",
+		"getDeepSeekApiKey": "Uzyskaj klucz API DeepSeek",
+		"geminiApiKey": "Klucz API Gemini",
+		"getGeminiApiKey": "Uzyskaj klucz API Gemini",
+		"openAiApiKey": "Klucz API OpenAI",
+		"openAiBaseUrl": "URL bazowy",
+		"getOpenAiApiKey": "Uzyskaj klucz API OpenAI",
+		"mistralApiKey": "Klucz API Mistral",
+		"getMistralApiKey": "Uzyskaj klucz API Mistral / Codestral",
+		"codestralBaseUrl": "URL bazowy Codestral (opcjonalnie)",
+		"codestralBaseUrlDesc": "Ustaw opcjonalny URL dla modeli Codestral.",
+		"awsCredentials": "Poświadczenia AWS",
+		"awsProfile": "Profil AWS",
+		"awsProfileName": "Nazwa profilu AWS",
+		"awsAccessKey": "Klucz dostępu AWS",
+		"awsSecretKey": "Klucz tajny AWS",
+		"awsSessionToken": "Token sesji AWS",
+		"awsRegion": "Region AWS",
+		"awsCrossRegion": "Użyj wnioskowania międzyregionalnego",
+		"googleCloudSetup": {
+			"title": "Aby korzystać z Google Cloud Vertex AI, potrzebujesz:",
+			"step1": "1. Utworzyć konto Google Cloud, włączyć API Vertex AI i włączyć żądane modele Claude.",
+			"step2": "2. Zainstalować Google Cloud CLI i skonfigurować domyślne poświadczenia aplikacji.",
+			"step3": "3. Lub utworzyć konto usługi z poświadczeniami."
+		},
+		"googleCloudCredentials": "Poświadczenia Google Cloud",
+		"googleCloudKeyFile": "Ścieżka pliku klucza Google Cloud",
+		"googleCloudProjectId": "ID projektu Google Cloud",
+		"googleCloudRegion": "Region Google Cloud",
+		"lmStudio": {
+			"baseUrl": "URL bazowy (opcjonalnie)",
+			"modelId": "ID modelu",
+			"speculativeDecoding": "Włącz dekodowanie spekulacyjne",
+			"draftModelId": "ID modelu szkicu",
+			"draftModelDesc": "Aby dekodowanie spekulacyjne działało poprawnie, model szkicu musi pochodzić z tej samej rodziny modeli.",
+			"selectDraftModel": "Wybierz model szkicu",
+			"noModelsFound": "Nie znaleziono modeli szkicu. Upewnij się, że LM Studio jest uruchomione z włączonym trybem serwera.",
+			"description": "LM Studio pozwala na lokalne uruchamianie modeli na twoim komputerze. Aby rozpocząć, zapoznaj się z ich <a>przewodnikiem szybkiego startu</a>. Będziesz również musiał uruchomić funkcję <b>serwera lokalnego</b> LM Studio, aby używać go z tym rozszerzeniem. <span>Uwaga:</span> Roo Code używa złożonych podpowiedzi i działa najlepiej z modelami Claude. Modele o niższych możliwościach mogą nie działać zgodnie z oczekiwaniami."
+		},
+		"ollama": {
+			"baseUrl": "URL bazowy (opcjonalnie)",
+			"modelId": "ID modelu",
+			"description": "Ollama pozwala na lokalne uruchamianie modeli na twoim komputerze. Aby rozpocząć, zapoznaj się z przewodnikiem szybkiego startu.",
+			"warning": "Uwaga: Roo Code używa złożonych podpowiedzi i działa najlepiej z modelami Claude. Modele o niższych możliwościach mogą nie działać zgodnie z oczekiwaniami."
+		},
+		"unboundApiKey": "Klucz API Unbound",
+		"getUnboundApiKey": "Uzyskaj klucz API Unbound",
+		"humanRelay": {
+			"description": "Nie jest wymagany klucz API, ale użytkownik będzie musiał pomóc w kopiowaniu i wklejaniu informacji do czatu internetowego AI.",
+			"instructions": "Podczas użytkowania pojawi się okno dialogowe, a bieżąca wiadomość zostanie automatycznie skopiowana do schowka. Będziesz musiał wkleić ją do internetowych wersji AI (takich jak ChatGPT lub Claude), a następnie skopiować odpowiedź AI z powrotem do okna dialogowego i kliknąć przycisk potwierdzenia."
+		},
+		"customModel": {
+			"capabilities": "Skonfiguruj możliwości i ceny swojego niestandardowego modelu zgodnego z OpenAI. Zachowaj ostrożność podczas określania możliwości modelu, ponieważ mogą one wpływać na wydajność Roo Code.",
+			"maxTokens": {
+				"label": "Maksymalna liczba tokenów wyjściowych",
+				"description": "Maksymalna liczba tokenów, które model może wygenerować w odpowiedzi. (Określ -1, aby pozwolić serwerowi ustawić maksymalną liczbę tokenów.)"
+			},
+			"contextWindow": {
+				"label": "Rozmiar okna kontekstu",
+				"description": "Całkowita liczba tokenów (wejście + wyjście), które model może przetworzyć."
+			},
+			"imageSupport": {
+				"label": "Obsługa obrazów",
+				"description": "Czy model jest w stanie przetwarzać i rozumieć obrazy?"
+			},
+			"computerUse": {
+				"label": "Użycie komputera",
+				"description": "Czy model jest w stanie wchodzić w interakcję z przeglądarką? (np. Claude 3.7 Sonnet)."
+			},
+			"promptCache": {
+				"label": "Buforowanie podpowiedzi",
+				"description": "Czy model jest w stanie buforować podpowiedzi?"
+			},
+			"pricing": {
+				"input": {
+					"label": "Cena wejścia",
+					"description": "Koszt za milion tokenów wejściowych/podpowiedzi. Wpływa to na koszt wysyłania kontekstu i instrukcji do modelu."
+				},
+				"output": {
+					"label": "Cena wyjścia",
+					"description": "Koszt za milion tokenów odpowiedzi modelu. Wpływa to na koszt generowanej treści i uzupełnień."
+				},
+				"cacheReads": {
+					"label": "Cena odczytów bufora",
+					"description": "Koszt za milion tokenów za odczyt z bufora. Ta cena jest naliczana przy otrzymywaniu zbuforowanej odpowiedzi."
+				},
+				"cacheWrites": {
+					"label": "Cena zapisów bufora",
+					"description": "Koszt za milion tokenów za zapis do bufora. Ta cena jest naliczana przy pierwszym buforowaniu podpowiedzi."
+				}
+			},
+			"resetDefaults": "Przywróć domyślne"
+		}
 	},
 	"browser": {
 		"enable": {
@@ -205,6 +306,9 @@
 		"outputPrice": "Cena wyjścia",
 		"cacheReadsPrice": "Cena odczytów bufora",
 		"cacheWritesPrice": "Cena zapisów bufora",
+		"enableStreaming": "Włącz strumieniowanie",
+		"useAzure": "Użyj Azure",
+		"azureApiVersion": "Ustaw wersję API Azure",
 		"gemini": {
 			"freeRequests": "* Darmowe do {{count}} zapytań na minutę. Po tym, rozliczanie zależy od rozmiaru podpowiedzi.",
 			"pricingDetails": "Więcej informacji znajdziesz w szczegółach cennika."

+ 10 - 10
webview-ui/src/i18n/locales/pt-BR/prompts.json

@@ -54,7 +54,7 @@
 		"prompt": "Prompt",
 		"enhance": {
 			"apiConfiguration": "Configuração de API",
-			"apiConfigDescription": "Você pode selecionar uma configuração de API para usar sempre para aprimorar prompts, ou simplesmente usar a que está atualmente selecionada",
+			"apiConfigDescription": "Você pode selecionar uma configuração de API para usar sempre para aprimorar prompts, ou simplesmente usar a que está atualmente selecionada",
 			"useCurrentConfig": "Usar configuração de API atualmente selecionada",
 			"testPromptPlaceholder": "Digite um prompt para testar o aprimoramento",
 			"previewButton": "Visualizar aprimoramento do prompt"
@@ -62,35 +62,35 @@
 		"types": {
 			"ENHANCE": {
 				"label": "Aprimorar Prompt",
-				"description": "Use o aprimoramento de prompt para obter sugestões ou melhorias personalizadas para suas entradas. Isso garante que o Roo entenda sua intenção e forneça as melhores respostas possíveis. Disponível através do ícone ✨ no chat."
+				"description": "Use o aprimoramento de prompt para obter sugestões ou melhorias personalizadas para suas entradas. Isso garante que o Roo entenda sua intenção e forneça as melhores respostas possíveis. Disponível através do ícone ✨ no chat."
 			},
 			"EXPLAIN": {
 				"label": "Explicar Código",
-				"description": "Obtenha explicações detalhadas de trechos de código, funções ou arquivos inteiros. Útil para entender código complexo ou aprender novos padrões. Disponível nas ações de código (ícone de lâmpada no editor) e no menu de contexto do editor (clique com o botão direito no código selecionado)."
+				"description": "Obtenha explicações detalhadas de trechos de código, funções ou arquivos inteiros. Útil para entender código complexo ou aprender novos padrões. Disponível nas ações de código (ícone de lâmpada no editor) e no menu de contexto do editor (clique com o botão direito no código selecionado)."
 			},
 			"FIX": {
 				"label": "Corrigir Problemas",
-				"description": "Obtenha ajuda para identificar e resolver bugs, erros ou problemas de qualidade de código. Fornece orientação passo a passo para corrigir problemas. Disponível nas ações de código (ícone de lâmpada no editor) e no menu de contexto do editor (clique com o botão direito no código selecionado)."
+				"description": "Obtenha ajuda para identificar e resolver bugs, erros ou problemas de qualidade de código. Fornece orientação passo a passo para corrigir problemas. Disponível nas ações de código (ícone de lâmpada no editor) e no menu de contexto do editor (clique com o botão direito no código selecionado)."
 			},
 			"IMPROVE": {
 				"label": "Melhorar Código",
-				"description": "Receba sugestões para otimização de código, melhores práticas e melhorias arquitetônicas mantendo a funcionalidade. Disponível nas ações de código (ícone de lâmpada no editor) e no menu de contexto do editor (clique com o botão direito no código selecionado)."
+				"description": "Receba sugestões para otimização de código, melhores práticas e melhorias arquitetônicas mantendo a funcionalidade. Disponível nas ações de código (ícone de lâmpada no editor) e no menu de contexto do editor (clique com o botão direito no código selecionado)."
 			},
 			"ADD_TO_CONTEXT": {
 				"label": "Adicionar ao Contexto",
-				"description": "Adicione contexto à sua tarefa ou conversa atual. Útil para fornecer informações adicionais ou esclarecimentos. Disponível nas ações de código (ícone de lâmpada no editor) e no menu de contexto do editor (clique com o botão direito no código selecionado)."
+				"description": "Adicione contexto à sua tarefa ou conversa atual. Útil para fornecer informações adicionais ou esclarecimentos. Disponível nas ações de código (ícone de lâmpada no editor) e no menu de contexto do editor (clique com o botão direito no código selecionado)."
 			},
 			"TERMINAL_ADD_TO_CONTEXT": {
 				"label": "Adicionar Conteúdo do Terminal ao Contexto",
-				"description": "Adicione a saída do terminal à sua tarefa ou conversa atual. Útil para fornecer saídas de comandos ou logs. Disponível no menu de contexto do terminal (clique com o botão direito no conteúdo selecionado do terminal)."
+				"description": "Adicione a saída do terminal à sua tarefa ou conversa atual. Útil para fornecer saídas de comandos ou logs. Disponível no menu de contexto do terminal (clique com o botão direito no conteúdo selecionado do terminal)."
 			},
 			"TERMINAL_FIX": {
 				"label": "Corrigir Comando do Terminal",
-				"description": "Obtenha ajuda para corrigir comandos de terminal que falharam ou precisam de melhorias. Disponível no menu de contexto do terminal (clique com o botão direito no conteúdo selecionado do terminal)."
+				"description": "Obtenha ajuda para corrigir comandos de terminal que falharam ou precisam de melhorias. Disponível no menu de contexto do terminal (clique com o botão direito no conteúdo selecionado do terminal)."
 			},
 			"TERMINAL_EXPLAIN": {
 				"label": "Explicar Comando do Terminal",
-				"description": "Obtenha explicações detalhadas de comandos de terminal e suas saídas. Disponível no menu de contexto do terminal (clique com o botão direito no conteúdo selecionado do terminal)."
+				"description": "Obtenha explicações detalhadas de comandos de terminal e suas saídas. Disponível no menu de contexto do terminal (clique com o botão direito no conteúdo selecionado do terminal)."
 			}
 		}
 	},
@@ -139,7 +139,7 @@
 		},
 		"buttons": {
 			"cancel": "Cancelar",
-			"create": "Criar modo"
+			"create": "Criar modo"
 		},
 		"deleteMode": "Excluir modo"
 	},

+ 105 - 1
webview-ui/src/i18n/locales/pt-BR/settings.json

@@ -73,10 +73,111 @@
 		"configProfile": "Perfil de configuração",
 		"description": "Descrição",
 		"apiProvider": "Provedor de API",
+		"model": "Modelo",
 		"openRouterApiKey": "Chave de API OpenRouter",
+		"getOpenRouterApiKey": "Obter chave de API OpenRouter",
 		"apiKeyStorageNotice": "As chaves de API são armazenadas com segurança no Armazenamento Secreto do VSCode",
+		"glamaApiKey": "Chave de API Glama",
+		"getGlamaApiKey": "Obter chave de API Glama",
 		"useCustomBaseUrl": "Usar URL base personalizado",
-		"openRouterTransformsText": "Comprimir prompts e cadeias de mensagens para o tamanho do contexto (<a>Transformações OpenRouter</a>)"
+		"requestyApiKey": "Chave de API Requesty",
+		"getRequestyApiKey": "Obter chave de API Requesty",
+		"openRouterTransformsText": "Comprimir prompts e cadeias de mensagens para o tamanho do contexto (<a>Transformações OpenRouter</a>)",
+		"anthropicApiKey": "Chave de API Anthropic",
+		"getAnthropicApiKey": "Obter chave de API Anthropic",
+		"deepSeekApiKey": "Chave de API DeepSeek",
+		"getDeepSeekApiKey": "Obter chave de API DeepSeek",
+		"geminiApiKey": "Chave de API Gemini",
+		"getGeminiApiKey": "Obter chave de API Gemini",
+		"openAiApiKey": "Chave de API OpenAI",
+		"openAiBaseUrl": "URL Base",
+		"getOpenAiApiKey": "Obter chave de API OpenAI",
+		"mistralApiKey": "Chave de API Mistral",
+		"getMistralApiKey": "Obter chave de API Mistral / Codestral",
+		"codestralBaseUrl": "URL Base Codestral (Opcional)",
+		"codestralBaseUrlDesc": "Defina uma URL alternativa para o modelo Codestral.",
+		"awsCredentials": "Credenciais AWS",
+		"awsProfile": "Perfil AWS",
+		"awsProfileName": "Nome do Perfil AWS",
+		"awsAccessKey": "Chave de Acesso AWS",
+		"awsSecretKey": "Chave Secreta AWS",
+		"awsSessionToken": "Token de Sessão AWS",
+		"awsRegion": "Região AWS",
+		"awsCrossRegion": "Usar inferência entre regiões",
+		"googleCloudSetup": {
+			"title": "Para usar o Google Cloud Vertex AI, você precisa:",
+			"step1": "1. Criar uma conta Google Cloud, ativar a API Vertex AI e ativar os modelos Claude desejados.",
+			"step2": "2. Instalar o CLI do Google Cloud e configurar as credenciais padrão do aplicativo.",
+			"step3": "3. Ou criar uma conta de serviço com credenciais."
+		},
+		"googleCloudCredentials": "Credenciais Google Cloud",
+		"googleCloudKeyFile": "Caminho do Arquivo de Chave Google Cloud",
+		"googleCloudProjectId": "ID do Projeto Google Cloud",
+		"googleCloudRegion": "Região Google Cloud",
+		"lmStudio": {
+			"baseUrl": "URL Base (opcional)",
+			"modelId": "ID do Modelo",
+			"speculativeDecoding": "Ativar Decodificação Especulativa",
+			"draftModelId": "ID do Modelo de Rascunho",
+			"draftModelDesc": "O modelo de rascunho deve ser da mesma família de modelos para que a decodificação especulativa funcione corretamente.",
+			"selectDraftModel": "Selecionar Modelo de Rascunho",
+			"noModelsFound": "Nenhum modelo de rascunho encontrado. Certifique-se de que o LM Studio esteja em execução com o Modo Servidor ativado.",
+			"description": "O LM Studio permite que você execute modelos localmente em seu computador. Para instruções sobre como começar, veja o <a>guia de início rápido</a> deles. Você também precisará iniciar o recurso de <b>servidor local</b> do LM Studio para usá-lo com esta extensão. <span>Nota:</span> O Roo Code usa prompts complexos e funciona melhor com modelos Claude. Modelos menos capazes podem não funcionar como esperado."
+		},
+		"ollama": {
+			"baseUrl": "URL Base (opcional)",
+			"modelId": "ID do Modelo",
+			"description": "O Ollama permite que você execute modelos localmente em seu computador. Para instruções sobre como começar, veja o guia de início rápido deles.",
+			"warning": "Nota: O Roo Code usa prompts complexos e funciona melhor com modelos Claude. Modelos menos capazes podem não funcionar como esperado."
+		},
+		"unboundApiKey": "Chave de API Unbound",
+		"getUnboundApiKey": "Obter chave de API Unbound",
+		"humanRelay": {
+			"description": "Não é necessária chave de API, mas o usuário precisa ajudar a copiar e colar as informações para a IA do chat web.",
+			"instructions": "Durante o uso, uma caixa de diálogo será exibida e a mensagem atual será copiada para a área de transferência automaticamente. Você precisa colar isso nas versões web de IA (como ChatGPT ou Claude), depois copiar a resposta da IA de volta para a caixa de diálogo e clicar no botão confirmar."
+		},
+		"customModel": {
+			"capabilities": "Configure as capacidades e preços para seu modelo personalizado compatível com OpenAI. Tenha cuidado ao especificar as capacidades do modelo, pois elas podem afetar como o Roo Code funciona.",
+			"maxTokens": {
+				"label": "Máximo de Tokens de Saída",
+				"description": "Número máximo de tokens que o modelo pode gerar em uma resposta. (Especifique -1 para permitir que o servidor defina o máximo de tokens.)"
+			},
+			"contextWindow": {
+				"label": "Tamanho da Janela de Contexto",
+				"description": "Total de tokens (entrada + saída) que o modelo pode processar."
+			},
+			"imageSupport": {
+				"label": "Suporte a Imagens",
+				"description": "Este modelo é capaz de processar e entender imagens?"
+			},
+			"computerUse": {
+				"label": "Uso do Computador",
+				"description": "Este modelo é capaz de interagir com um navegador? (ex. Claude 3.7 Sonnet)."
+			},
+			"promptCache": {
+				"label": "Cache de Prompts",
+				"description": "Este modelo é capaz de fazer cache de prompts?"
+			},
+			"pricing": {
+				"input": {
+					"label": "Preço de Entrada",
+					"description": "Custo por milhão de tokens na entrada/prompt. Isso afeta o custo de enviar contexto e instruções para o modelo."
+				},
+				"output": {
+					"label": "Preço de Saída",
+					"description": "Custo por milhão de tokens na resposta do modelo. Isso afeta o custo do conteúdo gerado e das conclusões."
+				},
+				"cacheReads": {
+					"label": "Preço de Leituras de Cache",
+					"description": "Custo por milhão de tokens para leitura do cache. Este é o preço cobrado quando uma resposta em cache é recuperada."
+				},
+				"cacheWrites": {
+					"label": "Preço de Escritas de Cache",
+					"description": "Custo por milhão de tokens para escrita no cache. Este é o preço cobrado quando um prompt é armazenado em cache pela primeira vez."
+				}
+			},
+			"resetDefaults": "Restaurar Padrões"
+		}
 	},
 	"browser": {
 		"enable": {
@@ -205,6 +306,9 @@
 		"outputPrice": "Preço de saída",
 		"cacheReadsPrice": "Preço de leituras de cache",
 		"cacheWritesPrice": "Preço de escritas de cache",
+		"enableStreaming": "Ativar streaming",
+		"useAzure": "Usar Azure",
+		"azureApiVersion": "Definir versão da API Azure",
 		"gemini": {
 			"freeRequests": "* Gratuito até {{count}} requisições por minuto. Depois disso, a cobrança depende do tamanho do prompt.",
 			"pricingDetails": "Para mais informações, consulte os detalhes de preços."

+ 0 - 0
webview-ui/src/i18n/locales/pt/prompts.json


+ 0 - 0
webview-ui/src/i18n/locales/pt/settings.json


+ 0 - 0
webview-ui/src/i18n/locales/ru/prompts.json


+ 0 - 0
webview-ui/src/i18n/locales/ru/settings.json


+ 3 - 3
webview-ui/src/i18n/locales/tr/prompts.json

@@ -36,12 +36,12 @@
 		"title": "Moda özgü özel talimatlar (isteğe bağlı)",
 		"resetToDefault": "Varsayılana sıfırla",
 		"description": "{{modeName}} modu için özel davranış yönergeleri ekleyin.",
-		"loadFromFile": "{{modeName}} moduna özgü özel talimatlar, çalışma alanınızdaki .clinerules-{{modeSlug}} dosyasından da yüklenebilir."
+		"loadFromFile": "{{mode}} moduna özgü özel talimatlar ayrıca çalışma alanınızdaki <span>.clinerules-{{slug}}</span> dosyasından da yüklenebilir."
 	},
 	"globalCustomInstructions": {
 		"title": "Tüm Modlar için Özel Talimatlar",
 		"description": "Bu talimatlar tüm modlara uygulanır. Aşağıdaki moda özgü talimatlarla geliştirilebilen temel davranış seti sağlarlar.\nRoo'nun editörünüzün görüntüleme dilinden ({{language}}) farklı bir dilde düşünmesini ve konuşmasını istiyorsanız, burada belirtebilirsiniz.",
-		"loadFromFile": "Talimatlar, çalışma alanınızdaki .clinerules dosyasından da yüklenebilir."
+		"loadFromFile": "Talimatlar ayrıca çalışma alanınızdaki <span>.clinerules</span> dosyasından da yüklenebilir."
 	},
 	"systemPrompt": {
 		"preview": "Sistem promptunu önizle",
@@ -100,7 +100,7 @@
 	},
 	"advancedSystemPrompt": {
 		"title": "Gelişmiş: Sistem Promptunu Geçersiz Kıl",
-		"description": "Çalışma alanınızda .roo/system-prompt-{{modeSlug}} konumunda bir dosya oluşturarak bu mod için sistem promptunu (rol tanımı ve özel talimatlar dışında) tamamen değiştirebilirsiniz. Bu, yerleşik güvenceleri ve tutarlılık kontrollerini (özellikle araç kullanımı etrafında) atlayan çok gelişmiş bir özelliktir, bu yüzden dikkatli olun!"
+		"description": "Çalışma alanınızda <span>.roo/system-prompt-{{slug}}</span> konumunda bir dosya oluşturarak bu mod için sistem istemini tamamen değiştirebilirsiniz (rol tanımı ve özel talimatlar hariç). Bu, yerleşik güvenlik önlemlerini ve tutarlılık kontrollerini (özellikle araç kullanımıyla ilgili) aşan çok gelişmiş bir özelliktir, bu yüzden dikkatli olun!"
 	},
 	"createModeDialog": {
 		"title": "Yeni Mod Oluştur",

+ 105 - 1
webview-ui/src/i18n/locales/tr/settings.json

@@ -73,10 +73,111 @@
 		"configProfile": "Yapılandırma Profili",
 		"description": "Açıklama",
 		"apiProvider": "API Sağlayıcı",
+		"model": "Model",
 		"openRouterApiKey": "OpenRouter API Anahtarı",
+		"getOpenRouterApiKey": "OpenRouter API Anahtarı Al",
 		"apiKeyStorageNotice": "API anahtarları VSCode'un Gizli Depolamasında güvenli bir şekilde saklanır",
+		"glamaApiKey": "Glama API Anahtarı",
+		"getGlamaApiKey": "Glama API Anahtarı Al",
 		"useCustomBaseUrl": "Özel temel URL kullan",
-		"openRouterTransformsText": "İstem ve mesaj zincirlerini bağlam boyutuna sıkıştır (<a>OpenRouter Dönüşümleri</a>)"
+		"requestyApiKey": "Requesty API Anahtarı",
+		"getRequestyApiKey": "Requesty API Anahtarı Al",
+		"openRouterTransformsText": "İstem ve mesaj zincirlerini bağlam boyutuna sıkıştır (<a>OpenRouter Dönüşümleri</a>)",
+		"anthropicApiKey": "Anthropic API Anahtarı",
+		"getAnthropicApiKey": "Anthropic API Anahtarı Al",
+		"deepSeekApiKey": "DeepSeek API Anahtarı",
+		"getDeepSeekApiKey": "DeepSeek API Anahtarı Al",
+		"geminiApiKey": "Gemini API Anahtarı",
+		"getGeminiApiKey": "Gemini API Anahtarı Al",
+		"openAiApiKey": "OpenAI API Anahtarı",
+		"openAiBaseUrl": "Temel URL",
+		"getOpenAiApiKey": "OpenAI API Anahtarı Al",
+		"mistralApiKey": "Mistral API Anahtarı",
+		"getMistralApiKey": "Mistral / Codestral API Anahtarı Al",
+		"codestralBaseUrl": "Codestral Temel URL (İsteğe bağlı)",
+		"codestralBaseUrlDesc": "Codestral modeli için alternatif URL ayarlayın.",
+		"awsCredentials": "AWS Kimlik Bilgileri",
+		"awsProfile": "AWS Profili",
+		"awsProfileName": "AWS Profil Adı",
+		"awsAccessKey": "AWS Erişim Anahtarı",
+		"awsSecretKey": "AWS Gizli Anahtarı",
+		"awsSessionToken": "AWS Oturum Belirteci",
+		"awsRegion": "AWS Bölgesi",
+		"awsCrossRegion": "Bölgeler arası çıkarım kullan",
+		"googleCloudSetup": {
+			"title": "Google Cloud Vertex AI'yi kullanmak için şunları yapmanız gerekir:",
+			"step1": "1. Google Cloud hesabı oluşturun, Vertex AI API'sini etkinleştirin ve istediğiniz Claude modellerini etkinleştirin.",
+			"step2": "2. Google Cloud CLI'yi yükleyin ve uygulama varsayılan kimlik bilgilerini yapılandırın.",
+			"step3": "3. Veya kimlik bilgileriyle bir hizmet hesabı oluşturun."
+		},
+		"googleCloudCredentials": "Google Cloud Kimlik Bilgileri",
+		"googleCloudKeyFile": "Google Cloud Anahtar Dosyası Yolu",
+		"googleCloudProjectId": "Google Cloud Proje Kimliği",
+		"googleCloudRegion": "Google Cloud Bölgesi",
+		"lmStudio": {
+			"baseUrl": "Temel URL (İsteğe bağlı)",
+			"modelId": "Model Kimliği",
+			"speculativeDecoding": "Spekülatif Kod Çözmeyi Etkinleştir",
+			"draftModelId": "Taslak Model Kimliği",
+			"draftModelDesc": "Spekülatif kod çözmenin doğru çalışması için taslak model aynı model ailesinden olmalıdır.",
+			"selectDraftModel": "Taslak Model Seç",
+			"noModelsFound": "Taslak model bulunamadı. Lütfen LM Studio'nun Sunucu Modu etkinken çalıştığından emin olun.",
+			"description": "LM Studio, modelleri bilgisayarınızda yerel olarak çalıştırmanıza olanak tanır. Başlamak için <a>hızlı başlangıç kılavuzlarına</a> bakın. Bu uzantıyla kullanmak için LM Studio'nun <b>yerel sunucu</b> özelliğini de başlatmanız gerekecektir. <span>Not:</span> Roo Code karmaşık istemler kullanır ve Claude modelleriyle en iyi şekilde çalışır. Daha az yetenekli modeller beklendiği gibi çalışmayabilir."
+		},
+		"ollama": {
+			"baseUrl": "Temel URL (İsteğe bağlı)",
+			"modelId": "Model Kimliği",
+			"description": "Ollama, modelleri bilgisayarınızda yerel olarak çalıştırmanıza olanak tanır. Başlamak için hızlı başlangıç kılavuzlarına bakın.",
+			"warning": "Not: Roo Code karmaşık istemler kullanır ve Claude modelleriyle en iyi şekilde çalışır. Daha az yetenekli modeller beklendiği gibi çalışmayabilir."
+		},
+		"unboundApiKey": "Unbound API Anahtarı",
+		"getUnboundApiKey": "Unbound API Anahtarı Al",
+		"humanRelay": {
+			"description": "API anahtarı gerekmez, ancak kullanıcının bilgileri web sohbet yapay zekasına kopyalayıp yapıştırması gerekir.",
+			"instructions": "Kullanım sırasında bir iletişim kutusu açılacak ve mevcut mesaj otomatik olarak panoya kopyalanacaktır. Bunları web yapay zekalarına (ChatGPT veya Claude gibi) yapıştırmanız, ardından yapay zekanın yanıtını iletişim kutusuna kopyalayıp onay düğmesine tıklamanız gerekir."
+		},
+		"customModel": {
+			"capabilities": "Özel OpenAI uyumlu modelinizin yeteneklerini ve fiyatlandırmasını yapılandırın. Model yeteneklerini belirtirken dikkatli olun, çünkü bunlar Roo Code'un performansını etkileyebilir.",
+			"maxTokens": {
+				"label": "Maksimum Çıktı Token'ları",
+				"description": "Modelin bir yanıtta üretebileceği maksimum token sayısı. (Sunucunun maksimum token'ları ayarlamasına izin vermek için -1 belirtin.)"
+			},
+			"contextWindow": {
+				"label": "Bağlam Penceresi Boyutu",
+				"description": "Modelin işleyebileceği toplam token sayısı (giriş + çıkış)."
+			},
+			"imageSupport": {
+				"label": "Görüntü Desteği",
+				"description": "Bu model görüntüleri işleyip anlayabilir mi?"
+			},
+			"computerUse": {
+				"label": "Bilgisayar Kullanımı",
+				"description": "Bu model bir tarayıcıyla etkileşim kurabilir mi? (örn. Claude 3.7 Sonnet)"
+			},
+			"promptCache": {
+				"label": "İstem Önbelleği",
+				"description": "Bu model istemleri önbelleğe alabilir mi?"
+			},
+			"pricing": {
+				"input": {
+					"label": "Giriş Fiyatı",
+					"description": "Giriş/istem başına milyon token maliyeti. Bu, modele bağlam ve talimatlar gönderme maliyetini etkiler."
+				},
+				"output": {
+					"label": "Çıkış Fiyatı",
+					"description": "Model yanıtı başına milyon token maliyeti. Bu, oluşturulan içerik ve tamamlamaların maliyetini etkiler."
+				},
+				"cacheReads": {
+					"label": "Önbellek Okuma Fiyatı",
+					"description": "Önbellekten okuma başına milyon token maliyeti. Bu, önbelleğe alınmış bir yanıt alındığında uygulanan fiyattır."
+				},
+				"cacheWrites": {
+					"label": "Önbellek Yazma Fiyatı",
+					"description": "Önbelleğe yazma başına milyon token maliyeti. Bu, bir istem ilk kez önbelleğe alındığında uygulanan fiyattır."
+				}
+			},
+			"resetDefaults": "Varsayılanlara Sıfırla"
+		}
 	},
 	"browser": {
 		"enable": {
@@ -205,6 +306,9 @@
 		"outputPrice": "Çıkış fiyatı",
 		"cacheReadsPrice": "Önbellek okuma fiyatı",
 		"cacheWritesPrice": "Önbellek yazma fiyatı",
+		"enableStreaming": "Akışı etkinleştir",
+		"useAzure": "Azure kullan",
+		"azureApiVersion": "Azure API sürümünü ayarla",
 		"gemini": {
 			"freeRequests": "* Dakikada {{count}} isteğe kadar ücretsiz. Bundan sonra, ücretlendirme istem boyutuna bağlıdır.",
 			"pricingDetails": "Daha fazla bilgi için fiyatlandırma ayrıntılarına bakın."

+ 3 - 3
webview-ui/src/i18n/locales/zh-CN/prompts.json

@@ -36,12 +36,12 @@
 		"title": "模式特定的自定义指令(可选)",
 		"resetToDefault": "重置为默认值",
 		"description": "为{{modeName}}模式添加特定行为指南。",
-		"loadFromFile": "{{modeName}}模式的自定义指令也可以从工作区的.clinerules-{{modeSlug}}加载。"
+		"loadFromFile": "特定于 {{mode}} 模式的自定义指令也可以从工作区中的 <span>.clinerules-{{slug}}</span> 加载。"
 	},
 	"globalCustomInstructions": {
 		"title": "所有模式的自定义指令",
 		"description": "这些指令适用于所有模式。它们提供了一套基本行为,可以通过下面的模式特定指令来增强。\n如果您希望Roo使用不同于编辑器显示语言({{language}})的语言进行思考和对话,您可以在这里指定。",
-		"loadFromFile": "指令也可以从工作区的.clinerules加载。"
+		"loadFromFile": "指令也可以从工作区 <span>.clinerules</span> 加载。"
 	},
 	"systemPrompt": {
 		"preview": "预览系统提示词",
@@ -100,7 +100,7 @@
 	},
 	"advancedSystemPrompt": {
 		"title": "高级:覆盖系统提示词",
-		"description": "您可以通过在工作区创建文件.roo/system-prompt-{{modeSlug}}来完全替换此模式的系统提示词(角色定义和自定义指令除外)。这是一个非常高级的功能,会绕过内置的安全措施和一致性检查(尤其是围绕工具使用的检查),请谨慎使用!"
+		"description": "您可以通过在工作区创建文件 <span>.roo/system-prompt-{{slug}}</span>,完全替换此模式的系统提示(角色定义和自定义指令除外)。这是一个非常高级的功能,会绕过内置的安全措施和一致性检查(尤其是与工具使用相关的部分),请谨慎操作!"
 	},
 	"createModeDialog": {
 		"title": "创建新模式",

+ 105 - 1
webview-ui/src/i18n/locales/zh-CN/settings.json

@@ -73,10 +73,111 @@
 		"configProfile": "配置文件",
 		"description": "描述",
 		"apiProvider": "API 提供商",
+		"model": "模型",
 		"openRouterApiKey": "OpenRouter API 密钥",
+		"getOpenRouterApiKey": "获取 OpenRouter API 密钥",
 		"apiKeyStorageNotice": "API 密钥安全存储在 VSCode 的密钥存储中",
 		"useCustomBaseUrl": "使用自定义基础 URL",
-		"openRouterTransformsText": "将提示和消息链压缩到上下文大小 (<a>OpenRouter 转换</a>)"
+		"glamaApiKey": "Glama API 密钥",
+		"getGlamaApiKey": "获取 Glama API 密钥",
+		"requestyApiKey": "Requesty API 密钥",
+		"getRequestyApiKey": "获取 Requesty API 密钥",
+		"openRouterTransformsText": "将提示和消息链压缩到上下文大小 (<a>OpenRouter 转换</a>)",
+		"anthropicApiKey": "Anthropic API 密钥",
+		"getAnthropicApiKey": "获取 Anthropic API 密钥",
+		"deepSeekApiKey": "DeepSeek API 密钥",
+		"getDeepSeekApiKey": "获取 DeepSeek API 密钥",
+		"geminiApiKey": "Gemini API 密钥",
+		"getGeminiApiKey": "获取 Gemini API 密钥",
+		"openAiApiKey": "OpenAI API 密钥",
+		"openAiBaseUrl": "OpenAI 基础 URL",
+		"getOpenAiApiKey": "获取 OpenAI API 密钥",
+		"mistralApiKey": "Mistral API 密钥",
+		"getMistralApiKey": "获取 Mistral / Codestral API 密钥",
+		"codestralBaseUrl": "Codestral 基础 URL(可选)",
+		"codestralBaseUrlDesc": "为 Codestral 模型设置替代 URL。",
+		"awsCredentials": "AWS 凭证",
+		"awsProfile": "AWS 配置文件",
+		"awsProfileName": "AWS 配置文件名称",
+		"awsAccessKey": "AWS 访问密钥",
+		"awsSecretKey": "AWS 密钥",
+		"awsSessionToken": "AWS 会话令牌",
+		"awsRegion": "AWS 区域",
+		"awsCrossRegion": "使用跨区域推理",
+		"googleCloudSetup": {
+			"title": "要使用 Google Cloud Vertex AI,您需要:",
+			"step1": "1. 创建 Google Cloud 账户,启用 Vertex AI API 并启用所需的 Claude 模型。",
+			"step2": "2. 安装 Google Cloud CLI 并配置应用程序默认凭证。",
+			"step3": "3. 或创建具有凭证的服务账户。"
+		},
+		"googleCloudCredentials": "Google Cloud 凭证",
+		"googleCloudKeyFile": "Google Cloud 密钥文件路径",
+		"googleCloudProjectId": "Google Cloud 项目 ID",
+		"googleCloudRegion": "Google Cloud 区域",
+		"lmStudio": {
+			"baseUrl": "基础 URL(可选)",
+			"modelId": "模型 ID",
+			"speculativeDecoding": "启用推测性解码",
+			"draftModelId": "草稿模型 ID",
+			"draftModelDesc": "草稿模型必须来自相同的模型系列,推测性解码才能正常工作。",
+			"selectDraftModel": "选择草稿模型",
+			"noModelsFound": "未找到草稿模型。请确保 LM Studio 已启用服务器模式运行。",
+			"description": "LM Studio 允许您在本地计算机上运行模型。要了解如何开始,请参阅他们的 <a>快速入门指南</a>。您还需要启动 LM Studio 的 <b>本地服务器</b> 功能,以便与此扩展一起使用。<span>注意:</span>Roo Code 使用复杂的提示,并且在 Claude 模型上效果最佳。功能较弱的模型可能无法正常工作。"
+		},
+		"ollama": {
+			"baseUrl": "基础 URL(可选)",
+			"modelId": "模型 ID",
+			"description": "Ollama 允许您在本地计算机上运行模型。有关如何开始使用的说明,请参阅其快速入门指南。",
+			"warning": "注意:Roo Code 使用复杂的提示,与 Claude 模型配合最佳。功能较弱的模型可能无法按预期工作。"
+		},
+		"unboundApiKey": "Unbound API 密钥",
+		"getUnboundApiKey": "获取 Unbound API 密钥",
+		"humanRelay": {
+			"description": "不需要 API 密钥,但用户需要帮助将信息复制并粘贴到网页聊天 AI。",
+			"instructions": "使用期间,将弹出对话框并自动将当前消息复制到剪贴板。您需要将这些内容粘贴到 AI 的网页版本(如 ChatGPT 或 Claude),然后将 AI 的回复复制回对话框并点击确认按钮。"
+		},
+		"customModel": {
+			"capabilities": "配置您的自定义 OpenAI 兼容模型的功能和定价。在指定模型功能时要小心,因为它们会影响 Roo Code 的性能。",
+			"maxTokens": {
+				"label": "最大输出令牌数",
+				"description": "模型在响应中可以生成的最大令牌数。(指定 -1 允许服务器设置最大令牌数。)"
+			},
+			"contextWindow": {
+				"label": "上下文窗口大小",
+				"description": "模型可以处理的总令牌数(输入 + 输出)。"
+			},
+			"imageSupport": {
+				"label": "图像支持",
+				"description": "此模型是否能够处理和理解图像?"
+			},
+			"computerUse": {
+				"label": "计算机使用",
+				"description": "此模型是否能够与浏览器交互?(例如 Claude 3.7 Sonnet)。"
+			},
+			"promptCache": {
+				"label": "提示缓存",
+				"description": "此模型是否能够缓存提示?"
+			},
+			"pricing": {
+				"input": {
+					"label": "输入价格",
+					"description": "输入/提示中每百万令牌的成本。这会影响向模型发送上下文和指令的成本。"
+				},
+				"output": {
+					"label": "输出价格",
+					"description": "模型响应中每百万令牌的成本。这会影响生成内容和补全的成本。"
+				},
+				"cacheReads": {
+					"label": "缓存读取价格",
+					"description": "从缓存读取每百万令牌的成本。这是检索缓存响应时收取的费用。"
+				},
+				"cacheWrites": {
+					"label": "缓存写入价格",
+					"description": "向缓存写入每百万令牌的成本。这是首次缓存提示时收取的费用。"
+				}
+			},
+			"resetDefaults": "重置为默认值"
+		}
 	},
 	"browser": {
 		"enable": {
@@ -205,6 +306,9 @@
 		"outputPrice": "输出价格",
 		"cacheReadsPrice": "缓存读取价格",
 		"cacheWritesPrice": "缓存写入价格",
+		"enableStreaming": "启用流式传输",
+		"useAzure": "使用 Azure 服务",
+		"azureApiVersion": "设置 Azure API 版本",
 		"gemini": {
 			"freeRequests": "* 每分钟免费 {{count}} 个请求。之后,计费取决于提示大小。",
 			"pricingDetails": "有关更多信息,请参阅定价详情。"

+ 3 - 3
webview-ui/src/i18n/locales/zh-TW/prompts.json

@@ -36,12 +36,12 @@
 		"title": "模式特定的自訂指令(選用)",
 		"resetToDefault": "重設為預設值",
 		"description": "為{{modeName}}模式添加特定行為指南。",
-		"loadFromFile": "{{modeName}}模式的自訂指令也可以從工作區的.clinerules-{{modeSlug}}載入。"
+		"loadFromFile": "{{mode}}模式的自訂指令也可以從工作區的<span>.clinerules-{{slug}}</span>載入。"
 	},
 	"globalCustomInstructions": {
 		"title": "所有模式的自訂指令",
 		"description": "這些指令適用於所有模式。它們提供了一套基本行為,可以透過下面的模式特定指令來增強。\n如果您希望Roo使用不同於編輯器顯示語言({{language}})的語言進行思考和對話,您可以在這裡指定。",
-		"loadFromFile": "指令也可以從工作區的.clinerules載入。"
+		"loadFromFile": "指令也可以從工作區的<span>.clinerules</span>載入。"
 	},
 	"systemPrompt": {
 		"preview": "預覽系統提示詞",
@@ -100,7 +100,7 @@
 	},
 	"advancedSystemPrompt": {
 		"title": "進階:覆寫系統提示詞",
-		"description": "您可以透過在工作區建立檔案.roo/system-prompt-{{modeSlug}}來完全替換此模式的系統提示詞(角色定義和自訂指令除外)。這是一個非常進階的功能,會繞過內建的安全措施和一致性檢查(尤其是圍繞工具使用的檢查),請謹慎使用!"
+		"description": "您可以透過在工作區建立檔案<span>.roo/system-prompt-{{slug}}</span>來完全替換此模式的系統提示詞(角色定義和自訂指令除外)。這是一個非常進階的功能,會繞過內建的安全措施和一致性檢查(尤其是圍繞工具使用的檢查),請謹慎使用!"
 	},
 	"createModeDialog": {
 		"title": "建立新模式",

+ 105 - 1
webview-ui/src/i18n/locales/zh-TW/settings.json

@@ -73,10 +73,111 @@
 		"configProfile": "設定檔案",
 		"description": "描述",
 		"apiProvider": "API 提供者",
+		"model": "模型",
 		"openRouterApiKey": "OpenRouter API 金鑰",
+		"getOpenRouterApiKey": "取得 OpenRouter API 金鑰",
 		"apiKeyStorageNotice": "API 金鑰安全儲存在 VSCode 的秘密儲存中",
+		"glamaApiKey": "Glama API 金鑰",
+		"getGlamaApiKey": "取得 Glama API 金鑰",
 		"useCustomBaseUrl": "使用自訂基礎 URL",
-		"openRouterTransformsText": "將提示和訊息鏈壓縮到內容大小 (<a>OpenRouter 轉換</a>)"
+		"requestyApiKey": "Requesty API 金鑰",
+		"getRequestyApiKey": "取得 Requesty API 金鑰",
+		"openRouterTransformsText": "將提示和訊息鏈壓縮到內容大小 (<a>OpenRouter 轉換</a>)",
+		"anthropicApiKey": "Anthropic API 金鑰",
+		"getAnthropicApiKey": "取得 Anthropic API 金鑰",
+		"deepSeekApiKey": "DeepSeek API 金鑰",
+		"getDeepSeekApiKey": "取得 DeepSeek API 金鑰",
+		"geminiApiKey": "Gemini API 金鑰",
+		"getGeminiApiKey": "取得 Gemini API 金鑰",
+		"openAiApiKey": "OpenAI API 金鑰",
+		"openAiBaseUrl": "基礎 URL",
+		"getOpenAiApiKey": "取得 OpenAI API 金鑰",
+		"mistralApiKey": "Mistral API 金鑰",
+		"getMistralApiKey": "取得 Mistral / Codestral API 金鑰",
+		"codestralBaseUrl": "Codestral 基礎 URL(選用)",
+		"codestralBaseUrlDesc": "設定 Codestral 模型的替代 URL。",
+		"awsCredentials": "AWS 憑證",
+		"awsProfile": "AWS 設定檔",
+		"awsProfileName": "AWS 設定檔名稱",
+		"awsAccessKey": "AWS 存取金鑰",
+		"awsSecretKey": "AWS 秘密金鑰",
+		"awsSessionToken": "AWS 工作階段權杖",
+		"awsRegion": "AWS 區域",
+		"awsCrossRegion": "使用跨區域推論",
+		"googleCloudSetup": {
+			"title": "要使用 Google Cloud Vertex AI,您需要:",
+			"step1": "1. 建立 Google Cloud 帳戶,啟用 Vertex AI API 並啟用所需的 Claude 模型。",
+			"step2": "2. 安裝 Google Cloud CLI 並設定應用程式預設憑證。",
+			"step3": "3. 或建立具有憑證的服務帳戶。"
+		},
+		"googleCloudCredentials": "Google Cloud 憑證",
+		"googleCloudKeyFile": "Google Cloud 金鑰檔案路徑",
+		"googleCloudProjectId": "Google Cloud 專案 ID",
+		"googleCloudRegion": "Google Cloud 區域",
+		"lmStudio": {
+			"baseUrl": "基礎 URL(選用)",
+			"modelId": "模型 ID",
+			"speculativeDecoding": "啟用推測解碼",
+			"draftModelId": "草稿模型 ID",
+			"draftModelDesc": "推測解碼要正確運作,草稿模型必須來自相同的模型系列。",
+			"selectDraftModel": "選擇草稿模型",
+			"noModelsFound": "找不到草稿模型。請確保 LM Studio 已在伺服器模式下執行。",
+			"description": "LM Studio 允許您在本機電腦上執行模型。關於如何開始使用,請參閱他們的<a>快速入門指南</a>。您還需要啟動 LM Studio 的<b>本機伺服器</b>功能才能與此擴充功能一起使用。<span>注意:</span>Roo Code 使用複雜的提示,並在 Claude 模型上運作最佳。功能較弱的模型可能無法如預期運作。"
+		},
+		"ollama": {
+			"baseUrl": "基礎 URL(選用)",
+			"modelId": "模型 ID",
+			"description": "Ollama 允許您在本機電腦上執行模型。關於如何開始使用,請參閱快速入門指南。",
+			"warning": "注意:Roo Code 使用複雜的提示,並在 Claude 模型上運作最佳。功能較弱的模型可能無法如預期運作。"
+		},
+		"unboundApiKey": "Unbound API 金鑰",
+		"getUnboundApiKey": "取得 Unbound API 金鑰",
+		"humanRelay": {
+			"description": "不需要 API 金鑰,但使用者需要協助將資訊複製並貼上到網頁聊天 AI。",
+			"instructions": "使用期間會彈出對話框,並自動將目前訊息複製到剪貼簿。您需要將這些內容貼上到網頁版 AI(如 ChatGPT 或 Claude),然後將 AI 的回覆複製回對話框並點擊確認按鈕。"
+		},
+		"customModel": {
+			"capabilities": "設定您的自訂 OpenAI 相容模型的功能和定價。請謹慎指定模型功能,因為它們會影響 Roo Code 的效能。",
+			"maxTokens": {
+				"label": "最大輸出權杖",
+				"description": "模型在回應中可以生成的最大權杖數。(指定 -1 以允許伺服器設定最大權杖數。)"
+			},
+			"contextWindow": {
+				"label": "內容視窗大小",
+				"description": "模型可以處理的總權杖數(輸入 + 輸出)。"
+			},
+			"imageSupport": {
+				"label": "圖像支援",
+				"description": "此模型是否能夠處理和理解圖像?"
+			},
+			"computerUse": {
+				"label": "電腦使用",
+				"description": "此模型是否能夠與瀏覽器互動?(例如 Claude 3.7 Sonnet)"
+			},
+			"promptCache": {
+				"label": "提示快取",
+				"description": "此模型是否能夠快取提示?"
+			},
+			"pricing": {
+				"input": {
+					"label": "輸入價格",
+					"description": "輸入/提示每百萬權杖的成本。這會影響向模型發送內容和指令的成本。"
+				},
+				"output": {
+					"label": "輸出價格",
+					"description": "模型回應每百萬權杖的成本。這會影響生成內容和完成的成本。"
+				},
+				"cacheReads": {
+					"label": "快取讀取價格",
+					"description": "從快取讀取每百萬權杖的成本。這是檢索快取回應時收取的價格。"
+				},
+				"cacheWrites": {
+					"label": "快取寫入價格",
+					"description": "寫入快取每百萬權杖的成本。這是首次快取提示時收取的價格。"
+				}
+			},
+			"resetDefaults": "重設為預設值"
+		}
 	},
 	"browser": {
 		"enable": {
@@ -205,6 +306,9 @@
 		"outputPrice": "輸出價格",
 		"cacheReadsPrice": "快取讀取價格",
 		"cacheWritesPrice": "快取寫入價格",
+		"enableStreaming": "啟用串流",
+		"useAzure": "使用 Azure",
+		"azureApiVersion": "設定 Azure API 版本",
 		"gemini": {
 			"freeRequests": "* 每分鐘免費 {{count}} 個請求。之後,計費取決於提示大小。",
 			"pricingDetails": "有關更多資訊,請參閱定價詳情。"