Browse Source

fix: add error transform to cryptic openAI SDK errors when API key is invalid (#7586)

Co-authored-by: Roo Code <[email protected]>
Co-authored-by: Daniel Riccio <[email protected]>
roomote[bot] cách đây 5 tháng
mục cha
commit
65146b1b12

+ 7 - 6
src/api/providers/base-openai-compatible-provider.ts

@@ -10,6 +10,7 @@ import { convertToOpenAiMessages } from "../transform/openai-format"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
 import { DEFAULT_HEADERS } from "./constants"
 import { BaseProvider } from "./base-provider"
+import { handleOpenAIError } from "./utils/openai-error-handler"
 
 type BaseOpenAiCompatibleProviderOptions<ModelName extends string> = ApiHandlerOptions & {
 	providerName: string
@@ -86,7 +87,11 @@ export abstract class BaseOpenAiCompatibleProvider<ModelName extends string>
 			params.temperature = this.options.modelTemperature
 		}
 
-		return this.client.chat.completions.create(params, requestOptions)
+		try {
+			return this.client.chat.completions.create(params, requestOptions)
+		} catch (error) {
+			throw handleOpenAIError(error, this.providerName)
+		}
 	}
 
 	override async *createMessage(
@@ -127,11 +132,7 @@ export abstract class BaseOpenAiCompatibleProvider<ModelName extends string>
 
 			return response.choices[0]?.message.content || ""
 		} catch (error) {
-			if (error instanceof Error) {
-				throw new Error(`${this.providerName} completion error: ${error.message}`)
-			}
-
-			throw error
+			throw handleOpenAIError(error, this.providerName)
 		}
 	}
 

+ 9 - 6
src/api/providers/huggingface.ts

@@ -8,11 +8,13 @@ import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from ".
 import { DEFAULT_HEADERS } from "./constants"
 import { BaseProvider } from "./base-provider"
 import { getHuggingFaceModels, getCachedHuggingFaceModels } from "./fetchers/huggingface"
+import { handleOpenAIError } from "./utils/openai-error-handler"
 
 export class HuggingFaceHandler extends BaseProvider implements SingleCompletionHandler {
 	private client: OpenAI
 	private options: ApiHandlerOptions
 	private modelCache: ModelRecord | null = null
+	private readonly providerName = "HuggingFace"
 
 	constructor(options: ApiHandlerOptions) {
 		super()
@@ -64,7 +66,12 @@ export class HuggingFaceHandler extends BaseProvider implements SingleCompletion
 			params.max_tokens = this.options.modelMaxTokens
 		}
 
-		const stream = await this.client.chat.completions.create(params)
+		let stream
+		try {
+			stream = await this.client.chat.completions.create(params)
+		} catch (error) {
+			throw handleOpenAIError(error, this.providerName)
+		}
 
 		for await (const chunk of stream) {
 			const delta = chunk.choices[0]?.delta
@@ -97,11 +104,7 @@ export class HuggingFaceHandler extends BaseProvider implements SingleCompletion
 
 			return response.choices[0]?.message.content || ""
 		} catch (error) {
-			if (error instanceof Error) {
-				throw new Error(`Hugging Face completion error: ${error.message}`)
-			}
-
-			throw error
+			throw handleOpenAIError(error, this.providerName)
 		}
 	}
 

+ 18 - 3
src/api/providers/lm-studio.ts

@@ -15,18 +15,23 @@ import { BaseProvider } from "./base-provider"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
 import { getModels, getModelsFromCache } from "./fetchers/modelCache"
 import { getApiRequestTimeout } from "./utils/timeout-config"
+import { handleOpenAIError } from "./utils/openai-error-handler"
 
 export class LmStudioHandler extends BaseProvider implements SingleCompletionHandler {
 	protected options: ApiHandlerOptions
 	private client: OpenAI
+	private readonly providerName = "LM Studio"
 
 	constructor(options: ApiHandlerOptions) {
 		super()
 		this.options = options
 
+		// LM Studio uses "noop" as a placeholder API key
+		const apiKey = "noop"
+
 		this.client = new OpenAI({
 			baseURL: (this.options.lmStudioBaseUrl || "http://localhost:1234") + "/v1",
-			apiKey: "noop",
+			apiKey: apiKey,
 			timeout: getApiRequestTimeout(),
 		})
 	}
@@ -88,7 +93,12 @@ export class LmStudioHandler extends BaseProvider implements SingleCompletionHan
 				params.draft_model = this.options.lmStudioDraftModelId
 			}
 
-			const results = await this.client.chat.completions.create(params)
+			let results
+			try {
+				results = await this.client.chat.completions.create(params)
+			} catch (error) {
+				throw handleOpenAIError(error, this.providerName)
+			}
 
 			const matcher = new XmlMatcher(
 				"think",
@@ -164,7 +174,12 @@ export class LmStudioHandler extends BaseProvider implements SingleCompletionHan
 				params.draft_model = this.options.lmStudioDraftModelId
 			}
 
-			const response = await this.client.chat.completions.create(params)
+			let response
+			try {
+				response = await this.client.chat.completions.create(params)
+			} catch (error) {
+				throw handleOpenAIError(error, this.providerName)
+			}
 			return response.choices[0]?.message.content || ""
 		} catch (error) {
 			throw new Error(

+ 27 - 15
src/api/providers/ollama.ts

@@ -14,12 +14,14 @@ import { ApiStream } from "../transform/stream"
 import { BaseProvider } from "./base-provider"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
 import { getApiRequestTimeout } from "./utils/timeout-config"
+import { handleOpenAIError } from "./utils/openai-error-handler"
 
 type CompletionUsage = OpenAI.Chat.Completions.ChatCompletionChunk["usage"]
 
 export class OllamaHandler extends BaseProvider implements SingleCompletionHandler {
 	protected options: ApiHandlerOptions
 	private client: OpenAI
+	private readonly providerName = "Ollama"
 
 	constructor(options: ApiHandlerOptions) {
 		super()
@@ -54,13 +56,18 @@ export class OllamaHandler extends BaseProvider implements SingleCompletionHandl
 			...(useR1Format ? convertToR1Format(messages) : convertToOpenAiMessages(messages)),
 		]
 
-		const stream = await this.client.chat.completions.create({
-			model: this.getModel().id,
-			messages: openAiMessages,
-			temperature: this.options.modelTemperature ?? 0,
-			stream: true,
-			stream_options: { include_usage: true },
-		})
+		let stream
+		try {
+			stream = await this.client.chat.completions.create({
+				model: this.getModel().id,
+				messages: openAiMessages,
+				temperature: this.options.modelTemperature ?? 0,
+				stream: true,
+				stream_options: { include_usage: true },
+			})
+		} catch (error) {
+			throw handleOpenAIError(error, this.providerName)
+		}
 		const matcher = new XmlMatcher(
 			"think",
 			(chunk) =>
@@ -106,14 +113,19 @@ export class OllamaHandler extends BaseProvider implements SingleCompletionHandl
 		try {
 			const modelId = this.getModel().id
 			const useR1Format = modelId.toLowerCase().includes("deepseek-r1")
-			const response = await this.client.chat.completions.create({
-				model: this.getModel().id,
-				messages: useR1Format
-					? convertToR1Format([{ role: "user", content: prompt }])
-					: [{ role: "user", content: prompt }],
-				temperature: this.options.modelTemperature ?? (useR1Format ? DEEP_SEEK_DEFAULT_TEMPERATURE : 0),
-				stream: false,
-			})
+			let response
+			try {
+				response = await this.client.chat.completions.create({
+					model: this.getModel().id,
+					messages: useR1Format
+						? convertToR1Format([{ role: "user", content: prompt }])
+						: [{ role: "user", content: prompt }],
+					temperature: this.options.modelTemperature ?? (useR1Format ? DEEP_SEEK_DEFAULT_TEMPERATURE : 0),
+					stream: false,
+				})
+			} catch (error) {
+				throw handleOpenAIError(error, this.providerName)
+			}
 			return response.choices[0]?.message.content || ""
 		} catch (error) {
 			if (error instanceof Error) {

+ 48 - 21
src/api/providers/openai.ts

@@ -24,6 +24,7 @@ import { DEFAULT_HEADERS } from "./constants"
 import { BaseProvider } from "./base-provider"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
 import { getApiRequestTimeout } from "./utils/timeout-config"
+import { handleOpenAIError } from "./utils/openai-error-handler"
 
 // TODO: Rename this to OpenAICompatibleHandler. Also, I think the
 // `OpenAINativeHandler` can subclass from this, since it's obviously
@@ -31,6 +32,7 @@ import { getApiRequestTimeout } from "./utils/timeout-config"
 export class OpenAiHandler extends BaseProvider implements SingleCompletionHandler {
 	protected options: ApiHandlerOptions
 	private client: OpenAI
+	private readonly providerName = "OpenAI"
 
 	constructor(options: ApiHandlerOptions) {
 		super()
@@ -174,10 +176,15 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
 			// Add max_tokens if needed
 			this.addMaxTokensIfNeeded(requestOptions, modelInfo)
 
-			const stream = await this.client.chat.completions.create(
-				requestOptions,
-				isAzureAiInference ? { path: OPENAI_AZURE_AI_INFERENCE_PATH } : {},
-			)
+			let stream
+			try {
+				stream = await this.client.chat.completions.create(
+					requestOptions,
+					isAzureAiInference ? { path: OPENAI_AZURE_AI_INFERENCE_PATH } : {},
+				)
+			} catch (error) {
+				throw handleOpenAIError(error, this.providerName)
+			}
 
 			const matcher = new XmlMatcher(
 				"think",
@@ -236,10 +243,15 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
 			// Add max_tokens if needed
 			this.addMaxTokensIfNeeded(requestOptions, modelInfo)
 
-			const response = await this.client.chat.completions.create(
-				requestOptions,
-				this._isAzureAiInference(modelUrl) ? { path: OPENAI_AZURE_AI_INFERENCE_PATH } : {},
-			)
+			let response
+			try {
+				response = await this.client.chat.completions.create(
+					requestOptions,
+					this._isAzureAiInference(modelUrl) ? { path: OPENAI_AZURE_AI_INFERENCE_PATH } : {},
+				)
+			} catch (error) {
+				throw handleOpenAIError(error, this.providerName)
+			}
 
 			yield {
 				type: "text",
@@ -281,15 +293,20 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
 			// Add max_tokens if needed
 			this.addMaxTokensIfNeeded(requestOptions, modelInfo)
 
-			const response = await this.client.chat.completions.create(
-				requestOptions,
-				isAzureAiInference ? { path: OPENAI_AZURE_AI_INFERENCE_PATH } : {},
-			)
+			let response
+			try {
+				response = await this.client.chat.completions.create(
+					requestOptions,
+					isAzureAiInference ? { path: OPENAI_AZURE_AI_INFERENCE_PATH } : {},
+				)
+			} catch (error) {
+				throw handleOpenAIError(error, this.providerName)
+			}
 
 			return response.choices[0]?.message.content || ""
 		} catch (error) {
 			if (error instanceof Error) {
-				throw new Error(`OpenAI completion error: ${error.message}`)
+				throw new Error(`${this.providerName} completion error: ${error.message}`)
 			}
 
 			throw error
@@ -327,10 +344,15 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
 			// This allows O3 models to limit response length when includeMaxTokens is enabled
 			this.addMaxTokensIfNeeded(requestOptions, modelInfo)
 
-			const stream = await this.client.chat.completions.create(
-				requestOptions,
-				methodIsAzureAiInference ? { path: OPENAI_AZURE_AI_INFERENCE_PATH } : {},
-			)
+			let stream
+			try {
+				stream = await this.client.chat.completions.create(
+					requestOptions,
+					methodIsAzureAiInference ? { path: OPENAI_AZURE_AI_INFERENCE_PATH } : {},
+				)
+			} catch (error) {
+				throw handleOpenAIError(error, this.providerName)
+			}
 
 			yield* this.handleStreamResponse(stream)
 		} else {
@@ -352,10 +374,15 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
 			// This allows O3 models to limit response length when includeMaxTokens is enabled
 			this.addMaxTokensIfNeeded(requestOptions, modelInfo)
 
-			const response = await this.client.chat.completions.create(
-				requestOptions,
-				methodIsAzureAiInference ? { path: OPENAI_AZURE_AI_INFERENCE_PATH } : {},
-			)
+			let response
+			try {
+				response = await this.client.chat.completions.create(
+					requestOptions,
+					methodIsAzureAiInference ? { path: OPENAI_AZURE_AI_INFERENCE_PATH } : {},
+				)
+			} catch (error) {
+				throw handleOpenAIError(error, this.providerName)
+			}
 
 			yield {
 				type: "text",

+ 14 - 2
src/api/providers/openrouter.ts

@@ -25,6 +25,7 @@ import { getModelEndpoints } from "./fetchers/modelEndpointCache"
 import { DEFAULT_HEADERS } from "./constants"
 import { BaseProvider } from "./base-provider"
 import type { SingleCompletionHandler } from "../index"
+import { handleOpenAIError } from "./utils/openai-error-handler"
 
 // Image generation types
 interface ImageGenerationResponse {
@@ -85,6 +86,7 @@ export class OpenRouterHandler extends BaseProvider implements SingleCompletionH
 	private client: OpenAI
 	protected models: ModelRecord = {}
 	protected endpoints: ModelRecord = {}
+	private readonly providerName = "OpenRouter"
 
 	constructor(options: ApiHandlerOptions) {
 		super()
@@ -161,7 +163,12 @@ export class OpenRouterHandler extends BaseProvider implements SingleCompletionH
 			...(reasoning && { reasoning }),
 		}
 
-		const stream = await this.client.chat.completions.create(completionParams)
+		let stream
+		try {
+			stream = await this.client.chat.completions.create(completionParams)
+		} catch (error) {
+			throw handleOpenAIError(error, this.providerName)
+		}
 
 		let lastUsage: CompletionUsage | undefined = undefined
 
@@ -259,7 +266,12 @@ export class OpenRouterHandler extends BaseProvider implements SingleCompletionH
 			...(reasoning && { reasoning }),
 		}
 
-		const response = await this.client.chat.completions.create(completionParams)
+		let response
+		try {
+			response = await this.client.chat.completions.create(completionParams)
+		} catch (error) {
+			throw handleOpenAIError(error, this.providerName)
+		}
 
 		if ("error" in response) {
 			const error = response.error as { message?: string; code?: number }

+ 17 - 3
src/api/providers/requesty.ts

@@ -16,6 +16,7 @@ import { getModels } from "./fetchers/modelCache"
 import { BaseProvider } from "./base-provider"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
 import { toRequestyServiceUrl } from "../../shared/utils/requesty"
+import { handleOpenAIError } from "./utils/openai-error-handler"
 
 // Requesty usage includes an extra field for Anthropic use cases.
 // Safely cast the prompt token details section to the appropriate structure.
@@ -42,6 +43,7 @@ export class RequestyHandler extends BaseProvider implements SingleCompletionHan
 	protected models: ModelRecord = {}
 	private client: OpenAI
 	private baseURL: string
+	private readonly providerName = "Requesty"
 
 	constructor(options: ApiHandlerOptions) {
 		super()
@@ -49,9 +51,11 @@ export class RequestyHandler extends BaseProvider implements SingleCompletionHan
 		this.options = options
 		this.baseURL = toRequestyServiceUrl(options.requestyBaseUrl)
 
+		const apiKey = this.options.requestyApiKey ?? "not-provided"
+
 		this.client = new OpenAI({
 			baseURL: this.baseURL,
-			apiKey: this.options.requestyApiKey ?? "not-provided",
+			apiKey: apiKey,
 			defaultHeaders: DEFAULT_HEADERS,
 		})
 	}
@@ -126,7 +130,12 @@ export class RequestyHandler extends BaseProvider implements SingleCompletionHan
 			requesty: { trace_id: metadata?.taskId, extra: { mode: metadata?.mode } },
 		}
 
-		const stream = await this.client.chat.completions.create(completionParams)
+		let stream
+		try {
+			stream = await this.client.chat.completions.create(completionParams)
+		} catch (error) {
+			throw handleOpenAIError(error, this.providerName)
+		}
 		let lastUsage: any = undefined
 
 		for await (const chunk of stream) {
@@ -162,7 +171,12 @@ export class RequestyHandler extends BaseProvider implements SingleCompletionHan
 			temperature: temperature,
 		}
 
-		const response: OpenAI.Chat.ChatCompletion = await this.client.chat.completions.create(completionParams)
+		let response: OpenAI.Chat.ChatCompletion
+		try {
+			response = await this.client.chat.completions.create(completionParams)
+		} catch (error) {
+			throw handleOpenAIError(error, this.providerName)
+		}
 		return response.choices[0]?.message.content || ""
 	}
 }

+ 29 - 0
src/api/providers/utils/openai-error-handler.ts

@@ -0,0 +1,29 @@
+/**
+ * General error handler for OpenAI client errors
+ * Transforms technical errors into user-friendly messages
+ */
+
+import i18n from "../../../i18n/setup"
+
+/**
+ * Handles OpenAI client errors and transforms them into user-friendly messages
+ * @param error - The error to handle
+ * @param providerName - The name of the provider for context in error messages
+ * @returns The original error or a transformed user-friendly error
+ */
+export function handleOpenAIError(error: unknown, providerName: string): Error {
+	if (error instanceof Error) {
+		const msg = error.message || ""
+
+		// Invalid character/ByteString conversion error in API key
+		if (msg.includes("Cannot convert argument to a ByteString")) {
+			return new Error(i18n.t("common:errors.api.invalidKeyInvalidChars"))
+		}
+
+		// For other Error instances, wrap with provider-specific prefix
+		return new Error(`${providerName} completion error: ${msg}`)
+	}
+
+	// Non-Error: wrap with provider-specific prefix
+	return new Error(`${providerName} completion error: ${String(error)}`)
+}

+ 28 - 19
src/api/providers/xai.ts

@@ -12,19 +12,24 @@ import { getModelParams } from "../transform/model-params"
 import { DEFAULT_HEADERS } from "./constants"
 import { BaseProvider } from "./base-provider"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
+import { handleOpenAIError } from "./utils/openai-error-handler"
 
 const XAI_DEFAULT_TEMPERATURE = 0
 
 export class XAIHandler extends BaseProvider implements SingleCompletionHandler {
 	protected options: ApiHandlerOptions
 	private client: OpenAI
+	private readonly providerName = "xAI"
 
 	constructor(options: ApiHandlerOptions) {
 		super()
 		this.options = options
+
+		const apiKey = this.options.xaiApiKey ?? "not-provided"
+
 		this.client = new OpenAI({
 			baseURL: "https://api.x.ai/v1",
-			apiKey: this.options.xaiApiKey ?? "not-provided",
+			apiKey: apiKey,
 			defaultHeaders: DEFAULT_HEADERS,
 		})
 	}
@@ -48,15 +53,20 @@ export class XAIHandler extends BaseProvider implements SingleCompletionHandler
 		const { id: modelId, info: modelInfo, reasoning } = this.getModel()
 
 		// Use the OpenAI-compatible API.
-		const stream = await this.client.chat.completions.create({
-			model: modelId,
-			max_tokens: modelInfo.maxTokens,
-			temperature: this.options.modelTemperature ?? XAI_DEFAULT_TEMPERATURE,
-			messages: [{ role: "system", content: systemPrompt }, ...convertToOpenAiMessages(messages)],
-			stream: true,
-			stream_options: { include_usage: true },
-			...(reasoning && reasoning),
-		})
+		let stream
+		try {
+			stream = await this.client.chat.completions.create({
+				model: modelId,
+				max_tokens: modelInfo.maxTokens,
+				temperature: this.options.modelTemperature ?? XAI_DEFAULT_TEMPERATURE,
+				messages: [{ role: "system", content: systemPrompt }, ...convertToOpenAiMessages(messages)],
+				stream: true,
+				stream_options: { include_usage: true },
+				...(reasoning && reasoning),
+			})
+		} catch (error) {
+			throw handleOpenAIError(error, this.providerName)
+		}
 
 		for await (const chunk of stream) {
 			const delta = chunk.choices[0]?.delta
@@ -78,12 +88,15 @@ export class XAIHandler extends BaseProvider implements SingleCompletionHandler
 			if (chunk.usage) {
 				// Extract detailed token information if available
 				// First check for prompt_tokens_details structure (real API response)
-				const promptDetails = "prompt_tokens_details" in chunk.usage ? chunk.usage.prompt_tokens_details : null;
-				const cachedTokens = promptDetails && "cached_tokens" in promptDetails ? promptDetails.cached_tokens : 0;
+				const promptDetails = "prompt_tokens_details" in chunk.usage ? chunk.usage.prompt_tokens_details : null
+				const cachedTokens = promptDetails && "cached_tokens" in promptDetails ? promptDetails.cached_tokens : 0
 
 				// Fall back to direct fields in usage (used in test mocks)
-				const readTokens = cachedTokens || ("cache_read_input_tokens" in chunk.usage ? (chunk.usage as any).cache_read_input_tokens : 0);
-				const writeTokens = "cache_creation_input_tokens" in chunk.usage ? (chunk.usage as any).cache_creation_input_tokens : 0;
+				const readTokens =
+					cachedTokens ||
+					("cache_read_input_tokens" in chunk.usage ? (chunk.usage as any).cache_read_input_tokens : 0)
+				const writeTokens =
+					"cache_creation_input_tokens" in chunk.usage ? (chunk.usage as any).cache_creation_input_tokens : 0
 
 				yield {
 					type: "usage",
@@ -108,11 +121,7 @@ export class XAIHandler extends BaseProvider implements SingleCompletionHandler
 
 			return response.choices[0]?.message.content || ""
 		} catch (error) {
-			if (error instanceof Error) {
-				throw new Error(`xAI completion error: ${error.message}`)
-			}
-
-			throw error
+			throw handleOpenAIError(error, this.providerName)
 		}
 	}
 }

+ 3 - 0
src/i18n/locales/ca/common.json

@@ -107,6 +107,9 @@
 		"roo": {
 			"authenticationRequired": "El proveïdor Roo requereix autenticació al núvol. Si us plau, inicieu sessió a Roo Code Cloud."
 		},
+		"api": {
+			"invalidKeyInvalidChars": "La clau API conté caràcters no vàlids."
+		},
 		"mode_import_failed": "Ha fallat la importació del mode: {{error}}"
 	},
 	"warnings": {

+ 3 - 0
src/i18n/locales/de/common.json

@@ -103,6 +103,9 @@
 		},
 		"roo": {
 			"authenticationRequired": "Roo-Anbieter erfordert Cloud-Authentifizierung. Bitte melde dich bei Roo Code Cloud an."
+		},
+		"api": {
+			"invalidKeyInvalidChars": "API-Schlüssel enthält ungültige Zeichen."
 		}
 	},
 	"warnings": {

+ 3 - 0
src/i18n/locales/en/common.json

@@ -103,6 +103,9 @@
 		},
 		"roo": {
 			"authenticationRequired": "Roo provider requires cloud authentication. Please sign in to Roo Code Cloud."
+		},
+		"api": {
+			"invalidKeyInvalidChars": "API key contains invalid characters."
 		}
 	},
 	"warnings": {

+ 3 - 0
src/i18n/locales/es/common.json

@@ -103,6 +103,9 @@
 		},
 		"roo": {
 			"authenticationRequired": "El proveedor Roo requiere autenticación en la nube. Por favor, inicia sesión en Roo Code Cloud."
+		},
+		"api": {
+			"invalidKeyInvalidChars": "La clave API contiene caracteres inválidos."
 		}
 	},
 	"warnings": {

+ 3 - 0
src/i18n/locales/fr/common.json

@@ -103,6 +103,9 @@
 		},
 		"roo": {
 			"authenticationRequired": "Le fournisseur Roo nécessite une authentification cloud. Veuillez vous connecter à Roo Code Cloud."
+		},
+		"api": {
+			"invalidKeyInvalidChars": "La clé API contient des caractères invalides."
 		}
 	},
 	"warnings": {

+ 3 - 0
src/i18n/locales/hi/common.json

@@ -103,6 +103,9 @@
 		},
 		"roo": {
 			"authenticationRequired": "Roo प्रदाता को क्लाउड प्रमाणीकरण की आवश्यकता है। कृपया Roo Code Cloud में साइन इन करें।"
+		},
+		"api": {
+			"invalidKeyInvalidChars": "API कुंजी में अमान्य वर्ण हैं।"
 		}
 	},
 	"warnings": {

+ 3 - 0
src/i18n/locales/id/common.json

@@ -103,6 +103,9 @@
 		},
 		"roo": {
 			"authenticationRequired": "Penyedia Roo memerlukan autentikasi cloud. Silakan masuk ke Roo Code Cloud."
+		},
+		"api": {
+			"invalidKeyInvalidChars": "Kunci API mengandung karakter tidak valid."
 		}
 	},
 	"warnings": {

+ 3 - 0
src/i18n/locales/it/common.json

@@ -103,6 +103,9 @@
 		},
 		"roo": {
 			"authenticationRequired": "Il provider Roo richiede l'autenticazione cloud. Accedi a Roo Code Cloud."
+		},
+		"api": {
+			"invalidKeyInvalidChars": "La chiave API contiene caratteri non validi."
 		}
 	},
 	"warnings": {

+ 3 - 0
src/i18n/locales/ja/common.json

@@ -103,6 +103,9 @@
 		},
 		"roo": {
 			"authenticationRequired": "Rooプロバイダーはクラウド認証が必要です。Roo Code Cloudにサインインしてください。"
+		},
+		"api": {
+			"invalidKeyInvalidChars": "APIキーに無効な文字が含まれています。"
 		}
 	},
 	"warnings": {

+ 3 - 0
src/i18n/locales/ko/common.json

@@ -103,6 +103,9 @@
 		},
 		"roo": {
 			"authenticationRequired": "Roo 제공업체는 클라우드 인증이 필요합니다. Roo Code Cloud에 로그인하세요."
+		},
+		"api": {
+			"invalidKeyInvalidChars": "API 키에 유효하지 않은 문자가 포함되어 있습니다."
 		}
 	},
 	"warnings": {

+ 3 - 0
src/i18n/locales/nl/common.json

@@ -103,6 +103,9 @@
 		},
 		"roo": {
 			"authenticationRequired": "Roo provider vereist cloud authenticatie. Log in bij Roo Code Cloud."
+		},
+		"api": {
+			"invalidKeyInvalidChars": "API-sleutel bevat ongeldige karakters."
 		}
 	},
 	"warnings": {

+ 3 - 0
src/i18n/locales/pl/common.json

@@ -103,6 +103,9 @@
 		},
 		"roo": {
 			"authenticationRequired": "Dostawca Roo wymaga uwierzytelnienia w chmurze. Zaloguj się do Roo Code Cloud."
+		},
+		"api": {
+			"invalidKeyInvalidChars": "Klucz API zawiera nieprawidłowe znaki."
 		}
 	},
 	"warnings": {

+ 3 - 0
src/i18n/locales/pt-BR/common.json

@@ -107,6 +107,9 @@
 		},
 		"roo": {
 			"authenticationRequired": "O provedor Roo requer autenticação na nuvem. Faça login no Roo Code Cloud."
+		},
+		"api": {
+			"invalidKeyInvalidChars": "A chave API contém caracteres inválidos."
 		}
 	},
 	"warnings": {

+ 3 - 0
src/i18n/locales/ru/common.json

@@ -103,6 +103,9 @@
 		},
 		"roo": {
 			"authenticationRequired": "Провайдер Roo требует облачной аутентификации. Войдите в Roo Code Cloud."
+		},
+		"api": {
+			"invalidKeyInvalidChars": "API-ключ содержит недопустимые символы."
 		}
 	},
 	"warnings": {

+ 3 - 0
src/i18n/locales/tr/common.json

@@ -103,6 +103,9 @@
 		},
 		"roo": {
 			"authenticationRequired": "Roo sağlayıcısı bulut kimlik doğrulaması gerektirir. Lütfen Roo Code Cloud'a giriş yapın."
+		},
+		"api": {
+			"invalidKeyInvalidChars": "API anahtarı geçersiz karakterler içeriyor."
 		}
 	},
 	"warnings": {

+ 3 - 0
src/i18n/locales/vi/common.json

@@ -103,6 +103,9 @@
 		},
 		"roo": {
 			"authenticationRequired": "Nhà cung cấp Roo yêu cầu xác thực đám mây. Vui lòng đăng nhập vào Roo Code Cloud."
+		},
+		"api": {
+			"invalidKeyInvalidChars": "Khóa API chứa ký tự không hợp lệ."
 		}
 	},
 	"warnings": {

+ 3 - 0
src/i18n/locales/zh-CN/common.json

@@ -108,6 +108,9 @@
 		},
 		"roo": {
 			"authenticationRequired": "Roo 提供商需要云认证。请登录 Roo Code Cloud。"
+		},
+		"api": {
+			"invalidKeyInvalidChars": "API 密钥包含无效字符。"
 		}
 	},
 	"warnings": {

+ 3 - 0
src/i18n/locales/zh-TW/common.json

@@ -103,6 +103,9 @@
 		"roo": {
 			"authenticationRequired": "Roo 提供者需要雲端認證。請登入 Roo Code Cloud。"
 		},
+		"api": {
+			"invalidKeyInvalidChars": "API 金鑰包含無效字元。"
+		},
 		"mode_import_failed": "匯入模式失敗:{{error}}"
 	},
 	"warnings": {