Просмотр исходного кода

feat: add OpenRouter embedding provider support (#8973)

* feat: add OpenRouter embedding provider support

Implement comprehensive OpenRouter embedding provider support for codebase indexing with the following features:

- New OpenRouterEmbedder class with full API compatibility
- Support for OpenRouter's OpenAI-compatible embedding endpoint
- Rate limiting and retry logic with exponential backoff
- Base64 embedding handling to bypass OpenAI package limitations
- Global rate limit state management across embedder instances
- Configuration updates for API key storage and provider selection
- UI integration for OpenRouter provider settings
- Comprehensive test suite with mocking
- Model dimension support for OpenRouter's embedding models

This adds OpenRouter as the 7th supported embedding provider alongside OpenAI, Ollama, OpenAI-compatible, Gemini, Mistral, and Vercel AI Gateway.

* Add translation key

* Fix mutex double release bug

* Add translations

* Add more translations

* Fix failing tests

* code-index(openrouter): fix HTTP-Referer header to RooCodeInc/Roo-Code; i18n: add and wire OpenRouter Code Index strings; test: assert default headers in embedder

---------

Co-authored-by: daniel-lxs <[email protected]>
David Markey 2 месяцев назад
Родитель
Commit
34f45f1b28
49 изменённых файлов: 965 добавлений и 22 удаления
  1. 3 1
      packages/types/src/codebase-index.ts
  2. 1 0
      packages/types/src/global-settings.ts
  3. 8 0
      src/core/webview/webviewMessageHandler.ts
  4. 1 0
      src/i18n/locales/ca/embeddings.json
  5. 1 0
      src/i18n/locales/de/embeddings.json
  6. 1 0
      src/i18n/locales/en/embeddings.json
  7. 1 0
      src/i18n/locales/es/embeddings.json
  8. 1 0
      src/i18n/locales/fr/embeddings.json
  9. 1 0
      src/i18n/locales/hi/embeddings.json
  10. 1 0
      src/i18n/locales/id/embeddings.json
  11. 1 0
      src/i18n/locales/it/embeddings.json
  12. 1 0
      src/i18n/locales/ja/embeddings.json
  13. 1 0
      src/i18n/locales/ko/embeddings.json
  14. 1 0
      src/i18n/locales/nl/embeddings.json
  15. 1 0
      src/i18n/locales/pl/embeddings.json
  16. 1 0
      src/i18n/locales/pt-BR/embeddings.json
  17. 1 0
      src/i18n/locales/ru/embeddings.json
  18. 1 0
      src/i18n/locales/tr/embeddings.json
  19. 1 0
      src/i18n/locales/vi/embeddings.json
  20. 1 0
      src/i18n/locales/zh-CN/embeddings.json
  21. 1 0
      src/i18n/locales/zh-TW/embeddings.json
  22. 20 0
      src/services/code-index/config-manager.ts
  23. 289 0
      src/services/code-index/embedders/__tests__/openrouter.spec.ts
  24. 396 0
      src/services/code-index/embedders/openrouter.ts
  25. 2 0
      src/services/code-index/interfaces/config.ts
  26. 8 1
      src/services/code-index/interfaces/embedder.ts
  27. 8 1
      src/services/code-index/interfaces/manager.ts
  28. 6 0
      src/services/code-index/service-factory.ts
  29. 2 0
      src/shared/WebviewMessage.ts
  30. 24 1
      src/shared/embeddingModels.ts
  31. 91 1
      webview-ui/src/components/chat/CodeIndexPopover.tsx
  32. 5 1
      webview-ui/src/i18n/locales/ca/settings.json
  33. 5 1
      webview-ui/src/i18n/locales/de/settings.json
  34. 4 0
      webview-ui/src/i18n/locales/en/settings.json
  35. 5 1
      webview-ui/src/i18n/locales/es/settings.json
  36. 5 1
      webview-ui/src/i18n/locales/fr/settings.json
  37. 5 1
      webview-ui/src/i18n/locales/hi/settings.json
  38. 5 1
      webview-ui/src/i18n/locales/id/settings.json
  39. 5 1
      webview-ui/src/i18n/locales/it/settings.json
  40. 5 1
      webview-ui/src/i18n/locales/ja/settings.json
  41. 5 1
      webview-ui/src/i18n/locales/ko/settings.json
  42. 5 1
      webview-ui/src/i18n/locales/nl/settings.json
  43. 5 1
      webview-ui/src/i18n/locales/pl/settings.json
  44. 5 1
      webview-ui/src/i18n/locales/pt-BR/settings.json
  45. 5 1
      webview-ui/src/i18n/locales/ru/settings.json
  46. 5 1
      webview-ui/src/i18n/locales/tr/settings.json
  47. 5 1
      webview-ui/src/i18n/locales/vi/settings.json
  48. 5 1
      webview-ui/src/i18n/locales/zh-CN/settings.json
  49. 5 1
      webview-ui/src/i18n/locales/zh-TW/settings.json

+ 3 - 1
packages/types/src/codebase-index.ts

@@ -22,7 +22,7 @@ export const codebaseIndexConfigSchema = z.object({
 	codebaseIndexEnabled: z.boolean().optional(),
 	codebaseIndexQdrantUrl: z.string().optional(),
 	codebaseIndexEmbedderProvider: z
-		.enum(["openai", "ollama", "openai-compatible", "gemini", "mistral", "vercel-ai-gateway"])
+		.enum(["openai", "ollama", "openai-compatible", "gemini", "mistral", "vercel-ai-gateway", "openrouter"])
 		.optional(),
 	codebaseIndexEmbedderBaseUrl: z.string().optional(),
 	codebaseIndexEmbedderModelId: z.string().optional(),
@@ -51,6 +51,7 @@ export const codebaseIndexModelsSchema = z.object({
 	gemini: z.record(z.string(), z.object({ dimension: z.number() })).optional(),
 	mistral: z.record(z.string(), z.object({ dimension: z.number() })).optional(),
 	"vercel-ai-gateway": z.record(z.string(), z.object({ dimension: z.number() })).optional(),
+	openrouter: z.record(z.string(), z.object({ dimension: z.number() })).optional(),
 })
 
 export type CodebaseIndexModels = z.infer<typeof codebaseIndexModelsSchema>
@@ -68,6 +69,7 @@ export const codebaseIndexProviderSchema = z.object({
 	codebaseIndexGeminiApiKey: z.string().optional(),
 	codebaseIndexMistralApiKey: z.string().optional(),
 	codebaseIndexVercelAiGatewayApiKey: z.string().optional(),
+	codebaseIndexOpenRouterApiKey: z.string().optional(),
 })
 
 export type CodebaseIndexProvider = z.infer<typeof codebaseIndexProviderSchema>

+ 1 - 0
packages/types/src/global-settings.ts

@@ -232,6 +232,7 @@ export const SECRET_STATE_KEYS = [
 	"codebaseIndexGeminiApiKey",
 	"codebaseIndexMistralApiKey",
 	"codebaseIndexVercelAiGatewayApiKey",
+	"codebaseIndexOpenRouterApiKey",
 	"huggingFaceApiKey",
 	"sambaNovaApiKey",
 	"zaiApiKey",

+ 8 - 0
src/core/webview/webviewMessageHandler.ts

@@ -2556,6 +2556,12 @@ export const webviewMessageHandler = async (
 						settings.codebaseIndexVercelAiGatewayApiKey,
 					)
 				}
+				if (settings.codebaseIndexOpenRouterApiKey !== undefined) {
+					await provider.contextProxy.storeSecret(
+						"codebaseIndexOpenRouterApiKey",
+						settings.codebaseIndexOpenRouterApiKey,
+					)
+				}
 
 				// Send success response first - settings are saved regardless of validation
 				await provider.postMessageToWebview({
@@ -2693,6 +2699,7 @@ export const webviewMessageHandler = async (
 			const hasVercelAiGatewayApiKey = !!(await provider.context.secrets.get(
 				"codebaseIndexVercelAiGatewayApiKey",
 			))
+			const hasOpenRouterApiKey = !!(await provider.context.secrets.get("codebaseIndexOpenRouterApiKey"))
 
 			provider.postMessageToWebview({
 				type: "codeIndexSecretStatus",
@@ -2703,6 +2710,7 @@ export const webviewMessageHandler = async (
 					hasGeminiApiKey,
 					hasMistralApiKey,
 					hasVercelAiGatewayApiKey,
+					hasOpenRouterApiKey,
 				},
 			})
 			break

+ 1 - 0
src/i18n/locales/ca/embeddings.json

@@ -47,6 +47,7 @@
 		"openAiCompatibleConfigMissing": "Falta la configuració compatible amb OpenAI per crear l'embedder",
 		"geminiConfigMissing": "Falta la configuració de Gemini per crear l'embedder",
 		"mistralConfigMissing": "Falta la configuració de Mistral per crear l'embedder",
+		"openRouterConfigMissing": "Falta la configuració d'OpenRouter per crear l'embedder",
 		"vercelAiGatewayConfigMissing": "Falta la configuració de Vercel AI Gateway per crear l'embedder",
 		"invalidEmbedderType": "Tipus d'embedder configurat no vàlid: {{embedderProvider}}",
 		"vectorDimensionNotDeterminedOpenAiCompatible": "No s'ha pogut determinar la dimensió del vector per al model '{{modelId}}' amb el proveïdor '{{provider}}'. Assegura't que la 'Dimensió d'incrustació' estigui configurada correctament als paràmetres del proveïdor compatible amb OpenAI.",

+ 1 - 0
src/i18n/locales/de/embeddings.json

@@ -47,6 +47,7 @@
 		"openAiCompatibleConfigMissing": "OpenAI-kompatible Konfiguration fehlt für die Erstellung des Embedders",
 		"geminiConfigMissing": "Gemini-Konfiguration fehlt für die Erstellung des Embedders",
 		"mistralConfigMissing": "Mistral-Konfiguration fehlt für die Erstellung des Embedders",
+		"openRouterConfigMissing": "OpenRouter-Konfiguration fehlt für die Erstellung des Embedders",
 		"vercelAiGatewayConfigMissing": "Vercel AI Gateway-Konfiguration fehlt für die Erstellung des Embedders",
 		"invalidEmbedderType": "Ungültiger Embedder-Typ konfiguriert: {{embedderProvider}}",
 		"vectorDimensionNotDeterminedOpenAiCompatible": "Konnte die Vektordimension für Modell '{{modelId}}' mit Anbieter '{{provider}}' nicht bestimmen. Stelle sicher, dass die 'Embedding-Dimension' in den OpenAI-kompatiblen Anbietereinstellungen korrekt eingestellt ist.",

+ 1 - 0
src/i18n/locales/en/embeddings.json

@@ -47,6 +47,7 @@
 		"openAiCompatibleConfigMissing": "OpenAI Compatible configuration missing for embedder creation",
 		"geminiConfigMissing": "Gemini configuration missing for embedder creation",
 		"mistralConfigMissing": "Mistral configuration missing for embedder creation",
+		"openRouterConfigMissing": "OpenRouter configuration missing for embedder creation",
 		"vercelAiGatewayConfigMissing": "Vercel AI Gateway configuration missing for embedder creation",
 		"invalidEmbedderType": "Invalid embedder type configured: {{embedderProvider}}",
 		"vectorDimensionNotDeterminedOpenAiCompatible": "Could not determine vector dimension for model '{{modelId}}' with provider '{{provider}}'. Please ensure the 'Embedding Dimension' is correctly set in the OpenAI-Compatible provider settings.",

+ 1 - 0
src/i18n/locales/es/embeddings.json

@@ -47,6 +47,7 @@
 		"openAiCompatibleConfigMissing": "Falta la configuración compatible con OpenAI para crear el incrustador",
 		"geminiConfigMissing": "Falta la configuración de Gemini para crear el incrustador",
 		"mistralConfigMissing": "Falta la configuración de Mistral para la creación del incrustador",
+		"openRouterConfigMissing": "Falta la configuración de OpenRouter para la creación del incrustador",
 		"vercelAiGatewayConfigMissing": "Falta la configuración de Vercel AI Gateway para la creación del incrustador",
 		"invalidEmbedderType": "Tipo de incrustador configurado inválido: {{embedderProvider}}",
 		"vectorDimensionNotDeterminedOpenAiCompatible": "No se pudo determinar la dimensión del vector para el modelo '{{modelId}}' con el proveedor '{{provider}}'. Asegúrate de que la 'Dimensión de incrustación' esté configurada correctamente en los ajustes del proveedor compatible con OpenAI.",

+ 1 - 0
src/i18n/locales/fr/embeddings.json

@@ -47,6 +47,7 @@
 		"openAiCompatibleConfigMissing": "Configuration compatible OpenAI manquante pour la création de l'embedder",
 		"geminiConfigMissing": "Configuration Gemini manquante pour la création de l'embedder",
 		"mistralConfigMissing": "Configuration Mistral manquante pour la création de l'embedder",
+		"openRouterConfigMissing": "Configuration OpenRouter manquante pour la création de l'embedder",
 		"vercelAiGatewayConfigMissing": "Configuration Vercel AI Gateway manquante pour la création de l'embedder",
 		"invalidEmbedderType": "Type d'embedder configuré invalide : {{embedderProvider}}",
 		"vectorDimensionNotDeterminedOpenAiCompatible": "Impossible de déterminer la dimension du vecteur pour le modèle '{{modelId}}' avec le fournisseur '{{provider}}'. Assure-toi que la 'Dimension d'embedding' est correctement définie dans les paramètres du fournisseur compatible OpenAI.",

+ 1 - 0
src/i18n/locales/hi/embeddings.json

@@ -47,6 +47,7 @@
 		"openAiCompatibleConfigMissing": "एम्बेडर बनाने के लिए OpenAI संगत कॉन्फ़िगरेशन गायब है",
 		"geminiConfigMissing": "एम्बेडर बनाने के लिए Gemini कॉन्फ़िगरेशन गायब है",
 		"mistralConfigMissing": "एम्बेडर निर्माण के लिए मिस्ट्रल कॉन्फ़िगरेशन गायब है",
+		"openRouterConfigMissing": "एम्बेडर निर्माण के लिए OpenRouter कॉन्फ़िगरेशन गायब है",
 		"vercelAiGatewayConfigMissing": "एम्बेडर निर्माण के लिए Vercel AI Gateway कॉन्फ़िगरेशन गायब है",
 		"invalidEmbedderType": "अमान्य एम्बेडर प्रकार कॉन्फ़िगर किया गया: {{embedderProvider}}",
 		"vectorDimensionNotDeterminedOpenAiCompatible": "प्रदाता '{{provider}}' के साथ मॉडल '{{modelId}}' के लिए वेक्टर आयाम निर्धारित नहीं कर सका। कृपया सुनिश्चित करें कि OpenAI-संगत प्रदाता सेटिंग्स में 'एम्बेडिंग आयाम' सही तरीके से सेट है।",

+ 1 - 0
src/i18n/locales/id/embeddings.json

@@ -47,6 +47,7 @@
 		"openAiCompatibleConfigMissing": "Konfigurasi yang kompatibel dengan OpenAI tidak ada untuk membuat embedder",
 		"geminiConfigMissing": "Konfigurasi Gemini tidak ada untuk membuat embedder",
 		"mistralConfigMissing": "Konfigurasi Mistral hilang untuk pembuatan embedder",
+		"openRouterConfigMissing": "Konfigurasi OpenRouter hilang untuk pembuatan embedder",
 		"vercelAiGatewayConfigMissing": "Konfigurasi Vercel AI Gateway hilang untuk pembuatan embedder",
 		"invalidEmbedderType": "Tipe embedder yang dikonfigurasi tidak valid: {{embedderProvider}}",
 		"vectorDimensionNotDeterminedOpenAiCompatible": "Tidak dapat menentukan dimensi vektor untuk model '{{modelId}}' dengan penyedia '{{provider}}'. Pastikan 'Dimensi Embedding' diatur dengan benar di pengaturan penyedia yang kompatibel dengan OpenAI.",

+ 1 - 0
src/i18n/locales/it/embeddings.json

@@ -47,6 +47,7 @@
 		"openAiCompatibleConfigMissing": "Configurazione compatibile con OpenAI mancante per la creazione dell'embedder",
 		"geminiConfigMissing": "Configurazione Gemini mancante per la creazione dell'embedder",
 		"mistralConfigMissing": "Configurazione di Mistral mancante per la creazione dell'embedder",
+		"openRouterConfigMissing": "Configurazione di OpenRouter mancante per la creazione dell'embedder",
 		"vercelAiGatewayConfigMissing": "Configurazione di Vercel AI Gateway mancante per la creazione dell'embedder",
 		"invalidEmbedderType": "Tipo di embedder configurato non valido: {{embedderProvider}}",
 		"vectorDimensionNotDeterminedOpenAiCompatible": "Impossibile determinare la dimensione del vettore per il modello '{{modelId}}' con il provider '{{provider}}'. Assicurati che la 'Dimensione di embedding' sia impostata correttamente nelle impostazioni del provider compatibile con OpenAI.",

+ 1 - 0
src/i18n/locales/ja/embeddings.json

@@ -47,6 +47,7 @@
 		"openAiCompatibleConfigMissing": "エンベッダー作成のためのOpenAI互換設定がありません",
 		"geminiConfigMissing": "エンベッダー作成のためのGemini設定がありません",
 		"mistralConfigMissing": "エンベッダー作成のためのMistral設定がありません",
+		"openRouterConfigMissing": "エンベッダー作成のためのOpenRouter設定がありません",
 		"vercelAiGatewayConfigMissing": "エンベッダー作成のためのVercel AI Gateway設定がありません",
 		"invalidEmbedderType": "無効なエンベッダータイプが設定されています: {{embedderProvider}}",
 		"vectorDimensionNotDeterminedOpenAiCompatible": "プロバイダー '{{provider}}' のモデル '{{modelId}}' の埋め込み次元を決定できませんでした。OpenAI互換プロバイダー設定で「埋め込み次元」が正しく設定されていることを確認してください。",

+ 1 - 0
src/i18n/locales/ko/embeddings.json

@@ -47,6 +47,7 @@
 		"openAiCompatibleConfigMissing": "임베더 생성을 위한 OpenAI 호환 구성이 누락되었습니다",
 		"geminiConfigMissing": "임베더 생성을 위한 Gemini 구성이 누락되었습니다",
 		"mistralConfigMissing": "임베더 생성을 위한 Mistral 구성이 없습니다",
+		"openRouterConfigMissing": "임베더 생성을 위한 OpenRouter 구성이 없습니다",
 		"vercelAiGatewayConfigMissing": "임베더 생성을 위한 Vercel AI Gateway 구성이 없습니다",
 		"invalidEmbedderType": "잘못된 임베더 유형이 구성되었습니다: {{embedderProvider}}",
 		"vectorDimensionNotDeterminedOpenAiCompatible": "프로바이더 '{{provider}}'의 모델 '{{modelId}}'에 대한 벡터 차원을 결정할 수 없습니다. OpenAI 호환 프로바이더 설정에서 '임베딩 차원'이 올바르게 설정되어 있는지 확인하세요.",

+ 1 - 0
src/i18n/locales/nl/embeddings.json

@@ -47,6 +47,7 @@
 		"openAiCompatibleConfigMissing": "OpenAI-compatibele configuratie ontbreekt voor het maken van embedder",
 		"geminiConfigMissing": "Gemini-configuratie ontbreekt voor het maken van embedder",
 		"mistralConfigMissing": "Mistral-configuratie ontbreekt voor het maken van de embedder",
+		"openRouterConfigMissing": "OpenRouter-configuratie ontbreekt voor het maken van de embedder",
 		"vercelAiGatewayConfigMissing": "Vercel AI Gateway-configuratie ontbreekt voor het maken van de embedder",
 		"invalidEmbedderType": "Ongeldig embedder-type geconfigureerd: {{embedderProvider}}",
 		"vectorDimensionNotDeterminedOpenAiCompatible": "Kan de vectordimensie voor model '{{modelId}}' met provider '{{provider}}' niet bepalen. Zorg ervoor dat de 'Embedding Dimensie' correct is ingesteld in de OpenAI-compatibele provider-instellingen.",

+ 1 - 0
src/i18n/locales/pl/embeddings.json

@@ -47,6 +47,7 @@
 		"openAiCompatibleConfigMissing": "Brak konfiguracji kompatybilnej z OpenAI do utworzenia embeddera",
 		"geminiConfigMissing": "Brak konfiguracji Gemini do utworzenia embeddera",
 		"mistralConfigMissing": "Brak konfiguracji Mistral do utworzenia embeddera",
+		"openRouterConfigMissing": "Brak konfiguracji OpenRouter do utworzenia embeddera",
 		"vercelAiGatewayConfigMissing": "Brak konfiguracji Vercel AI Gateway do utworzenia embeddera",
 		"invalidEmbedderType": "Skonfigurowano nieprawidłowy typ embeddera: {{embedderProvider}}",
 		"vectorDimensionNotDeterminedOpenAiCompatible": "Nie można określić wymiaru wektora dla modelu '{{modelId}}' z dostawcą '{{provider}}'. Upewnij się, że 'Wymiar osadzania' jest poprawnie ustawiony w ustawieniach dostawcy kompatybilnego z OpenAI.",

+ 1 - 0
src/i18n/locales/pt-BR/embeddings.json

@@ -47,6 +47,7 @@
 		"openAiCompatibleConfigMissing": "Configuração compatível com OpenAI ausente para criação do embedder",
 		"geminiConfigMissing": "Configuração do Gemini ausente para criação do embedder",
 		"mistralConfigMissing": "Configuração do Mistral ausente para a criação do embedder",
+		"openRouterConfigMissing": "Configuração do OpenRouter ausente para a criação do embedder",
 		"vercelAiGatewayConfigMissing": "Configuração do Vercel AI Gateway ausente para a criação do embedder",
 		"invalidEmbedderType": "Tipo de embedder configurado inválido: {{embedderProvider}}",
 		"vectorDimensionNotDeterminedOpenAiCompatible": "Não foi possível determinar a dimensão do vetor para o modelo '{{modelId}}' com o provedor '{{provider}}'. Certifique-se de que a 'Dimensão de Embedding' esteja configurada corretamente nas configurações do provedor compatível com OpenAI.",

+ 1 - 0
src/i18n/locales/ru/embeddings.json

@@ -47,6 +47,7 @@
 		"openAiCompatibleConfigMissing": "Отсутствует конфигурация, совместимая с OpenAI, для создания эмбеддера",
 		"geminiConfigMissing": "Отсутствует конфигурация Gemini для создания эмбеддера",
 		"mistralConfigMissing": "Конфигурация Mistral отсутствует для создания эмбеддера",
+		"openRouterConfigMissing": "Конфигурация OpenRouter отсутствует для создания эмбеддера",
 		"vercelAiGatewayConfigMissing": "Конфигурация Vercel AI Gateway отсутствует для создания эмбеддера",
 		"invalidEmbedderType": "Настроен недопустимый тип эмбеддера: {{embedderProvider}}",
 		"vectorDimensionNotDeterminedOpenAiCompatible": "Не удалось определить размерность вектора для модели '{{modelId}}' с провайдером '{{provider}}'. Убедитесь, что 'Размерность эмбеддинга' правильно установлена в настройках провайдера, совместимого с OpenAI.",

+ 1 - 0
src/i18n/locales/tr/embeddings.json

@@ -47,6 +47,7 @@
 		"openAiCompatibleConfigMissing": "Gömücü oluşturmak için OpenAI uyumlu yapılandırması eksik",
 		"geminiConfigMissing": "Gömücü oluşturmak için Gemini yapılandırması eksik",
 		"mistralConfigMissing": "Gömücü oluşturmak için Mistral yapılandırması eksik",
+		"openRouterConfigMissing": "Gömücü oluşturmak için OpenRouter yapılandırması eksik",
 		"vercelAiGatewayConfigMissing": "Gömücü oluşturmak için Vercel AI Gateway yapılandırması eksik",
 		"invalidEmbedderType": "Geçersiz gömücü türü yapılandırıldı: {{embedderProvider}}",
 		"vectorDimensionNotDeterminedOpenAiCompatible": "'{{provider}}' sağlayıcısı ile '{{modelId}}' modeli için vektör boyutu belirlenemedi. OpenAI uyumlu sağlayıcı ayarlarında 'Gömme Boyutu'nun doğru ayarlandığından emin ol.",

+ 1 - 0
src/i18n/locales/vi/embeddings.json

@@ -47,6 +47,7 @@
 		"openAiCompatibleConfigMissing": "Thiếu cấu hình tương thích OpenAI để tạo embedder",
 		"geminiConfigMissing": "Thiếu cấu hình Gemini để tạo embedder",
 		"mistralConfigMissing": "Thiếu cấu hình Mistral để tạo trình nhúng",
+		"openRouterConfigMissing": "Thiếu cấu hình OpenRouter để tạo trình nhúng",
 		"vercelAiGatewayConfigMissing": "Thiếu cấu hình Vercel AI Gateway để tạo trình nhúng",
 		"invalidEmbedderType": "Loại embedder được cấu hình không hợp lệ: {{embedderProvider}}",
 		"vectorDimensionNotDeterminedOpenAiCompatible": "Không thể xác định kích thước vector cho mô hình '{{modelId}}' với nhà cung cấp '{{provider}}'. Hãy đảm bảo 'Kích thước Embedding' được cài đặt đúng trong cài đặt nhà cung cấp tương thích OpenAI.",

+ 1 - 0
src/i18n/locales/zh-CN/embeddings.json

@@ -47,6 +47,7 @@
 		"openAiCompatibleConfigMissing": "创建嵌入器缺少 OpenAI 兼容配置",
 		"geminiConfigMissing": "创建嵌入器缺少 Gemini 配置",
 		"mistralConfigMissing": "创建嵌入器时缺少 Mistral 配置",
+		"openRouterConfigMissing": "创建嵌入器时缺少 OpenRouter 配置",
 		"vercelAiGatewayConfigMissing": "创建嵌入器时缺少 Vercel AI Gateway 配置",
 		"invalidEmbedderType": "配置的嵌入器类型无效:{{embedderProvider}}",
 		"vectorDimensionNotDeterminedOpenAiCompatible": "无法确定提供商 '{{provider}}' 的模型 '{{modelId}}' 的向量维度。请确保在 OpenAI 兼容提供商设置中正确设置了「嵌入维度」。",

+ 1 - 0
src/i18n/locales/zh-TW/embeddings.json

@@ -47,6 +47,7 @@
 		"openAiCompatibleConfigMissing": "建立嵌入器缺少 OpenAI 相容設定",
 		"geminiConfigMissing": "建立嵌入器缺少 Gemini 設定",
 		"mistralConfigMissing": "建立嵌入器時缺少 Mistral 設定",
+		"openRouterConfigMissing": "建立嵌入器時缺少 OpenRouter 設定",
 		"vercelAiGatewayConfigMissing": "建立嵌入器時缺少 Vercel AI Gateway 設定",
 		"invalidEmbedderType": "設定的嵌入器類型無效:{{embedderProvider}}",
 		"vectorDimensionNotDeterminedOpenAiCompatible": "無法確定提供商 '{{provider}}' 的模型 '{{modelId}}' 的向量維度。請確保在 OpenAI 相容提供商設定中正確設定了「嵌入維度」。",

+ 20 - 0
src/services/code-index/config-manager.ts

@@ -20,6 +20,7 @@ export class CodeIndexConfigManager {
 	private geminiOptions?: { apiKey: string }
 	private mistralOptions?: { apiKey: string }
 	private vercelAiGatewayOptions?: { apiKey: string }
+	private openRouterOptions?: { apiKey: string }
 	private qdrantUrl?: string = "http://localhost:6333"
 	private qdrantApiKey?: string
 	private searchMinScore?: number
@@ -71,6 +72,7 @@ export class CodeIndexConfigManager {
 		const geminiApiKey = this.contextProxy?.getSecret("codebaseIndexGeminiApiKey") ?? ""
 		const mistralApiKey = this.contextProxy?.getSecret("codebaseIndexMistralApiKey") ?? ""
 		const vercelAiGatewayApiKey = this.contextProxy?.getSecret("codebaseIndexVercelAiGatewayApiKey") ?? ""
+		const openRouterApiKey = this.contextProxy?.getSecret("codebaseIndexOpenRouterApiKey") ?? ""
 
 		// Update instance variables with configuration
 		this.codebaseIndexEnabled = codebaseIndexEnabled ?? true
@@ -108,6 +110,8 @@ export class CodeIndexConfigManager {
 			this.embedderProvider = "mistral"
 		} else if (codebaseIndexEmbedderProvider === "vercel-ai-gateway") {
 			this.embedderProvider = "vercel-ai-gateway"
+		} else if (codebaseIndexEmbedderProvider === "openrouter") {
+			this.embedderProvider = "openrouter"
 		} else {
 			this.embedderProvider = "openai"
 		}
@@ -129,6 +133,7 @@ export class CodeIndexConfigManager {
 		this.geminiOptions = geminiApiKey ? { apiKey: geminiApiKey } : undefined
 		this.mistralOptions = mistralApiKey ? { apiKey: mistralApiKey } : undefined
 		this.vercelAiGatewayOptions = vercelAiGatewayApiKey ? { apiKey: vercelAiGatewayApiKey } : undefined
+		this.openRouterOptions = openRouterApiKey ? { apiKey: openRouterApiKey } : undefined
 	}
 
 	/**
@@ -147,6 +152,7 @@ export class CodeIndexConfigManager {
 			geminiOptions?: { apiKey: string }
 			mistralOptions?: { apiKey: string }
 			vercelAiGatewayOptions?: { apiKey: string }
+			openRouterOptions?: { apiKey: string }
 			qdrantUrl?: string
 			qdrantApiKey?: string
 			searchMinScore?: number
@@ -167,6 +173,7 @@ export class CodeIndexConfigManager {
 			geminiApiKey: this.geminiOptions?.apiKey ?? "",
 			mistralApiKey: this.mistralOptions?.apiKey ?? "",
 			vercelAiGatewayApiKey: this.vercelAiGatewayOptions?.apiKey ?? "",
+			openRouterApiKey: this.openRouterOptions?.apiKey ?? "",
 			qdrantUrl: this.qdrantUrl ?? "",
 			qdrantApiKey: this.qdrantApiKey ?? "",
 		}
@@ -192,6 +199,7 @@ export class CodeIndexConfigManager {
 				geminiOptions: this.geminiOptions,
 				mistralOptions: this.mistralOptions,
 				vercelAiGatewayOptions: this.vercelAiGatewayOptions,
+				openRouterOptions: this.openRouterOptions,
 				qdrantUrl: this.qdrantUrl,
 				qdrantApiKey: this.qdrantApiKey,
 				searchMinScore: this.currentSearchMinScore,
@@ -234,6 +242,11 @@ export class CodeIndexConfigManager {
 			const qdrantUrl = this.qdrantUrl
 			const isConfigured = !!(apiKey && qdrantUrl)
 			return isConfigured
+		} else if (this.embedderProvider === "openrouter") {
+			const apiKey = this.openRouterOptions?.apiKey
+			const qdrantUrl = this.qdrantUrl
+			const isConfigured = !!(apiKey && qdrantUrl)
+			return isConfigured
 		}
 		return false // Should not happen if embedderProvider is always set correctly
 	}
@@ -269,6 +282,7 @@ export class CodeIndexConfigManager {
 		const prevGeminiApiKey = prev?.geminiApiKey ?? ""
 		const prevMistralApiKey = prev?.mistralApiKey ?? ""
 		const prevVercelAiGatewayApiKey = prev?.vercelAiGatewayApiKey ?? ""
+		const prevOpenRouterApiKey = prev?.openRouterApiKey ?? ""
 		const prevQdrantUrl = prev?.qdrantUrl ?? ""
 		const prevQdrantApiKey = prev?.qdrantApiKey ?? ""
 
@@ -307,6 +321,7 @@ export class CodeIndexConfigManager {
 		const currentGeminiApiKey = this.geminiOptions?.apiKey ?? ""
 		const currentMistralApiKey = this.mistralOptions?.apiKey ?? ""
 		const currentVercelAiGatewayApiKey = this.vercelAiGatewayOptions?.apiKey ?? ""
+		const currentOpenRouterApiKey = this.openRouterOptions?.apiKey ?? ""
 		const currentQdrantUrl = this.qdrantUrl ?? ""
 		const currentQdrantApiKey = this.qdrantApiKey ?? ""
 
@@ -337,6 +352,10 @@ export class CodeIndexConfigManager {
 			return true
 		}
 
+		if (prevOpenRouterApiKey !== currentOpenRouterApiKey) {
+			return true
+		}
+
 		// Check for model dimension changes (generic for all providers)
 		if (prevModelDimension !== currentModelDimension) {
 			return true
@@ -395,6 +414,7 @@ export class CodeIndexConfigManager {
 			geminiOptions: this.geminiOptions,
 			mistralOptions: this.mistralOptions,
 			vercelAiGatewayOptions: this.vercelAiGatewayOptions,
+			openRouterOptions: this.openRouterOptions,
 			qdrantUrl: this.qdrantUrl,
 			qdrantApiKey: this.qdrantApiKey,
 			searchMinScore: this.currentSearchMinScore,

+ 289 - 0
src/services/code-index/embedders/__tests__/openrouter.spec.ts

@@ -0,0 +1,289 @@
+import type { MockedClass, MockedFunction } from "vitest"
+import { describe, it, expect, beforeEach, vi } from "vitest"
+import { OpenAI } from "openai"
+import { OpenRouterEmbedder } from "../openrouter"
+import { getModelDimension, getDefaultModelId } from "../../../../shared/embeddingModels"
+
+// Mock the OpenAI SDK
+vi.mock("openai")
+
+// Mock TelemetryService
+vi.mock("@roo-code/telemetry", () => ({
+	TelemetryService: {
+		instance: {
+			captureEvent: vi.fn(),
+		},
+	},
+	TelemetryEventName: {},
+}))
+
+// Mock i18n
+vi.mock("../../../../i18n", () => ({
+	t: (key: string, params?: Record<string, any>) => {
+		const translations: Record<string, string> = {
+			"embeddings:validation.apiKeyRequired": "validation.apiKeyRequired",
+			"embeddings:authenticationFailed":
+				"Failed to create embeddings: Authentication failed. Please check your OpenRouter API key.",
+			"embeddings:failedWithStatus": `Failed to create embeddings after ${params?.attempts} attempts: HTTP ${params?.statusCode} - ${params?.errorMessage}`,
+			"embeddings:failedWithError": `Failed to create embeddings after ${params?.attempts} attempts: ${params?.errorMessage}`,
+			"embeddings:failedMaxAttempts": `Failed to create embeddings after ${params?.attempts} attempts`,
+			"embeddings:textExceedsTokenLimit": `Text at index ${params?.index} exceeds maximum token limit (${params?.itemTokens} > ${params?.maxTokens}). Skipping.`,
+			"embeddings:rateLimitRetry": `Rate limit hit, retrying in ${params?.delayMs}ms (attempt ${params?.attempt}/${params?.maxRetries})`,
+		}
+		return translations[key] || key
+	},
+}))
+
+const MockedOpenAI = OpenAI as MockedClass<typeof OpenAI>
+
+describe("OpenRouterEmbedder", () => {
+	const mockApiKey = "test-api-key"
+	let mockEmbeddingsCreate: MockedFunction<any>
+	let mockOpenAIInstance: any
+
+	beforeEach(() => {
+		vi.clearAllMocks()
+		vi.spyOn(console, "warn").mockImplementation(() => {})
+		vi.spyOn(console, "error").mockImplementation(() => {})
+
+		// Setup mock OpenAI instance
+		mockEmbeddingsCreate = vi.fn()
+		mockOpenAIInstance = {
+			embeddings: {
+				create: mockEmbeddingsCreate,
+			},
+		}
+
+		MockedOpenAI.mockImplementation(() => mockOpenAIInstance)
+	})
+
+	afterEach(() => {
+		vi.restoreAllMocks()
+	})
+
+	describe("constructor", () => {
+		it("should create an instance with valid API key", () => {
+			const embedder = new OpenRouterEmbedder(mockApiKey)
+			expect(embedder).toBeInstanceOf(OpenRouterEmbedder)
+		})
+
+		it("should throw error with empty API key", () => {
+			expect(() => new OpenRouterEmbedder("")).toThrow("validation.apiKeyRequired")
+		})
+
+		it("should use default model when none specified", () => {
+			const embedder = new OpenRouterEmbedder(mockApiKey)
+			const expectedDefault = getDefaultModelId("openrouter")
+			expect(embedder.embedderInfo.name).toBe("openrouter")
+		})
+
+		it("should use custom model when specified", () => {
+			const customModel = "openai/text-embedding-3-small"
+			const embedder = new OpenRouterEmbedder(mockApiKey, customModel)
+			expect(embedder.embedderInfo.name).toBe("openrouter")
+		})
+
+		it("should initialize OpenAI client with correct headers", () => {
+			new OpenRouterEmbedder(mockApiKey)
+
+			expect(MockedOpenAI).toHaveBeenCalledWith({
+				baseURL: "https://openrouter.ai/api/v1",
+				apiKey: mockApiKey,
+				defaultHeaders: {
+					"HTTP-Referer": "https://github.com/RooCodeInc/Roo-Code",
+					"X-Title": "Roo Code",
+				},
+			})
+		})
+	})
+
+	describe("embedderInfo", () => {
+		it("should return correct embedder info", () => {
+			const embedder = new OpenRouterEmbedder(mockApiKey)
+			expect(embedder.embedderInfo).toEqual({
+				name: "openrouter",
+			})
+		})
+	})
+
+	describe("createEmbeddings", () => {
+		let embedder: OpenRouterEmbedder
+
+		beforeEach(() => {
+			embedder = new OpenRouterEmbedder(mockApiKey)
+		})
+
+		it("should create embeddings successfully", async () => {
+			// Create base64 encoded embedding with values that can be exactly represented in Float32
+			const testEmbedding = new Float32Array([0.25, 0.5, 0.75])
+			const base64String = Buffer.from(testEmbedding.buffer).toString("base64")
+
+			const mockResponse = {
+				data: [
+					{
+						embedding: base64String,
+					},
+				],
+				usage: {
+					prompt_tokens: 5,
+					total_tokens: 5,
+				},
+			}
+
+			mockEmbeddingsCreate.mockResolvedValue(mockResponse)
+
+			const result = await embedder.createEmbeddings(["test text"])
+
+			expect(mockEmbeddingsCreate).toHaveBeenCalledWith({
+				input: ["test text"],
+				model: "openai/text-embedding-3-large",
+				encoding_format: "base64",
+			})
+			expect(result.embeddings).toHaveLength(1)
+			expect(result.embeddings[0]).toEqual([0.25, 0.5, 0.75])
+			expect(result.usage?.promptTokens).toBe(5)
+			expect(result.usage?.totalTokens).toBe(5)
+		})
+
+		it("should handle multiple texts", async () => {
+			const embedding1 = new Float32Array([0.25, 0.5])
+			const embedding2 = new Float32Array([0.75, 1.0])
+			const base64String1 = Buffer.from(embedding1.buffer).toString("base64")
+			const base64String2 = Buffer.from(embedding2.buffer).toString("base64")
+
+			const mockResponse = {
+				data: [
+					{
+						embedding: base64String1,
+					},
+					{
+						embedding: base64String2,
+					},
+				],
+				usage: {
+					prompt_tokens: 10,
+					total_tokens: 10,
+				},
+			}
+
+			mockEmbeddingsCreate.mockResolvedValue(mockResponse)
+
+			const result = await embedder.createEmbeddings(["text1", "text2"])
+
+			expect(result.embeddings).toHaveLength(2)
+			expect(result.embeddings[0]).toEqual([0.25, 0.5])
+			expect(result.embeddings[1]).toEqual([0.75, 1.0])
+		})
+
+		it("should use custom model when provided", async () => {
+			const customModel = "mistralai/mistral-embed-2312"
+			const embedderWithCustomModel = new OpenRouterEmbedder(mockApiKey, customModel)
+
+			const testEmbedding = new Float32Array([0.25, 0.5])
+			const base64String = Buffer.from(testEmbedding.buffer).toString("base64")
+
+			const mockResponse = {
+				data: [
+					{
+						embedding: base64String,
+					},
+				],
+				usage: {
+					prompt_tokens: 5,
+					total_tokens: 5,
+				},
+			}
+
+			mockEmbeddingsCreate.mockResolvedValue(mockResponse)
+
+			await embedderWithCustomModel.createEmbeddings(["test"])
+
+			// Verify the embeddings.create was called with the custom model
+			expect(mockEmbeddingsCreate).toHaveBeenCalledWith({
+				input: ["test"],
+				model: customModel,
+				encoding_format: "base64",
+			})
+		})
+	})
+
+	describe("validateConfiguration", () => {
+		let embedder: OpenRouterEmbedder
+
+		beforeEach(() => {
+			embedder = new OpenRouterEmbedder(mockApiKey)
+		})
+
+		it("should validate configuration successfully", async () => {
+			const testEmbedding = new Float32Array([0.25, 0.5])
+			const base64String = Buffer.from(testEmbedding.buffer).toString("base64")
+
+			const mockResponse = {
+				data: [
+					{
+						embedding: base64String,
+					},
+				],
+				usage: {
+					prompt_tokens: 1,
+					total_tokens: 1,
+				},
+			}
+
+			mockEmbeddingsCreate.mockResolvedValue(mockResponse)
+
+			const result = await embedder.validateConfiguration()
+
+			expect(result.valid).toBe(true)
+			expect(result.error).toBeUndefined()
+			expect(mockEmbeddingsCreate).toHaveBeenCalledWith({
+				input: ["test"],
+				model: "openai/text-embedding-3-large",
+				encoding_format: "base64",
+			})
+		})
+
+		it("should handle validation failure", async () => {
+			const authError = new Error("Invalid API key")
+			;(authError as any).status = 401
+
+			mockEmbeddingsCreate.mockRejectedValue(authError)
+
+			const result = await embedder.validateConfiguration()
+
+			expect(result.valid).toBe(false)
+			expect(result.error).toBe("embeddings:validation.authenticationFailed")
+		})
+	})
+
+	describe("integration with shared models", () => {
+		it("should work with defined OpenRouter models", () => {
+			const openRouterModels = [
+				"openai/text-embedding-3-small",
+				"openai/text-embedding-3-large",
+				"openai/text-embedding-ada-002",
+				"google/gemini-embedding-001",
+				"mistralai/mistral-embed-2312",
+				"mistralai/codestral-embed-2505",
+				"qwen/qwen3-embedding-8b",
+			]
+
+			openRouterModels.forEach((model) => {
+				const dimension = getModelDimension("openrouter", model)
+				expect(dimension).toBeDefined()
+				expect(dimension).toBeGreaterThan(0)
+
+				const embedder = new OpenRouterEmbedder(mockApiKey, model)
+				expect(embedder.embedderInfo.name).toBe("openrouter")
+			})
+		})
+
+		it("should use correct default model", () => {
+			const defaultModel = getDefaultModelId("openrouter")
+			expect(defaultModel).toBe("openai/text-embedding-3-large")
+
+			const dimension = getModelDimension("openrouter", defaultModel)
+			expect(dimension).toBe(3072)
+		})
+	})
+})

+ 396 - 0
src/services/code-index/embedders/openrouter.ts

@@ -0,0 +1,396 @@
+import { OpenAI } from "openai"
+import { IEmbedder, EmbeddingResponse, EmbedderInfo } from "../interfaces/embedder"
+import {
+	MAX_BATCH_TOKENS,
+	MAX_ITEM_TOKENS,
+	MAX_BATCH_RETRIES as MAX_RETRIES,
+	INITIAL_RETRY_DELAY_MS as INITIAL_DELAY_MS,
+} from "../constants"
+import { getDefaultModelId, getModelQueryPrefix } from "../../../shared/embeddingModels"
+import { t } from "../../../i18n"
+import { withValidationErrorHandling, HttpError, formatEmbeddingError } from "../shared/validation-helpers"
+import { TelemetryEventName } from "@roo-code/types"
+import { TelemetryService } from "@roo-code/telemetry"
+import { Mutex } from "async-mutex"
+import { handleOpenAIError } from "../../../api/providers/utils/openai-error-handler"
+
/**
 * A single embedding entry as returned by the API. Because requests are made
 * with `encoding_format: "base64"`, the embedding arrives as a base64 string;
 * after local decoding it is replaced with a number[]. Extra response fields
 * are preserved via the index signature.
 */
interface EmbeddingItem {
	embedding: string | number[]
	[key: string]: any
}

/**
 * Minimal shape of the OpenRouter (OpenAI-compatible) embeddings response
 * that this embedder relies on. Usage fields are optional since not every
 * upstream model reports token counts.
 */
interface OpenRouterEmbeddingResponse {
	data: EmbeddingItem[]
	usage?: {
		prompt_tokens?: number
		total_tokens?: number
	}
}
+
+/**
+ * OpenRouter implementation of the embedder interface with batching and rate limiting.
+ * OpenRouter provides an OpenAI-compatible API that gives access to hundreds of models
+ * through a single endpoint, automatically handling fallbacks and cost optimization.
+ */
+export class OpenRouterEmbedder implements IEmbedder {
+	private embeddingsClient: OpenAI
+	private readonly defaultModelId: string
+	private readonly apiKey: string
+	private readonly maxItemTokens: number
+	private readonly baseUrl: string = "https://openrouter.ai/api/v1"
+
+	// Global rate limiting state shared across all instances
+	private static globalRateLimitState = {
+		isRateLimited: false,
+		rateLimitResetTime: 0,
+		consecutiveRateLimitErrors: 0,
+		lastRateLimitError: 0,
+		// Mutex to ensure thread-safe access to rate limit state
+		mutex: new Mutex(),
+	}
+
+	/**
+	 * Creates a new OpenRouter embedder
+	 * @param apiKey The API key for authentication
+	 * @param modelId Optional model identifier (defaults to "openai/text-embedding-3-large")
+	 * @param maxItemTokens Optional maximum tokens per item (defaults to MAX_ITEM_TOKENS)
+	 */
+	constructor(apiKey: string, modelId?: string, maxItemTokens?: number) {
+		if (!apiKey) {
+			throw new Error(t("embeddings:validation.apiKeyRequired"))
+		}
+
+		this.apiKey = apiKey
+
+		// Wrap OpenAI client creation to handle invalid API key characters
+		try {
+			this.embeddingsClient = new OpenAI({
+				baseURL: this.baseUrl,
+				apiKey: apiKey,
+				defaultHeaders: {
+					"HTTP-Referer": "https://github.com/RooCodeInc/Roo-Code",
+					"X-Title": "Roo Code",
+				},
+			})
+		} catch (error) {
+			// Use the error handler to transform ByteString conversion errors
+			throw handleOpenAIError(error, "OpenRouter")
+		}
+
+		this.defaultModelId = modelId || getDefaultModelId("openrouter")
+		this.maxItemTokens = maxItemTokens || MAX_ITEM_TOKENS
+	}
+
+	/**
+	 * Creates embeddings for the given texts with batching and rate limiting
+	 * @param texts Array of text strings to embed
+	 * @param model Optional model identifier
+	 * @returns Promise resolving to embedding response
+	 */
+	async createEmbeddings(texts: string[], model?: string): Promise<EmbeddingResponse> {
+		const modelToUse = model || this.defaultModelId
+
+		// Apply model-specific query prefix if required
+		const queryPrefix = getModelQueryPrefix("openrouter", modelToUse)
+		const processedTexts = queryPrefix
+			? texts.map((text, index) => {
+					// Prevent double-prefixing
+					if (text.startsWith(queryPrefix)) {
+						return text
+					}
+					const prefixedText = `${queryPrefix}${text}`
+					const estimatedTokens = Math.ceil(prefixedText.length / 4)
+					if (estimatedTokens > MAX_ITEM_TOKENS) {
+						console.warn(
+							t("embeddings:textWithPrefixExceedsTokenLimit", {
+								index,
+								estimatedTokens,
+								maxTokens: MAX_ITEM_TOKENS,
+							}),
+						)
+						// Return original text if adding prefix would exceed limit
+						return text
+					}
+					return prefixedText
+				})
+			: texts
+
+		const allEmbeddings: number[][] = []
+		const usage = { promptTokens: 0, totalTokens: 0 }
+		const remainingTexts = [...processedTexts]
+
+		while (remainingTexts.length > 0) {
+			const currentBatch: string[] = []
+			let currentBatchTokens = 0
+			const processedIndices: number[] = []
+
+			for (let i = 0; i < remainingTexts.length; i++) {
+				const text = remainingTexts[i]
+				const itemTokens = Math.ceil(text.length / 4)
+
+				if (itemTokens > this.maxItemTokens) {
+					console.warn(
+						t("embeddings:textExceedsTokenLimit", {
+							index: i,
+							itemTokens,
+							maxTokens: this.maxItemTokens,
+						}),
+					)
+					processedIndices.push(i)
+					continue
+				}
+
+				if (currentBatchTokens + itemTokens <= MAX_BATCH_TOKENS) {
+					currentBatch.push(text)
+					currentBatchTokens += itemTokens
+					processedIndices.push(i)
+				} else {
+					break
+				}
+			}
+
+			// Remove processed items from remainingTexts (in reverse order to maintain correct indices)
+			for (let i = processedIndices.length - 1; i >= 0; i--) {
+				remainingTexts.splice(processedIndices[i], 1)
+			}
+
+			if (currentBatch.length > 0) {
+				const batchResult = await this._embedBatchWithRetries(currentBatch, modelToUse)
+				allEmbeddings.push(...batchResult.embeddings)
+				usage.promptTokens += batchResult.usage.promptTokens
+				usage.totalTokens += batchResult.usage.totalTokens
+			}
+		}
+
+		return { embeddings: allEmbeddings, usage }
+	}
+
+	/**
+	 * Helper method to handle batch embedding with retries and exponential backoff
+	 * @param batchTexts Array of texts to embed in this batch
+	 * @param model Model identifier to use
+	 * @returns Promise resolving to embeddings and usage statistics
+	 */
+	private async _embedBatchWithRetries(
+		batchTexts: string[],
+		model: string,
+	): Promise<{ embeddings: number[][]; usage: { promptTokens: number; totalTokens: number } }> {
+		for (let attempts = 0; attempts < MAX_RETRIES; attempts++) {
+			// Check global rate limit before attempting request
+			await this.waitForGlobalRateLimit()
+
+			try {
+				const response = (await this.embeddingsClient.embeddings.create({
+					input: batchTexts,
+					model: model,
+					// OpenAI package (as of v4.78.1) has a parsing issue that truncates embedding dimensions to 256
+					// when processing numeric arrays, which breaks compatibility with models using larger dimensions.
+					// By requesting base64 encoding, we bypass the package's parser and handle decoding ourselves.
+					encoding_format: "base64",
+				})) as OpenRouterEmbeddingResponse
+
+				// Convert base64 embeddings to float32 arrays
+				const processedEmbeddings = response.data.map((item: EmbeddingItem) => {
+					if (typeof item.embedding === "string") {
+						const buffer = Buffer.from(item.embedding, "base64")
+
+						// Create Float32Array view over the buffer
+						const float32Array = new Float32Array(buffer.buffer, buffer.byteOffset, buffer.byteLength / 4)
+
+						return {
+							...item,
+							embedding: Array.from(float32Array),
+						}
+					}
+					return item
+				})
+
+				// Replace the original data with processed embeddings
+				response.data = processedEmbeddings
+
+				const embeddings = response.data.map((item) => item.embedding as number[])
+
+				return {
+					embeddings: embeddings,
+					usage: {
+						promptTokens: response.usage?.prompt_tokens || 0,
+						totalTokens: response.usage?.total_tokens || 0,
+					},
+				}
+			} catch (error) {
+				// Capture telemetry before error is reformatted
+				TelemetryService.instance.captureEvent(TelemetryEventName.CODE_INDEX_ERROR, {
+					error: error instanceof Error ? error.message : String(error),
+					stack: error instanceof Error ? error.stack : undefined,
+					location: "OpenRouterEmbedder:_embedBatchWithRetries",
+					attempt: attempts + 1,
+				})
+
+				const hasMoreAttempts = attempts < MAX_RETRIES - 1
+
+				// Check if it's a rate limit error
+				const httpError = error as HttpError
+				if (httpError?.status === 429) {
+					// Update global rate limit state
+					await this.updateGlobalRateLimitState(httpError)
+
+					if (hasMoreAttempts) {
+						// Calculate delay based on global rate limit state
+						const baseDelay = INITIAL_DELAY_MS * Math.pow(2, attempts)
+						const globalDelay = await this.getGlobalRateLimitDelay()
+						const delayMs = Math.max(baseDelay, globalDelay)
+
+						console.warn(
+							t("embeddings:rateLimitRetry", {
+								delayMs,
+								attempt: attempts + 1,
+								maxRetries: MAX_RETRIES,
+							}),
+						)
+						await new Promise((resolve) => setTimeout(resolve, delayMs))
+						continue
+					}
+				}
+
+				// Log the error for debugging
+				console.error(`OpenRouter embedder error (attempt ${attempts + 1}/${MAX_RETRIES}):`, error)
+
+				// Format and throw the error
+				throw formatEmbeddingError(error, MAX_RETRIES)
+			}
+		}
+
+		throw new Error(t("embeddings:failedMaxAttempts", { attempts: MAX_RETRIES }))
+	}
+
+	/**
+	 * Validates the OpenRouter embedder configuration by testing API connectivity
+	 * @returns Promise resolving to validation result with success status and optional error message
+	 */
+	async validateConfiguration(): Promise<{ valid: boolean; error?: string }> {
+		return withValidationErrorHandling(async () => {
+			try {
+				// Test with a minimal embedding request
+				const testTexts = ["test"]
+				const modelToUse = this.defaultModelId
+
+				const response = (await this.embeddingsClient.embeddings.create({
+					input: testTexts,
+					model: modelToUse,
+					encoding_format: "base64",
+				})) as OpenRouterEmbeddingResponse
+
+				// Check if we got a valid response
+				if (!response?.data || response.data.length === 0) {
+					return {
+						valid: false,
+						error: "embeddings:validation.invalidResponse",
+					}
+				}
+
+				return { valid: true }
+			} catch (error) {
+				// Capture telemetry for validation errors
+				TelemetryService.instance.captureEvent(TelemetryEventName.CODE_INDEX_ERROR, {
+					error: error instanceof Error ? error.message : String(error),
+					stack: error instanceof Error ? error.stack : undefined,
+					location: "OpenRouterEmbedder:validateConfiguration",
+				})
+				throw error
+			}
+		}, "openrouter")
+	}
+
+	/**
+	 * Returns information about this embedder
+	 */
+	get embedderInfo(): EmbedderInfo {
+		return {
+			name: "openrouter",
+		}
+	}
+
+	/**
+	 * Waits if there's an active global rate limit
+	 */
+	private async waitForGlobalRateLimit(): Promise<void> {
+		const release = await OpenRouterEmbedder.globalRateLimitState.mutex.acquire()
+		let mutexReleased = false
+
+		try {
+			const state = OpenRouterEmbedder.globalRateLimitState
+
+			if (state.isRateLimited && state.rateLimitResetTime > Date.now()) {
+				const waitTime = state.rateLimitResetTime - Date.now()
+				// Silent wait - no logging to prevent flooding
+				release()
+				mutexReleased = true
+				await new Promise((resolve) => setTimeout(resolve, waitTime))
+				return
+			}
+
+			// Reset rate limit if time has passed
+			if (state.isRateLimited && state.rateLimitResetTime <= Date.now()) {
+				state.isRateLimited = false
+				state.consecutiveRateLimitErrors = 0
+			}
+		} finally {
+			// Only release if we haven't already
+			if (!mutexReleased) {
+				release()
+			}
+		}
+	}
+
+	/**
+	 * Updates global rate limit state when a 429 error occurs
+	 */
+	private async updateGlobalRateLimitState(error: HttpError): Promise<void> {
+		const release = await OpenRouterEmbedder.globalRateLimitState.mutex.acquire()
+		try {
+			const state = OpenRouterEmbedder.globalRateLimitState
+			const now = Date.now()
+
+			// Increment consecutive rate limit errors
+			if (now - state.lastRateLimitError < 60000) {
+				// Within 1 minute
+				state.consecutiveRateLimitErrors++
+			} else {
+				state.consecutiveRateLimitErrors = 1
+			}
+
+			state.lastRateLimitError = now
+
+			// Calculate exponential backoff based on consecutive errors
+			const baseDelay = 5000 // 5 seconds base
+			const maxDelay = 300000 // 5 minutes max
+			const exponentialDelay = Math.min(baseDelay * Math.pow(2, state.consecutiveRateLimitErrors - 1), maxDelay)
+
+			// Set global rate limit
+			state.isRateLimited = true
+			state.rateLimitResetTime = now + exponentialDelay
+
+			// Silent rate limit activation - no logging to prevent flooding
+		} finally {
+			release()
+		}
+	}
+
+	/**
+	 * Gets the current global rate limit delay
+	 */
+	private async getGlobalRateLimitDelay(): Promise<number> {
+		const release = await OpenRouterEmbedder.globalRateLimitState.mutex.acquire()
+		try {
+			const state = OpenRouterEmbedder.globalRateLimitState
+
+			if (state.isRateLimited && state.rateLimitResetTime > Date.now()) {
+				return state.rateLimitResetTime - Date.now()
+			}
+
+			return 0
+		} finally {
+			release()
+		}
+	}
+}

+ 2 - 0
src/services/code-index/interfaces/config.ts

@@ -15,6 +15,7 @@ export interface CodeIndexConfig {
 	geminiOptions?: { apiKey: string }
 	mistralOptions?: { apiKey: string }
 	vercelAiGatewayOptions?: { apiKey: string }
+	openRouterOptions?: { apiKey: string }
 	qdrantUrl?: string
 	qdrantApiKey?: string
 	searchMinScore?: number
@@ -37,6 +38,7 @@ export type PreviousConfigSnapshot = {
 	geminiApiKey?: string
 	mistralApiKey?: string
 	vercelAiGatewayApiKey?: string
+	openRouterApiKey?: string
 	qdrantUrl?: string
 	qdrantApiKey?: string
 }

+ 8 - 1
src/services/code-index/interfaces/embedder.ts

@@ -28,7 +28,14 @@ export interface EmbeddingResponse {
 	}
 }
 
-export type AvailableEmbedders = "openai" | "ollama" | "openai-compatible" | "gemini" | "mistral" | "vercel-ai-gateway"
// Embedding providers selectable for codebase indexing.
export type AvailableEmbedders =
	| "openai"
	| "ollama"
	| "openai-compatible"
	| "gemini"
	| "mistral"
	| "vercel-ai-gateway"
	| "openrouter"
 
 export interface EmbedderInfo {
 	name: AvailableEmbedders

+ 8 - 1
src/services/code-index/interfaces/manager.ts

@@ -70,7 +70,14 @@ export interface ICodeIndexManager {
 }
 
 export type IndexingState = "Standby" | "Indexing" | "Indexed" | "Error"
-export type EmbedderProvider = "openai" | "ollama" | "openai-compatible" | "gemini" | "mistral" | "vercel-ai-gateway"
// Embedding providers the code-index manager can be configured with.
export type EmbedderProvider =
	| "openai"
	| "ollama"
	| "openai-compatible"
	| "gemini"
	| "mistral"
	| "vercel-ai-gateway"
	| "openrouter"
 
 export interface IndexProgressUpdate {
 	systemStatus: IndexingState

+ 6 - 0
src/services/code-index/service-factory.ts

@@ -5,6 +5,7 @@ import { OpenAICompatibleEmbedder } from "./embedders/openai-compatible"
 import { GeminiEmbedder } from "./embedders/gemini"
 import { MistralEmbedder } from "./embedders/mistral"
 import { VercelAiGatewayEmbedder } from "./embedders/vercel-ai-gateway"
+import { OpenRouterEmbedder } from "./embedders/openrouter"
 import { EmbedderProvider, getDefaultModelId, getModelDimension } from "../../shared/embeddingModels"
 import { QdrantVectorStore } from "./vector-store/qdrant-client"
 import { codeParser, DirectoryScanner, FileWatcher } from "./processors"
@@ -79,6 +80,11 @@ export class CodeIndexServiceFactory {
 				throw new Error(t("embeddings:serviceFactory.vercelAiGatewayConfigMissing"))
 			}
 			return new VercelAiGatewayEmbedder(config.vercelAiGatewayOptions.apiKey, config.modelId)
+		} else if (provider === "openrouter") {
+			if (!config.openRouterOptions?.apiKey) {
+				throw new Error(t("embeddings:serviceFactory.openRouterConfigMissing"))
+			}
+			return new OpenRouterEmbedder(config.openRouterOptions.apiKey, config.modelId)
 		}
 
 		throw new Error(

+ 2 - 0
src/shared/WebviewMessage.ts

@@ -292,6 +292,7 @@ export interface WebviewMessage {
 			| "gemini"
 			| "mistral"
 			| "vercel-ai-gateway"
+			| "openrouter"
 		codebaseIndexEmbedderBaseUrl?: string
 		codebaseIndexEmbedderModelId: string
 		codebaseIndexEmbedderModelDimension?: number // Generic dimension for all providers
@@ -306,6 +307,7 @@ export interface WebviewMessage {
 		codebaseIndexGeminiApiKey?: string
 		codebaseIndexMistralApiKey?: string
 		codebaseIndexVercelAiGatewayApiKey?: string
+		codebaseIndexOpenRouterApiKey?: string
 	}
 }
 

+ 24 - 1
src/shared/embeddingModels.ts

@@ -2,7 +2,14 @@
  * Defines profiles for different embedding models, including their dimensions.
  */
 
-export type EmbedderProvider = "openai" | "ollama" | "openai-compatible" | "gemini" | "mistral" | "vercel-ai-gateway" // Add other providers as needed
// Providers with entries in EMBEDDING_MODEL_PROFILES below.
export type EmbedderProvider =
	| "openai"
	| "ollama"
	| "openai-compatible"
	| "gemini"
	| "mistral"
	| "vercel-ai-gateway"
	| "openrouter" // Add other providers as needed
 
 export interface EmbeddingModelProfile {
 	dimension: number
@@ -70,6 +77,19 @@ export const EMBEDDING_MODEL_PROFILES: EmbeddingModelProfiles = {
 		"mistral/codestral-embed": { dimension: 1536, scoreThreshold: 0.4 },
 		"mistral/mistral-embed": { dimension: 1024, scoreThreshold: 0.4 },
 	},
+	openrouter: {
+		// OpenAI models via OpenRouter
+		"openai/text-embedding-3-small": { dimension: 1536, scoreThreshold: 0.4 },
+		"openai/text-embedding-3-large": { dimension: 3072, scoreThreshold: 0.4 },
+		"openai/text-embedding-ada-002": { dimension: 1536, scoreThreshold: 0.4 },
+		// Google models via OpenRouter
+		"google/gemini-embedding-001": { dimension: 3072, scoreThreshold: 0.4 },
+		// Mistral models via OpenRouter
+		"mistralai/mistral-embed-2312": { dimension: 1024, scoreThreshold: 0.4 },
+		"mistralai/codestral-embed-2505": { dimension: 3072, scoreThreshold: 0.4 },
+		// Qwen models via OpenRouter
+		"qwen/qwen3-embedding-8b": { dimension: 4096, scoreThreshold: 0.4 },
+	},
 }
 
 /**
@@ -163,6 +183,9 @@ export function getDefaultModelId(provider: EmbedderProvider): string {
 		case "vercel-ai-gateway":
 			return "openai/text-embedding-3-large"
 
+		case "openrouter":
+			return "openai/text-embedding-3-large"
+
 		default:
 			// Fallback for unknown providers
 			console.warn(`Unknown provider for default model ID: ${provider}. Falling back to OpenAI default.`)

+ 91 - 1
webview-ui/src/components/chat/CodeIndexPopover.tsx

@@ -73,6 +73,7 @@ interface LocalCodeIndexSettings {
 	codebaseIndexGeminiApiKey?: string
 	codebaseIndexMistralApiKey?: string
 	codebaseIndexVercelAiGatewayApiKey?: string
+	codebaseIndexOpenRouterApiKey?: string
 }
 
 // Validation schema for codebase index settings
@@ -149,6 +150,16 @@ const createValidationSchema = (provider: EmbedderProvider, t: any) => {
 					.min(1, t("settings:codeIndex.validation.modelSelectionRequired")),
 			})
 
+		case "openrouter":
+			return baseSchema.extend({
+				codebaseIndexOpenRouterApiKey: z
+					.string()
+					.min(1, t("settings:codeIndex.validation.openRouterApiKeyRequired")),
+				codebaseIndexEmbedderModelId: z
+					.string()
+					.min(1, t("settings:codeIndex.validation.modelSelectionRequired")),
+			})
+
 		default:
 			return baseSchema
 	}
@@ -194,6 +205,7 @@ export const CodeIndexPopover: React.FC<CodeIndexPopoverProps> = ({
 		codebaseIndexGeminiApiKey: "",
 		codebaseIndexMistralApiKey: "",
 		codebaseIndexVercelAiGatewayApiKey: "",
+		codebaseIndexOpenRouterApiKey: "",
 	})
 
 	// Initial settings state - stores the settings when popover opens
@@ -229,6 +241,7 @@ export const CodeIndexPopover: React.FC<CodeIndexPopoverProps> = ({
 				codebaseIndexGeminiApiKey: "",
 				codebaseIndexMistralApiKey: "",
 				codebaseIndexVercelAiGatewayApiKey: "",
+				codebaseIndexOpenRouterApiKey: "",
 			}
 			setInitialSettings(settings)
 			setCurrentSettings(settings)
@@ -345,6 +358,14 @@ export const CodeIndexPopover: React.FC<CodeIndexPopoverProps> = ({
 							? SECRET_PLACEHOLDER
 							: ""
 					}
+					if (
+						!prev.codebaseIndexOpenRouterApiKey ||
+						prev.codebaseIndexOpenRouterApiKey === SECRET_PLACEHOLDER
+					) {
+						updated.codebaseIndexOpenRouterApiKey = secretStatus.hasOpenRouterApiKey
+							? SECRET_PLACEHOLDER
+							: ""
+					}
 
 					return updated
 				}
@@ -418,7 +439,8 @@ export const CodeIndexPopover: React.FC<CodeIndexPopoverProps> = ({
 					key === "codebaseIndexOpenAiCompatibleApiKey" ||
 					key === "codebaseIndexGeminiApiKey" ||
 					key === "codebaseIndexMistralApiKey" ||
-					key === "codebaseIndexVercelAiGatewayApiKey"
+					key === "codebaseIndexVercelAiGatewayApiKey" ||
+					key === "codebaseIndexOpenRouterApiKey"
 				) {
 					dataToValidate[key] = "placeholder-valid"
 				}
@@ -669,6 +691,9 @@ export const CodeIndexPopover: React.FC<CodeIndexPopoverProps> = ({
 												<SelectItem value="vercel-ai-gateway">
 													{t("settings:codeIndex.vercelAiGatewayProvider")}
 												</SelectItem>
+												<SelectItem value="openrouter">
+													{t("settings:codeIndex.openRouterProvider")}
+												</SelectItem>
 											</SelectContent>
 										</Select>
 									</div>
@@ -1131,6 +1156,71 @@ export const CodeIndexPopover: React.FC<CodeIndexPopoverProps> = ({
 										</>
 									)}
 
+									{currentSettings.codebaseIndexEmbedderProvider === "openrouter" && (
+										<>
+											<div className="space-y-2">
+												<label className="text-sm font-medium">
+													{t("settings:codeIndex.openRouterApiKeyLabel")}
+												</label>
+												<VSCodeTextField
+													type="password"
+													value={currentSettings.codebaseIndexOpenRouterApiKey || ""}
+													onInput={(e: any) =>
+														updateSetting("codebaseIndexOpenRouterApiKey", e.target.value)
+													}
+													placeholder={t("settings:codeIndex.openRouterApiKeyPlaceholder")}
+													className={cn("w-full", {
+														"border-red-500": formErrors.codebaseIndexOpenRouterApiKey,
+													})}
+												/>
+												{formErrors.codebaseIndexOpenRouterApiKey && (
+													<p className="text-xs text-vscode-errorForeground mt-1 mb-0">
+														{formErrors.codebaseIndexOpenRouterApiKey}
+													</p>
+												)}
+											</div>
+
+											<div className="space-y-2">
+												<label className="text-sm font-medium">
+													{t("settings:codeIndex.modelLabel")}
+												</label>
+												<VSCodeDropdown
+													value={currentSettings.codebaseIndexEmbedderModelId}
+													onChange={(e: any) =>
+														updateSetting("codebaseIndexEmbedderModelId", e.target.value)
+													}
+													className={cn("w-full", {
+														"border-red-500": formErrors.codebaseIndexEmbedderModelId,
+													})}>
+													<VSCodeOption value="" className="p-2">
+														{t("settings:codeIndex.selectModel")}
+													</VSCodeOption>
+													{getAvailableModels().map((modelId) => {
+														const model =
+															codebaseIndexModels?.[
+																currentSettings.codebaseIndexEmbedderProvider
+															]?.[modelId]
+														return (
+															<VSCodeOption key={modelId} value={modelId} className="p-2">
+																{modelId}{" "}
+																{model
+																	? t("settings:codeIndex.modelDimensions", {
+																			dimension: model.dimension,
+																		})
+																	: ""}
+															</VSCodeOption>
+														)
+													})}
+												</VSCodeDropdown>
+												{formErrors.codebaseIndexEmbedderModelId && (
+													<p className="text-xs text-vscode-errorForeground mt-1 mb-0">
+														{formErrors.codebaseIndexEmbedderModelId}
+													</p>
+												)}
+											</div>
+										</>
+									)}
+
 									{/* Qdrant Settings */}
 									<div className="space-y-2">
 										<label className="text-sm font-medium">

+ 5 - 1
webview-ui/src/i18n/locales/ca/settings.json

@@ -58,6 +58,9 @@
 		"vercelAiGatewayProvider": "Vercel AI Gateway",
 		"vercelAiGatewayApiKeyLabel": "Clau API",
 		"vercelAiGatewayApiKeyPlaceholder": "Introduïu la vostra clau API de Vercel AI Gateway",
+		"openRouterProvider": "OpenRouter",
+		"openRouterApiKeyLabel": "Clau de l'API d'OpenRouter",
+		"openRouterApiKeyPlaceholder": "Introduïu la vostra clau de l'API d'OpenRouter",
 		"openaiCompatibleProvider": "Compatible amb OpenAI",
 		"openAiKeyLabel": "Clau API OpenAI",
 		"openAiKeyPlaceholder": "Introduïu la vostra clau API OpenAI",
@@ -124,7 +127,8 @@
 			"vercelAiGatewayApiKeyRequired": "Es requereix la clau API de Vercel AI Gateway",
 			"ollamaBaseUrlRequired": "Cal una URL base d'Ollama",
 			"baseUrlRequired": "Cal una URL base",
-			"modelDimensionMinValue": "La dimensió del model ha de ser superior a 0"
+			"modelDimensionMinValue": "La dimensió del model ha de ser superior a 0",
+			"openRouterApiKeyRequired": "Clau API d'OpenRouter és requerida"
 		},
 		"advancedConfigLabel": "Configuració avançada",
 		"searchMinScoreLabel": "Llindar de puntuació de cerca",

+ 5 - 1
webview-ui/src/i18n/locales/de/settings.json

@@ -60,6 +60,9 @@
 		"vercelAiGatewayProvider": "Vercel AI Gateway",
 		"vercelAiGatewayApiKeyLabel": "API-Schlüssel",
 		"vercelAiGatewayApiKeyPlaceholder": "Gib deinen Vercel AI Gateway API-Schlüssel ein",
+		"openRouterProvider": "OpenRouter",
+		"openRouterApiKeyLabel": "OpenRouter API-Schlüssel",
+		"openRouterApiKeyPlaceholder": "Gib deinen OpenRouter API-Schlüssel ein",
 		"mistralProvider": "Mistral",
 		"mistralApiKeyLabel": "API-Schlüssel:",
 		"mistralApiKeyPlaceholder": "Gib deinen Mistral-API-Schlüssel ein",
@@ -124,7 +127,8 @@
 			"vercelAiGatewayApiKeyRequired": "Vercel AI Gateway API-Schlüssel ist erforderlich",
 			"ollamaBaseUrlRequired": "Ollama-Basis-URL ist erforderlich",
 			"baseUrlRequired": "Basis-URL ist erforderlich",
-			"modelDimensionMinValue": "Modellabmessung muss größer als 0 sein"
+			"modelDimensionMinValue": "Modellabmessung muss größer als 0 sein",
+			"openRouterApiKeyRequired": "OpenRouter API-Schlüssel ist erforderlich"
 		},
 		"advancedConfigLabel": "Erweiterte Konfiguration",
 		"searchMinScoreLabel": "Suchergebnis-Schwellenwert",

+ 4 - 0
webview-ui/src/i18n/locales/en/settings.json

@@ -69,6 +69,9 @@
 		"vercelAiGatewayProvider": "Vercel AI Gateway",
 		"vercelAiGatewayApiKeyLabel": "API Key",
 		"vercelAiGatewayApiKeyPlaceholder": "Enter your Vercel AI Gateway API key",
+		"openRouterProvider": "OpenRouter",
+		"openRouterApiKeyLabel": "OpenRouter API Key",
+		"openRouterApiKeyPlaceholder": "Enter your OpenRouter API key",
 		"openaiCompatibleProvider": "OpenAI Compatible",
 		"openAiKeyLabel": "OpenAI API Key",
 		"openAiKeyPlaceholder": "Enter your OpenAI API key",
@@ -135,6 +138,7 @@
 			"geminiApiKeyRequired": "Gemini API key is required",
 			"mistralApiKeyRequired": "Mistral API key is required",
 			"vercelAiGatewayApiKeyRequired": "Vercel AI Gateway API key is required",
+			"openRouterApiKeyRequired": "OpenRouter API key is required",
 			"ollamaBaseUrlRequired": "Ollama base URL is required",
 			"baseUrlRequired": "Base URL is required",
 			"modelDimensionMinValue": "Model dimension must be greater than 0"

+ 5 - 1
webview-ui/src/i18n/locales/es/settings.json

@@ -60,6 +60,9 @@
 		"vercelAiGatewayProvider": "Vercel AI Gateway",
 		"vercelAiGatewayApiKeyLabel": "Clave API",
 		"vercelAiGatewayApiKeyPlaceholder": "Introduce tu clave API de Vercel AI Gateway",
+		"openRouterProvider": "OpenRouter",
+		"openRouterApiKeyLabel": "Clave de API de OpenRouter",
+		"openRouterApiKeyPlaceholder": "Introduce tu clave de API de OpenRouter",
 		"mistralProvider": "Mistral",
 		"mistralApiKeyLabel": "Clave API:",
 		"mistralApiKeyPlaceholder": "Introduce tu clave de API de Mistral",
@@ -124,7 +127,8 @@
 			"vercelAiGatewayApiKeyRequired": "Se requiere la clave API de Vercel AI Gateway",
 			"ollamaBaseUrlRequired": "Se requiere la URL base de Ollama",
 			"baseUrlRequired": "Se requiere la URL base",
-			"modelDimensionMinValue": "La dimensión del modelo debe ser mayor que 0"
+			"modelDimensionMinValue": "La dimensión del modelo debe ser mayor que 0",
+			"openRouterApiKeyRequired": "Se requiere la clave API de OpenRouter"
 		},
 		"advancedConfigLabel": "Configuración avanzada",
 		"searchMinScoreLabel": "Umbral de puntuación de búsqueda",

+ 5 - 1
webview-ui/src/i18n/locales/fr/settings.json

@@ -60,6 +60,9 @@
 		"vercelAiGatewayProvider": "Vercel AI Gateway",
 		"vercelAiGatewayApiKeyLabel": "Clé API",
 		"vercelAiGatewayApiKeyPlaceholder": "Entrez votre clé API Vercel AI Gateway",
+		"openRouterProvider": "OpenRouter",
+		"openRouterApiKeyLabel": "Clé d'API OpenRouter",
+		"openRouterApiKeyPlaceholder": "Entrez votre clé d'API OpenRouter",
 		"mistralProvider": "Mistral",
 		"mistralApiKeyLabel": "Clé d'API:",
 		"mistralApiKeyPlaceholder": "Entrez votre clé d'API Mistral",
@@ -124,7 +127,8 @@
 			"vercelAiGatewayApiKeyRequired": "La clé API Vercel AI Gateway est requise",
 			"ollamaBaseUrlRequired": "L'URL de base Ollama est requise",
 			"baseUrlRequired": "L'URL de base est requise",
-			"modelDimensionMinValue": "La dimension du modèle doit être supérieure à 0"
+			"modelDimensionMinValue": "La dimension du modèle doit être supérieure à 0",
+			"openRouterApiKeyRequired": "Clé API OpenRouter est requise"
 		},
 		"advancedConfigLabel": "Configuration avancée",
 		"searchMinScoreLabel": "Seuil de score de recherche",

+ 5 - 1
webview-ui/src/i18n/locales/hi/settings.json

@@ -55,6 +55,9 @@
 		"vercelAiGatewayProvider": "Vercel AI Gateway",
 		"vercelAiGatewayApiKeyLabel": "API कुंजी",
 		"vercelAiGatewayApiKeyPlaceholder": "अपनी Vercel AI Gateway API कुंजी दर्ज करें",
+		"openRouterProvider": "ओपनराउटर",
+		"openRouterApiKeyLabel": "ओपनराउटर एपीआई कुंजी",
+		"openRouterApiKeyPlaceholder": "अपनी ओपनराउटर एपीआई कुंजी दर्ज करें",
 		"mistralProvider": "Mistral",
 		"mistralApiKeyLabel": "API कुंजी:",
 		"mistralApiKeyPlaceholder": "अपनी मिस्ट्रल एपीआई कुंजी दर्ज करें",
@@ -124,7 +127,8 @@
 			"vercelAiGatewayApiKeyRequired": "Vercel AI Gateway API कुंजी आवश्यक है",
 			"ollamaBaseUrlRequired": "Ollama आधार URL आवश्यक है",
 			"baseUrlRequired": "आधार URL आवश्यक है",
-			"modelDimensionMinValue": "मॉडल आयाम 0 से बड़ा होना चाहिए"
+			"modelDimensionMinValue": "मॉडल आयाम 0 से बड़ा होना चाहिए",
+			"openRouterApiKeyRequired": "OpenRouter API कुंजी आवश्यक है"
 		},
 		"advancedConfigLabel": "उन्नत कॉन्फ़िगरेशन",
 		"searchMinScoreLabel": "खोज स्कोर थ्रेसहोल्ड",

+ 5 - 1
webview-ui/src/i18n/locales/id/settings.json

@@ -55,6 +55,9 @@
 		"vercelAiGatewayProvider": "Vercel AI Gateway",
 		"vercelAiGatewayApiKeyLabel": "API Key",
 		"vercelAiGatewayApiKeyPlaceholder": "Masukkan kunci API Vercel AI Gateway Anda",
+		"openRouterProvider": "OpenRouter",
+		"openRouterApiKeyLabel": "Kunci API OpenRouter",
+		"openRouterApiKeyPlaceholder": "Masukkan kunci API OpenRouter Anda",
 		"mistralProvider": "Mistral",
 		"mistralApiKeyLabel": "Kunci API:",
 		"mistralApiKeyPlaceholder": "Masukkan kunci API Mistral Anda",
@@ -124,7 +127,8 @@
 			"vercelAiGatewayApiKeyRequired": "Kunci API Vercel AI Gateway diperlukan",
 			"ollamaBaseUrlRequired": "URL dasar Ollama diperlukan",
 			"baseUrlRequired": "URL dasar diperlukan",
-			"modelDimensionMinValue": "Dimensi model harus lebih besar dari 0"
+			"modelDimensionMinValue": "Dimensi model harus lebih besar dari 0",
+			"openRouterApiKeyRequired": "Kunci API OpenRouter diperlukan"
 		},
 		"advancedConfigLabel": "Konfigurasi Lanjutan",
 		"searchMinScoreLabel": "Ambang Batas Skor Pencarian",

+ 5 - 1
webview-ui/src/i18n/locales/it/settings.json

@@ -55,6 +55,9 @@
 		"vercelAiGatewayProvider": "Vercel AI Gateway",
 		"vercelAiGatewayApiKeyLabel": "Chiave API",
 		"vercelAiGatewayApiKeyPlaceholder": "Inserisci la tua chiave API Vercel AI Gateway",
+		"openRouterProvider": "OpenRouter",
+		"openRouterApiKeyLabel": "Chiave API OpenRouter",
+		"openRouterApiKeyPlaceholder": "Inserisci la tua chiave API OpenRouter",
 		"mistralProvider": "Mistral",
 		"mistralApiKeyLabel": "Chiave API:",
 		"mistralApiKeyPlaceholder": "Inserisci la tua chiave API Mistral",
@@ -124,7 +127,8 @@
 			"vercelAiGatewayApiKeyRequired": "È richiesta la chiave API Vercel AI Gateway",
 			"ollamaBaseUrlRequired": "È richiesto l'URL di base di Ollama",
 			"baseUrlRequired": "È richiesto l'URL di base",
-			"modelDimensionMinValue": "La dimensione del modello deve essere maggiore di 0"
+			"modelDimensionMinValue": "La dimensione del modello deve essere maggiore di 0",
+			"openRouterApiKeyRequired": "È richiesta la chiave API OpenRouter"
 		},
 		"advancedConfigLabel": "Configurazione avanzata",
 		"searchMinScoreLabel": "Soglia punteggio di ricerca",

+ 5 - 1
webview-ui/src/i18n/locales/ja/settings.json

@@ -55,6 +55,9 @@
 		"vercelAiGatewayProvider": "Vercel AI Gateway",
 		"vercelAiGatewayApiKeyLabel": "APIキー",
 		"vercelAiGatewayApiKeyPlaceholder": "Vercel AI GatewayのAPIキーを入力してください",
+		"openRouterProvider": "OpenRouter",
+		"openRouterApiKeyLabel": "OpenRouter APIキー",
+		"openRouterApiKeyPlaceholder": "OpenRouter APIキーを入力してください",
 		"mistralProvider": "Mistral",
 		"mistralApiKeyLabel": "APIキー:",
 		"mistralApiKeyPlaceholder": "Mistral APIキーを入力してください",
@@ -124,7 +127,8 @@
 			"vercelAiGatewayApiKeyRequired": "Vercel AI Gateway APIキーが必要です",
 			"ollamaBaseUrlRequired": "OllamaのベースURLが必要です",
 			"baseUrlRequired": "ベースURLが必要です",
-			"modelDimensionMinValue": "モデルの次元は0より大きくなければなりません"
+			"modelDimensionMinValue": "モデルの次元は0より大きくなければなりません",
+			"openRouterApiKeyRequired": "OpenRouter APIキーが必要です"
 		},
 		"advancedConfigLabel": "詳細設定",
 		"searchMinScoreLabel": "検索スコアのしきい値",

+ 5 - 1
webview-ui/src/i18n/locales/ko/settings.json

@@ -58,6 +58,9 @@
 		"vercelAiGatewayProvider": "Vercel AI Gateway",
 		"vercelAiGatewayApiKeyLabel": "API 키",
 		"vercelAiGatewayApiKeyPlaceholder": "Vercel AI Gateway API 키를 입력하세요",
+		"openRouterProvider": "OpenRouter",
+		"openRouterApiKeyLabel": "OpenRouter API 키",
+		"openRouterApiKeyPlaceholder": "OpenRouter API 키를 입력하세요",
 		"openaiCompatibleProvider": "OpenAI 호환",
 		"openAiKeyLabel": "OpenAI API 키",
 		"openAiKeyPlaceholder": "OpenAI API 키를 입력하세요",
@@ -124,7 +127,8 @@
 			"vercelAiGatewayApiKeyRequired": "Vercel AI Gateway API 키가 필요합니다",
 			"ollamaBaseUrlRequired": "Ollama 기본 URL이 필요합니다",
 			"baseUrlRequired": "기본 URL이 필요합니다",
-			"modelDimensionMinValue": "모델 차원은 0보다 커야 합니다"
+			"modelDimensionMinValue": "모델 차원은 0보다 커야 합니다",
+			"openRouterApiKeyRequired": "OpenRouter API 키가 필요합니다"
 		},
 		"advancedConfigLabel": "고급 구성",
 		"searchMinScoreLabel": "검색 점수 임계값",

+ 5 - 1
webview-ui/src/i18n/locales/nl/settings.json

@@ -55,6 +55,9 @@
 		"vercelAiGatewayProvider": "Vercel AI Gateway",
 		"vercelAiGatewayApiKeyLabel": "API-sleutel",
 		"vercelAiGatewayApiKeyPlaceholder": "Voer uw Vercel AI Gateway API-sleutel in",
+		"openRouterProvider": "OpenRouter",
+		"openRouterApiKeyLabel": "OpenRouter API-sleutel",
+		"openRouterApiKeyPlaceholder": "Voer uw OpenRouter API-sleutel in",
 		"mistralProvider": "Mistral",
 		"mistralApiKeyLabel": "API-sleutel:",
 		"mistralApiKeyPlaceholder": "Voer uw Mistral API-sleutel in",
@@ -124,7 +127,8 @@
 			"vercelAiGatewayApiKeyRequired": "Vercel AI Gateway API-sleutel is vereist",
 			"ollamaBaseUrlRequired": "Ollama basis-URL is vereist",
 			"baseUrlRequired": "Basis-URL is vereist",
-			"modelDimensionMinValue": "Modelafmeting moet groter zijn dan 0"
+			"modelDimensionMinValue": "Modelafmeting moet groter zijn dan 0",
+			"openRouterApiKeyRequired": "OpenRouter API-sleutel is vereist"
 		},
 		"advancedConfigLabel": "Geavanceerde configuratie",
 		"searchMinScoreLabel": "Zoekscore drempel",

+ 5 - 1
webview-ui/src/i18n/locales/pl/settings.json

@@ -58,6 +58,9 @@
 		"vercelAiGatewayProvider": "Vercel AI Gateway",
 		"vercelAiGatewayApiKeyLabel": "Klucz API",
 		"vercelAiGatewayApiKeyPlaceholder": "Wprowadź swój klucz API Vercel AI Gateway",
+		"openRouterProvider": "OpenRouter",
+		"openRouterApiKeyLabel": "Klucz API OpenRouter",
+		"openRouterApiKeyPlaceholder": "Wprowadź swój klucz API OpenRouter",
 		"openaiCompatibleProvider": "Kompatybilny z OpenAI",
 		"openAiKeyLabel": "Klucz API OpenAI",
 		"openAiKeyPlaceholder": "Wprowadź swój klucz API OpenAI",
@@ -124,7 +127,8 @@
 			"vercelAiGatewayApiKeyRequired": "Klucz API Vercel AI Gateway jest wymagany",
 			"ollamaBaseUrlRequired": "Wymagany jest bazowy adres URL Ollama",
 			"baseUrlRequired": "Wymagany jest bazowy adres URL",
-			"modelDimensionMinValue": "Wymiar modelu musi być większy niż 0"
+			"modelDimensionMinValue": "Wymiar modelu musi być większy niż 0",
+			"openRouterApiKeyRequired": "Wymagany jest klucz API OpenRouter"
 		},
 		"advancedConfigLabel": "Konfiguracja zaawansowana",
 		"searchMinScoreLabel": "Próg wyniku wyszukiwania",

+ 5 - 1
webview-ui/src/i18n/locales/pt-BR/settings.json

@@ -55,6 +55,9 @@
 		"vercelAiGatewayProvider": "Vercel AI Gateway",
 		"vercelAiGatewayApiKeyLabel": "Chave de API",
 		"vercelAiGatewayApiKeyPlaceholder": "Digite sua chave de API do Vercel AI Gateway",
+		"openRouterProvider": "OpenRouter",
+		"openRouterApiKeyLabel": "Chave de API do OpenRouter",
+		"openRouterApiKeyPlaceholder": "Digite sua chave de API do OpenRouter",
 		"mistralProvider": "Mistral",
 		"mistralApiKeyLabel": "Chave de API:",
 		"mistralApiKeyPlaceholder": "Digite sua chave de API da Mistral",
@@ -124,7 +127,8 @@
 			"vercelAiGatewayApiKeyRequired": "A chave de API do Vercel AI Gateway é obrigatória",
 			"ollamaBaseUrlRequired": "A URL base do Ollama é obrigatória",
 			"baseUrlRequired": "A URL base é obrigatória",
-			"modelDimensionMinValue": "A dimensão do modelo deve ser maior que 0"
+			"modelDimensionMinValue": "A dimensão do modelo deve ser maior que 0",
+			"openRouterApiKeyRequired": "A chave de API do OpenRouter é obrigatória"
 		},
 		"advancedConfigLabel": "Configuração Avançada",
 		"searchMinScoreLabel": "Limite de pontuação de busca",

+ 5 - 1
webview-ui/src/i18n/locales/ru/settings.json

@@ -55,6 +55,9 @@
 		"vercelAiGatewayProvider": "Vercel AI Gateway",
 		"vercelAiGatewayApiKeyLabel": "Ключ API",
 		"vercelAiGatewayApiKeyPlaceholder": "Введите свой API-ключ Vercel AI Gateway",
+		"openRouterProvider": "OpenRouter",
+		"openRouterApiKeyLabel": "Ключ API OpenRouter",
+		"openRouterApiKeyPlaceholder": "Введите свой ключ API OpenRouter",
 		"mistralProvider": "Mistral",
 		"mistralApiKeyLabel": "Ключ API:",
 		"mistralApiKeyPlaceholder": "Введите свой API-ключ Mistral",
@@ -124,7 +127,8 @@
 			"vercelAiGatewayApiKeyRequired": "Требуется API-ключ Vercel AI Gateway",
 			"ollamaBaseUrlRequired": "Требуется базовый URL Ollama",
 			"baseUrlRequired": "Требуется базовый URL",
-			"modelDimensionMinValue": "Размерность модели должна быть больше 0"
+			"modelDimensionMinValue": "Размерность модели должна быть больше 0",
+			"openRouterApiKeyRequired": "Требуется API-ключ OpenRouter"
 		},
 		"advancedConfigLabel": "Расширенная конфигурация",
 		"searchMinScoreLabel": "Порог оценки поиска",

+ 5 - 1
webview-ui/src/i18n/locales/tr/settings.json

@@ -58,6 +58,9 @@
 		"vercelAiGatewayProvider": "Vercel AI Gateway",
 		"vercelAiGatewayApiKeyLabel": "API Anahtarı",
 		"vercelAiGatewayApiKeyPlaceholder": "Vercel AI Gateway API anahtarınızı girin",
+		"openRouterProvider": "OpenRouter",
+		"openRouterApiKeyLabel": "OpenRouter API Anahtarı",
+		"openRouterApiKeyPlaceholder": "OpenRouter API anahtarınızı girin",
 		"openaiCompatibleProvider": "OpenAI Uyumlu",
 		"openAiKeyLabel": "OpenAI API Anahtarı",
 		"openAiKeyPlaceholder": "OpenAI API anahtarınızı girin",
@@ -124,7 +127,8 @@
 			"vercelAiGatewayApiKeyRequired": "Vercel AI Gateway API anahtarı gereklidir",
 			"ollamaBaseUrlRequired": "Ollama temel URL'si gereklidir",
 			"baseUrlRequired": "Temel URL'si gereklidir",
-			"modelDimensionMinValue": "Model boyutu 0'dan büyük olmalıdır"
+			"modelDimensionMinValue": "Model boyutu 0'dan büyük olmalıdır",
+			"openRouterApiKeyRequired": "OpenRouter API anahtarı gereklidir"
 		},
 		"advancedConfigLabel": "Gelişmiş Yapılandırma",
 		"searchMinScoreLabel": "Arama Skoru Eşiği",

+ 5 - 1
webview-ui/src/i18n/locales/vi/settings.json

@@ -58,6 +58,9 @@
 		"vercelAiGatewayProvider": "Vercel AI Gateway",
 		"vercelAiGatewayApiKeyLabel": "Khóa API",
 		"vercelAiGatewayApiKeyPlaceholder": "Nhập khóa API Vercel AI Gateway của bạn",
+		"openRouterProvider": "OpenRouter",
+		"openRouterApiKeyLabel": "Khóa API OpenRouter",
+		"openRouterApiKeyPlaceholder": "Nhập khóa API OpenRouter của bạn",
 		"openaiCompatibleProvider": "Tương thích OpenAI",
 		"openAiKeyLabel": "Khóa API OpenAI",
 		"openAiKeyPlaceholder": "Nhập khóa API OpenAI của bạn",
@@ -124,7 +127,8 @@
 			"vercelAiGatewayApiKeyRequired": "Cần có khóa API Vercel AI Gateway",
 			"ollamaBaseUrlRequired": "Yêu cầu URL cơ sở Ollama",
 			"baseUrlRequired": "Yêu cầu URL cơ sở",
-			"modelDimensionMinValue": "Kích thước mô hình phải lớn hơn 0"
+			"modelDimensionMinValue": "Kích thước mô hình phải lớn hơn 0",
+			"openRouterApiKeyRequired": "Yêu cầu khóa API OpenRouter"
 		},
 		"advancedConfigLabel": "Cấu hình nâng cao",
 		"searchMinScoreLabel": "Ngưỡng điểm tìm kiếm",

+ 5 - 1
webview-ui/src/i18n/locales/zh-CN/settings.json

@@ -60,6 +60,9 @@
 		"vercelAiGatewayProvider": "Vercel AI Gateway",
 		"vercelAiGatewayApiKeyLabel": "API 密钥",
 		"vercelAiGatewayApiKeyPlaceholder": "输入您的 Vercel AI Gateway API 密钥",
+		"openRouterProvider": "OpenRouter",
+		"openRouterApiKeyLabel": "OpenRouter API 密钥",
+		"openRouterApiKeyPlaceholder": "输入您的 OpenRouter API 密钥",
 		"mistralProvider": "Mistral",
 		"mistralApiKeyLabel": "API 密钥:",
 		"mistralApiKeyPlaceholder": "输入您的 Mistral API 密钥",
@@ -124,7 +127,8 @@
 			"vercelAiGatewayApiKeyRequired": "需要 Vercel AI Gateway API 密钥",
 			"ollamaBaseUrlRequired": "需要 Ollama 基础 URL",
 			"baseUrlRequired": "需要基础 URL",
-			"modelDimensionMinValue": "模型维度必须大于 0"
+			"modelDimensionMinValue": "模型维度必须大于 0",
+			"openRouterApiKeyRequired": "需要 OpenRouter API 密钥"
 		},
 		"advancedConfigLabel": "高级配置",
 		"searchMinScoreLabel": "搜索分数阈值",

+ 5 - 1
webview-ui/src/i18n/locales/zh-TW/settings.json

@@ -55,6 +55,9 @@
 		"vercelAiGatewayProvider": "Vercel AI Gateway",
 		"vercelAiGatewayApiKeyLabel": "API 金鑰",
 		"vercelAiGatewayApiKeyPlaceholder": "輸入您的 Vercel AI Gateway API 金鑰",
+		"openRouterProvider": "OpenRouter",
+		"openRouterApiKeyLabel": "OpenRouter API 金鑰",
+		"openRouterApiKeyPlaceholder": "輸入您的 OpenRouter API 金鑰",
 		"mistralProvider": "Mistral",
 		"mistralApiKeyLabel": "API 金鑰:",
 		"mistralApiKeyPlaceholder": "輸入您的 Mistral API 金鑰",
@@ -124,7 +127,8 @@
 			"vercelAiGatewayApiKeyRequired": "需要 Vercel AI Gateway API 金鑰",
 			"ollamaBaseUrlRequired": "需要 Ollama 基礎 URL",
 			"baseUrlRequired": "需要基礎 URL",
-			"modelDimensionMinValue": "模型維度必須大於 0"
+			"modelDimensionMinValue": "模型維度必須大於 0",
+			"openRouterApiKeyRequired": "需要 OpenRouter API 金鑰"
 		},
 		"advancedConfigLabel": "進階設定",
 		"searchMinScoreLabel": "搜尋分數閾值",