Просмотр исходного кода

fix: improve error handling for codebase search embeddings (#4432)

Co-authored-by: Claude <[email protected]>
Co-authored-by: Daniel Riccio <[email protected]>
Hannes Rudolph 6 месяцев назад
Родитель
Commit
9b18b145b4
27 измененных файлов с 1117 добавлено и 49 удалено
  1. 23 0
      src/i18n/locales/ca/embeddings.json
  2. 23 0
      src/i18n/locales/de/embeddings.json
  3. 23 0
      src/i18n/locales/en/embeddings.json
  4. 23 0
      src/i18n/locales/es/embeddings.json
  5. 23 0
      src/i18n/locales/fr/embeddings.json
  6. 23 0
      src/i18n/locales/hi/embeddings.json
  7. 23 0
      src/i18n/locales/id/embeddings.json
  8. 23 0
      src/i18n/locales/it/embeddings.json
  9. 23 0
      src/i18n/locales/ja/embeddings.json
  10. 23 0
      src/i18n/locales/ko/embeddings.json
  11. 23 0
      src/i18n/locales/nl/embeddings.json
  12. 23 0
      src/i18n/locales/pl/embeddings.json
  13. 23 0
      src/i18n/locales/pt-BR/embeddings.json
  14. 23 0
      src/i18n/locales/ru/embeddings.json
  15. 23 0
      src/i18n/locales/tr/embeddings.json
  16. 23 0
      src/i18n/locales/vi/embeddings.json
  17. 23 0
      src/i18n/locales/zh-CN/embeddings.json
  18. 23 0
      src/i18n/locales/zh-TW/embeddings.json
  19. 83 6
      src/services/code-index/embedders/__tests__/openai-compatible.spec.ts
  20. 467 0
      src/services/code-index/embedders/__tests__/openai.spec.ts
  21. 9 6
      src/services/code-index/embedders/ollama.ts
  22. 40 16
      src/services/code-index/embedders/openai-compatible.ts
  23. 46 12
      src/services/code-index/embedders/openai.ts
  24. 16 0
      src/services/code-index/orchestrator.ts
  25. 23 3
      src/services/code-index/processors/scanner.ts
  26. 8 2
      src/services/code-index/vector-store/__tests__/qdrant-client.spec.ts
  27. 11 4
      src/services/code-index/vector-store/qdrant-client.ts

+ 23 - 0
src/i18n/locales/ca/embeddings.json

@@ -0,0 +1,23 @@
+{
+	"unknownError": "Error desconegut",
+	"authenticationFailed": "No s'han pogut crear les incrustacions: ha fallat l'autenticació. Comproveu la vostra clau d'API.",
+	"failedWithStatus": "No s'han pogut crear les incrustacions després de {{attempts}} intents: HTTP {{statusCode}} - {{errorMessage}}",
+	"failedWithError": "No s'han pogut crear les incrustacions després de {{attempts}} intents: {{errorMessage}}",
+	"failedMaxAttempts": "No s'han pogut crear les incrustacions després de {{attempts}} intents",
+	"textExceedsTokenLimit": "El text a l'índex {{index}} supera el límit màxim de testimonis ({{itemTokens}} > {{maxTokens}}). S'està ometent.",
+	"rateLimitRetry": "S'ha assolit el límit de velocitat, es torna a intentar en {{delayMs}}ms (intent {{attempt}}/{{maxRetries}})",
+	"ollama": {
+		"couldNotReadErrorBody": "No s'ha pogut llegir el cos de l'error",
+		"requestFailed": "La sol·licitud de l'API d'Ollama ha fallat amb l'estat {{status}} {{statusText}}: {{errorBody}}",
+		"invalidResponseStructure": "Estructura de resposta no vàlida de l'API d'Ollama: no s'ha trobat la matriu \"embeddings\" o no és una matriu.",
+		"embeddingFailed": "La incrustació d'Ollama ha fallat: {{message}}"
+	},
+	"scanner": {
+		"unknownErrorProcessingFile": "Error desconegut en processar el fitxer {{filePath}}",
+		"unknownErrorDeletingPoints": "Error desconegut en eliminar els punts per a {{filePath}}",
+		"failedToProcessBatchWithError": "No s'ha pogut processar el lot després de {{maxRetries}} intents: {{errorMessage}}"
+	},
+	"vectorStore": {
+		"qdrantConnectionFailed": "No s'ha pogut connectar a la base de dades vectorial Qdrant. Assegura't que Qdrant estigui funcionant i sigui accessible a {{qdrantUrl}}. Error: {{errorMessage}}"
+	}
+}

+ 23 - 0
src/i18n/locales/de/embeddings.json

@@ -0,0 +1,23 @@
+{
+	"unknownError": "Unbekannter Fehler",
+	"authenticationFailed": "Erstellung von Einbettungen fehlgeschlagen: Authentifizierung fehlgeschlagen. Bitte überprüfe deinen API-Schlüssel.",
+	"failedWithStatus": "Erstellung von Einbettungen nach {{attempts}} Versuchen fehlgeschlagen: HTTP {{statusCode}} - {{errorMessage}}",
+	"failedWithError": "Erstellung von Einbettungen nach {{attempts}} Versuchen fehlgeschlagen: {{errorMessage}}",
+	"failedMaxAttempts": "Erstellung von Einbettungen nach {{attempts}} Versuchen fehlgeschlagen",
+	"textExceedsTokenLimit": "Text bei Index {{index}} überschreitet das maximale Token-Limit ({{itemTokens}} > {{maxTokens}}). Wird übersprungen.",
+	"rateLimitRetry": "Ratenlimit erreicht, Wiederholung in {{delayMs}}ms (Versuch {{attempt}}/{{maxRetries}})",
+	"ollama": {
+		"couldNotReadErrorBody": "Fehlerinhalt konnte nicht gelesen werden",
+		"requestFailed": "Ollama API-Anfrage fehlgeschlagen mit Status {{status}} {{statusText}}: {{errorBody}}",
+		"invalidResponseStructure": "Ungültige Antwortstruktur von Ollama API: \"embeddings\" Array nicht gefunden oder kein Array.",
+		"embeddingFailed": "Ollama Einbettung fehlgeschlagen: {{message}}"
+	},
+	"scanner": {
+		"unknownErrorProcessingFile": "Unbekannter Fehler beim Verarbeiten der Datei {{filePath}}",
+		"unknownErrorDeletingPoints": "Unbekannter Fehler beim Löschen der Punkte für {{filePath}}",
+		"failedToProcessBatchWithError": "Verarbeitung des Batches nach {{maxRetries}} Versuchen fehlgeschlagen: {{errorMessage}}"
+	},
+	"vectorStore": {
+		"qdrantConnectionFailed": "Verbindung zur Qdrant-Vektordatenbank fehlgeschlagen. Stelle sicher, dass Qdrant läuft und unter {{qdrantUrl}} erreichbar ist. Fehler: {{errorMessage}}"
+	}
+}

+ 23 - 0
src/i18n/locales/en/embeddings.json

@@ -0,0 +1,23 @@
+{
+	"unknownError": "Unknown error",
+	"authenticationFailed": "Failed to create embeddings: Authentication failed. Please check your API key.",
+	"failedWithStatus": "Failed to create embeddings after {{attempts}} attempts: HTTP {{statusCode}} - {{errorMessage}}",
+	"failedWithError": "Failed to create embeddings after {{attempts}} attempts: {{errorMessage}}",
+	"failedMaxAttempts": "Failed to create embeddings after {{attempts}} attempts",
+	"textExceedsTokenLimit": "Text at index {{index}} exceeds maximum token limit ({{itemTokens}} > {{maxTokens}}). Skipping.",
+	"rateLimitRetry": "Rate limit hit, retrying in {{delayMs}}ms (attempt {{attempt}}/{{maxRetries}})",
+	"ollama": {
+		"couldNotReadErrorBody": "Could not read error body",
+		"requestFailed": "Ollama API request failed with status {{status}} {{statusText}}: {{errorBody}}",
+		"invalidResponseStructure": "Invalid response structure from Ollama API: \"embeddings\" array not found or not an array.",
+		"embeddingFailed": "Ollama embedding failed: {{message}}"
+	},
+	"scanner": {
+		"unknownErrorProcessingFile": "Unknown error processing file {{filePath}}",
+		"unknownErrorDeletingPoints": "Unknown error deleting points for {{filePath}}",
+		"failedToProcessBatchWithError": "Failed to process batch after {{maxRetries}} attempts: {{errorMessage}}"
+	},
+	"vectorStore": {
+		"qdrantConnectionFailed": "Failed to connect to Qdrant vector database. Please ensure Qdrant is running and accessible at {{qdrantUrl}}. Error: {{errorMessage}}"
+	}
+}

+ 23 - 0
src/i18n/locales/es/embeddings.json

@@ -0,0 +1,23 @@
+{
+	"unknownError": "Error desconocido",
+	"authenticationFailed": "No se pudieron crear las incrustaciones: Error de autenticación. Comprueba tu clave de API.",
+	"failedWithStatus": "No se pudieron crear las incrustaciones después de {{attempts}} intentos: HTTP {{statusCode}} - {{errorMessage}}",
+	"failedWithError": "No se pudieron crear las incrustaciones después de {{attempts}} intentos: {{errorMessage}}",
+	"failedMaxAttempts": "No se pudieron crear las incrustaciones después de {{attempts}} intentos",
+	"textExceedsTokenLimit": "El texto en el índice {{index}} supera el límite máximo de tokens ({{itemTokens}} > {{maxTokens}}). Omitiendo.",
+	"rateLimitRetry": "Límite de velocidad alcanzado, reintentando en {{delayMs}}ms (intento {{attempt}}/{{maxRetries}})",
+	"ollama": {
+		"couldNotReadErrorBody": "No se pudo leer el cuerpo del error",
+		"requestFailed": "La solicitud de la API de Ollama falló con estado {{status}} {{statusText}}: {{errorBody}}",
+		"invalidResponseStructure": "Estructura de respuesta inválida de la API de Ollama: array \"embeddings\" no encontrado o no es un array.",
+		"embeddingFailed": "Incrustación de Ollama falló: {{message}}"
+	},
+	"scanner": {
+		"unknownErrorProcessingFile": "Error desconocido procesando archivo {{filePath}}",
+		"unknownErrorDeletingPoints": "Error desconocido eliminando puntos para {{filePath}}",
+		"failedToProcessBatchWithError": "Error al procesar lote después de {{maxRetries}} intentos: {{errorMessage}}"
+	},
+	"vectorStore": {
+		"qdrantConnectionFailed": "Error al conectar con la base de datos vectorial Qdrant. Asegúrate de que Qdrant esté funcionando y sea accesible en {{qdrantUrl}}. Error: {{errorMessage}}"
+	}
+}

+ 23 - 0
src/i18n/locales/fr/embeddings.json

@@ -0,0 +1,23 @@
+{
+	"unknownError": "Erreur inconnue",
+	"authenticationFailed": "Échec de la création des embeddings : Échec de l'authentification. Veuillez vérifier votre clé API.",
+	"failedWithStatus": "Échec de la création des embeddings après {{attempts}} tentatives : HTTP {{statusCode}} - {{errorMessage}}",
+	"failedWithError": "Échec de la création des embeddings après {{attempts}} tentatives : {{errorMessage}}",
+	"failedMaxAttempts": "Échec de la création des embeddings après {{attempts}} tentatives",
+	"textExceedsTokenLimit": "Le texte à l'index {{index}} dépasse la limite maximale de tokens ({{itemTokens}} > {{maxTokens}}). Ignoré.",
+	"rateLimitRetry": "Limite de débit atteinte, nouvelle tentative dans {{delayMs}}ms (tentative {{attempt}}/{{maxRetries}})",
+	"ollama": {
+		"couldNotReadErrorBody": "Impossible de lire le corps de l'erreur",
+		"requestFailed": "Échec de la requête API Ollama avec le statut {{status}} {{statusText}} : {{errorBody}}",
+		"invalidResponseStructure": "Structure de réponse invalide de l'API Ollama : tableau \"embeddings\" non trouvé ou n'est pas un tableau.",
+		"embeddingFailed": "Échec de l'embedding Ollama : {{message}}"
+	},
+	"scanner": {
+		"unknownErrorProcessingFile": "Erreur inconnue lors du traitement du fichier {{filePath}}",
+		"unknownErrorDeletingPoints": "Erreur inconnue lors de la suppression des points pour {{filePath}}",
+		"failedToProcessBatchWithError": "Échec du traitement du lot après {{maxRetries}} tentatives : {{errorMessage}}"
+	},
+	"vectorStore": {
+		"qdrantConnectionFailed": "Échec de la connexion à la base de données vectorielle Qdrant. Veuillez vous assurer que Qdrant fonctionne et est accessible à {{qdrantUrl}}. Erreur : {{errorMessage}}"
+	}
+}

+ 23 - 0
src/i18n/locales/hi/embeddings.json

@@ -0,0 +1,23 @@
+{
+	"unknownError": "अज्ञात त्रुटि",
+	"authenticationFailed": "एम्बेडिंग बनाने में विफल: प्रमाणीकरण विफल। कृपया अपनी एपीआई कुंजी जांचें।",
+	"failedWithStatus": "{{attempts}} प्रयासों के बाद एम्बेडिंग बनाने में विफल: HTTP {{statusCode}} - {{errorMessage}}",
+	"failedWithError": "{{attempts}} प्रयासों के बाद एम्बेडिंग बनाने में विफल: {{errorMessage}}",
+	"failedMaxAttempts": "{{attempts}} प्रयासों के बाद एम्बेडिंग बनाने में विफल",
+	"textExceedsTokenLimit": "अनुक्रमणिका {{index}} पर पाठ अधिकतम टोकन सीमा ({{itemTokens}} > {{maxTokens}}) से अधिक है। छोड़ा जा रहा है।",
+	"rateLimitRetry": "दर सीमा समाप्त, {{delayMs}}ms में पुन: प्रयास किया जा रहा है (प्रयास {{attempt}}/{{maxRetries}})",
+	"ollama": {
+		"couldNotReadErrorBody": "त्रुटि सामग्री पढ़ नहीं सका",
+		"requestFailed": "Ollama API अनुरोध स्थिति {{status}} {{statusText}} के साथ विफल: {{errorBody}}",
+		"invalidResponseStructure": "Ollama API से अमान्य प्रतिक्रिया संरचना: \"embeddings\" सरणी नहीं मिली या सरणी नहीं है।",
+		"embeddingFailed": "Ollama एम्बेडिंग विफल: {{message}}"
+	},
+	"scanner": {
+		"unknownErrorProcessingFile": "फ़ाइल {{filePath}} प्रसंस्करण में अज्ञात त्रुटि",
+		"unknownErrorDeletingPoints": "{{filePath}} के लिए बिंदु हटाने में अज्ञात त्रुटि",
+		"failedToProcessBatchWithError": "{{maxRetries}} प्रयासों के बाद बैच प्रसंस्करण विफल: {{errorMessage}}"
+	},
+	"vectorStore": {
+		"qdrantConnectionFailed": "Qdrant वेक्टर डेटाबेस से कनेक्ट करने में विफल। कृपया सुनिश्चित करें कि Qdrant चल रहा है और {{qdrantUrl}} पर पहुंच योग्य है। त्रुटि: {{errorMessage}}"
+	}
+}

+ 23 - 0
src/i18n/locales/id/embeddings.json

@@ -0,0 +1,23 @@
+{
+	"unknownError": "Error tidak dikenal",
+	"authenticationFailed": "Gagal membuat embeddings: Autentikasi gagal. Silakan periksa API key Anda.",
+	"failedWithStatus": "Gagal membuat embeddings setelah {{attempts}} percobaan: HTTP {{statusCode}} - {{errorMessage}}",
+	"failedWithError": "Gagal membuat embeddings setelah {{attempts}} percobaan: {{errorMessage}}",
+	"failedMaxAttempts": "Gagal membuat embeddings setelah {{attempts}} percobaan",
+	"textExceedsTokenLimit": "Teks pada indeks {{index}} melebihi batas maksimum token ({{itemTokens}} > {{maxTokens}}). Dilewati.",
+	"rateLimitRetry": "Batas rate tercapai, mencoba lagi dalam {{delayMs}}ms (percobaan {{attempt}}/{{maxRetries}})",
+	"ollama": {
+		"couldNotReadErrorBody": "Tidak dapat membaca body error",
+		"requestFailed": "Permintaan API Ollama gagal dengan status {{status}} {{statusText}}: {{errorBody}}",
+		"invalidResponseStructure": "Struktur respons tidak valid dari API Ollama: array \"embeddings\" tidak ditemukan atau bukan array.",
+		"embeddingFailed": "Embedding Ollama gagal: {{message}}"
+	},
+	"scanner": {
+		"unknownErrorProcessingFile": "Error tidak dikenal saat memproses file {{filePath}}",
+		"unknownErrorDeletingPoints": "Error tidak dikenal saat menghapus points untuk {{filePath}}",
+		"failedToProcessBatchWithError": "Gagal memproses batch setelah {{maxRetries}} percobaan: {{errorMessage}}"
+	},
+	"vectorStore": {
+		"qdrantConnectionFailed": "Gagal terhubung ke database vektor Qdrant. Pastikan Qdrant berjalan dan dapat diakses di {{qdrantUrl}}. Error: {{errorMessage}}"
+	}
+}

+ 23 - 0
src/i18n/locales/it/embeddings.json

@@ -0,0 +1,23 @@
+{
+	"unknownError": "Errore sconosciuto",
+	"authenticationFailed": "Creazione degli embedding non riuscita: Autenticazione fallita. Controlla la tua chiave API.",
+	"failedWithStatus": "Creazione degli embedding non riuscita dopo {{attempts}} tentativi: HTTP {{statusCode}} - {{errorMessage}}",
+	"failedWithError": "Creazione degli embedding non riuscita dopo {{attempts}} tentativi: {{errorMessage}}",
+	"failedMaxAttempts": "Creazione degli embedding non riuscita dopo {{attempts}} tentativi",
+	"textExceedsTokenLimit": "Il testo all'indice {{index}} supera il limite massimo di token ({{itemTokens}} > {{maxTokens}}). Saltato.",
+	"rateLimitRetry": "Limite di velocità raggiunto, nuovo tentativo tra {{delayMs}}ms (tentativo {{attempt}}/{{maxRetries}})",
+	"ollama": {
+		"couldNotReadErrorBody": "Impossibile leggere il corpo dell'errore",
+		"requestFailed": "Richiesta API Ollama fallita con stato {{status}} {{statusText}}: {{errorBody}}",
+		"invalidResponseStructure": "Struttura di risposta non valida dall'API Ollama: array \"embeddings\" non trovato o non è un array.",
+		"embeddingFailed": "Embedding Ollama fallito: {{message}}"
+	},
+	"scanner": {
+		"unknownErrorProcessingFile": "Errore sconosciuto nell'elaborazione del file {{filePath}}",
+		"unknownErrorDeletingPoints": "Errore sconosciuto nell'eliminazione dei punti per {{filePath}}",
+		"failedToProcessBatchWithError": "Elaborazione del batch fallita dopo {{maxRetries}} tentativi: {{errorMessage}}"
+	},
+	"vectorStore": {
+		"qdrantConnectionFailed": "Impossibile connettersi al database vettoriale Qdrant. Assicurati che Qdrant sia in esecuzione e accessibile su {{qdrantUrl}}. Errore: {{errorMessage}}"
+	}
+}

+ 23 - 0
src/i18n/locales/ja/embeddings.json

@@ -0,0 +1,23 @@
+{
+	"unknownError": "不明なエラー",
+	"authenticationFailed": "埋め込みの作成に失敗しました:認証に失敗しました。APIキーを確認してください。",
+	"failedWithStatus": "{{attempts}}回試行しましたが、埋め込みの作成に失敗しました:HTTP {{statusCode}} - {{errorMessage}}",
+	"failedWithError": "{{attempts}}回試行しましたが、埋め込みの作成に失敗しました:{{errorMessage}}",
+	"failedMaxAttempts": "{{attempts}}回試行しましたが、埋め込みの作成に失敗しました",
+	"textExceedsTokenLimit": "インデックス{{index}}のテキストが最大トークン制限を超えています({{itemTokens}} > {{maxTokens}})。スキップします。",
+	"rateLimitRetry": "レート制限に達しました。{{delayMs}}ミリ秒後に再試行します(試行{{attempt}}/{{maxRetries}})",
+	"ollama": {
+		"couldNotReadErrorBody": "エラー本文を読み取れませんでした",
+		"requestFailed": "Ollama APIリクエストが失敗しました。ステータス {{status}} {{statusText}}: {{errorBody}}",
+		"invalidResponseStructure": "Ollama APIからの無効な応答構造:\"embeddings\"配列が見つからないか、配列ではありません。",
+		"embeddingFailed": "Ollama埋め込みが失敗しました:{{message}}"
+	},
+	"scanner": {
+		"unknownErrorProcessingFile": "ファイル{{filePath}}の処理中に不明なエラーが発生しました",
+		"unknownErrorDeletingPoints": "{{filePath}}のポイント削除中に不明なエラーが発生しました",
+		"failedToProcessBatchWithError": "{{maxRetries}}回の試行後、バッチ処理に失敗しました:{{errorMessage}}"
+	},
+	"vectorStore": {
+		"qdrantConnectionFailed": "Qdrantベクターデータベースへの接続に失敗しました。Qdrantが実行中で{{qdrantUrl}}でアクセス可能であることを確認してください。エラー:{{errorMessage}}"
+	}
+}

+ 23 - 0
src/i18n/locales/ko/embeddings.json

@@ -0,0 +1,23 @@
+{
+	"unknownError": "알 수 없는 오류",
+	"authenticationFailed": "임베딩 생성 실패: 인증에 실패했습니다. API 키를 확인하세요.",
+	"failedWithStatus": "{{attempts}}번 시도 후 임베딩 생성 실패: HTTP {{statusCode}} - {{errorMessage}}",
+	"failedWithError": "{{attempts}}번 시도 후 임베딩 생성 실패: {{errorMessage}}",
+	"failedMaxAttempts": "{{attempts}}번 시도 후 임베딩 생성 실패",
+	"textExceedsTokenLimit": "인덱스 {{index}}의 텍스트가 최대 토큰 제한({{itemTokens}} > {{maxTokens}})을 초과했습니다. 건너뜁니다.",
+	"rateLimitRetry": "속도 제한에 도달했습니다. {{delayMs}}ms 후에 다시 시도합니다(시도 {{attempt}}/{{maxRetries}}).",
+	"ollama": {
+		"couldNotReadErrorBody": "오류 본문을 읽을 수 없습니다",
+		"requestFailed": "Ollama API 요청이 실패했습니다. 상태 {{status}} {{statusText}}: {{errorBody}}",
+		"invalidResponseStructure": "Ollama API에서 잘못된 응답 구조: \"embeddings\" 배열을 찾을 수 없거나 배열이 아닙니다.",
+		"embeddingFailed": "Ollama 임베딩 실패: {{message}}"
+	},
+	"scanner": {
+		"unknownErrorProcessingFile": "파일 {{filePath}} 처리 중 알 수 없는 오류",
+		"unknownErrorDeletingPoints": "{{filePath}}의 포인트 삭제 중 알 수 없는 오류",
+		"failedToProcessBatchWithError": "{{maxRetries}}번 시도 후 배치 처리 실패: {{errorMessage}}"
+	},
+	"vectorStore": {
+		"qdrantConnectionFailed": "Qdrant 벡터 데이터베이스에 연결하지 못했습니다. Qdrant가 실행 중이고 {{qdrantUrl}}에서 접근 가능한지 확인하세요. 오류: {{errorMessage}}"
+	}
+}

+ 23 - 0
src/i18n/locales/nl/embeddings.json

@@ -0,0 +1,23 @@
+{
+	"unknownError": "Onbekende fout",
+	"authenticationFailed": "Insluitingen maken mislukt: Authenticatie mislukt. Controleer je API-sleutel.",
+	"failedWithStatus": "Insluitingen maken mislukt na {{attempts}} pogingen: HTTP {{statusCode}} - {{errorMessage}}",
+	"failedWithError": "Insluitingen maken mislukt na {{attempts}} pogingen: {{errorMessage}}",
+	"failedMaxAttempts": "Insluitingen maken mislukt na {{attempts}} pogingen",
+	"textExceedsTokenLimit": "Tekst op index {{index}} overschrijdt de maximale tokenlimiet ({{itemTokens}} > {{maxTokens}}). Wordt overgeslagen.",
+	"rateLimitRetry": "Snelheidslimiet bereikt, opnieuw proberen over {{delayMs}}ms (poging {{attempt}}/{{maxRetries}})",
+	"ollama": {
+		"couldNotReadErrorBody": "Kon foutinhoud niet lezen",
+		"requestFailed": "Ollama API-verzoek mislukt met status {{status}} {{statusText}}: {{errorBody}}",
+		"invalidResponseStructure": "Ongeldige responsstructuur van Ollama API: \"embeddings\" array niet gevonden of is geen array.",
+		"embeddingFailed": "Ollama insluiting mislukt: {{message}}"
+	},
+	"scanner": {
+		"unknownErrorProcessingFile": "Onbekende fout bij verwerken van bestand {{filePath}}",
+		"unknownErrorDeletingPoints": "Onbekende fout bij verwijderen van punten voor {{filePath}}",
+		"failedToProcessBatchWithError": "Verwerken van batch mislukt na {{maxRetries}} pogingen: {{errorMessage}}"
+	},
+	"vectorStore": {
+		"qdrantConnectionFailed": "Kan geen verbinding maken met Qdrant vectordatabase. Zorg ervoor dat Qdrant draait en toegankelijk is op {{qdrantUrl}}. Fout: {{errorMessage}}"
+	}
+}

+ 23 - 0
src/i18n/locales/pl/embeddings.json

@@ -0,0 +1,23 @@
+{
+	"unknownError": "Nieznany błąd",
+	"authenticationFailed": "Nie udało się utworzyć osadzeń: Uwierzytelnianie nie powiodło się. Sprawdź swój klucz API.",
+	"failedWithStatus": "Nie udało się utworzyć osadzeń po {{attempts}} próbach: HTTP {{statusCode}} - {{errorMessage}}",
+	"failedWithError": "Nie udało się utworzyć osadzeń po {{attempts}} próbach: {{errorMessage}}",
+	"failedMaxAttempts": "Nie udało się utworzyć osadzeń po {{attempts}} próbach",
+	"textExceedsTokenLimit": "Tekst w indeksie {{index}} przekracza maksymalny limit tokenów ({{itemTokens}} > {{maxTokens}}). Pomijanie.",
+	"rateLimitRetry": "Osiągnięto limit szybkości, ponawianie za {{delayMs}}ms (próba {{attempt}}/{{maxRetries}})",
+	"ollama": {
+		"couldNotReadErrorBody": "Nie można odczytać treści błędu",
+		"requestFailed": "Żądanie API Ollama nie powiodło się ze statusem {{status}} {{statusText}}: {{errorBody}}",
+		"invalidResponseStructure": "Nieprawidłowa struktura odpowiedzi z API Ollama: tablica \"embeddings\" nie została znaleziona lub nie jest tablicą.",
+		"embeddingFailed": "Osadzenie Ollama nie powiodło się: {{message}}"
+	},
+	"scanner": {
+		"unknownErrorProcessingFile": "Nieznany błąd podczas przetwarzania pliku {{filePath}}",
+		"unknownErrorDeletingPoints": "Nieznany błąd podczas usuwania punktów dla {{filePath}}",
+		"failedToProcessBatchWithError": "Nie udało się przetworzyć partii po {{maxRetries}} próbach: {{errorMessage}}"
+	},
+	"vectorStore": {
+		"qdrantConnectionFailed": "Nie udało się połączyć z bazą danych wektorowych Qdrant. Upewnij się, że Qdrant jest uruchomiony i dostępny pod adresem {{qdrantUrl}}. Błąd: {{errorMessage}}"
+	}
+}

+ 23 - 0
src/i18n/locales/pt-BR/embeddings.json

@@ -0,0 +1,23 @@
+{
+	"unknownError": "Erro desconhecido",
+	"authenticationFailed": "Falha ao criar embeddings: Falha na autenticação. Verifique sua chave de API.",
+	"failedWithStatus": "Falha ao criar embeddings após {{attempts}} tentativas: HTTP {{statusCode}} - {{errorMessage}}",
+	"failedWithError": "Falha ao criar embeddings após {{attempts}} tentativas: {{errorMessage}}",
+	"failedMaxAttempts": "Falha ao criar embeddings após {{attempts}} tentativas",
+	"textExceedsTokenLimit": "O texto no índice {{index}} excede o limite máximo de tokens ({{itemTokens}} > {{maxTokens}}). Ignorando.",
+	"rateLimitRetry": "Limite de taxa atingido, tentando novamente em {{delayMs}}ms (tentativa {{attempt}}/{{maxRetries}})",
+	"ollama": {
+		"couldNotReadErrorBody": "Não foi possível ler o corpo do erro",
+		"requestFailed": "Solicitação da API Ollama falhou com status {{status}} {{statusText}}: {{errorBody}}",
+		"invalidResponseStructure": "Estrutura de resposta inválida da API Ollama: array \"embeddings\" não encontrado ou não é um array.",
+		"embeddingFailed": "Embedding Ollama falhou: {{message}}"
+	},
+	"scanner": {
+		"unknownErrorProcessingFile": "Erro desconhecido ao processar arquivo {{filePath}}",
+		"unknownErrorDeletingPoints": "Erro desconhecido ao deletar pontos para {{filePath}}",
+		"failedToProcessBatchWithError": "Falha ao processar lote após {{maxRetries}} tentativas: {{errorMessage}}"
+	},
+	"vectorStore": {
+		"qdrantConnectionFailed": "Falha ao conectar com o banco de dados vetorial Qdrant. Certifique-se de que o Qdrant esteja rodando e acessível em {{qdrantUrl}}. Erro: {{errorMessage}}"
+	}
+}

+ 23 - 0
src/i18n/locales/ru/embeddings.json

@@ -0,0 +1,23 @@
+{
+	"unknownError": "Неизвестная ошибка",
+	"authenticationFailed": "Не удалось создать эмбеддинги: Ошибка аутентификации. Проверьте свой ключ API.",
+	"failedWithStatus": "Не удалось создать эмбеддинги после {{attempts}} попыток: HTTP {{statusCode}} - {{errorMessage}}",
+	"failedWithError": "Не удалось создать эмбеддинги после {{attempts}} попыток: {{errorMessage}}",
+	"failedMaxAttempts": "Не удалось создать эмбеддинги после {{attempts}} попыток",
+	"textExceedsTokenLimit": "Текст в индексе {{index}} превышает максимальный лимит токенов ({{itemTokens}} > {{maxTokens}}). Пропускается.",
+	"rateLimitRetry": "Достигнут лимит скорости, повторная попытка через {{delayMs}} мс (попытка {{attempt}}/{{maxRetries}})",
+	"ollama": {
+		"couldNotReadErrorBody": "Не удалось прочитать тело ошибки",
+		"requestFailed": "Запрос к API Ollama не удался со статусом {{status}} {{statusText}}: {{errorBody}}",
+		"invalidResponseStructure": "Неверная структура ответа от API Ollama: массив \"embeddings\" не найден или не является массивом.",
+		"embeddingFailed": "Эмбеддинг Ollama не удался: {{message}}"
+	},
+	"scanner": {
+		"unknownErrorProcessingFile": "Неизвестная ошибка при обработке файла {{filePath}}",
+		"unknownErrorDeletingPoints": "Неизвестная ошибка при удалении точек для {{filePath}}",
+		"failedToProcessBatchWithError": "Не удалось обработать пакет после {{maxRetries}} попыток: {{errorMessage}}"
+	},
+	"vectorStore": {
+		"qdrantConnectionFailed": "Не удалось подключиться к векторной базе данных Qdrant. Убедитесь, что Qdrant запущен и доступен по адресу {{qdrantUrl}}. Ошибка: {{errorMessage}}"
+	}
+}

+ 23 - 0
src/i18n/locales/tr/embeddings.json

@@ -0,0 +1,23 @@
+{
+	"unknownError": "Bilinmeyen hata",
+	"authenticationFailed": "Gömülmeler oluşturulamadı: Kimlik doğrulama başarısız oldu. Lütfen API anahtarınızı kontrol edin.",
+	"failedWithStatus": "{{attempts}} denemeden sonra gömülmeler oluşturulamadı: HTTP {{statusCode}} - {{errorMessage}}",
+	"failedWithError": "{{attempts}} denemeden sonra gömülmeler oluşturulamadı: {{errorMessage}}",
+	"failedMaxAttempts": "{{attempts}} denemeden sonra gömülmeler oluşturulamadı",
+	"textExceedsTokenLimit": "{{index}} dizinindeki metin maksimum jeton sınırını aşıyor ({{itemTokens}} > {{maxTokens}}). Atlanıyor.",
+	"rateLimitRetry": "Hız sınırına ulaşıldı, {{delayMs}}ms içinde yeniden deneniyor (deneme {{attempt}}/{{maxRetries}})",
+	"ollama": {
+		"couldNotReadErrorBody": "Hata gövdesi okunamadı",
+		"requestFailed": "Ollama API isteği {{status}} {{statusText}} durumuyla başarısız oldu: {{errorBody}}",
+		"invalidResponseStructure": "Ollama API'den geçersiz yanıt yapısı: \"embeddings\" dizisi bulunamadı veya dizi değil.",
+		"embeddingFailed": "Ollama gömülmesi başarısız oldu: {{message}}"
+	},
+	"scanner": {
+		"unknownErrorProcessingFile": "{{filePath}} dosyası işlenirken bilinmeyen hata",
+		"unknownErrorDeletingPoints": "{{filePath}} için noktalar silinirken bilinmeyen hata",
+		"failedToProcessBatchWithError": "{{maxRetries}} denemeden sonra toplu işlem başarısız oldu: {{errorMessage}}"
+	},
+	"vectorStore": {
+		"qdrantConnectionFailed": "Qdrant vektör veritabanına bağlanılamadı. Qdrant'ın çalıştığından ve {{qdrantUrl}} adresinde erişilebilir olduğundan emin olun. Hata: {{errorMessage}}"
+	}
+}

+ 23 - 0
src/i18n/locales/vi/embeddings.json

@@ -0,0 +1,23 @@
+{
+	"unknownError": "Lỗi không xác định",
+	"authenticationFailed": "Không thể tạo nhúng: Xác thực không thành công. Vui lòng kiểm tra khóa API của bạn.",
+	"failedWithStatus": "Không thể tạo nhúng sau {{attempts}} lần thử: HTTP {{statusCode}} - {{errorMessage}}",
+	"failedWithError": "Không thể tạo nhúng sau {{attempts}} lần thử: {{errorMessage}}",
+	"failedMaxAttempts": "Không thể tạo nhúng sau {{attempts}} lần thử",
+	"textExceedsTokenLimit": "Văn bản tại chỉ mục {{index}} vượt quá giới hạn mã thông báo tối đa ({{itemTokens}} > {{maxTokens}}). Bỏ qua.",
+	"rateLimitRetry": "Đã đạt đến giới hạn tốc độ, thử lại sau {{delayMs}}ms (lần thử {{attempt}}/{{maxRetries}})",
+	"ollama": {
+		"couldNotReadErrorBody": "Không thể đọc nội dung lỗi",
+		"requestFailed": "Yêu cầu API Ollama thất bại với trạng thái {{status}} {{statusText}}: {{errorBody}}",
+		"invalidResponseStructure": "Cấu trúc phản hồi không hợp lệ từ API Ollama: không tìm thấy mảng \"embeddings\" hoặc không phải là mảng.",
+		"embeddingFailed": "Nhúng Ollama thất bại: {{message}}"
+	},
+	"scanner": {
+		"unknownErrorProcessingFile": "Lỗi không xác định khi xử lý tệp {{filePath}}",
+		"unknownErrorDeletingPoints": "Lỗi không xác định khi xóa điểm cho {{filePath}}",
+		"failedToProcessBatchWithError": "Không thể xử lý lô sau {{maxRetries}} lần thử: {{errorMessage}}"
+	},
+	"vectorStore": {
+		"qdrantConnectionFailed": "Không thể kết nối với cơ sở dữ liệu vector Qdrant. Vui lòng đảm bảo Qdrant đang chạy và có thể truy cập tại {{qdrantUrl}}. Lỗi: {{errorMessage}}"
+	}
+}

+ 23 - 0
src/i18n/locales/zh-CN/embeddings.json

@@ -0,0 +1,23 @@
+{
+	"unknownError": "未知错误",
+	"authenticationFailed": "创建嵌入失败:身份验证失败。请检查您的 API 密钥。",
+	"failedWithStatus": "尝试 {{attempts}} 次后创建嵌入失败:HTTP {{statusCode}} - {{errorMessage}}",
+	"failedWithError": "尝试 {{attempts}} 次后创建嵌入失败:{{errorMessage}}",
+	"failedMaxAttempts": "尝试 {{attempts}} 次后创建嵌入失败",
+	"textExceedsTokenLimit": "索引 {{index}} 处的文本超过最大令牌限制 ({{itemTokens}} > {{maxTokens}})。正在跳过。",
+	"rateLimitRetry": "已达到速率限制,将在 {{delayMs}} 毫秒后重试(尝试次数 {{attempt}}/{{maxRetries}})",
+	"ollama": {
+		"couldNotReadErrorBody": "无法读取错误内容",
+		"requestFailed": "Ollama API 请求失败,状态码 {{status}} {{statusText}}:{{errorBody}}",
+		"invalidResponseStructure": "Ollama API 响应结构无效:未找到 \"embeddings\" 数组或不是数组。",
+		"embeddingFailed": "Ollama 嵌入失败:{{message}}"
+	},
+	"scanner": {
+		"unknownErrorProcessingFile": "处理文件 {{filePath}} 时出现未知错误",
+		"unknownErrorDeletingPoints": "删除 {{filePath}} 的数据点时出现未知错误",
+		"failedToProcessBatchWithError": "尝试 {{maxRetries}} 次后批次处理失败:{{errorMessage}}"
+	},
+	"vectorStore": {
+		"qdrantConnectionFailed": "连接 Qdrant 向量数据库失败。请确保 Qdrant 正在运行并可在 {{qdrantUrl}} 访问。错误:{{errorMessage}}"
+	}
+}

+ 23 - 0
src/i18n/locales/zh-TW/embeddings.json

@@ -0,0 +1,23 @@
+{
+	"unknownError": "未知錯誤",
+	"authenticationFailed": "建立內嵌失敗:驗證失敗。請檢查您的 API 金鑰。",
+	"failedWithStatus": "嘗試 {{attempts}} 次後建立內嵌失敗:HTTP {{statusCode}} - {{errorMessage}}",
+	"failedWithError": "嘗試 {{attempts}} 次後建立內嵌失敗:{{errorMessage}}",
+	"failedMaxAttempts": "嘗試 {{attempts}} 次後建立內嵌失敗",
+	"textExceedsTokenLimit": "索引 {{index}} 處的文字超過最大權杖限制 ({{itemTokens}} > {{maxTokens}})。正在略過。",
+	"rateLimitRetry": "已達到速率限制,將在 {{delayMs}} 毫秒後重試(嘗試次數 {{attempt}}/{{maxRetries}})",
+	"ollama": {
+		"couldNotReadErrorBody": "無法讀取錯誤內容",
+		"requestFailed": "Ollama API 請求失敗,狀態碼 {{status}} {{statusText}}:{{errorBody}}",
+		"invalidResponseStructure": "Ollama API 回應結構無效:未找到 \"embeddings\" 陣列或不是陣列。",
+		"embeddingFailed": "Ollama 內嵌失敗:{{message}}"
+	},
+	"scanner": {
+		"unknownErrorProcessingFile": "處理檔案 {{filePath}} 時發生未知錯誤",
+		"unknownErrorDeletingPoints": "刪除 {{filePath}} 的資料點時發生未知錯誤",
+		"failedToProcessBatchWithError": "嘗試 {{maxRetries}} 次後批次處理失敗:{{errorMessage}}"
+	},
+	"vectorStore": {
+		"qdrantConnectionFailed": "連接 Qdrant 向量資料庫失敗。請確保 Qdrant 正在執行並可在 {{qdrantUrl}} 存取。錯誤:{{errorMessage}}"
+	}
+}

+ 83 - 6
src/services/code-index/embedders/__tests__/openai-compatible.spec.ts

@@ -6,6 +6,23 @@ import { MAX_ITEM_TOKENS, INITIAL_RETRY_DELAY_MS } from "../../constants"
 // Mock the OpenAI SDK
 vitest.mock("openai")
 
+// Mock i18n
+vitest.mock("../../../../i18n", () => ({
+	t: (key: string, params?: Record<string, any>) => {
+		const translations: Record<string, string> = {
+			"embeddings:authenticationFailed":
+				"Failed to create embeddings: Authentication failed. Please check your API key.",
+			"embeddings:failedWithStatus": `Failed to create embeddings after ${params?.attempts} attempts: HTTP ${params?.statusCode} - ${params?.errorMessage}`,
+			"embeddings:failedWithError": `Failed to create embeddings after ${params?.attempts} attempts: ${params?.errorMessage}`,
+			"embeddings:failedMaxAttempts": `Failed to create embeddings after ${params?.attempts} attempts`,
+			"embeddings:textExceedsTokenLimit": `Text at index ${params?.index} exceeds maximum token limit (${params?.itemTokens} > ${params?.maxTokens}). Skipping.`,
+			"embeddings:rateLimitRetry": `Rate limit hit, retrying in ${params?.delayMs}ms (attempt ${params?.attempt}/${params?.maxRetries})`,
+			"embeddings:unknownError": "Unknown error",
+		}
+		return translations[key] || key
+	},
+}))
+
 const MockedOpenAI = OpenAI as MockedClass<typeof OpenAI>
 
 describe("OpenAICompatibleEmbedder", () => {
@@ -378,7 +395,7 @@ describe("OpenAICompatibleEmbedder", () => {
 				mockEmbeddingsCreate.mockRejectedValue(authError)
 
 				await expect(embedder.createEmbeddings(testTexts)).rejects.toThrow(
-					"Failed to create embeddings: batch processing error",
+					"Failed to create embeddings: Authentication failed. Please check your API key.",
 				)
 
 				expect(mockEmbeddingsCreate).toHaveBeenCalledTimes(1)
@@ -393,7 +410,7 @@ describe("OpenAICompatibleEmbedder", () => {
 				mockEmbeddingsCreate.mockRejectedValue(serverError)
 
 				await expect(embedder.createEmbeddings(testTexts)).rejects.toThrow(
-					"Failed to create embeddings: batch processing error",
+					"Failed to create embeddings after 3 attempts: HTTP 500 - Internal server error",
 				)
 
 				expect(mockEmbeddingsCreate).toHaveBeenCalledTimes(1)
@@ -411,11 +428,11 @@ describe("OpenAICompatibleEmbedder", () => {
 				mockEmbeddingsCreate.mockRejectedValue(apiError)
 
 				await expect(embedder.createEmbeddings(testTexts)).rejects.toThrow(
-					"Failed to create embeddings: batch processing error",
+					"Failed to create embeddings after 3 attempts: API connection failed",
 				)
 
 				expect(console.error).toHaveBeenCalledWith(
-					expect.stringContaining("Failed to process batch"),
+					expect.stringContaining("OpenAI Compatible embedder error"),
 					expect.any(Error),
 				)
 			})
@@ -427,10 +444,13 @@ describe("OpenAICompatibleEmbedder", () => {
 				mockEmbeddingsCreate.mockRejectedValue(batchError)
 
 				await expect(embedder.createEmbeddings(testTexts)).rejects.toThrow(
-					"Failed to create embeddings: batch processing error",
+					"Failed to create embeddings after 3 attempts: Batch processing failed",
 				)
 
-				expect(console.error).toHaveBeenCalledWith("Failed to process batch:", batchError)
+				expect(console.error).toHaveBeenCalledWith(
+					expect.stringContaining("OpenAI Compatible embedder error"),
+					batchError,
+				)
 			})
 
 			it("should handle empty text arrays", async () => {
@@ -456,6 +476,63 @@ describe("OpenAICompatibleEmbedder", () => {
 
 				await expect(embedder.createEmbeddings(testTexts)).rejects.toThrow()
 			})
+
+			it("should provide specific authentication error message", async () => {
+				const testTexts = ["Hello world"]
+				const authError = new Error("Invalid API key")
+				;(authError as any).status = 401
+
+				mockEmbeddingsCreate.mockRejectedValue(authError)
+
+				await expect(embedder.createEmbeddings(testTexts)).rejects.toThrow(
+					"Failed to create embeddings: Authentication failed. Please check your API key.",
+				)
+			})
+
+			it("should provide detailed error message for HTTP errors", async () => {
+				const testTexts = ["Hello world"]
+				const httpError = new Error("Bad request")
+				;(httpError as any).status = 400
+
+				mockEmbeddingsCreate.mockRejectedValue(httpError)
+
+				await expect(embedder.createEmbeddings(testTexts)).rejects.toThrow(
+					"Failed to create embeddings after 3 attempts: HTTP 400 - Bad request",
+				)
+			})
+
+			it("should handle errors without status codes", async () => {
+				const testTexts = ["Hello world"]
+				const networkError = new Error("Network timeout")
+
+				mockEmbeddingsCreate.mockRejectedValue(networkError)
+
+				await expect(embedder.createEmbeddings(testTexts)).rejects.toThrow(
+					"Failed to create embeddings after 3 attempts: Network timeout",
+				)
+			})
+
+			it("should handle errors without message property", async () => {
+				const testTexts = ["Hello world"]
+				const weirdError = { toString: () => "Custom error object" }
+
+				mockEmbeddingsCreate.mockRejectedValue(weirdError)
+
+				await expect(embedder.createEmbeddings(testTexts)).rejects.toThrow(
+					"Failed to create embeddings after 3 attempts: Custom error object",
+				)
+			})
+
+			it("should handle completely unknown error types", async () => {
+				const testTexts = ["Hello world"]
+				const unknownError = null
+
+				mockEmbeddingsCreate.mockRejectedValue(unknownError)
+
+				await expect(embedder.createEmbeddings(testTexts)).rejects.toThrow(
+					"Failed to create embeddings after 3 attempts: Unknown error",
+				)
+			})
 		})
 
 		/**

+ 467 - 0
src/services/code-index/embedders/__tests__/openai.spec.ts

@@ -0,0 +1,467 @@
+import { vitest, describe, it, expect, beforeEach, afterEach } from "vitest"
+import type { MockedClass, MockedFunction } from "vitest"
+import { OpenAI } from "openai"
+import { OpenAiEmbedder } from "../openai"
+import { MAX_BATCH_TOKENS, MAX_ITEM_TOKENS, MAX_BATCH_RETRIES, INITIAL_RETRY_DELAY_MS } from "../../constants"
+
+// Mock the OpenAI SDK
+vitest.mock("openai")
+
+// Mock i18n
+vitest.mock("../../../../i18n", () => ({
+	t: (key: string, params?: Record<string, any>) => {
+		const translations: Record<string, string> = {
+			"embeddings:authenticationFailed":
+				"Failed to create embeddings: Authentication failed. Please check your OpenAI API key.",
+			"embeddings:failedWithStatus": `Failed to create embeddings after ${params?.attempts} attempts: HTTP ${params?.statusCode} - ${params?.errorMessage}`,
+			"embeddings:failedWithError": `Failed to create embeddings after ${params?.attempts} attempts: ${params?.errorMessage}`,
+			"embeddings:failedMaxAttempts": `Failed to create embeddings after ${params?.attempts} attempts`,
+			"embeddings:textExceedsTokenLimit": `Text at index ${params?.index} exceeds maximum token limit (${params?.itemTokens} > ${params?.maxTokens}). Skipping.`,
+			"embeddings:rateLimitRetry": `Rate limit hit, retrying in ${params?.delayMs}ms (attempt ${params?.attempt}/${params?.maxRetries})`,
+		}
+		return translations[key] || key
+	},
+}))
+
+// Mock console methods
+const consoleMocks = {
+	error: vitest.spyOn(console, "error").mockImplementation(() => {}),
+	warn: vitest.spyOn(console, "warn").mockImplementation(() => {}),
+}
+
+describe("OpenAiEmbedder", () => {
+	let embedder: OpenAiEmbedder
+	let mockEmbeddingsCreate: MockedFunction<any>
+	let MockedOpenAI: MockedClass<typeof OpenAI>
+
+	beforeEach(() => {
+		vitest.clearAllMocks()
+		consoleMocks.error.mockClear()
+		consoleMocks.warn.mockClear()
+
+		MockedOpenAI = OpenAI as MockedClass<typeof OpenAI>
+		mockEmbeddingsCreate = vitest.fn()
+
+		MockedOpenAI.prototype.embeddings = {
+			create: mockEmbeddingsCreate,
+		} as any
+
+		embedder = new OpenAiEmbedder({
+			openAiNativeApiKey: "test-api-key",
+			openAiEmbeddingModelId: "text-embedding-3-small",
+		})
+	})
+
+	afterEach(() => {
+		vitest.clearAllMocks()
+	})
+
+	describe("constructor", () => {
+		it("should initialize with provided options", () => {
+			expect(MockedOpenAI).toHaveBeenCalledWith({ apiKey: "test-api-key" })
+			expect(embedder.embedderInfo.name).toBe("openai")
+		})
+
+		it("should use 'not-provided' if API key is not provided", () => {
+			const embedderWithoutKey = new OpenAiEmbedder({
+				openAiEmbeddingModelId: "text-embedding-3-small",
+			})
+
+			expect(MockedOpenAI).toHaveBeenCalledWith({ apiKey: "not-provided" })
+		})
+
+		it("should use default model if not specified", () => {
+			const embedderWithDefaultModel = new OpenAiEmbedder({
+				openAiNativeApiKey: "test-api-key",
+			})
+			// We can't directly test the defaultModelId but it should be text-embedding-3-small
+			expect(embedderWithDefaultModel).toBeDefined()
+		})
+	})
+
+	describe("createEmbeddings", () => {
+		const testModelId = "text-embedding-3-small"
+
+		it("should create embeddings for a single text", async () => {
+			const testTexts = ["Hello world"]
+			const mockResponse = {
+				data: [{ embedding: [0.1, 0.2, 0.3] }],
+				usage: { prompt_tokens: 10, total_tokens: 15 },
+			}
+			mockEmbeddingsCreate.mockResolvedValue(mockResponse)
+
+			const result = await embedder.createEmbeddings(testTexts)
+
+			expect(mockEmbeddingsCreate).toHaveBeenCalledWith({
+				input: testTexts,
+				model: testModelId,
+			})
+			expect(result).toEqual({
+				embeddings: [[0.1, 0.2, 0.3]],
+				usage: { promptTokens: 10, totalTokens: 15 },
+			})
+		})
+
+		it("should create embeddings for multiple texts", async () => {
+			const testTexts = ["Hello world", "Another text"]
+			const mockResponse = {
+				data: [{ embedding: [0.1, 0.2, 0.3] }, { embedding: [0.4, 0.5, 0.6] }],
+				usage: { prompt_tokens: 20, total_tokens: 30 },
+			}
+			mockEmbeddingsCreate.mockResolvedValue(mockResponse)
+
+			const result = await embedder.createEmbeddings(testTexts)
+
+			expect(mockEmbeddingsCreate).toHaveBeenCalledWith({
+				input: testTexts,
+				model: testModelId,
+			})
+			expect(result).toEqual({
+				embeddings: [
+					[0.1, 0.2, 0.3],
+					[0.4, 0.5, 0.6],
+				],
+				usage: { promptTokens: 20, totalTokens: 30 },
+			})
+		})
+
+		it("should use custom model when provided", async () => {
+			const testTexts = ["Hello world"]
+			const customModel = "text-embedding-ada-002"
+			const mockResponse = {
+				data: [{ embedding: [0.1, 0.2, 0.3] }],
+				usage: { prompt_tokens: 10, total_tokens: 15 },
+			}
+			mockEmbeddingsCreate.mockResolvedValue(mockResponse)
+
+			await embedder.createEmbeddings(testTexts, customModel)
+
+			expect(mockEmbeddingsCreate).toHaveBeenCalledWith({
+				input: testTexts,
+				model: customModel,
+			})
+		})
+
+		it("should handle missing usage data gracefully", async () => {
+			const testTexts = ["Hello world"]
+			const mockResponse = {
+				data: [{ embedding: [0.1, 0.2, 0.3] }],
+				usage: undefined,
+			}
+			mockEmbeddingsCreate.mockResolvedValue(mockResponse)
+
+			const result = await embedder.createEmbeddings(testTexts)
+
+			expect(result).toEqual({
+				embeddings: [[0.1, 0.2, 0.3]],
+				usage: { promptTokens: 0, totalTokens: 0 },
+			})
+		})
+
+		/**
+		 * Test batching logic when texts exceed token limits
+		 */
+		describe("batching logic", () => {
+			it("should process texts in batches", async () => {
+				// Use normal sized texts that won't be skipped
+				const testTexts = ["text1", "text2", "text3"]
+
+				mockEmbeddingsCreate.mockResolvedValue({
+					data: testTexts.map((_, i) => ({ embedding: [i, i + 0.1, i + 0.2] })),
+					usage: { prompt_tokens: 30, total_tokens: 45 },
+				})
+
+				const result = await embedder.createEmbeddings(testTexts)
+
+				expect(mockEmbeddingsCreate).toHaveBeenCalledTimes(1)
+				expect(result.embeddings).toHaveLength(3)
+				expect(result.usage?.promptTokens).toBe(30)
+			})
+
+			it("should warn and skip texts exceeding maximum token limit", async () => {
+				// Create a text that exceeds MAX_ITEM_TOKENS (4 characters ≈ 1 token)
+				const oversizedText = "a".repeat(MAX_ITEM_TOKENS * 4 + 100)
+				const normalText = "normal text"
+				const testTexts = [normalText, oversizedText, "another normal"]
+
+				mockEmbeddingsCreate.mockResolvedValue({
+					data: [{ embedding: [0.1, 0.2, 0.3] }, { embedding: [0.4, 0.5, 0.6] }],
+					usage: { prompt_tokens: 20, total_tokens: 30 },
+				})
+
+				const result = await embedder.createEmbeddings(testTexts)
+
+				// Verify warning was logged
+				expect(console.warn).toHaveBeenCalledWith(expect.stringContaining(`exceeds maximum token limit`))
+
+				// Verify only normal texts were processed
+				expect(mockEmbeddingsCreate).toHaveBeenCalledWith({
+					input: [normalText, "another normal"],
+					model: testModelId,
+				})
+				expect(result.embeddings).toHaveLength(2)
+			})
+
+			it("should handle multiple batches when total tokens exceed batch limit", async () => {
+				// Create texts that will require multiple batches
+				// Each text needs to be less than MAX_ITEM_TOKENS (8191) but together exceed MAX_BATCH_TOKENS (100000)
+				// Let's use 8000 tokens per text (safe under MAX_ITEM_TOKENS)
+				const tokensPerText = 8000
+				const largeText = "a".repeat(tokensPerText * 4) // 4 chars ≈ 1 token
+				// Create 15 texts * 8000 tokens = 120000 tokens total
+				const testTexts = Array(15).fill(largeText)
+
+				// Mock responses for each batch
+				// First batch will have 12 texts (96000 tokens), second batch will have 3 texts (24000 tokens)
+				mockEmbeddingsCreate
+					.mockResolvedValueOnce({
+						data: Array(12)
+							.fill(null)
+							.map((_, i) => ({ embedding: [i * 0.1, i * 0.1 + 0.1, i * 0.1 + 0.2] })),
+						usage: { prompt_tokens: 96000, total_tokens: 96000 },
+					})
+					.mockResolvedValueOnce({
+						data: Array(3)
+							.fill(null)
+							.map((_, i) => ({
+								embedding: [(12 + i) * 0.1, (12 + i) * 0.1 + 0.1, (12 + i) * 0.1 + 0.2],
+							})),
+						usage: { prompt_tokens: 24000, total_tokens: 24000 },
+					})
+
+				const result = await embedder.createEmbeddings(testTexts)
+
+				expect(mockEmbeddingsCreate).toHaveBeenCalledTimes(2)
+				expect(result.embeddings).toHaveLength(15)
+				expect(result.usage?.promptTokens).toBe(120000)
+				expect(result.usage?.totalTokens).toBe(120000)
+			})
+
+			it("should handle all texts being skipped due to size", async () => {
+				const oversizedText = "a".repeat(MAX_ITEM_TOKENS * 4 + 100)
+				const testTexts = [oversizedText, oversizedText]
+
+				const result = await embedder.createEmbeddings(testTexts)
+
+				expect(console.warn).toHaveBeenCalledTimes(2)
+				expect(mockEmbeddingsCreate).not.toHaveBeenCalled()
+				expect(result).toEqual({
+					embeddings: [],
+					usage: { promptTokens: 0, totalTokens: 0 },
+				})
+			})
+		})
+
+		/**
+		 * Test retry logic for rate limiting and other errors
+		 */
+		describe("retry logic", () => {
+			beforeEach(() => {
+				vitest.useFakeTimers()
+			})
+
+			afterEach(() => {
+				vitest.useRealTimers()
+			})
+
+			it("should retry on rate limit errors with exponential backoff", async () => {
+				const testTexts = ["Hello world"]
+				const rateLimitError = { status: 429, message: "Rate limit exceeded" }
+
+				mockEmbeddingsCreate
+					.mockRejectedValueOnce(rateLimitError)
+					.mockRejectedValueOnce(rateLimitError)
+					.mockResolvedValueOnce({
+						data: [{ embedding: [0.1, 0.2, 0.3] }],
+						usage: { prompt_tokens: 10, total_tokens: 15 },
+					})
+
+				const resultPromise = embedder.createEmbeddings(testTexts)
+
+				// Fast-forward through the delays
+				await vitest.advanceTimersByTimeAsync(INITIAL_RETRY_DELAY_MS) // First retry delay
+				await vitest.advanceTimersByTimeAsync(INITIAL_RETRY_DELAY_MS * 2) // Second retry delay
+
+				const result = await resultPromise
+
+				expect(mockEmbeddingsCreate).toHaveBeenCalledTimes(3)
+				expect(console.warn).toHaveBeenCalledWith(expect.stringContaining("Rate limit hit, retrying in"))
+				expect(result).toEqual({
+					embeddings: [[0.1, 0.2, 0.3]],
+					usage: { promptTokens: 10, totalTokens: 15 },
+				})
+			})
+
+			it("should not retry on non-rate-limit errors", async () => {
+				const testTexts = ["Hello world"]
+				const authError = new Error("Unauthorized")
+				;(authError as any).status = 401
+
+				mockEmbeddingsCreate.mockRejectedValue(authError)
+
+				await expect(embedder.createEmbeddings(testTexts)).rejects.toThrow(
+					"Failed to create embeddings: Authentication failed. Please check your OpenAI API key.",
+				)
+
+				expect(mockEmbeddingsCreate).toHaveBeenCalledTimes(1)
+				expect(console.warn).not.toHaveBeenCalledWith(expect.stringContaining("Rate limit hit"))
+			})
+
+			it("should throw error immediately on non-retryable errors", async () => {
+				const testTexts = ["Hello world"]
+				const serverError = new Error("Internal server error")
+				;(serverError as any).status = 500
+
+				mockEmbeddingsCreate.mockRejectedValue(serverError)
+
+				await expect(embedder.createEmbeddings(testTexts)).rejects.toThrow(
+					"Failed to create embeddings after 3 attempts: HTTP 500 - Internal server error",
+				)
+
+				expect(mockEmbeddingsCreate).toHaveBeenCalledTimes(1)
+			})
+		})
+
+		/**
+		 * Test error handling scenarios
+		 */
+		describe("error handling", () => {
+			it("should handle API errors gracefully", async () => {
+				const testTexts = ["Hello world"]
+				const apiError = new Error("API connection failed")
+
+				mockEmbeddingsCreate.mockRejectedValue(apiError)
+
+				await expect(embedder.createEmbeddings(testTexts)).rejects.toThrow(
+					"Failed to create embeddings after 3 attempts: API connection failed",
+				)
+
+				expect(console.error).toHaveBeenCalledWith(
+					expect.stringContaining("OpenAI embedder error"),
+					expect.any(Error),
+				)
+			})
+
+			it("should handle empty text arrays", async () => {
+				const testTexts: string[] = []
+
+				const result = await embedder.createEmbeddings(testTexts)
+
+				expect(result).toEqual({
+					embeddings: [],
+					usage: { promptTokens: 0, totalTokens: 0 },
+				})
+				expect(mockEmbeddingsCreate).not.toHaveBeenCalled()
+			})
+
+			it("should handle malformed API responses", async () => {
+				const testTexts = ["Hello world"]
+				const malformedResponse = {
+					data: null,
+					usage: { prompt_tokens: 10, total_tokens: 15 },
+				}
+
+				mockEmbeddingsCreate.mockResolvedValue(malformedResponse)
+
+				await expect(embedder.createEmbeddings(testTexts)).rejects.toThrow()
+			})
+
+			it("should provide specific authentication error message", async () => {
+				const testTexts = ["Hello world"]
+				const authError = new Error("Invalid API key")
+				;(authError as any).status = 401
+
+				mockEmbeddingsCreate.mockRejectedValue(authError)
+
+				await expect(embedder.createEmbeddings(testTexts)).rejects.toThrow(
+					"Failed to create embeddings: Authentication failed. Please check your OpenAI API key.",
+				)
+			})
+
+			it("should provide detailed error message for HTTP errors", async () => {
+				const testTexts = ["Hello world"]
+				const httpError = new Error("Bad request")
+				;(httpError as any).status = 400
+
+				mockEmbeddingsCreate.mockRejectedValue(httpError)
+
+				await expect(embedder.createEmbeddings(testTexts)).rejects.toThrow(
+					"Failed to create embeddings after 3 attempts: HTTP 400 - Bad request",
+				)
+			})
+
+			it("should handle errors without status codes", async () => {
+				const testTexts = ["Hello world"]
+				const networkError = new Error("Network timeout")
+
+				mockEmbeddingsCreate.mockRejectedValue(networkError)
+
+				await expect(embedder.createEmbeddings(testTexts)).rejects.toThrow(
+					"Failed to create embeddings after 3 attempts: Network timeout",
+				)
+			})
+
+			it("should handle errors without message property", async () => {
+				const testTexts = ["Hello world"]
+				const weirdError = { toString: () => "Custom error object" }
+
+				mockEmbeddingsCreate.mockRejectedValue(weirdError)
+
+				await expect(embedder.createEmbeddings(testTexts)).rejects.toThrow(
+					"Failed to create embeddings after 3 attempts: Custom error object",
+				)
+			})
+
+			it("should handle completely unknown error types", async () => {
+				const testTexts = ["Hello world"]
+				const unknownError = null
+
+				mockEmbeddingsCreate.mockRejectedValue(unknownError)
+
+				await expect(embedder.createEmbeddings(testTexts)).rejects.toThrow(
+					"Failed to create embeddings after 3 attempts: Unknown error",
+				)
+			})
+
+			it("should handle string errors", async () => {
+				const testTexts = ["Hello world"]
+				const stringError = "Something went wrong"
+
+				mockEmbeddingsCreate.mockRejectedValue(stringError)
+
+				await expect(embedder.createEmbeddings(testTexts)).rejects.toThrow(
+					"Failed to create embeddings after 3 attempts: Something went wrong",
+				)
+			})
+
+			it("should handle errors with failing toString method", async () => {
+				const testTexts = ["Hello world"]
+				const errorWithFailingToString = {
+					toString: () => {
+						throw new Error("toString failed")
+					},
+				}
+
+				mockEmbeddingsCreate.mockRejectedValue(errorWithFailingToString)
+
+				await expect(embedder.createEmbeddings(testTexts)).rejects.toThrow(
+					"Failed to create embeddings after 3 attempts: Unknown error",
+				)
+			})
+
+			it("should handle errors from response.status property", async () => {
+				const testTexts = ["Hello world"]
+				const errorWithResponseStatus = {
+					message: "Request failed",
+					response: { status: 403 },
+				}
+
+				mockEmbeddingsCreate.mockRejectedValue(errorWithResponseStatus)
+
+				await expect(embedder.createEmbeddings(testTexts)).rejects.toThrow(
+					"Failed to create embeddings after 3 attempts: HTTP 403 - Request failed",
+				)
+			})
+		})
+	})
+})

+ 9 - 6
src/services/code-index/embedders/ollama.ts

@@ -1,5 +1,6 @@
 import { ApiHandlerOptions } from "../../../shared/api"
 import { EmbedderInfo, EmbeddingResponse, IEmbedder } from "../interfaces"
+import { t } from "../../../i18n"
 
 /**
  * Implements the IEmbedder interface using a local Ollama instance.
@@ -39,14 +40,18 @@ export class CodeIndexOllamaEmbedder implements IEmbedder {
 			})
 
 			if (!response.ok) {
-				let errorBody = "Could not read error body"
+				let errorBody = t("embeddings:ollama.couldNotReadErrorBody")
 				try {
 					errorBody = await response.text()
 				} catch (e) {
 					// Ignore error reading body
 				}
 				throw new Error(
-					`Ollama API request failed with status ${response.status} ${response.statusText}: ${errorBody}`,
+					t("embeddings:ollama.requestFailed", {
+						status: response.status,
+						statusText: response.statusText,
+						errorBody,
+					}),
 				)
 			}
 
@@ -55,9 +60,7 @@ export class CodeIndexOllamaEmbedder implements IEmbedder {
 			// Extract embeddings using 'embeddings' key as requested
 			const embeddings = data.embeddings
 			if (!embeddings || !Array.isArray(embeddings)) {
-				throw new Error(
-					'Invalid response structure from Ollama API: "embeddings" array not found or not an array.',
-				)
+				throw new Error(t("embeddings:ollama.invalidResponseStructure"))
 			}
 
 			return {
@@ -67,7 +70,7 @@ export class CodeIndexOllamaEmbedder implements IEmbedder {
 			// Log the original error for debugging purposes
 			console.error("Ollama embedding failed:", error)
 			// Re-throw a more specific error for the caller
-			throw new Error(`Ollama embedding failed: ${error.message}`)
+			throw new Error(t("embeddings:ollama.embeddingFailed", { message: error.message }))
 		}
 	}
 

+ 40 - 16
src/services/code-index/embedders/openai-compatible.ts

@@ -7,6 +7,7 @@ import {
 	INITIAL_RETRY_DELAY_MS as INITIAL_DELAY_MS,
 } from "../constants"
 import { getDefaultModelId } from "../../../shared/embeddingModels"
+import { t } from "../../../i18n"
 
 interface EmbeddingItem {
 	embedding: string | number[]
@@ -73,7 +74,11 @@ export class OpenAICompatibleEmbedder implements IEmbedder {
 
 				if (itemTokens > MAX_ITEM_TOKENS) {
 					console.warn(
-						`Text at index ${i} exceeds maximum token limit (${itemTokens} > ${MAX_ITEM_TOKENS}). Skipping.`,
+						t("embeddings:textExceedsTokenLimit", {
+							index: i,
+							itemTokens,
+							maxTokens: MAX_ITEM_TOKENS,
+						}),
 					)
 					processedIndices.push(i)
 					continue
@@ -94,15 +99,10 @@ export class OpenAICompatibleEmbedder implements IEmbedder {
 			}
 
 			if (currentBatch.length > 0) {
-				try {
-					const batchResult = await this._embedBatchWithRetries(currentBatch, modelToUse)
-					allEmbeddings.push(...batchResult.embeddings)
-					usage.promptTokens += batchResult.usage.promptTokens
-					usage.totalTokens += batchResult.usage.totalTokens
-				} catch (error) {
-					console.error("Failed to process batch:", error)
-					throw new Error("Failed to create embeddings: batch processing error")
-				}
+				const batchResult = await this._embedBatchWithRetries(currentBatch, modelToUse)
+				allEmbeddings.push(...batchResult.embeddings)
+				usage.promptTokens += batchResult.usage.promptTokens
+				usage.totalTokens += batchResult.usage.totalTokens
 			}
 		}
 
@@ -164,7 +164,13 @@ export class OpenAICompatibleEmbedder implements IEmbedder {
 
 				if (isRateLimitError && hasMoreAttempts) {
 					const delayMs = INITIAL_DELAY_MS * Math.pow(2, attempts)
-					console.warn(`Rate limit hit, retrying in ${delayMs}ms (attempt ${attempts + 1}/${MAX_RETRIES})`)
+					console.warn(
+						t("embeddings:rateLimitRetry", {
+							delayMs,
+							attempt: attempts + 1,
+							maxRetries: MAX_RETRIES,
+						}),
+					)
 					await new Promise((resolve) => setTimeout(resolve, delayMs))
 					continue
 				}
@@ -172,17 +178,35 @@ export class OpenAICompatibleEmbedder implements IEmbedder {
 				// Log the error for debugging
 				console.error(`OpenAI Compatible embedder error (attempt ${attempts + 1}/${MAX_RETRIES}):`, error)
 
-				if (!hasMoreAttempts) {
+				// Provide more context in the error message using robust error extraction
+				let errorMessage = t("embeddings:unknownError")
+				if (error?.message) {
+					errorMessage = error.message
+				} else if (typeof error === "string") {
+					errorMessage = error
+				} else if (error && typeof error.toString === "function") {
+					try {
+						errorMessage = error.toString()
+					} catch {
+						errorMessage = t("embeddings:unknownError")
+					}
+				}
+
+				const statusCode = error?.status || error?.response?.status
+
+				if (statusCode === 401) {
+					throw new Error(t("embeddings:authenticationFailed"))
+				} else if (statusCode) {
 					throw new Error(
-						`Failed to create embeddings after ${MAX_RETRIES} attempts: ${error.message || error}`,
+						t("embeddings:failedWithStatus", { attempts: MAX_RETRIES, statusCode, errorMessage }),
 					)
+				} else {
+					throw new Error(t("embeddings:failedWithError", { attempts: MAX_RETRIES, errorMessage }))
 				}
-
-				throw error
 			}
 		}
 
-		throw new Error(`Failed to create embeddings after ${MAX_RETRIES} attempts`)
+		throw new Error(t("embeddings:failedMaxAttempts", { attempts: MAX_RETRIES }))
 	}
 
 	/**

+ 46 - 12
src/services/code-index/embedders/openai.ts

@@ -8,6 +8,7 @@ import {
 	MAX_BATCH_RETRIES as MAX_RETRIES,
 	INITIAL_RETRY_DELAY_MS as INITIAL_DELAY_MS,
 } from "../constants"
+import { t } from "../../../i18n"
 
 /**
  * OpenAI implementation of the embedder interface with batching and rate limiting
@@ -50,7 +51,11 @@ export class OpenAiEmbedder extends OpenAiNativeHandler implements IEmbedder {
 
 				if (itemTokens > MAX_ITEM_TOKENS) {
 					console.warn(
-						`Text at index ${i} exceeds maximum token limit (${itemTokens} > ${MAX_ITEM_TOKENS}). Skipping.`,
+						t("embeddings:textExceedsTokenLimit", {
+							index: i,
+							itemTokens,
+							maxTokens: MAX_ITEM_TOKENS,
+						}),
 					)
 					processedIndices.push(i)
 					continue
@@ -71,15 +76,10 @@ export class OpenAiEmbedder extends OpenAiNativeHandler implements IEmbedder {
 			}
 
 			if (currentBatch.length > 0) {
-				try {
-					const batchResult = await this._embedBatchWithRetries(currentBatch, modelToUse)
-					allEmbeddings.push(...batchResult.embeddings)
-					usage.promptTokens += batchResult.usage.promptTokens
-					usage.totalTokens += batchResult.usage.totalTokens
-				} catch (error) {
-					console.error("Failed to process batch:", error)
-					throw new Error("Failed to create embeddings: batch processing error")
-				}
+				const batchResult = await this._embedBatchWithRetries(currentBatch, modelToUse)
+				allEmbeddings.push(...batchResult.embeddings)
+				usage.promptTokens += batchResult.usage.promptTokens
+				usage.totalTokens += batchResult.usage.totalTokens
 			}
 		}
 
@@ -116,15 +116,49 @@ export class OpenAiEmbedder extends OpenAiNativeHandler implements IEmbedder {
 
 				if (isRateLimitError && hasMoreAttempts) {
 					const delayMs = INITIAL_DELAY_MS * Math.pow(2, attempts)
+					console.warn(
+						t("embeddings:rateLimitRetry", {
+							delayMs,
+							attempt: attempts + 1,
+							maxRetries: MAX_RETRIES,
+						}),
+					)
 					await new Promise((resolve) => setTimeout(resolve, delayMs))
 					continue
 				}
 
-				throw error
+				// Log the error for debugging
+				console.error(`OpenAI embedder error (attempt ${attempts + 1}/${MAX_RETRIES}):`, error)
+
+				// Provide more context in the error message using robust error extraction
+				let errorMessage = "Unknown error"
+				if (error?.message) {
+					errorMessage = error.message
+				} else if (typeof error === "string") {
+					errorMessage = error
+				} else if (error && typeof error.toString === "function") {
+					try {
+						errorMessage = error.toString()
+					} catch {
+						errorMessage = "Unknown error"
+					}
+				}
+
+				const statusCode = error?.status || error?.response?.status
+
+				if (statusCode === 401) {
+					throw new Error(t("embeddings:authenticationFailed"))
+				} else if (statusCode) {
+					throw new Error(
+						t("embeddings:failedWithStatus", { attempts: MAX_RETRIES, statusCode, errorMessage }),
+					)
+				} else {
+					throw new Error(t("embeddings:failedWithError", { attempts: MAX_RETRIES, errorMessage }))
+				}
 			}
 		}
 
-		throw new Error(`Failed to create embeddings after ${MAX_RETRIES} attempts`)
+		throw new Error(t("embeddings:failedMaxAttempts", { attempts: MAX_RETRIES }))
 	}
 
 	get embedderInfo(): EmbedderInfo {

+ 16 - 0
src/services/code-index/orchestrator.ts

@@ -119,6 +119,7 @@ export class CodeIndexOrchestrator {
 
 			let cumulativeBlocksIndexed = 0
 			let cumulativeBlocksFoundSoFar = 0
+			let batchErrors: Error[] = []
 
 			const handleFileParsed = (fileBlockCount: number) => {
 				cumulativeBlocksFoundSoFar += fileBlockCount
@@ -137,6 +138,7 @@ export class CodeIndexOrchestrator {
 						`[CodeIndexOrchestrator] Error during initial scan batch: ${batchError.message}`,
 						batchError,
 					)
+					batchErrors.push(batchError)
 				},
 				handleBlocksIndexed,
 				handleFileParsed,
@@ -148,6 +150,20 @@ export class CodeIndexOrchestrator {
 
 			const { stats } = result
 
+			// Verify that at least some blocks were indexed successfully.
+			// If blocks were found but none were indexed, every batch failed.
+			if (cumulativeBlocksIndexed === 0 && cumulativeBlocksFoundSoFar > 0) {
+				if (batchErrors.length > 0) {
+					// Use the first batch error as it's likely representative of the main issue
+					const firstError = batchErrors[0]
+					throw new Error(`Indexing failed: ${firstError.message}`)
+				} else {
+					throw new Error(
+						"Indexing failed: No code blocks were successfully indexed. This usually indicates an embedder configuration issue.",
+					)
+				}
+			}
+
 			await this._startWatcher()
 
 			this.stateManager.setSystemState("Indexed", "File watcher started.")

+ 23 - 3
src/services/code-index/processors/scanner.ts

@@ -12,6 +12,7 @@ import { v5 as uuidv5 } from "uuid"
 import pLimit from "p-limit"
 import { Mutex } from "async-mutex"
 import { CacheManager } from "../cache-manager"
+import { t } from "../../../i18n"
 import {
 	QDRANT_CODE_BLOCK_NAMESPACE,
 	MAX_FILE_SIZE_BYTES,
@@ -186,7 +187,11 @@ export class DirectoryScanner implements IDirectoryScanner {
 				} catch (error) {
 					console.error(`Error processing file ${filePath}:`, error)
 					if (onError) {
-						onError(error instanceof Error ? error : new Error(`Unknown error processing file ${filePath}`))
+						onError(
+							error instanceof Error
+								? error
+								: new Error(t("embeddings:scanner.unknownErrorProcessingFile", { filePath })),
+						)
 					}
 				}
 			}),
@@ -235,7 +240,11 @@ export class DirectoryScanner implements IDirectoryScanner {
 							onError(
 								error instanceof Error
 									? error
-									: new Error(`Unknown error deleting points for ${cachedFilePath}`),
+									: new Error(
+											t("embeddings:scanner.unknownErrorDeletingPoints", {
+												filePath: cachedFilePath,
+											}),
+										),
 							)
 						}
 						// Decide if we should re-throw or just log
@@ -337,7 +346,18 @@ export class DirectoryScanner implements IDirectoryScanner {
 		if (!success && lastError) {
 			console.error(`[DirectoryScanner] Failed to process batch after ${MAX_BATCH_RETRIES} attempts`)
 			if (onError) {
-				onError(new Error(`Failed to process batch after ${MAX_BATCH_RETRIES} attempts: ${lastError.message}`))
+				// Preserve the original error message from embedders which now have detailed i18n messages
+				const errorMessage = lastError.message || "Unknown error"
+
+				// Wrap with batch-retry context; the original error message is preserved inside
+				onError(
+					new Error(
+						t("embeddings:scanner.failedToProcessBatchWithError", {
+							maxRetries: MAX_BATCH_RETRIES,
+							errorMessage,
+						}),
+					),
+				)
 			}
 		}
 	}

+ 8 - 2
src/services/code-index/vector-store/__tests__/qdrant-client.spec.ts

@@ -227,7 +227,10 @@ describe("QdrantVectorStore", () => {
 			mockQdrantClientInstance.createCollection.mockRejectedValue(createError)
 			vitest.spyOn(console, "error").mockImplementation(() => {}) // Suppress console.error
 
-			await expect(vectorStore.initialize()).rejects.toThrow(createError)
+			// The actual error message includes the URL and error details
+			await expect(vectorStore.initialize()).rejects.toThrow(
+				/Failed to connect to Qdrant vector database|vectorStore\.qdrantConnectionFailed/,
+			)
 
 			expect(mockQdrantClientInstance.getCollection).toHaveBeenCalledTimes(1)
 			expect(mockQdrantClientInstance.createCollection).toHaveBeenCalledTimes(1)
@@ -287,7 +290,10 @@ describe("QdrantVectorStore", () => {
 			vitest.spyOn(console, "error").mockImplementation(() => {})
 			vitest.spyOn(console, "warn").mockImplementation(() => {})
 
-			await expect(vectorStore.initialize()).rejects.toThrow(deleteError)
+			// The actual error message includes the URL and error details
+			await expect(vectorStore.initialize()).rejects.toThrow(
+				/Failed to connect to Qdrant vector database|vectorStore\.qdrantConnectionFailed/,
+			)
 
 			expect(mockQdrantClientInstance.getCollection).toHaveBeenCalledTimes(1)
 			expect(mockQdrantClientInstance.deleteCollection).toHaveBeenCalledTimes(1)

+ 11 - 4
src/services/code-index/vector-store/qdrant-client.ts

@@ -5,17 +5,18 @@ import { getWorkspacePath } from "../../../utils/path"
 import { IVectorStore } from "../interfaces/vector-store"
 import { Payload, VectorStoreSearchResult } from "../interfaces"
 import { MAX_SEARCH_RESULTS, SEARCH_MIN_SCORE } from "../constants"
+import { t } from "../../../i18n"
 
 /**
  * Qdrant implementation of the vector store interface
  */
 export class QdrantVectorStore implements IVectorStore {
-	private readonly QDRANT_URL = "http://localhost:6333"
 	private readonly vectorSize!: number
 	private readonly DISTANCE_METRIC = "Cosine"
 
 	private client: QdrantClient
 	private readonly collectionName: string
+	private readonly qdrantUrl: string = "http://localhost:6333"
 
 	/**
 	 * Creates a new Qdrant vector store
@@ -23,8 +24,9 @@ export class QdrantVectorStore implements IVectorStore {
 	 * @param url Optional URL to the Qdrant server
 	 */
 	constructor(workspacePath: string, url: string, vectorSize: number, apiKey?: string) {
+		this.qdrantUrl = url || "http://localhost:6333"
 		this.client = new QdrantClient({
-			url: url ?? this.QDRANT_URL,
+			url: this.qdrantUrl,
 			apiKey,
 			headers: {
 				"User-Agent": "Roo-Code",
@@ -110,11 +112,16 @@ export class QdrantVectorStore implements IVectorStore {
 			}
 			return created
 		} catch (error: any) {
+			const errorMessage = error?.message || error
 			console.error(
 				`[QdrantVectorStore] Failed to initialize Qdrant collection "${this.collectionName}":`,
-				error?.message || error,
+				errorMessage,
+			)
+
+			// Provide a more user-friendly error message that includes the original error
+			throw new Error(
+				t("embeddings:vectorStore.qdrantConnectionFailed", { qdrantUrl: this.qdrantUrl, errorMessage }),
 			)
-			throw error
 		}
 	}