
Merge pull request #1579 from Kilo-Org/roo-v3.24.0

Include changes from Roo Code v3.24.0
Christiaan Arnoldus, 6 months ago
parent
commit
9f7f0cb682
100 changed files with 2694 additions and 109 deletions
  1. .changeset/true-cases-shake.md (+19 -0)
  2. apps/vscode-e2e/src/suite/markdown-lists.test.ts (+168 -0)
  3. apps/web-roo-code/next.config.ts (+1 -1)
  4. apps/web-roo-code/src/app/layout.tsx (+11 -0)
  5. apps/web-roo-code/src/lib/hooks/use-logo-src.ts (+18 -2)
  6. packages/types/npm/package.json (+1 -1)
  7. packages/types/src/cloud.ts (+2 -0)
  8. packages/types/src/global-settings.ts (+16 -0)
  9. packages/types/src/mode.ts (+77 -0)
  10. packages/types/src/provider-settings.ts (+12 -0)
  11. packages/types/src/providers/bedrock.ts (+2 -2)
  12. packages/types/src/providers/huggingface.ts (+17 -0)
  13. packages/types/src/providers/index.ts (+1 -0)
  14. src/api/index.ts (+3 -0)
  15. src/api/providers/__tests__/bedrock-inference-profiles.spec.ts (+1 -1)
  16. src/api/providers/__tests__/bedrock-reasoning.spec.ts (+46 -0)
  17. src/api/providers/__tests__/bedrock-vpc-endpoint.spec.ts (+1 -1)
  18. src/api/providers/bedrock.ts (+17 -9)
  19. src/api/providers/fetchers/huggingface.ts (+290 -0)
  20. src/api/providers/huggingface.ts (+132 -0)
  21. src/api/providers/index.ts (+1 -0)
  22. src/core/config/CustomModesManager.ts (+58 -1)
  23. src/core/mentions/__tests__/processUserContentMentions.spec.ts (+353 -0)
  24. src/core/mentions/index.ts (+21 -5)
  25. src/core/mentions/processKiloUserContentMentions.ts (+15 -0)
  26. src/core/mentions/processUserContentMentions.ts (+15 -0)
  27. src/core/prompts/__tests__/system-prompt.spec.ts (+7 -1)
  28. src/core/prompts/sections/__tests__/custom-instructions-global.spec.ts (+2 -0)
  29. src/core/prompts/sections/__tests__/custom-instructions.spec.ts (+123 -0)
  30. src/core/prompts/sections/custom-instructions.ts (+33 -1)
  31. src/core/prompts/system.ts (+9 -3)
  32. src/core/prompts/types.ts (+8 -0)
  33. src/core/protect/RooProtectedController.ts (+1 -0)
  34. src/core/protect/__tests__/RooProtectedController.spec.ts (+5 -0)
  35. src/core/task/Task.ts (+14 -6)
  36. src/core/tools/__tests__/insertContentTool.spec.ts (+1 -0)
  37. src/core/tools/__tests__/writeToFileTool.spec.ts (+1 -0)
  38. src/core/tools/applyDiffTool.ts (+9 -2)
  39. src/core/tools/multiApplyDiffTool.ts (+15 -1)
  40. src/core/webview/ClineProvider.ts (+9 -4)
  41. src/core/webview/generateSystemPrompt.ts (+4 -1)
  42. src/core/webview/webviewMessageHandler.ts (+120 -3)
  43. src/i18n/locales/ar/common.json (+7 -1)
  44. src/i18n/locales/ar/embeddings.json (+9 -0)
  45. src/i18n/locales/ca/common.json (+7 -1)
  46. src/i18n/locales/ca/embeddings.json (+9 -0)
  47. src/i18n/locales/cs/common.json (+7 -1)
  48. src/i18n/locales/cs/embeddings.json (+9 -0)
  49. src/i18n/locales/de/common.json (+7 -1)
  50. src/i18n/locales/de/embeddings.json (+9 -0)
  51. src/i18n/locales/en/common.json (+7 -1)
  52. src/i18n/locales/en/embeddings.json (+9 -0)
  53. src/i18n/locales/es/common.json (+7 -1)
  54. src/i18n/locales/es/embeddings.json (+9 -0)
  55. src/i18n/locales/fr/common.json (+7 -1)
  56. src/i18n/locales/fr/embeddings.json (+9 -0)
  57. src/i18n/locales/hi/common.json (+7 -1)
  58. src/i18n/locales/hi/embeddings.json (+9 -0)
  59. src/i18n/locales/id/common.json (+7 -1)
  60. src/i18n/locales/id/embeddings.json (+9 -0)
  61. src/i18n/locales/it/common.json (+7 -1)
  62. src/i18n/locales/it/embeddings.json (+9 -0)
  63. src/i18n/locales/ja/common.json (+7 -1)
  64. src/i18n/locales/ja/embeddings.json (+9 -0)
  65. src/i18n/locales/ko/common.json (+7 -1)
  66. src/i18n/locales/ko/embeddings.json (+9 -0)
  67. src/i18n/locales/nl/common.json (+7 -1)
  68. src/i18n/locales/nl/embeddings.json (+9 -0)
  69. src/i18n/locales/pl/common.json (+7 -1)
  70. src/i18n/locales/pl/embeddings.json (+9 -0)
  71. src/i18n/locales/pt-BR/common.json (+7 -1)
  72. src/i18n/locales/pt-BR/embeddings.json (+9 -0)
  73. src/i18n/locales/ru/common.json (+7 -1)
  74. src/i18n/locales/ru/embeddings.json (+9 -0)
  75. src/i18n/locales/th/common.json (+7 -1)
  76. src/i18n/locales/th/embeddings.json (+9 -0)
  77. src/i18n/locales/tr/common.json (+7 -1)
  78. src/i18n/locales/tr/embeddings.json (+9 -0)
  79. src/i18n/locales/uk/common.json (+7 -1)
  80. src/i18n/locales/uk/embeddings.json (+9 -0)
  81. src/i18n/locales/vi/common.json (+7 -1)
  82. src/i18n/locales/vi/embeddings.json (+9 -0)
  83. src/i18n/locales/zh-CN/common.json (+7 -1)
  84. src/i18n/locales/zh-CN/embeddings.json (+9 -0)
  85. src/i18n/locales/zh-TW/common.json (+7 -1)
  86. src/i18n/locales/zh-TW/embeddings.json (+9 -0)
  87. src/integrations/diagnostics/__tests__/diagnostics.spec.ts (+230 -0)
  88. src/integrations/diagnostics/index.ts (+154 -39)
  89. src/integrations/editor/DiffViewProvider.ts (+15 -1)
  90. src/integrations/editor/__tests__/DiffViewProvider.spec.ts (+14 -1)
  91. src/integrations/misc/__tests__/extract-text-large-files.spec.ts (+221 -0)
  92. src/integrations/misc/extract-text.ts (+37 -1)
  93. src/package.json (+5 -0)
  94. src/package.nls.ca.json (+1 -0)
  95. src/package.nls.de.json (+1 -0)
  96. src/package.nls.es.json (+1 -0)
  97. src/package.nls.fr.json (+1 -0)
  98. src/package.nls.hi.json (+1 -0)
  99. src/package.nls.id.json (+1 -0)
  100. src/package.nls.it.json (+1 -0)

+ 19 - 0
.changeset/true-cases-shake.md

@@ -0,0 +1,19 @@
+---
+"kilo-code": minor
+---
+
+Include changes from Roo Code v3.24.0
+
+- Add Hugging Face provider with support for open source models (thanks @TGlide!)
+- Add terminal command permissions UI to chat interface
+- Add support for Agent Rules standard via AGENTS.md (thanks @sgryphon!)
+- Add settings to control diagnostic messages
+- Fix auto-approve checkbox so it can be toggled at any time (thanks @KJ7LNW!)
+- Add efficiency warning for single SEARCH/REPLACE blocks in apply_diff (thanks @KJ7LNW!)
+- Fix file mentions to respect the maxReadFileLine setting and prevent context exhaustion (thanks @sebinseban!)
+- Fix Ollama API URL normalization by removing trailing slashes (thanks @Naam!)
+- Fix markdown list rendering in the chat interface by restoring list styles (thanks @village-way!)
+- Add support for Amazon Bedrock API keys
+- Add confirmation dialog and proper cleanup for marketplace mode removal
+- Fix auto-approve timer so it is cancelled when editing a follow-up suggestion (thanks @hassoncs!)
+- Fix code indexing to show an error message when no workspace folder is open

+ 168 - 0
apps/vscode-e2e/src/suite/markdown-lists.test.ts

@@ -0,0 +1,168 @@
+import * as assert from "assert"
+
+import type { ClineMessage } from "@roo-code/types"
+
+import { waitUntilCompleted } from "./utils"
+import { setDefaultSuiteTimeout } from "./test-utils"
+
+suite("Markdown List Rendering", function () {
+	setDefaultSuiteTimeout(this)
+
+	test("Should render unordered lists with bullets in chat", async () => {
+		const api = globalThis.api
+
+		const messages: ClineMessage[] = []
+
+		api.on("message", ({ message }: { message: ClineMessage }) => {
+			if (message.type === "say" && message.partial === false) {
+				messages.push(message)
+			}
+		})
+
+		const taskId = await api.startNewTask({
+			configuration: { mode: "ask", alwaysAllowModeSwitch: true, autoApprovalEnabled: true },
+			text: "Please show me an example of an unordered list with the following items: Apple, Banana, Orange",
+		})
+
+		await waitUntilCompleted({ api, taskId })
+
+		// Find the message containing the list
+		const listMessage = messages.find(
+			({ say, text }) =>
+				(say === "completion_result" || say === "text") &&
+				text?.includes("Apple") &&
+				text?.includes("Banana") &&
+				text?.includes("Orange"),
+		)
+
+		assert.ok(listMessage, "Should have a message containing the list items")
+
+		// The rendered markdown should contain list markers
+		const messageText = listMessage?.text || ""
+		assert.ok(
+			messageText.includes("- Apple") || messageText.includes("* Apple") || messageText.includes("• Apple"),
+			"List items should be rendered with bullet points",
+		)
+	})
+
+	test("Should render ordered lists with numbers in chat", async () => {
+		const api = globalThis.api
+
+		const messages: ClineMessage[] = []
+
+		api.on("message", ({ message }: { message: ClineMessage }) => {
+			if (message.type === "say" && message.partial === false) {
+				messages.push(message)
+			}
+		})
+
+		const taskId = await api.startNewTask({
+			configuration: { mode: "ask", alwaysAllowModeSwitch: true, autoApprovalEnabled: true },
+			text: "Please show me a numbered list with three steps: First step, Second step, Third step",
+		})
+
+		await waitUntilCompleted({ api, taskId })
+
+		// Find the message containing the numbered list
+		const listMessage = messages.find(
+			({ say, text }) =>
+				(say === "completion_result" || say === "text") &&
+				text?.includes("First step") &&
+				text?.includes("Second step") &&
+				text?.includes("Third step"),
+		)
+
+		assert.ok(listMessage, "Should have a message containing the numbered list")
+
+		// The rendered markdown should contain numbered markers
+		const messageText = listMessage?.text || ""
+		assert.ok(
+			messageText.includes("1. First step") || messageText.includes("1) First step"),
+			"List items should be rendered with numbers",
+		)
+	})
+
+	test("Should render nested lists with proper hierarchy", async () => {
+		const api = globalThis.api
+
+		const messages: ClineMessage[] = []
+
+		api.on("message", ({ message }: { message: ClineMessage }) => {
+			if (message.type === "say" && message.partial === false) {
+				messages.push(message)
+			}
+		})
+
+		const taskId = await api.startNewTask({
+			configuration: { mode: "ask", alwaysAllowModeSwitch: true, autoApprovalEnabled: true },
+			text: "Please create a nested list with 'Main item' having two sub-items: 'Sub-item A' and 'Sub-item B'",
+		})
+
+		await waitUntilCompleted({ api, taskId })
+
+		// Find the message containing the nested list
+		const listMessage = messages.find(
+			({ say, text }) =>
+				(say === "completion_result" || say === "text") &&
+				text?.includes("Main item") &&
+				text?.includes("Sub-item A") &&
+				text?.includes("Sub-item B"),
+		)
+
+		assert.ok(listMessage, "Should have a message containing the nested list")
+
+		// The rendered markdown should show hierarchy through indentation
+		const messageText = listMessage?.text || ""
+
+		// Check for main item
+		assert.ok(
+			messageText.includes("- Main item") ||
+				messageText.includes("* Main item") ||
+				messageText.includes("• Main item"),
+			"Main list item should be rendered",
+		)
+
+		// Check for sub-items with indentation (typically 2-4 spaces or a tab)
+		assert.ok(
+			messageText.match(/\s{2,}- Sub-item A/) ||
+				messageText.match(/\s{2,}\* Sub-item A/) ||
+				messageText.match(/\s{2,}• Sub-item A/) ||
+				messageText.includes("\t- Sub-item A") ||
+				messageText.includes("\t* Sub-item A") ||
+				messageText.includes("\t• Sub-item A"),
+			"Sub-items should be indented",
+		)
+	})
+
+	test("Should render mixed ordered and unordered lists", async () => {
+		const api = globalThis.api
+
+		const messages: ClineMessage[] = []
+
+		api.on("message", ({ message }: { message: ClineMessage }) => {
+			if (message.type === "say" && message.partial === false) {
+				messages.push(message)
+			}
+		})
+
+		const taskId = await api.startNewTask({
+			configuration: { mode: "ask", alwaysAllowModeSwitch: true, autoApprovalEnabled: true },
+			text: "Please create a list that has both numbered items and bullet points, mixing ordered and unordered lists",
+		})
+
+		await waitUntilCompleted({ api, taskId })
+
+		// Find a message that contains both types of lists
+		const listMessage = messages.find(
+			({ say, text }) =>
+				(say === "completion_result" || say === "text") &&
+				text &&
+				// Check for numbered list markers
+				(text.includes("1.") || text.includes("1)")) &&
+				// Check for bullet list markers
+				(text.includes("-") || text.includes("*") || text.includes("•")),
+		)
+
+		assert.ok(listMessage, "Should have a message containing mixed list types")
+	})
+})

+ 1 - 1
apps/web-roo-code/next.config.ts

@@ -24,7 +24,7 @@ const nextConfig: NextConfig = {
 			// Redirect cloud waitlist to Notion page
 			{
 				source: "/cloud-waitlist",
-				destination: "https://shard-dogwood-daf.notion.site/238fd1401b0a8087b858e1ad431507cf?pvs=105",
+				destination: "https://roo-code.notion.site/238fd1401b0a8087b858e1ad431507cf?pvs=105",
 				permanent: false,
 			},
 		]

+ 11 - 0
apps/web-roo-code/src/app/layout.tsx

@@ -1,6 +1,7 @@
 import React from "react"
 import type { Metadata } from "next"
 import { Inter } from "next/font/google"
+import Script from "next/script"
 
 import { Providers } from "@/components/providers"
 
@@ -52,6 +53,16 @@ export default function RootLayout({ children }: { children: React.ReactNode })
 				/>
 			</head>
 			<body className={inter.className}>
+				{/* Google tag (gtag.js) */}
+				<Script src="https://www.googletagmanager.com/gtag/js?id=AW-17391954825" strategy="afterInteractive" />
+				<Script id="google-analytics" strategy="afterInteractive">
+					{`
+						window.dataLayer = window.dataLayer || [];
+						function gtag(){dataLayer.push(arguments);}
+						gtag('js', new Date());
+						gtag('config', 'AW-17391954825');
+					`}
+				</Script>
 				<div itemScope itemType="https://schema.org/WebSite">
 					<link itemProp="url" href="https://roocode.com" />
 					<meta itemProp="name" content="Roo Code" />

+ 18 - 2
apps/web-roo-code/src/lib/hooks/use-logo-src.ts

@@ -1,8 +1,24 @@
 "use client"
 
 import { useTheme } from "next-themes"
+import { useEffect, useState } from "react"
 
 export function useLogoSrc(): string {
-	const { resolvedTheme } = useTheme()
-	return resolvedTheme === "light" ? "/Roo-Code-Logo-Horiz-blk.svg" : "/Roo-Code-Logo-Horiz-white.svg"
+	const { resolvedTheme, theme } = useTheme()
+	const [mounted, setMounted] = useState(false)
+
+	// Avoid hydration mismatch by waiting for client-side mount
+	useEffect(() => {
+		setMounted(true)
+	}, [])
+
+	// Before mounting, return a default logo (dark theme as specified in providers)
+	// This prevents the logo from flickering on initial load
+	if (!mounted) {
+		return "/Roo-Code-Logo-Horiz-white.svg"
+	}
+
+	// Use theme as fallback if resolvedTheme is not available yet
+	const currentTheme = resolvedTheme || theme
+	return currentTheme === "light" ? "/Roo-Code-Logo-Horiz-blk.svg" : "/Roo-Code-Logo-Horiz-white.svg"
 }
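
A minimal usage sketch for the updated hook (the HeaderLogo component and its markup are illustrative, not part of this PR): because the hook returns the dark-theme logo until the client has mounted, the server-rendered markup and the first client render stay identical.

"use client"

import Image from "next/image"
import { useLogoSrc } from "@/lib/hooks/use-logo-src"

export function HeaderLogo() {
	// Before hydration this resolves to the white (dark-theme) logo,
	// so there is no mismatch or flicker on initial load.
	const logoSrc = useLogoSrc()
	return <Image src={logoSrc} alt="Roo Code" width={160} height={40} />
}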

+ 1 - 1
packages/types/npm/package.json

@@ -1,6 +1,6 @@
 {
 	"name": "@roo-code/types",
-	"version": "1.36.0",
+	"version": "1.39.0",
 	"description": "TypeScript type definitions for Roo Code.",
 	"publishConfig": {
 		"access": "public",

+ 2 - 0
packages/types/src/cloud.ts

@@ -96,6 +96,7 @@ export const organizationCloudSettingsSchema = z.object({
 	recordTaskMessages: z.boolean().optional(),
 	enableTaskSharing: z.boolean().optional(),
 	taskShareExpirationDays: z.number().int().positive().optional(),
+	allowMembersViewAllTasks: z.boolean().optional(),
 })
 
 export type OrganizationCloudSettings = z.infer<typeof organizationCloudSettingsSchema>
@@ -128,6 +129,7 @@ export const ORGANIZATION_DEFAULT: OrganizationSettings = {
 		recordTaskMessages: true,
 		enableTaskSharing: true,
 		taskShareExpirationDays: 30,
+		allowMembersViewAllTasks: true,
 	},
 	defaultSettings: {},
 	allowList: ORGANIZATION_ALLOW_ALL,

+ 16 - 0
packages/types/src/global-settings.ts

@@ -74,6 +74,17 @@ export const globalSettingsSchema = z.object({
 	maxConcurrentFileReads: z.number().optional(),
 	allowVeryLargeReads: z.boolean().optional(), // kilocode_change
 
+	/**
+	 * Whether to include diagnostic messages (errors, warnings) in tool outputs
+	 * @default true
+	 */
+	includeDiagnosticMessages: z.boolean().optional(),
+	/**
+	 * Maximum number of diagnostic messages to include in tool outputs
+	 * @default 50
+	 */
+	maxDiagnosticMessages: z.number().optional(),
+
 	browserToolEnabled: z.boolean().optional(),
 	browserViewportSize: z.string().optional(),
 	showAutoApproveMenu: z.boolean().optional(), // kilocode_change
@@ -166,6 +177,7 @@ export const SECRET_STATE_KEYS = [
 	"glamaApiKey",
 	"openRouterApiKey",
 	"awsAccessKey",
+	"awsApiKey",
 	"awsSecretKey",
 	"awsSessionToken",
 	"openAiApiKey",
@@ -186,6 +198,7 @@ export const SECRET_STATE_KEYS = [
 	"codebaseIndexOpenAiCompatibleApiKey",
 	"codebaseIndexGeminiApiKey",
 	"codebaseIndexMistralApiKey",
+	"huggingFaceApiKey",
 ] as const satisfies readonly (keyof ProviderSettings)[]
 export type SecretState = Pick<ProviderSettings, (typeof SECRET_STATE_KEYS)[number]>
 
@@ -278,6 +291,9 @@ export const EVALS_SETTINGS: RooCodeSettings = {
 	showRooIgnoredFiles: true,
 	maxReadFileLine: -1, // -1 to enable full file reading.
 
+	includeDiagnosticMessages: true,
+	maxDiagnosticMessages: 50,
+
 	language: "en",
 	telemetrySetting: "enabled",
 

File diff not shown because the file is too large
+ 77 - 0
packages/types/src/mode.ts


+ 12 - 0
packages/types/src/provider-settings.ts

@@ -36,6 +36,7 @@ export const providerNames = [
 	"kilocode", // kilocode_change
 	"cerebras", // kilocode_change
 	"virtual-quota-fallback", // kilocode_change
+	"huggingface",
 ] as const
 
 export const providerNamesSchema = z.enum(providerNames)
@@ -124,6 +125,8 @@ const bedrockSchema = apiModelIdProviderModelSchema.extend({
 	awsUsePromptCache: z.boolean().optional(),
 	awsProfile: z.string().optional(),
 	awsUseProfile: z.boolean().optional(),
+	awsApiKey: z.string().optional(),
+	awsUseApiKey: z.boolean().optional(),
 	awsCustomArn: z.string().optional(),
 	awsModelContextWindow: z.number().optional(),
 	awsBedrockEndpointEnabled: z.boolean().optional(),
@@ -232,6 +235,12 @@ const groqSchema = apiModelIdProviderModelSchema.extend({
 	groqApiKey: z.string().optional(),
 })
 
+const huggingFaceSchema = baseProviderSettingsSchema.extend({
+	huggingFaceApiKey: z.string().optional(),
+	huggingFaceModelId: z.string().optional(),
+	huggingFaceInferenceProvider: z.string().optional(),
+})
+
 const chutesSchema = apiModelIdProviderModelSchema.extend({
 	chutesApiKey: z.string().optional(),
 })
@@ -302,6 +311,7 @@ export const providerSettingsSchemaDiscriminated = z.discriminatedUnion("apiProv
 	fakeAiSchema.merge(z.object({ apiProvider: z.literal("fake-ai") })),
 	xaiSchema.merge(z.object({ apiProvider: z.literal("xai") })),
 	groqSchema.merge(z.object({ apiProvider: z.literal("groq") })),
+	huggingFaceSchema.merge(z.object({ apiProvider: z.literal("huggingface") })),
 	chutesSchema.merge(z.object({ apiProvider: z.literal("chutes") })),
 	litellmSchema.merge(z.object({ apiProvider: z.literal("litellm") })),
 	kilocodeSchema.merge(z.object({ apiProvider: z.literal("kilocode") })), // kilocode_change
@@ -335,6 +345,7 @@ export const providerSettingsSchema = z.object({
 	...fakeAiSchema.shape,
 	...xaiSchema.shape,
 	...groqSchema.shape,
+	...huggingFaceSchema.shape,
 	...chutesSchema.shape,
 	...litellmSchema.shape,
 	...codebaseIndexProviderSchema.shape,
@@ -359,6 +370,7 @@ export const MODEL_ID_KEYS: Partial<keyof ProviderSettings>[] = [
 	"requestyModelId",
 	"litellmModelId",
 	"cerebrasModelId", // kilocode_change
+	"huggingFaceModelId",
 ]
 
 export const getModelId = (settings: ProviderSettings): string | undefined => {

+ 2 - 2
packages/types/src/providers/bedrock.ts

@@ -360,7 +360,7 @@ export const BEDROCK_MAX_TOKENS = 4096
 
 export const BEDROCK_DEFAULT_CONTEXT = 128_000
 
-// AWS Bedrock Inference Profile mapping based on official documentation
+// Amazon Bedrock Inference Profile mapping based on official documentation
 // https://docs.aws.amazon.com/bedrock/latest/userguide/inference-profiles-support.html
 // This mapping is pre-ordered by pattern length (descending) to ensure more specific patterns match first
 export const AWS_INFERENCE_PROFILE_MAPPING: Array<[string, string]> = [
@@ -378,7 +378,7 @@ export const AWS_INFERENCE_PROFILE_MAPPING: Array<[string, string]> = [
 	["sa-", "sa."],
 ]
 
-// AWS Bedrock supported regions for the regions dropdown
+// Amazon Bedrock supported regions for the regions dropdown
 // Based on official AWS documentation
 export const BEDROCK_REGIONS = [
 	{ value: "us-east-1", label: "us-east-1" },

+ 17 - 0
packages/types/src/providers/huggingface.ts

@@ -0,0 +1,17 @@
+/**
+ * HuggingFace provider constants
+ */
+
+// Default values for HuggingFace models
+export const HUGGINGFACE_DEFAULT_MAX_TOKENS = 2048
+export const HUGGINGFACE_MAX_TOKENS_FALLBACK = 8192
+export const HUGGINGFACE_DEFAULT_CONTEXT_WINDOW = 128_000
+
+// UI constants
+export const HUGGINGFACE_SLIDER_STEP = 256
+export const HUGGINGFACE_SLIDER_MIN = 1
+export const HUGGINGFACE_TEMPERATURE_MAX_VALUE = 2
+
+// API constants
+export const HUGGINGFACE_API_URL = "https://router.huggingface.co/v1/models?collection=roocode"
+export const HUGGINGFACE_CACHE_DURATION = 1000 * 60 * 60 // 1 hour

+ 1 - 0
packages/types/src/providers/index.ts

@@ -7,6 +7,7 @@ export * from "./gemini.js"
 export * from "./gemini-cli.js" // kilocode_change
 export * from "./glama.js"
 export * from "./groq.js"
+export * from "./huggingface.js"
 export * from "./lite-llm.js"
 export * from "./lm-studio.js"
 export * from "./mistral.js"

+ 3 - 0
src/api/index.ts

@@ -27,6 +27,7 @@ import {
 	FakeAIHandler,
 	XAIHandler,
 	GroqHandler,
+	HuggingFaceHandler,
 	ChutesHandler,
 	LiteLLMHandler,
 	CerebrasHandler, // kilocode_change
@@ -128,6 +129,8 @@ export function buildApiHandler(configuration: ProviderSettings): ApiHandler {
 			return new XAIHandler(options)
 		case "groq":
 			return new GroqHandler(options)
+		case "huggingface":
+			return new HuggingFaceHandler(options)
 		case "chutes":
 			return new ChutesHandler(options)
 		case "litellm":

+ 1 - 1
src/api/providers/__tests__/bedrock-inference-profiles.spec.ts

@@ -16,7 +16,7 @@ vitest.mock("@aws-sdk/client-bedrock-runtime", () => {
 	}
 })
 
-describe("AWS Bedrock Inference Profiles", () => {
+describe("Amazon Bedrock Inference Profiles", () => {
 	// Helper function to create a handler with specific options
 	const createHandler = (options: Partial<ApiHandlerOptions> = {}) => {
 		const defaultOptions: ApiHandlerOptions = {

+ 46 - 0
src/api/providers/__tests__/bedrock-reasoning.spec.ts

@@ -278,5 +278,51 @@ describe("AwsBedrockHandler - Extended Thinking", () => {
 			expect(reasoningChunks[0].text).toBe("Let me think...")
 			expect(reasoningChunks[1].text).toBe(" about this problem.")
 		})
+
+		it("should support API key authentication", async () => {
+			handler = new AwsBedrockHandler({
+				apiProvider: "bedrock",
+				apiModelId: "anthropic.claude-3-5-sonnet-20241022-v2:0",
+				awsRegion: "us-east-1",
+				awsUseApiKey: true,
+				awsApiKey: "test-api-key-token",
+			})
+
+			mockSend.mockResolvedValue({
+				stream: (async function* () {
+					yield { messageStart: { role: "assistant" } }
+					yield {
+						contentBlockStart: {
+							start: { text: "Hello from API key auth" },
+							contentBlockIndex: 0,
+						},
+					}
+					yield { metadata: { usage: { inputTokens: 100, outputTokens: 50 } } }
+				})(),
+			})
+
+			const messages = [{ role: "user" as const, content: "Test message" }]
+			const stream = handler.createMessage("System prompt", messages)
+
+			const chunks = []
+			for await (const chunk of stream) {
+				chunks.push(chunk)
+			}
+
+			// Verify the client was created with API key token
+			expect(BedrockRuntimeClient).toHaveBeenCalledWith(
+				expect.objectContaining({
+					region: "us-east-1",
+					token: { token: "test-api-key-token" },
+					authSchemePreference: ["httpBearerAuth"],
+				}),
+			)
+
+			// Verify the stream worked correctly
+			expect(mockSend).toHaveBeenCalledTimes(1)
+			const textChunks = chunks.filter((c) => c.type === "text")
+			expect(textChunks).toHaveLength(1)
+			expect(textChunks[0].text).toBe("Hello from API key auth")
+		})
 	})
 })

+ 1 - 1
src/api/providers/__tests__/bedrock-vpc-endpoint.spec.ts

@@ -29,7 +29,7 @@ import { BedrockRuntimeClient } from "@aws-sdk/client-bedrock-runtime"
 // Get access to the mocked functions
 const mockBedrockRuntimeClient = vi.mocked(BedrockRuntimeClient)
 
-describe("AWS Bedrock VPC Endpoint Functionality", () => {
+describe("Amazon Bedrock VPC Endpoint Functionality", () => {
 	beforeEach(() => {
 		// Clear all mocks before each test
 		vi.clearAllMocks()

+ 17 - 9
src/api/providers/bedrock.ts

@@ -215,14 +215,22 @@ export class AwsBedrockHandler extends BaseProvider implements SingleCompletionH
 
 		this.costModelConfig = this.getModel()
 
-		const clientConfig: BedrockRuntimeClientConfig = {
+		// Extended type to support custom authentication properties
+		const clientConfig: BedrockRuntimeClientConfig & {
+			token?: { token: string }
+			authSchemePreference?: string[]
+		} = {
 			region: this.options.awsRegion,
 			// Add the endpoint configuration when specified and enabled
 			...(this.options.awsBedrockEndpoint &&
 				this.options.awsBedrockEndpointEnabled && { endpoint: this.options.awsBedrockEndpoint }),
 		}
 
-		if (this.options.awsUseProfile && this.options.awsProfile) {
+		if (this.options.awsUseApiKey && this.options.awsApiKey) {
+			// Use API key/token-based authentication if enabled and API key is set
+			clientConfig.token = { token: this.options.awsApiKey }
+			clientConfig.authSchemePreference = ["httpBearerAuth"] // Otherwise there's no end of credential problems.
+		} else if (this.options.awsUseProfile && this.options.awsProfile) {
 			// Use profile-based credentials if enabled and profile is set
 			clientConfig.credentials = fromIni({
 				profile: this.options.awsProfile,
@@ -1078,7 +1086,7 @@ Please verify:
 				"throttl",
 				"rate",
 				"limit",
-				"bedrock is unable to process your request", // AWS Bedrock specific throttling message
+				"bedrock is unable to process your request", // Amazon Bedrock specific throttling message
 				"please wait",
 				"quota exceeded",
 				"service unavailable",
@@ -1124,7 +1132,7 @@ Suggestions:
 Please try:
 1. Contact AWS support to request a quota increase
 2. Reduce request frequency temporarily
-3. Check your AWS Bedrock quotas in the AWS console
+3. Check your Amazon Bedrock quotas in the AWS console
 4. Consider using a different model or region with available capacity
 
 `,
@@ -1139,7 +1147,7 @@ Please try:
 
 Please try:
 1. Wait a few minutes and retry
-2. Check the model status in AWS Bedrock console
+2. Check the model status in Amazon Bedrock console
 3. Verify the model is properly provisioned
 
 `,
@@ -1147,7 +1155,7 @@ Please try:
 		},
 		INTERNAL_SERVER_ERROR: {
 			patterns: ["internal server error", "internal error", "server error", "service error"],
-			messageTemplate: `AWS Bedrock internal server error. This is a temporary service issue.
+			messageTemplate: `Amazon Bedrock internal server error. This is a temporary service issue.
 
 Please try:
 1. Retry the request after a brief delay
@@ -1184,7 +1192,7 @@ Please try:
 			],
 			messageTemplate: `Parameter validation error: {errorMessage}
 
-This error indicates that the request parameters don't match AWS Bedrock's expected format.
+This error indicates that the request parameters don't match Amazon Bedrock's expected format.
 
 Common causes:
 1. Extended thinking parameter format is incorrect
@@ -1193,7 +1201,7 @@ Common causes:
 
 Please check:
 - Model supports the requested features (extended thinking, etc.)
-- Parameter format matches AWS Bedrock specification
+- Parameter format matches Amazon Bedrock specification
 - Model ID is correct for the requested features`,
 			logLevel: "error",
 		},
@@ -1218,7 +1226,7 @@ Please check:
 			return "THROTTLING"
 		}
 
-		// Check for AWS Bedrock specific throttling exception names
+		// Check for Amazon Bedrock specific throttling exception names
 		if ((error as any).name === "ThrottlingException" || (error as any).__type === "ThrottlingException") {
 			return "THROTTLING"
 		}
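
The first hunk above adds bearer-token authentication; a hedged configuration sketch (the import path and environment variable are assumptions, and the key value is a placeholder): when awsUseApiKey is enabled and awsApiKey is set, the handler sends the key as a bearer token with authSchemePreference ["httpBearerAuth"] instead of resolving SigV4 credentials.

import { AwsBedrockHandler } from "./bedrock" // path assumed, relative to src/api/providers

const handler = new AwsBedrockHandler({
	apiProvider: "bedrock",
	apiModelId: "anthropic.claude-3-5-sonnet-20241022-v2:0",
	awsRegion: "us-east-1",
	awsUseApiKey: true, // prefer token auth over profile or static credentials
	awsApiKey: process.env.AWS_BEDROCK_API_KEY ?? "", // placeholder, never hard-code real keys
})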

+ 290 - 0
src/api/providers/fetchers/huggingface.ts

@@ -0,0 +1,290 @@
+import axios from "axios"
+import { z } from "zod"
+import type { ModelInfo } from "@roo-code/types"
+import {
+	HUGGINGFACE_API_URL,
+	HUGGINGFACE_CACHE_DURATION,
+	HUGGINGFACE_DEFAULT_MAX_TOKENS,
+	HUGGINGFACE_DEFAULT_CONTEXT_WINDOW,
+} from "@roo-code/types"
+import type { ModelRecord } from "../../../shared/api"
+
+/**
+ * HuggingFace Provider Schema
+ */
+const huggingFaceProviderSchema = z.object({
+	provider: z.string(),
+	status: z.enum(["live", "staging", "error"]),
+	supports_tools: z.boolean().optional(),
+	supports_structured_output: z.boolean().optional(),
+	context_length: z.number().optional(),
+	pricing: z
+		.object({
+			input: z.number(),
+			output: z.number(),
+		})
+		.optional(),
+})
+
+/**
+ * Represents a provider that can serve a HuggingFace model
+ * @property provider - The provider identifier (e.g., "sambanova", "together")
+ * @property status - The current status of the provider
+ * @property supports_tools - Whether the provider supports tool/function calling
+ * @property supports_structured_output - Whether the provider supports structured output
+ * @property context_length - The maximum context length supported by this provider
+ * @property pricing - The pricing information for input/output tokens
+ */
+export type HuggingFaceProvider = z.infer<typeof huggingFaceProviderSchema>
+
+/**
+ * HuggingFace Model Schema
+ */
+const huggingFaceModelSchema = z.object({
+	id: z.string(),
+	object: z.literal("model"),
+	created: z.number(),
+	owned_by: z.string(),
+	providers: z.array(huggingFaceProviderSchema),
+})
+
+/**
+ * Represents a HuggingFace model available through the router API
+ * @property id - The unique identifier of the model
+ * @property object - The object type (always "model")
+ * @property created - Unix timestamp of when the model was created
+ * @property owned_by - The organization that owns the model
+ * @property providers - List of providers that can serve this model
+ */
+export type HuggingFaceModel = z.infer<typeof huggingFaceModelSchema>
+
+/**
+ * HuggingFace API Response Schema
+ */
+const huggingFaceApiResponseSchema = z.object({
+	object: z.string(),
+	data: z.array(huggingFaceModelSchema),
+})
+
+/**
+ * Represents the response from the HuggingFace router API
+ * @property object - The response object type
+ * @property data - Array of available models
+ */
+type HuggingFaceApiResponse = z.infer<typeof huggingFaceApiResponseSchema>
+
+/**
+ * Cache entry for storing fetched models
+ * @property data - The cached model records
+ * @property timestamp - Unix timestamp of when the cache was last updated
+ */
+interface CacheEntry {
+	data: ModelRecord
+	rawModels?: HuggingFaceModel[]
+	timestamp: number
+}
+
+let cache: CacheEntry | null = null
+
+/**
+ * Parse a HuggingFace model into ModelInfo format
+ * @param model - The HuggingFace model to parse
+ * @param provider - Optional specific provider to use for capabilities
+ * @returns ModelInfo object compatible with the application's model system
+ */
+function parseHuggingFaceModel(model: HuggingFaceModel, provider?: HuggingFaceProvider): ModelInfo {
+	// Use provider-specific values if available, otherwise find first provider with values
+	const contextLength =
+		provider?.context_length ||
+		model.providers.find((p) => p.context_length)?.context_length ||
+		HUGGINGFACE_DEFAULT_CONTEXT_WINDOW
+
+	const pricing = provider?.pricing || model.providers.find((p) => p.pricing)?.pricing
+
+	// Include provider name in description if specific provider is given
+	const description = provider ? `${model.id} via ${provider.provider}` : `${model.id} via HuggingFace`
+
+	return {
+		maxTokens: Math.min(contextLength, HUGGINGFACE_DEFAULT_MAX_TOKENS),
+		contextWindow: contextLength,
+		supportsImages: false, // HuggingFace API doesn't provide this info yet
+		supportsPromptCache: false,
+		supportsComputerUse: false,
+		inputPrice: pricing?.input,
+		outputPrice: pricing?.output,
+		description,
+	}
+}
+
+/**
+ * Fetches available models from HuggingFace
+ *
+ * @returns A promise that resolves to a record of model IDs to model info
+ * @throws Will throw an error if the request fails
+ */
+export async function getHuggingFaceModels(): Promise<ModelRecord> {
+	const now = Date.now()
+
+	// Check cache
+	if (cache && now - cache.timestamp < HUGGINGFACE_CACHE_DURATION) {
+		return cache.data
+	}
+
+	const models: ModelRecord = {}
+
+	try {
+		const response = await axios.get<HuggingFaceApiResponse>(HUGGINGFACE_API_URL, {
+			headers: {
+				"Upgrade-Insecure-Requests": "1",
+				"Sec-Fetch-Dest": "document",
+				"Sec-Fetch-Mode": "navigate",
+				"Sec-Fetch-Site": "none",
+				"Sec-Fetch-User": "?1",
+				Priority: "u=0, i",
+				Pragma: "no-cache",
+				"Cache-Control": "no-cache",
+			},
+			timeout: 10000, // 10 second timeout
+		})
+
+		const result = huggingFaceApiResponseSchema.safeParse(response.data)
+
+		if (!result.success) {
+			console.error("HuggingFace models response validation failed:", result.error.format())
+			throw new Error("Invalid response format from HuggingFace API")
+		}
+
+		const validModels = result.data.data.filter((model) => model.providers.length > 0)
+
+		for (const model of validModels) {
+			// Add the base model
+			models[model.id] = parseHuggingFaceModel(model)
+
+			// Add provider-specific variants for all live providers
+			for (const provider of model.providers) {
+				if (provider.status === "live") {
+					const providerKey = `${model.id}:${provider.provider}`
+					const providerModel = parseHuggingFaceModel(model, provider)
+
+					// Always add provider variants to show all available providers
+					models[providerKey] = providerModel
+				}
+			}
+		}
+
+		// Update cache
+		cache = {
+			data: models,
+			rawModels: validModels,
+			timestamp: now,
+		}
+
+		return models
+	} catch (error) {
+		console.error("Error fetching HuggingFace models:", error)
+
+		// Return cached data if available
+		if (cache) {
+			return cache.data
+		}
+
+		// Re-throw with more context
+		if (axios.isAxiosError(error)) {
+			if (error.response) {
+				throw new Error(
+					`Failed to fetch HuggingFace models: ${error.response.status} ${error.response.statusText}`,
+				)
+			} else if (error.request) {
+				throw new Error(
+					"Failed to fetch HuggingFace models: No response from server. Check your internet connection.",
+				)
+			}
+		}
+
+		throw new Error(
+			`Failed to fetch HuggingFace models: ${error instanceof Error ? error.message : "Unknown error"}`,
+		)
+	}
+}
+
+/**
+ * Get cached models without making an API request
+ */
+export function getCachedHuggingFaceModels(): ModelRecord | null {
+	return cache?.data || null
+}
+
+/**
+ * Get cached raw models for UI display
+ */
+export function getCachedRawHuggingFaceModels(): HuggingFaceModel[] | null {
+	return cache?.rawModels || null
+}
+
+/**
+ * Clear the cache
+ */
+export function clearHuggingFaceCache(): void {
+	cache = null
+}
+
+/**
+ * HuggingFace Models Response Interface
+ */
+export interface HuggingFaceModelsResponse {
+	models: HuggingFaceModel[]
+	cached: boolean
+	timestamp: number
+}
+
+/**
+ * Get HuggingFace models with response metadata
+ * This function provides a higher-level API that includes cache status and timestamp
+ */
+export async function getHuggingFaceModelsWithMetadata(): Promise<HuggingFaceModelsResponse> {
+	try {
+		// First, trigger the fetch to populate cache
+		await getHuggingFaceModels()
+
+		// Get the raw models from cache
+		const cachedRawModels = getCachedRawHuggingFaceModels()
+
+		if (cachedRawModels) {
+			return {
+				models: cachedRawModels,
+				cached: true,
+				timestamp: Date.now(),
+			}
+		}
+
+		// If no cached raw models, fetch directly from API
+		const response = await axios.get(HUGGINGFACE_API_URL, {
+			headers: {
+				"Upgrade-Insecure-Requests": "1",
+				"Sec-Fetch-Dest": "document",
+				"Sec-Fetch-Mode": "navigate",
+				"Sec-Fetch-Site": "none",
+				"Sec-Fetch-User": "?1",
+				Priority: "u=0, i",
+				Pragma: "no-cache",
+				"Cache-Control": "no-cache",
+			},
+			timeout: 10000,
+		})
+
+		const models = response.data?.data || []
+
+		return {
+			models,
+			cached: false,
+			timestamp: Date.now(),
+		}
+	} catch (error) {
+		console.error("Failed to get HuggingFace models:", error)
+		return {
+			models: [],
+			cached: false,
+			timestamp: Date.now(),
+		}
+	}
+}
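
A short usage sketch, assuming it is called from code that lives next to this fetcher (the relative import path and the logging are illustrative): the first call hits the router API and fills the module-level cache, repeated calls within the one-hour window are served from that cache, and the synchronous accessor can be used afterwards.

import { getHuggingFaceModels, getCachedHuggingFaceModels } from "./huggingface" // path assumed

async function listHuggingFaceModels() {
	// First call fetches from HUGGINGFACE_API_URL (falling back to cached data on failure).
	const models = await getHuggingFaceModels()
	console.log(`Fetched ${Object.keys(models).length} HuggingFace models`)

	// Later reads can use the cache without another network request.
	const cached = getCachedHuggingFaceModels()
	console.log(`Cache populated: ${cached !== null}`)
}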

+ 132 - 0
src/api/providers/huggingface.ts

@@ -0,0 +1,132 @@
+import OpenAI from "openai"
+import { Anthropic } from "@anthropic-ai/sdk"
+
+import type { ApiHandlerOptions, ModelRecord } from "../../shared/api"
+import { ApiStream } from "../transform/stream"
+import { convertToOpenAiMessages } from "../transform/openai-format"
+import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
+import { DEFAULT_HEADERS } from "./constants"
+import { BaseProvider } from "./base-provider"
+import { getHuggingFaceModels, getCachedHuggingFaceModels } from "./fetchers/huggingface"
+
+export class HuggingFaceHandler extends BaseProvider implements SingleCompletionHandler {
+	private client: OpenAI
+	private options: ApiHandlerOptions
+	private modelCache: ModelRecord | null = null
+
+	constructor(options: ApiHandlerOptions) {
+		super()
+		this.options = options
+
+		if (!this.options.huggingFaceApiKey) {
+			throw new Error("Hugging Face API key is required")
+		}
+
+		this.client = new OpenAI({
+			baseURL: "https://router.huggingface.co/v1",
+			apiKey: this.options.huggingFaceApiKey,
+			defaultHeaders: DEFAULT_HEADERS,
+		})
+
+		// Try to get cached models first
+		this.modelCache = getCachedHuggingFaceModels()
+
+		// Fetch models asynchronously
+		this.fetchModels()
+	}
+
+	private async fetchModels() {
+		try {
+			this.modelCache = await getHuggingFaceModels()
+		} catch (error) {
+			console.error("Failed to fetch HuggingFace models:", error)
+		}
+	}
+
+	override async *createMessage(
+		systemPrompt: string,
+		messages: Anthropic.Messages.MessageParam[],
+		metadata?: ApiHandlerCreateMessageMetadata,
+	): ApiStream {
+		const modelId = this.options.huggingFaceModelId || "meta-llama/Llama-3.3-70B-Instruct"
+		const temperature = this.options.modelTemperature ?? 0.7
+
+		const params: OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming = {
+			model: modelId,
+			temperature,
+			messages: [{ role: "system", content: systemPrompt }, ...convertToOpenAiMessages(messages)],
+			stream: true,
+			stream_options: { include_usage: true },
+		}
+
+		// Add max_tokens if specified
+		if (this.options.includeMaxTokens && this.options.modelMaxTokens) {
+			params.max_tokens = this.options.modelMaxTokens
+		}
+
+		const stream = await this.client.chat.completions.create(params)
+
+		for await (const chunk of stream) {
+			const delta = chunk.choices[0]?.delta
+
+			if (delta?.content) {
+				yield {
+					type: "text",
+					text: delta.content,
+				}
+			}
+
+			if (chunk.usage) {
+				yield {
+					type: "usage",
+					inputTokens: chunk.usage.prompt_tokens || 0,
+					outputTokens: chunk.usage.completion_tokens || 0,
+				}
+			}
+		}
+	}
+
+	async completePrompt(prompt: string): Promise<string> {
+		const modelId = this.options.huggingFaceModelId || "meta-llama/Llama-3.3-70B-Instruct"
+
+		try {
+			const response = await this.client.chat.completions.create({
+				model: modelId,
+				messages: [{ role: "user", content: prompt }],
+			})
+
+			return response.choices[0]?.message.content || ""
+		} catch (error) {
+			if (error instanceof Error) {
+				throw new Error(`Hugging Face completion error: ${error.message}`)
+			}
+
+			throw error
+		}
+	}
+
+	override getModel() {
+		const modelId = this.options.huggingFaceModelId || "meta-llama/Llama-3.3-70B-Instruct"
+
+		// Try to get model info from cache
+		const modelInfo = this.modelCache?.[modelId]
+
+		if (modelInfo) {
+			return {
+				id: modelId,
+				info: modelInfo,
+			}
+		}
+
+		// Fallback to default values if model not found in cache
+		return {
+			id: modelId,
+			info: {
+				maxTokens: 8192,
+				contextWindow: 131072,
+				supportsImages: false,
+				supportsPromptCache: false,
+			},
+		}
+	}
+}
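
A minimal streaming sketch for the new handler (the import path is assumed and the token is a placeholder; the model id shown is the handler's built-in default): createMessage yields text chunks as they arrive, followed by a usage chunk when the provider reports token counts.

import { HuggingFaceHandler } from "./huggingface" // path assumed, relative to src/api/providers

async function demo() {
	const handler = new HuggingFaceHandler({
		huggingFaceApiKey: process.env.HF_TOKEN ?? "hf_placeholder", // placeholder token
		huggingFaceModelId: "meta-llama/Llama-3.3-70B-Instruct",
	})

	for await (const chunk of handler.createMessage("You are a helpful assistant.", [
		{ role: "user", content: "Say hello" },
	])) {
		if (chunk.type === "text") process.stdout.write(chunk.text)
		if (chunk.type === "usage") console.log(`\n${chunk.inputTokens} in, ${chunk.outputTokens} out`)
	}
}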

+ 1 - 0
src/api/providers/index.ts

@@ -11,6 +11,7 @@ export { GeminiHandler } from "./gemini"
 export { GeminiCliHandler } from "./gemini-cli"
 export { GlamaHandler } from "./glama"
 export { GroqHandler } from "./groq"
+export { HuggingFaceHandler } from "./huggingface"
 export { HumanRelayHandler } from "./human-relay"
 export { LiteLLMHandler } from "./lite-llm"
 export { LmStudioHandler } from "./lm-studio"

+ 58 - 1
src/core/config/CustomModesManager.ts

@@ -1,6 +1,7 @@
 import * as vscode from "vscode"
 import * as path from "path"
 import * as fs from "fs/promises"
+import * as os from "os"
 
 import * as yaml from "yaml"
 import stripBom from "strip-bom"
@@ -501,7 +502,7 @@ export class CustomModesManager {
 		await this.onUpdate()
 	}
 
-	public async deleteCustomMode(slug: string): Promise<void> {
+	public async deleteCustomMode(slug: string, fromMarketplace = false): Promise<void> {
 		try {
 			const settingsPath = await this.getCustomModesFilePath()
 			const roomodesPath = await this.getWorkspaceRoomodes()
@@ -517,6 +518,9 @@ export class CustomModesManager {
 				throw new Error(t("common:customModes.errors.modeNotFound"))
 			}
 
+			// Determine which mode to use for rules folder path calculation
+			const modeToDelete = projectMode || globalMode
+
 			await this.queueWrite(async () => {
 				// Delete from project first if it exists there
 				if (projectMode && roomodesPath) {
@@ -528,6 +532,11 @@ export class CustomModesManager {
 					await this.updateModesInFile(settingsPath, (modes) => modes.filter((m) => m.slug !== slug))
 				}
 
+				// Delete associated rules folder
+				if (modeToDelete) {
+					await this.deleteRulesFolder(slug, modeToDelete, fromMarketplace)
+				}
+
 				// Clear cache when modes are deleted
 				this.clearCache()
 				await this.refreshMergedState()
@@ -538,6 +547,54 @@ export class CustomModesManager {
 		}
 	}
 
+	/**
+	 * Deletes the rules folder for a specific mode
+	 * @param slug - The mode slug
+	 * @param mode - The mode configuration to determine the scope
+	 */
+	private async deleteRulesFolder(slug: string, mode: ModeConfig, fromMarketplace = false): Promise<void> {
+		try {
+			// Determine the scope based on source (project or global)
+			const scope = mode.source || "global"
+
+			// Determine the rules folder path
+			let rulesFolderPath: string
+			if (scope === "project") {
+				const workspacePath = getWorkspacePath()
+				if (workspacePath) {
+					rulesFolderPath = path.join(workspacePath, ".roo", `rules-${slug}`)
+				} else {
+					return // No workspace, can't delete project rules
+				}
+			} else {
+				// Global scope - use OS home directory
+				const homeDir = os.homedir()
+				rulesFolderPath = path.join(homeDir, ".roo", `rules-${slug}`)
+			}
+
+			// Check if the rules folder exists and delete it
+			const rulesFolderExists = await fileExistsAtPath(rulesFolderPath)
+			if (rulesFolderExists) {
+				try {
+					await fs.rm(rulesFolderPath, { recursive: true, force: true })
+					logger.info(`Deleted rules folder for mode ${slug}: ${rulesFolderPath}`)
+				} catch (error) {
+					logger.error(`Failed to delete rules folder for mode ${slug}: ${error}`)
+					// Notify the user about the failure
+					const messageKey = fromMarketplace
+						? "common:marketplace.mode.rulesCleanupFailed"
+						: "common:customModes.errors.rulesCleanupFailed"
+					vscode.window.showWarningMessage(t(messageKey, { rulesFolderPath }))
+					// Continue even if folder deletion fails
+				}
+			}
+		} catch (error) {
+			logger.error(`Error deleting rules folder for mode ${slug}`, {
+				error: error instanceof Error ? error.message : String(error),
+			})
+		}
+	}
+
 	public async resetCustomModes(): Promise<void> {
 		try {
 			const filePath = await this.getCustomModesFilePath()
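
The deleteCustomMode change above now also removes the mode's rules folder; a hedged usage sketch (the import path, helper function, and slug are illustrative): passing true for the new fromMarketplace flag only changes which warning is shown if that folder cleanup fails.

import { CustomModesManager } from "./CustomModesManager" // path assumed

async function removeMarketplaceMode(manager: CustomModesManager) {
	// Deletes the mode from .roomodes or the global settings file and, in addition,
	// removes the matching .roo/rules-<slug> folder (project or home directory).
	await manager.deleteCustomMode("my-marketplace-mode", true)
}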

+ 353 - 0
src/core/mentions/__tests__/processUserContentMentions.spec.ts

@@ -0,0 +1,353 @@
+// npx vitest core/mentions/__tests__/processUserContentMentions.spec.ts
+
+import { describe, it, expect, vi, beforeEach } from "vitest"
+import { processUserContentMentions } from "../processUserContentMentions"
+import { parseMentions } from "../index"
+import { UrlContentFetcher } from "../../../services/browser/UrlContentFetcher"
+import { FileContextTracker } from "../../context-tracking/FileContextTracker"
+
+// Mock the parseMentions function
+vi.mock("../index", () => ({
+	parseMentions: vi.fn(),
+}))
+
+describe("processUserContentMentions", () => {
+	let mockUrlContentFetcher: UrlContentFetcher
+	let mockFileContextTracker: FileContextTracker
+	let mockRooIgnoreController: any
+
+	beforeEach(() => {
+		vi.clearAllMocks()
+
+		mockUrlContentFetcher = {} as UrlContentFetcher
+		mockFileContextTracker = {} as FileContextTracker
+		mockRooIgnoreController = {}
+
+		// Default mock implementation
+		vi.mocked(parseMentions).mockImplementation(async (text) => `parsed: ${text}`)
+	})
+
+	describe("maxReadFileLine parameter", () => {
+		it("should pass maxReadFileLine to parseMentions when provided", async () => {
+			const userContent = [
+				{
+					type: "text" as const,
+					text: "<task>Read file with limit</task>",
+				},
+			]
+
+			await processUserContentMentions({
+				userContent,
+				cwd: "/test",
+				urlContentFetcher: mockUrlContentFetcher,
+				fileContextTracker: mockFileContextTracker,
+				rooIgnoreController: mockRooIgnoreController,
+				maxReadFileLine: 100,
+			})
+
+			expect(parseMentions).toHaveBeenCalledWith(
+				"<task>Read file with limit</task>",
+				"/test",
+				mockUrlContentFetcher,
+				mockFileContextTracker,
+				mockRooIgnoreController,
+				true,
+				true, // includeDiagnosticMessages
+				50, // maxDiagnosticMessages
+				100,
+			)
+		})
+
+		it("should pass undefined maxReadFileLine when not provided", async () => {
+			const userContent = [
+				{
+					type: "text" as const,
+					text: "<task>Read file without limit</task>",
+				},
+			]
+
+			await processUserContentMentions({
+				userContent,
+				cwd: "/test",
+				urlContentFetcher: mockUrlContentFetcher,
+				fileContextTracker: mockFileContextTracker,
+				rooIgnoreController: mockRooIgnoreController,
+			})
+
+			expect(parseMentions).toHaveBeenCalledWith(
+				"<task>Read file without limit</task>",
+				"/test",
+				mockUrlContentFetcher,
+				mockFileContextTracker,
+				mockRooIgnoreController,
+				true,
+				true, // includeDiagnosticMessages
+				50, // maxDiagnosticMessages
+				undefined,
+			)
+		})
+
+		it("should handle UNLIMITED_LINES constant correctly", async () => {
+			const userContent = [
+				{
+					type: "text" as const,
+					text: "<task>Read unlimited lines</task>",
+				},
+			]
+
+			await processUserContentMentions({
+				userContent,
+				cwd: "/test",
+				urlContentFetcher: mockUrlContentFetcher,
+				fileContextTracker: mockFileContextTracker,
+				rooIgnoreController: mockRooIgnoreController,
+				maxReadFileLine: -1,
+			})
+
+			expect(parseMentions).toHaveBeenCalledWith(
+				"<task>Read unlimited lines</task>",
+				"/test",
+				mockUrlContentFetcher,
+				mockFileContextTracker,
+				mockRooIgnoreController,
+				true,
+				true, // includeDiagnosticMessages
+				50, // maxDiagnosticMessages
+				-1,
+			)
+		})
+	})
+
+	describe("content processing", () => {
+		it("should process text blocks with <task> tags", async () => {
+			const userContent = [
+				{
+					type: "text" as const,
+					text: "<task>Do something</task>",
+				},
+			]
+
+			const result = await processUserContentMentions({
+				userContent,
+				cwd: "/test",
+				urlContentFetcher: mockUrlContentFetcher,
+				fileContextTracker: mockFileContextTracker,
+			})
+
+			expect(parseMentions).toHaveBeenCalled()
+			expect(result[0]).toEqual({
+				type: "text",
+				text: "parsed: <task>Do something</task>",
+			})
+		})
+
+		it("should process text blocks with <feedback> tags", async () => {
+			const userContent = [
+				{
+					type: "text" as const,
+					text: "<feedback>Fix this issue</feedback>",
+				},
+			]
+
+			const result = await processUserContentMentions({
+				userContent,
+				cwd: "/test",
+				urlContentFetcher: mockUrlContentFetcher,
+				fileContextTracker: mockFileContextTracker,
+			})
+
+			expect(parseMentions).toHaveBeenCalled()
+			expect(result[0]).toEqual({
+				type: "text",
+				text: "parsed: <feedback>Fix this issue</feedback>",
+			})
+		})
+
+		it("should not process text blocks without task or feedback tags", async () => {
+			const userContent = [
+				{
+					type: "text" as const,
+					text: "Regular text without special tags",
+				},
+			]
+
+			const result = await processUserContentMentions({
+				userContent,
+				cwd: "/test",
+				urlContentFetcher: mockUrlContentFetcher,
+				fileContextTracker: mockFileContextTracker,
+			})
+
+			expect(parseMentions).not.toHaveBeenCalled()
+			expect(result[0]).toEqual(userContent[0])
+		})
+
+		it("should process tool_result blocks with string content", async () => {
+			const userContent = [
+				{
+					type: "tool_result" as const,
+					tool_use_id: "123",
+					content: "<feedback>Tool feedback</feedback>",
+				},
+			]
+
+			const result = await processUserContentMentions({
+				userContent,
+				cwd: "/test",
+				urlContentFetcher: mockUrlContentFetcher,
+				fileContextTracker: mockFileContextTracker,
+			})
+
+			expect(parseMentions).toHaveBeenCalled()
+			expect(result[0]).toEqual({
+				type: "tool_result",
+				tool_use_id: "123",
+				content: "parsed: <feedback>Tool feedback</feedback>",
+			})
+		})
+
+		it("should process tool_result blocks with array content", async () => {
+			const userContent = [
+				{
+					type: "tool_result" as const,
+					tool_use_id: "123",
+					content: [
+						{
+							type: "text" as const,
+							text: "<task>Array task</task>",
+						},
+						{
+							type: "text" as const,
+							text: "Regular text",
+						},
+					],
+				},
+			]
+
+			const result = await processUserContentMentions({
+				userContent,
+				cwd: "/test",
+				urlContentFetcher: mockUrlContentFetcher,
+				fileContextTracker: mockFileContextTracker,
+			})
+
+			expect(parseMentions).toHaveBeenCalledTimes(1)
+			expect(result[0]).toEqual({
+				type: "tool_result",
+				tool_use_id: "123",
+				content: [
+					{
+						type: "text",
+						text: "parsed: <task>Array task</task>",
+					},
+					{
+						type: "text",
+						text: "Regular text",
+					},
+				],
+			})
+		})
+
+		it("should handle mixed content types", async () => {
+			const userContent = [
+				{
+					type: "text" as const,
+					text: "<task>First task</task>",
+				},
+				{
+					type: "image" as const,
+					source: {
+						type: "base64" as const,
+						media_type: "image/png" as const,
+						data: "base64data",
+					},
+				},
+				{
+					type: "tool_result" as const,
+					tool_use_id: "456",
+					content: "<feedback>Feedback</feedback>",
+				},
+			]
+
+			const result = await processUserContentMentions({
+				userContent,
+				cwd: "/test",
+				urlContentFetcher: mockUrlContentFetcher,
+				fileContextTracker: mockFileContextTracker,
+				maxReadFileLine: 50,
+			})
+
+			expect(parseMentions).toHaveBeenCalledTimes(2)
+			expect(result).toHaveLength(3)
+			expect(result[0]).toEqual({
+				type: "text",
+				text: "parsed: <task>First task</task>",
+			})
+			expect(result[1]).toEqual(userContent[1]) // Image block unchanged
+			expect(result[2]).toEqual({
+				type: "tool_result",
+				tool_use_id: "456",
+				content: "parsed: <feedback>Feedback</feedback>",
+			})
+		})
+	})
+
+	describe("showRooIgnoredFiles parameter", () => {
+		it("should default showRooIgnoredFiles to true", async () => {
+			const userContent = [
+				{
+					type: "text" as const,
+					text: "<task>Test default</task>",
+				},
+			]
+
+			await processUserContentMentions({
+				userContent,
+				cwd: "/test",
+				urlContentFetcher: mockUrlContentFetcher,
+				fileContextTracker: mockFileContextTracker,
+			})
+
+			expect(parseMentions).toHaveBeenCalledWith(
+				"<task>Test default</task>",
+				"/test",
+				mockUrlContentFetcher,
+				mockFileContextTracker,
+				undefined,
+				true, // showRooIgnoredFiles should default to true
+				true, // includeDiagnosticMessages
+				50, // maxDiagnosticMessages
+				undefined,
+			)
+		})
+
+		it("should respect showRooIgnoredFiles when explicitly set to false", async () => {
+			const userContent = [
+				{
+					type: "text" as const,
+					text: "<task>Test explicit false</task>",
+				},
+			]
+
+			await processUserContentMentions({
+				userContent,
+				cwd: "/test",
+				urlContentFetcher: mockUrlContentFetcher,
+				fileContextTracker: mockFileContextTracker,
+				showRooIgnoredFiles: false,
+			})
+
+			expect(parseMentions).toHaveBeenCalledWith(
+				"<task>Test explicit false</task>",
+				"/test",
+				mockUrlContentFetcher,
+				mockFileContextTracker,
+				undefined,
+				false,
+				true, // includeDiagnosticMessages
+				50, // maxDiagnosticMessages
+				undefined,
+			)
+		})
+	})
+})

+ 21 - 5
src/core/mentions/index.ts

@@ -80,6 +80,9 @@ export async function parseMentions(
 	fileContextTracker?: FileContextTracker,
 	rooIgnoreController?: RooIgnoreController,
 	showRooIgnoredFiles: boolean = true,
+	includeDiagnosticMessages: boolean = true,
+	maxDiagnosticMessages: number = 50,
+	maxReadFileLine?: number,
 ): Promise<string> {
 	const mentions: Set<string> = new Set()
 	let parsedText = text.replace(mentionRegexGlobal, (match, mention) => {
@@ -147,7 +150,13 @@ export async function parseMentions(
 		} else if (mention.startsWith("/")) {
 			const mentionPath = mention.slice(1)
 			try {
-				const content = await getFileOrFolderContent(mentionPath, cwd, rooIgnoreController, showRooIgnoredFiles)
+				const content = await getFileOrFolderContent(
+					mentionPath,
+					cwd,
+					rooIgnoreController,
+					showRooIgnoredFiles,
+					maxReadFileLine,
+				)
 				if (mention.endsWith("/")) {
 					parsedText += `\n\n<folder_content path="${mentionPath}">\n${content}\n</folder_content>`
 				} else {
@@ -165,7 +174,7 @@ export async function parseMentions(
 			}
 		} else if (mention === "problems") {
 			try {
-				const problems = await getWorkspaceProblems(cwd)
+				const problems = await getWorkspaceProblems(cwd, includeDiagnosticMessages, maxDiagnosticMessages)
 				parsedText += `\n\n<workspace_diagnostics>\n${problems}\n</workspace_diagnostics>`
 			} catch (error) {
 				parsedText += `\n\n<workspace_diagnostics>\nError fetching diagnostics: ${error.message}\n</workspace_diagnostics>`
@@ -210,6 +219,7 @@ async function getFileOrFolderContent(
 	cwd: string,
 	rooIgnoreController?: any,
 	showRooIgnoredFiles: boolean = true,
+	maxReadFileLine?: number,
 ): Promise<string> {
 	const unescapedPath = unescapeSpaces(mentionPath)
 	const absPath = path.resolve(cwd, unescapedPath)
@@ -222,7 +232,7 @@ async function getFileOrFolderContent(
 				return `(File ${mentionPath} is ignored by .kilocodeignore)`
 			}
 			try {
-				const content = await extractTextFromFile(absPath)
+				const content = await extractTextFromFile(absPath, maxReadFileLine)
 				return content
 			} catch (error) {
 				return `(Failed to read contents of ${mentionPath}): ${error.message}`
@@ -262,7 +272,7 @@ async function getFileOrFolderContent(
 									if (isBinary) {
 										return undefined
 									}
-									const content = await extractTextFromFile(absoluteFilePath)
+									const content = await extractTextFromFile(absoluteFilePath, maxReadFileLine)
 									return `<file_content path="${filePath.toPosix()}">\n${content}\n</file_content>`
 								} catch (error) {
 									return undefined
@@ -286,12 +296,18 @@ async function getFileOrFolderContent(
 	}
 }
 
-async function getWorkspaceProblems(cwd: string): Promise<string> {
+async function getWorkspaceProblems(
+	cwd: string,
+	includeDiagnosticMessages: boolean = true,
+	maxDiagnosticMessages: number = 50,
+): Promise<string> {
 	const diagnostics = vscode.languages.getDiagnostics()
 	const result = await diagnosticsToProblemsString(
 		diagnostics,
 		[vscode.DiagnosticSeverity.Error, vscode.DiagnosticSeverity.Warning],
 		cwd,
+		includeDiagnosticMessages,
+		maxDiagnosticMessages,
 	)
 	if (!result) {
 		return "No errors or warnings detected."

+ 15 - 0
src/core/mentions/processKiloUserContentMentions.ts

@@ -20,6 +20,9 @@ export async function processKiloUserContentMentions({
 	fileContextTracker,
 	rooIgnoreController,
 	showRooIgnoredFiles = true,
+	includeDiagnosticMessages = true,
+	maxDiagnosticMessages = 50,
+	maxReadFileLine,
 }: {
 	context: vscode.ExtensionContext // kilocode_change
 	userContent: Anthropic.Messages.ContentBlockParam[]
@@ -28,6 +31,9 @@ export async function processKiloUserContentMentions({
 	fileContextTracker: FileContextTracker
 	rooIgnoreController?: any
 	showRooIgnoredFiles: boolean
+	includeDiagnosticMessages?: boolean
+	maxDiagnosticMessages?: number
+	maxReadFileLine?: number
 }): Promise<[Anthropic.Messages.ContentBlockParam[], boolean]> {
 	// Track if we need to check kilorules file
 	let needsRulesFileCheck = false
@@ -63,6 +69,9 @@ export async function processKiloUserContentMentions({
 							fileContextTracker,
 							rooIgnoreController,
 							showRooIgnoredFiles,
+							includeDiagnosticMessages,
+							maxDiagnosticMessages,
+							maxReadFileLine,
 						)
 
 						// when parsing slash commands, we still want to allow the user to provide their desired context
@@ -96,6 +105,9 @@ export async function processKiloUserContentMentions({
 									fileContextTracker,
 									rooIgnoreController,
 									showRooIgnoredFiles,
+									includeDiagnosticMessages,
+									maxDiagnosticMessages,
+									maxReadFileLine,
 								),
 							}
 						}
@@ -114,6 +126,9 @@ export async function processKiloUserContentMentions({
 											fileContextTracker,
 											rooIgnoreController,
 											showRooIgnoredFiles,
+											includeDiagnosticMessages,
+											maxDiagnosticMessages,
+											maxReadFileLine,
 										),
 									}
 								}

+ 15 - 0
src/core/mentions/processUserContentMentions.ts

@@ -13,6 +13,9 @@ export async function processUserContentMentions({
 	fileContextTracker,
 	rooIgnoreController,
 	showRooIgnoredFiles = true,
+	includeDiagnosticMessages = true,
+	maxDiagnosticMessages = 50,
+	maxReadFileLine,
 }: {
 	userContent: Anthropic.Messages.ContentBlockParam[]
 	cwd: string
@@ -20,6 +23,9 @@ export async function processUserContentMentions({
 	fileContextTracker: FileContextTracker
 	rooIgnoreController?: any
 	showRooIgnoredFiles?: boolean
+	includeDiagnosticMessages?: boolean
+	maxDiagnosticMessages?: number
+	maxReadFileLine?: number
 }) {
 	// Process userContent array, which contains various block types:
 	// TextBlockParam, ImageBlockParam, ToolUseBlockParam, and ToolResultBlockParam.
@@ -46,6 +52,9 @@ export async function processUserContentMentions({
 							fileContextTracker,
 							rooIgnoreController,
 							showRooIgnoredFiles,
+							includeDiagnosticMessages,
+							maxDiagnosticMessages,
+							maxReadFileLine,
 						),
 					}
 				}
@@ -63,6 +72,9 @@ export async function processUserContentMentions({
 								fileContextTracker,
 								rooIgnoreController,
 								showRooIgnoredFiles,
+								includeDiagnosticMessages,
+								maxDiagnosticMessages,
+								maxReadFileLine,
 							),
 						}
 					}
@@ -81,6 +93,9 @@ export async function processUserContentMentions({
 										fileContextTracker,
 										rooIgnoreController,
 										showRooIgnoredFiles,
+										includeDiagnosticMessages,
+										maxDiagnosticMessages,
+										maxReadFileLine,
 									),
 								}
 							}

+ 7 - 1
src/core/prompts/__tests__/system-prompt.spec.ts

@@ -577,7 +577,9 @@ describe("SYSTEM_PROMPT", () => {
 
 	it("should exclude update_todo_list tool when todoListEnabled is false", async () => {
 		const settings = {
+			maxConcurrentFileReads: 5,
 			todoListEnabled: false,
+			useAgentRules: true,
 		}
 
 		const prompt = await SYSTEM_PROMPT(
@@ -607,7 +609,9 @@ describe("SYSTEM_PROMPT", () => {
 
 	it("should include update_todo_list tool when todoListEnabled is true", async () => {
 		const settings = {
+			maxConcurrentFileReads: 5,
 			todoListEnabled: true,
+			useAgentRules: true,
 		}
 
 		const prompt = await SYSTEM_PROMPT(
@@ -636,7 +640,9 @@ describe("SYSTEM_PROMPT", () => {
 
 	it("should include update_todo_list tool when todoListEnabled is undefined", async () => {
 		const settings = {
-			// todoListEnabled not set
+			maxConcurrentFileReads: 5,
+			todoListEnabled: true,
+			useAgentRules: true,
 		}
 
 		const prompt = await SYSTEM_PROMPT(

+ 2 - 0
src/core/prompts/sections/__tests__/custom-instructions-global.spec.ts

@@ -194,6 +194,7 @@ describe("custom-instructions global .roo support", () => {
 			mockReadFile
 				.mockResolvedValueOnce("global mode rule content")
 				.mockResolvedValueOnce("project mode rule content")
+				.mockResolvedValueOnce("") // AGENTS.md file (empty)
 				.mockResolvedValueOnce("") // .kilocoderules legacy file (empty)
 				.mockResolvedValueOnce("") // .roorules legacy file (empty)
 				.mockResolvedValueOnce("") // .clinerules legacy file (empty)
@@ -220,6 +221,7 @@ describe("custom-instructions global .roo support", () => {
 			// Mock legacy mode file reading
 			mockReadFile
 				.mockResolvedValueOnce("legacy mode rule content") // .roorules-code
+				.mockResolvedValueOnce("") // AGENTS.md file (empty)
 				.mockResolvedValueOnce("") // generic .kilocoderules (empty)
 				.mockResolvedValueOnce("") // generic .roorules (empty)
 				.mockResolvedValueOnce("") // generic .clinerules (empty)

+ 123 - 0
src/core/prompts/sections/__tests__/custom-instructions.spec.ts

@@ -628,6 +628,129 @@ describe("addCustomInstructions", () => {
 		expect(result).toContain("Rules from .kilocoderules-test-mode:\nmode specific rules")
 	})
 
+	it("should load AGENTS.md when settings.useAgentRules is true", async () => {
+		// Simulate no .roo/rules-test-mode directory
+		statMock.mockRejectedValueOnce({ code: "ENOENT" })
+
+		readFileMock.mockImplementation((filePath: PathLike) => {
+			const pathStr = filePath.toString()
+			if (pathStr.endsWith("AGENTS.md")) {
+				return Promise.resolve("Agent rules from AGENTS.md file")
+			}
+			return Promise.reject({ code: "ENOENT" })
+		})
+
+		const result = await addCustomInstructions(
+			"mode instructions",
+			"global instructions",
+			"/fake/path",
+			"test-mode",
+			{ settings: { maxConcurrentFileReads: 5, todoListEnabled: true, useAgentRules: true } },
+		)
+
+		expect(result).toContain("# Agent Rules Standard (AGENTS.md):")
+		expect(result).toContain("Agent rules from AGENTS.md file")
+		expect(readFileMock).toHaveBeenCalledWith(expect.stringContaining("AGENTS.md"), "utf-8")
+	})
+
+	it("should not load AGENTS.md when settings.useAgentRules is false", async () => {
+		// Simulate no .roo/rules-test-mode directory
+		statMock.mockRejectedValueOnce({ code: "ENOENT" })
+
+		readFileMock.mockImplementation((filePath: PathLike) => {
+			const pathStr = filePath.toString()
+			if (pathStr.endsWith("AGENTS.md")) {
+				return Promise.resolve("Agent rules from AGENTS.md file")
+			}
+			return Promise.reject({ code: "ENOENT" })
+		})
+
+		const result = await addCustomInstructions(
+			"mode instructions",
+			"global instructions",
+			"/fake/path",
+			"test-mode",
+			{ settings: { maxConcurrentFileReads: 5, todoListEnabled: true, useAgentRules: false } },
+		)
+
+		expect(result).not.toContain("# Agent Rules Standard (AGENTS.md):")
+		expect(result).not.toContain("Agent rules from AGENTS.md file")
+	})
+
+	it("should load AGENTS.md by default when settings.useAgentRules is undefined", async () => {
+		// Simulate no .roo/rules-test-mode directory
+		statMock.mockRejectedValueOnce({ code: "ENOENT" })
+
+		readFileMock.mockImplementation((filePath: PathLike) => {
+			const pathStr = filePath.toString()
+			if (pathStr.endsWith("AGENTS.md")) {
+				return Promise.resolve("Agent rules from AGENTS.md file")
+			}
+			return Promise.reject({ code: "ENOENT" })
+		})
+
+		const result = await addCustomInstructions(
+			"mode instructions",
+			"global instructions",
+			"/fake/path",
+			"test-mode",
+			{}, // No settings.useAgentRules specified
+		)
+
+		expect(result).toContain("# Agent Rules Standard (AGENTS.md):")
+		expect(result).toContain("Agent rules from AGENTS.md file")
+		expect(readFileMock).toHaveBeenCalledWith(expect.stringContaining("AGENTS.md"), "utf-8")
+	})
+
+	it("should handle missing AGENTS.md gracefully", async () => {
+		// Simulate no .roo/rules-test-mode directory
+		statMock.mockRejectedValueOnce({ code: "ENOENT" })
+
+		readFileMock.mockRejectedValue({ code: "ENOENT" })
+
+		const result = await addCustomInstructions(
+			"mode instructions",
+			"global instructions",
+			"/fake/path",
+			"test-mode",
+			{ settings: { maxConcurrentFileReads: 5, todoListEnabled: true, useAgentRules: true } },
+		)
+
+		expect(result).toContain("Global Instructions:\nglobal instructions")
+		expect(result).toContain("Mode-specific Instructions:\nmode instructions")
+		expect(result).not.toContain("# Agent Rules Standard (AGENTS.md):")
+	})
+
+	it("should include AGENTS.md content along with other rules", async () => {
+		// Simulate no .roo/rules-test-mode directory
+		statMock.mockRejectedValueOnce({ code: "ENOENT" })
+
+		readFileMock.mockImplementation((filePath: PathLike) => {
+			const pathStr = filePath.toString()
+			if (pathStr.endsWith("AGENTS.md")) {
+				return Promise.resolve("Agent rules content")
+			}
+			if (pathStr.endsWith(".roorules")) {
+				return Promise.resolve("Roo rules content")
+			}
+			return Promise.reject({ code: "ENOENT" })
+		})
+
+		const result = await addCustomInstructions(
+			"mode instructions",
+			"global instructions",
+			"/fake/path",
+			"test-mode",
+			{ settings: { maxConcurrentFileReads: 5, todoListEnabled: true, useAgentRules: true } },
+		)
+
+		// Should contain both AGENTS.md and .roorules content
+		expect(result).toContain("# Agent Rules Standard (AGENTS.md):")
+		expect(result).toContain("Agent rules content")
+		expect(result).toContain("# Rules from .roorules:")
+		expect(result).toContain("Roo rules content")
+	})
+
 	it("should return empty string when no instructions provided", async () => {
 		// Simulate no .kilocode/rules directory
 		statMock.mockRejectedValueOnce({ code: "ENOENT" })

+ 33 - 1
src/core/prompts/sections/custom-instructions.ts

@@ -20,6 +20,8 @@ import { Dirent } from "fs"
 
 import { isLanguage } from "@roo-code/types"
 
+import type { SystemPromptSettings } from "../types"
+
 import { LANGUAGES } from "../../../shared/language"
 import { ClineRulesToggles } from "../../../shared/cline-rules" // kilocode_change
 import { getRooDirectoriesForCwd } from "../../../services/roo-config"
@@ -247,6 +249,22 @@ export async function loadRuleFiles(cwd: string): Promise<string> {
 	return ""
 }
 
+/**
+ * Load AGENTS.md file from the project root if it exists
+ */
+async function loadAgentRulesFile(cwd: string): Promise<string> {
+	try {
+		const agentsPath = path.join(cwd, "AGENTS.md")
+		const content = await safeReadFile(agentsPath)
+		if (content) {
+			return `# Agent Rules Standard (AGENTS.md):\n${content}`
+		}
+	} catch (err) {
+		// Silently ignore errors - AGENTS.md is optional
+	}
+	return ""
+}
+
 export async function addCustomInstructions(
 	modeCustomInstructions: string,
 	globalCustomInstructions: string,
@@ -258,7 +276,7 @@ export async function addCustomInstructions(
 		rooIgnoreInstructions?: string
 		localRulesToggleState?: ClineRulesToggles
 		globalRulesToggleState?: ClineRulesToggles
-		settings?: Record<string, any>
+		settings?: SystemPromptSettings
 	} = {},
 	// kilocode_change end
 ): Promise<string> {
@@ -332,6 +350,14 @@ export async function addCustomInstructions(
 		rules.push(options.rooIgnoreInstructions)
 	}
 
+	// Add AGENTS.md content if enabled (default: true)
+	if (options.settings?.useAgentRules !== false) {
+		const agentRulesContent = await loadAgentRulesFile(cwd)
+		if (agentRulesContent && agentRulesContent.trim()) {
+			rules.push(agentRulesContent.trim())
+		}
+	}
+
 	// kilocode_change start: rule toggles
 	if (hasAnyToggles(options.localRulesToggleState) || hasAnyToggles(options.globalRulesToggleState)) {
 		const genericRuleContent =
@@ -354,6 +380,12 @@ export async function addCustomInstructions(
 			rules.push(genericRuleContent)
 		}
 	}
+
+	// Add generic rules
+	const genericRuleContent = await loadRuleFiles(cwd)
+	if (genericRuleContent && genericRuleContent.trim()) {
+		rules.push(genericRuleContent.trim())
+	}
 	// kilocode_change end
 
 	if (rules.length > 0) {

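As a reading aid, a standalone sketch of the AGENTS.md loading pattern introduced above. The real implementation goes through the project's safeReadFile helper; fs/promises is used here only to keep the example self-contained, and loadAgentRules/collectRules are hypothetical names.

import * as fs from "fs/promises"
import * as path from "path"

async function loadAgentRules(cwd: string): Promise<string> {
	try {
		const content = await fs.readFile(path.join(cwd, "AGENTS.md"), "utf-8")
		if (content.trim()) {
			return `# Agent Rules Standard (AGENTS.md):\n${content.trim()}`
		}
	} catch {
		// AGENTS.md is optional; a missing file is not an error.
	}
	return ""
}

// The section is included unless useAgentRules is explicitly set to false.
async function collectRules(cwd: string, useAgentRules?: boolean): Promise<string[]> {
	const rules: string[] = []
	if (useAgentRules !== false) {
		const agentRules = await loadAgentRules(cwd)
		if (agentRules) rules.push(agentRules)
	}
	return rules
}
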
+ 9 - 3
src/core/prompts/system.ts

@@ -3,6 +3,8 @@ import * as os from "os"
 
 import type { ModeConfig, PromptComponent, CustomModePrompts, TodoItem } from "@roo-code/types"
 
+import type { SystemPromptSettings } from "./types"
+
 import { Mode, modes, defaultModeSlug, getModeBySlug, getGroupName, getModeSelection } from "../../shared/modes"
 import { DiffStrategy } from "../../shared/tools"
 import { formatLanguage } from "../../shared/language"
@@ -57,7 +59,7 @@ async function generatePrompt(
 	language?: string,
 	rooIgnoreInstructions?: string,
 	partialReadsEnabled?: boolean,
-	settings?: Record<string, any>,
+	settings?: SystemPromptSettings,
 	todoList?: TodoItem[],
 ): Promise<string> {
 	if (!context) {
@@ -147,7 +149,7 @@ export const SYSTEM_PROMPT = async (
 	language?: string,
 	rooIgnoreInstructions?: string,
 	partialReadsEnabled?: boolean,
-	settings?: Record<string, any>,
+	settings?: SystemPromptSettings,
 	todoList?: TodoItem[],
 ): Promise<string> => {
 	if (!context) {
@@ -186,7 +188,11 @@ export const SYSTEM_PROMPT = async (
 			globalCustomInstructions || "",
 			cwd,
 			mode,
-			{ language: language ?? formatLanguage(vscode.env.language), rooIgnoreInstructions, settings },
+			{
+				language: language ?? formatLanguage(vscode.env.language),
+				rooIgnoreInstructions,
+				settings,
+			},
 		)
 
 		// For file-based prompts, don't include the tool sections

+ 8 - 0
src/core/prompts/types.ts

@@ -0,0 +1,8 @@
+/**
+ * Settings passed to system prompt generation functions
+ */
+export interface SystemPromptSettings {
+	maxConcurrentFileReads: number
+	todoListEnabled: boolean
+	useAgentRules: boolean
+}

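A short usage sketch of the new interface; the fallback values mirror the ones the callers in this change fall back to (Task.ts and generateSystemPrompt.ts), and resolveSettings is a hypothetical helper, not part of the diff.

interface SystemPromptSettings {
	maxConcurrentFileReads: number
	todoListEnabled: boolean
	useAgentRules: boolean
}

// Fill in the same defaults used by the callers: 5 concurrent reads,
// todo list enabled, AGENTS.md support enabled.
function resolveSettings(partial: Partial<SystemPromptSettings> = {}): SystemPromptSettings {
	return {
		maxConcurrentFileReads: partial.maxConcurrentFileReads ?? 5,
		todoListEnabled: partial.todoListEnabled ?? true,
		useAgentRules: partial.useAgentRules ?? true,
	}
}

const settings = resolveSettings({ todoListEnabled: false })
// -> { maxConcurrentFileReads: 5, todoListEnabled: false, useAgentRules: true }
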
+ 1 - 0
src/core/protect/RooProtectedController.ts

@@ -25,6 +25,7 @@ export class RooProtectedController {
 		".roo/**",
 		".vscode/**",
 		".rooprotected", // For future use
+		"AGENTS.md",
 	]
 
 	constructor(cwd: string) {

+ 5 - 0
src/core/protect/__tests__/RooProtectedController.spec.ts

@@ -44,6 +44,10 @@ describe("RooProtectedController", () => {
 			expect(controller.isWriteProtected(".vscode/tasks.json")).toBe(true)
 		})
 
+		it("should protect AGENTS.md file", () => {
+			expect(controller.isWriteProtected("AGENTS.md")).toBe(true)
+		})
+
 		it("should not protect other files starting with .roo", () => {
 			expect(controller.isWriteProtected(".roosettings")).toBe(false)
 			expect(controller.isWriteProtected(".rooconfig")).toBe(false)
@@ -147,6 +151,7 @@ describe("RooProtectedController", () => {
 				".roo/**",
 				".vscode/**",
 				".rooprotected",
+				"AGENTS.md",
 			])
 		})
 	})

+ 14 - 6
src/core/task/Task.ts

@@ -1,10 +1,9 @@
 import * as path from "path"
+import * as vscode from "vscode"
 import os from "os"
 import crypto from "crypto"
 import EventEmitter from "events"
 
-import * as vscode from "vscode" // kilocode_change:
-
 import { Anthropic } from "@anthropic-ai/sdk"
 import delay from "delay"
 import pWaitFor from "p-wait-for"
@@ -277,7 +276,7 @@ export class Task extends EventEmitter<ClineEvents> {
 		this.consecutiveMistakeLimit = consecutiveMistakeLimit ?? DEFAULT_CONSECUTIVE_MISTAKE_LIMIT
 		this.providerRef = new WeakRef(provider)
 		this.globalStoragePath = provider.context.globalStorageUri.fsPath
-		this.diffViewProvider = new DiffViewProvider(this.cwd)
+		this.diffViewProvider = new DiffViewProvider(this.cwd, this)
 		this.enableCheckpoints = enableCheckpoints
 
 		this.rootTask = rootTask
@@ -1254,7 +1253,12 @@ export class Task extends EventEmitter<ClineEvents> {
 			}),
 		)
 
-		const { showRooIgnoredFiles = true } = (await this.providerRef.deref()?.getState()) ?? {}
+		const {
+			showRooIgnoredFiles = true,
+			includeDiagnosticMessages = true,
+			maxDiagnosticMessages = 50,
+			maxReadFileLine = -1,
+		} = (await this.providerRef.deref()?.getState()) ?? {}
 
 		const [parsedUserContent, needsRulesFileCheck] = await processKiloUserContentMentions({
 			context: this.context, // kilocode_change
@@ -1264,6 +1268,9 @@ export class Task extends EventEmitter<ClineEvents> {
 			fileContextTracker: this.fileContextTracker,
 			rooIgnoreController: this.rooIgnoreController,
 			showRooIgnoredFiles,
+			includeDiagnosticMessages,
+			maxDiagnosticMessages,
+			maxReadFileLine,
 		})
 
 		if (needsRulesFileCheck) {
@@ -1960,8 +1967,9 @@ export class Task extends EventEmitter<ClineEvents> {
 				rooIgnoreInstructions,
 				maxReadFileLine !== -1,
 				{
-					maxConcurrentFileReads,
-					todoListEnabled: apiConfiguration?.todoListEnabled,
+					maxConcurrentFileReads: maxConcurrentFileReads ?? 5,
+					todoListEnabled: apiConfiguration?.todoListEnabled ?? true,
+					useAgentRules: vscode.workspace.getConfiguration("roo-cline").get<boolean>("useAgentRules") ?? true,
 				},
 			)
 		})()

+ 1 - 0
src/core/tools/__tests__/insertContentTool.spec.ts

@@ -96,6 +96,7 @@ describe("insertContentTool", () => {
 					finalContent: "final content",
 				}),
 				scrollToFirstDiff: vi.fn(),
+				updateDiagnosticSettings: vi.fn(),
 				pushToolWriteResult: vi.fn().mockImplementation(async function (
 					this: any,
 					task: any,

+ 1 - 0
src/core/tools/__tests__/writeToFileTool.spec.ts

@@ -157,6 +157,7 @@ describe("writeToFileTool", () => {
 				finalContent: "final content",
 			}),
 			scrollToFirstDiff: vi.fn(),
+			updateDiagnosticSettings: vi.fn(),
 			pushToolWriteResult: vi.fn().mockImplementation(async function (
 				this: any,
 				task: any,

+ 9 - 2
src/core/tools/applyDiffTool.ts

@@ -193,10 +193,17 @@ export async function applyDiffToolLegacy(
 			// Get the formatted response message
 			const message = await cline.diffViewProvider.pushToolWriteResult(cline, cline.cwd, !fileExists)
 
+			// Check for single SEARCH/REPLACE block warning
+			const searchBlocks = (diffContent.match(/<<<<<<< SEARCH/g) || []).length
+			const singleBlockNotice =
+				searchBlocks === 1
+					? "\n<notice>Making multiple related changes in a single apply_diff is more efficient. If other changes are needed in this file, please include them as additional SEARCH/REPLACE blocks.</notice>"
+					: ""
+
 			if (partFailHint) {
-				pushToolResult(partFailHint + message)
+				pushToolResult(partFailHint + message + singleBlockNotice)
 			} else {
-				pushToolResult(message)
+				pushToolResult(message + singleBlockNotice)
 			}
 
 			await cline.diffViewProvider.reset()

+ 15 - 1
src/core/tools/multiApplyDiffTool.ts

@@ -601,8 +601,22 @@ ${errorDetails ? `\nTechnical details:\n${errorDetails}\n` : ""}
 			await cline.say("diff_error", allDiffErrors.join("\n"))
 		}
 
+		// Check for single SEARCH/REPLACE block warning
+		let totalSearchBlocks = 0
+		for (const operation of operations) {
+			for (const diffItem of operation.diff) {
+				const searchBlocks = (diffItem.content.match(/<<<<<<< SEARCH/g) || []).length
+				totalSearchBlocks += searchBlocks
+			}
+		}
+
+		const singleBlockNotice =
+			totalSearchBlocks === 1
+				? "\n<notice>Making multiple related changes in a single apply_diff is more efficient. If other changes are needed in this file, please include them as additional SEARCH/REPLACE blocks.</notice>"
+				: ""
+
 		// Push the final result combining all operation results
-		pushToolResult(results.join("\n\n"))
+		pushToolResult(results.join("\n\n") + singleBlockNotice)
 		return
 	} catch (error) {
 		await handleError("applying diff", error)

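Both apply_diff paths above append the same hint when a request contains exactly one SEARCH/REPLACE block. Below is a self-contained sketch of that check; buildSingleBlockNotice is a hypothetical shared helper, not a function introduced by this diff.

const SINGLE_BLOCK_NOTICE =
	"\n<notice>Making multiple related changes in a single apply_diff is more efficient. " +
	"If other changes are needed in this file, please include them as additional SEARCH/REPLACE blocks.</notice>"

// Count SEARCH markers across every diff payload in the request and only
// return the notice when exactly one block was supplied.
function buildSingleBlockNotice(diffContents: string[]): string {
	const totalSearchBlocks = diffContents.reduce(
		(sum, content) => sum + (content.match(/<<<<<<< SEARCH/g) || []).length,
		0,
	)
	return totalSearchBlocks === 1 ? SINGLE_BLOCK_NOTICE : ""
}

// Example: one block in, notice out; two or more blocks in, empty string out.
buildSingleBlockNotice(["<<<<<<< SEARCH\nold\n=======\nnew\n>>>>>>> REPLACE"])
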
+ 9 - 4
src/core/webview/ClineProvider.ts

@@ -117,7 +117,7 @@ export class ClineProvider
 
 	public isViewLaunched = false
 	public settingsImportedAt?: number
-	public readonly latestAnnouncementId = "jul-09-2025-3-23-0" // Update for v3.23.0 announcement
+	public readonly latestAnnouncementId = "jul-26-2025-3-24-0" // Update for v3.24.0 announcement
 	public readonly providerSettingsManager: ProviderSettingsManager
 	public readonly customModesManager: CustomModesManager
 
@@ -163,7 +163,7 @@ export class ClineProvider
 				this.log(`Failed to initialize MCP Hub: ${error}`)
 			})
 
-		this.marketplaceManager = new MarketplaceManager(this.context)
+		this.marketplaceManager = new MarketplaceManager(this.context, this.customModesManager)
 	}
 
 	// Adds a new Cline instance to clineStack, marking the start of a new task.
@@ -1507,7 +1507,8 @@ export class ClineProvider
 			dismissedNotificationIds, // kilocode_change
 			alwaysAllowFollowupQuestions,
 			followupAutoApproveTimeoutMs,
-			diagnosticsEnabled,
+			includeDiagnosticMessages,
+			maxDiagnosticMessages,
 		} = await this.getState()
 
 		const telemetryKey = process.env.KILOCODE_POSTHOG_API_KEY
@@ -1636,7 +1637,8 @@ export class ClineProvider
 			dismissedNotificationIds: dismissedNotificationIds ?? [], // kilocode_change
 			alwaysAllowFollowupQuestions: alwaysAllowFollowupQuestions ?? false,
 			followupAutoApproveTimeoutMs: followupAutoApproveTimeoutMs ?? 60000,
-			diagnosticsEnabled: diagnosticsEnabled ?? true,
+			includeDiagnosticMessages: includeDiagnosticMessages ?? true,
+			maxDiagnosticMessages: maxDiagnosticMessages ?? 50,
 		}
 	}
 
@@ -1809,6 +1811,9 @@ export class ClineProvider
 				codebaseIndexSearchMinScore: stateValues.codebaseIndexConfig?.codebaseIndexSearchMinScore,
 			},
 			profileThresholds: stateValues.profileThresholds ?? {},
+			// Add diagnostic message settings
+			includeDiagnosticMessages: stateValues.includeDiagnosticMessages ?? true,
+			maxDiagnosticMessages: stateValues.maxDiagnosticMessages ?? 50,
 		}
 	}
 

+ 4 - 1
src/core/webview/generateSystemPrompt.ts

@@ -1,3 +1,4 @@
+import * as vscode from "vscode"
 import { WebviewMessage } from "../../shared/WebviewMessage"
 import { defaultModeSlug, getModeBySlug, getGroupName } from "../../shared/modes"
 import { buildApiHandler } from "../../api"
@@ -82,7 +83,9 @@ export const generateSystemPrompt = async (provider: ClineProvider, message: Web
 		rooIgnoreInstructions,
 		maxReadFileLine !== -1,
 		{
-			maxConcurrentFileReads,
+			maxConcurrentFileReads: maxConcurrentFileReads ?? 5,
+			todoListEnabled: apiConfiguration?.todoListEnabled ?? true,
+			useAgentRules: vscode.workspace.getConfiguration("roo-cline").get<boolean>("useAgentRules") ?? true,
 		},
 	)
 

+ 120 - 3
src/core/webview/webviewMessageHandler.ts

@@ -706,6 +706,22 @@ export const webviewMessageHandler = async (
 			// TODO: Cache like we do for OpenRouter, etc?
 			provider.postMessageToWebview({ type: "vsCodeLmModels", vsCodeLmModels })
 			break
+		case "requestHuggingFaceModels":
+			try {
+				const { getHuggingFaceModelsWithMetadata } = await import("../../api/providers/fetchers/huggingface")
+				const huggingFaceModelsResponse = await getHuggingFaceModelsWithMetadata()
+				provider.postMessageToWebview({
+					type: "huggingFaceModels",
+					huggingFaceModels: huggingFaceModelsResponse.models,
+				})
+			} catch (error) {
+				console.error("Failed to fetch Hugging Face models:", error)
+				provider.postMessageToWebview({
+					type: "huggingFaceModels",
+					huggingFaceModels: [],
+				})
+			}
+			break
 		case "openImage":
 			openImage(message.text!, { values: message.values })
 			break
@@ -1324,6 +1340,16 @@ export const webviewMessageHandler = async (
 			await updateGlobalState("maxConcurrentFileReads", valueToSave)
 			await provider.postStateToWebview()
 			break
+		case "includeDiagnosticMessages":
+			// Only apply default if the value is truly undefined (not false)
+			const includeValue = message.bool !== undefined ? message.bool : true
+			await updateGlobalState("includeDiagnosticMessages", includeValue)
+			await provider.postStateToWebview()
+			break
+		case "maxDiagnosticMessages":
+			await updateGlobalState("maxDiagnosticMessages", message.value ?? 50)
+			await provider.postStateToWebview()
+			break
 		case "setHistoryPreviewCollapsed": // Add the new case handler
 			await updateGlobalState("historyPreviewCollapsed", message.bool ?? false)
 			// No need to call postStateToWebview here as the UI already updated optimistically
@@ -2337,6 +2363,19 @@ export const webviewMessageHandler = async (
 							}
 						}
 					}
+				} else {
+					// No workspace open - send error status
+					provider.log("Cannot save code index settings: No workspace folder open")
+					await provider.postMessageToWebview({
+						type: "indexingStatusUpdate",
+						values: {
+							systemStatus: "Error",
+							message: t("embeddings:orchestrator.indexingRequiresWorkspace"),
+							processedItems: 0,
+							totalItems: 0,
+							currentItemUnit: "items",
+						},
+					})
 				}
 			} catch (error) {
 				provider.log(`Error saving code index settings: ${error.message || error}`)
@@ -2350,7 +2389,22 @@ export const webviewMessageHandler = async (
 		}
 
 		case "requestIndexingStatus": {
-			const status = provider.codeIndexManager!.getCurrentStatus()
+			const manager = provider.codeIndexManager
+			if (!manager) {
+				// No workspace open - send error status
+				provider.postMessageToWebview({
+					type: "indexingStatusUpdate",
+					values: {
+						systemStatus: "Error",
+						message: t("embeddings:orchestrator.indexingRequiresWorkspace"),
+						processedItems: 0,
+						totalItems: 0,
+						currentItemUnit: "items",
+					},
+				})
+				return
+			}
+			const status = manager.getCurrentStatus()
 			provider.postMessageToWebview({
 				type: "indexingStatusUpdate",
 				values: status,
@@ -2381,7 +2435,22 @@ export const webviewMessageHandler = async (
 		}
 		case "startIndexing": {
 			try {
-				const manager = provider.codeIndexManager!
+				const manager = provider.codeIndexManager
+				if (!manager) {
+					// No workspace open - send error status
+					provider.postMessageToWebview({
+						type: "indexingStatusUpdate",
+						values: {
+							systemStatus: "Error",
+							message: t("embeddings:orchestrator.indexingRequiresWorkspace"),
+							processedItems: 0,
+							totalItems: 0,
+							currentItemUnit: "items",
+						},
+					})
+					provider.log("Cannot start indexing: No workspace folder open")
+					return
+				}
 				if (manager.isFeatureEnabled && manager.isFeatureConfigured) {
 					if (!manager.isInitialized) {
 						await manager.initialize(provider.contextProxy)
@@ -2396,7 +2465,18 @@ export const webviewMessageHandler = async (
 		}
 		case "clearIndexData": {
 			try {
-				const manager = provider.codeIndexManager!
+				const manager = provider.codeIndexManager
+				if (!manager) {
+					provider.log("Cannot clear index data: No workspace folder open")
+					provider.postMessageToWebview({
+						type: "indexCleared",
+						values: {
+							success: false,
+							error: t("embeddings:orchestrator.indexingRequiresWorkspace"),
+						},
+					})
+					return
+				}
 				await manager.clearIndexData()
 				provider.postMessageToWebview({ type: "indexCleared", values: { success: true } })
 			} catch (error) {
@@ -2523,8 +2603,45 @@ export const webviewMessageHandler = async (
 				try {
 					await marketplaceManager.removeInstalledMarketplaceItem(message.mpItem, message.mpInstallOptions)
 					await provider.postStateToWebview()
+
+					// Send success message to webview
+					provider.postMessageToWebview({
+						type: "marketplaceRemoveResult",
+						success: true,
+						slug: message.mpItem.id,
+					})
 				} catch (error) {
 					console.error(`Error removing marketplace item: ${error}`)
+
+					// Show error message to user
+					vscode.window.showErrorMessage(
+						`Failed to remove marketplace item: ${error instanceof Error ? error.message : String(error)}`,
+					)
+
+					// Send error message to webview
+					provider.postMessageToWebview({
+						type: "marketplaceRemoveResult",
+						success: false,
+						error: error instanceof Error ? error.message : String(error),
+						slug: message.mpItem.id,
+					})
+				}
+			} else {
+				// MarketplaceManager not available or missing required parameters
+				const errorMessage = !marketplaceManager
+					? "Marketplace manager is not available"
+					: "Missing required parameters for marketplace item removal"
+				console.error(errorMessage)
+
+				vscode.window.showErrorMessage(errorMessage)
+
+				if (message.mpItem?.id) {
+					provider.postMessageToWebview({
+						type: "marketplaceRemoveResult",
+						success: false,
+						error: errorMessage,
+						slug: message.mpItem.id,
+					})
 				}
 			}
 			break

+ 7 - 1
src/i18n/locales/ar/common.json

@@ -160,13 +160,19 @@
 			"deleteFailed": "فشل حذف الوضع المخصص: {{error}}",
 			"resetFailed": "فشل إعادة تعيين الأوضاع المخصصة: {{error}}",
 			"modeNotFound": "خطأ في الكتابة: ما لقينا الوضع المحدد",
-			"noWorkspaceForProject": "ما فيه مجلد مشروع مخصص لهذا الوضع"
+			"noWorkspaceForProject": "ما فيه مجلد مشروع مخصص لهذا الوضع",
+			"rulesCleanupFailed": "تم حذف الوضع بنجاح، لكن فشل حذف مجلد القواعد في {{rulesFolderPath}}. قد تحتاج لحذفه يدوياً."
 		},
 		"scope": {
 			"project": "مشروع",
 			"global": "عام"
 		}
 	},
+	"marketplace": {
+		"mode": {
+			"rulesCleanupFailed": "تم إزالة الوضع بنجاح، لكن فشل حذف مجلد القواعد في {{rulesFolderPath}}. قد تحتاج لحذفه يدوياً."
+		}
+	},
 	"mdm": {
 		"errors": {
 			"cloud_auth_required": "منظمتك تتطلب تسجيل دخول Kilo Code Cloud. سجل دخولك عشان تكمل.",

+ 9 - 0
src/i18n/locales/ar/embeddings.json

@@ -52,5 +52,14 @@
 		"vectorDimensionNotDetermined": "لا يمكن تحديد أبعاد المتجه للنموذج '{{modelId}}' مع المزوّد '{{provider}}'. تحقق من ملفات النموذج أو الإعدادات.",
 		"qdrantUrlMissing": "رابط Qdrant مفقود لإنشاء مخزن المتجهات",
 		"codeIndexingNotConfigured": "لا يمكن إنشاء الخدمات: فهرسة الكود غير مُعدّة بشكل صحيح"
+	},
+	"orchestrator": {
+		"indexingFailedNoBlocks": "فشلت الفهرسة: لم يتم فهرسة أي كتل كود بنجاح. هذا عادة يشير إلى مشكلة في إعدادات المضمّن.",
+		"indexingFailedCritical": "فشلت الفهرسة: لم يتم فهرسة أي كتل كود بنجاح رغم العثور على ملفات للمعالجة. هذا يشير إلى فشل حرج في المضمّن.",
+		"fileWatcherStarted": "تم بدء مراقب الملفات.",
+		"fileWatcherStopped": "تم إيقاف مراقب الملفات.",
+		"failedDuringInitialScan": "فشل أثناء المسح الأولي: {{errorMessage}}",
+		"unknownError": "خطأ غير معروف",
+		"indexingRequiresWorkspace": "الفهرسة تتطلب مجلد مشروع مفتوح"
 	}
 }

+ 7 - 1
src/i18n/locales/ca/common.json

@@ -165,13 +165,19 @@
 			"deleteFailed": "Error en eliminar el mode personalitzat: {{error}}",
 			"resetFailed": "Error en restablir els modes personalitzats: {{error}}",
 			"modeNotFound": "Error d'escriptura: Mode no trobat",
-			"noWorkspaceForProject": "No s'ha trobat cap carpeta d'espai de treball per al mode específic del projecte"
+			"noWorkspaceForProject": "No s'ha trobat cap carpeta d'espai de treball per al mode específic del projecte",
+			"rulesCleanupFailed": "El mode s'ha suprimit correctament, però no s'ha pogut suprimir la carpeta de regles a {{rulesFolderPath}}. És possible que l'hagis de suprimir manualment."
 		},
 		"scope": {
 			"project": "projecte",
 			"global": "global"
 		}
 	},
+	"marketplace": {
+		"mode": {
+			"rulesCleanupFailed": "El mode s'ha eliminat correctament, però no s'ha pogut eliminar la carpeta de regles a {{rulesFolderPath}}. És possible que l'hagis d'eliminar manualment."
+		}
+	},
 	"mdm": {
 		"errors": {
 			"cloud_auth_required": "La teva organització requereix autenticació de Kilo Code Cloud. Si us plau, inicia sessió per continuar.",

+ 9 - 0
src/i18n/locales/ca/embeddings.json

@@ -52,5 +52,14 @@
 		"vectorDimensionNotDetermined": "No s'ha pogut determinar la dimensió del vector per al model '{{modelId}}' amb el proveïdor '{{provider}}'. Comprova els perfils del model o la configuració.",
 		"qdrantUrlMissing": "Falta l'URL de Qdrant per crear l'emmagatzematge de vectors",
 		"codeIndexingNotConfigured": "No es poden crear serveis: La indexació de codi no està configurada correctament"
+	},
+	"orchestrator": {
+		"indexingFailedNoBlocks": "Indexació fallida: No s'ha indexat cap bloc de codi amb èxit. Això normalment indica un problema de configuració de l'embedder.",
+		"indexingFailedCritical": "Indexació fallida: No s'ha indexat cap bloc de codi amb èxit malgrat trobar fitxers per processar. Això indica una fallida crítica de l'embedder.",
+		"fileWatcherStarted": "Monitor de fitxers iniciat.",
+		"fileWatcherStopped": "Monitor de fitxers aturat.",
+		"failedDuringInitialScan": "Ha fallat durant l'escaneig inicial: {{errorMessage}}",
+		"unknownError": "Error desconegut",
+		"indexingRequiresWorkspace": "La indexació requereix una carpeta de workspace oberta"
 	}
 }

+ 7 - 1
src/i18n/locales/cs/common.json

@@ -160,13 +160,19 @@
 			"deleteFailed": "Nepodařilo se smazat vlastní režim: {{error}}",
 			"resetFailed": "Nepodařilo se resetovat vlastní režimy: {{error}}",
 			"modeNotFound": "Chyba zápisu: Režim nebyl nalezen",
-			"noWorkspaceForProject": "Pro projektově specifický režim nebyla nalezena žádná složka workspace"
+			"noWorkspaceForProject": "Pro projektově specifický režim nebyla nalezena žádná složka workspace",
+			"rulesCleanupFailed": "Režim byl úspěšně smazán, ale nepodařilo se smazat složku pravidel v {{rulesFolderPath}}. Možná ji budete muset smazat ručně."
 		},
 		"scope": {
 			"project": "projekt",
 			"global": "globální"
 		}
 	},
+	"marketplace": {
+		"mode": {
+			"rulesCleanupFailed": "Režim byl úspěšně odstraněn, ale nepodařilo se smazat složku pravidel v {{rulesFolderPath}}. Možná ji budete muset smazat ručně."
+		}
+	},
 	"mdm": {
 		"errors": {
 			"cloud_auth_required": "Tvá organizace vyžaduje autentizaci Kilo Code Cloud. Prosím přihlas se pro pokračování.",

+ 9 - 0
src/i18n/locales/cs/embeddings.json

@@ -52,5 +52,14 @@
 		"vectorDimensionNotDetermined": "Nelze určit rozměr vektoru pro model '{{modelId}}' s poskytovatelem '{{provider}}'. Zkontroluj profily modelů nebo konfiguraci.",
 		"qdrantUrlMissing": "Chybí Qdrant URL pro vytvoření vektorového úložiště",
 		"codeIndexingNotConfigured": "Nelze vytvořit služby: Indexování kódu není správně nakonfigurováno"
+	},
+	"orchestrator": {
+		"indexingFailedNoBlocks": "Indexování selhalo: Žádné bloky kódu nebyly úspěšně zaindexovány. To obvykle naznačuje problém s konfigurací embedderu.",
+		"indexingFailedCritical": "Indexování selhalo: Žádné bloky kódu nebyly úspěšně zaindexovány navzdory nalezení souborů ke zpracování. To naznačuje kritickou chybu embedderu.",
+		"fileWatcherStarted": "Pozorovatel souborů byl spuštěn.",
+		"fileWatcherStopped": "Pozorovatel souborů byl zastaven.",
+		"failedDuringInitialScan": "Selhalo během úvodního skenování: {{errorMessage}}",
+		"unknownError": "Neznámá chyba",
+		"indexingRequiresWorkspace": "Indexování vyžaduje otevřenou složku workspace"
 	}
 }

+ 7 - 1
src/i18n/locales/de/common.json

@@ -165,13 +165,19 @@
 			"deleteFailed": "Fehler beim Löschen des benutzerdefinierten Modus: {{error}}",
 			"resetFailed": "Fehler beim Zurücksetzen der benutzerdefinierten Modi: {{error}}",
 			"modeNotFound": "Schreibfehler: Modus nicht gefunden",
-			"noWorkspaceForProject": "Kein Arbeitsbereich-Ordner für projektspezifischen Modus gefunden"
+			"noWorkspaceForProject": "Kein Arbeitsbereich-Ordner für projektspezifischen Modus gefunden",
+			"rulesCleanupFailed": "Der Modus wurde erfolgreich gelöscht, aber der Regelordner unter {{rulesFolderPath}} konnte nicht gelöscht werden. Möglicherweise musst du ihn manuell löschen."
 		},
 		"scope": {
 			"project": "projekt",
 			"global": "global"
 		}
 	},
+	"marketplace": {
+		"mode": {
+			"rulesCleanupFailed": "Der Modus wurde erfolgreich entfernt, aber der Regelordner unter {{rulesFolderPath}} konnte nicht gelöscht werden. Möglicherweise musst du ihn manuell löschen."
+		}
+	},
 	"mdm": {
 		"errors": {
 			"cloud_auth_required": "Deine Organisation erfordert eine Kilo Code Cloud-Authentifizierung. Bitte melde dich an, um fortzufahren.",

+ 9 - 0
src/i18n/locales/de/embeddings.json

@@ -52,5 +52,14 @@
 		"vectorDimensionNotDetermined": "Konnte die Vektordimension für Modell '{{modelId}}' mit Anbieter '{{provider}}' nicht bestimmen. Überprüfe die Modellprofile oder Konfiguration.",
 		"qdrantUrlMissing": "Qdrant-URL fehlt für die Erstellung des Vektorspeichers",
 		"codeIndexingNotConfigured": "Kann keine Dienste erstellen: Code-Indizierung ist nicht richtig konfiguriert"
+	},
+	"orchestrator": {
+		"indexingFailedNoBlocks": "Indizierung fehlgeschlagen: Keine Code-Blöcke wurden erfolgreich indiziert. Dies deutet normalerweise auf ein Embedder-Konfigurationsproblem hin.",
+		"indexingFailedCritical": "Indizierung fehlgeschlagen: Keine Code-Blöcke wurden erfolgreich indiziert, obwohl zu verarbeitende Dateien gefunden wurden. Dies deutet auf einen kritischen Embedder-Fehler hin.",
+		"fileWatcherStarted": "Datei-Watcher gestartet.",
+		"fileWatcherStopped": "Datei-Watcher gestoppt.",
+		"failedDuringInitialScan": "Fehler während des ersten Scans: {{errorMessage}}",
+		"unknownError": "Unbekannter Fehler",
+		"indexingRequiresWorkspace": "Indexierung erfordert einen offenen Workspace-Ordner"
 	}
 }

+ 7 - 1
src/i18n/locales/en/common.json

@@ -154,13 +154,19 @@
 			"deleteFailed": "Failed to delete custom mode: {{error}}",
 			"resetFailed": "Failed to reset custom modes: {{error}}",
 			"modeNotFound": "Write error: Mode not found",
-			"noWorkspaceForProject": "No workspace folder found for project-specific mode"
+			"noWorkspaceForProject": "No workspace folder found for project-specific mode",
+			"rulesCleanupFailed": "Mode deleted successfully, but failed to delete rules folder at {{rulesFolderPath}}. You may need to delete it manually."
 		},
 		"scope": {
 			"project": "project",
 			"global": "global"
 		}
 	},
+	"marketplace": {
+		"mode": {
+			"rulesCleanupFailed": "Mode removed successfully, but failed to delete rules folder at {{rulesFolderPath}}. You may need to delete it manually."
+		}
+	},
 	"mdm": {
 		"errors": {
 			"cloud_auth_required": "Your organization requires Kilo Code Cloud authentication. Please sign in to continue.",

+ 9 - 0
src/i18n/locales/en/embeddings.json

@@ -52,5 +52,14 @@
 		"vectorDimensionNotDetermined": "Could not determine vector dimension for model '{{modelId}}' with provider '{{provider}}'. Check model profiles or configuration.",
 		"qdrantUrlMissing": "Qdrant URL missing for vector store creation",
 		"codeIndexingNotConfigured": "Cannot create services: Code indexing is not properly configured"
+	},
+	"orchestrator": {
+		"indexingFailedNoBlocks": "Indexing failed: No code blocks were successfully indexed. This usually indicates an embedder configuration issue.",
+		"indexingFailedCritical": "Indexing failed: No code blocks were successfully indexed despite finding files to process. This indicates a critical embedder failure.",
+		"fileWatcherStarted": "File watcher started.",
+		"fileWatcherStopped": "File watcher stopped.",
+		"failedDuringInitialScan": "Failed during initial scan: {{errorMessage}}",
+		"unknownError": "Unknown error",
+		"indexingRequiresWorkspace": "Indexing requires an open workspace folder"
 	}
 }

+ 7 - 1
src/i18n/locales/es/common.json

@@ -165,13 +165,19 @@
 			"deleteFailed": "Error al eliminar modo personalizado: {{error}}",
 			"resetFailed": "Error al restablecer modos personalizados: {{error}}",
 			"modeNotFound": "Error de escritura: Modo no encontrado",
-			"noWorkspaceForProject": "No se encontró carpeta de espacio de trabajo para modo específico del proyecto"
+			"noWorkspaceForProject": "No se encontró carpeta de espacio de trabajo para modo específico del proyecto",
+			"rulesCleanupFailed": "El modo se eliminó correctamente, pero no se pudo eliminar la carpeta de reglas en {{rulesFolderPath}}. Es posible que debas eliminarla manualmente."
 		},
 		"scope": {
 			"project": "proyecto",
 			"global": "global"
 		}
 	},
+	"marketplace": {
+		"mode": {
+			"rulesCleanupFailed": "El modo se eliminó correctamente, pero no se pudo eliminar la carpeta de reglas en {{rulesFolderPath}}. Es posible que debas eliminarla manualmente."
+		}
+	},
 	"mdm": {
 		"errors": {
 			"cloud_auth_required": "Tu organización requiere autenticación de Kilo Code Cloud. Por favor, inicia sesión para continuar.",

+ 9 - 0
src/i18n/locales/es/embeddings.json

@@ -52,5 +52,14 @@
 		"vectorDimensionNotDetermined": "No se pudo determinar la dimensión del vector para el modelo '{{modelId}}' con el proveedor '{{provider}}'. Verifica los perfiles del modelo o la configuración.",
 		"qdrantUrlMissing": "Falta la URL de Qdrant para crear el almacén de vectores",
 		"codeIndexingNotConfigured": "No se pueden crear servicios: La indexación de código no está configurada correctamente"
+	},
+	"orchestrator": {
+		"indexingFailedNoBlocks": "Indexación fallida: No se indexaron exitosamente bloques de código. Esto usualmente indica un problema de configuración del incrustador.",
+		"indexingFailedCritical": "Indexación fallida: No se indexaron exitosamente bloques de código a pesar de encontrar archivos para procesar. Esto indica una falla crítica del incrustador.",
+		"fileWatcherStarted": "Monitor de archivos iniciado.",
+		"fileWatcherStopped": "Monitor de archivos detenido.",
+		"failedDuringInitialScan": "Falló durante el escaneo inicial: {{errorMessage}}",
+		"unknownError": "Error desconocido",
+		"indexingRequiresWorkspace": "La indexación requiere una carpeta de workspace abierta"
 	}
 }

+ 7 - 1
src/i18n/locales/fr/common.json

@@ -165,13 +165,19 @@
 			"deleteFailed": "Échec de la suppression du mode personnalisé : {{error}}",
 			"resetFailed": "Échec de la réinitialisation des modes personnalisés : {{error}}",
 			"modeNotFound": "Erreur d'écriture : Mode non trouvé",
-			"noWorkspaceForProject": "Aucun dossier d'espace de travail trouvé pour le mode spécifique au projet"
+			"noWorkspaceForProject": "Aucun dossier d'espace de travail trouvé pour le mode spécifique au projet",
+			"rulesCleanupFailed": "Le mode a été supprimé avec succès, mais la suppression du dossier de règles à l'adresse {{rulesFolderPath}} a échoué. Vous devrez peut-être le supprimer manuellement."
 		},
 		"scope": {
 			"project": "projet",
 			"global": "global"
 		}
 	},
+	"marketplace": {
+		"mode": {
+			"rulesCleanupFailed": "Le mode a été supprimé avec succès, mais la suppression du dossier de règles à l'adresse {{rulesFolderPath}} a échoué. Vous devrez peut-être le supprimer manuellement."
+		}
+	},
 	"mdm": {
 		"errors": {
 			"cloud_auth_required": "Votre organisation nécessite une authentification Kilo Code Cloud. Veuillez vous connecter pour continuer.",

+ 9 - 0
src/i18n/locales/fr/embeddings.json

@@ -52,5 +52,14 @@
 		"vectorDimensionNotDetermined": "Impossible de déterminer la dimension du vecteur pour le modèle '{{modelId}}' avec le fournisseur '{{provider}}'. Vérifie les profils du modèle ou la configuration.",
 		"qdrantUrlMissing": "URL Qdrant manquante pour la création du stockage de vecteurs",
 		"codeIndexingNotConfigured": "Impossible de créer les services : L'indexation du code n'est pas correctement configurée"
+	},
+	"orchestrator": {
+		"indexingFailedNoBlocks": "Échec de l'indexation : Aucun bloc de code n'a été indexé avec succès. Cela indique généralement un problème de configuration de l'embedder.",
+		"indexingFailedCritical": "Échec de l'indexation : Aucun bloc de code n'a été indexé avec succès malgré la découverte de fichiers à traiter. Cela indique une défaillance critique de l'embedder.",
+		"fileWatcherStarted": "Surveillant de fichiers démarré.",
+		"fileWatcherStopped": "Surveillant de fichiers arrêté.",
+		"failedDuringInitialScan": "Échec lors du scan initial : {{errorMessage}}",
+		"unknownError": "Erreur inconnue",
+		"indexingRequiresWorkspace": "L'indexation nécessite l'ouverture d'un dossier workspace"
 	}
 }

+ 7 - 1
src/i18n/locales/hi/common.json

@@ -165,13 +165,19 @@
 			"deleteFailed": "कस्टम मोड डिलीट विफल: {{error}}",
 			"resetFailed": "कस्टम मोड रीसेट विफल: {{error}}",
 			"modeNotFound": "लेखन त्रुटि: मोड नहीं मिला",
-			"noWorkspaceForProject": "प्रोजेक्ट-विशिष्ट मोड के लिए वर्कस्पेस फ़ोल्डर नहीं मिला"
+			"noWorkspaceForProject": "प्रोजेक्ट-विशिष्ट मोड के लिए वर्कस्पेस फ़ोल्डर नहीं मिला",
+			"rulesCleanupFailed": "मोड सफलतापूर्वक हटा दिया गया, लेकिन {{rulesFolderPath}} पर नियम फ़ोल्डर को हटाने में विफल रहा। आपको इसे मैन्युअल रूप से हटाना पड़ सकता है।"
 		},
 		"scope": {
 			"project": "परियोजना",
 			"global": "वैश्विक"
 		}
 	},
+	"marketplace": {
+		"mode": {
+			"rulesCleanupFailed": "मोड सफलतापूर्वक हटा दिया गया, लेकिन {{rulesFolderPath}} पर नियम फ़ोल्डर को हटाने में विफल रहा। आपको इसे मैन्युअल रूप से हटाना पड़ सकता है।"
+		}
+	},
 	"mdm": {
 		"errors": {
 			"cloud_auth_required": "आपके संगठन को Kilo Code Cloud प्रमाणीकरण की आवश्यकता है। कृपया जारी रखने के लिए साइन इन करें।",

+ 9 - 0
src/i18n/locales/hi/embeddings.json

@@ -52,5 +52,14 @@
 		"vectorDimensionNotDetermined": "प्रदाता '{{provider}}' के साथ मॉडल '{{modelId}}' के लिए वेक्टर आयाम निर्धारित नहीं कर सका। मॉडल प्रोफ़ाइल या कॉन्फ़िगरेशन की जांच करें।",
 		"qdrantUrlMissing": "वेक्टर स्टोर बनाने के लिए Qdrant URL गायब है",
 		"codeIndexingNotConfigured": "सेवाएं नहीं बना सकते: कोड इंडेक्सिंग ठीक से कॉन्फ़िगर नहीं है"
+	},
+	"orchestrator": {
+		"indexingFailedNoBlocks": "इंडेक्सिंग असफल: कोई भी कोड ब्लॉक सफलतापूर्वक इंडेक्स नहीं हुआ। यह आमतौर पर एम्बेडर कॉन्फ़िगरेशन समस्या को दर्शाता है।",
+		"indexingFailedCritical": "इंडेक्सिंग असफल: प्रोसेस करने के लिए फाइलें मिलने के बावजूद कोई भी कोड ब्लॉक सफलतापूर्वक इंडेक्स नहीं हुआ। यह एक गंभीर एम्बेडर विफलता को दर्शाता है।",
+		"fileWatcherStarted": "फाइल वॉचर शुरू हुआ।",
+		"fileWatcherStopped": "फाइल वॉचर रुक गया।",
+		"failedDuringInitialScan": "प्रारंभिक स्कैन के दौरान असफल: {{errorMessage}}",
+		"unknownError": "अज्ञात त्रुटि",
+		"indexingRequiresWorkspace": "इंडेक्सिंग के लिए एक खुला वर्कस्पेस फ़ोल्डर आवश्यक है"
 	}
 }

+ 7 - 1
src/i18n/locales/id/common.json

@@ -165,13 +165,19 @@
 			"deleteFailed": "Gagal menghapus mode kustom: {{error}}",
 			"resetFailed": "Gagal mereset mode kustom: {{error}}",
 			"modeNotFound": "Kesalahan tulis: Mode tidak ditemukan",
-			"noWorkspaceForProject": "Tidak ditemukan folder workspace untuk mode khusus proyek"
+			"noWorkspaceForProject": "Tidak ditemukan folder workspace untuk mode khusus proyek",
+			"rulesCleanupFailed": "Mode berhasil dihapus, tetapi gagal menghapus folder aturan di {{rulesFolderPath}}. Kamu mungkin perlu menghapusnya secara manual."
 		},
 		"scope": {
 			"project": "proyek",
 			"global": "global"
 		}
 	},
+	"marketplace": {
+		"mode": {
+			"rulesCleanupFailed": "Mode berhasil dihapus, tetapi gagal menghapus folder aturan di {{rulesFolderPath}}. Kamu mungkin perlu menghapusnya secara manual."
+		}
+	},
 	"mdm": {
 		"errors": {
 			"cloud_auth_required": "Organisasi kamu memerlukan autentikasi Kilo Code Cloud. Silakan masuk untuk melanjutkan.",

+ 9 - 0
src/i18n/locales/id/embeddings.json

@@ -52,5 +52,14 @@
 		"vectorDimensionNotDetermined": "Tidak dapat menentukan dimensi vektor untuk model '{{modelId}}' dengan penyedia '{{provider}}'. Periksa profil model atau konfigurasi.",
 		"qdrantUrlMissing": "URL Qdrant tidak ada untuk membuat penyimpanan vektor",
 		"codeIndexingNotConfigured": "Tidak dapat membuat layanan: Pengindeksan kode tidak dikonfigurasi dengan benar"
+	},
+	"orchestrator": {
+		"indexingFailedNoBlocks": "Pengindeksan gagal: Tidak ada blok kode yang berhasil diindeks. Ini biasanya menunjukkan masalah konfigurasi embedder.",
+		"indexingFailedCritical": "Pengindeksan gagal: Tidak ada blok kode yang berhasil diindeks meskipun menemukan file untuk diproses. Ini menunjukkan kegagalan kritis embedder.",
+		"fileWatcherStarted": "Pemantau file dimulai.",
+		"fileWatcherStopped": "Pemantau file dihentikan.",
+		"failedDuringInitialScan": "Gagal selama pemindaian awal: {{errorMessage}}",
+		"unknownError": "Kesalahan tidak diketahui",
+		"indexingRequiresWorkspace": "Pengindeksan memerlukan folder workspace yang terbuka"
 	}
 }

+ 7 - 1
src/i18n/locales/it/common.json

@@ -165,13 +165,19 @@
 			"deleteFailed": "Eliminazione modalità personalizzata fallita: {{error}}",
 			"resetFailed": "Reset modalità personalizzate fallito: {{error}}",
 			"modeNotFound": "Errore di scrittura: Modalità non trovata",
-			"noWorkspaceForProject": "Nessuna cartella workspace trovata per la modalità specifica del progetto"
+			"noWorkspaceForProject": "Nessuna cartella workspace trovata per la modalità specifica del progetto",
+			"rulesCleanupFailed": "La modalità è stata eliminata con successo, ma non è stato possibile eliminare la cartella delle regole in {{rulesFolderPath}}. Potrebbe essere necessario eliminarla manualmente."
 		},
 		"scope": {
 			"project": "progetto",
 			"global": "globale"
 		}
 	},
+	"marketplace": {
+		"mode": {
+			"rulesCleanupFailed": "La modalità è stata rimossa con successo, ma non è stato possibile eliminare la cartella delle regole in {{rulesFolderPath}}. Potrebbe essere necessario eliminarla manualmente."
+		}
+	},
 	"mdm": {
 		"errors": {
 			"cloud_auth_required": "La tua organizzazione richiede l'autenticazione Kilo Code Cloud. Accedi per continuare.",

+ 9 - 0
src/i18n/locales/it/embeddings.json

@@ -52,5 +52,14 @@
 		"vectorDimensionNotDetermined": "Impossibile determinare la dimensione del vettore per il modello '{{modelId}}' con il provider '{{provider}}'. Controlla i profili del modello o la configurazione.",
 		"qdrantUrlMissing": "URL Qdrant mancante per la creazione dello storage vettoriale",
 		"codeIndexingNotConfigured": "Impossibile creare i servizi: L'indicizzazione del codice non è configurata correttamente"
+	},
+	"orchestrator": {
+		"indexingFailedNoBlocks": "Indicizzazione fallita: Nessun blocco di codice è stato indicizzato con successo. Questo di solito indica un problema di configurazione dell'embedder.",
+		"indexingFailedCritical": "Indicizzazione fallita: Nessun blocco di codice è stato indicizzato con successo nonostante siano stati trovati file da elaborare. Questo indica un errore critico dell'embedder.",
+		"fileWatcherStarted": "Monitoraggio file avviato.",
+		"fileWatcherStopped": "Monitoraggio file fermato.",
+		"failedDuringInitialScan": "Fallito durante la scansione iniziale: {{errorMessage}}",
+		"unknownError": "Errore sconosciuto",
+		"indexingRequiresWorkspace": "L'indicizzazione richiede una cartella di workspace aperta"
 	}
 }

+ 7 - 1
src/i18n/locales/ja/common.json

@@ -165,13 +165,19 @@
 			"deleteFailed": "カスタムモードの削除に失敗しました:{{error}}",
 			"resetFailed": "カスタムモードのリセットに失敗しました:{{error}}",
 			"modeNotFound": "書き込みエラー:モードが見つかりません",
-			"noWorkspaceForProject": "プロジェクト固有モード用のワークスペースフォルダーが見つかりません"
+			"noWorkspaceForProject": "プロジェクト固有モード用のワークスペースフォルダーが見つかりません",
+			"rulesCleanupFailed": "モードは正常に削除されましたが、{{rulesFolderPath}} にあるルールフォルダの削除に失敗しました。手動で削除する必要がある場合があります。"
 		},
 		"scope": {
 			"project": "プロジェクト",
 			"global": "グローバル"
 		}
 	},
+	"marketplace": {
+		"mode": {
+			"rulesCleanupFailed": "モードは正常に削除されましたが、{{rulesFolderPath}} にあるルールフォルダの削除に失敗しました。手動で削除する必要がある場合があります。"
+		}
+	},
 	"mdm": {
 		"errors": {
 			"cloud_auth_required": "あなたの組織では Kilo Code Cloud 認証が必要です。続行するにはサインインしてください。",

+ 9 - 0
src/i18n/locales/ja/embeddings.json

@@ -52,5 +52,14 @@
 		"vectorDimensionNotDetermined": "プロバイダー '{{provider}}' のモデル '{{modelId}}' の埋め込み次元を決定できませんでした。モデルプロファイルまたは設定を確認してください。",
 		"qdrantUrlMissing": "ベクターストア作成のためのQdrant URLがありません",
 		"codeIndexingNotConfigured": "サービスを作成できません: コードインデックスが正しく設定されていません"
+	},
+	"orchestrator": {
+		"indexingFailedNoBlocks": "インデックス作成に失敗しました:コードブロックが正常にインデックス化されませんでした。これは通常、エンベッダーの設定問題を示しています。",
+		"indexingFailedCritical": "インデックス作成に失敗しました:処理するファイルが見つかったにもかかわらず、コードブロックが正常にインデックス化されませんでした。これは重大なエンベッダーの障害を示しています。",
+		"fileWatcherStarted": "ファイルウォッチャーが開始されました。",
+		"fileWatcherStopped": "ファイルウォッチャーが停止されました。",
+		"failedDuringInitialScan": "初期スキャン中に失敗しました:{{errorMessage}}",
+		"unknownError": "不明なエラー",
+		"indexingRequiresWorkspace": "インデックス作成には、開かれたワークスペースフォルダーが必要です"
 	}
 }

+ 7 - 1
src/i18n/locales/ko/common.json

@@ -165,13 +165,19 @@
 			"deleteFailed": "사용자 정의 모드 삭제 실패: {{error}}",
 			"resetFailed": "사용자 정의 모드 재설정 실패: {{error}}",
 			"modeNotFound": "쓰기 오류: 모드를 찾을 수 없습니다",
-			"noWorkspaceForProject": "프로젝트별 모드용 작업 공간 폴더를 찾을 수 없습니다"
+			"noWorkspaceForProject": "프로젝트별 모드용 작업 공간 폴더를 찾을 수 없습니다",
+			"rulesCleanupFailed": "모드가 성공적으로 삭제되었지만 {{rulesFolderPath}}의 규칙 폴더를 삭제하지 못했습니다. 수동으로 삭제해야 할 수도 있습니다."
 		},
 		"scope": {
 			"project": "프로젝트",
 			"global": "글로벌"
 		}
 	},
+	"marketplace": {
+		"mode": {
+			"rulesCleanupFailed": "모드가 성공적으로 제거되었지만 {{rulesFolderPath}}의 규칙 폴더를 삭제하지 못했습니다. 수동으로 삭제해야 할 수도 있습니다."
+		}
+	},
 	"mdm": {
 		"errors": {
 			"cloud_auth_required": "조직에서 Kilo Code Cloud 인증이 필요합니다. 계속하려면 로그인하세요.",

+ 9 - 0
src/i18n/locales/ko/embeddings.json

@@ -52,5 +52,14 @@
 		"vectorDimensionNotDetermined": "프로바이더 '{{provider}}'의 모델 '{{modelId}}'에 대한 벡터 차원을 결정할 수 없습니다. 모델 프로필 또는 구성을 확인하세요.",
 		"qdrantUrlMissing": "벡터 저장소 생성을 위한 Qdrant URL이 누락되었습니다",
 		"codeIndexingNotConfigured": "서비스를 생성할 수 없습니다: 코드 인덱싱이 올바르게 구성되지 않았습니다"
+	},
+	"orchestrator": {
+		"indexingFailedNoBlocks": "인덱싱 실패: 코드 블록이 성공적으로 인덱싱되지 않았습니다. 이는 일반적으로 임베더 구성 문제를 나타냅니다.",
+		"indexingFailedCritical": "인덱싱 실패: 처리할 파일을 찾았음에도 불구하고 코드 블록이 성공적으로 인덱싱되지 않았습니다. 이는 중요한 임베더 오류를 나타냅니다.",
+		"fileWatcherStarted": "파일 감시자가 시작되었습니다.",
+		"fileWatcherStopped": "파일 감시자가 중지되었습니다.",
+		"failedDuringInitialScan": "초기 스캔 중 실패: {{errorMessage}}",
+		"unknownError": "알 수 없는 오류",
+		"indexingRequiresWorkspace": "인덱싱에는 열린 워크스페이스 폴더가 필요합니다"
 	}
 }

+ 7 - 1
src/i18n/locales/nl/common.json

@@ -165,13 +165,19 @@
 			"deleteFailed": "Aangepaste modus verwijderen mislukt: {{error}}",
 			"resetFailed": "Aangepaste modi resetten mislukt: {{error}}",
 			"modeNotFound": "Schrijffout: Modus niet gevonden",
-			"noWorkspaceForProject": "Geen workspace map gevonden voor projectspecifieke modus"
+			"noWorkspaceForProject": "Geen workspace map gevonden voor projectspecifieke modus",
+			"rulesCleanupFailed": "Modus succesvol verwijderd, maar het verwijderen van de regelsmap op {{rulesFolderPath}} is mislukt. Je moet deze mogelijk handmatig verwijderen."
 		},
 		"scope": {
 			"project": "project",
 			"global": "globaal"
 		}
 	},
+	"marketplace": {
+		"mode": {
+			"rulesCleanupFailed": "Modus succesvol verwijderd, maar het verwijderen van de regelsmap op {{rulesFolderPath}} is mislukt. Je moet deze mogelijk handmatig verwijderen."
+		}
+	},
 	"mdm": {
 		"errors": {
 			"cloud_auth_required": "Je organisatie vereist Kilo Code Cloud-authenticatie. Log in om door te gaan.",

+ 9 - 0
src/i18n/locales/nl/embeddings.json

@@ -52,5 +52,14 @@
 		"vectorDimensionNotDetermined": "Kan de vectordimensie voor model '{{modelId}}' met provider '{{provider}}' niet bepalen. Controleer modelprofielen of configuratie.",
 		"qdrantUrlMissing": "Qdrant URL ontbreekt voor het maken van vectoropslag",
 		"codeIndexingNotConfigured": "Kan geen services maken: Code-indexering is niet correct geconfigureerd"
+	},
+	"orchestrator": {
+		"indexingFailedNoBlocks": "Indexering mislukt: Geen codeblokken werden succesvol geïndexeerd. Dit duidt meestal op een embedder configuratieprobleem.",
+		"indexingFailedCritical": "Indexering mislukt: Geen codeblokken werden succesvol geïndexeerd ondanks het vinden van bestanden om te verwerken. Dit duidt op een kritieke embedder fout.",
+		"fileWatcherStarted": "Bestandsmonitor gestart.",
+		"fileWatcherStopped": "Bestandsmonitor gestopt.",
+		"failedDuringInitialScan": "Mislukt tijdens initiële scan: {{errorMessage}}",
+		"unknownError": "Onbekende fout",
+		"indexingRequiresWorkspace": "Indexering vereist een geopende workspace map"
 	}
 }

+ 7 - 1
src/i18n/locales/pl/common.json

@@ -165,13 +165,19 @@
 			"deleteFailed": "Usunięcie trybu niestandardowego nie powiodło się: {{error}}",
 			"resetFailed": "Resetowanie trybów niestandardowych nie powiodło się: {{error}}",
 			"modeNotFound": "Błąd zapisu: Tryb nie został znaleziony",
-			"noWorkspaceForProject": "Nie znaleziono folderu obszaru roboczego dla trybu specyficznego dla projektu"
+			"noWorkspaceForProject": "Nie znaleziono folderu obszaru roboczego dla trybu specyficznego dla projektu",
+			"rulesCleanupFailed": "Tryb został pomyślnie usunięty, ale nie udało się usunąć folderu reguł w {{rulesFolderPath}}. Może być konieczne ręczne usunięcie."
 		},
 		"scope": {
 			"project": "projekt",
 			"global": "globalny"
 		}
 	},
+	"marketplace": {
+		"mode": {
+			"rulesCleanupFailed": "Tryb został pomyślnie usunięty, ale nie udało się usunąć folderu reguł w {{rulesFolderPath}}. Może być konieczne ręczne usunięcie."
+		}
+	},
 	"mdm": {
 		"errors": {
 			"cloud_auth_required": "Twoja organizacja wymaga uwierzytelnienia Kilo Code Cloud. Zaloguj się, aby kontynuować.",

+ 9 - 0
src/i18n/locales/pl/embeddings.json

@@ -52,5 +52,14 @@
 		"vectorDimensionNotDetermined": "Nie można określić wymiaru wektora dla modelu '{{modelId}}' z dostawcą '{{provider}}'. Sprawdź profile modelu lub konfigurację.",
 		"qdrantUrlMissing": "Brak adresu URL Qdrant do utworzenia magazynu wektorów",
 		"codeIndexingNotConfigured": "Nie można utworzyć usług: Indeksowanie kodu nie jest poprawnie skonfigurowane"
+	},
+	"orchestrator": {
+		"indexingFailedNoBlocks": "Indeksowanie nie powiodło się: Żadne bloki kodu nie zostały pomyślnie zaindeksowane. To zwykle wskazuje na problem z konfiguracją embeddera.",
+		"indexingFailedCritical": "Indeksowanie nie powiodło się: Żadne bloki kodu nie zostały pomyślnie zaindeksowane pomimo znalezienia plików do przetworzenia. To wskazuje na krytyczny błąd embeddera.",
+		"fileWatcherStarted": "Monitor plików uruchomiony.",
+		"fileWatcherStopped": "Monitor plików zatrzymany.",
+		"failedDuringInitialScan": "Niepowodzenie podczas początkowego skanowania: {{errorMessage}}",
+		"unknownError": "Nieznany błąd",
+		"indexingRequiresWorkspace": "Indeksowanie wymaga otwartego folderu workspace"
 	}
 }

+ 7 - 1
src/i18n/locales/pt-BR/common.json

@@ -165,13 +165,19 @@
 			"deleteFailed": "Falha ao excluir modo personalizado: {{error}}",
 			"resetFailed": "Falha ao redefinir modos personalizados: {{error}}",
 			"modeNotFound": "Erro de escrita: Modo não encontrado",
-			"noWorkspaceForProject": "Nenhuma pasta de workspace encontrada para modo específico do projeto"
+			"noWorkspaceForProject": "Nenhuma pasta de workspace encontrada para modo específico do projeto",
+			"rulesCleanupFailed": "O modo foi excluído com sucesso, mas falhou ao excluir a pasta de regras em {{rulesFolderPath}}. Você pode precisar excluí-la manualmente."
 		},
 		"scope": {
 			"project": "projeto",
 			"global": "global"
 		}
 	},
+	"marketplace": {
+		"mode": {
+			"rulesCleanupFailed": "O modo foi removido com sucesso, mas falhou ao excluir a pasta de regras em {{rulesFolderPath}}. Você pode precisar excluí-la manualmente."
+		}
+	},
 	"mdm": {
 		"errors": {
 			"cloud_auth_required": "Sua organização requer autenticação do Kilo Code Cloud. Faça login para continuar.",

+ 9 - 0
src/i18n/locales/pt-BR/embeddings.json

@@ -52,5 +52,14 @@
 		"vectorDimensionNotDetermined": "Não foi possível determinar a dimensão do vetor para o modelo '{{modelId}}' com o provedor '{{provider}}'. Verifique os perfis do modelo ou a configuração.",
 		"qdrantUrlMissing": "URL do Qdrant ausente para criação do armazenamento de vetores",
 		"codeIndexingNotConfigured": "Não é possível criar serviços: A indexação de código não está configurada corretamente"
+	},
+	"orchestrator": {
+		"indexingFailedNoBlocks": "Indexação falhou: Nenhum bloco de código foi indexado com sucesso. Isso geralmente indica um problema de configuração do embedder.",
+		"indexingFailedCritical": "Indexação falhou: Nenhum bloco de código foi indexado com sucesso apesar de encontrar arquivos para processar. Isso indica uma falha crítica do embedder.",
+		"fileWatcherStarted": "Monitor de arquivos iniciado.",
+		"fileWatcherStopped": "Monitor de arquivos parado.",
+		"failedDuringInitialScan": "Falhou durante a varredura inicial: {{errorMessage}}",
+		"unknownError": "Erro desconhecido",
+		"indexingRequiresWorkspace": "A indexação requer uma pasta de workspace aberta"
 	}
 }

+ 7 - 1
src/i18n/locales/ru/common.json

@@ -165,13 +165,19 @@
 			"deleteFailed": "Не удалось удалить пользовательский режим: {{error}}",
 			"resetFailed": "Не удалось сбросить пользовательские режимы: {{error}}",
 			"modeNotFound": "Ошибка записи: Режим не найден",
-			"noWorkspaceForProject": "Не найдена папка рабочего пространства для режима, специфичного для проекта"
+			"noWorkspaceForProject": "Не найдена папка рабочего пространства для режима, специфичного для проекта",
+			"rulesCleanupFailed": "Режим успешно удален, но не удалось удалить папку правил в {{rulesFolderPath}}. Возможно, вам придется удалить ее вручную."
 		},
 		"scope": {
 			"project": "проект",
 			"global": "глобальный"
 		}
 	},
+	"marketplace": {
+		"mode": {
+			"rulesCleanupFailed": "Режим успешно удален, но не удалось удалить папку правил в {{rulesFolderPath}}. Возможно, вам придется удалить ее вручную."
+		}
+	},
 	"mdm": {
 		"errors": {
 			"cloud_auth_required": "Ваша организация требует аутентификации Kilo Code Cloud. Войдите в систему, чтобы продолжить.",

+ 9 - 0
src/i18n/locales/ru/embeddings.json

@@ -52,5 +52,14 @@
 		"vectorDimensionNotDetermined": "Не удалось определить размерность вектора для модели '{{modelId}}' с провайдером '{{provider}}'. Проверьте профили модели или конфигурацию.",
 		"qdrantUrlMissing": "Отсутствует URL Qdrant для создания векторного хранилища",
 		"codeIndexingNotConfigured": "Невозможно создать сервисы: Индексация кода не настроена должным образом"
+	},
+	"orchestrator": {
+		"indexingFailedNoBlocks": "Индексация не удалась: Ни один блок кода не был успешно проиндексирован. Это обычно указывает на проблему конфигурации эмбеддера.",
+		"indexingFailedCritical": "Индексация не удалась: Ни один блок кода не был успешно проиндексирован, несмотря на обнаружение файлов для обработки. Это указывает на критическую ошибку эмбеддера.",
+		"fileWatcherStarted": "Наблюдатель файлов запущен.",
+		"fileWatcherStopped": "Наблюдатель файлов остановлен.",
+		"failedDuringInitialScan": "Ошибка во время первоначального сканирования: {{errorMessage}}",
+		"unknownError": "Неизвестная ошибка",
+		"indexingRequiresWorkspace": "Для индексации требуется открытая папка рабочего пространства"
 	}
 }

+ 7 - 1
src/i18n/locales/th/common.json

@@ -160,13 +160,19 @@
 			"deleteFailed": "ล้มเหลวในการลบโหมดกำหนดเอง: {{error}}",
 			"resetFailed": "ล้มเหลวในการรีเซ็ตโหมดกำหนดเอง: {{error}}",
 			"modeNotFound": "ข้อผิดพลาดในการเขียน: ไม่พบโหมด",
-			"noWorkspaceForProject": "ไม่พบโฟลเดอร์พื้นที่ทำงานสำหรับโหมดเฉพาะโครงการ"
+			"noWorkspaceForProject": "ไม่พบโฟลเดอร์พื้นที่ทำงานสำหรับโหมดเฉพาะโครงการ",
+			"rulesCleanupFailed": "ลบโหมดเรียบร้อยแล้ว แต่ไม่สามารถลบโฟลเดอร์กฎที่ {{rulesFolderPath}} คุณอาจต้องลบด้วยตนเอง"
 		},
 		"scope": {
 			"project": "โครงการ",
 			"global": "ทั่วไป"
 		}
 	},
+	"marketplace": {
+		"mode": {
+			"rulesCleanupFailed": "เอาโหมดออกเรียบร้อยแล้ว แต่ไม่สามารถลบโฟลเดอร์กฎที่ {{rulesFolderPath}} คุณอาจต้องลบด้วยตนเอง"
+		}
+	},
 	"mdm": {
 		"errors": {
 			"cloud_auth_required": "องค์กรของคุณต้องการการยืนยันตัวตน Kilo Code Cloud กรุณาลงชื่อเข้าใช้เพื่อดำเนินการต่อ",

+ 9 - 0
src/i18n/locales/th/embeddings.json

@@ -52,5 +52,14 @@
 		"vectorDimensionNotDetermined": "ไม่สามารถกำหนดมิติเวกเตอร์สำหรับโมเดล '{{modelId}}' กับผู้ให้บริการ '{{provider}}' ตรวจสอบโปรไฟล์โมเดลหรือการกำหนดค่า",
 		"qdrantUrlMissing": "Qdrant URL หายไปสำหรับการสร้าง vector store",
 		"codeIndexingNotConfigured": "ไม่สามารถสร้างบริการ: การจัดทำดัชนีโค้ดไม่ได้กำหนดค่าอย่างถูกต้อง"
+	},
+	"orchestrator": {
+		"indexingFailedNoBlocks": "การจัดทำดัชนีล้มเหลว: ไม่มีบล็อกโค้ดใดที่ถูกจัดทำดัชนีสำเร็จ ซึ่งมักหมายถึงปัญหาการกำหนดค่า embedder",
+		"indexingFailedCritical": "การจัดทำดัชนีล้มเหลว: ไม่มีบล็อกโค้ดใดที่ถูกจัดทำดัชนีสำเร็จแม้ว่าจะพบไฟล์ที่ต้องประมวลผล ซึ่งแสดงถึงความล้มเหลวที่สำคัญของ embedder",
+		"fileWatcherStarted": "เริ่มต้นตัวติดตามไฟล์แล้ว",
+		"fileWatcherStopped": "หยุดตัวติดตามไฟล์แล้ว",
+		"failedDuringInitialScan": "ล้มเหลวระหว่างการสแกนเริ่มต้น: {{errorMessage}}",
+		"unknownError": "ข้อผิดพลาดที่ไม่ทราบสาเหตุ",
+		"indexingRequiresWorkspace": "การจัดทำดัชนีต้องการโฟลเดอร์พื้นที่ทำงานที่เปิดอยู่"
 	}
 }

+ 7 - 1
src/i18n/locales/tr/common.json

@@ -165,13 +165,19 @@
 			"deleteFailed": "Özel mod silme başarısız: {{error}}",
 			"resetFailed": "Özel modları sıfırlama başarısız: {{error}}",
 			"modeNotFound": "Yazma hatası: Mod bulunamadı",
-			"noWorkspaceForProject": "Proje özel modu için çalışma alanı klasörü bulunamadı"
+			"noWorkspaceForProject": "Proje özel modu için çalışma alanı klasörü bulunamadı",
+			"rulesCleanupFailed": "Mod başarıyla silindi, ancak {{rulesFolderPath}} konumundaki kurallar klasörü silinemedi. Manuel olarak silmeniz gerekebilir."
 		},
 		"scope": {
 			"project": "proje",
 			"global": "küresel"
 		}
 	},
+	"marketplace": {
+		"mode": {
+			"rulesCleanupFailed": "Mod başarıyla kaldırıldı, ancak {{rulesFolderPath}} konumundaki kurallar klasörü silinemedi. Manuel olarak silmeniz gerekebilir."
+		}
+	},
 	"mdm": {
 		"errors": {
 			"cloud_auth_required": "Kuruluşunuz Kilo Code Cloud kimlik doğrulaması gerektiriyor. Devam etmek için giriş yapın.",

+ 9 - 0
src/i18n/locales/tr/embeddings.json

@@ -52,5 +52,14 @@
 		"vectorDimensionNotDetermined": "'{{provider}}' sağlayıcısı ile '{{modelId}}' modeli için vektör boyutu belirlenemedi. Model profillerini veya yapılandırmayı kontrol et.",
 		"qdrantUrlMissing": "Vektör deposu oluşturmak için Qdrant URL'si eksik",
 		"codeIndexingNotConfigured": "Hizmetler oluşturulamıyor: Kod indeksleme düzgün yapılandırılmamış"
+	},
+	"orchestrator": {
+		"indexingFailedNoBlocks": "İndeksleme başarısız: Hiçbir kod bloğu başarıyla indekslenemedi. Bu genellikle bir embedder yapılandırma sorunu olduğunu gösterir.",
+		"indexingFailedCritical": "İndeksleme başarısız: İşlenecek dosyalar bulunmasına rağmen hiçbir kod bloğu başarıyla indekslenemedi. Bu kritik bir embedder hatası olduğunu gösterir.",
+		"fileWatcherStarted": "Dosya izleyici başlatıldı.",
+		"fileWatcherStopped": "Dosya izleyici durduruldu.",
+		"failedDuringInitialScan": "İlk tarama sırasında başarısız: {{errorMessage}}",
+		"unknownError": "Bilinmeyen hata",
+		"indexingRequiresWorkspace": "İndeksleme açık bir workspace klasörü gerektirir"
 	}
 }

+ 7 - 1
src/i18n/locales/uk/common.json

@@ -160,13 +160,19 @@
 			"deleteFailed": "Не вдалося видалити користувацький режим: {{error}}",
 			"resetFailed": "Не вдалося скинути користувацькі режими: {{error}}",
 			"modeNotFound": "Помилка запису: Режим не знайдено",
-			"noWorkspaceForProject": "Не знайдено папку робочого простору для режиму конкретного проекту"
+			"noWorkspaceForProject": "Не знайдено папку робочого простору для режиму конкретного проекту",
+			"rulesCleanupFailed": "Режим успішно видалено, але не вдалося видалити папку правил в {{rulesFolderPath}}. Можливо, вам доведеться видалити її вручну."
 		},
 		"scope": {
 			"project": "проект",
 			"global": "глобальний"
 		}
 	},
+	"marketplace": {
+		"mode": {
+			"rulesCleanupFailed": "Режим успішно видалено, але не вдалося видалити папку правил в {{rulesFolderPath}}. Можливо, вам доведеться видалити її вручну."
+		}
+	},
 	"mdm": {
 		"errors": {
 			"cloud_auth_required": "Твоя організація вимагає автентифікації Kilo Code Cloud. Будь ласка, увійди, щоб продовжити.",

+ 9 - 0
src/i18n/locales/uk/embeddings.json

@@ -52,5 +52,14 @@
 		"vectorDimensionNotDetermined": "Не вдалося визначити розмірність вектора для моделі '{{modelId}}' з провайдером '{{provider}}'. Перевір профілі моделі або конфігурацію.",
 		"qdrantUrlMissing": "Відсутній URL Qdrant для створення векторного сховища",
 		"codeIndexingNotConfigured": "Неможливо створити сервіси: Індексування коду не налаштовано належним чином"
+	},
+	"orchestrator": {
+		"indexingFailedNoBlocks": "Індексування не вдалося: Жодного блоку коду не було успішно проіндексовано. Це зазвичай вказує на проблему конфігурації embedder.",
+		"indexingFailedCritical": "Індексування не вдалося: Жодного блоку коду не було успішно проіндексовано, незважаючи на знаходження файлів для обробки. Це вказує на критичну помилку embedder.",
+		"fileWatcherStarted": "Спостерігач файлів запущено.",
+		"fileWatcherStopped": "Спостерігач файлів зупинено.",
+		"failedDuringInitialScan": "Не вдалося під час початкового сканування: {{errorMessage}}",
+		"unknownError": "Невідома помилка",
+		"indexingRequiresWorkspace": "Індексування вимагає відкритої папки робочого простору"
 	}
 }

+ 7 - 1
src/i18n/locales/vi/common.json

@@ -165,13 +165,19 @@
 			"deleteFailed": "Xóa chế độ tùy chỉnh thất bại: {{error}}",
 			"resetFailed": "Đặt lại chế độ tùy chỉnh thất bại: {{error}}",
 			"modeNotFound": "Lỗi ghi: Không tìm thấy chế độ",
-			"noWorkspaceForProject": "Không tìm thấy thư mục workspace cho chế độ dành riêng cho dự án"
+			"noWorkspaceForProject": "Không tìm thấy thư mục workspace cho chế độ dành riêng cho dự án",
+			"rulesCleanupFailed": "Đã xóa chế độ thành công, nhưng không thể xóa thư mục quy tắc tại {{rulesFolderPath}}. Bạn có thể cần xóa thủ công."
 		},
 		"scope": {
 			"project": "dự án",
 			"global": "toàn cầu"
 		}
 	},
+	"marketplace": {
+		"mode": {
+			"rulesCleanupFailed": "Đã xóa chế độ thành công, nhưng không thể xóa thư mục quy tắc tại {{rulesFolderPath}}. Bạn có thể cần xóa thủ công."
+		}
+	},
 	"mdm": {
 		"errors": {
 			"cloud_auth_required": "Tổ chức của bạn yêu cầu xác thực Kilo Code Cloud. Vui lòng đăng nhập để tiếp tục.",

+ 9 - 0
src/i18n/locales/vi/embeddings.json

@@ -52,5 +52,14 @@
 		"vectorDimensionNotDetermined": "Không thể xác định kích thước vector cho mô hình '{{modelId}}' với nhà cung cấp '{{provider}}'. Kiểm tra hồ sơ mô hình hoặc cấu hình.",
 		"qdrantUrlMissing": "Thiếu URL Qdrant để tạo kho lưu trữ vector",
 		"codeIndexingNotConfigured": "Không thể tạo dịch vụ: Lập chỉ mục mã không được cấu hình đúng cách"
+	},
+	"orchestrator": {
+		"indexingFailedNoBlocks": "Lập chỉ mục thất bại: Không có khối mã nào được lập chỉ mục thành công. Điều này thường cho thấy vấn đề cấu hình embedder.",
+		"indexingFailedCritical": "Lập chỉ mục thất bại: Không có khối mã nào được lập chỉ mục thành công mặc dù đã tìm thấy tệp để xử lý. Điều này cho thấy lỗi nghiêm trọng của embedder.",
+		"fileWatcherStarted": "Trình theo dõi tệp đã khởi động.",
+		"fileWatcherStopped": "Trình theo dõi tệp đã dừng.",
+		"failedDuringInitialScan": "Thất bại trong quá trình quét ban đầu: {{errorMessage}}",
+		"unknownError": "Lỗi không xác định",
+		"indexingRequiresWorkspace": "Lập chỉ mục yêu cầu một thư mục workspace đang mở"
 	}
 }

+ 7 - 1
src/i18n/locales/zh-CN/common.json

@@ -170,13 +170,19 @@
 			"deleteFailed": "删除自定义模式失败:{{error}}",
 			"resetFailed": "重置自定义模式失败:{{error}}",
 			"modeNotFound": "写入错误:未找到模式",
-			"noWorkspaceForProject": "未找到项目特定模式的工作区文件夹"
+			"noWorkspaceForProject": "未找到项目特定模式的工作区文件夹",
+			"rulesCleanupFailed": "模式删除成功,但无法删除位于 {{rulesFolderPath}} 的规则文件夹。您可能需要手动删除。"
 		},
 		"scope": {
 			"project": "项目",
 			"global": "全局"
 		}
 	},
+	"marketplace": {
+		"mode": {
+			"rulesCleanupFailed": "模式已成功移除,但无法删除位于 {{rulesFolderPath}} 的规则文件夹。您可能需要手动删除。"
+		}
+	},
 	"mdm": {
 		"errors": {
 			"cloud_auth_required": "您的组织需要 Kilo Code Cloud 身份验证。请登录以继续。",

+ 9 - 0
src/i18n/locales/zh-CN/embeddings.json

@@ -52,5 +52,14 @@
 		"vectorDimensionNotDetermined": "无法确定提供商 '{{provider}}' 的模型 '{{modelId}}' 的向量维度。请检查模型配置文件或配置。",
 		"qdrantUrlMissing": "创建向量存储缺少 Qdrant URL",
 		"codeIndexingNotConfigured": "无法创建服务:代码索引未正确配置"
+	},
+	"orchestrator": {
+		"indexingFailedNoBlocks": "索引失败:没有代码块被成功索引。这通常表示 Embedder 配置问题。",
+		"indexingFailedCritical": "索引失败:尽管找到了要处理的文件,但没有代码块被成功索引。这表示 Embedder 出现严重故障。",
+		"fileWatcherStarted": "文件监控已启动。",
+		"fileWatcherStopped": "文件监控已停止。",
+		"failedDuringInitialScan": "初始扫描失败:{{errorMessage}}",
+		"unknownError": "未知错误",
+		"indexingRequiresWorkspace": "索引需要打开的工作区文件夹"
 	}
 }

+ 7 - 1
src/i18n/locales/zh-TW/common.json

@@ -165,13 +165,19 @@
 			"deleteFailed": "刪除自訂模式失敗:{{error}}",
 			"resetFailed": "重設自訂模式失敗:{{error}}",
 			"modeNotFound": "寫入錯誤:未找到模式",
-			"noWorkspaceForProject": "未找到專案特定模式的工作區資料夾"
+			"noWorkspaceForProject": "未找到專案特定模式的工作區資料夾",
+			"rulesCleanupFailed": "模式已成功刪除,但無法刪除位於 {{rulesFolderPath}} 的規則資料夾。您可能需要手動刪除。"
 		},
 		"scope": {
 			"project": "專案",
 			"global": "全域"
 		}
 	},
+	"marketplace": {
+		"mode": {
+			"rulesCleanupFailed": "模式已成功移除,但無法刪除位於 {{rulesFolderPath}} 的規則資料夾。您可能需要手動刪除。"
+		}
+	},
 	"mdm": {
 		"errors": {
 			"cloud_auth_required": "您的組織需要 Kilo Code Cloud 身份驗證。請登入以繼續。",

+ 9 - 0
src/i18n/locales/zh-TW/embeddings.json

@@ -52,5 +52,14 @@
 		"vectorDimensionNotDetermined": "無法確定提供商 '{{provider}}' 的模型 '{{modelId}}' 的向量維度。請檢查模型設定檔或設定。",
 		"qdrantUrlMissing": "建立向量儲存缺少 Qdrant URL",
 		"codeIndexingNotConfigured": "無法建立服務:程式碼索引未正確設定"
+	},
+	"orchestrator": {
+		"indexingFailedNoBlocks": "索引失敗:沒有程式碼區塊被成功索引。這通常表示 Embedder 設定問題。",
+		"indexingFailedCritical": "索引失敗:儘管找到了要處理的檔案,但沒有程式碼區塊被成功索引。這表示 Embedder 出現嚴重故障。",
+		"fileWatcherStarted": "檔案監控已啟動。",
+		"fileWatcherStopped": "檔案監控已停止。",
+		"failedDuringInitialScan": "初始掃描失敗:{{errorMessage}}",
+		"unknownError": "未知錯誤",
+		"indexingRequiresWorkspace": "索引需要開啟的工作區資料夾"
 	}
 }

+ 230 - 0
src/integrations/diagnostics/__tests__/diagnostics.spec.ts

@@ -383,4 +383,234 @@ describe("diagnosticsToProblemsString", () => {
 		expect(vscode.workspace.fs.stat).toHaveBeenCalledWith(fileUri)
 		expect(vscode.workspace.openTextDocument).toHaveBeenCalledWith(fileUri)
 	})
+	it("should return empty string when includeDiagnostics is false", async () => {
+		// Mock file URI
+		const fileUri = vscode.Uri.file("/path/to/file.ts")
+
+		// Create diagnostics
+		const diagnostics = [
+			new vscode.Diagnostic(new vscode.Range(0, 0, 0, 10), "Error message", vscode.DiagnosticSeverity.Error),
+			new vscode.Diagnostic(new vscode.Range(1, 0, 1, 10), "Warning message", vscode.DiagnosticSeverity.Warning),
+		]
+
+		// Mock fs.stat to return file type
+		const mockStat = {
+			type: vscode.FileType.File,
+		}
+		vscode.workspace.fs.stat = vitest.fn().mockResolvedValue(mockStat)
+
+		// Mock document content
+		const mockDocument = {
+			lineAt: vitest.fn((line) => ({
+				text: `Line ${line + 1} content`,
+			})),
+		}
+		vscode.workspace.openTextDocument = vitest.fn().mockResolvedValue(mockDocument)
+
+		// Test with includeDiagnostics set to false
+		const result = await diagnosticsToProblemsString(
+			[[fileUri, diagnostics]],
+			[vscode.DiagnosticSeverity.Error, vscode.DiagnosticSeverity.Warning],
+			"/path/to",
+			false, // includeDiagnostics
+		)
+
+		// Verify empty string is returned
+		expect(result).toBe("")
+
+		// Verify no file operations were performed
+		expect(vscode.workspace.fs.stat).not.toHaveBeenCalled()
+		expect(vscode.workspace.openTextDocument).not.toHaveBeenCalled()
+	})
+
+	it("should limit diagnostics based on count when maxDiagnostics is specified", async () => {
+		// Mock file URI
+		const fileUri = vscode.Uri.file("/path/to/file.ts")
+
+		// Create multiple diagnostics with varying message lengths
+		const diagnostics = [
+			new vscode.Diagnostic(new vscode.Range(0, 0, 0, 10), "Error 1", vscode.DiagnosticSeverity.Error),
+			new vscode.Diagnostic(new vscode.Range(1, 0, 1, 10), "Warning 1", vscode.DiagnosticSeverity.Warning),
+			new vscode.Diagnostic(new vscode.Range(2, 0, 2, 10), "Error 2", vscode.DiagnosticSeverity.Error),
+			new vscode.Diagnostic(new vscode.Range(3, 0, 3, 10), "Warning 2", vscode.DiagnosticSeverity.Warning),
+			new vscode.Diagnostic(new vscode.Range(4, 0, 4, 10), "Error 3", vscode.DiagnosticSeverity.Error),
+		]
+
+		// Mock fs.stat to return file type
+		const mockStat = {
+			type: vscode.FileType.File,
+		}
+		vscode.workspace.fs.stat = vitest.fn().mockResolvedValue(mockStat)
+
+		// Mock document content
+		const mockDocument = {
+			lineAt: vitest.fn((line) => ({
+				text: `Line ${line + 1} content`,
+			})),
+		}
+		vscode.workspace.openTextDocument = vitest.fn().mockResolvedValue(mockDocument)
+
+		// Test with maxDiagnostics set to 3 (should include exactly 3 diagnostics)
+		const result = await diagnosticsToProblemsString(
+			[[fileUri, diagnostics]],
+			[vscode.DiagnosticSeverity.Error, vscode.DiagnosticSeverity.Warning],
+			"/path/to",
+			true, // includeDiagnostics
+			3, // maxDiagnostics (count limit)
+		)
+
+		// Verify that exactly 3 diagnostics are included, prioritizing errors
+		expect(result).toContain("Error 1")
+		expect(result).toContain("Error 2")
+		expect(result).toContain("Error 3")
+		// Warnings should not be included since we have 3 errors and limit is 3
+		expect(result).not.toContain("Warning 1")
+		expect(result).not.toContain("Warning 2")
+
+		// Verify the limit message is included
+		expect(result).toContain("2 more problems omitted to prevent context overflow")
+	})
+
+	it("should prioritize errors over warnings when limiting diagnostics by count", async () => {
+		// Mock file URIs
+		const fileUri1 = vscode.Uri.file("/path/to/file1.ts")
+		const fileUri2 = vscode.Uri.file("/path/to/file2.ts")
+
+		// Create diagnostics with mixed severities
+		const diagnostics1 = [
+			new vscode.Diagnostic(new vscode.Range(0, 0, 0, 10), "Warning in file1", vscode.DiagnosticSeverity.Warning),
+			new vscode.Diagnostic(new vscode.Range(1, 0, 1, 10), "Error in file1", vscode.DiagnosticSeverity.Error),
+		]
+
+		const diagnostics2 = [
+			new vscode.Diagnostic(new vscode.Range(0, 0, 0, 10), "Error in file2", vscode.DiagnosticSeverity.Error),
+			new vscode.Diagnostic(new vscode.Range(1, 0, 1, 10), "Warning in file2", vscode.DiagnosticSeverity.Warning),
+			new vscode.Diagnostic(
+				new vscode.Range(2, 0, 2, 10),
+				"Info in file2",
+				vscode.DiagnosticSeverity.Information,
+			),
+		]
+
+		// Mock fs.stat to return file type
+		const mockStat = {
+			type: vscode.FileType.File,
+		}
+		vscode.workspace.fs.stat = vitest.fn().mockResolvedValue(mockStat)
+
+		// Mock document content
+		const mockDocument = {
+			lineAt: vitest.fn((line) => ({
+				text: `Line ${line + 1} content`,
+			})),
+		}
+		vscode.workspace.openTextDocument = vitest.fn().mockResolvedValue(mockDocument)
+
+		// Test with maxDiagnostics set to 2 (should include exactly 2 diagnostics)
+		const result = await diagnosticsToProblemsString(
+			[
+				[fileUri1, diagnostics1],
+				[fileUri2, diagnostics2],
+			],
+			[vscode.DiagnosticSeverity.Error, vscode.DiagnosticSeverity.Warning, vscode.DiagnosticSeverity.Information],
+			"/path/to",
+			true, // includeDiagnostics
+			2, // maxDiagnostics (count limit)
+		)
+
+		// Verify exactly 2 errors are included (prioritized over warnings)
+		expect(result).toContain("Error in file1")
+		expect(result).toContain("Error in file2")
+		// Warnings and info should not be included
+		expect(result).not.toContain("Warning in file1")
+		expect(result).not.toContain("Warning in file2")
+		expect(result).not.toContain("Info in file2")
+
+		// Verify the limit message is included
+		expect(result).toContain("3 more problems omitted to prevent context overflow")
+	})
+
+	it("should handle maxDiagnostics with no limit when undefined", async () => {
+		// Mock file URI
+		const fileUri = vscode.Uri.file("/path/to/file.ts")
+
+		// Create multiple diagnostics
+		const diagnostics = [
+			new vscode.Diagnostic(new vscode.Range(0, 0, 0, 10), "Error 1", vscode.DiagnosticSeverity.Error),
+			new vscode.Diagnostic(new vscode.Range(1, 0, 1, 10), "Warning 1", vscode.DiagnosticSeverity.Warning),
+			new vscode.Diagnostic(new vscode.Range(2, 0, 2, 10), "Error 2", vscode.DiagnosticSeverity.Error),
+		]
+
+		// Mock fs.stat to return file type
+		const mockStat = {
+			type: vscode.FileType.File,
+		}
+		vscode.workspace.fs.stat = vitest.fn().mockResolvedValue(mockStat)
+
+		// Mock document content
+		const mockDocument = {
+			lineAt: vitest.fn((line) => ({
+				text: `Line ${line + 1} content`,
+			})),
+		}
+		vscode.workspace.openTextDocument = vitest.fn().mockResolvedValue(mockDocument)
+
+		// Test with maxDiagnostics undefined
+		const result = await diagnosticsToProblemsString(
+			[[fileUri, diagnostics]],
+			[vscode.DiagnosticSeverity.Error, vscode.DiagnosticSeverity.Warning],
+			"/path/to",
+			true, // includeDiagnostics
+			undefined, // maxDiagnostics
+		)
+
+		// Verify all diagnostics are included
+		expect(result).toContain("Error 1")
+		expect(result).toContain("Warning 1")
+		expect(result).toContain("Error 2")
+
+		// Verify no limit message is included
+		expect(result).not.toContain("(Showing")
+	})
+
+	it("should handle maxDiagnostics of 0 as no limit", async () => {
+		// Mock file URI
+		const fileUri = vscode.Uri.file("/path/to/file.ts")
+
+		// Create multiple diagnostics
+		const diagnostics = [
+			new vscode.Diagnostic(new vscode.Range(0, 0, 0, 10), "Error 1", vscode.DiagnosticSeverity.Error),
+			new vscode.Diagnostic(new vscode.Range(1, 0, 1, 10), "Warning 1", vscode.DiagnosticSeverity.Warning),
+		]
+
+		// Mock fs.stat to return file type
+		const mockStat = {
+			type: vscode.FileType.File,
+		}
+		vscode.workspace.fs.stat = vitest.fn().mockResolvedValue(mockStat)
+
+		// Mock document content
+		const mockDocument = {
+			lineAt: vitest.fn((line) => ({
+				text: `Line ${line + 1} content`,
+			})),
+		}
+		vscode.workspace.openTextDocument = vitest.fn().mockResolvedValue(mockDocument)
+
+		// Test with maxDiagnostics set to 0
+		const result = await diagnosticsToProblemsString(
+			[[fileUri, diagnostics]],
+			[vscode.DiagnosticSeverity.Error, vscode.DiagnosticSeverity.Warning],
+			"/path/to",
+			true, // includeDiagnostics
+			0, // maxDiagnostics (should be treated as no limit)
+		)
+
+		// Verify all diagnostics are included
+		expect(result).toContain("Error 1")
+		expect(result).toContain("Warning 1")
+
+		// Verify no limit message is included
+		expect(result).not.toContain("(Showing")
+	})
 })

+ 154 - 39
src/integrations/diagnostics/index.ts

@@ -74,55 +74,170 @@ export async function diagnosticsToProblemsString(
 	diagnostics: [vscode.Uri, vscode.Diagnostic[]][],
 	severities: vscode.DiagnosticSeverity[],
 	cwd: string,
+	includeDiagnosticMessages: boolean = true,
+	maxDiagnosticMessages?: number,
 ): Promise<string> {
+	// If diagnostics are disabled, return empty string
+	if (!includeDiagnosticMessages) {
+		return ""
+	}
+
 	const documents = new Map<vscode.Uri, vscode.TextDocument>()
 	const fileStats = new Map<vscode.Uri, vscode.FileStat>()
 	let result = ""
-	for (const [uri, fileDiagnostics] of diagnostics) {
-		const problems = fileDiagnostics
-			.filter((d) => severities.includes(d.severity))
-			.sort((a, b) => a.range.start.line - b.range.start.line)
-		if (problems.length > 0) {
-			result += `\n\n${path.relative(cwd, uri.fsPath).toPosix()}`
-			for (const diagnostic of problems) {
-				let label: string
-				switch (diagnostic.severity) {
-					case vscode.DiagnosticSeverity.Error:
-						label = "Error"
-						break
-					case vscode.DiagnosticSeverity.Warning:
-						label = "Warning"
-						break
-					case vscode.DiagnosticSeverity.Information:
-						label = "Information"
-						break
-					case vscode.DiagnosticSeverity.Hint:
-						label = "Hint"
-						break
-					default:
-						label = "Diagnostic"
+
+	// If we have a limit, use count-based limiting
+	if (maxDiagnosticMessages && maxDiagnosticMessages > 0) {
+		let includedCount = 0
+		let totalCount = 0
+
+		// Flatten all diagnostics with their URIs
+		const allDiagnostics: { uri: vscode.Uri; diagnostic: vscode.Diagnostic; formattedText?: string }[] = []
+		for (const [uri, fileDiagnostics] of diagnostics) {
+			const filtered = fileDiagnostics.filter((d) => severities.includes(d.severity))
+			for (const diagnostic of filtered) {
+				allDiagnostics.push({ uri, diagnostic })
+				totalCount++
+			}
+		}
+
+		// Sort by severity (errors first) and then by line number
+		allDiagnostics.sort((a, b) => {
+			const severityDiff = a.diagnostic.severity - b.diagnostic.severity
+			if (severityDiff !== 0) return severityDiff
+			return a.diagnostic.range.start.line - b.diagnostic.range.start.line
+		})
+
+		// Process diagnostics up to the count limit
+		const includedDiagnostics: typeof allDiagnostics = []
+		for (const item of allDiagnostics) {
+			// Stop if we've reached the count limit
+			if (includedCount >= maxDiagnosticMessages) {
+				break
+			}
+
+			// Format the diagnostic
+			let label: string
+			switch (item.diagnostic.severity) {
+				case vscode.DiagnosticSeverity.Error:
+					label = "Error"
+					break
+				case vscode.DiagnosticSeverity.Warning:
+					label = "Warning"
+					break
+				case vscode.DiagnosticSeverity.Information:
+					label = "Information"
+					break
+				case vscode.DiagnosticSeverity.Hint:
+					label = "Hint"
+					break
+				default:
+					label = "Diagnostic"
+			}
+			const line = item.diagnostic.range.start.line + 1
+			const source = item.diagnostic.source ? `${item.diagnostic.source} ` : ""
+
+			// Pre-format the diagnostic text
+			let diagnosticText = ""
+			try {
+				let fileStat = fileStats.get(item.uri)
+				if (!fileStat) {
+					fileStat = await vscode.workspace.fs.stat(item.uri)
+					fileStats.set(item.uri, fileStat)
+				}
+				if (fileStat.type === vscode.FileType.File) {
+					const document = documents.get(item.uri) || (await vscode.workspace.openTextDocument(item.uri))
+					documents.set(item.uri, document)
+					const lineContent = document.lineAt(item.diagnostic.range.start.line).text
+					diagnosticText = `\n- [${source}${label}] ${line} | ${lineContent} : ${item.diagnostic.message}`
+				} else {
+					diagnosticText = `\n- [${source}${label}] 1 | (directory) : ${item.diagnostic.message}`
 				}
-				const line = diagnostic.range.start.line + 1 // VSCode lines are 0-indexed
-				const source = diagnostic.source ? `${diagnostic.source} ` : ""
-				try {
-					let fileStat = fileStats.get(uri)
-					if (!fileStat) {
-						fileStat = await vscode.workspace.fs.stat(uri)
-						fileStats.set(uri, fileStat)
+			} catch {
+				diagnosticText = `\n- [${source}${label}] ${line} | (unavailable) : ${item.diagnostic.message}`
+			}
+
+			item.formattedText = diagnosticText
+			includedDiagnostics.push(item)
+			includedCount++
+		}
+
+		// Group included diagnostics by URI for output
+		const groupedDiagnostics = new Map<string, { uri: vscode.Uri; diagnostics: typeof allDiagnostics }>()
+		for (const item of includedDiagnostics) {
+			const key = item.uri.toString()
+			if (!groupedDiagnostics.has(key)) {
+				groupedDiagnostics.set(key, { uri: item.uri, diagnostics: [] })
+			}
+			groupedDiagnostics.get(key)!.diagnostics.push(item)
+		}
+
+		// Build the output
+		for (const { uri, diagnostics: fileDiagnostics } of groupedDiagnostics.values()) {
+			const sortedDiagnostics = fileDiagnostics.sort(
+				(a, b) => a.diagnostic.range.start.line - b.diagnostic.range.start.line,
+			)
+			if (sortedDiagnostics.length > 0) {
+				result += `\n\n${path.relative(cwd, uri.fsPath).toPosix()}`
+				for (const item of sortedDiagnostics) {
+					result += item.formattedText
+				}
+			}
+		}
+
+		// Add a note if we hit the limit
+		if (totalCount > includedCount) {
+			result += `\n\n... ${totalCount - includedCount} more problems omitted to prevent context overflow`
+		}
+	} else {
+		// No limit, process all diagnostics as before
+		for (const [uri, fileDiagnostics] of diagnostics) {
+			const problems = fileDiagnostics
+				.filter((d) => severities.includes(d.severity))
+				.sort((a, b) => a.range.start.line - b.range.start.line)
+			if (problems.length > 0) {
+				result += `\n\n${path.relative(cwd, uri.fsPath).toPosix()}`
+				for (const diagnostic of problems) {
+					let label: string
+					switch (diagnostic.severity) {
+						case vscode.DiagnosticSeverity.Error:
+							label = "Error"
+							break
+						case vscode.DiagnosticSeverity.Warning:
+							label = "Warning"
+							break
+						case vscode.DiagnosticSeverity.Information:
+							label = "Information"
+							break
+						case vscode.DiagnosticSeverity.Hint:
+							label = "Hint"
+							break
+						default:
+							label = "Diagnostic"
 					}
-					if (fileStat.type === vscode.FileType.File) {
-						const document = documents.get(uri) || (await vscode.workspace.openTextDocument(uri))
-						documents.set(uri, document)
-						const lineContent = document.lineAt(diagnostic.range.start.line).text
-						result += `\n- [${source}${label}] ${line} | ${lineContent} : ${diagnostic.message}`
-					} else {
-						result += `\n- [${source}${label}] 1 | (directory) : ${diagnostic.message}`
+					const line = diagnostic.range.start.line + 1 // VSCode lines are 0-indexed
+					const source = diagnostic.source ? `${diagnostic.source} ` : ""
+					try {
+						let fileStat = fileStats.get(uri)
+						if (!fileStat) {
+							fileStat = await vscode.workspace.fs.stat(uri)
+							fileStats.set(uri, fileStat)
+						}
+						if (fileStat.type === vscode.FileType.File) {
+							const document = documents.get(uri) || (await vscode.workspace.openTextDocument(uri))
+							documents.set(uri, document)
+							const lineContent = document.lineAt(diagnostic.range.start.line).text
+							result += `\n- [${source}${label}] ${line} | ${lineContent} : ${diagnostic.message}`
+						} else {
+							result += `\n- [${source}${label}] 1 | (directory) : ${diagnostic.message}`
+						}
+					} catch {
+						result += `\n- [${source}${label}] ${line} | (unavailable) : ${diagnostic.message}`
 					}
-				} catch {
-					result += `\n- [${source}${label}] ${line} | (unavailable) : ${diagnostic.message}`
 				}
 			}
 		}
 	}
+
 	return result.trim()
 }
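
For readers skimming the hunk above: a minimal sketch of calling the reworked diagnosticsToProblemsString with the two new parameters. The import path, workspace path, and the limit of 50 are illustrative assumptions, not taken from this PR; only the signature mirrors the diff.

// Sketch: capping the diagnostics that get rendered into the prompt
import * as vscode from "vscode"
import { diagnosticsToProblemsString } from "../integrations/diagnostics" // path assumed

const problems = await diagnosticsToProblemsString(
	vscode.languages.getDiagnostics(),                                   // [Uri, Diagnostic[]][] for all open files
	[vscode.DiagnosticSeverity.Error, vscode.DiagnosticSeverity.Warning], // severities to keep
	"/path/to/workspace",                                                 // cwd used to relativize file paths
	true,                                                                 // includeDiagnosticMessages: false returns ""
	50,                                                                   // maxDiagnosticMessages: 0/undefined means no limit
)
// Errors are emitted before warnings; when the cap is hit the result ends with
// "... N more problems omitted to prevent context overflow".
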

+ 15 - 1
src/integrations/editor/DiffViewProvider.ts

@@ -36,8 +36,14 @@ export class DiffViewProvider {
 	private activeLineController?: DecorationController
 	private streamedLines: string[] = []
 	private preDiagnostics: [vscode.Uri, vscode.Diagnostic[]][] = []
+	private taskRef: WeakRef<Task>
 
-	constructor(private cwd: string) {}
+	constructor(
+		private cwd: string,
+		task: Task,
+	) {
+		this.taskRef = new WeakRef(task)
+	}
 
 	async open(relPath: string): Promise<void> {
 		this.relPath = relPath
@@ -237,12 +243,20 @@ export class DiffViewProvider {
 
 			const postDiagnostics = vscode.languages.getDiagnostics()
 
+			// Get diagnostic settings from state
+			const task = this.taskRef.deref()
+			const state = await task?.providerRef.deref()?.getState()
+			const includeDiagnosticMessages = state?.includeDiagnosticMessages ?? true
+			const maxDiagnosticMessages = state?.maxDiagnosticMessages ?? 50
+
 			const newProblems = await diagnosticsToProblemsString(
 				getNewDiagnostics(this.preDiagnostics, postDiagnostics),
 				[
 					vscode.DiagnosticSeverity.Error, // only including errors since warnings can be distracting (if user wants to fix warnings they can use the @problems mention)
 				],
 				this.cwd,
+				includeDiagnosticMessages,
+				maxDiagnosticMessages,
 			) // Will be empty string if no errors.
 
 			newProblemsMessage =

+ 14 - 1
src/integrations/editor/__tests__/DiffViewProvider.spec.ts

@@ -101,6 +101,7 @@ describe("DiffViewProvider", () => {
 	let diffViewProvider: DiffViewProvider
 	const mockCwd = "/mock/cwd"
 	let mockWorkspaceEdit: { replace: any; delete: any }
+	let mockTask: any
 
 	beforeEach(() => {
 		vi.clearAllMocks()
@@ -110,7 +111,19 @@ describe("DiffViewProvider", () => {
 		}
 		vi.mocked(vscode.WorkspaceEdit).mockImplementation(() => mockWorkspaceEdit as any)
 
-		diffViewProvider = new DiffViewProvider(mockCwd)
+		// Create a mock Task instance
+		mockTask = {
+			providerRef: {
+				deref: vi.fn().mockReturnValue({
+					getState: vi.fn().mockResolvedValue({
+						includeDiagnosticMessages: true,
+						maxDiagnosticMessages: 50,
+					}),
+				}),
+			},
+		}
+
+		diffViewProvider = new DiffViewProvider(mockCwd, mockTask)
 		// Mock the necessary properties and methods
 		;(diffViewProvider as any).relPath = "test.txt"
 		;(diffViewProvider as any).activeDiffEditor = {

+ 221 - 0
src/integrations/misc/__tests__/extract-text-large-files.spec.ts

@@ -0,0 +1,221 @@
+// npx vitest run integrations/misc/__tests__/extract-text-large-files.spec.ts
+
+import { describe, it, expect, vi, beforeEach, Mock } from "vitest"
+import * as fs from "fs/promises"
+import { extractTextFromFile } from "../extract-text"
+import { countFileLines } from "../line-counter"
+import { readLines } from "../read-lines"
+import { isBinaryFile } from "isbinaryfile"
+
+// Mock all dependencies
+vi.mock("fs/promises")
+vi.mock("../line-counter")
+vi.mock("../read-lines")
+vi.mock("isbinaryfile")
+
+describe("extractTextFromFile - Large File Handling", () => {
+	// Type the mocks
+	const mockedFs = vi.mocked(fs)
+	const mockedCountFileLines = vi.mocked(countFileLines)
+	const mockedReadLines = vi.mocked(readLines)
+	const mockedIsBinaryFile = vi.mocked(isBinaryFile)
+
+	beforeEach(() => {
+		vi.clearAllMocks()
+		// Set default mock behavior
+		mockedFs.access.mockResolvedValue(undefined)
+		mockedIsBinaryFile.mockResolvedValue(false)
+	})
+
+	it("should truncate files that exceed maxReadFileLine limit", async () => {
+		const largeFileContent = Array(150)
+			.fill(null)
+			.map((_, i) => `Line ${i + 1}: This is a test line with some content`)
+			.join("\n")
+
+		mockedCountFileLines.mockResolvedValue(150)
+		mockedReadLines.mockResolvedValue(
+			Array(100)
+				.fill(null)
+				.map((_, i) => `Line ${i + 1}: This is a test line with some content`)
+				.join("\n"),
+		)
+
+		const result = await extractTextFromFile("/test/large-file.ts", 100)
+
+		// Should only include first 100 lines with line numbers
+		expect(result).toContain("  1 | Line 1: This is a test line with some content")
+		expect(result).toContain("100 | Line 100: This is a test line with some content")
+		expect(result).not.toContain("101 | Line 101: This is a test line with some content")
+
+		// Should include truncation message
+		expect(result).toContain(
+			"[File truncated: showing 100 of 150 total lines. The file is too large and may exhaust the context window if read in full.]",
+		)
+	})
+
+	it("should not truncate files within the maxReadFileLine limit", async () => {
+		const smallFileContent = Array(50)
+			.fill(null)
+			.map((_, i) => `Line ${i + 1}: This is a test line`)
+			.join("\n")
+
+		mockedCountFileLines.mockResolvedValue(50)
+		mockedFs.readFile.mockResolvedValue(smallFileContent as any)
+
+		const result = await extractTextFromFile("/test/small-file.ts", 100)
+
+		// Should include all lines with line numbers
+		expect(result).toContain(" 1 | Line 1: This is a test line")
+		expect(result).toContain("50 | Line 50: This is a test line")
+
+		// Should not include truncation message
+		expect(result).not.toContain("[File truncated:")
+	})
+
+	it("should handle files with exactly maxReadFileLine lines", async () => {
+		const exactFileContent = Array(100)
+			.fill(null)
+			.map((_, i) => `Line ${i + 1}`)
+			.join("\n")
+
+		mockedCountFileLines.mockResolvedValue(100)
+		mockedFs.readFile.mockResolvedValue(exactFileContent as any)
+
+		const result = await extractTextFromFile("/test/exact-file.ts", 100)
+
+		// Should include all lines with line numbers
+		expect(result).toContain("  1 | Line 1")
+		expect(result).toContain("100 | Line 100")
+
+		// Should not include truncation message
+		expect(result).not.toContain("[File truncated:")
+	})
+
+	it("should handle undefined maxReadFileLine by not truncating", async () => {
+		const largeFileContent = Array(200)
+			.fill(null)
+			.map((_, i) => `Line ${i + 1}`)
+			.join("\n")
+
+		mockedFs.readFile.mockResolvedValue(largeFileContent as any)
+
+		const result = await extractTextFromFile("/test/large-file.ts", undefined)
+
+		// Should include all lines with line numbers when maxReadFileLine is undefined
+		expect(result).toContain("  1 | Line 1")
+		expect(result).toContain("200 | Line 200")
+
+		// Should not include truncation message
+		expect(result).not.toContain("[File truncated:")
+	})
+
+	it("should handle empty files", async () => {
+		mockedFs.readFile.mockResolvedValue("" as any)
+
+		const result = await extractTextFromFile("/test/empty-file.ts", 100)
+
+		expect(result).toBe("")
+		expect(result).not.toContain("[File truncated:")
+	})
+
+	it("should handle files with only newlines", async () => {
+		const newlineOnlyContent = "\n\n\n\n\n"
+
+		mockedCountFileLines.mockResolvedValue(6) // 5 newlines = 6 lines
+		mockedReadLines.mockResolvedValue("\n\n")
+
+		const result = await extractTextFromFile("/test/newline-file.ts", 3)
+
+		// Should truncate at line 3
+		expect(result).toContain("[File truncated: showing 3 of 6 total lines")
+	})
+
+	it("should handle very large files efficiently", async () => {
+		// Simulate a 10,000 line file
+		mockedCountFileLines.mockResolvedValue(10000)
+		mockedReadLines.mockResolvedValue(
+			Array(500)
+				.fill(null)
+				.map((_, i) => `Line ${i + 1}: Some content here`)
+				.join("\n"),
+		)
+
+		const result = await extractTextFromFile("/test/very-large-file.ts", 500)
+
+		// Should only include first 500 lines with line numbers
+		expect(result).toContain("  1 | Line 1: Some content here")
+		expect(result).toContain("500 | Line 500: Some content here")
+		expect(result).not.toContain("501 | Line 501: Some content here")
+
+		// Should show truncation message
+		expect(result).toContain("[File truncated: showing 500 of 10000 total lines")
+	})
+
+	it("should handle maxReadFileLine of 0 by throwing an error", async () => {
+		const fileContent = "Line 1\nLine 2\nLine 3"
+
+		mockedFs.readFile.mockResolvedValue(fileContent as any)
+
+		// maxReadFileLine of 0 should throw an error
+		await expect(extractTextFromFile("/test/file.ts", 0)).rejects.toThrow(
+			"Invalid maxReadFileLine: 0. Must be a positive integer or -1 for unlimited.",
+		)
+	})
+
+	it("should handle negative maxReadFileLine by treating as undefined", async () => {
+		const fileContent = "Line 1\nLine 2\nLine 3"
+
+		mockedFs.readFile.mockResolvedValue(fileContent as any)
+
+		const result = await extractTextFromFile("/test/file.ts", -1)
+
+		// Should include all content with line numbers when negative
+		expect(result).toContain("1 | Line 1")
+		expect(result).toContain("2 | Line 2")
+		expect(result).toContain("3 | Line 3")
+		expect(result).not.toContain("[File truncated:")
+	})
+
+	it("should preserve file content structure when truncating", async () => {
+		const structuredContent = [
+			"function example() {",
+			"  const x = 1;",
+			"  const y = 2;",
+			"  return x + y;",
+			"}",
+			"",
+			"// More code below",
+		].join("\n")
+
+		mockedCountFileLines.mockResolvedValue(7)
+		mockedReadLines.mockResolvedValue(["function example() {", "  const x = 1;", "  const y = 2;"].join("\n"))
+
+		const result = await extractTextFromFile("/test/structured.ts", 3)
+
+		// Should preserve the first 3 lines with line numbers
+		expect(result).toContain("1 | function example() {")
+		expect(result).toContain("2 |   const x = 1;")
+		expect(result).toContain("3 |   const y = 2;")
+		expect(result).not.toContain("4 |   return x + y;")
+
+		// Should include truncation info
+		expect(result).toContain("[File truncated: showing 3 of 7 total lines")
+	})
+
+	it("should handle binary files by throwing an error", async () => {
+		mockedIsBinaryFile.mockResolvedValue(true)
+
+		await expect(extractTextFromFile("/test/binary.bin", 100)).rejects.toThrow(
+			"Cannot read text for file type: .bin",
+		)
+	})
+
+	it("should handle file not found errors", async () => {
+		mockedFs.access.mockRejectedValue(new Error("ENOENT"))
+
+		await expect(extractTextFromFile("/test/nonexistent.ts", 100)).rejects.toThrow(
+			"File not found: /test/nonexistent.ts",
+		)
+	})
+})

+ 37 - 1
src/integrations/misc/extract-text.ts

@@ -5,6 +5,8 @@ import mammoth from "mammoth"
 import fs from "fs/promises"
 import { isBinaryFile } from "isbinaryfile"
 import { extractTextFromXLSX } from "./extract-text-from-xlsx"
+import { countFileLines } from "./line-counter"
+import { readLines } from "./read-lines"
 
 async function extractTextFromPDF(filePath: string): Promise<string> {
 	const dataBuffer = await fs.readFile(filePath)
@@ -48,7 +50,27 @@ export function getSupportedBinaryFormats(): string[] {
 	return Object.keys(SUPPORTED_BINARY_FORMATS)
 }
 
-export async function extractTextFromFile(filePath: string): Promise<string> {
+/**
+ * Extracts text content from a file, with support for various formats including PDF, DOCX, XLSX, and plain text.
+ * For large text files, can limit the number of lines read to prevent context exhaustion.
+ *
+ * @param filePath - Path to the file to extract text from
+ * @param maxReadFileLine - Maximum number of lines to read from text files.
+ *                          Use UNLIMITED_LINES (-1) or undefined for no limit.
+ *                          Must be a positive integer or UNLIMITED_LINES.
+ * @returns Promise resolving to the extracted text content with line numbers
+ * @throws {Error} If file not found, unsupported format, or invalid parameters
+ */
+export async function extractTextFromFile(filePath: string, maxReadFileLine?: number): Promise<string> {
+	// Validate maxReadFileLine parameter
+	if (maxReadFileLine !== undefined && maxReadFileLine !== -1) {
+		if (!Number.isInteger(maxReadFileLine) || maxReadFileLine < 1) {
+			throw new Error(
+				`Invalid maxReadFileLine: ${maxReadFileLine}. Must be a positive integer or -1 for unlimited.`,
+			)
+		}
+	}
+
 	try {
 		await fs.access(filePath)
 	} catch (error) {
@@ -67,6 +89,20 @@ export async function extractTextFromFile(filePath: string): Promise<string> {
 	const isBinary = await isBinaryFile(filePath).catch(() => false)
 
 	if (!isBinary) {
+		// Check if we need to apply line limit
+		if (maxReadFileLine !== undefined && maxReadFileLine !== -1) {
+			const totalLines = await countFileLines(filePath)
+			if (totalLines > maxReadFileLine) {
+				// Read only up to maxReadFileLine (endLine is 0-based and inclusive)
+				const content = await readLines(filePath, maxReadFileLine - 1, 0)
+				const numberedContent = addLineNumbers(content)
+				return (
+					numberedContent +
+					`\n\n[File truncated: showing ${maxReadFileLine} of ${totalLines} total lines. The file is too large and may exhaust the context window if read in full.]`
+				)
+			}
+		}
+		// Read the entire file if no limit or file is within limit
 		return addLineNumbers(await fs.readFile(filePath, "utf8"))
 	} else {
 		throw new Error(`Cannot read text for file type: ${fileExtension}`)
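
A short usage sketch of the extended extractTextFromFile, matching the JSDoc and validation added above. The file path and the limit of 500 are illustrative; the truncation banner and error text are quoted from the diff.

// Sketch: reading a large text file with the new line cap
import { extractTextFromFile } from "../integrations/misc/extract-text" // path assumed

const text = await extractTextFromFile("/workspace/src/big-module.ts", 500)
// If the file has more than 500 lines, `text` holds the first 500 numbered lines followed by
// "[File truncated: showing 500 of N total lines. The file is too large and may exhaust the context window if read in full.]"

const full = await extractTextFromFile("/workspace/src/big-module.ts", -1) // -1 or undefined: no limit
// Passing 0 or any non-positive/non-integer value throws
// "Invalid maxReadFileLine: ... Must be a positive integer or -1 for unlimited."
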

+ 5 - 0
src/package.json

@@ -536,6 +536,11 @@
 					"type": "string",
 					"default": "",
 					"description": "%settings.autoImportSettingsPath.description%"
+				},
+				"kilo-code.useAgentRules": {
+					"type": "boolean",
+					"default": true,
+					"description": "%settings.useAgentRules.description%"
 				}
 			}
 		},

+ 1 - 0
src/package.nls.ca.json

@@ -38,6 +38,7 @@
 	"settings.customStoragePath.description": "Ruta d'emmagatzematge personalitzada. Deixeu-la buida per utilitzar la ubicació predeterminada. Admet rutes absolutes (p. ex. 'D:\\KiloCodeStorage')",
 	"settings.enableCodeActions.description": "Habilitar correccions ràpides de Kilo Code.",
 	"settings.autoImportSettingsPath.description": "Ruta a un fitxer de configuració de Kilo Code per importar automàticament en iniciar l'extensió. Admet rutes absolutes i rutes relatives al directori d'inici (per exemple, '~/Documents/kilo-code-settings.json'). Deixeu-ho en blanc per desactivar la importació automàtica.",
+	"settings.useAgentRules.description": "Activa la càrrega de fitxers AGENTS.md per a regles específiques de l'agent (vegeu https://agent-rules.org/)",
 	"ghost.input.title": "Premeu 'Enter' per confirmar o 'Escape' per cancel·lar",
 	"ghost.input.placeholder": "Descriviu què voleu fer...",
 	"ghost.commands.generateSuggestions": "Kilo Code: Generar Edicions Suggerides",

+ 1 - 0
src/package.nls.de.json

@@ -38,6 +38,7 @@
 	"settings.customStoragePath.description": "Benutzerdefinierter Speicherpfad. Leer lassen, um den Standardspeicherort zu verwenden. Unterstützt absolute Pfade (z.B. 'D:\\KiloCodeStorage')",
 	"settings.enableCodeActions.description": "Kilo Code Schnelle Problembehebung aktivieren.",
 	"settings.autoImportSettingsPath.description": "Pfad zu einer Kilo Code-Konfigurationsdatei, die beim Start der Erweiterung automatisch importiert wird. Unterstützt absolute Pfade und Pfade relativ zum Home-Verzeichnis (z.B. '~/Documents/kilo-code-settings.json'). Leer lassen, um den automatischen Import zu deaktivieren.",
+	"settings.useAgentRules.description": "Aktiviert das Laden von AGENTS.md-Dateien für agentenspezifische Regeln (siehe https://agent-rules.org/)",
 	"ghost.input.title": "Kilo Code Geisterschreiber",
 	"ghost.input.placeholder": "Beschreiben Sie, was Sie programmieren möchten...",
 	"ghost.commands.generateSuggestions": "Kilo Code: Vorgeschlagene Bearbeitungen Generieren",

+ 1 - 0
src/package.nls.es.json

@@ -38,6 +38,7 @@
 	"settings.customStoragePath.description": "Ruta de almacenamiento personalizada. Dejar vacío para usar la ubicación predeterminada. Admite rutas absolutas (ej. 'D:\\KiloCodeStorage')",
 	"settings.enableCodeActions.description": "Habilitar correcciones rápidas de Kilo Code.",
 	"settings.autoImportSettingsPath.description": "Ruta a un archivo de configuración de Kilo Code para importar automáticamente al iniciar la extensión. Admite rutas absolutas y rutas relativas al directorio de inicio (por ejemplo, '~/Documents/kilo-code-settings.json'). Dejar vacío para desactivar la importación automática.",
+	"settings.useAgentRules.description": "Habilita la carga de archivos AGENTS.md para reglas específicas del agente (ver https://agent-rules.org/)",
 	"ghost.input.title": "Presiona 'Enter' para confirmar o 'Escape' para cancelar",
 	"ghost.input.placeholder": "Describe lo que quieres hacer...",
 	"ghost.commands.generateSuggestions": "Kilo Code: Generar Ediciones Sugeridas",

+ 1 - 0
src/package.nls.fr.json

@@ -38,6 +38,7 @@
 	"settings.customStoragePath.description": "Chemin de stockage personnalisé. Laisser vide pour utiliser l'emplacement par défaut. Prend en charge les chemins absolus (ex: 'D:\\KiloCodeStorage')",
 	"settings.enableCodeActions.description": "Activer les correctifs rapides de Kilo Code.",
 	"settings.autoImportSettingsPath.description": "Chemin d'accès à un fichier de configuration Kilo Code à importer automatiquement au démarrage de l'extension. Prend en charge les chemins absolus et les chemins relatifs au répertoire de base (par exemple, '~/Documents/kilo-code-settings.json'). Laisser vide pour désactiver l'importation automatique.",
+	"settings.useAgentRules.description": "Activer le chargement des fichiers AGENTS.md pour les règles spécifiques à l'agent (voir https://agent-rules.org/)",
 	"ghost.input.title": "Écrivain fantôme Kilo Code",
 	"ghost.input.placeholder": "Décrivez ce que vous voulez coder...",
 	"ghost.commands.generateSuggestions": "Kilo Code : Générer des Modifications Suggérées",

+ 1 - 0
src/package.nls.hi.json

@@ -38,6 +38,7 @@
 	"settings.customStoragePath.description": "कस्टम स्टोरेज पाथ। डिफ़ॉल्ट स्थान का उपयोग करने के लिए खाली छोड़ें। पूर्ण पथ का समर्थन करता है (उदा. 'D:\\KiloCodeStorage')",
 	"settings.enableCodeActions.description": "Kilo Code त्वरित सुधार सक्षम करें",
 	"settings.autoImportSettingsPath.description": "Kilo Code कॉन्फ़िगरेशन फ़ाइल का पथ जिसे एक्सटेंशन स्टार्टअप पर स्वचालित रूप से आयात किया जाएगा। होम डायरेक्टरी के सापेक्ष पूर्ण पथ और पथों का समर्थन करता है (उदाहरण के लिए '~/Documents/kilo-code-settings.json')। ऑटो-इंपोर्ट को अक्षम करने के लिए खाली छोड़ दें।",
+	"settings.useAgentRules.description": "एजेंट-विशिष्ट नियमों के लिए AGENTS.md फ़ाइलों को लोड करना सक्षम करें (देखें https://agent-rules.org/)",
 	"ghost.input.title": "पुष्टि के लिए 'Enter' दबाएं या रद्द करने के लिए 'Escape' दबाएं",
 	"ghost.input.placeholder": "वर्णन करें कि आप क्या करना चाहते हैं...",
 	"ghost.commands.generateSuggestions": "Kilo Code: सुझाए गए संपादन जेनरेट करें",

+ 1 - 0
src/package.nls.id.json

@@ -36,6 +36,7 @@
 	"settings.customStoragePath.description": "Path penyimpanan kustom. Biarkan kosong untuk menggunakan lokasi default. Mendukung path absolut (misalnya 'D:\\KiloCodeStorage')",
 	"settings.enableCodeActions.description": "Aktifkan perbaikan cepat Kilo Code.",
 	"settings.autoImportSettingsPath.description": "Path ke file konfigurasi Kilo Code untuk diimpor secara otomatis saat ekstensi dimulai. Mendukung path absolut dan path relatif terhadap direktori home (misalnya '~/Documents/kilo-code-settings.json'). Biarkan kosong untuk menonaktifkan impor otomatis.",
+	"settings.useAgentRules.description": "Aktifkan pemuatan file AGENTS.md untuk aturan khusus agen (lihat https://agent-rules.org/)",
 	"command.generateCommitMessage.title": "Buat Pesan Commit dengan Kilo",
 	"command.profile.title": "Profil",
 	"ghost.input.title": "Tekan 'Enter' untuk konfirmasi atau 'Escape' untuk membatalkan",

+ 1 - 0
src/package.nls.it.json

@@ -38,6 +38,7 @@
 	"settings.customStoragePath.description": "Percorso di archiviazione personalizzato. Lasciare vuoto per utilizzare la posizione predefinita. Supporta percorsi assoluti (es. 'D:\\KiloCodeStorage')",
 	"settings.enableCodeActions.description": "Abilita correzioni rapide di Kilo Code.",
 	"settings.autoImportSettingsPath.description": "Percorso di un file di configurazione di Kilo Code da importare automaticamente all'avvio dell'estensione. Supporta percorsi assoluti e percorsi relativi alla directory home (ad es. '~/Documents/kilo-code-settings.json'). Lasciare vuoto per disabilitare l'importazione automatica.",
+	"settings.useAgentRules.description": "Abilita il caricamento dei file AGENTS.md per regole specifiche dell'agente (vedi https://agent-rules.org/)",
 	"ghost.input.title": "Scrittore Fantasma Kilo Code",
 	"ghost.input.placeholder": "Descrivi cosa vuoi programmare...",
 	"ghost.commands.generateSuggestions": "Kilo Code: Genera Modifiche Suggerite",

Some files are not shown because too many files changed in this diff.