
🐛 fix(watchMode): remove redundant watch mode commands and logic

- eliminate watch mode enable/disable/toggle commands from package.json
- delete unused logic in registerCommands.ts and WatchModeService.ts
- simplify file system watcher and document listener initialization

♻️ refactor(watchMode): enhance reflection handling

- create reflectionWrapper.ts for reflection logic
- introduce config.ts for reflection settings
- update processAIResponse to use reflectionWrapper

✨ feat(watchMode): add reflection capability

- implement reflection logic to handle failed AI response applications
- configure reflection attempts and enable/disable through config.ts
Chris Hasson, 8 months ago
commit 94fc779e75
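
As a usage sketch of the refactor described above: the service now delegates the retry loop to withReflection, and processAIResponse reports a failed edit application by throwing ReflectionNeededError. The snippet below mirrors the new call site in src/services/watchMode/WatchModeService.ts (shown in the diff further down); document, comment, triggerType, activeFilesWithContent and callAIModel are assumed to already be in scope inside the service method.

	// Minimal sketch of the new reflection wiring (inside WatchModeService).
	const result = await withReflection(
		{ document, comment, triggerType, activeFilesWithContent },
		{
			buildPrompt: (ctx) => buildAIPrompt(ctx.comment, ctx.triggerType, ctx.activeFilesWithContent),
			buildReflectionPrompt: (ctx, previousResponse, errors) =>
				buildWatchModeReflectionPrompt(ctx.comment, previousResponse, errors, ctx.activeFilesWithContent, "KO!"),
			callAI: (prompt) => this.callAIModel(prompt),
			// processAIResponse throws ReflectionNeededError when the edits cannot be applied,
			// which makes the wrapper build a reflection prompt and retry (when reflection is enabled).
			processResponse: (ctx, response, attempt) => processAIResponse(ctx.document, ctx.comment, response, attempt),
			log: (message) => this.log(message),
		},
	)
	if (!result.success) {
		this.log(`Failed to apply AI response to ${document.uri.fsPath}`)
	}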

+ 0 - 18
package.json

@@ -199,24 +199,6 @@
 				"title": "%command.addToContext.title%",
 				"category": "%configuration.title%"
 			},
-			{
-				"command": "kilo-code.watchMode.enable",
-				"title": "Enable Watch Mode",
-				"category": "Kilo Code",
-				"icon": "$(eye)"
-			},
-			{
-				"command": "kilo-code.watchMode.disable",
-				"title": "Disable Watch Mode",
-				"category": "Kilo Code",
-				"icon": "$(eye-closed)"
-			},
-			{
-				"command": "kilo-code.watchMode.toggle",
-				"title": "Toggle Watch Mode",
-				"category": "Kilo Code",
-				"icon": "$(eye)"
-			},
 			{
 				"command": "kilo-code.quickCommand",
 				"title": "Quick Command",

+ 0 - 33
src/activate/registerCommands.ts

@@ -203,39 +203,6 @@ const getCommandsMap = ({ context, outputChannel, watchModeService }: RegisterCo
 			})
 		},
 		// Watch Mode commands
-		"kilo-code.watchMode.enable": () => {
-			if (!watchModeService) {
-				outputChannel.appendLine("Watch Mode service not initialized")
-				vscode.window.showErrorMessage("Watch Mode service not initialized")
-				return
-			}
-
-			const success = watchModeService.enable()
-			if (success) {
-				vscode.window.showInformationMessage("Watch Mode enabled")
-			} else {
-				vscode.window.showInformationMessage("Watch Mode could not be enabled. Is the experiment enabled?")
-			}
-		},
-		"kilo-code.watchMode.disable": () => {
-			if (!watchModeService) {
-				outputChannel.appendLine("Watch Mode service not initialized")
-				return
-			}
-
-			watchModeService.disable()
-			vscode.window.showInformationMessage("Watch Mode disabled")
-		},
-		"kilo-code.watchMode.toggle": () => {
-			if (!watchModeService) {
-				outputChannel.appendLine("Watch Mode service not initialized")
-				vscode.window.showErrorMessage("Watch Mode service not initialized")
-				return
-			}
-
-			const isActive = watchModeService.toggle()
-			vscode.window.showInformationMessage(`Watch Mode ${isActive ? "enabled" : "disabled"}`)
-		},
 		"kilo-code.watchMode.enableExperiment": async () => {
 			if (!watchModeService) {
 				outputChannel.appendLine("Watch Mode service not initialized")

+ 148 - 306
src/services/watchMode/WatchModeService.ts

@@ -1,7 +1,7 @@
 import * as vscode from "vscode"
 import { Anthropic } from "@anthropic-ai/sdk"
 import { EXPERIMENT_IDS, ExperimentId, experiments } from "../../shared/experiments"
-import { AICommentData, FileChangeData, WatchModeConfig, TriggerType } from "./types"
+import { AICommentData, WatchModeConfig, TriggerType } from "./types"
 import { WatchModeUI } from "./ui"
 import { ApiHandler, buildApiHandler } from "../../api"
 import { ContextProxy } from "../../core/config/ContextProxy"
@@ -13,9 +13,9 @@ import {
 	updateAICommentPatterns,
 	updateCurrentAICommentPrefix,
 	determineTriggerType,
-	buildReflectionPrompt,
 	estimateTokenCount,
 } from "./commentProcessor"
+import { withReflection, buildWatchModeReflectionPrompt } from "./reflectionWrapper"
 import { WatchModeHighlighter } from "./WatchModeHighlighter"
 import { getContextFiles } from "./importParser"
 
@@ -30,7 +30,6 @@ interface DocumentChangeData {
  */
 export class WatchModeService {
 	private apiHandler: ApiHandler | null = null
-	private watchers: Map<string, vscode.FileSystemWatcher> = new Map()
 	private pendingProcessing: Map<string, ReturnType<typeof setTimeout>> = new Map()
 	private _isActive: boolean = false
 	private outputChannel?: vscode.OutputChannel
@@ -67,10 +66,7 @@ export class WatchModeService {
 	readonly onDidFinishProcessingComment = this._onDidFinishProcessingComment.event
 
 	private readonly defaultConfig: WatchModeConfig = {
-		include: ["**/*.{js,jsx,ts,tsx,py,java,go,rb,php,c,cpp,h,cs}"],
-		exclude: ["**/node_modules/**", "**/dist/**", "**/build/**", "**/.git/**"],
 		model: "claude-3.7",
-		debounceTime: 2000, // 2 seconds
 		commentPrefix: "KO!", // Default AI comment prefix
 	}
 	private config: WatchModeConfig
@@ -155,10 +151,7 @@ export class WatchModeService {
 		const config = vscode.workspace.getConfiguration("kilo-code.watchMode")
 
 		this.config = {
-			include: config.get("include", this.defaultConfig.include),
-			exclude: config.get("exclude", this.defaultConfig.exclude),
 			model: config.get("model", this.defaultConfig.model),
-			debounceTime: config.get("debounceTime", this.defaultConfig.debounceTime),
 			commentPrefix: config.get("commentPrefix", this.defaultConfig.commentPrefix),
 		}
 
@@ -177,61 +170,44 @@ export class WatchModeService {
 	 * Initializes file system watchers
 	 */
 	private initializeWatchers(): void {
-		this.disposeWatchers() // Clean up any existing watchers first
 		this.disposeDocumentListeners() // Clean up any existing document listeners
 
-		// Set up file system watchers for file save events
-		this.config.include.forEach((pattern) => {
-			const watcher = vscode.workspace.createFileSystemWatcher(
-				new vscode.RelativePattern(vscode.workspace.workspaceFolders?.[0]?.uri || "", pattern),
-				false, // Don't ignore creates
-				false, // Don't ignore changes
-				true, // Ignore deletes
-			)
-
-			// Handle file creation events
-			watcher.onDidCreate((uri: vscode.Uri) =>
-				this.handleFileChange({ fileUri: uri, type: vscode.FileChangeType.Created }),
-			)
-
-			// Handle file change events (file saves)
-			watcher.onDidChange((uri: vscode.Uri) => {
-				this.log(`File changed: ${uri.toString()}`)
-				return this.handleFileChange({ fileUri: uri, type: vscode.FileChangeType.Changed })
-			})
-
-			const watcherId = `watcher-${pattern}`
-			this.watchers.set(watcherId, watcher)
-			this.context.subscriptions.push(watcher)
-
-			this.log(`Initialized file watcher for pattern: ${pattern}`)
+		// Set up file system watcher for all files in the workspace
+		const watcher = vscode.workspace.createFileSystemWatcher(
+			new vscode.RelativePattern(vscode.workspace.workspaceFolders?.[0]?.uri || "", "**/*"),
+			false, // Don't ignore creates
+			false, // Don't ignore changes
+			true, // Ignore deletes
+		)
+
+		// Handle file creation events
+		const createDisposable = watcher.onDidCreate((uri: vscode.Uri) =>
+			this.handleFileChange({ fileUri: uri, type: vscode.FileChangeType.Created }),
+		)
+
+		// Handle file change events (file saves)
+		const changeDisposable = watcher.onDidChange((uri: vscode.Uri) => {
+			this.log(`File changed: ${uri.toString()}`)
+			return this.handleFileChange({ fileUri: uri, type: vscode.FileChangeType.Changed })
 		})
 
+		// Add watcher and its event handlers to subscriptions
+		this.context.subscriptions.push(watcher, createDisposable, changeDisposable)
+
 		// Set up document change listeners for real-time editing
 		const changeListener = vscode.workspace.onDidChangeTextDocument((event) => {
-			// Skip excluded files
-			if (this.isFileExcluded(event.document.uri)) {
-				return
-			}
-
 			// Process document changes as they happen (without debounce)
 			this.handleDocumentChange({ document: event.document, isDocumentSave: false })
 		})
 
 		// Set up document save listeners
 		const saveListener = vscode.workspace.onDidSaveTextDocument((document) => {
-			// Skip excluded files
-			if (this.isFileExcluded(document.uri)) {
-				return
-			}
-
 			// Process document saves
 			this.handleDocumentChange({ document: document, isDocumentSave: true })
 		})
 
 		this.documentListeners.push(changeListener, saveListener)
 		this.context.subscriptions.push(changeListener, saveListener)
-		this.log(`Initialized document change and save listeners`)
 	}
 
 	/**
@@ -273,62 +249,6 @@ export class WatchModeService {
 		}
 	}
 
-	/**
-	 * Handles file change events from file system watcher
-	 * @param data File change event data
-	 */
-	private handleFileChange(data: FileChangeData): void {
-		const { fileUri } = data
-		const fileKey = fileUri.toString()
-
-		// Skip excluded files
-		if (this.isFileExcluded(fileUri)) {
-			this.log(`File excluded: ${fileUri.toString()}`)
-			return
-		}
-
-		// Check if this file was recently processed to avoid duplicate processing
-		if (this.isFileRecentlyProcessed(fileKey)) {
-			this.log(`Skipping duplicate processing for recently changed file: ${fileKey}`)
-			return
-		}
-
-		// Mark this file as recently processed
-		this.markFileAsProcessed(fileKey)
-
-		// Process the file immediately without debounce
-		this.log(`Processing file from file system event: ${fileUri.toString()}`)
-		this.processFile(fileUri)
-	}
-
-	/**
-	 * Checks if a file should be excluded from processing
-	 * @param uri File URI to check
-	 */
-	private isFileExcluded(uri: vscode.Uri): boolean {
-		const relativePath = vscode.workspace.asRelativePath(uri)
-
-		// Convert glob patterns to proper regex patterns
-		const isExcluded = this.config.exclude.some((pattern) => {
-			// Escape special regex characters except * and ?
-			const escapedPattern = pattern
-				.replace(/[.+^${}()|[\]\\]/g, "\\$&")
-				.replace(/\*/g, ".*")
-				.replace(/\?/g, ".")
-
-			const regExp = new RegExp(`^${escapedPattern}$`)
-			const result = regExp.test(relativePath)
-
-			if (result) {
-				this.log(`File excluded: matched pattern ${pattern}`)
-			}
-
-			return result
-		})
-
-		return isExcluded
-	}
-
 	/**
 	 * Processes a file to find and handle AI comments
 	 * @param fileUri URI of the file to process
@@ -645,224 +565,158 @@ export class WatchModeService {
 		triggerType: TriggerType,
 		clearHighlight: () => void,
 	): Promise<void> {
-		// Maximum number of reflection attempts
-		const MAX_REFLECTION_ATTEMPTS = 1
-		let currentAttempt = 0
-		let success = false
-		let lastResponse: string | null = null
+		// Prepare context for the reflection wrapper
+		const context = {
+			document,
+			comment,
+			triggerType,
+			activeFilesWithContent: [] as { uri: vscode.Uri; content: string }[],
+		}
 
-		while (currentAttempt <= MAX_REFLECTION_ATTEMPTS) {
-			try {
-				// Build prompt from the comment and context
-				// this.log(`Building AI prompt (attempt ${currentAttempt})...`)
+		// Gather active files context first
+		const activeFilesWithContent = await this.gatherActiveFilesContext(document)
+		context.activeFilesWithContent = activeFilesWithContent
+
+		// Use the reflection wrapper
+		const result = await withReflection(context, {
+			buildPrompt: (ctx) => {
+				return buildAIPrompt(ctx.comment, ctx.triggerType, ctx.activeFilesWithContent)
+			},
+			buildReflectionPrompt: (ctx, originalResponse, errors) => {
+				return buildWatchModeReflectionPrompt(
+					ctx.comment,
+					originalResponse,
+					errors,
+					ctx.activeFilesWithContent,
+					this.config.commentPrefix,
+				)
+			},
+			callAI: async (prompt) => {
+				return await this.callAIModel(prompt)
+			},
+			processResponse: async (ctx, response, attemptNumber) => {
+				// Use the tracked document if this is from a quick command
+				const documentToProcess = this.quickCommandDocument || ctx.document
+				return await processAIResponse(documentToProcess, ctx.comment, response, attemptNumber)
+			},
+			log: (message) => this.log(message),
+		})
+
+		// Handle the result
+		if (result.success) {
+			this.log(`Successfully applied AI response to ${document.uri.fsPath}`)
+		} else {
+			this.log(`Failed to apply AI response to ${document.uri.fsPath}`)
+		}
 
-				// Gather content from active files for additional context
-				const activeFilesWithContent: { uri: vscode.Uri; content: string }[] = []
+		// Emit event that we've finished processing this comment
+		this._onDidFinishProcessingComment.fire({
+			fileUri: document.uri,
+			comment,
+			success: result.success,
+		})
 
-				// Maximum token budget for additional context (roughly 50% of model's context window)
-				const MAX_ADDITIONAL_CONTEXT_TOKENS = 50000
-				let estimatedTokens = 0
+		// Clear the highlight
+		clearHighlight()
+	}
 
-				// First, estimate tokens for the base prompt
-				const basePrompt = buildAIPrompt(comment, triggerType)
-				estimatedTokens += estimateTokenCount(basePrompt)
+	/**
+	 * Gathers content from active files for additional context
+	 */
+	private async gatherActiveFilesContext(
+		document: vscode.TextDocument,
+	): Promise<{ uri: vscode.Uri; content: string }[]> {
+		const activeFilesWithContent: { uri: vscode.Uri; content: string }[] = []
+
+		// Maximum token budget for additional context (roughly 50% of model's context window)
+		const MAX_ADDITIONAL_CONTEXT_TOKENS = 50000
+		let estimatedTokens = 0
+
+		// First, estimate tokens for the base prompt
+		const basePrompt = buildAIPrompt(
+			{
+				content: "",
+				startPos: new vscode.Position(0, 0),
+				endPos: new vscode.Position(0, 0),
+				context: "",
+				fileUri: document.uri,
+			},
+			TriggerType.Edit,
+		)
+		estimatedTokens += estimateTokenCount(basePrompt)
 
-				// Get imported files from the current document
-				const importedFiles = await getContextFiles(document.uri, document.getText(), 2)
-				this.log(`Found ${importedFiles.length} imported files for context`)
+		// Get imported files from the current document
+		const importedFiles = await getContextFiles(document.uri, document.getText(), 2)
+		this.log(`Found ${importedFiles.length} imported files for context`)
 
-				// Get active files and sort by recency (most recent first)
-				const activeFileUris = this.getActiveFiles()
+		// Get active files and sort by recency (most recent first)
+		const activeFileUris = this.getActiveFiles()
 
-				// Prioritize open editor tabs
-				const openEditors = vscode.window.visibleTextEditors.map((editor) => editor.document.uri.toString())
+		// Prioritize open editor tabs
+		const openEditors = vscode.window.visibleTextEditors.map((editor) => editor.document.uri.toString())
 
-				// Combine imported files with active files, removing duplicates
-				const allContextFiles = new Set<string>()
+		// Combine imported files with active files, removing duplicates
+		const allContextFiles = new Set<string>()
 
-				// Add imported files first (highest priority)
-				importedFiles.forEach((uri) => allContextFiles.add(uri.toString()))
+		// Add imported files first (highest priority)
+		importedFiles.forEach((uri) => allContextFiles.add(uri.toString()))
 
-				// Add open editors next
-				openEditors.forEach((uri) => allContextFiles.add(uri))
+		// Add open editors next
+		openEditors.forEach((uri) => allContextFiles.add(uri))
 
-				// Add other active files last
-				activeFileUris.forEach((uri) => allContextFiles.add(uri.toString()))
+		// Add other active files last
+		activeFileUris.forEach((uri) => allContextFiles.add(uri.toString()))
 
-				// Convert back to URIs and sort by priority
-				const sortedContextFiles = Array.from(allContextFiles).map((uriStr) => vscode.Uri.parse(uriStr))
+		// Convert back to URIs and sort by priority
+		const sortedContextFiles = Array.from(allContextFiles).map((uriStr) => vscode.Uri.parse(uriStr))
 
-				// Add content from context files until we reach the token limit
-				for (const uri of sortedContextFiles) {
-					// Skip the file with the comment (already included in the context)
-					if (uri.toString() === document.uri.toString()) {
-						continue
-					}
+		// Add content from context files until we reach the token limit
+		for (const uri of sortedContextFiles) {
+			// Skip the file with the comment (already included in the context)
+			if (uri.toString() === document.uri.toString()) {
+				continue
+			}
 
-					try {
-						// Skip files that are too large
-						if (uri.fsPath.endsWith(".min.js") || uri.fsPath.endsWith(".min.css")) {
-							this.log(`Skipping minified file: ${uri.fsPath}`)
-							continue
-						}
-
-						const doc = await vscode.workspace.openTextDocument(uri)
-						const content = doc.getText()
-
-						// Skip if file is too large
-						if (content.length > this.largeFileThreshold) {
-							this.log(`Skipping large file for context: ${uri.fsPath} (${content.length} bytes)`)
-							continue
-						}
-
-						// Estimate tokens for this file
-						const fileTokens = estimateTokenCount(content)
-
-						// If adding this file would exceed our budget, skip it
-						if (estimatedTokens + fileTokens > MAX_ADDITIONAL_CONTEXT_TOKENS) {
-							this.log(`Skipping file due to token budget: ${uri.fsPath} (${fileTokens} tokens)`)
-							continue
-						}
-
-						// Add file to context
-						activeFilesWithContent.push({ uri, content })
-						estimatedTokens += fileTokens
-
-						this.log(`Added file to context: ${uri.fsPath} (${fileTokens} tokens)`)
-					} catch (error) {
-						this.log(
-							`Error reading file ${uri.fsPath}: ${error instanceof Error ? error.message : String(error)}`,
-						)
-					}
+			try {
+				// Skip files that are too large
+				if (uri.fsPath.endsWith(".min.js") || uri.fsPath.endsWith(".min.css")) {
+					this.log(`Skipping minified file: ${uri.fsPath}`)
+					continue
 				}
 
-				this.log(
-					`Total context includes ${activeFilesWithContent.length} additional files (est. ${estimatedTokens} tokens)`,
-				)
+				const doc = await vscode.workspace.openTextDocument(uri)
+				const content = doc.getText()
 
-				// Ensure we always have a valid string prompt
-				const prompt =
-					currentAttempt === 0
-						? buildAIPrompt(comment, triggerType, activeFilesWithContent)
-						: lastResponse || buildAIPrompt(comment, triggerType, activeFilesWithContent)
-
-				this.log(`Prompt built, length: ${prompt.length} characters`)
-				let apiResponse: string | null = null
-
-				try {
-					// We know prompt is a valid string at this point
-					apiResponse = await this.callAIModel(prompt)
-					this.log(`API response received, length: ${apiResponse?.length || 0} characters`)
-					console.log("=== FULL API RESPONSE ===\n" + apiResponse)
-				} catch (apiError) {
-					this.log(
-						`Error calling AI model: ${apiError instanceof Error ? apiError.message : String(apiError)}`,
-					)
-					apiResponse = null
+				// Skip if file is too large
+				if (content.length > this.largeFileThreshold) {
+					this.log(`Skipping large file for context: ${uri.fsPath} (${content.length} bytes)`)
+					continue
 				}
 
-				if (!apiResponse) {
-					this.log("No response from AI model")
-					this._onDidFinishProcessingComment.fire({
-						fileUri: document.uri,
-						comment,
-						success: false,
-					})
-					clearHighlight()
-					return
+				// Estimate tokens for this file
+				const fileTokens = estimateTokenCount(content)
+
+				// If adding this file would exceed our budget, skip it
+				if (estimatedTokens + fileTokens > MAX_ADDITIONAL_CONTEXT_TOKENS) {
+					this.log(`Skipping file due to token budget: ${uri.fsPath} (${fileTokens} tokens)`)
+					continue
 				}
 
-				// Process the AI response
-				this.log(`Processing AI response (attempt ${currentAttempt})...`)
-				try {
-					// Use the tracked document if this is from a quick command
-					const documentToProcess = this.quickCommandDocument || document
-					success = await processAIResponse(documentToProcess, comment, apiResponse, currentAttempt)
-					this.log(`Response processed, success: ${success}`)
-
-					// If successful, break out of the loop
-					if (success) {
-						break
-					}
+				// Add file to context
+				activeFilesWithContent.push({ uri, content })
+				estimatedTokens += fileTokens
 
-					// If we've reached the maximum attempts, break out of the loop
-					if (currentAttempt >= MAX_REFLECTION_ATTEMPTS) {
-						break
-					}
-
-					// Increment the attempt counter
-					currentAttempt++
-				} catch (processError) {
-					// Check if this is a reflection request
-					if (processError instanceof Error && processError.message.startsWith("REFLECTION_NEEDED:")) {
-						// Parse the reflection information
-						const parts = processError.message.split(":")
-						const nextAttempt = parseInt(parts[1], 10)
-						const errorMessages = parts[2].split("|")
-
-						this.log(`Reflection needed, attempt ${nextAttempt} of ${MAX_REFLECTION_ATTEMPTS}`)
-						this.log(`Error messages: ${errorMessages.join(", ")}`)
-
-						// Update the attempt counter
-						currentAttempt = nextAttempt
-
-						// Build a reflection prompt with active files context
-						const reflectionPrompt = buildReflectionPrompt(
-							comment,
-							apiResponse,
-							errorMessages,
-							activeFilesWithContent,
-						)
-						lastResponse = reflectionPrompt
-
-						// Log the full reflection prompt for debugging
-						console.log("=== FULL REFLECTION PROMPT ===")
-						console.log(reflectionPrompt)
-						console.log("=== END REFLECTION PROMPT ===")
-
-						// Continue to the next iteration
-						continue
-					} else {
-						// Handle other errors
-						this.log(
-							`Error processing response: ${processError instanceof Error ? processError.message : String(processError)}`,
-						)
-						this._onDidFinishProcessingComment.fire({
-							fileUri: document.uri,
-							comment,
-							success: false,
-						})
-						clearHighlight()
-						return
-					}
-				}
+				this.log(`Added file to context: ${uri.fsPath} (${fileTokens} tokens)`)
 			} catch (error) {
-				this.log(`Error in reflection loop: ${error instanceof Error ? error.message : String(error)}`)
-				this._onDidFinishProcessingComment.fire({
-					fileUri: document.uri,
-					comment,
-					success: false,
-				})
-				clearHighlight()
-				return
+				this.log(`Error reading file ${uri.fsPath}: ${error instanceof Error ? error.message : String(error)}`)
 			}
 		}
 
-		if (success) {
-			this.log(`Successfully applied AI response to ${document.uri.fsPath}`)
-		} else {
-			this.log(`Failed to apply AI response to ${document.uri.fsPath} after ${currentAttempt} attempts`)
-		}
+		this.log(
+			`Total context includes ${activeFilesWithContent.length} additional files (est. ${estimatedTokens} tokens)`,
+		)
 
-		// Emit event that we've finished processing this comment
-		this._onDidFinishProcessingComment.fire({
-			fileUri: document.uri,
-			comment,
-			success,
-		})
-
-		// Clear the highlight
-		clearHighlight()
+		return activeFilesWithContent
 	}
 
 	/**
@@ -879,7 +733,7 @@ export class WatchModeService {
 			}
 
 			// Call the model with the prompt using the streaming API
-			this.log(`Using model: ${this.config.model || this.apiHandler?.getModel()?.id || "unknown"}`)
+			this.log(`Using model: ${this.apiHandler?.getModel()?.id || "unknown"}`)
 
 			// Create a system message and a user message with the prompt
 			const systemPrompt =
@@ -927,17 +781,6 @@ export class WatchModeService {
 		}
 	}
 
-	/**
-	 * Disposes all file system watchers
-	 */
-	private disposeWatchers(): void {
-		this.log(`Disposing ${this.watchers.size} watchers`)
-		for (const watcher of this.watchers.values()) {
-			watcher.dispose()
-		}
-		this.watchers.clear()
-	}
-
 	/**
 	 * Disposes all document listeners
 	 */
@@ -989,7 +832,6 @@ export class WatchModeService {
 		}
 
 		this.log("Stopping watch mode")
-		this.disposeWatchers()
 
 		// Clear any pending processing
 		for (const timeout of this.pendingProcessing.values()) {

+ 62 - 252
src/services/watchMode/commentProcessor.ts

@@ -9,6 +9,7 @@ import {
 	TriggerType,
 } from "./types"
 import { MultiSearchReplaceDiffStrategy } from "../../core/diff/strategies/multi-search-replace"
+import { ReflectionNeededError } from "./reflectionWrapper"
 
 /**
  * Interface for a diff edit
@@ -1308,11 +1309,6 @@ export const applySearchReplaceEdits = async (
 	}
 }
 
-/**
- * Maximum number of reflection attempts for failed edits
- */
-const MAX_REFLECTION_ATTEMPTS = 1
-
 /**
  * Processes the AI response and applies it to the document
  * @param document The document to modify
@@ -1327,277 +1323,91 @@ export const processAIResponse = async (
 	response: string,
 	reflectionAttempt: number = 0,
 ): Promise<boolean> => {
-	try {
-		// Determine the trigger type from the comment content
-		const triggerType = determineTriggerType(commentData.content)
-		console.log(`[WatchMode DEBUG] Trigger type: ${triggerType}`)
-
-		// Parse the AI response
-		const currentFilePath = vscode.workspace.asRelativePath(document.uri)
-		const parsedResponse = parseAIResponse(response, triggerType, currentFilePath)
-
-		// If it's a question, just show the explanation
-		if (triggerType === TriggerType.Ask) {
-			// Show the explanation in a new editor or information message
-			await vscode.window.showInformationMessage(
-				"AI Response: " + parsedResponse.explanation.substring(0, 100) + "...",
-			)
+	// Determine the trigger type from the comment content
+	const triggerType = determineTriggerType(commentData.content)
+	console.log(`[WatchMode DEBUG] Trigger type: ${triggerType}`)
 
-			// Remove the comment
-			const edit = new vscode.WorkspaceEdit()
-			const range = new vscode.Range(commentData.startPos, commentData.endPos)
-			edit.delete(document.uri, range)
-			const result = await vscode.workspace.applyEdit(edit)
+	// Parse the AI response
+	const currentFilePath = vscode.workspace.asRelativePath(document.uri)
+	const parsedResponse = parseAIResponse(response, triggerType, currentFilePath)
 
-			console.log(`[WatchMode DEBUG] Comment removal result: ${result ? "SUCCESS" : "FAILED"}`)
-			return result
-		}
-
-		// If there are no edits but there's an explanation, show it
-		if (parsedResponse.edits.length === 0 && parsedResponse.explanation) {
-			console.log("[WatchMode DEBUG] No edits found, but explanation exists")
-
-			// Check if there are code blocks in the explanation that should replace the comment
-			const codeBlocks: string[] = []
-			// Use the existing CODE_BLOCK_REGEX constant
-			let match
+	// If it's a question, just show the explanation
+	if (triggerType === TriggerType.Ask) {
+		// Show the explanation in a new editor or information message
+		await vscode.window.showInformationMessage(
+			"AI Response: " + parsedResponse.explanation.substring(0, 100) + "...",
+		)
 
-			while ((match = CODE_BLOCK_REGEX.exec(parsedResponse.explanation)) !== null) {
-				if (match[1]) {
-					codeBlocks.push(match[1].trim())
-				}
-			}
+		// Remove the comment
+		const edit = new vscode.WorkspaceEdit()
+		const range = new vscode.Range(commentData.startPos, commentData.endPos)
+		edit.delete(document.uri, range)
+		const result = await vscode.workspace.applyEdit(edit)
 
-			if (codeBlocks.length > 0) {
-				console.log(`[WatchMode DEBUG] Found ${codeBlocks.length} code blocks, using as replacement`)
+		console.log(`[WatchMode DEBUG] Comment removal result: ${result ? "SUCCESS" : "FAILED"}`)
+		return result
+	}
 
-				// Use the code blocks as a direct replacement for the comment
-				const edit = new vscode.WorkspaceEdit()
-				const range = new vscode.Range(commentData.startPos, commentData.endPos)
-				const replacement = codeBlocks.join("\n\n")
+	// If there are no edits but there's an explanation, show it
+	if (parsedResponse.edits.length === 0 && parsedResponse.explanation) {
+		console.log("[WatchMode DEBUG] No edits found, but explanation exists")
 
-				edit.replace(document.uri, range, replacement)
-				const result = await vscode.workspace.applyEdit(edit)
+		// Check if there are code blocks in the explanation that should replace the comment
+		const codeBlocks: string[] = []
+		// Use the existing CODE_BLOCK_REGEX constant
+		let match
 
-				console.log(`[WatchMode DEBUG] Direct replacement result: ${result ? "SUCCESS" : "FAILED"}`)
-				return result
+		while ((match = CODE_BLOCK_REGEX.exec(parsedResponse.explanation)) !== null) {
+			if (match[1]) {
+				codeBlocks.push(match[1].trim())
 			}
+		}
+
+		if (codeBlocks.length > 0) {
+			console.log(`[WatchMode DEBUG] Found ${codeBlocks.length} code blocks, using as replacement`)
 
-			// If no code blocks were found, just remove the comment
-			console.log("[WatchMode DEBUG] No code blocks found, removing comment")
+			// Use the code blocks as a direct replacement for the comment
 			const edit = new vscode.WorkspaceEdit()
 			const range = new vscode.Range(commentData.startPos, commentData.endPos)
+			const replacement = codeBlocks.join("\n\n")
 
-			edit.delete(document.uri, range)
+			edit.replace(document.uri, range, replacement)
 			const result = await vscode.workspace.applyEdit(edit)
 
-			console.log(`[WatchMode DEBUG] Comment removal result: ${result ? "SUCCESS" : "FAILED"}`)
+			console.log(`[WatchMode DEBUG] Direct replacement result: ${result ? "SUCCESS" : "FAILED"}`)
 			return result
 		}
 
-		// Try to apply SEARCH/REPLACE edits first
-		let success = await applySearchReplaceEdits(document, parsedResponse.edits)
-
-		// If SEARCH/REPLACE failed, try unified diff as fallback
-		if (!success) {
-			console.log("[WatchMode DEBUG] SEARCH/REPLACE edits failed, trying unified diff")
-
-			// Convert the edits to the old format
-			const oldFormatEdits = parsedResponse.edits.map((edit) => {
-				// Fix the file path if it doesn't match the current document
-				// This handles cases where the AI uses "untitled" or other incorrect paths
-				const documentPath = vscode.workspace.asRelativePath(document.uri)
-				const editPath = edit.filePath
-
-				// If the edit path is "untitled" or doesn't exist in the workspace, use the current document path
-				const finalPath =
-					editPath === "untitled" || editPath.includes("/dev/null") ? documentPath : edit.filePath
-
-				console.log(`[WatchMode DEBUG] Mapping file path: ${editPath} -> ${finalPath}`)
-
-				return {
-					path: finalPath,
-					hunk: edit.blocks.flatMap((block) =>
-						block.content
-							.split("\n")
-							.map((line) =>
-								block.type === "SEARCH" ? " " + line : block.type === "REPLACE" ? "+" + line : line,
-							),
-					),
-				}
-			})
-
-			const diffHandler = new UnifiedDiffHandler()
-			const documentContent = document.getText()
-			const [newContent, errors] = diffHandler.applyEdits(oldFormatEdits, documentContent, document.uri)
-
-			if (errors.length > 0) {
-				// Log errors but continue with the successful edits
-				console.log(`[WatchMode DEBUG] Encountered ${errors.length} errors while applying diffs`)
-				for (const error of errors) {
-					console.log(`[WatchMode DEBUG] Error: ${error.split("\n")[0]}`)
-				}
-
-				// If all edits failed, try reflection if we haven't exceeded the maximum attempts
-				if (errors.length >= oldFormatEdits.length) {
-					console.log("[WatchMode DEBUG] All edits failed to apply")
-
-					if (reflectionAttempt < MAX_REFLECTION_ATTEMPTS) {
-						console.log(`♻️♻️♻️[WatchMode DEBUG] Attempting reflection #${reflectionAttempt + 1}...`)
-						errors.forEach((error, index) => {
-							console.log(`[WatchMode DEBUG] Error ${index + 1}: ${error}`)
-						})
-
-						throw new Error(
-							`REFLECTION_NEEDED:${reflectionAttempt + 1}:${errors.map((e) => e.split("\n")[0]).join("|")}`,
-						)
-					}
-
-					console.log("[WatchMode DEBUG] ====== processAIResponse END (max reflections reached) ======")
-					return false
-				}
-			}
-
-			// Apply the updated content to the document if different from the original
-			if (newContent !== documentContent) {
-				const fullRange = new vscode.Range(
-					new vscode.Position(0, 0),
-					document.positionAt(documentContent.length),
-				)
-
-				const edit = new vscode.WorkspaceEdit()
-				edit.replace(document.uri, fullRange, newContent)
-				success = await vscode.workspace.applyEdit(edit)
-
-				console.log(`[WatchMode DEBUG] Applied updated content: ${success ? "SUCCESS" : "FAILED"}`)
-			}
-		}
+		// If no code blocks were found, just remove the comment
+		console.log("[WatchMode DEBUG] No code blocks found, removing comment")
+		const edit = new vscode.WorkspaceEdit()
+		const range = new vscode.Range(commentData.startPos, commentData.endPos)
 
-		console.log(`[WatchMode DEBUG] Process result: ${success ? "SUCCESS" : "FAILED"}`)
-		return success
-	} catch (error) {
-		// Check if this is a reflection request
-		if (error instanceof Error && error.message.startsWith("REFLECTION_NEEDED:")) {
-			// Let the calling code handle the reflection
-			throw error
-		}
+		edit.delete(document.uri, range)
+		const result = await vscode.workspace.applyEdit(edit)
 
-		console.error("[WatchMode DEBUG] Error in processAIResponse:", error)
-		console.log("[WatchMode DEBUG] ====== processAIResponse END (with error) ======")
-		return false
+		console.log(`[WatchMode DEBUG] Comment removal result: ${result ? "SUCCESS" : "FAILED"}`)
+		return result
 	}
-}
-
-/**
- * Builds a reflection prompt for the AI model when edits fail
- * @param commentData The original AI comment data
- * @param originalResponse The original AI response that failed
- * @param errors The errors encountered when applying the edits
- * @returns A prompt for the AI model to reflect on the errors
- */
-export function buildReflectionPrompt(
-	commentData: AICommentData,
-	originalResponse: string,
-	errors: string[],
-	activeFiles: { uri: vscode.Uri; content: string }[] = [],
-): string {
-	console.log("[WatchMode DEBUG] Building reflection prompt")
-	const { content, context, fileUri } = commentData
-	const filePath = vscode.workspace.asRelativePath(fileUri)
 
-	// Extract the prefix without the exclamation mark for display in the prompt
-	const displayPrefix = currentAICommentPrefix.endsWith("!")
-		? currentAICommentPrefix.slice(0, -1)
-		: currentAICommentPrefix
-
-	// Create the reflection prompt with escaped markers
-	let prompt = `
-You are Kilo Code, a highly skilled software engineer with extensive knowledge in many programming languages, frameworks, design patterns, and best practices.
-
-# Task
-
-${content}
-
-I've written your instructions in comments in the code and marked them with "${displayPrefix}"
-You can see the "${displayPrefix}" comments shown below.
-Find them in the code files I've shared with you, and follow their instructions.
-
-# Code to modify
-
-\`\`\`
-${context || "No context available"}
-\`\`\`
-
-# Previous response
-
-Your previous response failed to apply correctly. Here's what you provided:
-
-\`\`\`
-${originalResponse}
-\`\`\`
-
-# Errors
-
-The following errors occurred when trying to apply your changes:
-
-${errors.join("\n\n")}
-`
-
-	// Add content from active files for additional context
-	if (activeFiles.length > 0) {
-		prompt += `\n\n# Additional context from open files\n\n`
+	// Apply the edits based on their format
+	// The parseAIResponse function already determines if it's SEARCH/REPLACE or unified diff
+	let success = false
 
-		for (const file of activeFiles) {
-			if (file.uri.toString() !== fileUri.toString()) {
-				// Skip the file with the comment
-				const relativePath = vscode.workspace.asRelativePath(file.uri)
-				prompt += `## ${relativePath}\n\n\`\`\`\n${file.content}\n\`\`\`\n\n`
-			}
-		}
+	// Check if we have any edits to apply
+	if (parsedResponse.edits.length > 0) {
+		console.log(`[WatchMode DEBUG] Applying ${parsedResponse.edits.length} edits`)
+		success = await applySearchReplaceEdits(document, parsedResponse.edits)
+	} else {
+		console.log("[WatchMode DEBUG] No edits found in AI response")
 	}
 
-	prompt += `
-# Response format
+	console.log(`[WatchMode DEBUG] Process result: ${success ? "SUCCESS" : "FAILED"}`)
 
-Please correct your previous response to address these errors. Make sure your SEARCH blocks exactly match the code in the file.
-You MUST respond with SEARCH/REPLACE blocks for each edit. Format your changes as follows:
-
-${filePath}
-\<\<\<\<\<\<\< SEARCH
-exact original code
-\=\=\=\=\=\=\=
-replacement code
-\>\>\>\>\>\>\> REPLACE
-
-IMPORTANT: You MUST ALWAYS include the file path (${filePath}) before each SEARCH/REPLACE block.
-You can include multiple SEARCH/REPLACE blocks for the same file, and you can edit multiple files.
-Make sure to include enough context in the SEARCH block to uniquely identify the code to replace.
-After completing the instructions, also BE SURE to remove all the "${displayPrefix}" comments from the code.
-
-NEVER use generic file names like "Code", "file", or similar placeholders. ALWAYS use the actual file path: ${filePath}
-
-If you need to explain your changes, please do so before or after the code blocks.
-`
-
-	const finalPrompt = prompt.trim()
-
-	// Log the full reflection prompt for debugging
-	console.log("[WatchMode DEBUG] === FULL REFLECTION PROMPT ===")
-	console.log(finalPrompt)
-	console.log("[WatchMode DEBUG] === END REFLECTION PROMPT ===")
-
-	// Debug log the full reflection prompt string
-	console.debug(
-		"[WatchMode DEBUG] Full reflection prompt string:",
-		JSON.stringify({
-			prompt: finalPrompt,
-			length: finalPrompt.length,
-			estimatedTokens: estimateTokenCount(finalPrompt),
-			originalResponseLength: originalResponse.length,
-			errorsCount: errors.length,
-			timestamp: new Date().toISOString(),
-		}),
-	)
+	// If the edits failed and we haven't exceeded reflection attempts, throw reflection error
+	if (!success) {
+		throw new ReflectionNeededError(reflectionAttempt + 1, ["Failed to apply edits"], response)
+	}
 
-	return finalPrompt
+	return success
 }

+ 9 - 0
src/services/watchMode/config.ts

@@ -0,0 +1,9 @@
+/**
+ * Configuration constants for Watch Mode
+ */
+
+// Whether reflection is enabled for watch mode
+export const REFLECTION_ENABLED = false
+
+// Maximum number of reflection attempts when edits fail to apply
+export const MAX_REFLECTION_ATTEMPTS = REFLECTION_ENABLED ? 1 : 0

+ 273 - 0
src/services/watchMode/reflectionWrapper.ts

@@ -0,0 +1,273 @@
+import * as vscode from "vscode"
+import { REFLECTION_ENABLED, MAX_REFLECTION_ATTEMPTS } from "./config"
+import { AICommentData } from "./types"
+import { estimateTokenCount } from "./commentProcessor"
+
+/**
+ * Error thrown when reflection is needed
+ */
+export class ReflectionNeededError extends Error {
+	constructor(
+		public attemptNumber: number,
+		public errors: string[],
+		public originalResponse: string,
+	) {
+		super(`REFLECTION_NEEDED:${attemptNumber}:${errors.join("|")}`)
+		this.name = "ReflectionNeededError"
+	}
+}
+
+/**
+ * Options for the reflection wrapper
+ */
+export interface ReflectionWrapperOptions<TContext> {
+	/**
+	 * Function to build the initial prompt
+	 */
+	buildPrompt: (context: TContext) => string
+
+	/**
+	 * Function to build the reflection prompt when errors occur
+	 */
+	buildReflectionPrompt: (context: TContext, originalResponse: string, errors: string[]) => string
+
+	/**
+	 * Function to call the AI model with a prompt
+	 */
+	callAI: (prompt: string) => Promise<string>
+
+	/**
+	 * Function to process the AI response
+	 * Should throw ReflectionNeededError if reflection is needed
+	 * Should return true if successful, false if failed
+	 */
+	processResponse: (context: TContext, response: string, attemptNumber: number) => Promise<boolean>
+
+	/**
+	 * Optional logger function
+	 */
+	log?: (message: string) => void
+}
+
+/**
+ * Generic reflection wrapper that can wrap any prompt/response cycle
+ *
+ * @param context The context data needed for the operation
+ * @param options The options for the reflection wrapper
+ * @returns Promise that resolves to true if successful, false if failed
+ */
+export async function withReflection<TContext>(
+	context: TContext,
+	options: ReflectionWrapperOptions<TContext>,
+): Promise<{ success: boolean; response: string | null }> {
+	const { buildPrompt, buildReflectionPrompt, callAI, processResponse, log = console.log } = options
+
+	let currentAttempt = 0
+	let lastResponse: string | null = null
+	let success = false
+
+	// If reflection is disabled, just run once
+	const maxAttempts = REFLECTION_ENABLED ? MAX_REFLECTION_ATTEMPTS : 0
+
+	while (currentAttempt <= maxAttempts) {
+		try {
+			// Build the appropriate prompt
+			const prompt =
+				currentAttempt === 0 ? buildPrompt(context) : buildReflectionPrompt(context, lastResponse!, []) // Errors will be passed from the catch block
+
+			log(`Building prompt (attempt ${currentAttempt})...`)
+
+			// Call the AI model
+			const response = await callAI(prompt)
+			lastResponse = response
+
+			if (!response) {
+				log("No response from AI model")
+				return { success: false, response: null }
+			}
+
+			log(`Processing response (attempt ${currentAttempt})...`)
+
+			// Process the response
+			try {
+				success = await processResponse(context, response, currentAttempt)
+
+				if (success) {
+					return { success: true, response }
+				}
+
+				// If we've reached the maximum attempts, break out
+				if (currentAttempt >= maxAttempts) {
+					break
+				}
+
+				currentAttempt++
+			} catch (error) {
+				// Check if this is a reflection request
+				if (error instanceof ReflectionNeededError) {
+					log(`Reflection needed, attempt ${error.attemptNumber} of ${maxAttempts}`)
+					log(`Error messages: ${error.errors.join(", ")}`)
+
+					// If reflection is disabled or we've exceeded attempts, fail
+					if (!REFLECTION_ENABLED || error.attemptNumber > maxAttempts) {
+						log("Reflection disabled or max attempts exceeded")
+						return { success: false, response: lastResponse }
+					}
+
+					// Update the attempt counter
+					currentAttempt = error.attemptNumber
+
+					// In the next iteration, we'll build a reflection prompt
+					// The errors are already captured in the error object
+					const reflectionPrompt = buildReflectionPrompt(context, error.originalResponse, error.errors)
+
+					// Call AI with reflection prompt
+					const reflectionResponse = await callAI(reflectionPrompt)
+					lastResponse = reflectionResponse
+
+					if (!reflectionResponse) {
+						log("No response from AI model for reflection")
+						return { success: false, response: null }
+					}
+
+					// Process the reflection response
+					success = await processResponse(context, reflectionResponse, currentAttempt)
+
+					if (success) {
+						return { success: true, response: reflectionResponse }
+					}
+
+					// Move to next attempt
+					currentAttempt++
+				} else {
+					// Other errors should bubble up
+					throw error
+				}
+			}
+		} catch (error) {
+			log(`Error in reflection loop: ${error instanceof Error ? error.message : String(error)}`)
+			return { success: false, response: lastResponse }
+		}
+	}
+
+	return { success, response: lastResponse }
+}
+
+/**
+ * Helper function to check if reflection should be attempted
+ */
+export function shouldAttemptReflection(currentAttempt: number): boolean {
+	return REFLECTION_ENABLED && currentAttempt < MAX_REFLECTION_ATTEMPTS
+}
+
+/**
+ * Builds a reflection prompt for the AI model when edits fail
+ * This is specific to watch mode comment processing
+ */
+export function buildWatchModeReflectionPrompt(
+	commentData: AICommentData,
+	originalResponse: string,
+	errors: string[],
+	activeFiles: { uri: vscode.Uri; content: string }[] = [],
+	currentAICommentPrefix: string = "KO!",
+): string {
+	console.log("[WatchMode DEBUG] Building reflection prompt")
+	const { content, context, fileUri } = commentData
+	const filePath = vscode.workspace.asRelativePath(fileUri)
+
+	// Extract the prefix without the exclamation mark for display in the prompt
+	const displayPrefix = currentAICommentPrefix.endsWith("!")
+		? currentAICommentPrefix.slice(0, -1)
+		: currentAICommentPrefix
+
+	// Create the reflection prompt with escaped markers
+	let prompt = `
+You are Kilo Code, a highly skilled software engineer with extensive knowledge in many programming languages, frameworks, design patterns, and best practices.
+
+# Task
+
+${content}
+
+I've written your instructions in comments in the code and marked them with "${displayPrefix}"
+You can see the "${displayPrefix}" comments shown below.
+Find them in the code files I've shared with you, and follow their instructions.
+
+# Code to modify
+
+\`\`\`
+${context || "No context available"}
+\`\`\`
+
+# Previous response
+
+Your previous response failed to apply correctly. Here's what you provided:
+
+\`\`\`
+${originalResponse}
+\`\`\`
+
+# Errors
+
+The following errors occurred when trying to apply your changes:
+
+${errors.join("\n\n")}
+`
+
+	// Add content from active files for additional context
+	if (activeFiles.length > 0) {
+		prompt += `\n\n# Additional context from open files\n\n`
+
+		for (const file of activeFiles) {
+			if (file.uri.toString() !== fileUri.toString()) {
+				// Skip the file with the comment
+				const relativePath = vscode.workspace.asRelativePath(file.uri)
+				prompt += `## ${relativePath}\n\n\`\`\`\n${file.content}\n\`\`\`\n\n`
+			}
+		}
+	}
+
+	prompt += `
+# Response format
+
+Please correct your previous response to address these errors. Make sure your SEARCH blocks exactly match the code in the file.
+You MUST respond with SEARCH/REPLACE blocks for each edit. Format your changes as follows:
+
+${filePath}
+\<\<\<\<\<\<\< SEARCH
+exact original code
+\=\=\=\=\=\=\=
+replacement code
+\>\>\>\>\>\>\> REPLACE
+
+IMPORTANT: You MUST ALWAYS include the file path (${filePath}) before each SEARCH/REPLACE block.
+You can include multiple SEARCH/REPLACE blocks for the same file, and you can edit multiple files.
+Make sure to include enough context in the SEARCH block to uniquely identify the code to replace.
+After completing the instructions, also BE SURE to remove all the "${displayPrefix}" comments from the code.
+
+NEVER use generic file names like "Code", "file", or similar placeholders. ALWAYS use the actual file path: ${filePath}
+
+If you need to explain your changes, please do so before or after the code blocks.
+`
+
+	const finalPrompt = prompt.trim()
+
+	// Log the full reflection prompt for debugging
+	console.log("[WatchMode DEBUG] === FULL REFLECTION PROMPT ===")
+	console.log(finalPrompt)
+	console.log("[WatchMode DEBUG] === END REFLECTION PROMPT ===")
+
+	// Debug log the full reflection prompt string
+	console.debug(
+		"[WatchMode DEBUG] Full reflection prompt string:",
+		JSON.stringify({
+			prompt: finalPrompt,
+			length: finalPrompt.length,
+			estimatedTokens: estimateTokenCount(finalPrompt),
+			originalResponseLength: originalResponse.length,
+			errorsCount: errors.length,
+			timestamp: new Date().toISOString(),
+		}),
+	)
+
+	return finalPrompt
+}
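
A note on the error contract established above: a processResponse callback reports a recoverable failure by throwing ReflectionNeededError; the wrapper catches it, builds a reflection prompt, and retries up to MAX_REFLECTION_ATTEMPTS, while any other exception ends the cycle as a failure. A minimal sketch, where applyEdits is a hypothetical stand-in for applySearchReplaceEdits in commentProcessor.ts:

	// Sketch of a processResponse implementation that requests reflection on failure.
	async function applyOrRequestReflection(
		document: vscode.TextDocument,
		response: string,
		attempt: number,
	): Promise<boolean> {
		const applied = await applyEdits(document, response) // hypothetical edit-application helper
		if (applied) {
			return true
		}
		// Caught by withReflection, which retries with a reflection prompt when reflection is enabled.
		throw new ReflectionNeededError(attempt + 1, ["Failed to apply edits"], response)
	}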

+ 0 - 6
src/services/watchMode/types.ts

@@ -62,14 +62,8 @@ export interface FileChangeData {
  * Configuration options for WatchModeService
  */
 export interface WatchModeConfig {
-	/** File patterns to include in watching */
-	include: string[]
-	/** File patterns to exclude from watching */
-	exclude: string[]
 	/** The model to use for AI processing */
 	model: string
-	/** Debounce time in milliseconds */
-	debounceTime: number
 	/** Prefix for AI comments (e.g., "KO!") */
 	commentPrefix: string
 }
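
With include, exclude, and debounceTime gone, the remaining settings are read straight from the kilo-code.watchMode section of the VS Code configuration. A minimal sketch of loading the trimmed config, mirroring loadConfiguration() in WatchModeService.ts, with defaults taken from defaultConfig in the diff above:

	import * as vscode from "vscode"
	import { WatchModeConfig } from "./types"

	// Read the two remaining Watch Mode settings, falling back to the documented defaults.
	const settings = vscode.workspace.getConfiguration("kilo-code.watchMode")
	const config: WatchModeConfig = {
		model: settings.get("model", "claude-3.7"),
		commentPrefix: settings.get("commentPrefix", "KO!"),
	}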