Przeglądaj źródła

Fix: Restore file truncation backward compatibility and fix regressions (#2022)

* fix: restore file truncation backward compatibility and fix regressions

Fixes #1934

- Setting maxReadFileLine to -1 provides equivalent functionality for reading entire files (replacing the removed auto_truncate=false behavior)
- Revert to system-controlled maxReadFileLine for file truncation, fixing regressions caused by:
  - Overridden system prompts lacking auto_truncate parameter information
  - Insufficient user control over truncation behavior
- Add 'Always read entire file' checkbox for easier access to full file reading
- Update translations across all locales with improved descriptions
- Update truncation notice to be more concise

Signed-off-by: Eric Wheeler <[email protected]>

* test: add comprehensive tests for read_file maxReadFileLine settings

- Tests behavior when maxReadFileLine is -1 (reads entire file)
- Tests behavior when maxReadFileLine >= file length (reads entire file)
- Tests behavior when maxReadFileLine is 0 (shows only definitions)
- Tests behavior when maxReadFileLine < file length (truncates content)
- Verifies exact line counts in responses
- Adds helper functions for response validation
- Includes DEBUG flag for controlled logging

---------

Signed-off-by: Eric Wheeler <[email protected]>
Co-authored-by: Eric Wheeler <[email protected]>
KJ7LNW 9 miesięcy temu
rodzic
commit
3c75b81f0d

+ 2 - 3
src/core/Cline.ts

@@ -2355,7 +2355,6 @@ export class Cline extends EventEmitter<ClineEvents> {
 								let sourceCodeDef = ""
 
 								const isBinary = await isBinaryFile(absolutePath).catch(() => false)
-								const autoTruncate = block.params.auto_truncate === "true"
 
 								if (isRangeRead) {
 									if (startLine === undefined) {
@@ -2366,7 +2365,7 @@ export class Cline extends EventEmitter<ClineEvents> {
 											startLine + 1,
 										)
 									}
-								} else if (autoTruncate && !isBinary && totalLines > maxReadFileLine) {
+								} else if (!isBinary && maxReadFileLine >= 0 && totalLines > maxReadFileLine) {
 									// If file is too large, only read the first maxReadFileLine lines
 									isFileTruncated = true
 
@@ -2387,7 +2386,7 @@ export class Cline extends EventEmitter<ClineEvents> {
 
 								// Add truncation notice if applicable
 								if (isFileTruncated) {
-									content += `\n\n[File truncated: showing ${maxReadFileLine} of ${totalLines} total lines. Use start_line and end_line or set auto_truncate to false if you need to read more.].${sourceCodeDef}`
+									content += `\n\n[Showing only ${maxReadFileLine} of ${totalLines} total lines. Use start_line and end_line if you need to read more]${sourceCodeDef}`
 								}
 
 								pushToolResult(content)

+ 325 - 0
src/core/__tests__/read-file-maxReadFileLine.test.ts

@@ -0,0 +1,325 @@
+const DEBUG = false
+
+import * as path from "path"
+import { countFileLines } from "../../integrations/misc/line-counter"
+import { readLines } from "../../integrations/misc/read-lines"
+import { extractTextFromFile, addLineNumbers } from "../../integrations/misc/extract-text"
+import { parseSourceCodeDefinitionsForFile } from "../../services/tree-sitter"
+import { isBinaryFile } from "isbinaryfile"
+import { ReadFileToolUse } from "../assistant-message"
+import { Cline } from "../Cline"
+import { ClineProvider } from "../webview/ClineProvider"
+
+// Mock dependencies
+jest.mock("../../integrations/misc/line-counter")
+jest.mock("../../integrations/misc/read-lines")
+jest.mock("../../integrations/misc/extract-text")
+jest.mock("../../services/tree-sitter")
+jest.mock("isbinaryfile")
+jest.mock("../ignore/RooIgnoreController", () => ({
+	RooIgnoreController: class {
+		initialize() {
+			return Promise.resolve()
+		}
+		validateAccess(filePath: string) {
+			return true
+		}
+	},
+}))
+jest.mock("fs/promises", () => ({
+	mkdir: jest.fn().mockResolvedValue(undefined),
+	writeFile: jest.fn().mockResolvedValue(undefined),
+	readFile: jest.fn().mockResolvedValue("{}"),
+}))
+jest.mock("../../utils/fs", () => ({
+	fileExistsAtPath: jest.fn().mockReturnValue(true),
+}))
+
+// Mock path
+jest.mock("path", () => {
+	const originalPath = jest.requireActual("path")
+	return {
+		...originalPath,
+		resolve: jest.fn().mockImplementation((...args) => args.join("/")),
+	}
+})
+
+describe("read_file tool with maxReadFileLine setting", () => {
+	// Mock original implementation first to use in tests
+	const originalCountFileLines = jest.requireActual("../../integrations/misc/line-counter").countFileLines
+	const originalReadLines = jest.requireActual("../../integrations/misc/read-lines").readLines
+	const originalExtractTextFromFile = jest.requireActual("../../integrations/misc/extract-text").extractTextFromFile
+	const originalAddLineNumbers = jest.requireActual("../../integrations/misc/extract-text").addLineNumbers
+	const originalParseSourceCodeDefinitionsForFile =
+		jest.requireActual("../../services/tree-sitter").parseSourceCodeDefinitionsForFile
+	const originalIsBinaryFile = jest.requireActual("isbinaryfile").isBinaryFile
+
+	let cline: Cline
+	let mockProvider: any
+	const testFilePath = "test/file.txt"
+	const absoluteFilePath = "/home/ewheeler/src/roo/roo-main/test/file.txt"
+	const fileContent = "Line 1\nLine 2\nLine 3\nLine 4\nLine 5"
+	const numberedFileContent = "1 | Line 1\n2 | Line 2\n3 | Line 3\n4 | Line 4\n5 | Line 5"
+	const sourceCodeDef = "\n\n# file.txt\n1--5 | Content"
+
+	beforeEach(() => {
+		jest.resetAllMocks()
+
+		// Reset mocks to simulate original behavior
+		;(countFileLines as jest.Mock).mockImplementation(originalCountFileLines)
+		;(readLines as jest.Mock).mockImplementation(originalReadLines)
+		;(extractTextFromFile as jest.Mock).mockImplementation(originalExtractTextFromFile)
+		;(parseSourceCodeDefinitionsForFile as jest.Mock).mockImplementation(originalParseSourceCodeDefinitionsForFile)
+		;(isBinaryFile as jest.Mock).mockImplementation(originalIsBinaryFile)
+
+		// Default mock implementations
+		;(countFileLines as jest.Mock).mockResolvedValue(5)
+		;(readLines as jest.Mock).mockResolvedValue(fileContent)
+		;(extractTextFromFile as jest.Mock).mockResolvedValue(numberedFileContent)
+		// Use the real addLineNumbers function
+		;(addLineNumbers as jest.Mock).mockImplementation(originalAddLineNumbers)
+		;(parseSourceCodeDefinitionsForFile as jest.Mock).mockResolvedValue(sourceCodeDef)
+		;(isBinaryFile as jest.Mock).mockResolvedValue(false)
+
+		// Add spy to debug the readLines calls
+		const readLinesSpy = jest.spyOn(require("../../integrations/misc/read-lines"), "readLines")
+
+		// Mock path.resolve to return a predictable path
+		;(path.resolve as jest.Mock).mockReturnValue(absoluteFilePath)
+
+		// Create mock provider
+		mockProvider = {
+			getState: jest.fn(),
+			deref: jest.fn().mockReturnThis(),
+		}
+
+		// Create a Cline instance with the necessary configuration
+		cline = new Cline({
+			provider: mockProvider,
+			apiConfiguration: { apiProvider: "anthropic" } as any,
+			task: "Test read_file tool", // Required to satisfy constructor check
+			startTask: false, // Prevent actual task initialization
+		})
+
+		// Set up the read_file tool use
+		const readFileToolUse: ReadFileToolUse = {
+			type: "tool_use",
+			name: "read_file",
+			params: {
+				path: testFilePath,
+			},
+			partial: false,
+		}
+
+		// Set up the Cline instance for testing
+		const clineAny = cline as any
+
+		// Set up the required properties for the test
+		clineAny.assistantMessageContent = [readFileToolUse]
+		clineAny.currentStreamingContentIndex = 0
+		clineAny.userMessageContent = []
+		clineAny.presentAssistantMessageLocked = false
+		clineAny.didCompleteReadingStream = true
+		clineAny.didRejectTool = false
+		clineAny.didAlreadyUseTool = false
+
+		// Mock methods that would be called during presentAssistantMessage
+		clineAny.say = jest.fn().mockResolvedValue(undefined)
+		clineAny.ask = jest.fn().mockImplementation((type, message) => {
+			return Promise.resolve({ response: "yesButtonClicked" })
+		})
+	})
+
+	// Helper function to get user message content
+	const getUserMessageContent = (clineInstance: Cline) => {
+		const clineAny = clineInstance as any
+		return clineAny.userMessageContent
+	}
+
+	// Helper function to validate response lines
+	const validateResponseLines = (
+		responseLines: string[],
+		options: {
+			expectedLineCount: number
+			shouldContainLines?: number[]
+			shouldNotContainLines?: number[]
+		},
+	) => {
+		if (options.shouldContainLines) {
+			const contentLines = responseLines.filter((line) => line.includes("Line "))
+			expect(contentLines.length).toBe(options.expectedLineCount)
+			options.shouldContainLines.forEach((lineNum) => {
+				expect(contentLines[lineNum - 1]).toContain(`Line ${lineNum}`)
+			})
+		}
+
+		if (options.shouldNotContainLines) {
+			options.shouldNotContainLines.forEach((lineNum) => {
+				expect(responseLines.some((line) => line.includes(`Line ${lineNum}`))).toBe(false)
+			})
+		}
+	}
+
+	interface TestExpectations {
+		extractTextCalled: boolean
+		readLinesCalled: boolean
+		sourceCodeDefCalled: boolean
+		readLinesParams?: [string, number, number]
+		responseValidation: {
+			expectedLineCount: number
+			shouldContainLines?: number[]
+			shouldNotContainLines?: number[]
+		}
+		expectedContent?: string
+		truncationMessage?: string
+		includeSourceCodeDef?: boolean
+	}
+
+	interface TestCase {
+		name: string
+		maxReadFileLine: number
+		setup?: () => void
+		expectations: TestExpectations
+	}
+
+	// Test cases
+	const testCases: TestCase[] = [
+		{
+			name: "read entire file when maxReadFileLine is -1",
+			maxReadFileLine: -1,
+			expectations: {
+				extractTextCalled: true,
+				readLinesCalled: false,
+				sourceCodeDefCalled: false,
+				responseValidation: {
+					expectedLineCount: 5,
+					shouldContainLines: [1, 2, 3, 4, 5],
+				},
+				expectedContent: numberedFileContent,
+			},
+		},
+		{
+			name: "read entire file when maxReadFileLine >= file length",
+			maxReadFileLine: 10,
+			expectations: {
+				extractTextCalled: true,
+				readLinesCalled: false,
+				sourceCodeDefCalled: false,
+				responseValidation: {
+					expectedLineCount: 5,
+					shouldContainLines: [1, 2, 3, 4, 5],
+				},
+				expectedContent: numberedFileContent,
+			},
+		},
+		{
+			name: "read zero lines and only provide line declaration definitions when maxReadFileLine is 0",
+			maxReadFileLine: 0,
+			expectations: {
+				extractTextCalled: false,
+				readLinesCalled: false,
+				sourceCodeDefCalled: true,
+				responseValidation: {
+					expectedLineCount: 0,
+				},
+				truncationMessage: `[Showing only 0 of 5 total lines. Use start_line and end_line if you need to read more]`,
+				includeSourceCodeDef: true,
+			},
+		},
+		{
+			name: "read maxReadFileLine lines and provide line declaration definitions when maxReadFileLine < file length",
+			maxReadFileLine: 3,
+			setup: () => {
+				jest.clearAllMocks()
+				;(countFileLines as jest.Mock).mockResolvedValue(5)
+				;(readLines as jest.Mock).mockImplementation((path, endLine, startLine = 0) => {
+					const lines = fileContent.split("\n")
+					const actualEndLine = endLine !== undefined ? Math.min(endLine, lines.length - 1) : lines.length - 1
+					const actualStartLine = startLine !== undefined ? Math.min(startLine, lines.length - 1) : 0
+					const requestedLines = lines.slice(actualStartLine, actualEndLine + 1)
+					return Promise.resolve(requestedLines.join("\n"))
+				})
+			},
+			expectations: {
+				extractTextCalled: false,
+				readLinesCalled: true,
+				sourceCodeDefCalled: true,
+				readLinesParams: [absoluteFilePath, 2, 0],
+				responseValidation: {
+					expectedLineCount: 3,
+					shouldContainLines: [1, 2, 3],
+					shouldNotContainLines: [4, 5],
+				},
+				truncationMessage: `[Showing only 3 of 5 total lines. Use start_line and end_line if you need to read more]`,
+				includeSourceCodeDef: true,
+			},
+		},
+	]
+
+	test.each(testCases)("should $name", async (testCase) => {
+		// Setup
+		if (testCase.setup) {
+			testCase.setup()
+		}
+		mockProvider.getState.mockResolvedValue({ maxReadFileLine: testCase.maxReadFileLine })
+
+		// Execute
+		await cline.presentAssistantMessage()
+
+		// Verify mock calls
+		if (testCase.expectations.extractTextCalled) {
+			expect(extractTextFromFile).toHaveBeenCalledWith(absoluteFilePath)
+		} else {
+			expect(extractTextFromFile).not.toHaveBeenCalled()
+		}
+
+		if (testCase.expectations.readLinesCalled) {
+			const params = testCase.expectations.readLinesParams
+			if (!params) {
+				throw new Error("readLinesParams must be defined when readLinesCalled is true")
+			}
+			expect(readLines).toHaveBeenCalledWith(...params)
+		} else {
+			expect(readLines).not.toHaveBeenCalled()
+		}
+
+		if (testCase.expectations.sourceCodeDefCalled) {
+			expect(parseSourceCodeDefinitionsForFile).toHaveBeenCalled()
+		} else {
+			expect(parseSourceCodeDefinitionsForFile).not.toHaveBeenCalled()
+		}
+
+		// Verify response content
+		const userMessageContent = getUserMessageContent(cline)
+
+		if (DEBUG) {
+			console.log(`\n=== Test: ${testCase.name} ===`)
+			console.log(`maxReadFileLine: ${testCase.maxReadFileLine}`)
+			console.log("Response content:", JSON.stringify(userMessageContent, null, 2))
+		}
+		const responseLines = userMessageContent[1].text.split("\n")
+
+		if (DEBUG) {
+			console.log(`Number of lines in response: ${responseLines.length}`)
+		}
+
+		expect(userMessageContent.length).toBe(2)
+		expect(userMessageContent[0].text).toBe(`[read_file for '${testFilePath}'] Result:`)
+
+		if (testCase.expectations.expectedContent) {
+			expect(userMessageContent[1].text).toBe(testCase.expectations.expectedContent)
+		}
+
+		if (testCase.expectations.responseValidation) {
+			validateResponseLines(responseLines, testCase.expectations.responseValidation)
+		}
+
+		if (testCase.expectations.truncationMessage) {
+			expect(userMessageContent[1].text).toContain(testCase.expectations.truncationMessage)
+		}
+
+		if (testCase.expectations.includeSourceCodeDef) {
+			expect(userMessageContent[1].text).toContain(sourceCodeDef)
+		}
+	})
+})

+ 0 - 1
src/core/assistant-message/index.ts

@@ -52,7 +52,6 @@ export const toolParamNames = [
 	"diff",
 	"start_line",
 	"end_line",
-	"auto_truncate",
 	"mode_slug",
 	"reason",
 	"operations",

+ 15 - 150
src/core/prompts/__tests__/__snapshots__/system.test.ts.snap

@@ -35,13 +35,11 @@ Parameters:
 - path: (required) The path of the file to read (relative to the current working directory /test/path)
 - start_line: (optional) The starting line number to read from (1-based). If not provided, it starts from the beginning of the file.
 - end_line: (optional) The ending line number to read to (1-based, inclusive). If not provided, it reads to the end of the file.
-- auto_truncate: (optional) Whether to automatically truncate large files when start_line and end_line are not specified. If true and the file exceeds a certain line threshold, it will: a) return only a subset of lines to save tokens, b) include information about the total line count, and c) provide a summary of method definitions with their line ranges. You should set this to true unless you've been explicitly asked to read an entire large file at once, as this prevents context bloat that can lead to truncated responses. For backwards compatibility, it defaults to false when omitted.
 Usage:
 <read_file>
 <path>File path here</path>
 <start_line>Starting line number (optional)</start_line>
 <end_line>Ending line number (optional)</end_line>
-<auto_truncate>true or false (optional)</auto_truncate>
 </read_file>
 
 Examples:
@@ -70,15 +68,8 @@ Examples:
 <start_line>46</start_line>
 <end_line>68</end_line>
 </read_file>
-Note: When both start_line and end_line are provided, this tool efficiently streams only the requested lines, making it suitable for processing large files like logs, CSV files, and other large datasets without memory issues.
 
-5. Reading a large file with automatic truncation:
-<read_file>
-<path>src/large-module.ts</path>
-<auto_truncate>true</auto_truncate>
-</read_file>
-
-This will return a truncated version of the file with information about total line count and method definitions, helping to prevent context size issues with very large files.
+Note: When both start_line and end_line are provided, this tool efficiently streams only the requested lines, making it suitable for processing large files like logs, CSV files, and other large datasets without memory issues.
 
 ## fetch_instructions
 Description: Request to fetch instructions to perform a task
@@ -434,13 +425,11 @@ Parameters:
 - path: (required) The path of the file to read (relative to the current working directory /test/path)
 - start_line: (optional) The starting line number to read from (1-based). If not provided, it starts from the beginning of the file.
 - end_line: (optional) The ending line number to read to (1-based, inclusive). If not provided, it reads to the end of the file.
-- auto_truncate: (optional) Whether to automatically truncate large files when start_line and end_line are not specified. If true and the file exceeds a certain line threshold, it will: a) return only a subset of lines to save tokens, b) include information about the total line count, and c) provide a summary of method definitions with their line ranges. You should set this to true unless you've been explicitly asked to read an entire large file at once, as this prevents context bloat that can lead to truncated responses. For backwards compatibility, it defaults to false when omitted.
 Usage:
 <read_file>
 <path>File path here</path>
 <start_line>Starting line number (optional)</start_line>
 <end_line>Ending line number (optional)</end_line>
-<auto_truncate>true or false (optional)</auto_truncate>
 </read_file>
 
 Examples:
@@ -469,15 +458,8 @@ Examples:
 <start_line>46</start_line>
 <end_line>68</end_line>
 </read_file>
-Note: When both start_line and end_line are provided, this tool efficiently streams only the requested lines, making it suitable for processing large files like logs, CSV files, and other large datasets without memory issues.
 
-5. Reading a large file with automatic truncation:
-<read_file>
-<path>src/large-module.ts</path>
-<auto_truncate>true</auto_truncate>
-</read_file>
-
-This will return a truncated version of the file with information about total line count and method definitions, helping to prevent context size issues with very large files.
+Note: When both start_line and end_line are provided, this tool efficiently streams only the requested lines, making it suitable for processing large files like logs, CSV files, and other large datasets without memory issues.
 
 ## fetch_instructions
 Description: Request to fetch instructions to perform a task
@@ -922,13 +904,11 @@ Parameters:
 - path: (required) The path of the file to read (relative to the current working directory /test/path)
 - start_line: (optional) The starting line number to read from (1-based). If not provided, it starts from the beginning of the file.
 - end_line: (optional) The ending line number to read to (1-based, inclusive). If not provided, it reads to the end of the file.
-- auto_truncate: (optional) Whether to automatically truncate large files when start_line and end_line are not specified. If true and the file exceeds a certain line threshold, it will: a) return only a subset of lines to save tokens, b) include information about the total line count, and c) provide a summary of method definitions with their line ranges. You should set this to true unless you've been explicitly asked to read an entire large file at once, as this prevents context bloat that can lead to truncated responses. For backwards compatibility, it defaults to false when omitted.
 Usage:
 <read_file>
 <path>File path here</path>
 <start_line>Starting line number (optional)</start_line>
 <end_line>Ending line number (optional)</end_line>
-<auto_truncate>true or false (optional)</auto_truncate>
 </read_file>
 
 Examples:
@@ -957,15 +937,8 @@ Examples:
 <start_line>46</start_line>
 <end_line>68</end_line>
 </read_file>
-Note: When both start_line and end_line are provided, this tool efficiently streams only the requested lines, making it suitable for processing large files like logs, CSV files, and other large datasets without memory issues.
 
-5. Reading a large file with automatic truncation:
-<read_file>
-<path>src/large-module.ts</path>
-<auto_truncate>true</auto_truncate>
-</read_file>
-
-This will return a truncated version of the file with information about total line count and method definitions, helping to prevent context size issues with very large files.
+Note: When both start_line and end_line are provided, this tool efficiently streams only the requested lines, making it suitable for processing large files like logs, CSV files, and other large datasets without memory issues.
 
 ## fetch_instructions
 Description: Request to fetch instructions to perform a task
@@ -1374,13 +1347,11 @@ Parameters:
 - path: (required) The path of the file to read (relative to the current working directory /test/path)
 - start_line: (optional) The starting line number to read from (1-based). If not provided, it starts from the beginning of the file.
 - end_line: (optional) The ending line number to read to (1-based, inclusive). If not provided, it reads to the end of the file.
-- auto_truncate: (optional) Whether to automatically truncate large files when start_line and end_line are not specified. If true and the file exceeds a certain line threshold, it will: a) return only a subset of lines to save tokens, b) include information about the total line count, and c) provide a summary of method definitions with their line ranges. You should set this to true unless you've been explicitly asked to read an entire large file at once, as this prevents context bloat that can lead to truncated responses. For backwards compatibility, it defaults to false when omitted.
 Usage:
 <read_file>
 <path>File path here</path>
 <start_line>Starting line number (optional)</start_line>
 <end_line>Ending line number (optional)</end_line>
-<auto_truncate>true or false (optional)</auto_truncate>
 </read_file>
 
 Examples:
@@ -1409,15 +1380,8 @@ Examples:
 <start_line>46</start_line>
 <end_line>68</end_line>
 </read_file>
-Note: When both start_line and end_line are provided, this tool efficiently streams only the requested lines, making it suitable for processing large files like logs, CSV files, and other large datasets without memory issues.
-
-5. Reading a large file with automatic truncation:
-<read_file>
-<path>src/large-module.ts</path>
-<auto_truncate>true</auto_truncate>
-</read_file>
 
-This will return a truncated version of the file with information about total line count and method definitions, helping to prevent context size issues with very large files.
+Note: When both start_line and end_line are provided, this tool efficiently streams only the requested lines, making it suitable for processing large files like logs, CSV files, and other large datasets without memory issues.
 
 ## fetch_instructions
 Description: Request to fetch instructions to perform a task
@@ -1773,13 +1737,11 @@ Parameters:
 - path: (required) The path of the file to read (relative to the current working directory /test/path)
 - start_line: (optional) The starting line number to read from (1-based). If not provided, it starts from the beginning of the file.
 - end_line: (optional) The ending line number to read to (1-based, inclusive). If not provided, it reads to the end of the file.
-- auto_truncate: (optional) Whether to automatically truncate large files when start_line and end_line are not specified. If true and the file exceeds a certain line threshold, it will: a) return only a subset of lines to save tokens, b) include information about the total line count, and c) provide a summary of method definitions with their line ranges. You should set this to true unless you've been explicitly asked to read an entire large file at once, as this prevents context bloat that can lead to truncated responses. For backwards compatibility, it defaults to false when omitted.
 Usage:
 <read_file>
 <path>File path here</path>
 <start_line>Starting line number (optional)</start_line>
 <end_line>Ending line number (optional)</end_line>
-<auto_truncate>true or false (optional)</auto_truncate>
 </read_file>
 
 Examples:
@@ -1808,15 +1770,8 @@ Examples:
 <start_line>46</start_line>
 <end_line>68</end_line>
 </read_file>
-Note: When both start_line and end_line are provided, this tool efficiently streams only the requested lines, making it suitable for processing large files like logs, CSV files, and other large datasets without memory issues.
-
-5. Reading a large file with automatic truncation:
-<read_file>
-<path>src/large-module.ts</path>
-<auto_truncate>true</auto_truncate>
-</read_file>
 
-This will return a truncated version of the file with information about total line count and method definitions, helping to prevent context size issues with very large files.
+Note: When both start_line and end_line are provided, this tool efficiently streams only the requested lines, making it suitable for processing large files like logs, CSV files, and other large datasets without memory issues.
 
 ## fetch_instructions
 Description: Request to fetch instructions to perform a task
@@ -2172,13 +2127,11 @@ Parameters:
 - path: (required) The path of the file to read (relative to the current working directory /test/path)
 - start_line: (optional) The starting line number to read from (1-based). If not provided, it starts from the beginning of the file.
 - end_line: (optional) The ending line number to read to (1-based, inclusive). If not provided, it reads to the end of the file.
-- auto_truncate: (optional) Whether to automatically truncate large files when start_line and end_line are not specified. If true and the file exceeds a certain line threshold, it will: a) return only a subset of lines to save tokens, b) include information about the total line count, and c) provide a summary of method definitions with their line ranges. You should set this to true unless you've been explicitly asked to read an entire large file at once, as this prevents context bloat that can lead to truncated responses. For backwards compatibility, it defaults to false when omitted.
 Usage:
 <read_file>
 <path>File path here</path>
 <start_line>Starting line number (optional)</start_line>
 <end_line>Ending line number (optional)</end_line>
-<auto_truncate>true or false (optional)</auto_truncate>
 </read_file>
 
 Examples:
@@ -2207,15 +2160,8 @@ Examples:
 <start_line>46</start_line>
 <end_line>68</end_line>
 </read_file>
-Note: When both start_line and end_line are provided, this tool efficiently streams only the requested lines, making it suitable for processing large files like logs, CSV files, and other large datasets without memory issues.
-
-5. Reading a large file with automatic truncation:
-<read_file>
-<path>src/large-module.ts</path>
-<auto_truncate>true</auto_truncate>
-</read_file>
 
-This will return a truncated version of the file with information about total line count and method definitions, helping to prevent context size issues with very large files.
+Note: When both start_line and end_line are provided, this tool efficiently streams only the requested lines, making it suitable for processing large files like logs, CSV files, and other large datasets without memory issues.
 
 ## fetch_instructions
 Description: Request to fetch instructions to perform a task
@@ -2571,13 +2517,11 @@ Parameters:
 - path: (required) The path of the file to read (relative to the current working directory /test/path)
 - start_line: (optional) The starting line number to read from (1-based). If not provided, it starts from the beginning of the file.
 - end_line: (optional) The ending line number to read to (1-based, inclusive). If not provided, it reads to the end of the file.
-- auto_truncate: (optional) Whether to automatically truncate large files when start_line and end_line are not specified. If true and the file exceeds a certain line threshold, it will: a) return only a subset of lines to save tokens, b) include information about the total line count, and c) provide a summary of method definitions with their line ranges. You should set this to true unless you've been explicitly asked to read an entire large file at once, as this prevents context bloat that can lead to truncated responses. For backwards compatibility, it defaults to false when omitted.
 Usage:
 <read_file>
 <path>File path here</path>
 <start_line>Starting line number (optional)</start_line>
 <end_line>Ending line number (optional)</end_line>
-<auto_truncate>true or false (optional)</auto_truncate>
 </read_file>
 
 Examples:
@@ -2606,15 +2550,8 @@ Examples:
 <start_line>46</start_line>
 <end_line>68</end_line>
 </read_file>
-Note: When both start_line and end_line are provided, this tool efficiently streams only the requested lines, making it suitable for processing large files like logs, CSV files, and other large datasets without memory issues.
-
-5. Reading a large file with automatic truncation:
-<read_file>
-<path>src/large-module.ts</path>
-<auto_truncate>true</auto_truncate>
-</read_file>
 
-This will return a truncated version of the file with information about total line count and method definitions, helping to prevent context size issues with very large files.
+Note: When both start_line and end_line are provided, this tool efficiently streams only the requested lines, making it suitable for processing large files like logs, CSV files, and other large datasets without memory issues.
 
 ## fetch_instructions
 Description: Request to fetch instructions to perform a task
@@ -3019,13 +2956,11 @@ Parameters:
 - path: (required) The path of the file to read (relative to the current working directory /test/path)
 - start_line: (optional) The starting line number to read from (1-based). If not provided, it starts from the beginning of the file.
 - end_line: (optional) The ending line number to read to (1-based, inclusive). If not provided, it reads to the end of the file.
-- auto_truncate: (optional) Whether to automatically truncate large files when start_line and end_line are not specified. If true and the file exceeds a certain line threshold, it will: a) return only a subset of lines to save tokens, b) include information about the total line count, and c) provide a summary of method definitions with their line ranges. You should set this to true unless you've been explicitly asked to read an entire large file at once, as this prevents context bloat that can lead to truncated responses. For backwards compatibility, it defaults to false when omitted.
 Usage:
 <read_file>
 <path>File path here</path>
 <start_line>Starting line number (optional)</start_line>
 <end_line>Ending line number (optional)</end_line>
-<auto_truncate>true or false (optional)</auto_truncate>
 </read_file>
 
 Examples:
@@ -3054,15 +2989,8 @@ Examples:
 <start_line>46</start_line>
 <end_line>68</end_line>
 </read_file>
-Note: When both start_line and end_line are provided, this tool efficiently streams only the requested lines, making it suitable for processing large files like logs, CSV files, and other large datasets without memory issues.
 
-5. Reading a large file with automatic truncation:
-<read_file>
-<path>src/large-module.ts</path>
-<auto_truncate>true</auto_truncate>
-</read_file>
-
-This will return a truncated version of the file with information about total line count and method definitions, helping to prevent context size issues with very large files.
+Note: When both start_line and end_line are provided, this tool efficiently streams only the requested lines, making it suitable for processing large files like logs, CSV files, and other large datasets without memory issues.
 
 ## fetch_instructions
 Description: Request to fetch instructions to perform a task
@@ -3486,13 +3414,11 @@ Parameters:
 - path: (required) The path of the file to read (relative to the current working directory /test/path)
 - start_line: (optional) The starting line number to read from (1-based). If not provided, it starts from the beginning of the file.
 - end_line: (optional) The ending line number to read to (1-based, inclusive). If not provided, it reads to the end of the file.
-- auto_truncate: (optional) Whether to automatically truncate large files when start_line and end_line are not specified. If true and the file exceeds a certain line threshold, it will: a) return only a subset of lines to save tokens, b) include information about the total line count, and c) provide a summary of method definitions with their line ranges. You should set this to true unless you've been explicitly asked to read an entire large file at once, as this prevents context bloat that can lead to truncated responses. For backwards compatibility, it defaults to false when omitted.
 Usage:
 <read_file>
 <path>File path here</path>
 <start_line>Starting line number (optional)</start_line>
 <end_line>Ending line number (optional)</end_line>
-<auto_truncate>true or false (optional)</auto_truncate>
 </read_file>
 
 Examples:
@@ -3521,15 +3447,8 @@ Examples:
 <start_line>46</start_line>
 <end_line>68</end_line>
 </read_file>
-Note: When both start_line and end_line are provided, this tool efficiently streams only the requested lines, making it suitable for processing large files like logs, CSV files, and other large datasets without memory issues.
-
-5. Reading a large file with automatic truncation:
-<read_file>
-<path>src/large-module.ts</path>
-<auto_truncate>true</auto_truncate>
-</read_file>
 
-This will return a truncated version of the file with information about total line count and method definitions, helping to prevent context size issues with very large files.
+Note: When both start_line and end_line are provided, this tool efficiently streams only the requested lines, making it suitable for processing large files like logs, CSV files, and other large datasets without memory issues.
 
 ## fetch_instructions
 Description: Request to fetch instructions to perform a task
@@ -3934,13 +3853,11 @@ Parameters:
 - path: (required) The path of the file to read (relative to the current working directory /test/path)
 - start_line: (optional) The starting line number to read from (1-based). If not provided, it starts from the beginning of the file.
 - end_line: (optional) The ending line number to read to (1-based, inclusive). If not provided, it reads to the end of the file.
-- auto_truncate: (optional) Whether to automatically truncate large files when start_line and end_line are not specified. If true and the file exceeds a certain line threshold, it will: a) return only a subset of lines to save tokens, b) include information about the total line count, and c) provide a summary of method definitions with their line ranges. You should set this to true unless you've been explicitly asked to read an entire large file at once, as this prevents context bloat that can lead to truncated responses. For backwards compatibility, it defaults to false when omitted.
 Usage:
 <read_file>
 <path>File path here</path>
 <start_line>Starting line number (optional)</start_line>
 <end_line>Ending line number (optional)</end_line>
-<auto_truncate>true or false (optional)</auto_truncate>
 </read_file>
 
 Examples:
@@ -3969,15 +3886,8 @@ Examples:
 <start_line>46</start_line>
 <end_line>68</end_line>
 </read_file>
-Note: When both start_line and end_line are provided, this tool efficiently streams only the requested lines, making it suitable for processing large files like logs, CSV files, and other large datasets without memory issues.
 
-5. Reading a large file with automatic truncation:
-<read_file>
-<path>src/large-module.ts</path>
-<auto_truncate>true</auto_truncate>
-</read_file>
-
-This will return a truncated version of the file with information about total line count and method definitions, helping to prevent context size issues with very large files.
+Note: When both start_line and end_line are provided, this tool efficiently streams only the requested lines, making it suitable for processing large files like logs, CSV files, and other large datasets without memory issues.
 
 ## fetch_instructions
 Description: Request to fetch instructions to perform a task
@@ -4395,13 +4305,11 @@ Parameters:
 - path: (required) The path of the file to read (relative to the current working directory /test/path)
 - start_line: (optional) The starting line number to read from (1-based). If not provided, it starts from the beginning of the file.
 - end_line: (optional) The ending line number to read to (1-based, inclusive). If not provided, it reads to the end of the file.
-- auto_truncate: (optional) Whether to automatically truncate large files when start_line and end_line are not specified. If true and the file exceeds a certain line threshold, it will: a) return only a subset of lines to save tokens, b) include information about the total line count, and c) provide a summary of method definitions with their line ranges. You should set this to true unless you've been explicitly asked to read an entire large file at once, as this prevents context bloat that can lead to truncated responses. For backwards compatibility, it defaults to false when omitted.
 Usage:
 <read_file>
 <path>File path here</path>
 <start_line>Starting line number (optional)</start_line>
 <end_line>Ending line number (optional)</end_line>
-<auto_truncate>true or false (optional)</auto_truncate>
 </read_file>
 
 Examples:
@@ -4430,15 +4338,8 @@ Examples:
 <start_line>46</start_line>
 <end_line>68</end_line>
 </read_file>
-Note: When both start_line and end_line are provided, this tool efficiently streams only the requested lines, making it suitable for processing large files like logs, CSV files, and other large datasets without memory issues.
 
-5. Reading a large file with automatic truncation:
-<read_file>
-<path>src/large-module.ts</path>
-<auto_truncate>true</auto_truncate>
-</read_file>
-
-This will return a truncated version of the file with information about total line count and method definitions, helping to prevent context size issues with very large files.
+Note: When both start_line and end_line are provided, this tool efficiently streams only the requested lines, making it suitable for processing large files like logs, CSV files, and other large datasets without memory issues.
 
 ## fetch_instructions
 Description: Request to fetch instructions to perform a task
@@ -4836,13 +4737,11 @@ Parameters:
 - path: (required) The path of the file to read (relative to the current working directory /test/path)
 - start_line: (optional) The starting line number to read from (1-based). If not provided, it starts from the beginning of the file.
 - end_line: (optional) The ending line number to read to (1-based, inclusive). If not provided, it reads to the end of the file.
-- auto_truncate: (optional) Whether to automatically truncate large files when start_line and end_line are not specified. If true and the file exceeds a certain line threshold, it will: a) return only a subset of lines to save tokens, b) include information about the total line count, and c) provide a summary of method definitions with their line ranges. You should set this to true unless you've been explicitly asked to read an entire large file at once, as this prevents context bloat that can lead to truncated responses. For backwards compatibility, it defaults to false when omitted.
 Usage:
 <read_file>
 <path>File path here</path>
 <start_line>Starting line number (optional)</start_line>
 <end_line>Ending line number (optional)</end_line>
-<auto_truncate>true or false (optional)</auto_truncate>
 </read_file>
 
 Examples:
@@ -4871,15 +4770,8 @@ Examples:
 <start_line>46</start_line>
 <end_line>68</end_line>
 </read_file>
-Note: When both start_line and end_line are provided, this tool efficiently streams only the requested lines, making it suitable for processing large files like logs, CSV files, and other large datasets without memory issues.
 
-5. Reading a large file with automatic truncation:
-<read_file>
-<path>src/large-module.ts</path>
-<auto_truncate>true</auto_truncate>
-</read_file>
-
-This will return a truncated version of the file with information about total line count and method definitions, helping to prevent context size issues with very large files.
+Note: When both start_line and end_line are provided, this tool efficiently streams only the requested lines, making it suitable for processing large files like logs, CSV files, and other large datasets without memory issues.
 
 ## fetch_instructions
 Description: Request to fetch instructions to perform a task
@@ -5397,13 +5289,11 @@ Parameters:
 - path: (required) The path of the file to read (relative to the current working directory /test/path)
 - start_line: (optional) The starting line number to read from (1-based). If not provided, it starts from the beginning of the file.
 - end_line: (optional) The ending line number to read to (1-based, inclusive). If not provided, it reads to the end of the file.
-- auto_truncate: (optional) Whether to automatically truncate large files when start_line and end_line are not specified. If true and the file exceeds a certain line threshold, it will: a) return only a subset of lines to save tokens, b) include information about the total line count, and c) provide a summary of method definitions with their line ranges. You should set this to true unless you've been explicitly asked to read an entire large file at once, as this prevents context bloat that can lead to truncated responses. For backwards compatibility, it defaults to false when omitted.
 Usage:
 <read_file>
 <path>File path here</path>
 <start_line>Starting line number (optional)</start_line>
 <end_line>Ending line number (optional)</end_line>
-<auto_truncate>true or false (optional)</auto_truncate>
 </read_file>
 
 Examples:
@@ -5432,15 +5322,8 @@ Examples:
 <start_line>46</start_line>
 <end_line>68</end_line>
 </read_file>
-Note: When both start_line and end_line are provided, this tool efficiently streams only the requested lines, making it suitable for processing large files like logs, CSV files, and other large datasets without memory issues.
 
-5. Reading a large file with automatic truncation:
-<read_file>
-<path>src/large-module.ts</path>
-<auto_truncate>true</auto_truncate>
-</read_file>
-
-This will return a truncated version of the file with information about total line count and method definitions, helping to prevent context size issues with very large files.
+Note: When both start_line and end_line are provided, this tool efficiently streams only the requested lines, making it suitable for processing large files like logs, CSV files, and other large datasets without memory issues.
 
 ## fetch_instructions
 Description: Request to fetch instructions to perform a task
@@ -5872,13 +5755,11 @@ Parameters:
 - path: (required) The path of the file to read (relative to the current working directory /test/path)
 - start_line: (optional) The starting line number to read from (1-based). If not provided, it starts from the beginning of the file.
 - end_line: (optional) The ending line number to read to (1-based, inclusive). If not provided, it reads to the end of the file.
-- auto_truncate: (optional) Whether to automatically truncate large files when start_line and end_line are not specified. If true and the file exceeds a certain line threshold, it will: a) return only a subset of lines to save tokens, b) include information about the total line count, and c) provide a summary of method definitions with their line ranges. You should set this to true unless you've been explicitly asked to read an entire large file at once, as this prevents context bloat that can lead to truncated responses. For backwards compatibility, it defaults to false when omitted.
 Usage:
 <read_file>
 <path>File path here</path>
 <start_line>Starting line number (optional)</start_line>
 <end_line>Ending line number (optional)</end_line>
-<auto_truncate>true or false (optional)</auto_truncate>
 </read_file>
 
 Examples:
@@ -5907,15 +5788,8 @@ Examples:
 <start_line>46</start_line>
 <end_line>68</end_line>
 </read_file>
-Note: When both start_line and end_line are provided, this tool efficiently streams only the requested lines, making it suitable for processing large files like logs, CSV files, and other large datasets without memory issues.
-
-5. Reading a large file with automatic truncation:
-<read_file>
-<path>src/large-module.ts</path>
-<auto_truncate>true</auto_truncate>
-</read_file>
 
-This will return a truncated version of the file with information about total line count and method definitions, helping to prevent context size issues with very large files.
+Note: When both start_line and end_line are provided, this tool efficiently streams only the requested lines, making it suitable for processing large files like logs, CSV files, and other large datasets without memory issues.
 
 ## fetch_instructions
 Description: Request to fetch instructions to perform a task
@@ -6245,13 +6119,11 @@ Parameters:
 - path: (required) The path of the file to read (relative to the current working directory /test/path)
 - start_line: (optional) The starting line number to read from (1-based). If not provided, it starts from the beginning of the file.
 - end_line: (optional) The ending line number to read to (1-based, inclusive). If not provided, it reads to the end of the file.
-- auto_truncate: (optional) Whether to automatically truncate large files when start_line and end_line are not specified. If true and the file exceeds a certain line threshold, it will: a) return only a subset of lines to save tokens, b) include information about the total line count, and c) provide a summary of method definitions with their line ranges. You should set this to true unless you've been explicitly asked to read an entire large file at once, as this prevents context bloat that can lead to truncated responses. For backwards compatibility, it defaults to false when omitted.
 Usage:
 <read_file>
 <path>File path here</path>
 <start_line>Starting line number (optional)</start_line>
 <end_line>Ending line number (optional)</end_line>
-<auto_truncate>true or false (optional)</auto_truncate>
 </read_file>
 
 Examples:
@@ -6280,15 +6152,8 @@ Examples:
 <start_line>46</start_line>
 <end_line>68</end_line>
 </read_file>
-Note: When both start_line and end_line are provided, this tool efficiently streams only the requested lines, making it suitable for processing large files like logs, CSV files, and other large datasets without memory issues.
 
-5. Reading a large file with automatic truncation:
-<read_file>
-<path>src/large-module.ts</path>
-<auto_truncate>true</auto_truncate>
-</read_file>
-
-This will return a truncated version of the file with information about total line count and method definitions, helping to prevent context size issues with very large files.
+Note: When both start_line and end_line are provided, this tool efficiently streams only the requested lines, making it suitable for processing large files like logs, CSV files, and other large datasets without memory issues.
 
 ## fetch_instructions
 Description: Request to fetch instructions to perform a task

+ 1 - 10
src/core/prompts/tools/read-file.ts

@@ -7,13 +7,11 @@ Parameters:
 - path: (required) The path of the file to read (relative to the current working directory ${args.cwd})
 - start_line: (optional) The starting line number to read from (1-based). If not provided, it starts from the beginning of the file.
 - end_line: (optional) The ending line number to read to (1-based, inclusive). If not provided, it reads to the end of the file.
-- auto_truncate: (optional) Whether to automatically truncate large files when start_line and end_line are not specified. If true and the file exceeds a certain line threshold, it will: a) return only a subset of lines to save tokens, b) include information about the total line count, and c) provide a summary of method definitions with their line ranges. You should set this to true unless you've been explicitly asked to read an entire large file at once, as this prevents context bloat that can lead to truncated responses. For backwards compatibility, it defaults to false when omitted.
 Usage:
 <read_file>
 <path>File path here</path>
 <start_line>Starting line number (optional)</start_line>
 <end_line>Ending line number (optional)</end_line>
-<auto_truncate>true or false (optional)</auto_truncate>
 </read_file>
 
 Examples:
@@ -42,13 +40,6 @@ Examples:
 <start_line>46</start_line>
 <end_line>68</end_line>
 </read_file>
-Note: When both start_line and end_line are provided, this tool efficiently streams only the requested lines, making it suitable for processing large files like logs, CSV files, and other large datasets without memory issues.
 
-5. Reading a large file with automatic truncation:
-<read_file>
-<path>src/large-module.ts</path>
-<auto_truncate>true</auto_truncate>
-</read_file>
-
-This will return a truncated version of the file with information about total line count and method definitions, helping to prevent context size issues with very large files.`
+Note: When both start_line and end_line are provided, this tool efficiently streams only the requested lines, making it suitable for processing large files like logs, CSV files, and other large datasets without memory issues.`
 }

+ 14 - 4
webview-ui/src/components/settings/ContextManagementSettings.tsx

@@ -95,22 +95,32 @@ export const ContextManagementSettings = ({
 				<div>
 					<div className="flex flex-col gap-2">
 						<span className="font-medium">{t("settings:contextManagement.maxReadFile.label")}</span>
-						<div className="flex items-center gap-2">
+						<div className="flex items-center gap-4">
 							<input
 								type="number"
-								className="w-24 bg-vscode-input-background text-vscode-input-foreground border border-vscode-input-border px-2 py-1 rounded text-right [appearance:textfield] [&::-webkit-outer-spin-button]:appearance-none [&::-webkit-inner-spin-button]:appearance-none"
+								pattern="-?[0-9]*"
+								className="w-24 bg-vscode-input-background text-vscode-input-foreground border border-vscode-input-border px-2 py-1 rounded text-right [appearance:textfield] [&::-webkit-outer-spin-button]:appearance-none [&::-webkit-inner-spin-button]:appearance-none disabled:opacity-50"
 								value={maxReadFileLine ?? 500}
-								min={0}
+								min={-1}
 								onChange={(e) => {
 									const newValue = parseInt(e.target.value, 10)
-									if (!isNaN(newValue) && newValue >= 0) {
+									if (!isNaN(newValue) && newValue >= -1) {
 										setCachedStateField("maxReadFileLine", newValue)
 									}
 								}}
 								onClick={(e) => e.currentTarget.select()}
 								data-testid="max-read-file-line-input"
+								disabled={maxReadFileLine === -1}
 							/>
 							<span>{t("settings:contextManagement.maxReadFile.lines")}</span>
+							<VSCodeCheckbox
+								checked={maxReadFileLine === -1}
+								onChange={(e: any) =>
+									setCachedStateField("maxReadFileLine", e.target.checked ? -1 : 500)
+								}
+								data-testid="max-read-file-always-full-checkbox">
+								{t("settings:contextManagement.maxReadFile.always_full_read")}
+							</VSCodeCheckbox>
 						</div>
 					</div>
 					<div className="text-vscode-descriptionForeground text-sm mt-2">

+ 3 - 2
webview-ui/src/i18n/locales/ca/settings.json

@@ -276,8 +276,9 @@
 		},
 		"maxReadFile": {
 			"label": "Llindar d'auto-truncament de lectura de fitxers",
-			"description": "El nombre predeterminat de línies per llegir d'un fitxer en un lot. Valors més baixos redueixen l'ús de context/recursos però poden requerir més lectures per a fitxers grans.",
-			"lines": "línies"
+			"description": "Roo llegeix aquest nombre de línies quan el model omet els valors d'inici/final. Si aquest nombre és menor que el total del fitxer, Roo genera un índex de números de línia de les definicions de codi. Casos especials: -1 indica a Roo que llegeixi tot el fitxer (sense indexació), i 0 indica que no llegeixi cap línia i proporcioni només índexs de línia per a un context mínim. Valors més baixos minimitzen l'ús inicial de context, permetent lectures posteriors de rangs de línies precisos. Les sol·licituds amb inici/final explícits no estan limitades per aquesta configuració.",
+			"lines": "línies",
+			"always_full_read": "Llegeix sempre el fitxer sencer"
 		}
 	},
 	"terminal": {

+ 3 - 2
webview-ui/src/i18n/locales/de/settings.json

@@ -276,8 +276,9 @@
 		},
 		"maxReadFile": {
 			"label": "Schwellenwert für automatische Dateilesekürzung",
-			"description": "Die Standardanzahl an Zeilen, die in einem Durchgang aus einer Datei gelesen werden. Niedrigere Werte reduzieren den Kontext-/Ressourcenverbrauch, können aber mehr Lesevorgänge für große Dateien erfordern.",
-			"lines": "Zeilen"
+			"description": "Roo liest diese Anzahl von Zeilen, wenn das Modell keine Start-/Endwerte angibt. Wenn diese Zahl kleiner als die Gesamtzahl der Zeilen ist, erstellt Roo einen Zeilennummernindex der Codedefinitionen. Spezialfälle: -1 weist Roo an, die gesamte Datei zu lesen (ohne Indexierung), und 0 weist an, keine Zeilen zu lesen und nur Zeilenindizes für minimalen Kontext bereitzustellen. Niedrigere Werte minimieren die anfängliche Kontextnutzung und ermöglichen präzise nachfolgende Zeilenbereich-Lesungen. Explizite Start-/End-Anfragen sind von dieser Einstellung nicht begrenzt.",
+			"lines": "Zeilen",
+			"always_full_read": "Immer die gesamte Datei lesen"
 		}
 	},
 	"terminal": {

+ 3 - 2
webview-ui/src/i18n/locales/en/settings.json

@@ -276,8 +276,9 @@
 		},
 		"maxReadFile": {
 			"label": "File read auto-truncate threshold",
-			"description": "The default number of lines to read from a file in one batch. Lower values reduce context/resource usage but may require more reads for large files.",
-			"lines": "lines"
+			"description": "Roo reads this number of lines when the model omits start/end values. If this number is less than the file's total, Roo generates a line number index of code definitions. Special cases: -1 instructs Roo to read the entire file (without indexing), and 0 instructs it to read no lines and provides line indexes only for minimal context. Lower values minimize initial context usage, enabling precise subsequent line-range reads. Explicit start/end requests are not limited by this setting.",
+			"lines": "lines",
+			"always_full_read": "Always read entire file"
 		}
 	},
 	"terminal": {

+ 3 - 2
webview-ui/src/i18n/locales/es/settings.json

@@ -276,8 +276,9 @@
 		},
 		"maxReadFile": {
 			"label": "Umbral de auto-truncado de lectura de archivos",
-			"description": "El número predeterminado de líneas para leer de un archivo en un lote. Valores más bajos reducen el uso de contexto/recursos pero pueden requerir más lecturas para archivos grandes.",
-			"lines": "líneas"
+			"description": "Roo lee este número de líneas cuando el modelo omite valores de inicio/fin. Si este número es menor que el total del archivo, Roo genera un índice de números de línea de las definiciones de código. Casos especiales: -1 indica a Roo que lea el archivo completo (sin indexación), y 0 indica que no lea líneas y proporcione solo índices de línea para un contexto mínimo. Valores más bajos minimizan el uso inicial de contexto, permitiendo lecturas posteriores de rangos de líneas precisos. Las solicitudes con inicio/fin explícitos no están limitadas por esta configuración.",
+			"lines": "líneas",
+			"always_full_read": "Siempre leer el archivo completo"
 		}
 	},
 	"terminal": {

+ 3 - 2
webview-ui/src/i18n/locales/fr/settings.json

@@ -276,8 +276,9 @@
 		},
 		"maxReadFile": {
 			"label": "Seuil d'auto-troncature de lecture de fichier",
-			"description": "Le nombre par défaut de lignes à lire depuis un fichier en un lot. Des valeurs plus basses réduisent l'utilisation de contexte/ressources mais peuvent nécessiter plus de lectures pour les fichiers volumineux.",
-			"lines": "lignes"
+			"description": "Roo lit ce nombre de lignes lorsque le modèle omet les valeurs de début/fin. Si ce nombre est inférieur au total du fichier, Roo génère un index des numéros de ligne des définitions de code. Cas spéciaux : -1 indique à Roo de lire le fichier entier (sans indexation), et 0 indique de ne lire aucune ligne et de fournir uniquement les index de ligne pour un contexte minimal. Des valeurs plus basses minimisent l'utilisation initiale du contexte, permettant des lectures ultérieures de plages de lignes précises. Les requêtes avec début/fin explicites ne sont pas limitées par ce paramètre.",
+			"lines": "lignes",
+			"always_full_read": "Toujours lire le fichier entier"
 		}
 	},
 	"terminal": {

+ 3 - 2
webview-ui/src/i18n/locales/hi/settings.json

@@ -276,8 +276,9 @@
 		},
 		"maxReadFile": {
 			"label": "फ़ाइल पढ़ने का स्वचालित काटने की सीमा",
-			"description": "एक बैच में फ़ाइल से पढ़ने के लिए डिफ़ॉल्ट लाइनों की संख्या। कम मान संदर्भ/संसाधन उपयोग को कम करते हैं लेकिन बड़ी फाइलों के लिए अधिक पठन की आवश्यकता हो सकती है।",
-			"lines": "पंक्तियाँ"
+			"description": "जब मॉडल प्रारंभ/अंत मान नहीं देता है, तो Roo इतनी पंक्तियाँ पढ़ता है। यदि यह संख्या फ़ाइल की कुल पंक्तियों से कम है, तो Roo कोड परिभाषाओं का पंक्ति क्रमांक इंडेक्स बनाता है। विशेष मामले: -1 Roo को पूरी फ़ाइल पढ़ने का निर्देश देता है (इंडेक्सिंग के बिना), और 0 कोई पंक्ति न पढ़ने और न्यूनतम संदर्भ के लिए केवल पंक्ति इंडेक्स प्रदान करने का निर्देश देता है। कम मान प्रारंभिक संदर्भ उपयोग को कम करते हैं, जो बाद में सटीक पंक्ति श्रेणी पढ़ने की अनुमति देता है। स्पष्ट प्रारंभ/अंत अनुरोध इस सेटिंग से सीमित नहीं हैं।",
+			"lines": "पंक्तियाँ",
+			"always_full_read": "हमेशा पूरी फ़ाइल पढ़ें"
 		}
 	},
 	"terminal": {

+ 3 - 2
webview-ui/src/i18n/locales/it/settings.json

@@ -276,8 +276,9 @@
 		},
 		"maxReadFile": {
 			"label": "Soglia di auto-troncamento lettura file",
-			"description": "Il numero predefinito di righe da leggere da un file in un singolo batch. Valori più bassi riducono l'utilizzo di contesto/risorse ma potrebbero richiedere più letture per file di grandi dimensioni.",
-			"lines": "righe"
+			"description": "Roo legge questo numero di righe quando il modello omette i valori di inizio/fine. Se questo numero è inferiore al totale del file, Roo genera un indice dei numeri di riga delle definizioni di codice. Casi speciali: -1 indica a Roo di leggere l'intero file (senza indicizzazione), e 0 indica di non leggere righe e fornire solo indici di riga per un contesto minimo. Valori più bassi minimizzano l'utilizzo iniziale del contesto, permettendo successive letture precise di intervalli di righe. Le richieste con inizio/fine espliciti non sono limitate da questa impostazione.",
+			"lines": "righe",
+			"always_full_read": "Leggi sempre l'intero file"
 		}
 	},
 	"terminal": {

+ 3 - 2
webview-ui/src/i18n/locales/ja/settings.json

@@ -276,8 +276,9 @@
 		},
 		"maxReadFile": {
 			"label": "ファイル読み込み自動切り詰めしきい値",
-			"description": "一括でファイルから読み込むデフォルトの行数。低い値はコンテキスト/リソース使用量を減らしますが、大きなファイルではより多くの読み込みが必要になる場合があります。",
-			"lines": "行"
+			"description": "モデルが開始/終了の値を指定しない場合、Rooはこの行数を読み込みます。この数がファイルの総行数より少ない場合、Rooはコード定義の行番号インデックスを生成します。特殊なケース:-1はRooにファイル全体を読み込むよう指示し(インデックス作成なし)、0は行を読み込まず最小限のコンテキストのために行インデックスのみを提供するよう指示します。低い値は初期コンテキスト使用量を最小限に抑え、後続の正確な行範囲の読み込みを可能にします。明示的な開始/終了の要求はこの設定による制限を受けません。",
+			"lines": "行",
+			"always_full_read": "常にファイル全体を読み込む"
 		}
 	},
 	"terminal": {

+ 3 - 2
webview-ui/src/i18n/locales/ko/settings.json

@@ -276,8 +276,9 @@
 		},
 		"maxReadFile": {
 			"label": "파일 읽기 자동 축소 임계값",
-			"description": "한 번에 파일에서 읽을 기본 라인 수. 낮은 값은 컨텍스트/리소스 사용량을 줄이지만 대용량 파일의 경우 더 많은 읽기가 필요할 수 있습니다.",
-			"lines": "줄"
+			"description": "모델이 시작/끝 값을 지정하지 않을 때 Roo가 읽는 줄 수입니다. 이 수가 파일의 총 줄 수보다 적으면 Roo는 코드 정의의 줄 번호 인덱스를 생성합니다. 특수한 경우: -1은 Roo에게 전체 파일을 읽도록 지시하고(인덱싱 없이), 0은 줄을 읽지 않고 최소한의 컨텍스트를 위해 줄 인덱스만 제공하도록 지시합니다. 낮은 값은 초기 컨텍스트 사용을 최소화하고, 이후 정확한 줄 범위 읽기를 가능하게 합니다. 명시적 시작/끝 요청은 이 설정의 제한을 받지 않습니다.",
+			"lines": "줄",
+			"always_full_read": "항상 전체 파일 읽기"
 		}
 	},
 	"terminal": {

+ 3 - 2
webview-ui/src/i18n/locales/pl/settings.json

@@ -276,8 +276,9 @@
 		},
 		"maxReadFile": {
 			"label": "Próg automatycznego skracania odczytu pliku",
-			"description": "Domyślna liczba linii odczytywanych z pliku w jednej partii. Niższe wartości zmniejszają użycie kontekstu/zasobów, ale mogą wymagać więcej odczytów dla dużych plików.",
-			"lines": "linii"
+			"description": "Roo odczytuje tę liczbę linii, gdy model nie określa wartości początkowej/końcowej. Jeśli ta liczba jest mniejsza niż całkowita liczba linii pliku, Roo generuje indeks numerów linii definicji kodu. Przypadki specjalne: -1 nakazuje Roo odczytać cały plik (bez indeksowania), a 0 nakazuje nie czytać żadnych linii i dostarczyć tylko indeksy linii dla minimalnego kontekstu. Niższe wartości minimalizują początkowe użycie kontekstu, umożliwiając późniejsze precyzyjne odczyty zakresów linii. Jawne żądania początku/końca nie są ograniczone tym ustawieniem.",
+			"lines": "linii",
+			"always_full_read": "Zawsze czytaj cały plik"
 		}
 	},
 	"terminal": {

+ 3 - 2
webview-ui/src/i18n/locales/pt-BR/settings.json

@@ -276,8 +276,9 @@
 		},
 		"maxReadFile": {
 			"label": "Limite de auto-truncamento de leitura de arquivo",
-			"description": "O número padrão de linhas para ler de um arquivo em um lote. Valores mais baixos reduzem o uso de contexto/recursos, mas podem exigir mais leituras para arquivos grandes.",
-			"lines": "linhas"
+			"description": "O Roo lê este número de linhas quando o modelo omite valores de início/fim. Se este número for menor que o total do arquivo, o Roo gera um índice de números de linha das definições de código. Casos especiais: -1 instrui o Roo a ler o arquivo inteiro (sem indexação), e 0 instrui a não ler linhas e fornecer apenas índices de linha para contexto mínimo. Valores mais baixos minimizam o uso inicial de contexto, permitindo leituras posteriores precisas de intervalos de linhas. Requisições com início/fim explícitos não são limitadas por esta configuração.",
+			"lines": "linhas",
+			"always_full_read": "Sempre ler o arquivo inteiro"
 		}
 	},
 	"terminal": {

+ 3 - 2
webview-ui/src/i18n/locales/tr/settings.json

@@ -276,8 +276,9 @@
 		},
 		"maxReadFile": {
 			"label": "Dosya okuma otomatik kısaltma eşiği",
-			"description": "Bir dosyadan bir partide okunacak varsayılan satır sayısı. Daha düşük değerler bağlam/kaynak kullanımını azaltır ancak büyük dosyalar için daha fazla okuma gerektirebilir.",
-			"lines": "satır"
+			"description": "Model başlangıç/bitiş değerlerini belirtmediğinde Roo bu sayıda satırı okur. Bu sayı dosyanın toplam satır sayısından azsa, Roo kod tanımlamalarının satır numarası dizinini oluşturur. Özel durumlar: -1, Roo'ya tüm dosyayı okumasını (dizinleme olmadan), 0 ise hiç satır okumamasını ve minimum bağlam için yalnızca satır dizinleri sağlamasını belirtir. Düşük değerler başlangıç bağlam kullanımını en aza indirir ve sonraki hassas satır aralığı okumalarına olanak tanır. Açık başlangıç/bitiş istekleri bu ayarla sınırlı değildir.",
+			"lines": "satır",
+			"always_full_read": "Her zaman tüm dosyayı oku"
 		}
 	},
 	"terminal": {

+ 3 - 2
webview-ui/src/i18n/locales/vi/settings.json

@@ -276,8 +276,9 @@
 		},
 		"maxReadFile": {
 			"label": "Ngưỡng tự động cắt ngắn khi đọc tệp",
-			"description": "Số dòng mặc định để đọc từ một tệp trong một lô. Giá trị thấp hơn giảm sử dụng ngữ cảnh/tài nguyên nhưng có thể yêu cầu đọc nhiều lần hơn cho các tệp lớn.",
-			"lines": "dòng"
+			"description": "Roo đọc số dòng này khi mô hình không chỉ định giá trị bắt đầu/kết thúc. Nếu số này nhỏ hơn tổng số dòng của tệp, Roo sẽ tạo một chỉ mục số dòng của các định nghĩa mã. Trường hợp đặc biệt: -1 chỉ thị Roo đọc toàn bộ tệp (không tạo chỉ mục), và 0 chỉ thị không đọc dòng nào và chỉ cung cấp chỉ mục dòng cho ngữ cảnh tối thiểu. Giá trị thấp hơn giảm thiểu việc sử dụng ngữ cảnh ban đầu, cho phép đọc chính xác các phạm vi dòng sau này. Các yêu cầu có chỉ định bắt đầu/kết thúc rõ ràng không bị giới hạn bởi cài đặt này.",
+			"lines": "dòng",
+			"always_full_read": "Luôn đọc toàn bộ tệp"
 		}
 	},
 	"terminal": {

+ 3 - 2
webview-ui/src/i18n/locales/zh-CN/settings.json

@@ -276,8 +276,9 @@
 		},
 		"maxReadFile": {
 			"label": "文件读取自动截断阈值",
-			"description": "一次批处理中从文件读取的默认行数。较低的值会减少上下文/资源使用,但可能需要对大文件进行更多次读取。",
-			"lines": "行"
+			"description": "当模型未指定起始/结束值时,Roo 读取的行数。如果此数值小于文件总行数,Roo 将生成代码定义的行号索引。特殊情况:-1 指示 Roo 读取整个文件(不创建索引),0 指示不读取任何行并仅提供行索引以获得最小上下文。较低的值可最小化初始上下文使用,允许后续精确的行范围读取。显式指定起始/结束的请求不受此设置限制。",
+			"lines": "行",
+			"always_full_read": "始终读取整个文件"
 		}
 	},
 	"terminal": {

+ 3 - 2
webview-ui/src/i18n/locales/zh-TW/settings.json

@@ -276,8 +276,9 @@
 		},
 		"maxReadFile": {
 			"label": "檔案讀取自動截斷閾值",
-			"description": "預設讀取檔案的行數",
-			"lines": "行"
+			"description": "當模型未指定起始/結束值時,Roo 讀取的行數。如果此數值小於檔案總行數,Roo 將產生程式碼定義的行號索引。特殊情況:-1 指示 Roo 讀取整個檔案(不建立索引),0 指示不讀取任何行並僅提供行索引以取得最小上下文。較低的值可最小化初始上下文使用,允許後續精確的行範圍讀取。明確指定起始/結束的請求不受此設定限制。",
+			"lines": "行",
+			"always_full_read": "始終讀取整個檔案"
 		}
 	},
 	"terminal": {