Kaynağa Gözat

Merge branch 'dev' into snapshot-node-shim-stuff

Aiden Cline 1 hafta önce
ebeveyn
işleme
8bce02e567
43 değiştirilmiş dosya ile 1301 ekleme ve 1165 silme
  1. 2 0
      packages/opencode/src/bus/index.ts
  2. 100 0
      packages/opencode/src/effect/app-runtime.ts
  3. 1 1
      packages/opencode/src/mcp/auth.ts
  4. 1 1
      packages/opencode/src/project/vcs.ts
  5. 1 1
      packages/opencode/src/pty/index.ts
  6. 2 2
      packages/opencode/src/session/processor.ts
  7. 10 4
      packages/opencode/src/session/prompt.ts
  8. 248 231
      packages/opencode/src/tool/apply_patch.ts
  9. 149 123
      packages/opencode/src/tool/edit.ts
  10. 163 142
      packages/opencode/src/tool/grep.ts
  11. 97 82
      packages/opencode/src/tool/ls.ts
  12. 54 37
      packages/opencode/src/tool/multiedit.ts
  13. 13 5
      packages/opencode/src/tool/registry.ts
  14. 82 80
      packages/opencode/src/tool/skill.ts
  15. 13 4
      packages/opencode/src/tool/task.ts
  16. 80 61
      packages/opencode/src/tool/write.ts
  17. 1 1
      packages/opencode/src/worktree/index.ts
  18. 2 0
      packages/opencode/test/session/prompt-effect.test.ts
  19. 2 0
      packages/opencode/test/session/snapshot-tool-race.test.ts
  20. 8 1
      packages/opencode/test/tool/apply_patch.test.ts
  21. 35 19
      packages/opencode/test/tool/edit.test.ts
  22. 11 3
      packages/opencode/test/tool/grep.test.ts
  23. 6 2
      packages/opencode/test/tool/skill.test.ts
  24. 25 60
      packages/opencode/test/tool/task.test.ts
  25. 177 287
      packages/opencode/test/tool/write.test.ts
  26. 1 1
      packages/web/src/content/docs/ar/zen.mdx
  27. 1 1
      packages/web/src/content/docs/bs/zen.mdx
  28. 1 1
      packages/web/src/content/docs/da/zen.mdx
  29. 1 1
      packages/web/src/content/docs/de/zen.mdx
  30. 1 1
      packages/web/src/content/docs/es/zen.mdx
  31. 1 1
      packages/web/src/content/docs/fr/zen.mdx
  32. 1 1
      packages/web/src/content/docs/it/zen.mdx
  33. 1 1
      packages/web/src/content/docs/ja/zen.mdx
  34. 1 1
      packages/web/src/content/docs/ko/zen.mdx
  35. 1 1
      packages/web/src/content/docs/nb/zen.mdx
  36. 1 1
      packages/web/src/content/docs/pl/zen.mdx
  37. 1 1
      packages/web/src/content/docs/pt-br/zen.mdx
  38. 1 1
      packages/web/src/content/docs/ru/zen.mdx
  39. 1 1
      packages/web/src/content/docs/th/zen.mdx
  40. 1 1
      packages/web/src/content/docs/tr/zen.mdx
  41. 1 1
      packages/web/src/content/docs/zen.mdx
  42. 1 1
      packages/web/src/content/docs/zh-cn/zen.mdx
  43. 1 1
      packages/web/src/content/docs/zh-tw/zen.mdx

+ 2 - 0
packages/opencode/src/bus/index.ts

@@ -169,6 +169,8 @@ export namespace Bus {
     }),
   )
 
+  export const defaultLayer = layer
+
   const { runPromise, runSync } = makeRuntime(Service, layer)
 
   // runSync is safe here because the subscribe chain (InstanceState.get, PubSub.subscribe,

+ 100 - 0
packages/opencode/src/effect/app-runtime.ts

@@ -0,0 +1,100 @@
+import { Layer, ManagedRuntime } from "effect"
+import { memoMap } from "./run-service"
+import { Observability } from "./oltp"
+
+import { AppFileSystem } from "@/filesystem"
+import { Bus } from "@/bus"
+import { Auth } from "@/auth"
+import { Account } from "@/account"
+import { Config } from "@/config/config"
+import { Git } from "@/git"
+import { Ripgrep } from "@/file/ripgrep"
+import { FileTime } from "@/file/time"
+import { File } from "@/file"
+import { FileWatcher } from "@/file/watcher"
+import { Storage } from "@/storage/storage"
+import { Snapshot } from "@/snapshot"
+import { Plugin } from "@/plugin"
+import { Provider } from "@/provider/provider"
+import { ProviderAuth } from "@/provider/auth"
+import { Agent } from "@/agent/agent"
+import { Skill } from "@/skill"
+import { Discovery } from "@/skill/discovery"
+import { Question } from "@/question"
+import { Permission } from "@/permission"
+import { Todo } from "@/session/todo"
+import { Session } from "@/session"
+import { SessionStatus } from "@/session/status"
+import { SessionRunState } from "@/session/run-state"
+import { SessionProcessor } from "@/session/processor"
+import { SessionCompaction } from "@/session/compaction"
+import { SessionRevert } from "@/session/revert"
+import { SessionSummary } from "@/session/summary"
+import { SessionPrompt } from "@/session/prompt"
+import { Instruction } from "@/session/instruction"
+import { LLM } from "@/session/llm"
+import { LSP } from "@/lsp"
+import { MCP } from "@/mcp"
+import { McpAuth } from "@/mcp/auth"
+import { Command } from "@/command"
+import { Truncate } from "@/tool/truncate"
+import { ToolRegistry } from "@/tool/registry"
+import { Format } from "@/format"
+import { Project } from "@/project/project"
+import { Vcs } from "@/project/vcs"
+import { Worktree } from "@/worktree"
+import { Pty } from "@/pty"
+import { Installation } from "@/installation"
+import { ShareNext } from "@/share/share-next"
+import { SessionShare } from "@/share/session"
+
+export const AppLayer = Layer.mergeAll(
+  Observability.layer,
+  AppFileSystem.defaultLayer,
+  Bus.defaultLayer,
+  Auth.defaultLayer,
+  Account.defaultLayer,
+  Config.defaultLayer,
+  Git.defaultLayer,
+  Ripgrep.defaultLayer,
+  FileTime.defaultLayer,
+  File.defaultLayer,
+  FileWatcher.defaultLayer,
+  Storage.defaultLayer,
+  Snapshot.defaultLayer,
+  Plugin.defaultLayer,
+  Provider.defaultLayer,
+  ProviderAuth.defaultLayer,
+  Agent.defaultLayer,
+  Skill.defaultLayer,
+  Discovery.defaultLayer,
+  Question.defaultLayer,
+  Permission.defaultLayer,
+  Todo.defaultLayer,
+  Session.defaultLayer,
+  SessionStatus.defaultLayer,
+  SessionRunState.defaultLayer,
+  SessionProcessor.defaultLayer,
+  SessionCompaction.defaultLayer,
+  SessionRevert.defaultLayer,
+  SessionSummary.defaultLayer,
+  SessionPrompt.defaultLayer,
+  Instruction.defaultLayer,
+  LLM.defaultLayer,
+  LSP.defaultLayer,
+  MCP.defaultLayer,
+  McpAuth.defaultLayer,
+  Command.defaultLayer,
+  Truncate.defaultLayer,
+  ToolRegistry.defaultLayer,
+  Format.defaultLayer,
+  Project.defaultLayer,
+  Vcs.defaultLayer,
+  Worktree.defaultLayer,
+  Pty.defaultLayer,
+  Installation.defaultLayer,
+  ShareNext.defaultLayer,
+  SessionShare.defaultLayer,
+)
+
+export const AppRuntime = ManagedRuntime.make(AppLayer, { memoMap })

+ 1 - 1
packages/opencode/src/mcp/auth.ts

@@ -141,7 +141,7 @@ export namespace McpAuth {
     }),
   )
 
-  const defaultLayer = layer.pipe(Layer.provide(AppFileSystem.defaultLayer))
+  export const defaultLayer = layer.pipe(Layer.provide(AppFileSystem.defaultLayer))
 
   const { runPromise } = makeRuntime(Service, defaultLayer)
 

+ 1 - 1
packages/opencode/src/project/vcs.ts

@@ -226,7 +226,7 @@ export namespace Vcs {
     }),
   )
 
-  const defaultLayer = layer.pipe(
+  export const defaultLayer = layer.pipe(
     Layer.provide(Git.defaultLayer),
     Layer.provide(AppFileSystem.defaultLayer),
     Layer.provide(Bus.layer),

+ 1 - 1
packages/opencode/src/pty/index.ts

@@ -359,7 +359,7 @@ export namespace Pty {
     }),
   )
 
-  const defaultLayer = layer.pipe(Layer.provide(Bus.layer), Layer.provide(Plugin.defaultLayer))
+  export const defaultLayer = layer.pipe(Layer.provide(Bus.layer), Layer.provide(Plugin.defaultLayer))
 
   const { runPromise } = makeRuntime(Service, defaultLayer)
 

+ 2 - 2
packages/opencode/src/session/processor.ts

@@ -245,7 +245,7 @@ export namespace SessionProcessor {
 
             case "reasoning-end":
               if (!(value.id in ctx.reasoningMap)) return
-              ctx.reasoningMap[value.id].text = ctx.reasoningMap[value.id].text.trimEnd()
+              ctx.reasoningMap[value.id].text = ctx.reasoningMap[value.id].text
               ctx.reasoningMap[value.id].time = { ...ctx.reasoningMap[value.id].time, end: Date.now() }
               if (value.providerMetadata) ctx.reasoningMap[value.id].metadata = value.providerMetadata
               yield* session.updatePart(ctx.reasoningMap[value.id])
@@ -425,7 +425,7 @@ export namespace SessionProcessor {
 
             case "text-end":
               if (!ctx.currentText) return
-              ctx.currentText.text = ctx.currentText.text.trimEnd()
+              ctx.currentText.text = ctx.currentText.text
               ctx.currentText.text = (yield* plugin.trigger(
                 "experimental.text.complete",
                 {

+ 10 - 4
packages/opencode/src/session/prompt.ts

@@ -46,7 +46,7 @@ import { Process } from "@/util/process"
 import { Cause, Effect, Exit, Layer, Option, Scope, ServiceMap } from "effect"
 import { InstanceState } from "@/effect/instance-state"
 import { makeRuntime } from "@/effect/run-service"
-import { TaskTool } from "@/tool/task"
+import { TaskTool, type TaskPromptOps } from "@/tool/task"
 import { SessionRunState } from "./run-state"
 
 // @ts-ignore
@@ -356,7 +356,7 @@ NOTE: At any point in time through this workflow you should feel free to ask the
           abort: options.abortSignal!,
           messageID: input.processor.message.id,
           callID: options.toolCallId,
-          extra: { model: input.model, bypassAgentCheck: input.bypassAgentCheck },
+          extra: { model: input.model, bypassAgentCheck: input.bypassAgentCheck, promptOps },
           agent: input.agent.name,
           messages: input.messages,
           metadata: (val) =>
@@ -586,7 +586,7 @@ NOTE: At any point in time through this workflow you should feel free to ask the
               sessionID,
               abort: signal,
               callID: part.callID,
-              extra: { bypassAgentCheck: true },
+              extra: { bypassAgentCheck: true, promptOps },
               messages: msgs,
               metadata(val: { title?: string; metadata?: Record<string, any> }) {
                 return Effect.runPromise(
@@ -1655,6 +1655,12 @@ NOTE: At any point in time through this workflow you should feel free to ask the
         return result
       })
 
+      const promptOps: TaskPromptOps = {
+        cancel: (sessionID) => Effect.runFork(cancel(sessionID)),
+        resolvePromptParts: (template) => Effect.runPromise(resolvePromptParts(template)),
+        prompt: (input) => Effect.runPromise(prompt(input)),
+      }
+
       return Service.of({
         cancel,
         prompt,
@@ -1666,7 +1672,7 @@ NOTE: At any point in time through this workflow you should feel free to ask the
     }),
   )
 
-  const defaultLayer = Layer.suspend(() =>
+  export const defaultLayer = Layer.suspend(() =>
     layer.pipe(
       Layer.provide(SessionRunState.defaultLayer),
       Layer.provide(SessionStatus.defaultLayer),

+ 248 - 231
packages/opencode/src/tool/apply_patch.ts

@@ -1,16 +1,16 @@
 import z from "zod"
 import * as path from "path"
-import * as fs from "fs/promises"
+import { Effect } from "effect"
 import { Tool } from "./tool"
 import { Bus } from "../bus"
 import { FileWatcher } from "../file/watcher"
 import { Instance } from "../project/instance"
 import { Patch } from "../patch"
 import { createTwoFilesPatch, diffLines } from "diff"
-import { assertExternalDirectory } from "./external-directory"
+import { assertExternalDirectoryEffect } from "./external-directory"
 import { trimDiff } from "./edit"
 import { LSP } from "../lsp"
-import { Filesystem } from "../util/filesystem"
+import { AppFileSystem } from "../filesystem"
 import DESCRIPTION from "./apply_patch.txt"
 import { File } from "../file"
 import { Format } from "../format"
@@ -19,261 +19,278 @@ const PatchParams = z.object({
   patchText: z.string().describe("The full patch text that describes all changes to be made"),
 })
 
-export const ApplyPatchTool = Tool.define("apply_patch", {
-  description: DESCRIPTION,
-  parameters: PatchParams,
-  async execute(params, ctx) {
-    if (!params.patchText) {
-      throw new Error("patchText is required")
-    }
-
-    // Parse the patch to get hunks
-    let hunks: Patch.Hunk[]
-    try {
-      const parseResult = Patch.parsePatch(params.patchText)
-      hunks = parseResult.hunks
-    } catch (error) {
-      throw new Error(`apply_patch verification failed: ${error}`)
-    }
+export const ApplyPatchTool = Tool.defineEffect(
+  "apply_patch",
+  Effect.gen(function* () {
+    const lsp = yield* LSP.Service
+    const afs = yield* AppFileSystem.Service
+    const format = yield* Format.Service
 
-    if (hunks.length === 0) {
-      const normalized = params.patchText.replace(/\r\n/g, "\n").replace(/\r/g, "\n").trim()
-      if (normalized === "*** Begin Patch\n*** End Patch") {
-        throw new Error("patch rejected: empty patch")
+    const run = Effect.fn("ApplyPatchTool.execute")(function* (params: z.infer<typeof PatchParams>, ctx: Tool.Context) {
+      if (!params.patchText) {
+        return yield* Effect.fail(new Error("patchText is required"))
       }
-      throw new Error("apply_patch verification failed: no hunks found")
-    }
 
-    // Validate file paths and check permissions
-    const fileChanges: Array<{
-      filePath: string
-      oldContent: string
-      newContent: string
-      type: "add" | "update" | "delete" | "move"
-      movePath?: string
-      diff: string
-      additions: number
-      deletions: number
-    }> = []
-
-    let totalDiff = ""
-
-    for (const hunk of hunks) {
-      const filePath = path.resolve(Instance.directory, hunk.path)
-      await assertExternalDirectory(ctx, filePath)
-
-      switch (hunk.type) {
-        case "add": {
-          const oldContent = ""
-          const newContent =
-            hunk.contents.length === 0 || hunk.contents.endsWith("\n") ? hunk.contents : `${hunk.contents}\n`
-          const diff = trimDiff(createTwoFilesPatch(filePath, filePath, oldContent, newContent))
-
-          let additions = 0
-          let deletions = 0
-          for (const change of diffLines(oldContent, newContent)) {
-            if (change.added) additions += change.count || 0
-            if (change.removed) deletions += change.count || 0
-          }
+      // Parse the patch to get hunks
+      let hunks: Patch.Hunk[]
+      try {
+        const parseResult = Patch.parsePatch(params.patchText)
+        hunks = parseResult.hunks
+      } catch (error) {
+        return yield* Effect.fail(new Error(`apply_patch verification failed: ${error}`))
+      }
 
-          fileChanges.push({
-            filePath,
-            oldContent,
-            newContent,
-            type: "add",
-            diff,
-            additions,
-            deletions,
-          })
-
-          totalDiff += diff + "\n"
-          break
+      if (hunks.length === 0) {
+        const normalized = params.patchText.replace(/\r\n/g, "\n").replace(/\r/g, "\n").trim()
+        if (normalized === "*** Begin Patch\n*** End Patch") {
+          return yield* Effect.fail(new Error("patch rejected: empty patch"))
         }
+        return yield* Effect.fail(new Error("apply_patch verification failed: no hunks found"))
+      }
 
-        case "update": {
-          // Check if file exists for update
-          const stats = await fs.stat(filePath).catch(() => null)
-          if (!stats || stats.isDirectory()) {
-            throw new Error(`apply_patch verification failed: Failed to read file to update: ${filePath}`)
+      // Validate file paths and check permissions
+      const fileChanges: Array<{
+        filePath: string
+        oldContent: string
+        newContent: string
+        type: "add" | "update" | "delete" | "move"
+        movePath?: string
+        diff: string
+        additions: number
+        deletions: number
+      }> = []
+
+      let totalDiff = ""
+
+      for (const hunk of hunks) {
+        const filePath = path.resolve(Instance.directory, hunk.path)
+        yield* assertExternalDirectoryEffect(ctx, filePath)
+
+        switch (hunk.type) {
+          case "add": {
+            const oldContent = ""
+            const newContent =
+              hunk.contents.length === 0 || hunk.contents.endsWith("\n") ? hunk.contents : `${hunk.contents}\n`
+            const diff = trimDiff(createTwoFilesPatch(filePath, filePath, oldContent, newContent))
+
+            let additions = 0
+            let deletions = 0
+            for (const change of diffLines(oldContent, newContent)) {
+              if (change.added) additions += change.count || 0
+              if (change.removed) deletions += change.count || 0
+            }
+
+            fileChanges.push({
+              filePath,
+              oldContent,
+              newContent,
+              type: "add",
+              diff,
+              additions,
+              deletions,
+            })
+
+            totalDiff += diff + "\n"
+            break
           }
 
-          const oldContent = await fs.readFile(filePath, "utf-8")
-          let newContent = oldContent
+          case "update": {
+            // Check if file exists for update
+            const stats = yield* afs.stat(filePath).pipe(Effect.catch(() => Effect.succeed(undefined)))
+            if (!stats || stats.type === "Directory") {
+              return yield* Effect.fail(
+                new Error(`apply_patch verification failed: Failed to read file to update: ${filePath}`),
+              )
+            }
+
+            const oldContent = yield* afs.readFileString(filePath)
+            let newContent = oldContent
+
+            // Apply the update chunks to get new content
+            try {
+              const fileUpdate = Patch.deriveNewContentsFromChunks(filePath, hunk.chunks)
+              newContent = fileUpdate.content
+            } catch (error) {
+              return yield* Effect.fail(new Error(`apply_patch verification failed: ${error}`))
+            }
+
+            const diff = trimDiff(createTwoFilesPatch(filePath, filePath, oldContent, newContent))
+
+            let additions = 0
+            let deletions = 0
+            for (const change of diffLines(oldContent, newContent)) {
+              if (change.added) additions += change.count || 0
+              if (change.removed) deletions += change.count || 0
+            }
+
+            const movePath = hunk.move_path ? path.resolve(Instance.directory, hunk.move_path) : undefined
+            yield* assertExternalDirectoryEffect(ctx, movePath)
+
+            fileChanges.push({
+              filePath,
+              oldContent,
+              newContent,
+              type: hunk.move_path ? "move" : "update",
+              movePath,
+              diff,
+              additions,
+              deletions,
+            })
+
+            totalDiff += diff + "\n"
+            break
+          }
 
-          // Apply the update chunks to get new content
-          try {
-            const fileUpdate = Patch.deriveNewContentsFromChunks(filePath, hunk.chunks)
-            newContent = fileUpdate.content
-          } catch (error) {
-            throw new Error(`apply_patch verification failed: ${error}`)
+          case "delete": {
+            const contentToDelete = yield* afs
+              .readFileString(filePath)
+              .pipe(Effect.catch((error) => Effect.fail(new Error(`apply_patch verification failed: ${error}`))))
+            const deleteDiff = trimDiff(createTwoFilesPatch(filePath, filePath, contentToDelete, ""))
+
+            const deletions = contentToDelete.split("\n").length
+
+            fileChanges.push({
+              filePath,
+              oldContent: contentToDelete,
+              newContent: "",
+              type: "delete",
+              diff: deleteDiff,
+              additions: 0,
+              deletions,
+            })
+
+            totalDiff += deleteDiff + "\n"
+            break
           }
+        }
+      }
 
-          const diff = trimDiff(createTwoFilesPatch(filePath, filePath, oldContent, newContent))
+      // Build per-file metadata for UI rendering (used for both permission and result)
+      const files = fileChanges.map((change) => ({
+        filePath: change.filePath,
+        relativePath: path.relative(Instance.worktree, change.movePath ?? change.filePath).replaceAll("\\", "/"),
+        type: change.type,
+        patch: change.diff,
+        additions: change.additions,
+        deletions: change.deletions,
+        movePath: change.movePath,
+      }))
+
+      // Check permissions if needed
+      const relativePaths = fileChanges.map((c) => path.relative(Instance.worktree, c.filePath).replaceAll("\\", "/"))
+      yield* Effect.promise(() =>
+        ctx.ask({
+          permission: "edit",
+          patterns: relativePaths,
+          always: ["*"],
+          metadata: {
+            filepath: relativePaths.join(", "),
+            diff: totalDiff,
+            files,
+          },
+        }),
+      )
+
+      // Apply the changes
+      const updates: Array<{ file: string; event: "add" | "change" | "unlink" }> = []
+
+      for (const change of fileChanges) {
+        const edited = change.type === "delete" ? undefined : (change.movePath ?? change.filePath)
+        switch (change.type) {
+          case "add":
+            // Create parent directories (recursive: true is safe on existing/root dirs)
 
-          let additions = 0
-          let deletions = 0
-          for (const change of diffLines(oldContent, newContent)) {
-            if (change.added) additions += change.count || 0
-            if (change.removed) deletions += change.count || 0
-          }
+            yield* afs.writeWithDirs(change.filePath, change.newContent)
+            updates.push({ file: change.filePath, event: "add" })
+            break
 
-          const movePath = hunk.move_path ? path.resolve(Instance.directory, hunk.move_path) : undefined
-          await assertExternalDirectory(ctx, movePath)
-
-          fileChanges.push({
-            filePath,
-            oldContent,
-            newContent,
-            type: hunk.move_path ? "move" : "update",
-            movePath,
-            diff,
-            additions,
-            deletions,
-          })
-
-          totalDiff += diff + "\n"
-          break
-        }
+          case "update":
+            yield* afs.writeWithDirs(change.filePath, change.newContent)
+            updates.push({ file: change.filePath, event: "change" })
+            break
 
-        case "delete": {
-          const contentToDelete = await fs.readFile(filePath, "utf-8").catch((error) => {
-            throw new Error(`apply_patch verification failed: ${error}`)
-          })
-          const deleteDiff = trimDiff(createTwoFilesPatch(filePath, filePath, contentToDelete, ""))
-
-          const deletions = contentToDelete.split("\n").length
-
-          fileChanges.push({
-            filePath,
-            oldContent: contentToDelete,
-            newContent: "",
-            type: "delete",
-            diff: deleteDiff,
-            additions: 0,
-            deletions,
-          })
-
-          totalDiff += deleteDiff + "\n"
-          break
-        }
-      }
-    }
+          case "move":
+            if (change.movePath) {
+              // Create parent directories (recursive: true is safe on existing/root dirs)
 
-    // Build per-file metadata for UI rendering (used for both permission and result)
-    const files = fileChanges.map((change) => ({
-      filePath: change.filePath,
-      relativePath: path.relative(Instance.worktree, change.movePath ?? change.filePath).replaceAll("\\", "/"),
-      type: change.type,
-      patch: change.diff,
-      additions: change.additions,
-      deletions: change.deletions,
-      movePath: change.movePath,
-    }))
-
-    // Check permissions if needed
-    const relativePaths = fileChanges.map((c) => path.relative(Instance.worktree, c.filePath).replaceAll("\\", "/"))
-    await ctx.ask({
-      permission: "edit",
-      patterns: relativePaths,
-      always: ["*"],
-      metadata: {
-        filepath: relativePaths.join(", "),
-        diff: totalDiff,
-        files,
-      },
-    })
+              yield* afs.writeWithDirs(change.movePath!, change.newContent)
+              yield* afs.remove(change.filePath)
+              updates.push({ file: change.filePath, event: "unlink" })
+              updates.push({ file: change.movePath, event: "add" })
+            }
+            break
 
-    // Apply the changes
-    const updates: Array<{ file: string; event: "add" | "change" | "unlink" }> = []
-
-    for (const change of fileChanges) {
-      const edited = change.type === "delete" ? undefined : (change.movePath ?? change.filePath)
-      switch (change.type) {
-        case "add":
-          // Create parent directories (recursive: true is safe on existing/root dirs)
-          await fs.mkdir(path.dirname(change.filePath), { recursive: true })
-          await fs.writeFile(change.filePath, change.newContent, "utf-8")
-          updates.push({ file: change.filePath, event: "add" })
-          break
-
-        case "update":
-          await fs.writeFile(change.filePath, change.newContent, "utf-8")
-          updates.push({ file: change.filePath, event: "change" })
-          break
-
-        case "move":
-          if (change.movePath) {
-            // Create parent directories (recursive: true is safe on existing/root dirs)
-            await fs.mkdir(path.dirname(change.movePath), { recursive: true })
-            await fs.writeFile(change.movePath, change.newContent, "utf-8")
-            await fs.unlink(change.filePath)
+          case "delete":
+            yield* afs.remove(change.filePath)
             updates.push({ file: change.filePath, event: "unlink" })
-            updates.push({ file: change.movePath, event: "add" })
-          }
-          break
+            break
+        }
 
-        case "delete":
-          await fs.unlink(change.filePath)
-          updates.push({ file: change.filePath, event: "unlink" })
-          break
+        if (edited) {
+          yield* format.file(edited)
+          Bus.publish(File.Event.Edited, { file: edited })
+        }
       }
 
-      if (edited) {
-        await Format.file(edited)
-        Bus.publish(File.Event.Edited, { file: edited })
+      // Publish file change events
+      for (const update of updates) {
+        Bus.publish(FileWatcher.Event.Updated, update)
       }
-    }
-
-    // Publish file change events
-    for (const update of updates) {
-      await Bus.publish(FileWatcher.Event.Updated, update)
-    }
 
-    // Notify LSP of file changes and collect diagnostics
-    for (const change of fileChanges) {
-      if (change.type === "delete") continue
-      const target = change.movePath ?? change.filePath
-      await LSP.touchFile(target, true)
-    }
-    const diagnostics = await LSP.diagnostics()
+      // Notify LSP of file changes and collect diagnostics
+      for (const change of fileChanges) {
+        if (change.type === "delete") continue
+        const target = change.movePath ?? change.filePath
+        yield* lsp.touchFile(target, true)
+      }
+      const diagnostics = yield* lsp.diagnostics()
 
-    // Generate output summary
-    const summaryLines = fileChanges.map((change) => {
-      if (change.type === "add") {
-        return `A ${path.relative(Instance.worktree, change.filePath).replaceAll("\\", "/")}`
+      // Generate output summary
+      const summaryLines = fileChanges.map((change) => {
+        if (change.type === "add") {
+          return `A ${path.relative(Instance.worktree, change.filePath).replaceAll("\\", "/")}`
+        }
+        if (change.type === "delete") {
+          return `D ${path.relative(Instance.worktree, change.filePath).replaceAll("\\", "/")}`
+        }
+        const target = change.movePath ?? change.filePath
+        return `M ${path.relative(Instance.worktree, target).replaceAll("\\", "/")}`
+      })
+      let output = `Success. Updated the following files:\n${summaryLines.join("\n")}`
+
+      // Report LSP errors for changed files
+      const MAX_DIAGNOSTICS_PER_FILE = 20
+      for (const change of fileChanges) {
+        if (change.type === "delete") continue
+        const target = change.movePath ?? change.filePath
+        const normalized = AppFileSystem.normalizePath(target)
+        const issues = diagnostics[normalized] ?? []
+        const errors = issues.filter((item) => item.severity === 1)
+        if (errors.length > 0) {
+          const limited = errors.slice(0, MAX_DIAGNOSTICS_PER_FILE)
+          const suffix =
+            errors.length > MAX_DIAGNOSTICS_PER_FILE ? `\n... and ${errors.length - MAX_DIAGNOSTICS_PER_FILE} more` : ""
+          output += `\n\nLSP errors detected in ${path.relative(Instance.worktree, target).replaceAll("\\", "/")}, please fix:\n<diagnostics file="${target}">\n${limited.map(LSP.Diagnostic.pretty).join("\n")}${suffix}\n</diagnostics>`
+        }
       }
-      if (change.type === "delete") {
-        return `D ${path.relative(Instance.worktree, change.filePath).replaceAll("\\", "/")}`
+
+      return {
+        title: output,
+        metadata: {
+          diff: totalDiff,
+          files,
+          diagnostics,
+        },
+        output,
       }
-      const target = change.movePath ?? change.filePath
-      return `M ${path.relative(Instance.worktree, target).replaceAll("\\", "/")}`
     })
-    let output = `Success. Updated the following files:\n${summaryLines.join("\n")}`
-
-    // Report LSP errors for changed files
-    const MAX_DIAGNOSTICS_PER_FILE = 20
-    for (const change of fileChanges) {
-      if (change.type === "delete") continue
-      const target = change.movePath ?? change.filePath
-      const normalized = Filesystem.normalizePath(target)
-      const issues = diagnostics[normalized] ?? []
-      const errors = issues.filter((item) => item.severity === 1)
-      if (errors.length > 0) {
-        const limited = errors.slice(0, MAX_DIAGNOSTICS_PER_FILE)
-        const suffix =
-          errors.length > MAX_DIAGNOSTICS_PER_FILE ? `\n... and ${errors.length - MAX_DIAGNOSTICS_PER_FILE} more` : ""
-        output += `\n\nLSP errors detected in ${path.relative(Instance.worktree, target).replaceAll("\\", "/")}, please fix:\n<diagnostics file="${target}">\n${limited.map(LSP.Diagnostic.pretty).join("\n")}${suffix}\n</diagnostics>`
-      }
-    }
 
     return {
-      title: output,
-      metadata: {
-        diff: totalDiff,
-        files,
-        diagnostics,
+      description: DESCRIPTION,
+      parameters: PatchParams,
+      async execute(params: z.infer<typeof PatchParams>, ctx) {
+        return Effect.runPromise(run(params, ctx).pipe(Effect.orDie))
       },
-      output,
     }
-  },
-})
+  }),
+)

+ 149 - 123
packages/opencode/src/tool/edit.ts

@@ -5,6 +5,7 @@
 
 import z from "zod"
 import * as path from "path"
+import { Effect } from "effect"
 import { Tool } from "./tool"
 import { LSP } from "../lsp"
 import { createTwoFilesPatch, diffLines } from "diff"
@@ -17,7 +18,7 @@ import { FileTime } from "../file/time"
 import { Filesystem } from "../util/filesystem"
 import { Instance } from "../project/instance"
 import { Snapshot } from "@/snapshot"
-import { assertExternalDirectory } from "./external-directory"
+import { assertExternalDirectoryEffect } from "./external-directory"
 
 const MAX_DIAGNOSTICS_PER_FILE = 20
 
@@ -34,136 +35,161 @@ function convertToLineEnding(text: string, ending: "\n" | "\r\n"): string {
   return text.replaceAll("\n", "\r\n")
 }
 
-export const EditTool = Tool.define("edit", {
-  description: DESCRIPTION,
-  parameters: z.object({
-    filePath: z.string().describe("The absolute path to the file to modify"),
-    oldString: z.string().describe("The text to replace"),
-    newString: z.string().describe("The text to replace it with (must be different from oldString)"),
-    replaceAll: z.boolean().optional().describe("Replace all occurrences of oldString (default false)"),
-  }),
-  async execute(params, ctx) {
-    if (!params.filePath) {
-      throw new Error("filePath is required")
-    }
+const Parameters = z.object({
+  filePath: z.string().describe("The absolute path to the file to modify"),
+  oldString: z.string().describe("The text to replace"),
+  newString: z.string().describe("The text to replace it with (must be different from oldString)"),
+  replaceAll: z.boolean().optional().describe("Replace all occurrences of oldString (default false)"),
+})
 
-    if (params.oldString === params.newString) {
-      throw new Error("No changes to apply: oldString and newString are identical.")
-    }
+export const EditTool = Tool.defineEffect(
+  "edit",
+  Effect.gen(function* () {
+    const lsp = yield* LSP.Service
+    const filetime = yield* FileTime.Service
 
-    const filePath = path.isAbsolute(params.filePath) ? params.filePath : path.join(Instance.directory, params.filePath)
-    await assertExternalDirectory(ctx, filePath)
-
-    let diff = ""
-    let contentOld = ""
-    let contentNew = ""
-    await FileTime.withLock(filePath, async () => {
-      if (params.oldString === "") {
-        const existed = await Filesystem.exists(filePath)
-        contentNew = params.newString
-        diff = trimDiff(createTwoFilesPatch(filePath, filePath, contentOld, contentNew))
-        await ctx.ask({
-          permission: "edit",
-          patterns: [path.relative(Instance.worktree, filePath)],
-          always: ["*"],
-          metadata: {
-            filepath: filePath,
-            diff,
-          },
-        })
-        await Filesystem.write(filePath, params.newString)
-        await Format.file(filePath)
-        Bus.publish(File.Event.Edited, { file: filePath })
-        await Bus.publish(FileWatcher.Event.Updated, {
-          file: filePath,
-          event: existed ? "change" : "add",
-        })
-        await FileTime.read(ctx.sessionID, filePath)
-        return
-      }
+    return {
+      description: DESCRIPTION,
+      parameters: Parameters,
+      execute: (params: z.infer<typeof Parameters>, ctx: Tool.Context) =>
+        Effect.gen(function* () {
+          if (!params.filePath) {
+            throw new Error("filePath is required")
+          }
 
-      const stats = Filesystem.stat(filePath)
-      if (!stats) throw new Error(`File ${filePath} not found`)
-      if (stats.isDirectory()) throw new Error(`Path is a directory, not a file: ${filePath}`)
-      await FileTime.assert(ctx.sessionID, filePath)
-      contentOld = await Filesystem.readText(filePath)
-
-      const ending = detectLineEnding(contentOld)
-      const old = convertToLineEnding(normalizeLineEndings(params.oldString), ending)
-      const next = convertToLineEnding(normalizeLineEndings(params.newString), ending)
-
-      contentNew = replace(contentOld, old, next, params.replaceAll)
-
-      diff = trimDiff(
-        createTwoFilesPatch(filePath, filePath, normalizeLineEndings(contentOld), normalizeLineEndings(contentNew)),
-      )
-      await ctx.ask({
-        permission: "edit",
-        patterns: [path.relative(Instance.worktree, filePath)],
-        always: ["*"],
-        metadata: {
-          filepath: filePath,
-          diff,
-        },
-      })
-
-      await Filesystem.write(filePath, contentNew)
-      await Format.file(filePath)
-      Bus.publish(File.Event.Edited, { file: filePath })
-      await Bus.publish(FileWatcher.Event.Updated, {
-        file: filePath,
-        event: "change",
-      })
-      contentNew = await Filesystem.readText(filePath)
-      diff = trimDiff(
-        createTwoFilesPatch(filePath, filePath, normalizeLineEndings(contentOld), normalizeLineEndings(contentNew)),
-      )
-      await FileTime.read(ctx.sessionID, filePath)
-    })
+          if (params.oldString === params.newString) {
+            throw new Error("No changes to apply: oldString and newString are identical.")
+          }
 
-    const filediff: Snapshot.FileDiff = {
-      file: filePath,
-      patch: diff,
-      additions: 0,
-      deletions: 0,
-    }
-    for (const change of diffLines(contentOld, contentNew)) {
-      if (change.added) filediff.additions += change.count || 0
-      if (change.removed) filediff.deletions += change.count || 0
-    }
+          const filePath = path.isAbsolute(params.filePath)
+            ? params.filePath
+            : path.join(Instance.directory, params.filePath)
+          yield* assertExternalDirectoryEffect(ctx, filePath)
+
+          let diff = ""
+          let contentOld = ""
+          let contentNew = ""
+          yield* filetime.withLock(filePath, async () => {
+            if (params.oldString === "") {
+              const existed = await Filesystem.exists(filePath)
+              contentNew = params.newString
+              diff = trimDiff(createTwoFilesPatch(filePath, filePath, contentOld, contentNew))
+              await ctx.ask({
+                permission: "edit",
+                patterns: [path.relative(Instance.worktree, filePath)],
+                always: ["*"],
+                metadata: {
+                  filepath: filePath,
+                  diff,
+                },
+              })
+              await Filesystem.write(filePath, params.newString)
+              await Format.file(filePath)
+              Bus.publish(File.Event.Edited, { file: filePath })
+              await Bus.publish(FileWatcher.Event.Updated, {
+                file: filePath,
+                event: existed ? "change" : "add",
+              })
+              await FileTime.read(ctx.sessionID, filePath)
+              return
+            }
 
-    ctx.metadata({
-      metadata: {
-        diff,
-        filediff,
-        diagnostics: {},
-      },
-    })
+            const stats = Filesystem.stat(filePath)
+            if (!stats) throw new Error(`File ${filePath} not found`)
+            if (stats.isDirectory()) throw new Error(`Path is a directory, not a file: ${filePath}`)
+            await FileTime.assert(ctx.sessionID, filePath)
+            contentOld = await Filesystem.readText(filePath)
+
+            const ending = detectLineEnding(contentOld)
+            const old = convertToLineEnding(normalizeLineEndings(params.oldString), ending)
+            const next = convertToLineEnding(normalizeLineEndings(params.newString), ending)
+
+            contentNew = replace(contentOld, old, next, params.replaceAll)
+
+            diff = trimDiff(
+              createTwoFilesPatch(
+                filePath,
+                filePath,
+                normalizeLineEndings(contentOld),
+                normalizeLineEndings(contentNew),
+              ),
+            )
+            await ctx.ask({
+              permission: "edit",
+              patterns: [path.relative(Instance.worktree, filePath)],
+              always: ["*"],
+              metadata: {
+                filepath: filePath,
+                diff,
+              },
+            })
+
+            await Filesystem.write(filePath, contentNew)
+            await Format.file(filePath)
+            Bus.publish(File.Event.Edited, { file: filePath })
+            await Bus.publish(FileWatcher.Event.Updated, {
+              file: filePath,
+              event: "change",
+            })
+            contentNew = await Filesystem.readText(filePath)
+            diff = trimDiff(
+              createTwoFilesPatch(
+                filePath,
+                filePath,
+                normalizeLineEndings(contentOld),
+                normalizeLineEndings(contentNew),
+              ),
+            )
+            await FileTime.read(ctx.sessionID, filePath)
+          })
+
+          const filediff: Snapshot.FileDiff = {
+            file: filePath,
+            patch: diff,
+            additions: 0,
+            deletions: 0,
+          }
+          for (const change of diffLines(contentOld, contentNew)) {
+            if (change.added) filediff.additions += change.count || 0
+            if (change.removed) filediff.deletions += change.count || 0
+          }
 
-    let output = "Edit applied successfully."
-    await LSP.touchFile(filePath, true)
-    const diagnostics = await LSP.diagnostics()
-    const normalizedFilePath = Filesystem.normalizePath(filePath)
-    const issues = diagnostics[normalizedFilePath] ?? []
-    const errors = issues.filter((item) => item.severity === 1)
-    if (errors.length > 0) {
-      const limited = errors.slice(0, MAX_DIAGNOSTICS_PER_FILE)
-      const suffix =
-        errors.length > MAX_DIAGNOSTICS_PER_FILE ? `\n... and ${errors.length - MAX_DIAGNOSTICS_PER_FILE} more` : ""
-      output += `\n\nLSP errors detected in this file, please fix:\n<diagnostics file="${filePath}">\n${limited.map(LSP.Diagnostic.pretty).join("\n")}${suffix}\n</diagnostics>`
-    }
+          ctx.metadata({
+            metadata: {
+              diff,
+              filediff,
+              diagnostics: {},
+            },
+          })
+
+          let output = "Edit applied successfully."
+          yield* lsp.touchFile(filePath, true)
+          const diagnostics = yield* lsp.diagnostics()
+          const normalizedFilePath = Filesystem.normalizePath(filePath)
+          const issues = diagnostics[normalizedFilePath] ?? []
+          const errors = issues.filter((item) => item.severity === 1)
+          if (errors.length > 0) {
+            const limited = errors.slice(0, MAX_DIAGNOSTICS_PER_FILE)
+            const suffix =
+              errors.length > MAX_DIAGNOSTICS_PER_FILE
+                ? `\n... and ${errors.length - MAX_DIAGNOSTICS_PER_FILE} more`
+                : ""
+            output += `\n\nLSP errors detected in this file, please fix:\n<diagnostics file="${filePath}">\n${limited.map(LSP.Diagnostic.pretty).join("\n")}${suffix}\n</diagnostics>`
+          }
 
-    return {
-      metadata: {
-        diagnostics,
-        diff,
-        filediff,
-      },
-      title: `${path.relative(Instance.worktree, filePath)}`,
-      output,
+          return {
+            metadata: {
+              diagnostics,
+              diff,
+              filediff,
+            },
+            title: `${path.relative(Instance.worktree, filePath)}`,
+            output,
+          }
+        }).pipe(Effect.orDie, Effect.runPromise),
     }
-  },
-})
+  }),
+)
 
 export type Replacer = (content: string, find: string) => Generator<string, void, unknown>
 

+ 163 - 142
packages/opencode/src/tool/grep.ts

@@ -1,156 +1,177 @@
 import z from "zod"
-import { text } from "node:stream/consumers"
+import { Effect } from "effect"
+import * as Stream from "effect/Stream"
 import { Tool } from "./tool"
 import { Filesystem } from "../util/filesystem"
 import { Ripgrep } from "../file/ripgrep"
-import { Process } from "../util/process"
+import { ChildProcess } from "effect/unstable/process"
+import { ChildProcessSpawner } from "effect/unstable/process/ChildProcessSpawner"
 
 import DESCRIPTION from "./grep.txt"
 import { Instance } from "../project/instance"
 import path from "path"
-import { assertExternalDirectory } from "./external-directory"
+import { assertExternalDirectoryEffect } from "./external-directory"
 
 const MAX_LINE_LENGTH = 2000
 
-export const GrepTool = Tool.define("grep", {
-  description: DESCRIPTION,
-  parameters: z.object({
-    pattern: z.string().describe("The regex pattern to search for in file contents"),
-    path: z.string().optional().describe("The directory to search in. Defaults to the current working directory."),
-    include: z.string().optional().describe('File pattern to include in the search (e.g. "*.js", "*.{ts,tsx}")'),
-  }),
-  async execute(params, ctx) {
-    if (!params.pattern) {
-      throw new Error("pattern is required")
-    }
-
-    await ctx.ask({
-      permission: "grep",
-      patterns: [params.pattern],
-      always: ["*"],
-      metadata: {
-        pattern: params.pattern,
-        path: params.path,
-        include: params.include,
-      },
-    })
-
-    let searchPath = params.path ?? Instance.directory
-    searchPath = path.isAbsolute(searchPath) ? searchPath : path.resolve(Instance.directory, searchPath)
-    await assertExternalDirectory(ctx, searchPath, { kind: "directory" })
-
-    const rgPath = await Ripgrep.filepath()
-    const args = ["-nH", "--hidden", "--no-messages", "--field-match-separator=|", "--regexp", params.pattern]
-    if (params.include) {
-      args.push("--glob", params.include)
-    }
-    args.push(searchPath)
-
-    const proc = Process.spawn([rgPath, ...args], {
-      stdout: "pipe",
-      stderr: "pipe",
-      abort: ctx.abort,
-    })
-
-    if (!proc.stdout || !proc.stderr) {
-      throw new Error("Process output not available")
-    }
-
-    const output = await text(proc.stdout)
-    const errorOutput = await text(proc.stderr)
-    const exitCode = await proc.exited
-
-    // Exit codes: 0 = matches found, 1 = no matches, 2 = errors (but may still have matches)
-    // With --no-messages, we suppress error output but still get exit code 2 for broken symlinks etc.
-    // Only fail if exit code is 2 AND no output was produced
-    if (exitCode === 1 || (exitCode === 2 && !output.trim())) {
-      return {
-        title: params.pattern,
-        metadata: { matches: 0, truncated: false },
-        output: "No files found",
-      }
-    }
-
-    if (exitCode !== 0 && exitCode !== 2) {
-      throw new Error(`ripgrep failed: ${errorOutput}`)
-    }
-
-    const hasErrors = exitCode === 2
-
-    // Handle both Unix (\n) and Windows (\r\n) line endings
-    const lines = output.trim().split(/\r?\n/)
-    const matches = []
-
-    for (const line of lines) {
-      if (!line) continue
-
-      const [filePath, lineNumStr, ...lineTextParts] = line.split("|")
-      if (!filePath || !lineNumStr || lineTextParts.length === 0) continue
-
-      const lineNum = parseInt(lineNumStr, 10)
-      const lineText = lineTextParts.join("|")
-
-      const stats = Filesystem.stat(filePath)
-      if (!stats) continue
-
-      matches.push({
-        path: filePath,
-        modTime: stats.mtime.getTime(),
-        lineNum,
-        lineText,
-      })
-    }
-
-    matches.sort((a, b) => b.modTime - a.modTime)
-
-    const limit = 100
-    const truncated = matches.length > limit
-    const finalMatches = truncated ? matches.slice(0, limit) : matches
-
-    if (finalMatches.length === 0) {
-      return {
-        title: params.pattern,
-        metadata: { matches: 0, truncated: false },
-        output: "No files found",
-      }
-    }
-
-    const totalMatches = matches.length
-    const outputLines = [`Found ${totalMatches} matches${truncated ? ` (showing first ${limit})` : ""}`]
-
-    let currentFile = ""
-    for (const match of finalMatches) {
-      if (currentFile !== match.path) {
-        if (currentFile !== "") {
-          outputLines.push("")
-        }
-        currentFile = match.path
-        outputLines.push(`${match.path}:`)
-      }
-      const truncatedLineText =
-        match.lineText.length > MAX_LINE_LENGTH ? match.lineText.substring(0, MAX_LINE_LENGTH) + "..." : match.lineText
-      outputLines.push(`  Line ${match.lineNum}: ${truncatedLineText}`)
-    }
-
-    if (truncated) {
-      outputLines.push("")
-      outputLines.push(
-        `(Results truncated: showing ${limit} of ${totalMatches} matches (${totalMatches - limit} hidden). Consider using a more specific path or pattern.)`,
-      )
-    }
-
-    if (hasErrors) {
-      outputLines.push("")
-      outputLines.push("(Some paths were inaccessible and skipped)")
-    }
+export const GrepTool = Tool.defineEffect(
+  "grep",
+  Effect.gen(function* () {
+    const spawner = yield* ChildProcessSpawner
 
     return {
-      title: params.pattern,
-      metadata: {
-        matches: totalMatches,
-        truncated,
-      },
-      output: outputLines.join("\n"),
+      description: DESCRIPTION,
+      parameters: z.object({
+        pattern: z.string().describe("The regex pattern to search for in file contents"),
+        path: z.string().optional().describe("The directory to search in. Defaults to the current working directory."),
+        include: z.string().optional().describe('File pattern to include in the search (e.g. "*.js", "*.{ts,tsx}")'),
+      }),
+      execute: (params: { pattern: string; path?: string; include?: string }, ctx: Tool.Context) =>
+        Effect.gen(function* () {
+          if (!params.pattern) {
+            throw new Error("pattern is required")
+          }
+
+          yield* Effect.promise(() =>
+            ctx.ask({
+              permission: "grep",
+              patterns: [params.pattern],
+              always: ["*"],
+              metadata: {
+                pattern: params.pattern,
+                path: params.path,
+                include: params.include,
+              },
+            }),
+          )
+
+          let searchPath = params.path ?? Instance.directory
+          searchPath = path.isAbsolute(searchPath) ? searchPath : path.resolve(Instance.directory, searchPath)
+          yield* assertExternalDirectoryEffect(ctx, searchPath, { kind: "directory" })
+
+          const rgPath = yield* Effect.promise(() => Ripgrep.filepath())
+          const args = ["-nH", "--hidden", "--no-messages", "--field-match-separator=|", "--regexp", params.pattern]
+          if (params.include) {
+            args.push("--glob", params.include)
+          }
+          args.push(searchPath)
+
+          const result = yield* Effect.scoped(
+            Effect.gen(function* () {
+              const handle = yield* spawner.spawn(
+                ChildProcess.make(rgPath, args, {
+                  stdin: "ignore",
+                }),
+              )
+
+              const [output, errorOutput] = yield* Effect.all(
+                [Stream.mkString(Stream.decodeText(handle.stdout)), Stream.mkString(Stream.decodeText(handle.stderr))],
+                { concurrency: 2 },
+              )
+
+              const exitCode = yield* handle.exitCode
+
+              return { output, errorOutput, exitCode }
+            }),
+          )
+
+          const { output, errorOutput, exitCode } = result
+
+          // Exit codes: 0 = matches found, 1 = no matches, 2 = errors (but may still have matches)
+          // With --no-messages, we suppress error output but still get exit code 2 for broken symlinks etc.
+          // Only fail if exit code is 2 AND no output was produced
+          if (exitCode === 1 || (exitCode === 2 && !output.trim())) {
+            return {
+              title: params.pattern,
+              metadata: { matches: 0, truncated: false },
+              output: "No files found",
+            }
+          }
+
+          if (exitCode !== 0 && exitCode !== 2) {
+            throw new Error(`ripgrep failed: ${errorOutput}`)
+          }
+
+          const hasErrors = exitCode === 2
+
+          // Handle both Unix (\n) and Windows (\r\n) line endings
+          const lines = output.trim().split(/\r?\n/)
+          const matches = []
+
+          for (const line of lines) {
+            if (!line) continue
+
+            const [filePath, lineNumStr, ...lineTextParts] = line.split("|")
+            if (!filePath || !lineNumStr || lineTextParts.length === 0) continue
+
+            const lineNum = parseInt(lineNumStr, 10)
+            const lineText = lineTextParts.join("|")
+
+            const stats = Filesystem.stat(filePath)
+            if (!stats) continue
+
+            matches.push({
+              path: filePath,
+              modTime: stats.mtime.getTime(),
+              lineNum,
+              lineText,
+            })
+          }
+
+          matches.sort((a, b) => b.modTime - a.modTime)
+
+          const limit = 100
+          const truncated = matches.length > limit
+          const finalMatches = truncated ? matches.slice(0, limit) : matches
+
+          if (finalMatches.length === 0) {
+            return {
+              title: params.pattern,
+              metadata: { matches: 0, truncated: false },
+              output: "No files found",
+            }
+          }
+
+          const totalMatches = matches.length
+          const outputLines = [`Found ${totalMatches} matches${truncated ? ` (showing first ${limit})` : ""}`]
+
+          let currentFile = ""
+          for (const match of finalMatches) {
+            if (currentFile !== match.path) {
+              if (currentFile !== "") {
+                outputLines.push("")
+              }
+              currentFile = match.path
+              outputLines.push(`${match.path}:`)
+            }
+            const truncatedLineText =
+              match.lineText.length > MAX_LINE_LENGTH
+                ? match.lineText.substring(0, MAX_LINE_LENGTH) + "..."
+                : match.lineText
+            outputLines.push(`  Line ${match.lineNum}: ${truncatedLineText}`)
+          }
+
+          if (truncated) {
+            outputLines.push("")
+            outputLines.push(
+              `(Results truncated: showing ${limit} of ${totalMatches} matches (${totalMatches - limit} hidden). Consider using a more specific path or pattern.)`,
+            )
+          }
+
+          if (hasErrors) {
+            outputLines.push("")
+            outputLines.push("(Some paths were inaccessible and skipped)")
+          }
+
+          return {
+            title: params.pattern,
+            metadata: {
+              matches: totalMatches,
+              truncated,
+            },
+            output: outputLines.join("\n"),
+          }
+        }).pipe(Effect.orDie, Effect.runPromise),
     }
-  },
-})
+  }),
+)

+ 97 - 82
packages/opencode/src/tool/ls.ts

@@ -1,10 +1,12 @@
 import z from "zod"
+import { Effect } from "effect"
+import * as Stream from "effect/Stream"
 import { Tool } from "./tool"
 import * as path from "path"
 import DESCRIPTION from "./ls.txt"
 import { Instance } from "../project/instance"
 import { Ripgrep } from "../file/ripgrep"
-import { assertExternalDirectory } from "./external-directory"
+import { assertExternalDirectoryEffect } from "./external-directory"
 
 export const IGNORE_PATTERNS = [
   "node_modules/",
@@ -35,87 +37,100 @@ export const IGNORE_PATTERNS = [
 
 const LIMIT = 100
 
-export const ListTool = Tool.define("list", {
-  description: DESCRIPTION,
-  parameters: z.object({
-    path: z.string().describe("The absolute path to the directory to list (must be absolute, not relative)").optional(),
-    ignore: z.array(z.string()).describe("List of glob patterns to ignore").optional(),
-  }),
-  async execute(params, ctx) {
-    const searchPath = path.resolve(Instance.directory, params.path || ".")
-    await assertExternalDirectory(ctx, searchPath, { kind: "directory" })
-
-    await ctx.ask({
-      permission: "list",
-      patterns: [searchPath],
-      always: ["*"],
-      metadata: {
-        path: searchPath,
-      },
-    })
-
-    const ignoreGlobs = IGNORE_PATTERNS.map((p) => `!${p}*`).concat(params.ignore?.map((p) => `!${p}`) || [])
-    const files = []
-    for await (const file of Ripgrep.files({ cwd: searchPath, glob: ignoreGlobs, signal: ctx.abort })) {
-      files.push(file)
-      if (files.length >= LIMIT) break
-    }
-
-    // Build directory structure
-    const dirs = new Set<string>()
-    const filesByDir = new Map<string, string[]>()
-
-    for (const file of files) {
-      const dir = path.dirname(file)
-      const parts = dir === "." ? [] : dir.split("/")
-
-      // Add all parent directories
-      for (let i = 0; i <= parts.length; i++) {
-        const dirPath = i === 0 ? "." : parts.slice(0, i).join("/")
-        dirs.add(dirPath)
-      }
-
-      // Add file to its directory
-      if (!filesByDir.has(dir)) filesByDir.set(dir, [])
-      filesByDir.get(dir)!.push(path.basename(file))
-    }
-
-    function renderDir(dirPath: string, depth: number): string {
-      const indent = "  ".repeat(depth)
-      let output = ""
-
-      if (depth > 0) {
-        output += `${indent}${path.basename(dirPath)}/\n`
-      }
-
-      const childIndent = "  ".repeat(depth + 1)
-      const children = Array.from(dirs)
-        .filter((d) => path.dirname(d) === dirPath && d !== dirPath)
-        .sort()
-
-      // Render subdirectories first
-      for (const child of children) {
-        output += renderDir(child, depth + 1)
-      }
-
-      // Render files
-      const files = filesByDir.get(dirPath) || []
-      for (const file of files.sort()) {
-        output += `${childIndent}${file}\n`
-      }
-
-      return output
-    }
-
-    const output = `${searchPath}/\n` + renderDir(".", 0)
+export const ListTool = Tool.defineEffect(
+  "list",
+  Effect.gen(function* () {
+    const rg = yield* Ripgrep.Service
 
     return {
-      title: path.relative(Instance.worktree, searchPath),
-      metadata: {
-        count: files.length,
-        truncated: files.length >= LIMIT,
-      },
-      output,
+      description: DESCRIPTION,
+      parameters: z.object({
+        path: z
+          .string()
+          .describe("The absolute path to the directory to list (must be absolute, not relative)")
+          .optional(),
+        ignore: z.array(z.string()).describe("List of glob patterns to ignore").optional(),
+      }),
+      execute: (params: { path?: string; ignore?: string[] }, ctx: Tool.Context) =>
+        Effect.gen(function* () {
+          const searchPath = path.resolve(Instance.directory, params.path || ".")
+          yield* assertExternalDirectoryEffect(ctx, searchPath, { kind: "directory" })
+
+          yield* Effect.promise(() =>
+            ctx.ask({
+              permission: "list",
+              patterns: [searchPath],
+              always: ["*"],
+              metadata: {
+                path: searchPath,
+              },
+            }),
+          )
+
+          const ignoreGlobs = IGNORE_PATTERNS.map((p) => `!${p}*`).concat(params.ignore?.map((p) => `!${p}`) || [])
+          const files = yield* rg.files({ cwd: searchPath, glob: ignoreGlobs }).pipe(
+            Stream.take(LIMIT),
+            Stream.runCollect,
+            Effect.map((chunk) => [...chunk]),
+          )
+
+          // Build directory structure
+          const dirs = new Set<string>()
+          const filesByDir = new Map<string, string[]>()
+
+          for (const file of files) {
+            const dir = path.dirname(file)
+            const parts = dir === "." ? [] : dir.split("/")
+
+            // Add all parent directories
+            for (let i = 0; i <= parts.length; i++) {
+              const dirPath = i === 0 ? "." : parts.slice(0, i).join("/")
+              dirs.add(dirPath)
+            }
+
+            // Add file to its directory
+            if (!filesByDir.has(dir)) filesByDir.set(dir, [])
+            filesByDir.get(dir)!.push(path.basename(file))
+          }
+
+          function renderDir(dirPath: string, depth: number): string {
+            const indent = "  ".repeat(depth)
+            let output = ""
+
+            if (depth > 0) {
+              output += `${indent}${path.basename(dirPath)}/\n`
+            }
+
+            const childIndent = "  ".repeat(depth + 1)
+            const children = Array.from(dirs)
+              .filter((d) => path.dirname(d) === dirPath && d !== dirPath)
+              .sort()
+
+            // Render subdirectories first
+            for (const child of children) {
+              output += renderDir(child, depth + 1)
+            }
+
+            // Render files
+            const files = filesByDir.get(dirPath) || []
+            for (const file of files.sort()) {
+              output += `${childIndent}${file}\n`
+            }
+
+            return output
+          }
+
+          const output = `${searchPath}/\n` + renderDir(".", 0)
+
+          return {
+            title: path.relative(Instance.worktree, searchPath),
+            metadata: {
+              count: files.length,
+              truncated: files.length >= LIMIT,
+            },
+            output,
+          }
+        }).pipe(Effect.orDie, Effect.runPromise),
     }
-  },
-})
+  }),
+)

+ 54 - 37
packages/opencode/src/tool/multiedit.ts

@@ -1,46 +1,63 @@
 import z from "zod"
+import { Effect } from "effect"
 import { Tool } from "./tool"
 import { EditTool } from "./edit"
 import DESCRIPTION from "./multiedit.txt"
 import path from "path"
 import { Instance } from "../project/instance"
 
-export const MultiEditTool = Tool.define("multiedit", {
-  description: DESCRIPTION,
-  parameters: z.object({
-    filePath: z.string().describe("The absolute path to the file to modify"),
-    edits: z
-      .array(
-        z.object({
-          filePath: z.string().describe("The absolute path to the file to modify"),
-          oldString: z.string().describe("The text to replace"),
-          newString: z.string().describe("The text to replace it with (must be different from oldString)"),
-          replaceAll: z.boolean().optional().describe("Replace all occurrences of oldString (default false)"),
-        }),
-      )
-      .describe("Array of edit operations to perform sequentially on the file"),
-  }),
-  async execute(params, ctx) {
-    const tool = await EditTool.init()
-    const results = []
-    for (const [, edit] of params.edits.entries()) {
-      const result = await tool.execute(
-        {
-          filePath: params.filePath,
-          oldString: edit.oldString,
-          newString: edit.newString,
-          replaceAll: edit.replaceAll,
-        },
-        ctx,
-      )
-      results.push(result)
-    }
+export const MultiEditTool = Tool.defineEffect(
+  "multiedit",
+  Effect.gen(function* () {
+    const editInfo = yield* EditTool
+    const edit = yield* Effect.promise(() => editInfo.init())
+
     return {
-      title: path.relative(Instance.worktree, params.filePath),
-      metadata: {
-        results: results.map((r) => r.metadata),
-      },
-      output: results.at(-1)!.output,
+      description: DESCRIPTION,
+      parameters: z.object({
+        filePath: z.string().describe("The absolute path to the file to modify"),
+        edits: z
+          .array(
+            z.object({
+              filePath: z.string().describe("The absolute path to the file to modify"),
+              oldString: z.string().describe("The text to replace"),
+              newString: z.string().describe("The text to replace it with (must be different from oldString)"),
+              replaceAll: z.boolean().optional().describe("Replace all occurrences of oldString (default false)"),
+            }),
+          )
+          .describe("Array of edit operations to perform sequentially on the file"),
+      }),
+      execute: (
+        params: {
+          filePath: string
+          edits: Array<{ filePath: string; oldString: string; newString: string; replaceAll?: boolean }>
+        },
+        ctx: Tool.Context,
+      ) =>
+        Effect.gen(function* () {
+          const results = []
+          for (const [, entry] of params.edits.entries()) {
+            const result = yield* Effect.promise(() =>
+              edit.execute(
+                {
+                  filePath: params.filePath,
+                  oldString: entry.oldString,
+                  newString: entry.newString,
+                  replaceAll: entry.replaceAll,
+                },
+                ctx,
+              ),
+            )
+            results.push(result)
+          }
+          return {
+            title: path.relative(Instance.worktree, params.filePath),
+            metadata: {
+              results: results.map((r) => r.metadata),
+            },
+            output: results.at(-1)!.output,
+          }
+        }).pipe(Effect.orDie, Effect.runPromise),
     }
-  },
-})
+  }),
+)

+ 13 - 5
packages/opencode/src/tool/registry.ts

@@ -34,6 +34,7 @@ import { FetchHttpClient, HttpClient } from "effect/unstable/http"
 import { ChildProcessSpawner } from "effect/unstable/process/ChildProcessSpawner"
 import * as CrossSpawnSpawner from "@/effect/cross-spawn-spawner"
 import { Ripgrep } from "../file/ripgrep"
+import { Format } from "../format"
 import { InstanceState } from "@/effect/instance-state"
 import { makeRuntime } from "@/effect/run-service"
 import { Env } from "../env"
@@ -91,6 +92,7 @@ export namespace ToolRegistry {
     | HttpClient.HttpClient
     | ChildProcessSpawner
     | Ripgrep.Service
+    | Format.Service
   > = Layer.effect(
     Service,
     Effect.gen(function* () {
@@ -110,6 +112,11 @@ export namespace ToolRegistry {
       const bash = yield* BashTool
       const codesearch = yield* CodeSearchTool
       const globtool = yield* GlobTool
+      const writetool = yield* WriteTool
+      const edit = yield* EditTool
+      const greptool = yield* GrepTool
+      const patchtool = yield* ApplyPatchTool
+      const skilltool = yield* SkillTool
 
       const state = yield* InstanceState.make<State>(
         Effect.fn("ToolRegistry.state")(function* (ctx) {
@@ -171,16 +178,16 @@ export namespace ToolRegistry {
             bash: Tool.init(bash),
             read: Tool.init(read),
             glob: Tool.init(globtool),
-            grep: Tool.init(GrepTool),
-            edit: Tool.init(EditTool),
-            write: Tool.init(WriteTool),
+            grep: Tool.init(greptool),
+            edit: Tool.init(edit),
+            write: Tool.init(writetool),
             task: Tool.init(task),
             fetch: Tool.init(webfetch),
             todo: Tool.init(todo),
             search: Tool.init(websearch),
             code: Tool.init(codesearch),
-            skill: Tool.init(SkillTool),
-            patch: Tool.init(ApplyPatchTool),
+            skill: Tool.init(skilltool),
+            patch: Tool.init(patchtool),
             question: Tool.init(question),
             lsp: Tool.init(lsptool),
             plan: Tool.init(plan),
@@ -322,6 +329,7 @@ export namespace ToolRegistry {
       Layer.provide(Instruction.defaultLayer),
       Layer.provide(AppFileSystem.defaultLayer),
       Layer.provide(FetchHttpClient.layer),
+      Layer.provide(Format.defaultLayer),
       Layer.provide(CrossSpawnSpawner.defaultLayer),
       Layer.provide(Ripgrep.defaultLayer),
     ),

+ 82 - 80
packages/opencode/src/tool/skill.ts

@@ -1,99 +1,101 @@
 import path from "path"
 import { pathToFileURL } from "url"
 import z from "zod"
+import { Effect } from "effect"
+import * as Stream from "effect/Stream"
 import { Tool } from "./tool"
 import { Skill } from "../skill"
 import { Ripgrep } from "../file/ripgrep"
-import { iife } from "@/util/iife"
 
 const Parameters = z.object({
   name: z.string().describe("The name of the skill from available_skills"),
 })
 
-export const SkillTool = Tool.define("skill", async () => {
-  const list = await Skill.available()
+export const SkillTool = Tool.defineEffect(
+  "skill",
+  Effect.gen(function* () {
+    const skill = yield* Skill.Service
+    const rg = yield* Ripgrep.Service
 
-  const description =
-    list.length === 0
-      ? "Load a specialized skill that provides domain-specific instructions and workflows. No skills are currently available."
-      : [
-          "Load a specialized skill that provides domain-specific instructions and workflows.",
-          "",
-          "When you recognize that a task matches one of the available skills listed below, use this tool to load the full skill instructions.",
-          "",
-          "The skill will inject detailed instructions, workflows, and access to bundled resources (scripts, references, templates) into the conversation context.",
-          "",
-          'Tool output includes a `<skill_content name="...">` block with the loaded content.',
-          "",
-          "The following skills provide specialized sets of instructions for particular tasks",
-          "Invoke this tool to load a skill when a task matches one of the available skills listed below:",
-          "",
-          Skill.fmt(list, { verbose: false }),
-        ].join("\n")
+    return async () => {
+      const list = await Effect.runPromise(skill.available())
 
-  return {
-    description,
-    parameters: Parameters,
-    async execute(params: z.infer<typeof Parameters>, ctx) {
-      const skill = await Skill.get(params.name)
+      const description =
+        list.length === 0
+          ? "Load a specialized skill that provides domain-specific instructions and workflows. No skills are currently available."
+          : [
+              "Load a specialized skill that provides domain-specific instructions and workflows.",
+              "",
+              "When you recognize that a task matches one of the available skills listed below, use this tool to load the full skill instructions.",
+              "",
+              "The skill will inject detailed instructions, workflows, and access to bundled resources (scripts, references, templates) into the conversation context.",
+              "",
+              'Tool output includes a `<skill_content name="...">` block with the loaded content.',
+              "",
+              "The following skills provide specialized sets of instructions for particular tasks",
+              "Invoke this tool to load a skill when a task matches one of the available skills listed below:",
+              "",
+              Skill.fmt(list, { verbose: false }),
+            ].join("\n")
 
-      if (!skill) {
-        const available = await Skill.all().then((x) => x.map((skill) => skill.name).join(", "))
-        throw new Error(`Skill "${params.name}" not found. Available skills: ${available || "none"}`)
-      }
+      return {
+        description,
+        parameters: Parameters,
+        execute: (params: z.infer<typeof Parameters>, ctx: Tool.Context) =>
+          Effect.gen(function* () {
+            const info = yield* skill.get(params.name)
 
-      await ctx.ask({
-        permission: "skill",
-        patterns: [params.name],
-        always: [params.name],
-        metadata: {},
-      })
+            if (!info) {
+              const all = yield* skill.all()
+              const available = all.map((s) => s.name).join(", ")
+              throw new Error(`Skill "${params.name}" not found. Available skills: ${available || "none"}`)
+            }
 
-      const dir = path.dirname(skill.location)
-      const base = pathToFileURL(dir).href
+            yield* Effect.promise(() =>
+              ctx.ask({
+                permission: "skill",
+                patterns: [params.name],
+                always: [params.name],
+                metadata: {},
+              }),
+            )
 
-      const limit = 10
-      const files = await iife(async () => {
-        const arr = []
-        for await (const file of Ripgrep.files({
-          cwd: dir,
-          follow: false,
-          hidden: true,
-          signal: ctx.abort,
-        })) {
-          if (file.includes("SKILL.md")) {
-            continue
-          }
-          arr.push(path.resolve(dir, file))
-          if (arr.length >= limit) {
-            break
-          }
-        }
-        return arr
-      }).then((f) => f.map((file) => `<file>${file}</file>`).join("\n"))
+            const dir = path.dirname(info.location)
+            const base = pathToFileURL(dir).href
 
-      return {
-        title: `Loaded skill: ${skill.name}`,
-        output: [
-          `<skill_content name="${skill.name}">`,
-          `# Skill: ${skill.name}`,
-          "",
-          skill.content.trim(),
-          "",
-          `Base directory for this skill: ${base}`,
-          "Relative paths in this skill (e.g., scripts/, reference/) are relative to this base directory.",
-          "Note: file list is sampled.",
-          "",
-          "<skill_files>",
-          files,
-          "</skill_files>",
-          "</skill_content>",
-        ].join("\n"),
-        metadata: {
-          name: skill.name,
-          dir,
-        },
+            const limit = 10
+            const files = yield* rg.files({ cwd: dir, follow: false, hidden: true }).pipe(
+              Stream.filter((file) => !file.includes("SKILL.md")),
+              Stream.map((file) => path.resolve(dir, file)),
+              Stream.take(limit),
+              Stream.runCollect,
+              Effect.map((chunk) => [...chunk].map((file) => `<file>${file}</file>`).join("\n")),
+            )
+
+            return {
+              title: `Loaded skill: ${info.name}`,
+              output: [
+                `<skill_content name="${info.name}">`,
+                `# Skill: ${info.name}`,
+                "",
+                info.content.trim(),
+                "",
+                `Base directory for this skill: ${base}`,
+                "Relative paths in this skill (e.g., scripts/, reference/) are relative to this base directory.",
+                "Note: file list is sampled.",
+                "",
+                "<skill_files>",
+                files,
+                "</skill_files>",
+                "</skill_content>",
+              ].join("\n"),
+              metadata: {
+                name: info.name,
+                dir,
+              },
+            }
+          }).pipe(Effect.orDie, Effect.runPromise),
       }
-    },
-  }
-})
+    }
+  }),
+)

+ 13 - 4
packages/opencode/src/tool/task.ts

@@ -5,11 +5,17 @@ import { Session } from "../session"
 import { SessionID, MessageID } from "../session/schema"
 import { MessageV2 } from "../session/message-v2"
 import { Agent } from "../agent/agent"
-import { SessionPrompt } from "../session/prompt"
+import type { SessionPrompt } from "../session/prompt"
 import { Config } from "../config/config"
 import { Effect } from "effect"
 import { Log } from "@/util/log"
 
+export interface TaskPromptOps {
+  cancel(sessionID: SessionID): void
+  resolvePromptParts(template: string): Promise<SessionPrompt.PromptInput["parts"]>
+  prompt(input: SessionPrompt.PromptInput): Promise<MessageV2.WithParts>
+}
+
 const id = "task"
 
 const parameters = z.object({
@@ -113,10 +119,13 @@ export const TaskTool = Tool.defineEffect(
         },
       })
 
+      const ops = ctx.extra?.promptOps as TaskPromptOps
+      if (!ops) return yield* Effect.fail(new Error("TaskTool requires promptOps in ctx.extra"))
+
       const messageID = MessageID.ascending()
 
       function cancel() {
-        SessionPrompt.cancel(nextSession.id)
+        ops.cancel(nextSession.id)
       }
 
       return yield* Effect.acquireUseRelease(
@@ -125,9 +134,9 @@ export const TaskTool = Tool.defineEffect(
         }),
         () =>
           Effect.gen(function* () {
-            const parts = yield* Effect.promise(() => SessionPrompt.resolvePromptParts(params.prompt))
+            const parts = yield* Effect.promise(() => ops.resolvePromptParts(params.prompt))
             const result = yield* Effect.promise(() =>
-              SessionPrompt.prompt({
+              ops.prompt({
                 messageID,
                 sessionID: nextSession.id,
                 model: {

+ 80 - 61
packages/opencode/src/tool/write.ts

@@ -1,5 +1,6 @@
 import z from "zod"
 import * as path from "path"
+import { Effect } from "effect"
 import { Tool } from "./tool"
 import { LSP } from "../lsp"
 import { createTwoFilesPatch } from "diff"
@@ -9,76 +10,94 @@ import { File } from "../file"
 import { FileWatcher } from "../file/watcher"
 import { Format } from "../format"
 import { FileTime } from "../file/time"
-import { Filesystem } from "../util/filesystem"
+import { AppFileSystem } from "../filesystem"
 import { Instance } from "../project/instance"
 import { trimDiff } from "./edit"
-import { assertExternalDirectory } from "./external-directory"
+import { assertExternalDirectoryEffect } from "./external-directory"
 
 const MAX_DIAGNOSTICS_PER_FILE = 20
 const MAX_PROJECT_DIAGNOSTICS_FILES = 5
 
-export const WriteTool = Tool.define("write", {
-  description: DESCRIPTION,
-  parameters: z.object({
-    content: z.string().describe("The content to write to the file"),
-    filePath: z.string().describe("The absolute path to the file to write (must be absolute, not relative)"),
-  }),
-  async execute(params, ctx) {
-    const filepath = path.isAbsolute(params.filePath) ? params.filePath : path.join(Instance.directory, params.filePath)
-    await assertExternalDirectory(ctx, filepath)
+export const WriteTool = Tool.defineEffect(
+  "write",
+  Effect.gen(function* () {
+    const lsp = yield* LSP.Service
+    const fs = yield* AppFileSystem.Service
+    const filetime = yield* FileTime.Service
 
-    const exists = await Filesystem.exists(filepath)
-    const contentOld = exists ? await Filesystem.readText(filepath) : ""
-    if (exists) await FileTime.assert(ctx.sessionID, filepath)
+    return {
+      description: DESCRIPTION,
+      parameters: z.object({
+        content: z.string().describe("The content to write to the file"),
+        filePath: z.string().describe("The absolute path to the file to write (must be absolute, not relative)"),
+      }),
+      execute: (params: { content: string; filePath: string }, ctx: Tool.Context) =>
+        Effect.gen(function* () {
+          const filepath = path.isAbsolute(params.filePath)
+            ? params.filePath
+            : path.join(Instance.directory, params.filePath)
+          yield* assertExternalDirectoryEffect(ctx, filepath)
 
-    const diff = trimDiff(createTwoFilesPatch(filepath, filepath, contentOld, params.content))
-    await ctx.ask({
-      permission: "edit",
-      patterns: [path.relative(Instance.worktree, filepath)],
-      always: ["*"],
-      metadata: {
-        filepath,
-        diff,
-      },
-    })
+          const exists = yield* fs.existsSafe(filepath)
+          const contentOld = exists ? yield* fs.readFileString(filepath) : ""
+          if (exists) yield* filetime.assert(ctx.sessionID, filepath)
 
-    await Filesystem.write(filepath, params.content)
-    await Format.file(filepath)
-    Bus.publish(File.Event.Edited, { file: filepath })
-    await Bus.publish(FileWatcher.Event.Updated, {
-      file: filepath,
-      event: exists ? "change" : "add",
-    })
-    await FileTime.read(ctx.sessionID, filepath)
+          const diff = trimDiff(createTwoFilesPatch(filepath, filepath, contentOld, params.content))
+          yield* Effect.promise(() =>
+            ctx.ask({
+              permission: "edit",
+              patterns: [path.relative(Instance.worktree, filepath)],
+              always: ["*"],
+              metadata: {
+                filepath,
+                diff,
+              },
+            }),
+          )
 
-    let output = "Wrote file successfully."
-    await LSP.touchFile(filepath, true)
-    const diagnostics = await LSP.diagnostics()
-    const normalizedFilepath = Filesystem.normalizePath(filepath)
-    let projectDiagnosticsCount = 0
-    for (const [file, issues] of Object.entries(diagnostics)) {
-      const errors = issues.filter((item) => item.severity === 1)
-      if (errors.length === 0) continue
-      const limited = errors.slice(0, MAX_DIAGNOSTICS_PER_FILE)
-      const suffix =
-        errors.length > MAX_DIAGNOSTICS_PER_FILE ? `\n... and ${errors.length - MAX_DIAGNOSTICS_PER_FILE} more` : ""
-      if (file === normalizedFilepath) {
-        output += `\n\nLSP errors detected in this file, please fix:\n<diagnostics file="${filepath}">\n${limited.map(LSP.Diagnostic.pretty).join("\n")}${suffix}\n</diagnostics>`
-        continue
-      }
-      if (projectDiagnosticsCount >= MAX_PROJECT_DIAGNOSTICS_FILES) continue
-      projectDiagnosticsCount++
-      output += `\n\nLSP errors detected in other files:\n<diagnostics file="${file}">\n${limited.map(LSP.Diagnostic.pretty).join("\n")}${suffix}\n</diagnostics>`
-    }
+          yield* fs.writeWithDirs(filepath, params.content)
+          yield* Effect.promise(() => Format.file(filepath))
+          Bus.publish(File.Event.Edited, { file: filepath })
+          yield* Effect.promise(() =>
+            Bus.publish(FileWatcher.Event.Updated, {
+              file: filepath,
+              event: exists ? "change" : "add",
+            }),
+          )
+          yield* filetime.read(ctx.sessionID, filepath)
 
-    return {
-      title: path.relative(Instance.worktree, filepath),
-      metadata: {
-        diagnostics,
-        filepath,
-        exists: exists,
-      },
-      output,
+          let output = "Wrote file successfully."
+          yield* lsp.touchFile(filepath, true)
+          const diagnostics = yield* lsp.diagnostics()
+          const normalizedFilepath = AppFileSystem.normalizePath(filepath)
+          let projectDiagnosticsCount = 0
+          for (const [file, issues] of Object.entries(diagnostics)) {
+            const errors = issues.filter((item) => item.severity === 1)
+            if (errors.length === 0) continue
+            const limited = errors.slice(0, MAX_DIAGNOSTICS_PER_FILE)
+            const suffix =
+              errors.length > MAX_DIAGNOSTICS_PER_FILE
+                ? `\n... and ${errors.length - MAX_DIAGNOSTICS_PER_FILE} more`
+                : ""
+            if (file === normalizedFilepath) {
+              output += `\n\nLSP errors detected in this file, please fix:\n<diagnostics file="${filepath}">\n${limited.map(LSP.Diagnostic.pretty).join("\n")}${suffix}\n</diagnostics>`
+              continue
+            }
+            if (projectDiagnosticsCount >= MAX_PROJECT_DIAGNOSTICS_FILES) continue
+            projectDiagnosticsCount++
+            output += `\n\nLSP errors detected in other files:\n<diagnostics file="${file}">\n${limited.map(LSP.Diagnostic.pretty).join("\n")}${suffix}\n</diagnostics>`
+          }
+
+          return {
+            title: path.relative(Instance.worktree, filepath),
+            metadata: {
+              diagnostics,
+              filepath,
+              exists: exists,
+            },
+            output,
+          }
+        }).pipe(Effect.orDie, Effect.runPromise),
     }
-  },
-})
+  }),
+)

+ 1 - 1
packages/opencode/src/worktree/index.ts

@@ -590,7 +590,7 @@ export namespace Worktree {
     }),
   )
 
-  const defaultLayer = layer.pipe(
+  export const defaultLayer = layer.pipe(
     Layer.provide(Git.defaultLayer),
     Layer.provide(CrossSpawnSpawner.defaultLayer),
     Layer.provide(Project.defaultLayer),

+ 2 - 0
packages/opencode/test/session/prompt-effect.test.ts

@@ -38,6 +38,7 @@ import { Truncate } from "../../src/tool/truncate"
 import { Log } from "../../src/util/log"
 import * as CrossSpawnSpawner from "../../src/effect/cross-spawn-spawner"
 import { Ripgrep } from "../../src/file/ripgrep"
+import { Format } from "../../src/format"
 import { provideTmpdirInstance, provideTmpdirServer } from "../fixture/fixture"
 import { testEffect } from "../lib/effect"
 import { reply, TestLLMServer } from "../lib/llm-server"
@@ -174,6 +175,7 @@ function makeHttp() {
     Layer.provide(FetchHttpClient.layer),
     Layer.provide(CrossSpawnSpawner.defaultLayer),
     Layer.provide(Ripgrep.defaultLayer),
+    Layer.provide(Format.defaultLayer),
     Layer.provideMerge(todo),
     Layer.provideMerge(question),
     Layer.provideMerge(deps),

+ 2 - 0
packages/opencode/test/session/snapshot-tool-race.test.ts

@@ -54,6 +54,7 @@ import { Truncate } from "../../src/tool/truncate"
 import { AppFileSystem } from "../../src/filesystem"
 import * as CrossSpawnSpawner from "../../src/effect/cross-spawn-spawner"
 import { Ripgrep } from "../../src/file/ripgrep"
+import { Format } from "../../src/format"
 
 Log.init({ print: false })
 
@@ -138,6 +139,7 @@ function makeHttp() {
     Layer.provide(FetchHttpClient.layer),
     Layer.provide(CrossSpawnSpawner.defaultLayer),
     Layer.provide(Ripgrep.defaultLayer),
+    Layer.provide(Format.defaultLayer),
     Layer.provideMerge(todo),
     Layer.provideMerge(question),
     Layer.provideMerge(deps),

+ 8 - 1
packages/opencode/test/tool/apply_patch.test.ts

@@ -1,11 +1,17 @@
 import { describe, expect, test } from "bun:test"
 import path from "path"
 import * as fs from "fs/promises"
+import { Effect, ManagedRuntime, Layer } from "effect"
 import { ApplyPatchTool } from "../../src/tool/apply_patch"
 import { Instance } from "../../src/project/instance"
+import { LSP } from "../../src/lsp"
+import { AppFileSystem } from "../../src/filesystem"
+import { Format } from "../../src/format"
 import { tmpdir } from "../fixture/fixture"
 import { SessionID, MessageID } from "../../src/session/schema"
 
+const runtime = ManagedRuntime.make(Layer.mergeAll(LSP.defaultLayer, AppFileSystem.defaultLayer, Format.defaultLayer))
+
 const baseCtx = {
   sessionID: SessionID.make("ses_test"),
   messageID: MessageID.make(""),
@@ -40,7 +46,8 @@ type ToolCtx = typeof baseCtx & {
 }
 
 const execute = async (params: { patchText: string }, ctx: ToolCtx) => {
-  const tool = await ApplyPatchTool.init()
+  const info = await runtime.runPromise(ApplyPatchTool)
+  const tool = await info.init()
   return tool.execute(params, ctx)
 }
 

+ 35 - 19
packages/opencode/test/tool/edit.test.ts

@@ -1,10 +1,12 @@
-import { afterEach, describe, test, expect } from "bun:test"
+import { afterAll, afterEach, describe, test, expect } from "bun:test"
 import path from "path"
 import fs from "fs/promises"
+import { Effect, Layer, ManagedRuntime } from "effect"
 import { EditTool } from "../../src/tool/edit"
 import { Instance } from "../../src/project/instance"
 import { tmpdir } from "../fixture/fixture"
 import { FileTime } from "../../src/file/time"
+import { LSP } from "../../src/lsp"
 import { SessionID, MessageID } from "../../src/session/schema"
 
 const ctx = {
@@ -27,6 +29,20 @@ async function touch(file: string, time: number) {
   await fs.utimes(file, date, date)
 }
 
+const runtime = ManagedRuntime.make(Layer.mergeAll(LSP.defaultLayer, FileTime.defaultLayer))
+
+afterAll(async () => {
+  await runtime.dispose()
+})
+
+const resolve = () =>
+  runtime.runPromise(
+    Effect.gen(function* () {
+      const info = yield* EditTool
+      return yield* Effect.promise(() => info.init())
+    }),
+  )
+
 describe("tool.edit", () => {
   describe("creating new files", () => {
     test("creates new file when oldString is empty", async () => {
@@ -36,7 +52,7 @@ describe("tool.edit", () => {
       await Instance.provide({
         directory: tmp.path,
         fn: async () => {
-          const edit = await EditTool.init()
+          const edit = await resolve()
           const result = await edit.execute(
             {
               filePath: filepath,
@@ -61,7 +77,7 @@ describe("tool.edit", () => {
       await Instance.provide({
         directory: tmp.path,
         fn: async () => {
-          const edit = await EditTool.init()
+          const edit = await resolve()
           await edit.execute(
             {
               filePath: filepath,
@@ -91,7 +107,7 @@ describe("tool.edit", () => {
           const events: string[] = []
           const unsubUpdated = Bus.subscribe(FileWatcher.Event.Updated, () => events.push("updated"))
 
-          const edit = await EditTool.init()
+          const edit = await resolve()
           await edit.execute(
             {
               filePath: filepath,
@@ -119,7 +135,7 @@ describe("tool.edit", () => {
         fn: async () => {
           await FileTime.read(ctx.sessionID, filepath)
 
-          const edit = await EditTool.init()
+          const edit = await resolve()
           const result = await edit.execute(
             {
               filePath: filepath,
@@ -146,7 +162,7 @@ describe("tool.edit", () => {
         fn: async () => {
           await FileTime.read(ctx.sessionID, filepath)
 
-          const edit = await EditTool.init()
+          const edit = await resolve()
           await expect(
             edit.execute(
               {
@@ -169,7 +185,7 @@ describe("tool.edit", () => {
       await Instance.provide({
         directory: tmp.path,
         fn: async () => {
-          const edit = await EditTool.init()
+          const edit = await resolve()
           await expect(
             edit.execute(
               {
@@ -194,7 +210,7 @@ describe("tool.edit", () => {
         fn: async () => {
           await FileTime.read(ctx.sessionID, filepath)
 
-          const edit = await EditTool.init()
+          const edit = await resolve()
           await expect(
             edit.execute(
               {
@@ -217,7 +233,7 @@ describe("tool.edit", () => {
       await Instance.provide({
         directory: tmp.path,
         fn: async () => {
-          const edit = await EditTool.init()
+          const edit = await resolve()
           await expect(
             edit.execute(
               {
@@ -249,7 +265,7 @@ describe("tool.edit", () => {
           await touch(filepath, 2_000)
 
           // Try to edit with the new content
-          const edit = await EditTool.init()
+          const edit = await resolve()
           await expect(
             edit.execute(
               {
@@ -274,7 +290,7 @@ describe("tool.edit", () => {
         fn: async () => {
           await FileTime.read(ctx.sessionID, filepath)
 
-          const edit = await EditTool.init()
+          const edit = await resolve()
           await edit.execute(
             {
               filePath: filepath,
@@ -307,7 +323,7 @@ describe("tool.edit", () => {
           const events: string[] = []
           const unsubUpdated = Bus.subscribe(FileWatcher.Event.Updated, () => events.push("updated"))
 
-          const edit = await EditTool.init()
+          const edit = await resolve()
           await edit.execute(
             {
               filePath: filepath,
@@ -335,7 +351,7 @@ describe("tool.edit", () => {
         fn: async () => {
           await FileTime.read(ctx.sessionID, filepath)
 
-          const edit = await EditTool.init()
+          const edit = await resolve()
           await edit.execute(
             {
               filePath: filepath,
@@ -361,7 +377,7 @@ describe("tool.edit", () => {
         fn: async () => {
           await FileTime.read(ctx.sessionID, filepath)
 
-          const edit = await EditTool.init()
+          const edit = await resolve()
           await edit.execute(
             {
               filePath: filepath,
@@ -385,7 +401,7 @@ describe("tool.edit", () => {
       await Instance.provide({
         directory: tmp.path,
         fn: async () => {
-          const edit = await EditTool.init()
+          const edit = await resolve()
           await expect(
             edit.execute(
               {
@@ -410,7 +426,7 @@ describe("tool.edit", () => {
         fn: async () => {
           await FileTime.read(ctx.sessionID, dirpath)
 
-          const edit = await EditTool.init()
+          const edit = await resolve()
           await expect(
             edit.execute(
               {
@@ -435,7 +451,7 @@ describe("tool.edit", () => {
         fn: async () => {
           await FileTime.read(ctx.sessionID, filepath)
 
-          const edit = await EditTool.init()
+          const edit = await resolve()
           const result = await edit.execute(
             {
               filePath: filepath,
@@ -502,7 +518,7 @@ describe("tool.edit", () => {
       return await Instance.provide({
         directory: tmp.path,
         fn: async () => {
-          const edit = await EditTool.init()
+          const edit = await resolve()
           const filePath = path.join(tmp.path, "test.txt")
           await FileTime.read(ctx.sessionID, filePath)
           await edit.execute(
@@ -647,7 +663,7 @@ describe("tool.edit", () => {
         fn: async () => {
           await FileTime.read(ctx.sessionID, filepath)
 
-          const edit = await EditTool.init()
+          const edit = await resolve()
 
           // Two concurrent edits
           const promise1 = edit.execute(

+ 11 - 3
packages/opencode/test/tool/grep.test.ts

@@ -1,9 +1,17 @@
 import { describe, expect, test } from "bun:test"
 import path from "path"
+import { Effect, Layer, ManagedRuntime } from "effect"
 import { GrepTool } from "../../src/tool/grep"
 import { Instance } from "../../src/project/instance"
 import { tmpdir } from "../fixture/fixture"
 import { SessionID, MessageID } from "../../src/session/schema"
+import * as CrossSpawnSpawner from "../../src/effect/cross-spawn-spawner"
+
+const runtime = ManagedRuntime.make(Layer.mergeAll(CrossSpawnSpawner.defaultLayer))
+
+function initGrep() {
+  return runtime.runPromise(GrepTool.pipe(Effect.flatMap((info) => Effect.promise(() => info.init()))))
+}
 
 const ctx = {
   sessionID: SessionID.make("ses_test"),
@@ -23,7 +31,7 @@ describe("tool.grep", () => {
     await Instance.provide({
       directory: projectRoot,
       fn: async () => {
-        const grep = await GrepTool.init()
+        const grep = await initGrep()
         const result = await grep.execute(
           {
             pattern: "export",
@@ -47,7 +55,7 @@ describe("tool.grep", () => {
     await Instance.provide({
       directory: tmp.path,
       fn: async () => {
-        const grep = await GrepTool.init()
+        const grep = await initGrep()
         const result = await grep.execute(
           {
             pattern: "xyznonexistentpatternxyz123",
@@ -72,7 +80,7 @@ describe("tool.grep", () => {
     await Instance.provide({
       directory: tmp.path,
       fn: async () => {
-        const grep = await GrepTool.init()
+        const grep = await initGrep()
         const result = await grep.execute(
           {
             pattern: "line",

+ 6 - 2
packages/opencode/test/tool/skill.test.ts

@@ -1,4 +1,6 @@
-import { Effect } from "effect"
+import { Effect, Layer, ManagedRuntime } from "effect"
+import { Skill } from "../../src/skill"
+import { Ripgrep } from "../../src/file/ripgrep"
 import { afterEach, describe, expect, test } from "bun:test"
 import path from "path"
 import { pathToFileURL } from "url"
@@ -148,7 +150,9 @@ Use this skill.
       await Instance.provide({
         directory: tmp.path,
         fn: async () => {
-          const tool = await SkillTool.init()
+          const runtime = ManagedRuntime.make(Layer.mergeAll(Skill.defaultLayer, Ripgrep.defaultLayer))
+          const info = await runtime.runPromise(SkillTool)
+          const tool = await info.init()
           const requests: Array<Omit<Permission.Request, "id" | "sessionID" | "tool">> = []
           const ctx: Tool.Context = {
             ...baseCtx,

+ 25 - 60
packages/opencode/test/tool/task.test.ts

@@ -6,10 +6,10 @@ import * as CrossSpawnSpawner from "../../src/effect/cross-spawn-spawner"
 import { Instance } from "../../src/project/instance"
 import { Session } from "../../src/session"
 import { MessageV2 } from "../../src/session/message-v2"
-import { SessionPrompt } from "../../src/session/prompt"
+import type { SessionPrompt } from "../../src/session/prompt"
 import { MessageID, PartID } from "../../src/session/schema"
 import { ModelID, ProviderID } from "../../src/provider/schema"
-import { TaskTool } from "../../src/tool/task"
+import { TaskTool, type TaskPromptOps } from "../../src/tool/task"
 import { ToolRegistry } from "../../src/tool/registry"
 import { provideTmpdirInstance } from "../fixture/fixture"
 import { testEffect } from "../lib/effect"
@@ -62,6 +62,17 @@ const seed = Effect.fn("TaskToolTest.seed")(function* (title = "Pinned") {
   return { chat, assistant }
 })
 
+function stubOps(opts?: { onPrompt?: (input: SessionPrompt.PromptInput) => void; text?: string }): TaskPromptOps {
+  return {
+    cancel() {},
+    resolvePromptParts: async (template) => [{ type: "text", text: template }],
+    prompt: async (input) => {
+      opts?.onPrompt?.(input)
+      return reply(input, opts?.text ?? "done")
+    },
+  }
+}
+
 function reply(input: Parameters<typeof SessionPrompt.prompt>[0], text: string): MessageV2.WithParts {
   const id = MessageID.ascending()
   return {
@@ -180,21 +191,8 @@ describe("tool.task", () => {
         const child = yield* sessions.create({ parentID: chat.id, title: "Existing child" })
         const tool = yield* TaskTool
         const def = yield* Effect.promise(() => tool.init())
-        const resolve = SessionPrompt.resolvePromptParts
-        const prompt = SessionPrompt.prompt
-        let seen: Parameters<typeof SessionPrompt.prompt>[0] | undefined
-
-        SessionPrompt.resolvePromptParts = async (template) => [{ type: "text", text: template }]
-        SessionPrompt.prompt = async (input) => {
-          seen = input
-          return reply(input, "resumed")
-        }
-        yield* Effect.addFinalizer(() =>
-          Effect.sync(() => {
-            SessionPrompt.resolvePromptParts = resolve
-            SessionPrompt.prompt = prompt
-          }),
-        )
+        let seen: SessionPrompt.PromptInput | undefined
+        const promptOps = stubOps({ text: "resumed", onPrompt: (input) => (seen = input) })
 
         const result = yield* Effect.promise(() =>
           def.execute(
@@ -209,6 +207,7 @@ describe("tool.task", () => {
               messageID: assistant.id,
               agent: "build",
               abort: new AbortController().signal,
+              extra: { promptOps },
               messages: [],
               metadata() {},
               ask: async () => {},
@@ -232,20 +231,10 @@ describe("tool.task", () => {
         const { chat, assistant } = yield* seed()
         const tool = yield* TaskTool
         const def = yield* Effect.promise(() => tool.init())
-        const resolve = SessionPrompt.resolvePromptParts
-        const prompt = SessionPrompt.prompt
         const calls: unknown[] = []
+        const promptOps = stubOps()
 
-        SessionPrompt.resolvePromptParts = async (template) => [{ type: "text", text: template }]
-        SessionPrompt.prompt = async (input) => reply(input, "done")
-        yield* Effect.addFinalizer(() =>
-          Effect.sync(() => {
-            SessionPrompt.resolvePromptParts = resolve
-            SessionPrompt.prompt = prompt
-          }),
-        )
-
-        const exec = (extra?: { bypassAgentCheck?: boolean }) =>
+        const exec = (extra?: Record<string, any>) =>
           Effect.promise(() =>
             def.execute(
               {
@@ -258,7 +247,7 @@ describe("tool.task", () => {
                 messageID: assistant.id,
                 agent: "build",
                 abort: new AbortController().signal,
-                extra,
+                extra: { promptOps, ...extra },
                 messages: [],
                 metadata() {},
                 ask: async (input) => {
@@ -292,21 +281,8 @@ describe("tool.task", () => {
         const { chat, assistant } = yield* seed()
         const tool = yield* TaskTool
         const def = yield* Effect.promise(() => tool.init())
-        const resolve = SessionPrompt.resolvePromptParts
-        const prompt = SessionPrompt.prompt
-        let seen: Parameters<typeof SessionPrompt.prompt>[0] | undefined
-
-        SessionPrompt.resolvePromptParts = async (template) => [{ type: "text", text: template }]
-        SessionPrompt.prompt = async (input) => {
-          seen = input
-          return reply(input, "created")
-        }
-        yield* Effect.addFinalizer(() =>
-          Effect.sync(() => {
-            SessionPrompt.resolvePromptParts = resolve
-            SessionPrompt.prompt = prompt
-          }),
-        )
+        let seen: SessionPrompt.PromptInput | undefined
+        const promptOps = stubOps({ text: "created", onPrompt: (input) => (seen = input) })
 
         const result = yield* Effect.promise(() =>
           def.execute(
@@ -321,6 +297,7 @@ describe("tool.task", () => {
               messageID: assistant.id,
               agent: "build",
               abort: new AbortController().signal,
+              extra: { promptOps },
               messages: [],
               metadata() {},
               ask: async () => {},
@@ -346,21 +323,8 @@ describe("tool.task", () => {
           const { chat, assistant } = yield* seed()
           const tool = yield* TaskTool
           const def = yield* Effect.promise(() => tool.init())
-          const resolve = SessionPrompt.resolvePromptParts
-          const prompt = SessionPrompt.prompt
-          let seen: Parameters<typeof SessionPrompt.prompt>[0] | undefined
-
-          SessionPrompt.resolvePromptParts = async (template) => [{ type: "text", text: template }]
-          SessionPrompt.prompt = async (input) => {
-            seen = input
-            return reply(input, "done")
-          }
-          yield* Effect.addFinalizer(() =>
-            Effect.sync(() => {
-              SessionPrompt.resolvePromptParts = resolve
-              SessionPrompt.prompt = prompt
-            }),
-          )
+          let seen: SessionPrompt.PromptInput | undefined
+          const promptOps = stubOps({ onPrompt: (input) => (seen = input) })
 
           const result = yield* Effect.promise(() =>
             def.execute(
@@ -374,6 +338,7 @@ describe("tool.task", () => {
                 messageID: assistant.id,
                 agent: "build",
                 abort: new AbortController().signal,
+                extra: { promptOps },
                 messages: [],
                 metadata() {},
                 ask: async () => {},

+ 177 - 287
packages/opencode/test/tool/write.test.ts

@@ -1,10 +1,17 @@
-import { afterEach, describe, test, expect } from "bun:test"
+import { afterEach, describe, expect } from "bun:test"
+import { Effect, Layer } from "effect"
 import path from "path"
 import fs from "fs/promises"
 import { WriteTool } from "../../src/tool/write"
 import { Instance } from "../../src/project/instance"
-import { tmpdir } from "../fixture/fixture"
+import { LSP } from "../../src/lsp"
+import { AppFileSystem } from "../../src/filesystem"
+import { FileTime } from "../../src/file/time"
+import { Tool } from "../../src/tool/tool"
 import { SessionID, MessageID } from "../../src/session/schema"
+import * as CrossSpawnSpawner from "../../src/effect/cross-spawn-spawner"
+import { provideTmpdirInstance } from "../fixture/fixture"
+import { testEffect } from "../lib/effect"
 
 const ctx = {
   sessionID: SessionID.make("ses_test-write-session"),
@@ -21,333 +28,216 @@ afterEach(async () => {
   await Instance.disposeAll()
 })
 
+const it = testEffect(
+  Layer.mergeAll(LSP.defaultLayer, AppFileSystem.defaultLayer, FileTime.defaultLayer, CrossSpawnSpawner.defaultLayer),
+)
+
+const init = Effect.fn("WriteToolTest.init")(function* () {
+  const info = yield* WriteTool
+  return yield* Effect.promise(() => info.init())
+})
+
+const run = Effect.fn("WriteToolTest.run")(function* (
+  args: Tool.InferParameters<typeof WriteTool>,
+  next: Tool.Context = ctx,
+) {
+  const tool = yield* init()
+  return yield* Effect.promise(() => tool.execute(args, next))
+})
+
+const markRead = Effect.fn("WriteToolTest.markRead")(function* (sessionID: string, filepath: string) {
+  const ft = yield* FileTime.Service
+  yield* ft.read(sessionID as any, filepath)
+})
+
 describe("tool.write", () => {
   describe("new file creation", () => {
-    test("writes content to new file", async () => {
-      await using tmp = await tmpdir()
-      const filepath = path.join(tmp.path, "newfile.txt")
-
-      await Instance.provide({
-        directory: tmp.path,
-        fn: async () => {
-          const write = await WriteTool.init()
-          const result = await write.execute(
-            {
-              filePath: filepath,
-              content: "Hello, World!",
-            },
-            ctx,
-          )
+    it.live("writes content to new file", () =>
+      provideTmpdirInstance((dir) =>
+        Effect.gen(function* () {
+          const filepath = path.join(dir, "newfile.txt")
+          const result = yield* run({ filePath: filepath, content: "Hello, World!" })
 
           expect(result.output).toContain("Wrote file successfully")
           expect(result.metadata.exists).toBe(false)
 
-          const content = await fs.readFile(filepath, "utf-8")
+          const content = yield* Effect.promise(() => fs.readFile(filepath, "utf-8"))
           expect(content).toBe("Hello, World!")
-        },
-      })
-    })
-
-    test("creates parent directories if needed", async () => {
-      await using tmp = await tmpdir()
-      const filepath = path.join(tmp.path, "nested", "deep", "file.txt")
-
-      await Instance.provide({
-        directory: tmp.path,
-        fn: async () => {
-          const write = await WriteTool.init()
-          await write.execute(
-            {
-              filePath: filepath,
-              content: "nested content",
-            },
-            ctx,
-          )
-
-          const content = await fs.readFile(filepath, "utf-8")
+        }),
+      ),
+    )
+
+    it.live("creates parent directories if needed", () =>
+      provideTmpdirInstance((dir) =>
+        Effect.gen(function* () {
+          const filepath = path.join(dir, "nested", "deep", "file.txt")
+          yield* run({ filePath: filepath, content: "nested content" })
+
+          const content = yield* Effect.promise(() => fs.readFile(filepath, "utf-8"))
           expect(content).toBe("nested content")
-        },
-      })
-    })
-
-    test("handles relative paths by resolving to instance directory", async () => {
-      await using tmp = await tmpdir()
-
-      await Instance.provide({
-        directory: tmp.path,
-        fn: async () => {
-          const write = await WriteTool.init()
-          await write.execute(
-            {
-              filePath: "relative.txt",
-              content: "relative content",
-            },
-            ctx,
-          )
-
-          const content = await fs.readFile(path.join(tmp.path, "relative.txt"), "utf-8")
+        }),
+      ),
+    )
+
+    it.live("handles relative paths by resolving to instance directory", () =>
+      provideTmpdirInstance((dir) =>
+        Effect.gen(function* () {
+          yield* run({ filePath: "relative.txt", content: "relative content" })
+
+          const content = yield* Effect.promise(() => fs.readFile(path.join(dir, "relative.txt"), "utf-8"))
           expect(content).toBe("relative content")
-        },
-      })
-    })
+        }),
+      ),
+    )
   })
 
   describe("existing file overwrite", () => {
-    test("overwrites existing file content", async () => {
-      await using tmp = await tmpdir()
-      const filepath = path.join(tmp.path, "existing.txt")
-      await fs.writeFile(filepath, "old content", "utf-8")
-
-      // First read the file to satisfy FileTime requirement
-      await Instance.provide({
-        directory: tmp.path,
-        fn: async () => {
-          const { FileTime } = await import("../../src/file/time")
-          await FileTime.read(ctx.sessionID, filepath)
-
-          const write = await WriteTool.init()
-          const result = await write.execute(
-            {
-              filePath: filepath,
-              content: "new content",
-            },
-            ctx,
-          )
+    it.live("overwrites existing file content", () =>
+      provideTmpdirInstance((dir) =>
+        Effect.gen(function* () {
+          const filepath = path.join(dir, "existing.txt")
+          yield* Effect.promise(() => fs.writeFile(filepath, "old content", "utf-8"))
+          yield* markRead(ctx.sessionID, filepath)
+
+          const result = yield* run({ filePath: filepath, content: "new content" })
 
           expect(result.output).toContain("Wrote file successfully")
           expect(result.metadata.exists).toBe(true)
 
-          const content = await fs.readFile(filepath, "utf-8")
+          const content = yield* Effect.promise(() => fs.readFile(filepath, "utf-8"))
           expect(content).toBe("new content")
-        },
-      })
-    })
-
-    test("returns diff in metadata for existing files", async () => {
-      await using tmp = await tmpdir()
-      const filepath = path.join(tmp.path, "file.txt")
-      await fs.writeFile(filepath, "old", "utf-8")
-
-      await Instance.provide({
-        directory: tmp.path,
-        fn: async () => {
-          const { FileTime } = await import("../../src/file/time")
-          await FileTime.read(ctx.sessionID, filepath)
-
-          const write = await WriteTool.init()
-          const result = await write.execute(
-            {
-              filePath: filepath,
-              content: "new",
-            },
-            ctx,
-          )
-
-          // Diff should be in metadata
+        }),
+      ),
+    )
+
+    it.live("returns diff in metadata for existing files", () =>
+      provideTmpdirInstance((dir) =>
+        Effect.gen(function* () {
+          const filepath = path.join(dir, "file.txt")
+          yield* Effect.promise(() => fs.writeFile(filepath, "old", "utf-8"))
+          yield* markRead(ctx.sessionID, filepath)
+
+          const result = yield* run({ filePath: filepath, content: "new" })
+
           expect(result.metadata).toHaveProperty("filepath", filepath)
           expect(result.metadata).toHaveProperty("exists", true)
-        },
-      })
-    })
+        }),
+      ),
+    )
   })
 
   describe("file permissions", () => {
-    test("sets file permissions when writing sensitive data", async () => {
-      await using tmp = await tmpdir()
-      const filepath = path.join(tmp.path, "sensitive.json")
-
-      await Instance.provide({
-        directory: tmp.path,
-        fn: async () => {
-          const write = await WriteTool.init()
-          await write.execute(
-            {
-              filePath: filepath,
-              content: JSON.stringify({ secret: "data" }),
-            },
-            ctx,
-          )
-
-          // On Unix systems, check permissions
+    it.live("sets file permissions when writing sensitive data", () =>
+      provideTmpdirInstance((dir) =>
+        Effect.gen(function* () {
+          const filepath = path.join(dir, "sensitive.json")
+          yield* run({ filePath: filepath, content: JSON.stringify({ secret: "data" }) })
+
           if (process.platform !== "win32") {
-            const stats = await fs.stat(filepath)
+            const stats = yield* Effect.promise(() => fs.stat(filepath))
             expect(stats.mode & 0o777).toBe(0o644)
           }
-        },
-      })
-    })
+        }),
+      ),
+    )
   })
 
   describe("content types", () => {
-    test("writes JSON content", async () => {
-      await using tmp = await tmpdir()
-      const filepath = path.join(tmp.path, "data.json")
-      const data = { key: "value", nested: { array: [1, 2, 3] } }
-
-      await Instance.provide({
-        directory: tmp.path,
-        fn: async () => {
-          const write = await WriteTool.init()
-          await write.execute(
-            {
-              filePath: filepath,
-              content: JSON.stringify(data, null, 2),
-            },
-            ctx,
-          )
-
-          const content = await fs.readFile(filepath, "utf-8")
+    it.live("writes JSON content", () =>
+      provideTmpdirInstance((dir) =>
+        Effect.gen(function* () {
+          const filepath = path.join(dir, "data.json")
+          const data = { key: "value", nested: { array: [1, 2, 3] } }
+          yield* run({ filePath: filepath, content: JSON.stringify(data, null, 2) })
+
+          const content = yield* Effect.promise(() => fs.readFile(filepath, "utf-8"))
           expect(JSON.parse(content)).toEqual(data)
-        },
-      })
-    })
-
-    test("writes binary-safe content", async () => {
-      await using tmp = await tmpdir()
-      const filepath = path.join(tmp.path, "binary.bin")
-      const content = "Hello\x00World\x01\x02\x03"
-
-      await Instance.provide({
-        directory: tmp.path,
-        fn: async () => {
-          const write = await WriteTool.init()
-          await write.execute(
-            {
-              filePath: filepath,
-              content,
-            },
-            ctx,
-          )
-
-          const buf = await fs.readFile(filepath)
+        }),
+      ),
+    )
+
+    it.live("writes binary-safe content", () =>
+      provideTmpdirInstance((dir) =>
+        Effect.gen(function* () {
+          const filepath = path.join(dir, "binary.bin")
+          const content = "Hello\x00World\x01\x02\x03"
+          yield* run({ filePath: filepath, content })
+
+          const buf = yield* Effect.promise(() => fs.readFile(filepath))
           expect(buf.toString()).toBe(content)
-        },
-      })
-    })
-
-    test("writes empty content", async () => {
-      await using tmp = await tmpdir()
-      const filepath = path.join(tmp.path, "empty.txt")
-
-      await Instance.provide({
-        directory: tmp.path,
-        fn: async () => {
-          const write = await WriteTool.init()
-          await write.execute(
-            {
-              filePath: filepath,
-              content: "",
-            },
-            ctx,
-          )
-
-          const content = await fs.readFile(filepath, "utf-8")
+        }),
+      ),
+    )
+
+    it.live("writes empty content", () =>
+      provideTmpdirInstance((dir) =>
+        Effect.gen(function* () {
+          const filepath = path.join(dir, "empty.txt")
+          yield* run({ filePath: filepath, content: "" })
+
+          const content = yield* Effect.promise(() => fs.readFile(filepath, "utf-8"))
           expect(content).toBe("")
 
-          const stats = await fs.stat(filepath)
+          const stats = yield* Effect.promise(() => fs.stat(filepath))
           expect(stats.size).toBe(0)
-        },
-      })
-    })
-
-    test("writes multi-line content", async () => {
-      await using tmp = await tmpdir()
-      const filepath = path.join(tmp.path, "multiline.txt")
-      const lines = ["Line 1", "Line 2", "Line 3", ""].join("\n")
-
-      await Instance.provide({
-        directory: tmp.path,
-        fn: async () => {
-          const write = await WriteTool.init()
-          await write.execute(
-            {
-              filePath: filepath,
-              content: lines,
-            },
-            ctx,
-          )
-
-          const content = await fs.readFile(filepath, "utf-8")
+        }),
+      ),
+    )
+
+    it.live("writes multi-line content", () =>
+      provideTmpdirInstance((dir) =>
+        Effect.gen(function* () {
+          const filepath = path.join(dir, "multiline.txt")
+          const lines = ["Line 1", "Line 2", "Line 3", ""].join("\n")
+          yield* run({ filePath: filepath, content: lines })
+
+          const content = yield* Effect.promise(() => fs.readFile(filepath, "utf-8"))
           expect(content).toBe(lines)
-        },
-      })
-    })
-
-    test("handles different line endings", async () => {
-      await using tmp = await tmpdir()
-      const filepath = path.join(tmp.path, "crlf.txt")
-      const content = "Line 1\r\nLine 2\r\nLine 3"
-
-      await Instance.provide({
-        directory: tmp.path,
-        fn: async () => {
-          const write = await WriteTool.init()
-          await write.execute(
-            {
-              filePath: filepath,
-              content,
-            },
-            ctx,
-          )
-
-          const buf = await fs.readFile(filepath)
+        }),
+      ),
+    )
+
+    it.live("handles different line endings", () =>
+      provideTmpdirInstance((dir) =>
+        Effect.gen(function* () {
+          const filepath = path.join(dir, "crlf.txt")
+          const content = "Line 1\r\nLine 2\r\nLine 3"
+          yield* run({ filePath: filepath, content })
+
+          const buf = yield* Effect.promise(() => fs.readFile(filepath))
           expect(buf.toString()).toBe(content)
-        },
-      })
-    })
+        }),
+      ),
+    )
   })
 
   describe("error handling", () => {
-    test("throws error when OS denies write access", async () => {
-      await using tmp = await tmpdir()
-      const readonlyPath = path.join(tmp.path, "readonly.txt")
-
-      // Create a read-only file
-      await fs.writeFile(readonlyPath, "test", "utf-8")
-      await fs.chmod(readonlyPath, 0o444)
-
-      await Instance.provide({
-        directory: tmp.path,
-        fn: async () => {
-          const { FileTime } = await import("../../src/file/time")
-          await FileTime.read(ctx.sessionID, readonlyPath)
-
-          const write = await WriteTool.init()
-          await expect(
-            write.execute(
-              {
-                filePath: readonlyPath,
-                content: "new content",
-              },
-              ctx,
-            ),
-          ).rejects.toThrow()
-        },
-      })
-    })
+    it.live("throws error when OS denies write access", () =>
+      provideTmpdirInstance((dir) =>
+        Effect.gen(function* () {
+          const readonlyPath = path.join(dir, "readonly.txt")
+          yield* Effect.promise(() => fs.writeFile(readonlyPath, "test", "utf-8"))
+          yield* Effect.promise(() => fs.chmod(readonlyPath, 0o444))
+          yield* markRead(ctx.sessionID, readonlyPath)
+
+          const exit = yield* run({ filePath: readonlyPath, content: "new content" }).pipe(Effect.exit)
+          expect(exit._tag).toBe("Failure")
+        }),
+      ),
+    )
   })
 
   describe("title generation", () => {
-    test("returns relative path as title", async () => {
-      await using tmp = await tmpdir()
-      const filepath = path.join(tmp.path, "src", "components", "Button.tsx")
-      await fs.mkdir(path.dirname(filepath), { recursive: true })
-
-      await Instance.provide({
-        directory: tmp.path,
-        fn: async () => {
-          const write = await WriteTool.init()
-          const result = await write.execute(
-            {
-              filePath: filepath,
-              content: "export const Button = () => {}",
-            },
-            ctx,
-          )
+    it.live("returns relative path as title", () =>
+      provideTmpdirInstance((dir) =>
+        Effect.gen(function* () {
+          const filepath = path.join(dir, "src", "components", "Button.tsx")
+          yield* Effect.promise(() => fs.mkdir(path.dirname(filepath), { recursive: true }))
 
+          const result = yield* run({ filePath: filepath, content: "export const Button = () => {}" })
           expect(result.title).toEndWith(path.join("src", "components", "Button.tsx"))
-        },
-      })
-    })
+        }),
+      ),
+    )
   })
 })

+ 1 - 1
packages/web/src/content/docs/ar/zen.mdx

@@ -213,7 +213,7 @@ https://opencode.ai/zen/v1/models
 - MiMo V2 Pro Free: خلال فترته المجانية، قد تُستخدم البيانات المجمعة لتحسين النموذج.
 - MiMo V2 Omni Free: خلال فترته المجانية، قد تُستخدم البيانات المجمعة لتحسين النموذج.
 - Qwen3.6 Plus Free: خلال فترته المجانية، قد تُستخدم البيانات المجمعة لتحسين النموذج.
-- Nemotron 3 Super Free: خلال فترته المجانية، قد تُستخدم البيانات المجمعة لتحسين النموذج.
+- Nemotron 3 Super Free (نقاط نهاية NVIDIA المجانية): يُقدَّم بموجب [شروط خدمة النسخة التجريبية من واجهة NVIDIA API](https://assets.ngc.nvidia.com/products/api-catalog/legal/NVIDIA%20API%20Trial%20Terms%20of%20Service.pdf). للاستخدام التجريبي فقط، وليس للإنتاج أو البيانات الحساسة. تقوم NVIDIA بتسجيل المطالبات والمخرجات لتحسين نماذجها وخدماتها. لا ترسل بيانات شخصية أو سرية.
 - OpenAI APIs: يتم الاحتفاظ بالطلبات لمدة 30 يوما وفقا لـ [OpenAI's Data Policies](https://platform.openai.com/docs/guides/your-data).
 - Anthropic APIs: يتم الاحتفاظ بالطلبات لمدة 30 يوما وفقا لـ [Anthropic's Data Policies](https://docs.anthropic.com/en/docs/claude-code/data-usage).
 

+ 1 - 1
packages/web/src/content/docs/bs/zen.mdx

@@ -225,7 +225,7 @@ i ne koriste vaše podatke za treniranje modela, uz sljedeće izuzetke:
 - MiMo V2 Pro Free: Tokom besplatnog perioda, prikupljeni podaci mogu se koristiti za poboljšanje modela.
 - MiMo V2 Omni Free: Tokom besplatnog perioda, prikupljeni podaci mogu se koristiti za poboljšanje modela.
 - Qwen3.6 Plus Free: Tokom besplatnog perioda, prikupljeni podaci mogu se koristiti za poboljšanje modela.
-- Nemotron 3 Super Free: Tokom besplatnog perioda, prikupljeni podaci mogu se koristiti za poboljšanje modela.
+- Nemotron 3 Super Free (besplatni NVIDIA endpointi): Dostupan je prema [NVIDIA API Trial Terms of Service](https://assets.ngc.nvidia.com/products/api-catalog/legal/NVIDIA%20API%20Trial%20Terms%20of%20Service.pdf). Samo za probnu upotrebu, nije za produkciju niti osjetljive podatke. NVIDIA bilježi promptove i izlaze radi poboljšanja svojih modela i usluga. Nemojte slati lične ili povjerljive podatke.
 - OpenAI APIs: Requests are retained for 30 days in accordance with [OpenAI's Data Policies](https://platform.openai.com/docs/guides/your-data).
 - Anthropic APIs: Requests are retained for 30 days in accordance with [Anthropic's Data Policies](https://docs.anthropic.com/en/docs/claude-code/data-usage).
 

+ 1 - 1
packages/web/src/content/docs/da/zen.mdx

@@ -223,7 +223,7 @@ Alle vores modeller hostes i US. Vores udbydere følger en nul-opbevaringspoliti
 - MiMo V2 Pro Free: I den gratis periode kan indsamlede data blive brugt til at forbedre modellen.
 - MiMo V2 Omni Free: I den gratis periode kan indsamlede data blive brugt til at forbedre modellen.
 - Qwen3.6 Plus Free: I den gratis periode kan indsamlede data blive brugt til at forbedre modellen.
-- Nemotron 3 Super Free: I den gratis periode kan indsamlede data blive brugt til at forbedre modellen.
+- Nemotron 3 Super Free (gratis NVIDIA-endpoints): Leveres under [NVIDIA API Trial Terms of Service](https://assets.ngc.nvidia.com/products/api-catalog/legal/NVIDIA%20API%20Trial%20Terms%20of%20Service.pdf). Kun til prøvebrug, ikke til produktion eller følsomme data. Prompts og outputs logges af NVIDIA for at forbedre deres modeller og tjenester. Indsend ikke personlige eller fortrolige data.
 - OpenAI APIs: Anmodninger opbevares i 30 dage i overensstemmelse med [OpenAI's Data Policies](https://platform.openai.com/docs/guides/your-data).
 - Anthropic APIs: Anmodninger opbevares i 30 dage i overensstemmelse med [Anthropic's Data Policies](https://docs.anthropic.com/en/docs/claude-code/data-usage).
 

+ 1 - 1
packages/web/src/content/docs/de/zen.mdx

@@ -209,7 +209,7 @@ Alle unsere Modelle werden in den USA gehostet. Unsere Provider folgen einer Zer
 - MiMo V2 Pro Free: Während des kostenlosen Zeitraums können gesammelte Daten zur Verbesserung des Modells verwendet werden.
 - MiMo V2 Omni Free: Während des kostenlosen Zeitraums können gesammelte Daten zur Verbesserung des Modells verwendet werden.
 - Qwen3.6 Plus Free: Während des kostenlosen Zeitraums können gesammelte Daten zur Verbesserung des Modells verwendet werden.
-- Nemotron 3 Super Free: Während des kostenlosen Zeitraums können gesammelte Daten zur Verbesserung des Modells verwendet werden.
+- Nemotron 3 Super Free (kostenlose NVIDIA-Endpunkte): Bereitgestellt gemäß den [NVIDIA API Trial Terms of Service](https://assets.ngc.nvidia.com/products/api-catalog/legal/NVIDIA%20API%20Trial%20Terms%20of%20Service.pdf). Nur für Testzwecke, nicht für Produktion oder sensible Daten. Eingaben und Ausgaben werden von NVIDIA protokolliert, um seine Modelle und Dienste zu verbessern. Übermitteln Sie keine personenbezogenen oder vertraulichen Daten.
 - OpenAI APIs: Anfragen werden in Übereinstimmung mit [OpenAI's Data Policies](https://platform.openai.com/docs/guides/your-data) 30 Tage lang gespeichert.
 - Anthropic APIs: Anfragen werden in Übereinstimmung mit [Anthropic's Data Policies](https://docs.anthropic.com/en/docs/claude-code/data-usage) 30 Tage lang gespeichert.
 

+ 1 - 1
packages/web/src/content/docs/es/zen.mdx

@@ -223,7 +223,7 @@ Todos nuestros modelos están alojados en US. Nuestros proveedores siguen una po
 - MiMo V2 Pro Free: Durante su período gratuito, los datos recopilados pueden usarse para mejorar el modelo.
 - MiMo V2 Omni Free: Durante su período gratuito, los datos recopilados pueden usarse para mejorar el modelo.
 - Qwen3.6 Plus Free: Durante su período gratuito, los datos recopilados pueden usarse para mejorar el modelo.
-- Nemotron 3 Super Free: Durante su período gratuito, los datos recopilados pueden usarse para mejorar el modelo.
+- Nemotron 3 Super Free (endpoints gratuitos de NVIDIA): Se ofrece bajo los [NVIDIA API Trial Terms of Service](https://assets.ngc.nvidia.com/products/api-catalog/legal/NVIDIA%20API%20Trial%20Terms%20of%20Service.pdf). Solo para uso de prueba, no para producción ni datos sensibles. NVIDIA registra los prompts y las salidas para mejorar sus modelos y servicios. No envíes datos personales ni confidenciales.
 - OpenAI APIs: Las solicitudes se conservan durante 30 días de acuerdo con [OpenAI's Data Policies](https://platform.openai.com/docs/guides/your-data).
 - Anthropic APIs: Las solicitudes se conservan durante 30 días de acuerdo con [Anthropic's Data Policies](https://docs.anthropic.com/en/docs/claude-code/data-usage).
 

+ 1 - 1
packages/web/src/content/docs/fr/zen.mdx

@@ -209,7 +209,7 @@ Tous nos modèles sont hébergés aux US. Nos fournisseurs suivent une politique
 - MiMo V2 Pro Free : Pendant sa période gratuite, les données collectées peuvent être utilisées pour améliorer le modèle.
 - MiMo V2 Omni Free : Pendant sa période gratuite, les données collectées peuvent être utilisées pour améliorer le modèle.
 - Qwen3.6 Plus Free : Pendant sa période gratuite, les données collectées peuvent être utilisées pour améliorer le modèle.
-- Nemotron 3 Super Free : Pendant sa période gratuite, les données collectées peuvent être utilisées pour améliorer le modèle.
+- Nemotron 3 Super Free (endpoints NVIDIA gratuits) : Fourni dans le cadre des [NVIDIA API Trial Terms of Service](https://assets.ngc.nvidia.com/products/api-catalog/legal/NVIDIA%20API%20Trial%20Terms%20of%20Service.pdf). Réservé à un usage d'essai, pas à la production ni aux données sensibles. Les prompts et les sorties sont journalisés par NVIDIA pour améliorer ses modèles et services. N'envoyez pas de données personnelles ou confidentielles.
 - OpenAI APIs : Les requêtes sont conservées pendant 30 jours conformément à [OpenAI's Data Policies](https://platform.openai.com/docs/guides/your-data).
 - Anthropic APIs : Les requêtes sont conservées pendant 30 jours conformément à [Anthropic's Data Policies](https://docs.anthropic.com/en/docs/claude-code/data-usage).
 

+ 1 - 1
packages/web/src/content/docs/it/zen.mdx

@@ -223,7 +223,7 @@ Tutti i nostri modelli sono ospitati negli US. I nostri provider seguono una pol
 - MiMo V2 Pro Free: durante il periodo gratuito, i dati raccolti possono essere usati per migliorare il modello.
 - MiMo V2 Omni Free: durante il periodo gratuito, i dati raccolti possono essere usati per migliorare il modello.
 - Qwen3.6 Plus Free: durante il periodo gratuito, i dati raccolti possono essere usati per migliorare il modello.
-- Nemotron 3 Super Free: durante il periodo gratuito, i dati raccolti possono essere usati per migliorare il modello.
+- Nemotron 3 Super Free (endpoint NVIDIA gratuiti): fornito secondo i [NVIDIA API Trial Terms of Service](https://assets.ngc.nvidia.com/products/api-catalog/legal/NVIDIA%20API%20Trial%20Terms%20of%20Service.pdf). Solo per uso di prova, non per produzione o dati sensibili. NVIDIA registra prompt e output per migliorare i propri modelli e servizi. Non inviare dati personali o riservati.
 - OpenAI APIs: le richieste vengono conservate per 30 giorni in conformità con [OpenAI's Data Policies](https://platform.openai.com/docs/guides/your-data).
 - Anthropic APIs: le richieste vengono conservate per 30 giorni in conformità con [Anthropic's Data Policies](https://docs.anthropic.com/en/docs/claude-code/data-usage).
 

+ 1 - 1
packages/web/src/content/docs/ja/zen.mdx

@@ -209,7 +209,7 @@ https://opencode.ai/zen/v1/models
 - MiMo V2 Pro Free: 無料提供期間中、収集されたデータがモデル改善に使われる場合があります。
 - MiMo V2 Omni Free: 無料提供期間中、収集されたデータがモデル改善に使われる場合があります。
 - Qwen3.6 Plus Free: 無料提供期間中、収集されたデータがモデル改善に使われる場合があります。
-- Nemotron 3 Super Free: 無料提供期間中、収集されたデータがモデル改善に使われる場合があります
+- Nemotron 3 Super Free(NVIDIA の無料エンドポイント): [NVIDIA API Trial Terms of Service](https://assets.ngc.nvidia.com/products/api-catalog/legal/NVIDIA%20API%20Trial%20Terms%20of%20Service.pdf) に基づいて提供されます。試用専用であり、本番環境や機密性の高いデータには使用しないでください。プロンプトと出力は、NVIDIA が自社のモデルとサービスを改善するために記録します。個人情報や機密データは送信しないでください。
 - OpenAI APIs: リクエストは [OpenAI's Data Policies](https://platform.openai.com/docs/guides/your-data) に従って 30 日間保持されます。
 - Anthropic APIs: リクエストは [Anthropic's Data Policies](https://docs.anthropic.com/en/docs/claude-code/data-usage) に従って 30 日間保持されます。
 

+ 1 - 1
packages/web/src/content/docs/ko/zen.mdx

@@ -209,7 +209,7 @@ https://opencode.ai/zen/v1/models
 - MiMo V2 Pro Free: 무료 제공 기간에는 수집된 데이터가 모델 개선에 사용될 수 있습니다.
 - MiMo V2 Omni Free: 무료 제공 기간에는 수집된 데이터가 모델 개선에 사용될 수 있습니다.
 - Qwen3.6 Plus Free: 무료 제공 기간에는 수집된 데이터가 모델 개선에 사용될 수 있습니다.
-- Nemotron 3 Super Free: 무료 제공 기간에는 수집된 데이터가 모델 개선에 사용될 수 있습니다.
+- Nemotron 3 Super Free(NVIDIA 무료 엔드포인트): [NVIDIA API Trial Terms of Service](https://assets.ngc.nvidia.com/products/api-catalog/legal/NVIDIA%20API%20Trial%20Terms%20of%20Service.pdf)에 따라 제공됩니다. 평가판 전용이며 프로덕션 환경이나 민감한 데이터에는 사용할 수 없습니다. NVIDIA는 자사 모델과 서비스를 개선하기 위해 프롬프트와 출력을 기록합니다. 개인 정보나 기밀 데이터는 제출하지 마세요.
 - OpenAI APIs: 요청은 [OpenAI's Data Policies](https://platform.openai.com/docs/guides/your-data)에 따라 30일 동안 보관됩니다.
 - Anthropic APIs: 요청은 [Anthropic's Data Policies](https://docs.anthropic.com/en/docs/claude-code/data-usage)에 따라 30일 동안 보관됩니다.
 

+ 1 - 1
packages/web/src/content/docs/nb/zen.mdx

@@ -223,7 +223,7 @@ Alle modellene våre hostes i US. Leverandørene våre følger en policy for zer
 - MiMo V2 Pro Free: I gratisperioden kan innsamlede data brukes til å forbedre modellen.
 - MiMo V2 Omni Free: I gratisperioden kan innsamlede data brukes til å forbedre modellen.
 - Qwen3.6 Plus Free: I gratisperioden kan innsamlede data brukes til å forbedre modellen.
-- Nemotron 3 Super Free: I gratisperioden kan innsamlede data brukes til å forbedre modellen.
+- Nemotron 3 Super Free (gratis NVIDIA-endepunkter): Leveres under [NVIDIA API Trial Terms of Service](https://assets.ngc.nvidia.com/products/api-catalog/legal/NVIDIA%20API%20Trial%20Terms%20of%20Service.pdf). Kun for prøvebruk, ikke for produksjon eller sensitive data. Prompter og svar logges av NVIDIA for å forbedre modellene og tjenestene deres. Ikke send inn personopplysninger eller konfidensielle data.
 - OpenAI APIs: Forespørsler lagres i 30 dager i samsvar med [OpenAI's Data Policies](https://platform.openai.com/docs/guides/your-data).
 - Anthropic APIs: Forespørsler lagres i 30 dager i samsvar med [Anthropic's Data Policies](https://docs.anthropic.com/en/docs/claude-code/data-usage).
 

+ 1 - 1
packages/web/src/content/docs/pl/zen.mdx

@@ -224,7 +224,7 @@ Wszystkie nasze modele są hostowane w US. Nasi dostawcy stosują politykę zero
 - MiMo V2 Pro Free: W czasie darmowego okresu zebrane dane mogą być wykorzystywane do ulepszania modelu.
 - MiMo V2 Omni Free: W czasie darmowego okresu zebrane dane mogą być wykorzystywane do ulepszania modelu.
 - Qwen3.6 Plus Free: W czasie darmowego okresu zebrane dane mogą być wykorzystywane do ulepszania modelu.
-- Nemotron 3 Super Free: W czasie darmowego okresu zebrane dane mogą być wykorzystywane do ulepszania modelu.
+- Nemotron 3 Super Free (darmowe endpointy NVIDIA): Udostępniany zgodnie z [NVIDIA API Trial Terms of Service](https://assets.ngc.nvidia.com/products/api-catalog/legal/NVIDIA%20API%20Trial%20Terms%20of%20Service.pdf). Tylko do użytku próbnego, nie do produkcji ani danych wrażliwych. NVIDIA rejestruje prompty i odpowiedzi, aby ulepszać swoje modele i usługi. Nie przesyłaj danych osobowych ani poufnych.
 - OpenAI APIs: Żądania są przechowywane przez 30 dni zgodnie z [OpenAI's Data Policies](https://platform.openai.com/docs/guides/your-data).
 - Anthropic APIs: Żądania są przechowywane przez 30 dni zgodnie z [Anthropic's Data Policies](https://docs.anthropic.com/en/docs/claude-code/data-usage).
 

+ 1 - 1
packages/web/src/content/docs/pt-br/zen.mdx

@@ -209,7 +209,7 @@ Todos os nossos modelos são hospedados nos US. Nossos provedores seguem uma pol
 - MiMo V2 Pro Free: Durante seu período gratuito, os dados coletados podem ser usados para melhorar o modelo.
 - MiMo V2 Omni Free: Durante seu período gratuito, os dados coletados podem ser usados para melhorar o modelo.
 - Qwen3.6 Plus Free: Durante seu período gratuito, os dados coletados podem ser usados para melhorar o modelo.
-- Nemotron 3 Super Free: Durante seu período gratuito, os dados coletados podem ser usados para melhorar o modelo.
+- Nemotron 3 Super Free (endpoints gratuitos da NVIDIA): Fornecido sob os [NVIDIA API Trial Terms of Service](https://assets.ngc.nvidia.com/products/api-catalog/legal/NVIDIA%20API%20Trial%20Terms%20of%20Service.pdf). Apenas para uso de avaliação, não para produção nem dados sensíveis. A NVIDIA registra prompts e saídas para melhorar seus modelos e serviços. Não envie dados pessoais ou confidenciais.
 - OpenAI APIs: As solicitações são retidas por 30 dias de acordo com [OpenAI's Data Policies](https://platform.openai.com/docs/guides/your-data).
 - Anthropic APIs: As solicitações são retidas por 30 dias de acordo com [Anthropic's Data Policies](https://docs.anthropic.com/en/docs/claude-code/data-usage).
 

+ 1 - 1
packages/web/src/content/docs/ru/zen.mdx

@@ -223,7 +223,7 @@ https://opencode.ai/zen/v1/models
 - MiMo V2 Pro Free: во время бесплатного периода собранные данные могут использоваться для улучшения модели.
 - MiMo V2 Omni Free: во время бесплатного периода собранные данные могут использоваться для улучшения модели.
 - Qwen3.6 Plus Free: во время бесплатного периода собранные данные могут использоваться для улучшения модели.
-- Nemotron 3 Super Free: во время бесплатного периода собранные данные могут использоваться для улучшения модели.
+- Nemotron 3 Super Free (бесплатные эндпоинты NVIDIA): предоставляется в соответствии с [NVIDIA API Trial Terms of Service](https://assets.ngc.nvidia.com/products/api-catalog/legal/NVIDIA%20API%20Trial%20Terms%20of%20Service.pdf). Только для пробного использования, не для продакшена и не для чувствительных данных. NVIDIA логирует запросы и ответы, чтобы улучшать свои модели и сервисы. Не отправляйте персональные или конфиденциальные данные.
 - OpenAI APIs: запросы хранятся 30 дней в соответствии с [OpenAI's Data Policies](https://platform.openai.com/docs/guides/your-data).
 - Anthropic APIs: запросы хранятся 30 дней в соответствии с [Anthropic's Data Policies](https://docs.anthropic.com/en/docs/claude-code/data-usage).
 

+ 1 - 1
packages/web/src/content/docs/th/zen.mdx

@@ -211,7 +211,7 @@ https://opencode.ai/zen/v1/models
 - MiMo V2 Pro Free: ระหว่างช่วงที่เปิดให้ใช้ฟรี ข้อมูลที่เก็บรวบรวมอาจถูกนำไปใช้เพื่อปรับปรุงโมเดล
 - MiMo V2 Omni Free: ระหว่างช่วงที่เปิดให้ใช้ฟรี ข้อมูลที่เก็บรวบรวมอาจถูกนำไปใช้เพื่อปรับปรุงโมเดล
 - Qwen3.6 Plus Free: ระหว่างช่วงที่เปิดให้ใช้ฟรี ข้อมูลที่เก็บรวบรวมอาจถูกนำไปใช้เพื่อปรับปรุงโมเดล
-- Nemotron 3 Super Free: ระหว่างช่วงที่เปิดให้ใช้ฟรี ข้อมูลที่เก็บรวบรวมอาจถูกนำไปใช้เพื่อปรับปรุงโมเดล
+- Nemotron 3 Super Free (endpoint ฟรีของ NVIDIA): ให้บริการภายใต้ [NVIDIA API Trial Terms of Service](https://assets.ngc.nvidia.com/products/api-catalog/legal/NVIDIA%20API%20Trial%20Terms%20of%20Service.pdf) ใช้สำหรับการทดลองเท่านั้น ไม่เหมาะสำหรับ production หรือข้อมูลที่อ่อนไหว NVIDIA จะบันทึก prompt และ output เพื่อนำไปปรับปรุงโมเดลและบริการของตน โปรดอย่าส่งข้อมูลส่วนบุคคลหรือข้อมูลลับ.
 - OpenAI APIs: คำขอจะถูกเก็บไว้เป็นเวลา 30 วันตาม [OpenAI's Data Policies](https://platform.openai.com/docs/guides/your-data).
 - Anthropic APIs: คำขอจะถูกเก็บไว้เป็นเวลา 30 วันตาม [Anthropic's Data Policies](https://docs.anthropic.com/en/docs/claude-code/data-usage).
 

+ 1 - 1
packages/web/src/content/docs/tr/zen.mdx

@@ -209,7 +209,7 @@ Tüm modellerimiz US'de barındırılıyor. Sağlayıcılarımız zero-retention
 - MiMo V2 Pro Free: Ücretsiz döneminde toplanan veriler modeli iyileştirmek için kullanılabilir.
 - MiMo V2 Omni Free: Ücretsiz döneminde toplanan veriler modeli iyileştirmek için kullanılabilir.
 - Qwen3.6 Plus Free: Ücretsiz döneminde toplanan veriler modeli iyileştirmek için kullanılabilir.
-- Nemotron 3 Super Free: Ücretsiz döneminde toplanan veriler modeli iyileştirmek için kullanılabilir.
+- Nemotron 3 Super Free (ücretsiz NVIDIA uç noktaları): [NVIDIA API Trial Terms of Service](https://assets.ngc.nvidia.com/products/api-catalog/legal/NVIDIA%20API%20Trial%20Terms%20of%20Service.pdf) kapsamında sunulur. Yalnızca deneme amaçlıdır; üretim veya hassas veriler için uygun değildir. NVIDIA, modellerini ve hizmetlerini geliştirmek için promptları ve çıktıları kaydeder. Kişisel veya gizli veri göndermeyin.
 - OpenAI APIs: İstekler [OpenAI's Data Policies](https://platform.openai.com/docs/guides/your-data) uyarınca 30 gün boyunca saklanır.
 - Anthropic APIs: İstekler [Anthropic's Data Policies](https://docs.anthropic.com/en/docs/claude-code/data-usage) uyarınca 30 gün boyunca saklanır.
 

+ 1 - 1
packages/web/src/content/docs/zen.mdx

@@ -215,7 +215,7 @@ All our models are hosted in the US. Our providers follow a zero-retention polic
 - Big Pickle: During its free period, collected data may be used to improve the model.
 - MiniMax M2.5 Free: During its free period, collected data may be used to improve the model.
 - Qwen3.6 Plus Free: During its free period, collected data may be used to improve the model.
-- Nemotron 3 Super Free: During its free period, collected data may be used to improve the model.
+- Nemotron 3 Super Free (NVIDIA free endpoints): Provided under the [NVIDIA API Trial Terms of Service](https://assets.ngc.nvidia.com/products/api-catalog/legal/NVIDIA%20API%20Trial%20Terms%20of%20Service.pdf). Trial use only — not for production or sensitive data. Prompts and outputs are logged by NVIDIA to improve its models and services. Do not submit personal or confidential data.
 - OpenAI APIs: Requests are retained for 30 days in accordance with [OpenAI's Data Policies](https://platform.openai.com/docs/guides/your-data).
 - Anthropic APIs: Requests are retained for 30 days in accordance with [Anthropic's Data Policies](https://docs.anthropic.com/en/docs/claude-code/data-usage).
 

+ 1 - 1
packages/web/src/content/docs/zh-cn/zen.mdx

@@ -209,7 +209,7 @@ https://opencode.ai/zen/v1/models
 - MiMo V2 Pro Free:在免费期间,收集的数据可能会被用于改进模型。
 - MiMo V2 Omni Free:在免费期间,收集的数据可能会被用于改进模型。
 - Qwen3.6 Plus Free:在免费期间,收集的数据可能会被用于改进模型。
-- Nemotron 3 Super Free:在免费期间,收集的数据可能会被用于改进模型
+- Nemotron 3 Super Free(NVIDIA 免费端点):根据 [NVIDIA API Trial Terms of Service](https://assets.ngc.nvidia.com/products/api-catalog/legal/NVIDIA%20API%20Trial%20Terms%20of%20Service.pdf) 提供。仅供试用,不适用于生产环境或敏感数据。NVIDIA 会记录提示词和输出内容,以改进其模型和服务。请勿提交个人或机密数据。
 - OpenAI APIs:请求会根据 [OpenAI's Data Policies](https://platform.openai.com/docs/guides/your-data) 保留 30 天。
 - Anthropic APIs:请求会根据 [Anthropic's Data Policies](https://docs.anthropic.com/en/docs/claude-code/data-usage) 保留 30 天。
 

+ 1 - 1
packages/web/src/content/docs/zh-tw/zen.mdx

@@ -216,7 +216,7 @@ https://opencode.ai/zen/v1/models
 - MiMo V2 Pro Free: 在免費期間,收集到的資料可能會用於改進模型。
 - MiMo V2 Omni Free: 在免費期間,收集到的資料可能會用於改進模型。
 - Qwen3.6 Plus Free: 在免費期間,收集到的資料可能會用於改進模型。
-- Nemotron 3 Super Free: 在免費期間,收集到的資料可能會用於改進模型
+- Nemotron 3 Super Free(NVIDIA 免費端點):依據 [NVIDIA API Trial Terms of Service](https://assets.ngc.nvidia.com/products/api-catalog/legal/NVIDIA%20API%20Trial%20Terms%20of%20Service.pdf) 提供。僅供試用,不適用於正式環境或敏感資料。NVIDIA 會記錄提示詞與輸出內容,以改進其模型與服務。請勿提交個人或機密資料。
 - OpenAI APIs: 請求會依據 [OpenAI's Data Policies](https://platform.openai.com/docs/guides/your-data) 保留 30 天。
 - Anthropic APIs: 請求會依據 [Anthropic's Data Policies](https://docs.anthropic.com/en/docs/claude-code/data-usage) 保留 30 天。