فهرست منبع

fix(core): prevent agent loop from stopping after tool calls with OpenAI-compatible providers (#14973)

Co-authored-by: Aiden Cline <[email protected]>
Co-authored-by: Aiden Cline <[email protected]>
Valentin Vivaldi 2 هفته پیش
والد
کامیت
733a3bd031
2 فایل تغییر یافته به همراه 40 افزوده شده و 2 حذف شده
  1. 9 0
      packages/opencode/src/session/prompt.ts
  2. 31 2
      packages/opencode/test/session/prompt-effect.test.ts

+ 9 - 0
packages/opencode/src/session/prompt.ts

@@ -1362,9 +1362,18 @@ NOTE: At any point in time through this workflow you should feel free to ask the
             }
 
             if (!lastUser) throw new Error("No user message found in stream. This should never happen.")
+
+            const lastAssistantMsg = msgs.findLast(
+              (msg) => msg.info.role === "assistant" && msg.info.id === lastAssistant?.id,
+            )
+            // Some providers return "stop" even when the assistant message contains tool calls.
+            // Keep the loop running so tool results can be sent back to the model.
+            const hasToolCalls = lastAssistantMsg?.parts.some((part) => part.type === "tool") ?? false
+
             if (
               lastAssistant?.finish &&
               !["tool-calls"].includes(lastAssistant.finish) &&
+              !hasToolCalls &&
               lastUser.id < lastAssistant.id
             ) {
               log.info("exiting loop", { sessionID })

+ 31 - 2
packages/opencode/test/session/prompt-effect.test.ts

@@ -3,7 +3,6 @@ import { expect, spyOn } from "bun:test"
 import { Cause, Effect, Exit, Fiber, Layer } from "effect"
 import path from "path"
 import z from "zod"
-import type { Agent } from "../../src/agent/agent"
 import { Agent as AgentSvc } from "../../src/agent/agent"
 import { Bus } from "../../src/bus"
 import { Command } from "../../src/command"
@@ -35,7 +34,7 @@ import { Log } from "../../src/util/log"
 import * as CrossSpawnSpawner from "../../src/effect/cross-spawn-spawner"
 import { provideTmpdirInstance, provideTmpdirServer } from "../fixture/fixture"
 import { testEffect } from "../lib/effect"
-import { TestLLMServer } from "../lib/llm-server"
+import { reply, TestLLMServer } from "../lib/llm-server"
 
 Log.init({ print: false })
 
@@ -453,6 +452,36 @@ it.live("loop continues when finish is tool-calls", () =>
   ),
 )
 
+it.live("loop continues when finish is stop but assistant has tool parts", () =>
+  provideTmpdirServer(
+    Effect.fnUntraced(function* ({ llm }) {
+      const prompt = yield* SessionPrompt.Service
+      const sessions = yield* Session.Service
+      const session = yield* sessions.create({
+        title: "Pinned",
+        permission: [{ permission: "*", pattern: "*", action: "allow" }],
+      })
+      yield* prompt.prompt({
+        sessionID: session.id,
+        agent: "build",
+        noReply: true,
+        parts: [{ type: "text", text: "hello" }],
+      })
+      yield* llm.push(reply().tool("first", { value: "first" }).stop())
+      yield* llm.text("second")
+
+      const result = yield* prompt.loop({ sessionID: session.id })
+      expect(yield* llm.calls).toBe(2)
+      expect(result.info.role).toBe("assistant")
+      if (result.info.role === "assistant") {
+        expect(result.parts.some((part) => part.type === "text" && part.text === "second")).toBe(true)
+        expect(result.info.finish).toBe("stop")
+      }
+    }),
+    { git: true, config: providerCfg },
+  ),
+)
+
 it.live("failed subtask preserves metadata on error tool state", () =>
   provideTmpdirServer(
     Effect.fnUntraced(function* ({ llm }) {