feat: add litellmProxy provider option for explicit LiteLLM compatibility (#8658)

Co-authored-by: Mark Henderson <[email protected]>
Co-authored-by: Aiden Cline <[email protected]>
seilk 1 month ago
Parent
Commit
9b57db30d1

+ 35 - 1
packages/opencode/src/session/llm.ts

@@ -10,6 +10,8 @@ import {
   type Tool,
   type ToolSet,
   extractReasoningMiddleware,
+  tool,
+  jsonSchema,
 } from "ai"
 import { clone, mergeDeep, pipe } from "remeda"
 import { ProviderTransform } from "@/provider/transform"
@@ -140,6 +142,26 @@ export namespace LLM {
 
     const tools = await resolveTools(input)
 
+    // LiteLLM and some Anthropic proxies require the tools parameter to be present
+    // when message history contains tool calls, even if no tools are being used.
+    // Add a dummy tool that is never called to satisfy this validation.
+    // This is enabled for:
+    // 1. Providers with "litellm" in their ID or API ID (auto-detected)
+    // 2. Providers with explicit "litellmProxy: true" option (opt-in for custom gateways)
+    const isLiteLLMProxy =
+      provider.options?.["litellmProxy"] === true ||
+      input.model.providerID.toLowerCase().includes("litellm") ||
+      input.model.api.id.toLowerCase().includes("litellm")
+
+    if (isLiteLLMProxy && Object.keys(tools).length === 0 && hasToolCalls(input.messages)) {
+      tools["_noop"] = tool({
+        description:
+          "Placeholder for LiteLLM/Anthropic proxy compatibility - required when message history contains tool calls but no active tools are needed",
+        inputSchema: jsonSchema({ type: "object", properties: {} }),
+        execute: async () => ({ output: "", title: "", metadata: {} }),
+      })
+    }
+
     return streamText({
       onError(error) {
         l.error("stream error", {
@@ -171,7 +193,7 @@ export namespace LLM {
       topP: params.topP,
       topK: params.topK,
       providerOptions: ProviderTransform.providerOptions(input.model, params.options),
-      activeTools: Object.keys(tools).filter((x) => x !== "invalid"),
+      activeTools: Object.keys(tools).filter((x) => x !== "invalid" && x !== "_noop"),
       tools,
       maxOutputTokens,
       abortSignal: input.abort,
@@ -238,4 +260,16 @@ export namespace LLM {
     }
     return input.tools
   }
+
+  // Check if messages contain any tool-call content
+  // Used to determine if a dummy tool should be added for LiteLLM proxy compatibility
+  export function hasToolCalls(messages: ModelMessage[]): boolean {
+    for (const msg of messages) {
+      if (!Array.isArray(msg.content)) continue
+      for (const part of msg.content) {
+        if (part.type === "tool-call" || part.type === "tool-result") return true
+      }
+    }
+    return false
+  }
 }
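
For custom gateways whose provider or API ID does not contain "litellm", the new flag forces the same behavior. A minimal sketch of the detection against a hypothetical gateway; the provider and model literals below are illustrative stand-ins for the real objects, reduced to the fields the check reads:

// Hypothetical shapes; only the "litellmProxy" option key is defined by this change.
const provider = { options: { litellmProxy: true } }
const model = { providerID: "my-gateway", api: { id: "anthropic/claude-sonnet" } }

const isLiteLLMProxy =
  provider.options?.["litellmProxy"] === true ||
  model.providerID.toLowerCase().includes("litellm") ||
  model.api.id.toLowerCase().includes("litellm")
// => true: the explicit opt-in applies even though neither ID mentions "litellm"

In an opencode config this presumably lands in the provider's options map (e.g. "options": { "litellmProxy": true }). Note the _noop tool it unlocks is excluded from activeTools above, so the model is never offered it while the proxy's validation still sees a non-empty tools parameter.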

+ 11 - 0
packages/opencode/src/session/message-v2.ts

@@ -533,6 +533,17 @@ export namespace MessageV2 {
                 errorText: part.state.error,
                 callProviderMetadata: part.metadata,
               })
+            // Handle pending/running tool calls to prevent dangling tool_use blocks
+            // Anthropic/Claude APIs require every tool_use to have a corresponding tool_result
+            if (part.state.status === "pending" || part.state.status === "running")
+              assistantMessage.parts.push({
+                type: ("tool-" + part.tool) as `tool-${string}`,
+                state: "output-error",
+                toolCallId: part.callID,
+                input: part.state.input,
+                errorText: "[Tool execution was interrupted]",
+                callProviderMetadata: part.metadata,
+              })
           }
           if (part.type === "reasoning") {
             assistantMessage.parts.push({
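
The new branch closes interrupted tool calls instead of dropping them: a pending or running call still converts to a tool-call part, and a synthesized error result is emitted so Anthropic-style APIs see a matched tool_use/tool_result pair. A minimal sketch of the resulting message shape, mirroring the new message-v2 test below (IDs and inputs are illustrative):

import type { ModelMessage } from "ai"

// An interrupted "running" bash call now converts to a tool-call plus a
// synthesized error tool-result, rather than a dangling tool-call:
const repaired: ModelMessage[] = [
  {
    role: "assistant",
    content: [{ type: "tool-call", toolCallId: "call-1", toolName: "bash", input: { cmd: "ls" } }],
  },
  {
    role: "tool",
    content: [
      {
        type: "tool-result",
        toolCallId: "call-1",
        toolName: "bash",
        output: { type: "error-text", value: "[Tool execution was interrupted]" },
      },
    ],
  },
]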

+ 90 - 0
packages/opencode/test/session/llm.test.ts

@@ -0,0 +1,90 @@
+import { describe, expect, test } from "bun:test"
+import { LLM } from "../../src/session/llm"
+import type { ModelMessage } from "ai"
+
+describe("session.llm.hasToolCalls", () => {
+  test("returns false for empty messages array", () => {
+    expect(LLM.hasToolCalls([])).toBe(false)
+  })
+
+  test("returns false for messages with only text content", () => {
+    const messages: ModelMessage[] = [
+      {
+        role: "user",
+        content: [{ type: "text", text: "Hello" }],
+      },
+      {
+        role: "assistant",
+        content: [{ type: "text", text: "Hi there" }],
+      },
+    ]
+    expect(LLM.hasToolCalls(messages)).toBe(false)
+  })
+
+  test("returns true when messages contain tool-call", () => {
+    const messages = [
+      {
+        role: "user",
+        content: [{ type: "text", text: "Run a command" }],
+      },
+      {
+        role: "assistant",
+        content: [
+          {
+            type: "tool-call",
+            toolCallId: "call-123",
+            toolName: "bash",
+          },
+        ],
+      },
+    ] as ModelMessage[]
+    expect(LLM.hasToolCalls(messages)).toBe(true)
+  })
+
+  test("returns true when messages contain tool-result", () => {
+    const messages = [
+      {
+        role: "tool",
+        content: [
+          {
+            type: "tool-result",
+            toolCallId: "call-123",
+            toolName: "bash",
+          },
+        ],
+      },
+    ] as ModelMessage[]
+    expect(LLM.hasToolCalls(messages)).toBe(true)
+  })
+
+  test("returns false for messages with string content", () => {
+    const messages: ModelMessage[] = [
+      {
+        role: "user",
+        content: "Hello world",
+      },
+      {
+        role: "assistant",
+        content: "Hi there",
+      },
+    ]
+    expect(LLM.hasToolCalls(messages)).toBe(false)
+  })
+
+  test("returns true when tool-call is mixed with text content", () => {
+    const messages = [
+      {
+        role: "assistant",
+        content: [
+          { type: "text", text: "Let me run that command" },
+          {
+            type: "tool-call",
+            toolCallId: "call-456",
+            toolName: "read",
+          },
+        ],
+      },
+    ] as ModelMessage[]
+    expect(LLM.hasToolCalls(messages)).toBe(true)
+  })
+})

+ 90 - 0
packages/opencode/test/session/message-v2.test.ts

@@ -569,4 +569,94 @@ describe("session.message-v2.toModelMessage", () => {
 
     expect(MessageV2.toModelMessage(input)).toStrictEqual([])
   })
+
+  test("converts pending/running tool calls to error results to prevent dangling tool_use", () => {
+    const userID = "m-user"
+    const assistantID = "m-assistant"
+
+    const input: MessageV2.WithParts[] = [
+      {
+        info: userInfo(userID),
+        parts: [
+          {
+            ...basePart(userID, "u1"),
+            type: "text",
+            text: "run tool",
+          },
+        ] as MessageV2.Part[],
+      },
+      {
+        info: assistantInfo(assistantID, userID),
+        parts: [
+          {
+            ...basePart(assistantID, "a1"),
+            type: "tool",
+            callID: "call-pending",
+            tool: "bash",
+            state: {
+              status: "pending",
+              input: { cmd: "ls" },
+              raw: "",
+            },
+          },
+          {
+            ...basePart(assistantID, "a2"),
+            type: "tool",
+            callID: "call-running",
+            tool: "read",
+            state: {
+              status: "running",
+              input: { path: "/tmp" },
+              time: { start: 0 },
+            },
+          },
+        ] as MessageV2.Part[],
+      },
+    ]
+
+    const result = MessageV2.toModelMessage(input)
+
+    expect(result).toStrictEqual([
+      {
+        role: "user",
+        content: [{ type: "text", text: "run tool" }],
+      },
+      {
+        role: "assistant",
+        content: [
+          {
+            type: "tool-call",
+            toolCallId: "call-pending",
+            toolName: "bash",
+            input: { cmd: "ls" },
+            providerExecuted: undefined,
+          },
+          {
+            type: "tool-call",
+            toolCallId: "call-running",
+            toolName: "read",
+            input: { path: "/tmp" },
+            providerExecuted: undefined,
+          },
+        ],
+      },
+      {
+        role: "tool",
+        content: [
+          {
+            type: "tool-result",
+            toolCallId: "call-pending",
+            toolName: "bash",
+            output: { type: "error-text", value: "[Tool execution was interrupted]" },
+          },
+          {
+            type: "tool-result",
+            toolCallId: "call-running",
+            toolName: "read",
+            output: { type: "error-text", value: "[Tool execution was interrupted]" },
+          },
+        ],
+      },
+    ])
+  })
 })