@@ -1,6 +1,15 @@
-import { describe, expect, test } from "bun:test"
-import { LLM } from "../../src/session/llm"
+import { afterAll, beforeAll, beforeEach, describe, expect, test } from "bun:test"
+import path from "path"
 import type { ModelMessage } from "ai"
+import { LLM } from "../../src/session/llm"
+import { Global } from "../../src/global"
+import { Instance } from "../../src/project/instance"
+import { Provider } from "../../src/provider/provider"
+import { ProviderTransform } from "../../src/provider/transform"
+import { ModelsDev } from "../../src/provider/models"
+import { tmpdir } from "../fixture/fixture"
+import type { Agent } from "../../src/agent/agent"
+import type { MessageV2 } from "../../src/session/message-v2"
 
 describe("session.llm.hasToolCalls", () => {
   test("returns false for empty messages array", () => {
@@ -88,3 +97,609 @@ describe("session.llm.hasToolCalls", () => {
     expect(LLM.hasToolCalls(messages)).toBe(true)
   })
 })
+
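+// Details captured from a single request hitting the stub server.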
+type Capture = {
+  url: URL
+  headers: Headers
+  body: Record<string, unknown>
+}
+
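+// Shared stub-server state: the Bun server plus a FIFO queue of expected requests and canned responses.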
+const state = {
|
|
|
|
|
+ server: null as ReturnType<typeof Bun.serve> | null,
|
|
|
|
|
+ queue: [] as Array<{ path: string; response: Response; resolve: (value: Capture) => void }>,
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
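+// Minimal deferred helper: a promise with its resolve function exposed.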
+function deferred<T>() {
+  const result = {} as { promise: Promise<T>; resolve: (value: T) => void }
+  result.promise = new Promise((resolve) => {
+    result.resolve = resolve
+  })
+  return result
+}
+
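+// Queue an expectation; the returned promise resolves with the captured request once the server handles it.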
+function waitRequest(pathname: string, response: Response) {
+  const pending = deferred<Capture>()
+  state.queue.push({ path: pathname, response, resolve: pending.resolve })
+  return pending.promise
+}
+
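+// Start the stub server on a random port; every incoming request consumes the next queued expectation.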
+beforeAll(() => {
+  state.server = Bun.serve({
+    port: 0,
+    async fetch(req) {
+      const next = state.queue.shift()
+      if (!next) {
+        return new Response("unexpected request", { status: 500 })
+      }
+
+      const url = new URL(req.url)
+      const body = (await req.json()) as Record<string, unknown>
+      next.resolve({ url, headers: req.headers, body })
+
+      if (!url.pathname.endsWith(next.path)) {
+        return new Response("not found", { status: 404 })
+      }
+
+      return next.response
+    },
+  })
+})
+
+beforeEach(() => {
+  state.queue.length = 0
+})
+
+afterAll(() => {
+  state.server?.stop()
+})
+
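+// SSE body imitating a minimal OpenAI chat-completions stream: a role chunk, one text delta, then a stop chunk.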
+function createChatStream(text: string) {
+  const payload =
+    [
+      `data: ${JSON.stringify({
+        id: "chatcmpl-1",
+        object: "chat.completion.chunk",
+        choices: [{ delta: { role: "assistant" } }],
+      })}`,
+      `data: ${JSON.stringify({
+        id: "chatcmpl-1",
+        object: "chat.completion.chunk",
+        choices: [{ delta: { content: text } }],
+      })}`,
+      `data: ${JSON.stringify({
+        id: "chatcmpl-1",
+        object: "chat.completion.chunk",
+        choices: [{ delta: {}, finish_reason: "stop" }],
+      })}`,
+      "data: [DONE]",
+    ].join("\n\n") + "\n\n"
+
+  const encoder = new TextEncoder()
+  return new ReadableStream<Uint8Array>({
+    start(controller) {
+      controller.enqueue(encoder.encode(payload))
+      controller.close()
+    },
+  })
+}
+
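+// Read a provider/model pair from the checked-in models.dev fixture, throwing if either is missing.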
+async function loadFixture(providerID: string, modelID: string) {
+  const fixturePath = path.join(import.meta.dir, "../tool/fixtures/models-api.json")
+  const data = (await Bun.file(fixturePath).json()) as Record<string, ModelsDev.Provider>
+  const provider = data[providerID]
+  if (!provider) {
+    throw new Error(`Missing provider in fixture: ${providerID}`)
+  }
+  const model = provider.models[modelID]
+  if (!model) {
+    throw new Error(`Missing model in fixture: ${modelID}`)
+  }
+  return { provider, model }
+}
+
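+// Seed the cached models.json on disk and reset ModelsDev so the next lookup re-reads it.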
+async function writeModels(models: Record<string, ModelsDev.Provider>) {
+  const modelsPath = path.join(Global.Path.cache, "models.json")
+  await Bun.write(modelsPath, JSON.stringify(models))
+  ModelsDev.Data.reset()
+}
+
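+// Generic SSE helpers: each chunk becomes a `data:` event, optionally terminated by [DONE].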
+function createEventStream(chunks: unknown[], includeDone = false) {
+  const lines = chunks.map((chunk) => `data: ${typeof chunk === "string" ? chunk : JSON.stringify(chunk)}`)
+  if (includeDone) {
+    lines.push("data: [DONE]")
+  }
+  const payload = lines.join("\n\n") + "\n\n"
+  const encoder = new TextEncoder()
+  return new ReadableStream<Uint8Array>({
+    start(controller) {
+      controller.enqueue(encoder.encode(payload))
+      controller.close()
+    },
+  })
+}
+
+function createEventResponse(chunks: unknown[], includeDone = false) {
+  return new Response(createEventStream(chunks, includeDone), {
+    status: 200,
+    headers: { "Content-Type": "text/event-stream" },
+  })
+}
+
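+// Each test points a real provider config at the stub server, drains one LLM.stream call, and asserts on the captured wire payload.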
+describe("session.llm.stream", () => {
|
|
|
|
|
+ test("sends temperature, tokens, and reasoning options for openai-compatible models", async () => {
|
|
|
|
|
+ const server = state.server
|
|
|
|
|
+ if (!server) {
|
|
|
|
|
+ throw new Error("Server not initialized")
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ const providerID = "alibaba"
|
|
|
|
|
+ const modelID = "qwen-plus"
|
|
|
|
|
+ const fixture = await loadFixture(providerID, modelID)
|
|
|
|
|
+ const provider = fixture.provider
|
|
|
|
|
+ const model = fixture.model
|
|
|
|
|
+
|
|
|
|
|
+ const request = waitRequest(
|
|
|
|
|
+ "/chat/completions",
|
|
|
|
|
+ new Response(createChatStream("Hello"), {
|
|
|
|
|
+ status: 200,
|
|
|
|
|
+ headers: { "Content-Type": "text/event-stream" },
|
|
|
|
|
+ }),
|
|
|
|
|
+ )
|
|
|
|
|
+
|
|
|
|
|
+ await writeModels({ [providerID]: provider })
|
|
|
|
|
+
|
|
|
|
|
+ await using tmp = await tmpdir({
|
|
|
|
|
+ init: async (dir) => {
|
|
|
|
|
+ await Bun.write(
|
|
|
|
|
+ path.join(dir, "opencode.json"),
|
|
|
|
|
+ JSON.stringify({
|
|
|
|
|
+ $schema: "https://opencode.ai/config.json",
|
|
|
|
|
+ enabled_providers: [providerID],
|
|
|
|
|
+ provider: {
|
|
|
|
|
+ [providerID]: {
|
|
|
|
|
+ options: {
|
|
|
|
|
+ apiKey: "test-key",
|
|
|
|
|
+ baseURL: `${server.url.origin}/v1`,
|
|
|
|
|
+ },
|
|
|
|
|
+ },
|
|
|
|
|
+ },
|
|
|
|
|
+ }),
|
|
|
|
|
+ )
|
|
|
|
|
+ },
|
|
|
|
|
+ })
|
|
|
|
|
+
|
|
|
|
|
+ await Instance.provide({
|
|
|
|
|
+ directory: tmp.path,
|
|
|
|
|
+ fn: async () => {
|
|
|
|
|
+ const resolved = await Provider.getModel(providerID, model.id)
|
|
|
|
|
+ const sessionID = "session-test-1"
|
|
|
|
|
+ const agent = {
|
|
|
|
|
+ name: "test",
|
|
|
|
|
+ mode: "primary",
|
|
|
|
|
+ options: {},
|
|
|
|
|
+ permission: [{ permission: "*", pattern: "*", action: "allow" }],
|
|
|
|
|
+ temperature: 0.4,
|
|
|
|
|
+ topP: 0.8,
|
|
|
|
|
+ } satisfies Agent.Info
|
|
|
|
|
+
|
|
|
|
|
+ const user = {
|
|
|
|
|
+ id: "user-1",
|
|
|
|
|
+ sessionID,
|
|
|
|
|
+ role: "user",
|
|
|
|
|
+ time: { created: Date.now() },
|
|
|
|
|
+ agent: agent.name,
|
|
|
|
|
+ model: { providerID, modelID: resolved.id },
|
|
|
|
|
+ variant: "high",
|
|
|
|
|
+ } satisfies MessageV2.User
|
|
|
|
|
+
|
|
|
|
|
+ const stream = await LLM.stream({
|
|
|
|
|
+ user,
|
|
|
|
|
+ sessionID,
|
|
|
|
|
+ model: resolved,
|
|
|
|
|
+ agent,
|
|
|
|
|
+ system: ["You are a helpful assistant."],
|
|
|
|
|
+ abort: new AbortController().signal,
|
|
|
|
|
+ messages: [{ role: "user", content: "Hello" }],
|
|
|
|
|
+ tools: {},
|
|
|
|
|
+ })
|
|
|
|
|
+
|
|
|
|
|
+ for await (const _ of stream.fullStream) {
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ const capture = await request
|
|
|
|
|
+ const body = capture.body
|
|
|
|
|
+ const headers = capture.headers
|
|
|
|
|
+ const url = capture.url
|
|
|
|
|
+
|
|
|
|
|
+ expect(url.pathname.startsWith("/v1/")).toBe(true)
|
|
|
|
|
+ expect(url.pathname.endsWith("/chat/completions")).toBe(true)
|
|
|
|
|
+ expect(headers.get("Authorization")).toBe("Bearer test-key")
|
|
|
|
|
+ expect(headers.get("User-Agent") ?? "").toMatch(/^opencode\//)
|
|
|
|
|
+
|
|
|
|
|
+ expect(body.model).toBe(resolved.api.id)
|
|
|
|
|
+ expect(body.temperature).toBe(0.4)
|
|
|
|
|
+ expect(body.top_p).toBe(0.8)
|
|
|
|
|
+ expect(body.stream).toBe(true)
|
|
|
|
|
+
|
|
|
|
|
+ const maxTokens = (body.max_tokens as number | undefined) ?? (body.max_output_tokens as number | undefined)
|
|
|
|
|
+ const expectedMaxTokens = ProviderTransform.maxOutputTokens(
|
|
|
|
|
+ resolved.api.npm,
|
|
|
|
|
+ ProviderTransform.options({ model: resolved, sessionID }),
|
|
|
|
|
+ resolved.limit.output,
|
|
|
|
|
+ LLM.OUTPUT_TOKEN_MAX,
|
|
|
|
|
+ )
|
|
|
|
|
+ expect(maxTokens).toBe(expectedMaxTokens)
|
|
|
|
|
+
|
|
|
|
|
+ const reasoning = (body.reasoningEffort as string | undefined) ?? (body.reasoning_effort as string | undefined)
|
|
|
|
|
+ expect(reasoning).toBe("high")
|
|
|
|
|
+ },
|
|
|
|
|
+ })
|
|
|
|
|
+ })
|
|
|
|
|
+
|
|
|
|
|
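+  // gpt-5.1 is read from the github-copilot fixture entry but mounted under an inline "openai" provider definition.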
+ test("sends responses API payload for OpenAI models", async () => {
|
|
|
|
|
+ const server = state.server
|
|
|
|
|
+ if (!server) {
|
|
|
|
|
+ throw new Error("Server not initialized")
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ const source = await loadFixture("github-copilot", "gpt-5.1")
|
|
|
|
|
+ const model = source.model
|
|
|
|
|
+
|
|
|
|
|
+ const responseChunks = [
|
|
|
|
|
+ {
|
|
|
|
|
+ type: "response.created",
|
|
|
|
|
+ response: {
|
|
|
|
|
+ id: "resp-1",
|
|
|
|
|
+ created_at: Math.floor(Date.now() / 1000),
|
|
|
|
|
+ model: model.id,
|
|
|
|
|
+ service_tier: null,
|
|
|
|
|
+ },
|
|
|
|
|
+ },
|
|
|
|
|
+ {
|
|
|
|
|
+ type: "response.output_text.delta",
|
|
|
|
|
+ item_id: "item-1",
|
|
|
|
|
+ delta: "Hello",
|
|
|
|
|
+ logprobs: null,
|
|
|
|
|
+ },
|
|
|
|
|
+ {
|
|
|
|
|
+ type: "response.completed",
|
|
|
|
|
+ response: {
|
|
|
|
|
+ incomplete_details: null,
|
|
|
|
|
+ usage: {
|
|
|
|
|
+ input_tokens: 1,
|
|
|
|
|
+ input_tokens_details: null,
|
|
|
|
|
+ output_tokens: 1,
|
|
|
|
|
+ output_tokens_details: null,
|
|
|
|
|
+ },
|
|
|
|
|
+ service_tier: null,
|
|
|
|
|
+ },
|
|
|
|
|
+ },
|
|
|
|
|
+ ]
|
|
|
|
|
+ const request = waitRequest("/responses", createEventResponse(responseChunks, true))
|
|
|
|
|
+
|
|
|
|
|
+ await writeModels({})
|
|
|
|
|
+
|
|
|
|
|
+ await using tmp = await tmpdir({
|
|
|
|
|
+ init: async (dir) => {
|
|
|
|
|
+ await Bun.write(
|
|
|
|
|
+ path.join(dir, "opencode.json"),
|
|
|
|
|
+ JSON.stringify({
|
|
|
|
|
+ $schema: "https://opencode.ai/config.json",
|
|
|
|
|
+ enabled_providers: ["openai"],
|
|
|
|
|
+ provider: {
|
|
|
|
|
+ openai: {
|
|
|
|
|
+ name: "OpenAI",
|
|
|
|
|
+ env: ["OPENAI_API_KEY"],
|
|
|
|
|
+ npm: "@ai-sdk/openai",
|
|
|
|
|
+ api: "https://api.openai.com/v1",
|
|
|
|
|
+ models: {
|
|
|
|
|
+ [model.id]: model,
|
|
|
|
|
+ },
|
|
|
|
|
+ options: {
|
|
|
|
|
+ apiKey: "test-openai-key",
|
|
|
|
|
+ baseURL: `${server.url.origin}/v1`,
|
|
|
|
|
+ },
|
|
|
|
|
+ },
|
|
|
|
|
+ },
|
|
|
|
|
+ }),
|
|
|
|
|
+ )
|
|
|
|
|
+ },
|
|
|
|
|
+ })
|
|
|
|
|
+
|
|
|
|
|
+ await Instance.provide({
|
|
|
|
|
+ directory: tmp.path,
|
|
|
|
|
+ fn: async () => {
|
|
|
|
|
+ const resolved = await Provider.getModel("openai", model.id)
|
|
|
|
|
+ const sessionID = "session-test-2"
|
|
|
|
|
+ const agent = {
|
|
|
|
|
+ name: "test",
|
|
|
|
|
+ mode: "primary",
|
|
|
|
|
+ options: {},
|
|
|
|
|
+ permission: [{ permission: "*", pattern: "*", action: "allow" }],
|
|
|
|
|
+ temperature: 0.2,
|
|
|
|
|
+ } satisfies Agent.Info
|
|
|
|
|
+
|
|
|
|
|
+ const user = {
|
|
|
|
|
+ id: "user-2",
|
|
|
|
|
+ sessionID,
|
|
|
|
|
+ role: "user",
|
|
|
|
|
+ time: { created: Date.now() },
|
|
|
|
|
+ agent: agent.name,
|
|
|
|
|
+ model: { providerID: "openai", modelID: resolved.id },
|
|
|
|
|
+ variant: "high",
|
|
|
|
|
+ } satisfies MessageV2.User
|
|
|
|
|
+
|
|
|
|
|
+ const stream = await LLM.stream({
|
|
|
|
|
+ user,
|
|
|
|
|
+ sessionID,
|
|
|
|
|
+ model: resolved,
|
|
|
|
|
+ agent,
|
|
|
|
|
+ system: ["You are a helpful assistant."],
|
|
|
|
|
+ abort: new AbortController().signal,
|
|
|
|
|
+ messages: [{ role: "user", content: "Hello" }],
|
|
|
|
|
+ tools: {},
|
|
|
|
|
+ })
|
|
|
|
|
+
|
|
|
|
|
+ for await (const _ of stream.fullStream) {
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ const capture = await request
|
|
|
|
|
+ const body = capture.body
|
|
|
|
|
+
|
|
|
|
|
+ expect(capture.url.pathname.endsWith("/responses")).toBe(true)
|
|
|
|
|
+ expect(body.model).toBe(resolved.api.id)
|
|
|
|
|
+ expect(body.stream).toBe(true)
|
|
|
|
|
+ expect((body.reasoning as { effort?: string } | undefined)?.effort).toBe("high")
|
|
|
|
|
+
|
|
|
|
|
+ const maxTokens = body.max_output_tokens as number | undefined
|
|
|
|
|
+ const expectedMaxTokens = ProviderTransform.maxOutputTokens(
|
|
|
|
|
+ resolved.api.npm,
|
|
|
|
|
+ ProviderTransform.options({ model: resolved, sessionID }),
|
|
|
|
|
+ resolved.limit.output,
|
|
|
|
|
+ LLM.OUTPUT_TOKEN_MAX,
|
|
|
|
|
+ )
|
|
|
|
|
+ expect(maxTokens).toBe(expectedMaxTokens)
|
|
|
|
|
+ },
|
|
|
|
|
+ })
|
|
|
|
|
+ })
|
|
|
|
|
+
|
|
|
|
|
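+  // Anthropic's messages API carries max_tokens at the top level, derived through ProviderTransform.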
+ test("sends messages API payload for Anthropic models", async () => {
|
|
|
|
|
+ const server = state.server
|
|
|
|
|
+ if (!server) {
|
|
|
|
|
+ throw new Error("Server not initialized")
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ const providerID = "anthropic"
|
|
|
|
|
+ const modelID = "claude-3-5-sonnet-20241022"
|
|
|
|
|
+ const fixture = await loadFixture(providerID, modelID)
|
|
|
|
|
+ const provider = fixture.provider
|
|
|
|
|
+ const model = fixture.model
|
|
|
|
|
+
|
|
|
|
|
+ const chunks = [
|
|
|
|
|
+ {
|
|
|
|
|
+ type: "message_start",
|
|
|
|
|
+ message: {
|
|
|
|
|
+ id: "msg-1",
|
|
|
|
|
+ model: model.id,
|
|
|
|
|
+ usage: {
|
|
|
|
|
+ input_tokens: 3,
|
|
|
|
|
+ cache_creation_input_tokens: null,
|
|
|
|
|
+ cache_read_input_tokens: null,
|
|
|
|
|
+ },
|
|
|
|
|
+ },
|
|
|
|
|
+ },
|
|
|
|
|
+ {
|
|
|
|
|
+ type: "content_block_start",
|
|
|
|
|
+ index: 0,
|
|
|
|
|
+ content_block: { type: "text", text: "" },
|
|
|
|
|
+ },
|
|
|
|
|
+ {
|
|
|
|
|
+ type: "content_block_delta",
|
|
|
|
|
+ index: 0,
|
|
|
|
|
+ delta: { type: "text_delta", text: "Hello" },
|
|
|
|
|
+ },
|
|
|
|
|
+ { type: "content_block_stop", index: 0 },
|
|
|
|
|
+ {
|
|
|
|
|
+ type: "message_delta",
|
|
|
|
|
+ delta: { stop_reason: "end_turn", stop_sequence: null, container: null },
|
|
|
|
|
+ usage: {
|
|
|
|
|
+ input_tokens: 3,
|
|
|
|
|
+ output_tokens: 2,
|
|
|
|
|
+ cache_creation_input_tokens: null,
|
|
|
|
|
+ cache_read_input_tokens: null,
|
|
|
|
|
+ },
|
|
|
|
|
+ },
|
|
|
|
|
+ { type: "message_stop" },
|
|
|
|
|
+ ]
|
|
|
|
|
+ const request = waitRequest("/messages", createEventResponse(chunks))
|
|
|
|
|
+
|
|
|
|
|
+ await writeModels({ [providerID]: provider })
|
|
|
|
|
+
|
|
|
|
|
+ await using tmp = await tmpdir({
|
|
|
|
|
+ init: async (dir) => {
|
|
|
|
|
+ await Bun.write(
|
|
|
|
|
+ path.join(dir, "opencode.json"),
|
|
|
|
|
+ JSON.stringify({
|
|
|
|
|
+ $schema: "https://opencode.ai/config.json",
|
|
|
|
|
+ enabled_providers: [providerID],
|
|
|
|
|
+ provider: {
|
|
|
|
|
+ [providerID]: {
|
|
|
|
|
+ options: {
|
|
|
|
|
+ apiKey: "test-anthropic-key",
|
|
|
|
|
+ baseURL: `${server.url.origin}/v1`,
|
|
|
|
|
+ },
|
|
|
|
|
+ },
|
|
|
|
|
+ },
|
|
|
|
|
+ }),
|
|
|
|
|
+ )
|
|
|
|
|
+ },
|
|
|
|
|
+ })
|
|
|
|
|
+
|
|
|
|
|
+ await Instance.provide({
|
|
|
|
|
+ directory: tmp.path,
|
|
|
|
|
+ fn: async () => {
|
|
|
|
|
+ const resolved = await Provider.getModel(providerID, model.id)
|
|
|
|
|
+ const sessionID = "session-test-3"
|
|
|
|
|
+ const agent = {
|
|
|
|
|
+ name: "test",
|
|
|
|
|
+ mode: "primary",
|
|
|
|
|
+ options: {},
|
|
|
|
|
+ permission: [{ permission: "*", pattern: "*", action: "allow" }],
|
|
|
|
|
+ temperature: 0.4,
|
|
|
|
|
+ topP: 0.9,
|
|
|
|
|
+ } satisfies Agent.Info
|
|
|
|
|
+
|
|
|
|
|
+ const user = {
|
|
|
|
|
+ id: "user-3",
|
|
|
|
|
+ sessionID,
|
|
|
|
|
+ role: "user",
|
|
|
|
|
+ time: { created: Date.now() },
|
|
|
|
|
+ agent: agent.name,
|
|
|
|
|
+ model: { providerID, modelID: resolved.id },
|
|
|
|
|
+ } satisfies MessageV2.User
|
|
|
|
|
+
|
|
|
|
|
+ const stream = await LLM.stream({
|
|
|
|
|
+ user,
|
|
|
|
|
+ sessionID,
|
|
|
|
|
+ model: resolved,
|
|
|
|
|
+ agent,
|
|
|
|
|
+ system: ["You are a helpful assistant."],
|
|
|
|
|
+ abort: new AbortController().signal,
|
|
|
|
|
+ messages: [{ role: "user", content: "Hello" }],
|
|
|
|
|
+ tools: {},
|
|
|
|
|
+ })
|
|
|
|
|
+
|
|
|
|
|
+ for await (const _ of stream.fullStream) {
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ const capture = await request
|
|
|
|
|
+ const body = capture.body
|
|
|
|
|
+
|
|
|
|
|
+ expect(capture.url.pathname.endsWith("/messages")).toBe(true)
|
|
|
|
|
+ expect(body.model).toBe(resolved.api.id)
|
|
|
|
|
+ expect(body.max_tokens).toBe(
|
|
|
|
|
+ ProviderTransform.maxOutputTokens(
|
|
|
|
|
+ resolved.api.npm,
|
|
|
|
|
+ ProviderTransform.options({ model: resolved, sessionID }),
|
|
|
|
|
+ resolved.limit.output,
|
|
|
|
|
+ LLM.OUTPUT_TOKEN_MAX,
|
|
|
|
|
+ ),
|
|
|
|
|
+ )
|
|
|
|
|
+ expect(body.temperature).toBe(0.4)
|
|
|
|
|
+ expect(body.top_p).toBe(0.9)
|
|
|
|
|
+ },
|
|
|
|
|
+ })
|
|
|
|
|
+ })
|
|
|
|
|
+
|
|
|
|
|
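+  // Gemini nests temperature, topP, and maxOutputTokens under generationConfig rather than at the top level.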
+ test("sends Google API payload for Gemini models", async () => {
|
|
|
|
|
+ const server = state.server
|
|
|
|
|
+ if (!server) {
|
|
|
|
|
+ throw new Error("Server not initialized")
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ const providerID = "google"
|
|
|
|
|
+ const modelID = "gemini-2.5-flash"
|
|
|
|
|
+ const fixture = await loadFixture(providerID, modelID)
|
|
|
|
|
+ const provider = fixture.provider
|
|
|
|
|
+ const model = fixture.model
|
|
|
|
|
+ const pathSuffix = `/v1beta/models/${model.id}:streamGenerateContent`
|
|
|
|
|
+
|
|
|
|
|
+ const chunks = [
|
|
|
|
|
+ {
|
|
|
|
|
+ candidates: [
|
|
|
|
|
+ {
|
|
|
|
|
+ content: {
|
|
|
|
|
+ parts: [{ text: "Hello" }],
|
|
|
|
|
+ },
|
|
|
|
|
+ finishReason: "STOP",
|
|
|
|
|
+ },
|
|
|
|
|
+ ],
|
|
|
|
|
+ usageMetadata: {
|
|
|
|
|
+ promptTokenCount: 1,
|
|
|
|
|
+ candidatesTokenCount: 1,
|
|
|
|
|
+ totalTokenCount: 2,
|
|
|
|
|
+ },
|
|
|
|
|
+ },
|
|
|
|
|
+ ]
|
|
|
|
|
+ const request = waitRequest(pathSuffix, createEventResponse(chunks))
|
|
|
|
|
+
|
|
|
|
|
+ await writeModels({ [providerID]: provider })
|
|
|
|
|
+
|
|
|
|
|
+ await using tmp = await tmpdir({
|
|
|
|
|
+ init: async (dir) => {
|
|
|
|
|
+ await Bun.write(
|
|
|
|
|
+ path.join(dir, "opencode.json"),
|
|
|
|
|
+ JSON.stringify({
|
|
|
|
|
+ $schema: "https://opencode.ai/config.json",
|
|
|
|
|
+ enabled_providers: [providerID],
|
|
|
|
|
+ provider: {
|
|
|
|
|
+ [providerID]: {
|
|
|
|
|
+ options: {
|
|
|
|
|
+ apiKey: "test-google-key",
|
|
|
|
|
+ baseURL: `${server.url.origin}/v1beta`,
|
|
|
|
|
+ },
|
|
|
|
|
+ },
|
|
|
|
|
+ },
|
|
|
|
|
+ }),
|
|
|
|
|
+ )
|
|
|
|
|
+ },
|
|
|
|
|
+ })
|
|
|
|
|
+
|
|
|
|
|
+ await Instance.provide({
|
|
|
|
|
+ directory: tmp.path,
|
|
|
|
|
+ fn: async () => {
|
|
|
|
|
+ const resolved = await Provider.getModel(providerID, model.id)
|
|
|
|
|
+ const sessionID = "session-test-4"
|
|
|
|
|
+ const agent = {
|
|
|
|
|
+ name: "test",
|
|
|
|
|
+ mode: "primary",
|
|
|
|
|
+ options: {},
|
|
|
|
|
+ permission: [{ permission: "*", pattern: "*", action: "allow" }],
|
|
|
|
|
+ temperature: 0.3,
|
|
|
|
|
+ topP: 0.8,
|
|
|
|
|
+ } satisfies Agent.Info
|
|
|
|
|
+
|
|
|
|
|
+ const user = {
|
|
|
|
|
+ id: "user-4",
|
|
|
|
|
+ sessionID,
|
|
|
|
|
+ role: "user",
|
|
|
|
|
+ time: { created: Date.now() },
|
|
|
|
|
+ agent: agent.name,
|
|
|
|
|
+ model: { providerID, modelID: resolved.id },
|
|
|
|
|
+ } satisfies MessageV2.User
|
|
|
|
|
+
|
|
|
|
|
+ const stream = await LLM.stream({
|
|
|
|
|
+ user,
|
|
|
|
|
+ sessionID,
|
|
|
|
|
+ model: resolved,
|
|
|
|
|
+ agent,
|
|
|
|
|
+ system: ["You are a helpful assistant."],
|
|
|
|
|
+ abort: new AbortController().signal,
|
|
|
|
|
+ messages: [{ role: "user", content: "Hello" }],
|
|
|
|
|
+ tools: {},
|
|
|
|
|
+ })
|
|
|
|
|
+
|
|
|
|
|
+ for await (const _ of stream.fullStream) {
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ const capture = await request
|
|
|
|
|
+ const body = capture.body
|
|
|
|
|
+ const config = body.generationConfig as
|
|
|
|
|
+ | { temperature?: number; topP?: number; maxOutputTokens?: number }
|
|
|
|
|
+ | undefined
|
|
|
|
|
+
|
|
|
|
|
+ expect(capture.url.pathname).toBe(pathSuffix)
|
|
|
|
|
+ expect(config?.temperature).toBe(0.3)
|
|
|
|
|
+ expect(config?.topP).toBe(0.8)
|
|
|
|
|
+ expect(config?.maxOutputTokens).toBe(
|
|
|
|
|
+ ProviderTransform.maxOutputTokens(
|
|
|
|
|
+ resolved.api.npm,
|
|
|
|
|
+ ProviderTransform.options({ model: resolved, sessionID }),
|
|
|
|
|
+ resolved.limit.output,
|
|
|
|
|
+ LLM.OUTPUT_TOKEN_MAX,
|
|
|
|
|
+ ),
|
|
|
|
|
+ )
|
|
|
|
|
+ },
|
|
|
|
|
+ })
|
|
|
|
|
+ })
|
|
|
|
|
+})
|