import { afterAll, beforeAll, beforeEach, describe, expect, test } from "bun:test"
import path from "path"
import type { ModelMessage } from "ai"
import { LLM } from "../../src/session/llm"
import { Instance } from "../../src/project/instance"
import { Provider } from "../../src/provider/provider"
import { ProviderTransform } from "../../src/provider/transform"
import { ModelsDev } from "../../src/provider/models"
import { tmpdir } from "../fixture/fixture"
import type { Agent } from "../../src/agent/agent"
import type { MessageV2 } from "../../src/session/message-v2"
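// Unit tests for LLM.hasToolCalls: it should report true only when a message
// carries tool-call or tool-result content parts, and false otherwise.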
- describe("session.llm.hasToolCalls", () => {
- test("returns false for empty messages array", () => {
- expect(LLM.hasToolCalls([])).toBe(false)
- })
- test("returns false for messages with only text content", () => {
- const messages: ModelMessage[] = [
- {
- role: "user",
- content: [{ type: "text", text: "Hello" }],
- },
- {
- role: "assistant",
- content: [{ type: "text", text: "Hi there" }],
- },
- ]
- expect(LLM.hasToolCalls(messages)).toBe(false)
- })
- test("returns true when messages contain tool-call", () => {
- const messages = [
- {
- role: "user",
- content: [{ type: "text", text: "Run a command" }],
- },
- {
- role: "assistant",
- content: [
- {
- type: "tool-call",
- toolCallId: "call-123",
- toolName: "bash",
- },
- ],
- },
- ] as ModelMessage[]
- expect(LLM.hasToolCalls(messages)).toBe(true)
- })
- test("returns true when messages contain tool-result", () => {
- const messages = [
- {
- role: "tool",
- content: [
- {
- type: "tool-result",
- toolCallId: "call-123",
- toolName: "bash",
- },
- ],
- },
- ] as ModelMessage[]
- expect(LLM.hasToolCalls(messages)).toBe(true)
- })
- test("returns false for messages with string content", () => {
- const messages: ModelMessage[] = [
- {
- role: "user",
- content: "Hello world",
- },
- {
- role: "assistant",
- content: "Hi there",
- },
- ]
- expect(LLM.hasToolCalls(messages)).toBe(false)
- })
- test("returns true when tool-call is mixed with text content", () => {
- const messages = [
- {
- role: "assistant",
- content: [
- { type: "text", text: "Let me run that command" },
- {
- type: "tool-call",
- toolCallId: "call-456",
- toolName: "read",
- },
- ],
- },
- ] as ModelMessage[]
- expect(LLM.hasToolCalls(messages)).toBe(true)
- })
- })
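// Mock HTTP harness: a test enqueues an expected path plus a canned response,
// and the server resolves a Capture (URL, headers, parsed JSON body) for each
// incoming request so assertions can inspect the exact wire payload.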
type Capture = {
  url: URL
  headers: Headers
  body: Record<string, unknown>
}

const state = {
  server: null as ReturnType<typeof Bun.serve> | null,
  queue: [] as Array<{ path: string; response: Response; resolve: (value: Capture) => void }>,
}
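// Minimal deferred helper: exposes a promise together with its resolve
// function so the server handler can fulfill it later.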
function deferred<T>() {
  const result = {} as { promise: Promise<T>; resolve: (value: T) => void }
  result.promise = new Promise((resolve) => {
    result.resolve = resolve
  })
  return result
}
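// Registers an expected request; the returned promise resolves with the
// Capture once the mock server receives it.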
function waitRequest(pathname: string, response: Response) {
  const pending = deferred<Capture>()
  state.queue.push({ path: pathname, response, resolve: pending.resolve })
  return pending.promise
}
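// One mock server for the whole file (port 0 picks a free port). Each request
// pops the next queued expectation, captures it, and replies with the canned
// response; unexpected or mismatched requests fail loudly.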
beforeAll(() => {
  state.server = Bun.serve({
    port: 0,
    async fetch(req) {
      const next = state.queue.shift()
      if (!next) {
        return new Response("unexpected request", { status: 500 })
      }
      const url = new URL(req.url)
      const body = (await req.json()) as Record<string, unknown>
      next.resolve({ url, headers: req.headers, body })
      if (!url.pathname.endsWith(next.path)) {
        return new Response("not found", { status: 404 })
      }
      return next.response
    },
  })
})

beforeEach(() => {
  state.queue.length = 0
})

afterAll(() => {
  state.server?.stop()
})
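// Builds an OpenAI chat-completions SSE body: a role delta, a single content
// delta, a stop chunk, then the [DONE] sentinel.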
function createChatStream(text: string) {
  const payload =
    [
      `data: ${JSON.stringify({
        id: "chatcmpl-1",
        object: "chat.completion.chunk",
        choices: [{ delta: { role: "assistant" } }],
      })}`,
      `data: ${JSON.stringify({
        id: "chatcmpl-1",
        object: "chat.completion.chunk",
        choices: [{ delta: { content: text } }],
      })}`,
      `data: ${JSON.stringify({
        id: "chatcmpl-1",
        object: "chat.completion.chunk",
        choices: [{ delta: {}, finish_reason: "stop" }],
      })}`,
      "data: [DONE]",
    ].join("\n\n") + "\n\n"
  const encoder = new TextEncoder()
  return new ReadableStream<Uint8Array>({
    start(controller) {
      controller.enqueue(encoder.encode(payload))
      controller.close()
    },
  })
}
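// Loads provider/model metadata from the shared models-api.json fixture and
// fails loudly when an id is missing.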
async function loadFixture(providerID: string, modelID: string) {
  const fixturePath = path.join(import.meta.dir, "../tool/fixtures/models-api.json")
  const data = (await Bun.file(fixturePath).json()) as Record<string, ModelsDev.Provider>
  const provider = data[providerID]
  if (!provider) {
    throw new Error(`Missing provider in fixture: ${providerID}`)
  }
  const model = provider.models[modelID]
  if (!model) {
    throw new Error(`Missing model in fixture: ${modelID}`)
  }
  return { provider, model }
}
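// Generic SSE helpers: serialize each chunk as a `data:` event. The [DONE]
// sentinel is opt-in because only OpenAI-style streams use it.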
function createEventStream(chunks: unknown[], includeDone = false) {
  const lines = chunks.map((chunk) => `data: ${typeof chunk === "string" ? chunk : JSON.stringify(chunk)}`)
  if (includeDone) {
    lines.push("data: [DONE]")
  }
  const payload = lines.join("\n\n") + "\n\n"
  const encoder = new TextEncoder()
  return new ReadableStream<Uint8Array>({
    start(controller) {
      controller.enqueue(encoder.encode(payload))
      controller.close()
    },
  })
}

function createEventResponse(chunks: unknown[], includeDone = false) {
  return new Response(createEventStream(chunks, includeDone), {
    status: 200,
    headers: { "Content-Type": "text/event-stream" },
  })
}
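// Integration-style tests: each one points a provider config at the mock
// server, drives LLM.stream, and asserts the provider-specific request
// payload that went over the wire.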
- describe("session.llm.stream", () => {
- test("sends temperature, tokens, and reasoning options for openai-compatible models", async () => {
- const server = state.server
- if (!server) {
- throw new Error("Server not initialized")
- }
- const providerID = "alibaba"
- const modelID = "qwen-plus"
- const fixture = await loadFixture(providerID, modelID)
- const provider = fixture.provider
- const model = fixture.model
- const request = waitRequest(
- "/chat/completions",
- new Response(createChatStream("Hello"), {
- status: 200,
- headers: { "Content-Type": "text/event-stream" },
- }),
- )
- await using tmp = await tmpdir({
- init: async (dir) => {
- await Bun.write(
- path.join(dir, "opencode.json"),
- JSON.stringify({
- $schema: "https://opencode.ai/config.json",
- enabled_providers: [providerID],
- provider: {
- [providerID]: {
- options: {
- apiKey: "test-key",
- baseURL: `${server.url.origin}/v1`,
- },
- },
- },
- }),
- )
- },
- })
- await Instance.provide({
- directory: tmp.path,
- fn: async () => {
- const resolved = await Provider.getModel(providerID, model.id)
- const sessionID = "session-test-1"
- const agent = {
- name: "test",
- mode: "primary",
- options: {},
- permission: [{ permission: "*", pattern: "*", action: "allow" }],
- temperature: 0.4,
- topP: 0.8,
- } satisfies Agent.Info
- const user = {
- id: "user-1",
- sessionID,
- role: "user",
- time: { created: Date.now() },
- agent: agent.name,
- model: { providerID, modelID: resolved.id },
- variant: "high",
- } satisfies MessageV2.User
- const stream = await LLM.stream({
- user,
- sessionID,
- model: resolved,
- agent,
- system: ["You are a helpful assistant."],
- abort: new AbortController().signal,
- messages: [{ role: "user", content: "Hello" }],
- tools: {},
- })
- for await (const _ of stream.fullStream) {
- }
- const capture = await request
- const body = capture.body
- const headers = capture.headers
- const url = capture.url
- expect(url.pathname.startsWith("/v1/")).toBe(true)
- expect(url.pathname.endsWith("/chat/completions")).toBe(true)
- expect(headers.get("Authorization")).toBe("Bearer test-key")
- expect(headers.get("User-Agent") ?? "").toMatch(/^opencode\//)
- expect(body.model).toBe(resolved.api.id)
- expect(body.temperature).toBe(0.4)
- expect(body.top_p).toBe(0.8)
- expect(body.stream).toBe(true)
- const maxTokens = (body.max_tokens as number | undefined) ?? (body.max_output_tokens as number | undefined)
- const expectedMaxTokens = ProviderTransform.maxOutputTokens(
- resolved.api.npm,
- ProviderTransform.options({ model: resolved, sessionID }),
- resolved.limit.output,
- LLM.OUTPUT_TOKEN_MAX,
- )
- expect(maxTokens).toBe(expectedMaxTokens)
- const reasoning = (body.reasoningEffort as string | undefined) ?? (body.reasoning_effort as string | undefined)
- expect(reasoning).toBe("high")
- },
- })
- })
- test("sends responses API payload for OpenAI models", async () => {
- const server = state.server
- if (!server) {
- throw new Error("Server not initialized")
- }
- const source = await loadFixture("openai", "gpt-5.2")
- const model = source.model
- const responseChunks = [
- {
- type: "response.created",
- response: {
- id: "resp-1",
- created_at: Math.floor(Date.now() / 1000),
- model: model.id,
- service_tier: null,
- },
- },
- {
- type: "response.output_text.delta",
- item_id: "item-1",
- delta: "Hello",
- logprobs: null,
- },
- {
- type: "response.completed",
- response: {
- incomplete_details: null,
- usage: {
- input_tokens: 1,
- input_tokens_details: null,
- output_tokens: 1,
- output_tokens_details: null,
- },
- service_tier: null,
- },
- },
- ]
- const request = waitRequest("/responses", createEventResponse(responseChunks, true))
- await using tmp = await tmpdir({
- init: async (dir) => {
- await Bun.write(
- path.join(dir, "opencode.json"),
- JSON.stringify({
- $schema: "https://opencode.ai/config.json",
- enabled_providers: ["openai"],
- provider: {
- openai: {
- name: "OpenAI",
- env: ["OPENAI_API_KEY"],
- npm: "@ai-sdk/openai",
- api: "https://api.openai.com/v1",
- models: {
- [model.id]: model,
- },
- options: {
- apiKey: "test-openai-key",
- baseURL: `${server.url.origin}/v1`,
- },
- },
- },
- }),
- )
- },
- })
- await Instance.provide({
- directory: tmp.path,
- fn: async () => {
- const resolved = await Provider.getModel("openai", model.id)
- const sessionID = "session-test-2"
- const agent = {
- name: "test",
- mode: "primary",
- options: {},
- permission: [{ permission: "*", pattern: "*", action: "allow" }],
- temperature: 0.2,
- } satisfies Agent.Info
- const user = {
- id: "user-2",
- sessionID,
- role: "user",
- time: { created: Date.now() },
- agent: agent.name,
- model: { providerID: "openai", modelID: resolved.id },
- variant: "high",
- } satisfies MessageV2.User
- const stream = await LLM.stream({
- user,
- sessionID,
- model: resolved,
- agent,
- system: ["You are a helpful assistant."],
- abort: new AbortController().signal,
- messages: [{ role: "user", content: "Hello" }],
- tools: {},
- })
- for await (const _ of stream.fullStream) {
- }
- const capture = await request
- const body = capture.body
- expect(capture.url.pathname.endsWith("/responses")).toBe(true)
- expect(body.model).toBe(resolved.api.id)
- expect(body.stream).toBe(true)
- expect((body.reasoning as { effort?: string } | undefined)?.effort).toBe("high")
- const maxTokens = body.max_output_tokens as number | undefined
- const expectedMaxTokens = ProviderTransform.maxOutputTokens(
- resolved.api.npm,
- ProviderTransform.options({ model: resolved, sessionID }),
- resolved.limit.output,
- LLM.OUTPUT_TOKEN_MAX,
- )
- expect(maxTokens).toBe(expectedMaxTokens)
- },
- })
- })
- test("sends messages API payload for Anthropic models", async () => {
- const server = state.server
- if (!server) {
- throw new Error("Server not initialized")
- }
- const providerID = "anthropic"
- const modelID = "claude-3-5-sonnet-20241022"
- const fixture = await loadFixture(providerID, modelID)
- const provider = fixture.provider
- const model = fixture.model
- const chunks = [
- {
- type: "message_start",
- message: {
- id: "msg-1",
- model: model.id,
- usage: {
- input_tokens: 3,
- cache_creation_input_tokens: null,
- cache_read_input_tokens: null,
- },
- },
- },
- {
- type: "content_block_start",
- index: 0,
- content_block: { type: "text", text: "" },
- },
- {
- type: "content_block_delta",
- index: 0,
- delta: { type: "text_delta", text: "Hello" },
- },
- { type: "content_block_stop", index: 0 },
- {
- type: "message_delta",
- delta: { stop_reason: "end_turn", stop_sequence: null, container: null },
- usage: {
- input_tokens: 3,
- output_tokens: 2,
- cache_creation_input_tokens: null,
- cache_read_input_tokens: null,
- },
- },
- { type: "message_stop" },
- ]
- const request = waitRequest("/messages", createEventResponse(chunks))
- await using tmp = await tmpdir({
- init: async (dir) => {
- await Bun.write(
- path.join(dir, "opencode.json"),
- JSON.stringify({
- $schema: "https://opencode.ai/config.json",
- enabled_providers: [providerID],
- provider: {
- [providerID]: {
- options: {
- apiKey: "test-anthropic-key",
- baseURL: `${server.url.origin}/v1`,
- },
- },
- },
- }),
- )
- },
- })
- await Instance.provide({
- directory: tmp.path,
- fn: async () => {
- const resolved = await Provider.getModel(providerID, model.id)
- const sessionID = "session-test-3"
- const agent = {
- name: "test",
- mode: "primary",
- options: {},
- permission: [{ permission: "*", pattern: "*", action: "allow" }],
- temperature: 0.4,
- topP: 0.9,
- } satisfies Agent.Info
- const user = {
- id: "user-3",
- sessionID,
- role: "user",
- time: { created: Date.now() },
- agent: agent.name,
- model: { providerID, modelID: resolved.id },
- } satisfies MessageV2.User
- const stream = await LLM.stream({
- user,
- sessionID,
- model: resolved,
- agent,
- system: ["You are a helpful assistant."],
- abort: new AbortController().signal,
- messages: [{ role: "user", content: "Hello" }],
- tools: {},
- })
- for await (const _ of stream.fullStream) {
- }
- const capture = await request
- const body = capture.body
- expect(capture.url.pathname.endsWith("/messages")).toBe(true)
- expect(body.model).toBe(resolved.api.id)
- expect(body.max_tokens).toBe(
- ProviderTransform.maxOutputTokens(
- resolved.api.npm,
- ProviderTransform.options({ model: resolved, sessionID }),
- resolved.limit.output,
- LLM.OUTPUT_TOKEN_MAX,
- ),
- )
- expect(body.temperature).toBe(0.4)
- expect(body.top_p).toBe(0.9)
- },
- })
- })
- test("sends Google API payload for Gemini models", async () => {
- const server = state.server
- if (!server) {
- throw new Error("Server not initialized")
- }
- const providerID = "google"
- const modelID = "gemini-2.5-flash"
- const fixture = await loadFixture(providerID, modelID)
- const provider = fixture.provider
- const model = fixture.model
- const pathSuffix = `/v1beta/models/${model.id}:streamGenerateContent`
- const chunks = [
- {
- candidates: [
- {
- content: {
- parts: [{ text: "Hello" }],
- },
- finishReason: "STOP",
- },
- ],
- usageMetadata: {
- promptTokenCount: 1,
- candidatesTokenCount: 1,
- totalTokenCount: 2,
- },
- },
- ]
- const request = waitRequest(pathSuffix, createEventResponse(chunks))
- await using tmp = await tmpdir({
- init: async (dir) => {
- await Bun.write(
- path.join(dir, "opencode.json"),
- JSON.stringify({
- $schema: "https://opencode.ai/config.json",
- enabled_providers: [providerID],
- provider: {
- [providerID]: {
- options: {
- apiKey: "test-google-key",
- baseURL: `${server.url.origin}/v1beta`,
- },
- },
- },
- }),
- )
- },
- })
- await Instance.provide({
- directory: tmp.path,
- fn: async () => {
- const resolved = await Provider.getModel(providerID, model.id)
- const sessionID = "session-test-4"
- const agent = {
- name: "test",
- mode: "primary",
- options: {},
- permission: [{ permission: "*", pattern: "*", action: "allow" }],
- temperature: 0.3,
- topP: 0.8,
- } satisfies Agent.Info
- const user = {
- id: "user-4",
- sessionID,
- role: "user",
- time: { created: Date.now() },
- agent: agent.name,
- model: { providerID, modelID: resolved.id },
- } satisfies MessageV2.User
- const stream = await LLM.stream({
- user,
- sessionID,
- model: resolved,
- agent,
- system: ["You are a helpful assistant."],
- abort: new AbortController().signal,
- messages: [{ role: "user", content: "Hello" }],
- tools: {},
- })
- for await (const _ of stream.fullStream) {
- }
- const capture = await request
- const body = capture.body
- const config = body.generationConfig as
- | { temperature?: number; topP?: number; maxOutputTokens?: number }
- | undefined
- expect(capture.url.pathname).toBe(pathSuffix)
- expect(config?.temperature).toBe(0.3)
- expect(config?.topP).toBe(0.8)
- expect(config?.maxOutputTokens).toBe(
- ProviderTransform.maxOutputTokens(
- resolved.api.npm,
- ProviderTransform.options({ model: resolved, sessionID }),
- resolved.limit.output,
- LLM.OUTPUT_TOKEN_MAX,
- ),
- )
- },
- })
- })
- })