| 123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702
70370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094 |
- import { afterAll, beforeAll, beforeEach, describe, expect, test } from "bun:test"
- import path from "path"
- import { tool, type ModelMessage } from "ai"
- import { Cause, Exit, Stream } from "effect"
- import z from "zod"
- import { makeRuntime } from "../../src/effect/run-service"
- import { LLM } from "../../src/session/llm"
- import { Instance } from "../../src/project/instance"
- import { Provider } from "../../src/provider/provider"
- import { ProviderTransform } from "../../src/provider/transform"
- import { ModelsDev } from "../../src/provider/models"
- import { ProviderID, ModelID } from "../../src/provider/schema"
- import { Filesystem } from "../../src/util/filesystem"
- import { tmpdir } from "../fixture/fixture"
- import type { Agent } from "../../src/agent/agent"
- import type { MessageV2 } from "../../src/session/message-v2"
- import { SessionID, MessageID } from "../../src/session/schema"
- describe("session.llm.hasToolCalls", () => {
- test("returns false for empty messages array", () => {
- expect(LLM.hasToolCalls([])).toBe(false)
- })
- test("returns false for messages with only text content", () => {
- const messages: ModelMessage[] = [
- {
- role: "user",
- content: [{ type: "text", text: "Hello" }],
- },
- {
- role: "assistant",
- content: [{ type: "text", text: "Hi there" }],
- },
- ]
- expect(LLM.hasToolCalls(messages)).toBe(false)
- })
- test("returns true when messages contain tool-call", () => {
- const messages = [
- {
- role: "user",
- content: [{ type: "text", text: "Run a command" }],
- },
- {
- role: "assistant",
- content: [
- {
- type: "tool-call",
- toolCallId: "call-123",
- toolName: "bash",
- },
- ],
- },
- ] as ModelMessage[]
- expect(LLM.hasToolCalls(messages)).toBe(true)
- })
- test("returns true when messages contain tool-result", () => {
- const messages = [
- {
- role: "tool",
- content: [
- {
- type: "tool-result",
- toolCallId: "call-123",
- toolName: "bash",
- },
- ],
- },
- ] as ModelMessage[]
- expect(LLM.hasToolCalls(messages)).toBe(true)
- })
- test("returns false for messages with string content", () => {
- const messages: ModelMessage[] = [
- {
- role: "user",
- content: "Hello world",
- },
- {
- role: "assistant",
- content: "Hi there",
- },
- ]
- expect(LLM.hasToolCalls(messages)).toBe(false)
- })
- test("returns true when tool-call is mixed with text content", () => {
- const messages = [
- {
- role: "assistant",
- content: [
- { type: "text", text: "Let me run that command" },
- {
- type: "tool-call",
- toolCallId: "call-456",
- toolName: "read",
- },
- ],
- },
- ] as ModelMessage[]
- expect(LLM.hasToolCalls(messages)).toBe(true)
- })
- })
// Snapshot of a single HTTP request received by the stub provider server:
// the parsed URL, the incoming headers, and the JSON-decoded body.
type Capture = {
  url: URL
  headers: Headers
  body: Record<string, unknown>
}
// Shared mutable fixture state for the stub provider server.
const state = {
  // Bun HTTP server started in beforeAll; null until then.
  server: null as ReturnType<typeof Bun.serve> | null,
  // FIFO queue of expected requests. Each entry matches a URL path suffix,
  // supplies a canned (or request-computed) response, and resolves its
  // waiter with the captured request details.
  queue: [] as Array<{
    path: string
    response: Response | ((req: Request, capture: Capture) => Response)
    resolve: (value: Capture) => void
  }>,
}
- function deferred<T>() {
- const result = {} as { promise: Promise<T>; resolve: (value: T) => void }
- result.promise = new Promise((resolve) => {
- result.resolve = resolve
- })
- return result
- }
- function waitRequest(pathname: string, response: Response) {
- const pending = deferred<Capture>()
- state.queue.push({ path: pathname, response, resolve: pending.resolve })
- return pending.promise
- }
- function timeout(ms: number) {
- return new Promise<never>((_, reject) => {
- setTimeout(() => reject(new Error(`timed out after ${ms}ms`)), ms)
- })
- }
- function waitStreamingRequest(pathname: string) {
- const request = deferred<Capture>()
- const requestAborted = deferred<void>()
- const responseCanceled = deferred<void>()
- const encoder = new TextEncoder()
- state.queue.push({
- path: pathname,
- resolve: request.resolve,
- response(req: Request) {
- req.signal.addEventListener("abort", () => requestAborted.resolve(), { once: true })
- return new Response(
- new ReadableStream<Uint8Array>({
- start(controller) {
- controller.enqueue(
- encoder.encode(
- [
- `data: ${JSON.stringify({
- id: "chatcmpl-abort",
- object: "chat.completion.chunk",
- choices: [{ delta: { role: "assistant" } }],
- })}`,
- ].join("\n\n") + "\n\n",
- ),
- )
- },
- cancel() {
- responseCanceled.resolve()
- },
- }),
- {
- status: 200,
- headers: { "Content-Type": "text/event-stream" },
- },
- )
- },
- })
- return {
- request: request.promise,
- requestAborted: requestAborted.promise,
- responseCanceled: responseCanceled.promise,
- }
- }
- beforeAll(() => {
- state.server = Bun.serve({
- port: 0,
- async fetch(req) {
- const next = state.queue.shift()
- if (!next) {
- return new Response("unexpected request", { status: 500 })
- }
- const url = new URL(req.url)
- const body = (await req.json()) as Record<string, unknown>
- next.resolve({ url, headers: req.headers, body })
- if (!url.pathname.endsWith(next.path)) {
- return new Response("not found", { status: 404 })
- }
- return typeof next.response === "function"
- ? next.response(req, { url, headers: req.headers, body })
- : next.response
- },
- })
- })
- beforeEach(() => {
- state.queue.length = 0
- })
- afterAll(() => {
- state.server?.stop()
- })
- function createChatStream(text: string) {
- const payload =
- [
- `data: ${JSON.stringify({
- id: "chatcmpl-1",
- object: "chat.completion.chunk",
- choices: [{ delta: { role: "assistant" } }],
- })}`,
- `data: ${JSON.stringify({
- id: "chatcmpl-1",
- object: "chat.completion.chunk",
- choices: [{ delta: { content: text } }],
- })}`,
- `data: ${JSON.stringify({
- id: "chatcmpl-1",
- object: "chat.completion.chunk",
- choices: [{ delta: {}, finish_reason: "stop" }],
- })}`,
- "data: [DONE]",
- ].join("\n\n") + "\n\n"
- const encoder = new TextEncoder()
- return new ReadableStream<Uint8Array>({
- start(controller) {
- controller.enqueue(encoder.encode(payload))
- controller.close()
- },
- })
- }
- async function loadFixture(providerID: string, modelID: string) {
- const fixturePath = path.join(import.meta.dir, "../tool/fixtures/models-api.json")
- const data = await Filesystem.readJson<Record<string, ModelsDev.Provider>>(fixturePath)
- const provider = data[providerID]
- if (!provider) {
- throw new Error(`Missing provider in fixture: ${providerID}`)
- }
- const model = provider.models[modelID]
- if (!model) {
- throw new Error(`Missing model in fixture: ${modelID}`)
- }
- return { provider, model }
- }
- function createEventStream(chunks: unknown[], includeDone = false) {
- const lines = chunks.map((chunk) => `data: ${typeof chunk === "string" ? chunk : JSON.stringify(chunk)}`)
- if (includeDone) {
- lines.push("data: [DONE]")
- }
- const payload = lines.join("\n\n") + "\n\n"
- const encoder = new TextEncoder()
- return new ReadableStream<Uint8Array>({
- start(controller) {
- controller.enqueue(encoder.encode(payload))
- controller.close()
- },
- })
- }
- function createEventResponse(chunks: unknown[], includeDone = false) {
- return new Response(createEventStream(chunks, includeDone), {
- status: 200,
- headers: { "Content-Type": "text/event-stream" },
- })
- }
// Integration tests for LLM.stream. Each test: (1) queues a canned response
// on the stub server, (2) writes a temp opencode.json pointing the provider's
// baseURL at that server, (3) runs the stream inside Instance.provide, and
// (4) asserts on the captured outbound HTTP request. The queue registration
// order relative to tmpdir/Instance setup is load-bearing.
describe("session.llm.stream", () => {
  // Verifies the wire format for a generic OpenAI-compatible provider:
  // sampling options, max tokens, and reasoning effort from the model variant.
  test("sends temperature, tokens, and reasoning options for openai-compatible models", async () => {
    const server = state.server
    if (!server) {
      throw new Error("Server not initialized")
    }
    const providerID = "vivgrid"
    const modelID = "gemini-3.1-pro-preview"
    const fixture = await loadFixture(providerID, modelID)
    const model = fixture.model
    const request = waitRequest(
      "/chat/completions",
      new Response(createChatStream("Hello"), {
        status: 200,
        headers: { "Content-Type": "text/event-stream" },
      }),
    )
    await using tmp = await tmpdir({
      init: async (dir) => {
        await Bun.write(
          path.join(dir, "opencode.json"),
          JSON.stringify({
            $schema: "https://app.kilo.ai/config.json",
            enabled_providers: [providerID],
            provider: {
              [providerID]: {
                options: {
                  apiKey: "test-key",
                  baseURL: `${server.url.origin}/v1`,
                },
              },
            },
          }),
        )
      },
    })
    await Instance.provide({
      directory: tmp.path,
      fn: async () => {
        const resolved = await Provider.getModel(ProviderID.make(providerID), ModelID.make(model.id))
        const sessionID = SessionID.make("session-test-1")
        const agent = {
          name: "test",
          mode: "primary",
          options: {},
          permission: [{ permission: "*", pattern: "*", action: "allow" }],
          temperature: 0.4,
          topP: 0.8,
        } satisfies Agent.Info
        const user = {
          id: MessageID.make("user-1"),
          sessionID,
          role: "user",
          time: { created: Date.now() },
          agent: agent.name,
          // variant: "high" — presumably selects reasoning effort; verified
          // by the reasoning assertion below.
          model: { providerID: ProviderID.make(providerID), modelID: resolved.id, variant: "high" },
        } satisfies MessageV2.User
        const stream = await LLM.stream({
          user,
          sessionID,
          model: resolved,
          agent,
          system: ["You are a helpful assistant."],
          abort: new AbortController().signal,
          messages: [{ role: "user", content: "Hello" }],
          tools: {},
        })
        // Drain the stream so the request fully completes before asserting.
        for await (const _ of stream.fullStream) {
        }
        const capture = await request
        const body = capture.body
        const headers = capture.headers
        const url = capture.url
        expect(url.pathname.startsWith("/v1/")).toBe(true)
        expect(url.pathname.endsWith("/chat/completions")).toBe(true)
        expect(headers.get("Authorization")).toBe("Bearer test-key")
        expect(headers.get("User-Agent") ?? "").toMatch(/^Kilo-Code\//) // kilocode_change
        expect(body.model).toBe(resolved.api.id)
        expect(body.temperature).toBe(0.4)
        expect(body.top_p).toBe(0.8)
        expect(body.stream).toBe(true)
        // Providers differ in which max-token field they use; accept either.
        const maxTokens = (body.max_tokens as number | undefined) ?? (body.max_output_tokens as number | undefined)
        const expectedMaxTokens = ProviderTransform.maxOutputTokens(resolved)
        expect(maxTokens).toBe(expectedMaxTokens)
        const reasoning = (body.reasoningEffort as string | undefined) ?? (body.reasoning_effort as string | undefined)
        expect(reasoning).toBe("high")
      },
    })
  })
  // Aborting the AbortSignal passed to LLM.stream must cancel the provider's
  // HTTP response body promptly (observed via the stub's cancel hook).
  test("raw stream abort signal cancels provider response body promptly", async () => {
    const server = state.server
    if (!server) throw new Error("Server not initialized")
    const providerID = "alibaba"
    const modelID = "qwen-plus"
    const fixture = await loadFixture(providerID, modelID)
    const model = fixture.model
    const pending = waitStreamingRequest("/chat/completions")
    await using tmp = await tmpdir({
      init: async (dir) => {
        await Bun.write(
          path.join(dir, "opencode.json"),
          JSON.stringify({
            $schema: "https://opencode.ai/config.json",
            enabled_providers: [providerID],
            provider: {
              [providerID]: {
                options: {
                  apiKey: "test-key",
                  baseURL: `${server.url.origin}/v1`,
                },
              },
            },
          }),
        )
      },
    })
    await Instance.provide({
      directory: tmp.path,
      fn: async () => {
        const resolved = await Provider.getModel(ProviderID.make(providerID), ModelID.make(model.id))
        const sessionID = SessionID.make("session-test-raw-abort")
        const agent = {
          name: "test",
          mode: "primary",
          options: {},
          permission: [{ permission: "*", pattern: "*", action: "allow" }],
        } satisfies Agent.Info
        const user = {
          id: MessageID.make("user-raw-abort"),
          sessionID,
          role: "user",
          time: { created: Date.now() },
          agent: agent.name,
          model: { providerID: ProviderID.make(providerID), modelID: resolved.id },
        } satisfies MessageV2.User
        const ctrl = new AbortController()
        const result = await LLM.stream({
          user,
          sessionID,
          model: resolved,
          agent,
          system: ["You are a helpful assistant."],
          abort: ctrl.signal,
          messages: [{ role: "user", content: "Hello" }],
          tools: {},
        })
        // Pull one chunk so the connection is live, then abort and require
        // the body cancel within 500ms; the request-level abort is
        // best-effort (some transports only cancel the body).
        const iter = result.fullStream[Symbol.asyncIterator]()
        await pending.request
        await iter.next()
        ctrl.abort()
        await Promise.race([pending.responseCanceled, timeout(500)])
        await Promise.race([pending.requestAborted, timeout(500)]).catch(() => undefined)
        await iter.return?.()
      },
    })
  })
  // Same promptness guarantee, but driven through the Effect service layer:
  // interrupting the runtime fiber must cancel the provider response body.
  test("service stream cancellation cancels provider response body promptly", async () => {
    const server = state.server
    if (!server) throw new Error("Server not initialized")
    const providerID = "alibaba"
    const modelID = "qwen-plus"
    const fixture = await loadFixture(providerID, modelID)
    const model = fixture.model
    const pending = waitStreamingRequest("/chat/completions")
    await using tmp = await tmpdir({
      init: async (dir) => {
        await Bun.write(
          path.join(dir, "opencode.json"),
          JSON.stringify({
            $schema: "https://opencode.ai/config.json",
            enabled_providers: [providerID],
            provider: {
              [providerID]: {
                options: {
                  apiKey: "test-key",
                  baseURL: `${server.url.origin}/v1`,
                },
              },
            },
          }),
        )
      },
    })
    await Instance.provide({
      directory: tmp.path,
      fn: async () => {
        const resolved = await Provider.getModel(ProviderID.make(providerID), ModelID.make(model.id))
        const sessionID = SessionID.make("session-test-service-abort")
        const agent = {
          name: "test",
          mode: "primary",
          options: {},
          permission: [{ permission: "*", pattern: "*", action: "allow" }],
        } satisfies Agent.Info
        const user = {
          id: MessageID.make("user-service-abort"),
          sessionID,
          role: "user",
          time: { created: Date.now() },
          agent: agent.name,
          model: { providerID: ProviderID.make(providerID), modelID: resolved.id },
        } satisfies MessageV2.User
        const ctrl = new AbortController()
        const { runPromiseExit } = makeRuntime(LLM.Service, LLM.defaultLayer)
        const run = runPromiseExit(
          (svc) =>
            svc
              .stream({
                user,
                sessionID,
                model: resolved,
                agent,
                system: ["You are a helpful assistant."],
                messages: [{ role: "user", content: "Hello" }],
                tools: {},
              })
              .pipe(Stream.runDrain),
          { signal: ctrl.signal },
        )
        await pending.request
        ctrl.abort()
        await Promise.race([pending.responseCanceled, timeout(500)])
        // The fiber must end as an interruption, not a defect or success.
        const exit = await run
        expect(Exit.isFailure(exit)).toBe(true)
        if (Exit.isFailure(exit)) {
          expect(Cause.hasInterrupts(exit.cause)).toBe(true)
        }
        await Promise.race([pending.requestAborted, timeout(500)]).catch(() => undefined)
      },
    })
  })
  // A per-call permission override (allow) should win over the agent's deny,
  // keeping the tool in the outbound tools payload.
  test("keeps tools enabled by prompt permissions", async () => {
    const server = state.server
    if (!server) {
      throw new Error("Server not initialized")
    }
    const providerID = "alibaba"
    const modelID = "qwen-plus"
    const fixture = await loadFixture(providerID, modelID)
    const model = fixture.model
    const request = waitRequest(
      "/chat/completions",
      new Response(createChatStream("Hello"), {
        status: 200,
        headers: { "Content-Type": "text/event-stream" },
      }),
    )
    await using tmp = await tmpdir({
      init: async (dir) => {
        await Bun.write(
          path.join(dir, "opencode.json"),
          JSON.stringify({
            $schema: "https://opencode.ai/config.json",
            enabled_providers: [providerID],
            provider: {
              [providerID]: {
                options: {
                  apiKey: "test-key",
                  baseURL: `${server.url.origin}/v1`,
                },
              },
            },
          }),
        )
      },
    })
    await Instance.provide({
      directory: tmp.path,
      fn: async () => {
        const resolved = await Provider.getModel(ProviderID.make(providerID), ModelID.make(model.id))
        const sessionID = SessionID.make("session-test-tools")
        // Agent denies "question"...
        const agent = {
          name: "test",
          mode: "primary",
          options: {},
          permission: [{ permission: "question", pattern: "*", action: "deny" }],
        } satisfies Agent.Info
        const user = {
          id: MessageID.make("user-tools"),
          sessionID,
          role: "user",
          time: { created: Date.now() },
          agent: agent.name,
          model: { providerID: ProviderID.make(providerID), modelID: resolved.id },
          tools: { question: true },
        } satisfies MessageV2.User
        const stream = await LLM.stream({
          user,
          sessionID,
          model: resolved,
          agent,
          // ...but the call-level permission allows it, and should win.
          permission: [{ permission: "question", pattern: "*", action: "allow" }],
          system: ["You are a helpful assistant."],
          abort: new AbortController().signal,
          messages: [{ role: "user", content: "Hello" }],
          tools: {
            question: tool({
              description: "Ask a question",
              inputSchema: z.object({}),
              execute: async () => ({ output: "" }),
            }),
          },
        })
        for await (const _ of stream.fullStream) {
        }
        const capture = await request
        const tools = capture.body.tools as Array<{ function?: { name?: string } }> | undefined
        expect(tools?.some((item) => item.function?.name === "question")).toBe(true)
      },
    })
  })
  // OpenAI models should use the Responses API (/responses) rather than
  // /chat/completions, with reasoning effort and no max_output_tokens.
  test("sends responses API payload for OpenAI models", async () => {
    const server = state.server
    if (!server) {
      throw new Error("Server not initialized")
    }
    const source = await loadFixture("openai", "gpt-5.2")
    const model = source.model
    // Minimal Responses-API event sequence: created → text delta → completed.
    const responseChunks = [
      {
        type: "response.created",
        response: {
          id: "resp-1",
          created_at: Math.floor(Date.now() / 1000),
          model: model.id,
          service_tier: null,
        },
      },
      {
        type: "response.output_text.delta",
        item_id: "item-1",
        delta: "Hello",
        logprobs: null,
      },
      {
        type: "response.completed",
        response: {
          incomplete_details: null,
          usage: {
            input_tokens: 1,
            input_tokens_details: null,
            output_tokens: 1,
            output_tokens_details: null,
          },
          service_tier: null,
        },
      },
    ]
    const request = waitRequest("/responses", createEventResponse(responseChunks, true))
    await using tmp = await tmpdir({
      init: async (dir) => {
        await Bun.write(
          path.join(dir, "opencode.json"),
          JSON.stringify({
            $schema: "https://app.kilo.ai/config.json",
            enabled_providers: ["openai"],
            provider: {
              openai: {
                name: "OpenAI",
                env: ["OPENAI_API_KEY"],
                npm: "@ai-sdk/openai",
                api: "https://api.openai.com/v1",
                models: {
                  [model.id]: model,
                },
                options: {
                  apiKey: "test-openai-key",
                  baseURL: `${server.url.origin}/v1`,
                },
              },
            },
          }),
        )
      },
    })
    await Instance.provide({
      directory: tmp.path,
      fn: async () => {
        const resolved = await Provider.getModel(ProviderID.openai, ModelID.make(model.id))
        const sessionID = SessionID.make("session-test-2")
        const agent = {
          name: "test",
          mode: "primary",
          options: {},
          permission: [{ permission: "*", pattern: "*", action: "allow" }],
          temperature: 0.2,
        } satisfies Agent.Info
        const user = {
          id: MessageID.make("user-2"),
          sessionID,
          role: "user",
          time: { created: Date.now() },
          agent: agent.name,
          model: { providerID: ProviderID.make("openai"), modelID: resolved.id, variant: "high" },
        } satisfies MessageV2.User
        const stream = await LLM.stream({
          user,
          sessionID,
          model: resolved,
          agent,
          system: ["You are a helpful assistant."],
          abort: new AbortController().signal,
          messages: [{ role: "user", content: "Hello" }],
          tools: {},
        })
        for await (const _ of stream.fullStream) {
        }
        const capture = await request
        const body = capture.body
        expect(capture.url.pathname.endsWith("/responses")).toBe(true)
        expect(body.model).toBe(resolved.api.id)
        expect(body.stream).toBe(true)
        expect((body.reasoning as { effort?: string } | undefined)?.effort).toBe("high")
        const maxTokens = body.max_output_tokens as number | undefined
        expect(maxTokens).toBe(undefined) // match codex cli behavior
      },
    })
  })
  // Image attachments supplied as data: URLs must round-trip through the
  // Responses API without being rejected or rewritten.
  test("accepts user image attachments as data URLs for OpenAI models", async () => {
    const server = state.server
    if (!server) {
      throw new Error("Server not initialized")
    }
    const source = await loadFixture("openai", "gpt-5.2")
    const model = source.model
    const chunks = [
      {
        type: "response.created",
        response: {
          id: "resp-data-url",
          created_at: Math.floor(Date.now() / 1000),
          model: model.id,
          service_tier: null,
        },
      },
      {
        type: "response.output_text.delta",
        item_id: "item-data-url",
        delta: "Looks good",
        logprobs: null,
      },
      {
        type: "response.completed",
        response: {
          incomplete_details: null,
          usage: {
            input_tokens: 1,
            input_tokens_details: null,
            output_tokens: 1,
            output_tokens_details: null,
          },
          service_tier: null,
        },
      },
    ]
    const request = waitRequest("/responses", createEventResponse(chunks, true))
    // Base64-encode a real fixture image into a data: URL.
    const image = `data:image/png;base64,${Buffer.from(
      await Bun.file(path.join(import.meta.dir, "../tool/fixtures/large-image.png")).arrayBuffer(),
    ).toString("base64")}`
    await using tmp = await tmpdir({
      init: async (dir) => {
        await Bun.write(
          path.join(dir, "opencode.json"),
          JSON.stringify({
            $schema: "https://opencode.ai/config.json",
            enabled_providers: ["openai"],
            provider: {
              openai: {
                name: "OpenAI",
                env: ["OPENAI_API_KEY"],
                npm: "@ai-sdk/openai",
                api: "https://api.openai.com/v1",
                models: {
                  [model.id]: model,
                },
                options: {
                  apiKey: "test-openai-key",
                  baseURL: `${server.url.origin}/v1`,
                },
              },
            },
          }),
        )
      },
    })
    await Instance.provide({
      directory: tmp.path,
      fn: async () => {
        const resolved = await Provider.getModel(ProviderID.openai, ModelID.make(model.id))
        const sessionID = SessionID.make("session-test-data-url")
        const agent = {
          name: "test",
          mode: "primary",
          options: {},
          permission: [{ permission: "*", pattern: "*", action: "allow" }],
        } satisfies Agent.Info
        const user = {
          id: MessageID.make("user-data-url"),
          sessionID,
          role: "user",
          time: { created: Date.now() },
          agent: agent.name,
          model: { providerID: ProviderID.make("openai"), modelID: resolved.id },
        } satisfies MessageV2.User
        const stream = await LLM.stream({
          user,
          sessionID,
          model: resolved,
          agent,
          system: ["You are a helpful assistant."],
          abort: new AbortController().signal,
          messages: [
            {
              role: "user",
              content: [
                { type: "text", text: "Describe this image" },
                {
                  type: "file",
                  mediaType: "image/png",
                  filename: "large-image.png",
                  data: image,
                },
              ],
            },
          ] as ModelMessage[],
          tools: {},
        })
        for await (const _ of stream.fullStream) {
        }
        // Reaching the /responses endpoint without error is the assertion:
        // the attachment survived request construction.
        const capture = await request
        expect(capture.url.pathname.endsWith("/responses")).toBe(true)
      },
    })
  })
  // Anthropic-compatible providers should hit /messages with the Messages
  // API payload shape (max_tokens required, snake_case sampling options).
  test("sends messages API payload for Anthropic Compatible models", async () => {
    const server = state.server
    if (!server) {
      throw new Error("Server not initialized")
    }
    const providerID = "minimax"
    const modelID = "MiniMax-M2.5"
    const fixture = await loadFixture(providerID, modelID)
    const model = fixture.model
    // Minimal Messages-API event sequence through message_stop.
    const chunks = [
      {
        type: "message_start",
        message: {
          id: "msg-1",
          model: model.id,
          usage: {
            input_tokens: 3,
            cache_creation_input_tokens: null,
            cache_read_input_tokens: null,
          },
        },
      },
      {
        type: "content_block_start",
        index: 0,
        content_block: { type: "text", text: "" },
      },
      {
        type: "content_block_delta",
        index: 0,
        delta: { type: "text_delta", text: "Hello" },
      },
      { type: "content_block_stop", index: 0 },
      {
        type: "message_delta",
        delta: { stop_reason: "end_turn", stop_sequence: null, container: null },
        usage: {
          input_tokens: 3,
          output_tokens: 2,
          cache_creation_input_tokens: null,
          cache_read_input_tokens: null,
        },
      },
      { type: "message_stop" },
    ]
    const request = waitRequest("/messages", createEventResponse(chunks))
    await using tmp = await tmpdir({
      init: async (dir) => {
        await Bun.write(
          path.join(dir, "opencode.json"),
          JSON.stringify({
            $schema: "https://app.kilo.ai/config.json",
            enabled_providers: [providerID],
            provider: {
              [providerID]: {
                options: {
                  apiKey: "test-anthropic-key",
                  baseURL: `${server.url.origin}/v1`,
                },
              },
            },
          }),
        )
      },
    })
    await Instance.provide({
      directory: tmp.path,
      fn: async () => {
        const resolved = await Provider.getModel(ProviderID.make(providerID), ModelID.make(model.id))
        const sessionID = SessionID.make("session-test-3")
        const agent = {
          name: "test",
          mode: "primary",
          options: {},
          permission: [{ permission: "*", pattern: "*", action: "allow" }],
          temperature: 0.4,
          topP: 0.9,
        } satisfies Agent.Info
        const user = {
          id: MessageID.make("user-3"),
          sessionID,
          role: "user",
          time: { created: Date.now() },
          agent: agent.name,
          model: { providerID: ProviderID.make("minimax"), modelID: ModelID.make("MiniMax-M2.5") },
        } satisfies MessageV2.User
        const stream = await LLM.stream({
          user,
          sessionID,
          model: resolved,
          agent,
          system: ["You are a helpful assistant."],
          abort: new AbortController().signal,
          messages: [{ role: "user", content: "Hello" }],
          tools: {},
        })
        for await (const _ of stream.fullStream) {
        }
        const capture = await request
        const body = capture.body
        expect(capture.url.pathname.endsWith("/messages")).toBe(true)
        expect(body.model).toBe(resolved.api.id)
        expect(body.max_tokens).toBe(ProviderTransform.maxOutputTokens(resolved))
        expect(body.temperature).toBe(0.4)
        expect(body.top_p).toBe(0.9)
      },
    })
  })
  // Gemini models should use Google's generateContent endpoint with sampling
  // options nested under generationConfig.
  test("sends Google API payload for Gemini models", async () => {
    const server = state.server
    if (!server) {
      throw new Error("Server not initialized")
    }
    const providerID = "google"
    const modelID = "gemini-2.5-flash"
    const fixture = await loadFixture(providerID, modelID)
    const provider = fixture.provider
    const model = fixture.model
    const pathSuffix = `/v1beta/models/${model.id}:streamGenerateContent`
    const chunks = [
      {
        candidates: [
          {
            content: {
              parts: [{ text: "Hello" }],
            },
            finishReason: "STOP",
          },
        ],
        usageMetadata: {
          promptTokenCount: 1,
          candidatesTokenCount: 1,
          totalTokenCount: 2,
        },
      },
    ]
    const request = waitRequest(pathSuffix, createEventResponse(chunks))
    await using tmp = await tmpdir({
      init: async (dir) => {
        await Bun.write(
          path.join(dir, "opencode.json"),
          JSON.stringify({
            $schema: "https://app.kilo.ai/config.json",
            enabled_providers: [providerID],
            provider: {
              [providerID]: {
                options: {
                  apiKey: "test-google-key",
                  baseURL: `${server.url.origin}/v1beta`,
                },
              },
            },
          }),
        )
      },
    })
    await Instance.provide({
      directory: tmp.path,
      fn: async () => {
        const resolved = await Provider.getModel(ProviderID.make(providerID), ModelID.make(model.id))
        const sessionID = SessionID.make("session-test-4")
        const agent = {
          name: "test",
          mode: "primary",
          options: {},
          permission: [{ permission: "*", pattern: "*", action: "allow" }],
          temperature: 0.3,
          topP: 0.8,
        } satisfies Agent.Info
        const user = {
          id: MessageID.make("user-4"),
          sessionID,
          role: "user",
          time: { created: Date.now() },
          agent: agent.name,
          model: { providerID: ProviderID.make(providerID), modelID: resolved.id },
        } satisfies MessageV2.User
        const stream = await LLM.stream({
          user,
          sessionID,
          model: resolved,
          agent,
          system: ["You are a helpful assistant."],
          abort: new AbortController().signal,
          messages: [{ role: "user", content: "Hello" }],
          tools: {},
        })
        for await (const _ of stream.fullStream) {
        }
        const capture = await request
        const body = capture.body
        const config = body.generationConfig as
          | { temperature?: number; topP?: number; maxOutputTokens?: number }
          | undefined
        expect(capture.url.pathname).toBe(pathSuffix)
        expect(config?.temperature).toBe(0.3)
        expect(config?.topP).toBe(0.8)
        expect(config?.maxOutputTokens).toBe(ProviderTransform.maxOutputTokens(resolved))
      },
    })
  })
})
|