import { describe, expect, test } from "bun:test"
import path from "path"
import { SessionCompaction } from "../../src/session/compaction"
import { Token } from "../../src/util/token"
import { Instance } from "../../src/project/instance"
import { Log } from "../../src/util/log"
import { tmpdir } from "../fixture/fixture"
import { Session } from "../../src/session"
import type { Provider } from "../../src/provider/provider"

Log.init({ print: false })
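
// Minimal Provider.Model fixture: individual tests vary only the
// context/input/output limits and, optionally, the per-token cost;
// every other field is a fixed stub.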
function createModel(opts: {
  context: number
  output: number
  input?: number
  cost?: Provider.Model["cost"]
}): Provider.Model {
  return {
    id: "test-model",
    providerID: "test",
    name: "Test",
    limit: {
      context: opts.context,
      input: opts.input,
      output: opts.output,
    },
    cost: opts.cost ?? { input: 0, output: 0, cache: { read: 0, write: 0 } },
    capabilities: {
      toolcall: true,
      attachment: false,
      reasoning: false,
      temperature: true,
      input: { text: true, image: false, audio: false, video: false },
      output: { text: true, image: false, audio: false, video: false },
    },
    api: { npm: "@ai-sdk/anthropic" },
    options: {},
  } as Provider.Model
}
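
// The cases below pin down the overflow heuristic as exercised here: a
// session overflows once the running total (input + output + cache.read)
// exceeds the context window minus the reserved output budget, or the
// model's input limit when one is set.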
- describe("session.compaction.isOverflow", () => {
- test("returns true when token count exceeds usable context", async () => {
- await using tmp = await tmpdir()
- await Instance.provide({
- directory: tmp.path,
- fn: async () => {
- const model = createModel({ context: 100_000, output: 32_000 })
- const tokens = { input: 75_000, output: 5_000, reasoning: 0, cache: { read: 0, write: 0 } }
- expect(await SessionCompaction.isOverflow({ tokens, model })).toBe(true)
- },
- })
- })
- test("returns false when token count within usable context", async () => {
- await using tmp = await tmpdir()
- await Instance.provide({
- directory: tmp.path,
- fn: async () => {
- const model = createModel({ context: 200_000, output: 32_000 })
- const tokens = { input: 100_000, output: 10_000, reasoning: 0, cache: { read: 0, write: 0 } }
- expect(await SessionCompaction.isOverflow({ tokens, model })).toBe(false)
- },
- })
- })
- test("includes cache.read in token count", async () => {
- await using tmp = await tmpdir()
- await Instance.provide({
- directory: tmp.path,
- fn: async () => {
- const model = createModel({ context: 100_000, output: 32_000 })
- const tokens = { input: 50_000, output: 10_000, reasoning: 0, cache: { read: 10_000, write: 0 } }
- expect(await SessionCompaction.isOverflow({ tokens, model })).toBe(true)
- },
- })
- })
- test("respects input limit for input caps", async () => {
- await using tmp = await tmpdir()
- await Instance.provide({
- directory: tmp.path,
- fn: async () => {
- const model = createModel({ context: 400_000, input: 272_000, output: 128_000 })
- const tokens = { input: 271_000, output: 1_000, reasoning: 0, cache: { read: 2_000, write: 0 } }
- expect(await SessionCompaction.isOverflow({ tokens, model })).toBe(true)
- },
- })
- })
- test("returns false when input/output are within input caps", async () => {
- await using tmp = await tmpdir()
- await Instance.provide({
- directory: tmp.path,
- fn: async () => {
- const model = createModel({ context: 400_000, input: 272_000, output: 128_000 })
- const tokens = { input: 200_000, output: 20_000, reasoning: 0, cache: { read: 10_000, write: 0 } }
- expect(await SessionCompaction.isOverflow({ tokens, model })).toBe(false)
- },
- })
- })
- test("returns false when output within limit with input caps", async () => {
- await using tmp = await tmpdir()
- await Instance.provide({
- directory: tmp.path,
- fn: async () => {
- const model = createModel({ context: 200_000, input: 120_000, output: 10_000 })
- const tokens = { input: 50_000, output: 9_999, reasoning: 0, cache: { read: 0, write: 0 } }
- expect(await SessionCompaction.isOverflow({ tokens, model })).toBe(false)
- },
- })
- })
- test("returns false when model context limit is 0", async () => {
- await using tmp = await tmpdir()
- await Instance.provide({
- directory: tmp.path,
- fn: async () => {
- const model = createModel({ context: 0, output: 32_000 })
- const tokens = { input: 100_000, output: 10_000, reasoning: 0, cache: { read: 0, write: 0 } }
- expect(await SessionCompaction.isOverflow({ tokens, model })).toBe(false)
- },
- })
- })
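
  // Auto-compaction can be disabled per project via opencode.json, in which
  // case the overflow check reports false regardless of token counts.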
- test("returns false when compaction.auto is disabled", async () => {
- await using tmp = await tmpdir({
- init: async (dir) => {
- await Bun.write(
- path.join(dir, "opencode.json"),
- JSON.stringify({
- compaction: { auto: false },
- }),
- )
- },
- })
- await Instance.provide({
- directory: tmp.path,
- fn: async () => {
- const model = createModel({ context: 100_000, output: 32_000 })
- const tokens = { input: 75_000, output: 5_000, reasoning: 0, cache: { read: 0, write: 0 } }
- expect(await SessionCompaction.isOverflow({ tokens, model })).toBe(false)
- },
- })
- })
- })
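
// Token.estimate uses a simple length-based heuristic: roughly one token
// per four characters of text.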
- describe("util.token.estimate", () => {
- test("estimates tokens from text (4 chars per token)", () => {
- const text = "x".repeat(4000)
- expect(Token.estimate(text)).toBe(1000)
- })
- test("estimates tokens from larger text", () => {
- const text = "y".repeat(20_000)
- expect(Token.estimate(text)).toBe(5000)
- })
- test("returns 0 for empty string", () => {
- expect(Token.estimate("")).toBe(0)
- })
- })
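
// Session.getUsage normalizes AI SDK-style usage (inputTokens, outputTokens,
// totalTokens, plus optional cached and reasoning counts) into the internal
// token shape and prices the result with the model's per-million-token costs.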
- describe("session.getUsage", () => {
- test("normalizes standard usage to token format", () => {
- const model = createModel({ context: 100_000, output: 32_000 })
- const result = Session.getUsage({
- model,
- usage: {
- inputTokens: 1000,
- outputTokens: 500,
- totalTokens: 1500,
- },
- })
- expect(result.tokens.input).toBe(1000)
- expect(result.tokens.output).toBe(500)
- expect(result.tokens.reasoning).toBe(0)
- expect(result.tokens.cache.read).toBe(0)
- expect(result.tokens.cache.write).toBe(0)
- })
- test("extracts cached tokens to cache.read", () => {
- const model = createModel({ context: 100_000, output: 32_000 })
- const result = Session.getUsage({
- model,
- usage: {
- inputTokens: 1000,
- outputTokens: 500,
- totalTokens: 1500,
- cachedInputTokens: 200,
- },
- })
- expect(result.tokens.input).toBe(800)
- expect(result.tokens.cache.read).toBe(200)
- })
- test("handles anthropic cache write metadata", () => {
- const model = createModel({ context: 100_000, output: 32_000 })
- const result = Session.getUsage({
- model,
- usage: {
- inputTokens: 1000,
- outputTokens: 500,
- totalTokens: 1500,
- },
- metadata: {
- anthropic: {
- cacheCreationInputTokens: 300,
- },
- },
- })
- expect(result.tokens.cache.write).toBe(300)
- })
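
  // Anthropic reports cache reads separately from input tokens, so when
  // anthropic metadata is present the cached count is not subtracted from
  // input (unlike the default path above, where cachedInputTokens is
  // treated as a subset of inputTokens).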
- test("does not subtract cached tokens for anthropic provider", () => {
- const model = createModel({ context: 100_000, output: 32_000 })
- const result = Session.getUsage({
- model,
- usage: {
- inputTokens: 1000,
- outputTokens: 500,
- totalTokens: 1500,
- cachedInputTokens: 200,
- },
- metadata: {
- anthropic: {},
- },
- })
- expect(result.tokens.input).toBe(1000)
- expect(result.tokens.cache.read).toBe(200)
- })
- test("handles reasoning tokens", () => {
- const model = createModel({ context: 100_000, output: 32_000 })
- const result = Session.getUsage({
- model,
- usage: {
- inputTokens: 1000,
- outputTokens: 500,
- totalTokens: 1500,
- reasoningTokens: 100,
- },
- })
- expect(result.tokens.reasoning).toBe(100)
- })
- test("handles undefined optional values gracefully", () => {
- const model = createModel({ context: 100_000, output: 32_000 })
- const result = Session.getUsage({
- model,
- usage: {
- inputTokens: 0,
- outputTokens: 0,
- totalTokens: 0,
- },
- })
- expect(result.tokens.input).toBe(0)
- expect(result.tokens.output).toBe(0)
- expect(result.tokens.reasoning).toBe(0)
- expect(result.tokens.cache.read).toBe(0)
- expect(result.tokens.cache.write).toBe(0)
- expect(Number.isNaN(result.cost)).toBe(false)
- })
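
  // Worked cost, with prices per million tokens:
  // 1,000,000 input × $3/M + 100,000 output × $15/M = $3.00 + $1.50 = $4.50.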
- test("calculates cost correctly", () => {
- const model = createModel({
- context: 100_000,
- output: 32_000,
- cost: {
- input: 3,
- output: 15,
- cache: { read: 0.3, write: 3.75 },
- },
- })
- const result = Session.getUsage({
- model,
- usage: {
- inputTokens: 1_000_000,
- outputTokens: 100_000,
- totalTokens: 1_100_000,
- },
- })
- expect(result.cost).toBe(3 + 1.5)
- })
- })