compaction.test.ts

import { describe, expect, test } from "bun:test"
import path from "path"
import { SessionCompaction } from "../../src/session/compaction"
import { Token } from "../../src/util/token"
import { Instance } from "../../src/project/instance"
import { Log } from "../../src/util/log"
import { tmpdir } from "../fixture/fixture"
import { Session } from "../../src/session"
import type { Provider } from "../../src/provider/provider"

Log.init({ print: false })
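
// Minimal Provider.Model fixture: only `limit` and `cost` vary per test; the
// remaining fields exist solely to satisfy the Provider.Model type.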
function createModel(opts: {
  context: number
  output: number
  input?: number
  cost?: Provider.Model["cost"]
}): Provider.Model {
  return {
    id: "test-model",
    providerID: "test",
    name: "Test",
    limit: {
      context: opts.context,
      input: opts.input,
      output: opts.output,
    },
    cost: opts.cost ?? { input: 0, output: 0, cache: { read: 0, write: 0 } },
    capabilities: {
      toolcall: true,
      attachment: false,
      reasoning: false,
      temperature: true,
      input: { text: true, image: false, audio: false, video: false },
      output: { text: true, image: false, audio: false, video: false },
    },
    api: { npm: "@ai-sdk/anthropic" },
    options: {},
  } as Provider.Model
}
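
// Overflow detection, covering the cases exercised below: the plain context
// limit, cache.read accounting, explicit input caps, a zero context limit, and
// the compaction.auto = false config override.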
describe("session.compaction.isOverflow", () => {
  test("returns true when token count exceeds usable context", async () => {
    await using tmp = await tmpdir()
    await Instance.provide({
      directory: tmp.path,
      fn: async () => {
        const model = createModel({ context: 100_000, output: 32_000 })
        const tokens = { input: 75_000, output: 5_000, reasoning: 0, cache: { read: 0, write: 0 } }
        expect(await SessionCompaction.isOverflow({ tokens, model })).toBe(true)
      },
    })
  })

  test("returns false when token count within usable context", async () => {
    await using tmp = await tmpdir()
    await Instance.provide({
      directory: tmp.path,
      fn: async () => {
        const model = createModel({ context: 200_000, output: 32_000 })
        const tokens = { input: 100_000, output: 10_000, reasoning: 0, cache: { read: 0, write: 0 } }
        expect(await SessionCompaction.isOverflow({ tokens, model })).toBe(false)
      },
    })
  })

  test("includes cache.read in token count", async () => {
    await using tmp = await tmpdir()
    await Instance.provide({
      directory: tmp.path,
      fn: async () => {
        const model = createModel({ context: 100_000, output: 32_000 })
        const tokens = { input: 50_000, output: 10_000, reasoning: 0, cache: { read: 10_000, write: 0 } }
        expect(await SessionCompaction.isOverflow({ tokens, model })).toBe(true)
      },
    })
  })

  test("respects input limit for input caps", async () => {
    await using tmp = await tmpdir()
    await Instance.provide({
      directory: tmp.path,
      fn: async () => {
        const model = createModel({ context: 400_000, input: 272_000, output: 128_000 })
        const tokens = { input: 271_000, output: 1_000, reasoning: 0, cache: { read: 2_000, write: 0 } }
        expect(await SessionCompaction.isOverflow({ tokens, model })).toBe(true)
      },
    })
  })

  test("returns false when input/output are within input caps", async () => {
    await using tmp = await tmpdir()
    await Instance.provide({
      directory: tmp.path,
      fn: async () => {
        const model = createModel({ context: 400_000, input: 272_000, output: 128_000 })
        const tokens = { input: 200_000, output: 20_000, reasoning: 0, cache: { read: 10_000, write: 0 } }
        expect(await SessionCompaction.isOverflow({ tokens, model })).toBe(false)
      },
    })
  })

  test("returns false when output within limit with input caps", async () => {
    await using tmp = await tmpdir()
    await Instance.provide({
      directory: tmp.path,
      fn: async () => {
        const model = createModel({ context: 200_000, input: 120_000, output: 10_000 })
        const tokens = { input: 50_000, output: 9_999, reasoning: 0, cache: { read: 0, write: 0 } }
        expect(await SessionCompaction.isOverflow({ tokens, model })).toBe(false)
      },
    })
  })

  test("returns false when model context limit is 0", async () => {
    await using tmp = await tmpdir()
    await Instance.provide({
      directory: tmp.path,
      fn: async () => {
        const model = createModel({ context: 0, output: 32_000 })
        const tokens = { input: 100_000, output: 10_000, reasoning: 0, cache: { read: 0, write: 0 } }
        expect(await SessionCompaction.isOverflow({ tokens, model })).toBe(false)
      },
    })
  })

  test("returns false when compaction.auto is disabled", async () => {
    await using tmp = await tmpdir({
      init: async (dir) => {
        await Bun.write(
          path.join(dir, "opencode.json"),
          JSON.stringify({
            compaction: { auto: false },
          }),
        )
      },
    })
    await Instance.provide({
      directory: tmp.path,
      fn: async () => {
        const model = createModel({ context: 100_000, output: 32_000 })
        const tokens = { input: 75_000, output: 5_000, reasoning: 0, cache: { read: 0, write: 0 } }
        expect(await SessionCompaction.isOverflow({ tokens, model })).toBe(false)
      },
    })
  })
})
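
// Token.estimate uses a roughly 4-characters-per-token heuristic.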
describe("util.token.estimate", () => {
  test("estimates tokens from text (4 chars per token)", () => {
    const text = "x".repeat(4000)
    expect(Token.estimate(text)).toBe(1000)
  })

  test("estimates tokens from larger text", () => {
    const text = "y".repeat(20_000)
    expect(Token.estimate(text)).toBe(5000)
  })

  test("returns 0 for empty string", () => {
    expect(Token.estimate("")).toBe(0)
  })
})
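
// Session.getUsage normalizes provider usage reports into the tokens/cost shape:
// cachedInputTokens move to cache.read (and are not subtracted from input when
// Anthropic metadata is present), cache writes come from Anthropic metadata, and
// cost is derived from the model's per-million-token pricing.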
describe("session.getUsage", () => {
  test("normalizes standard usage to token format", () => {
    const model = createModel({ context: 100_000, output: 32_000 })
    const result = Session.getUsage({
      model,
      usage: {
        inputTokens: 1000,
        outputTokens: 500,
        totalTokens: 1500,
      },
    })
    expect(result.tokens.input).toBe(1000)
    expect(result.tokens.output).toBe(500)
    expect(result.tokens.reasoning).toBe(0)
    expect(result.tokens.cache.read).toBe(0)
    expect(result.tokens.cache.write).toBe(0)
  })

  test("extracts cached tokens to cache.read", () => {
    const model = createModel({ context: 100_000, output: 32_000 })
    const result = Session.getUsage({
      model,
      usage: {
        inputTokens: 1000,
        outputTokens: 500,
        totalTokens: 1500,
        cachedInputTokens: 200,
      },
    })
    expect(result.tokens.input).toBe(800)
    expect(result.tokens.cache.read).toBe(200)
  })

  test("handles anthropic cache write metadata", () => {
    const model = createModel({ context: 100_000, output: 32_000 })
    const result = Session.getUsage({
      model,
      usage: {
        inputTokens: 1000,
        outputTokens: 500,
        totalTokens: 1500,
      },
      metadata: {
        anthropic: {
          cacheCreationInputTokens: 300,
        },
      },
    })
    expect(result.tokens.cache.write).toBe(300)
  })

  test("does not subtract cached tokens for anthropic provider", () => {
    const model = createModel({ context: 100_000, output: 32_000 })
    const result = Session.getUsage({
      model,
      usage: {
        inputTokens: 1000,
        outputTokens: 500,
        totalTokens: 1500,
        cachedInputTokens: 200,
      },
      metadata: {
        anthropic: {},
      },
    })
    expect(result.tokens.input).toBe(1000)
    expect(result.tokens.cache.read).toBe(200)
  })

  test("handles reasoning tokens", () => {
    const model = createModel({ context: 100_000, output: 32_000 })
    const result = Session.getUsage({
      model,
      usage: {
        inputTokens: 1000,
        outputTokens: 500,
        totalTokens: 1500,
        reasoningTokens: 100,
      },
    })
    expect(result.tokens.reasoning).toBe(100)
  })

  test("handles undefined optional values gracefully", () => {
    const model = createModel({ context: 100_000, output: 32_000 })
    const result = Session.getUsage({
      model,
      usage: {
        inputTokens: 0,
        outputTokens: 0,
        totalTokens: 0,
      },
    })
    expect(result.tokens.input).toBe(0)
    expect(result.tokens.output).toBe(0)
    expect(result.tokens.reasoning).toBe(0)
    expect(result.tokens.cache.read).toBe(0)
    expect(result.tokens.cache.write).toBe(0)
    expect(Number.isNaN(result.cost)).toBe(false)
  })

  test("calculates cost correctly", () => {
    const model = createModel({
      context: 100_000,
      output: 32_000,
      cost: {
        input: 3,
        output: 15,
        cache: { read: 0.3, write: 3.75 },
      },
    })
    const result = Session.getUsage({
      model,
      usage: {
        inputTokens: 1_000_000,
        outputTokens: 100_000,
        totalTokens: 1_100_000,
      },
    })
    expect(result.cost).toBe(3 + 1.5)
  })
})