
fix(opencode): add input limit for compaction (#8465)

Brandon Smith, 1 month ago
Parent
Commit 8d720f9463

+ 0 - 32
packages/opencode/src/plugin/codex.ts

@@ -361,38 +361,6 @@ export async function CodexAuthPlugin(input: PluginInput): Promise<Hooks> {
          }
        }

-        if (!provider.models["gpt-5.2-codex"]) {
-          const model = {
-            id: "gpt-5.2-codex",
-            providerID: "openai",
-            api: {
-              id: "gpt-5.2-codex",
-              url: "https://chatgpt.com/backend-api/codex",
-              npm: "@ai-sdk/openai",
-            },
-            name: "GPT-5.2 Codex",
-            capabilities: {
-              temperature: false,
-              reasoning: true,
-              attachment: true,
-              toolcall: true,
-              input: { text: true, audio: false, image: true, video: false, pdf: false },
-              output: { text: true, audio: false, image: false, video: false, pdf: false },
-              interleaved: false,
-            },
-            cost: { input: 0, output: 0, cache: { read: 0, write: 0 } },
-            limit: { context: 400000, output: 128000 },
-            status: "active" as const,
-            options: {},
-            headers: {},
-            release_date: "2025-12-18",
-            variants: {} as Record<string, Record<string, any>>,
-            family: "gpt-codex",
-          }
-          model.variants = ProviderTransform.variants(model)
-          provider.models["gpt-5.2-codex"] = model
-        }
-
        // Zero out costs for Codex (included with ChatGPT subscription)
        for (const model of Object.values(provider.models)) {
          model.cost = {

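For orientation, the context lines around this removal belong to the cost-zeroing pass that survives the commit. A minimal sketch of that loop, assuming the cost shape matches the one in the removed gpt-5.2-codex block above; the loop body itself is truncated in this diff:

    // Sketch only: cost shape assumed from the removed block above.
    for (const model of Object.values(provider.models)) {
      model.cost = { input: 0, output: 0, cache: { read: 0, write: 0 } }
    }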
+ 1 - 0
packages/opencode/src/provider/models.ts

@@ -47,6 +47,7 @@ export namespace ModelsDev {
      .optional(),
    limit: z.object({
      context: z.number(),
+      input: z.number().optional(),
      output: z.number(),
    }),
    modalities: z

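The schema change is deliberately small: limit.input is optional, so existing catalog entries keep parsing unchanged. A standalone sketch of just the limit object, using the same zod idiom as the file; everything outside the diff is illustrative:

    import { z } from "zod"

    // Mirrors the limit shape in the diff above; the surrounding model
    // schema is omitted.
    const Limit = z.object({
      context: z.number(),
      input: z.number().optional(), // new: hard cap on input tokens
      output: z.number(),
    })

    Limit.parse({ context: 400_000, output: 128_000 }) // still valid without a cap
    Limit.parse({ context: 400_000, input: 272_000, output: 128_000 }) // cap carried through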
+ 2 - 0
packages/opencode/src/provider/provider.ts

@@ -557,6 +557,7 @@ export namespace Provider {
      }),
      limit: z.object({
        context: z.number(),
+        input: z.number().optional(),
        output: z.number(),
      }),
      status: z.enum(["alpha", "beta", "deprecated", "active"]),
@@ -619,6 +620,7 @@ export namespace Provider {
      },
      limit: {
        context: model.limit.context,
+        input: model.limit.input,
        output: model.limit.output,
      },
      capabilities: {

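Provider duplicates the same optional field in its own schema and copies it through verbatim in the mapping, so a model without a declared cap simply yields undefined downstream. A tiny sketch with hypothetical type names to make that explicit:

    // Hypothetical shapes for illustration; the real types live in
    // ModelsDev and Provider.
    type Limit = { context: number; input?: number; output: number }

    const withCap: Limit = { context: 400_000, input: 272_000, output: 128_000 }
    const withoutCap: Limit = { context: 400_000, output: 128_000 }

    console.log(withCap.input) // 272000
    console.log(withoutCap.input) // undefined, so callers must fall back explicitly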
+ 1 - 1
packages/opencode/src/session/compaction.ts

@@ -34,7 +34,7 @@ export namespace SessionCompaction {
    if (context === 0) return false
    const count = input.tokens.input + input.tokens.cache.read + input.tokens.output
    const output = Math.min(input.model.limit.output, SessionPrompt.OUTPUT_TOKEN_MAX) || SessionPrompt.OUTPUT_TOKEN_MAX
-    const usable = context - output
+    const usable = input.model.limit.input || context - output
    return count > usable
  }


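This one-line change is the behavioral core of the commit: when a model declares a hard input cap, compaction triggers against that cap instead of the derived context-minus-reserved-output budget. A self-contained sketch of the check under stated assumptions; OUTPUT_TOKEN_MAX below is a placeholder, since SessionPrompt's actual constant is not shown in this diff:

    // Placeholder value; the real SessionPrompt.OUTPUT_TOKEN_MAX is not in this diff.
    const OUTPUT_TOKEN_MAX = 32_000

    type Tokens = { input: number; output: number; cache: { read: number; write: number } }
    type Model = { limit: { context: number; input?: number; output: number } }

    function isOverflow(input: { tokens: Tokens; model: Model }): boolean {
      const context = input.model.limit.context
      if (context === 0) return false
      const count = input.tokens.input + input.tokens.cache.read + input.tokens.output
      const output = Math.min(input.model.limit.output, OUTPUT_TOKEN_MAX) || OUTPUT_TOKEN_MAX
      // New behavior: a declared input cap wins over the derived budget.
      const usable = input.model.limit.input || context - output
      return count > usable
    }

Note that the || fallback also fires when limit.input is 0, matching the idiom on the output line above it.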
+ 43 - 1
packages/opencode/test/session/compaction.test.ts

@@ -10,13 +10,19 @@ import type { Provider } from "../../src/provider/provider"

Log.init({ print: false })

-function createModel(opts: { context: number; output: number; cost?: Provider.Model["cost"] }): Provider.Model {
+function createModel(opts: {
+  context: number
+  output: number
+  input?: number
+  cost?: Provider.Model["cost"]
+}): Provider.Model {
  return {
    id: "test-model",
    providerID: "test",
    name: "Test",
    limit: {
      context: opts.context,
+      input: opts.input,
      output: opts.output,
    },
    cost: opts.cost ?? { input: 0, output: 0, cache: { read: 0, write: 0 } },
@@ -70,6 +76,42 @@ describe("session.compaction.isOverflow", () => {
    })
  })

+  test("respects the input limit when one is set", async () => {
+    await using tmp = await tmpdir()
+    await Instance.provide({
+      directory: tmp.path,
+      fn: async () => {
+        const model = createModel({ context: 400_000, input: 272_000, output: 128_000 })
+        const tokens = { input: 271_000, output: 1_000, reasoning: 0, cache: { read: 2_000, write: 0 } }
+        expect(await SessionCompaction.isOverflow({ tokens, model })).toBe(true)
+      },
+    })
+  })
+
+  test("returns false when input/output are within input caps", async () => {
+    await using tmp = await tmpdir()
+    await Instance.provide({
+      directory: tmp.path,
+      fn: async () => {
+        const model = createModel({ context: 400_000, input: 272_000, output: 128_000 })
+        const tokens = { input: 200_000, output: 20_000, reasoning: 0, cache: { read: 10_000, write: 0 } }
+        expect(await SessionCompaction.isOverflow({ tokens, model })).toBe(false)
+      },
+    })
+  })
+
+  test("returns false when output is within limit with input caps", async () => {
+    await using tmp = await tmpdir()
+    await Instance.provide({
+      directory: tmp.path,
+      fn: async () => {
+        const model = createModel({ context: 200_000, input: 120_000, output: 10_000 })
+        const tokens = { input: 50_000, output: 9_999, reasoning: 0, cache: { read: 0, write: 0 } }
+        expect(await SessionCompaction.isOverflow({ tokens, model })).toBe(false)
+      },
+    })
+  })
+
  test("returns false when model context limit is 0", async () => {
    await using tmp = await tmpdir()
    await Instance.provide({
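The first new test makes the arithmetic concrete: 271,000 input + 2,000 cache reads + 1,000 output = 274,000 counted tokens, which exceeds the declared 272,000 input cap even though it fits easily within the 400,000-token context, so isOverflow reports overflow on the cap alone. The other two tests pin the negative cases: 230,000 and 59,999 counted tokens respectively, both under their models' input caps.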