
fix: ensure variants for copilot models work w/ maxTokens being set

Aiden Cline committed 3 weeks ago
commit 29ea9fcf25

2 changed files with 2 additions and 15 deletions
  1. packages/opencode/src/provider/transform.ts (+2 −2)
  2. packages/opencode/src/session/llm.ts (+0 −13)

+2 −2
packages/opencode/src/provider/transform.ts

@@ -428,13 +428,13 @@ export namespace ProviderTransform {
           high: {
             thinking: {
               type: "enabled",
-              budgetTokens: 16000,
+              budgetTokens: Math.min(16_000, Math.floor(model.limit.output / 2 - 1)),
             },
           },
           max: {
             thinking: {
               type: "enabled",
-              budgetTokens: 31999,
+              budgetTokens: Math.min(31_999, model.limit.output - 1),
             },
           },
         }
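
The change clamps the previously hard-coded thinking budgets against the model's output limit, since a Copilot model can report an output cap smaller than 16,000 or 31,999 tokens. A minimal TypeScript sketch of the clamping, assuming only the `model.limit.output` shape visible in the diff (everything else here is illustrative):

```ts
// Minimal sketch of the budget clamping, assuming a model shape of
// { limit: { output: number } } as seen in the diff; names are illustrative.
interface Model {
  limit: { output: number }
}

// Thinking budgets must stay below the model's output token limit, so the
// fixed defaults (16_000 / 31_999) are capped against limit.output.
function thinkingBudgets(model: Model) {
  return {
    high: Math.min(16_000, Math.floor(model.limit.output / 2 - 1)),
    max: Math.min(31_999, model.limit.output - 1),
  }
}

// For a model capped at 16_384 output tokens:
// thinkingBudgets({ limit: { output: 16_384 } })
// => { high: 8191, max: 16383 }
```

With the old constants, such a model would have been asked for a thinking budget at or above its own output cap, which is presumably what broke the reasoning variants when maxTokens was set.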

+0 −13
packages/opencode/src/session/llm.ts

@@ -158,19 +158,6 @@ export namespace LLM {
           input.model.limit.output,
           OUTPUT_TOKEN_MAX,
         )
-    log.info("max_output_tokens", {
-      tokens: ProviderTransform.maxOutputTokens(
-        input.model.api.npm,
-        params.options,
-        input.model.limit.output,
-        OUTPUT_TOKEN_MAX,
-      ),
-      modelOptions: params.options,
-      outputLimit: input.model.limit.output,
-    })
-    // tokens = 32000
-    // outputLimit = 64000
-    // modelOptions={"reasoningEffort":"minimal"}
 
     const tools = await resolveTools(input)