Переглянути джерело

feat: unwrap provider namespaces to flat exports + barrel (#22760)

Kit Langton 2 днів тому
батько
коміт
6b20838981

+ 1 - 1
packages/opencode/src/agent/agent.ts

@@ -6,7 +6,7 @@ import { generateObject, streamObject, type ModelMessage } from "ai"
 import { Instance } from "../project/instance"
 import { Truncate } from "../tool"
 import { Auth } from "../auth"
-import { ProviderTransform } from "../provider/transform"
+import { ProviderTransform } from "../provider"
 
 import PROMPT_GENERATE from "./generate.txt"
 import PROMPT_COMPACTION from "./prompt/compaction.txt"

+ 1 - 1
packages/opencode/src/cli/cmd/github.ts

@@ -18,7 +18,7 @@ import type {
 } from "@octokit/webhooks-types"
 import { UI } from "../ui"
 import { cmd } from "./cmd"
-import { ModelsDev } from "../../provider/models"
+import { ModelsDev } from "../../provider"
 import { Instance } from "@/project/instance"
 import { bootstrap } from "../bootstrap"
 import { SessionShare } from "@/share"

+ 1 - 1
packages/opencode/src/cli/cmd/models.ts

@@ -2,7 +2,7 @@ import type { Argv } from "yargs"
 import { Instance } from "../../project/instance"
 import { Provider } from "../../provider"
 import { ProviderID } from "../../provider/schema"
-import { ModelsDev } from "../../provider/models"
+import { ModelsDev } from "../../provider"
 import { cmd } from "./cmd"
 import { UI } from "../ui"
 import { EOL } from "os"

+ 1 - 1
packages/opencode/src/cli/cmd/providers.ts

@@ -3,7 +3,7 @@ import { AppRuntime } from "../../effect/app-runtime"
 import { cmd } from "./cmd"
 import * as prompts from "@clack/prompts"
 import { UI } from "../ui"
-import { ModelsDev } from "../../provider/models"
+import { ModelsDev } from "../../provider"
 import { map, pipe, sortBy, values } from "remeda"
 import path from "path"
 import os from "os"

+ 1 - 1
packages/opencode/src/effect/app-runtime.ts

@@ -16,7 +16,7 @@ import { Storage } from "@/storage"
 import { Snapshot } from "@/snapshot"
 import { Plugin } from "@/plugin"
 import { Provider } from "@/provider"
-import { ProviderAuth } from "@/provider/auth"
+import { ProviderAuth } from "@/provider"
 import { Agent } from "@/agent/agent"
 import { Skill } from "@/skill"
 import { Discovery } from "@/skill/discovery"

+ 206 - 208
packages/opencode/src/provider/auth.ts

@@ -9,219 +9,217 @@ import { ProviderID } from "./schema"
 import { Array as Arr, Effect, Layer, Record, Result, Context, Schema } from "effect"
 import z from "zod"
 
-export namespace ProviderAuth {
-  const When = Schema.Struct({
-    key: Schema.String,
-    op: Schema.Literals(["eq", "neq"]),
-    value: Schema.String,
-  })
-
-  const TextPrompt = Schema.Struct({
-    type: Schema.Literal("text"),
-    key: Schema.String,
-    message: Schema.String,
-    placeholder: Schema.optional(Schema.String),
-    when: Schema.optional(When),
-  })
-
-  const SelectOption = Schema.Struct({
-    label: Schema.String,
-    value: Schema.String,
-    hint: Schema.optional(Schema.String),
-  })
-
-  const SelectPrompt = Schema.Struct({
-    type: Schema.Literal("select"),
-    key: Schema.String,
-    message: Schema.String,
-    options: Schema.Array(SelectOption),
-    when: Schema.optional(When),
-  })
-
-  const Prompt = Schema.Union([TextPrompt, SelectPrompt])
-
-  export class Method extends Schema.Class<Method>("ProviderAuthMethod")({
-    type: Schema.Literals(["oauth", "api"]),
-    label: Schema.String,
-    prompts: Schema.optional(Schema.Array(Prompt)),
-  }) {
-    static readonly zod = zod(this)
-  }
-
-  export const Methods = Schema.Record(Schema.String, Schema.Array(Method)).pipe(withStatics((s) => ({ zod: zod(s) })))
-  export type Methods = typeof Methods.Type
-
-  export class Authorization extends Schema.Class<Authorization>("ProviderAuthAuthorization")({
-    url: Schema.String,
-    method: Schema.Literals(["auto", "code"]),
-    instructions: Schema.String,
-  }) {
-    static readonly zod = zod(this)
-  }
-
-  export const OauthMissing = NamedError.create("ProviderAuthOauthMissing", z.object({ providerID: ProviderID.zod }))
-
-  export const OauthCodeMissing = NamedError.create(
-    "ProviderAuthOauthCodeMissing",
-    z.object({ providerID: ProviderID.zod }),
-  )
-
-  export const OauthCallbackFailed = NamedError.create("ProviderAuthOauthCallbackFailed", z.object({}))
-
-  export const ValidationFailed = NamedError.create(
-    "ProviderAuthValidationFailed",
-    z.object({
-      field: z.string(),
-      message: z.string(),
-    }),
-  )
-
-  export type Error =
-    | Auth.AuthError
-    | InstanceType<typeof OauthMissing>
-    | InstanceType<typeof OauthCodeMissing>
-    | InstanceType<typeof OauthCallbackFailed>
-    | InstanceType<typeof ValidationFailed>
-
-  type Hook = NonNullable<Hooks["auth"]>
-
-  export interface Interface {
-    readonly methods: () => Effect.Effect<Methods>
-    readonly authorize: (input: {
-      providerID: ProviderID
-      method: number
-      inputs?: Record<string, string>
-    }) => Effect.Effect<Authorization | undefined, Error>
-    readonly callback: (input: { providerID: ProviderID; method: number; code?: string }) => Effect.Effect<void, Error>
-  }
-
-  interface State {
-    hooks: Record<ProviderID, Hook>
-    pending: Map<ProviderID, AuthOAuthResult>
-  }
-
-  export class Service extends Context.Service<Service, Interface>()("@opencode/ProviderAuth") {}
-
-  export const layer: Layer.Layer<Service, never, Auth.Service | Plugin.Service> = Layer.effect(
-    Service,
-    Effect.gen(function* () {
-      const auth = yield* Auth.Service
-      const plugin = yield* Plugin.Service
-      const state = yield* InstanceState.make<State>(
-        Effect.fn("ProviderAuth.state")(function* () {
-          const plugins = yield* plugin.list()
-          return {
-            hooks: Record.fromEntries(
-              Arr.filterMap(plugins, (x) =>
-                x.auth?.provider !== undefined
-                  ? Result.succeed([ProviderID.make(x.auth.provider), x.auth] as const)
-                  : Result.failVoid,
-              ),
-            ),
-            pending: new Map<ProviderID, AuthOAuthResult>(),
-          }
-        }),
-      )
+const When = Schema.Struct({
+  key: Schema.String,
+  op: Schema.Literals(["eq", "neq"]),
+  value: Schema.String,
+})
+
+const TextPrompt = Schema.Struct({
+  type: Schema.Literal("text"),
+  key: Schema.String,
+  message: Schema.String,
+  placeholder: Schema.optional(Schema.String),
+  when: Schema.optional(When),
+})
+
+const SelectOption = Schema.Struct({
+  label: Schema.String,
+  value: Schema.String,
+  hint: Schema.optional(Schema.String),
+})
+
+const SelectPrompt = Schema.Struct({
+  type: Schema.Literal("select"),
+  key: Schema.String,
+  message: Schema.String,
+  options: Schema.Array(SelectOption),
+  when: Schema.optional(When),
+})
+
+const Prompt = Schema.Union([TextPrompt, SelectPrompt])
+
+export class Method extends Schema.Class<Method>("ProviderAuthMethod")({
+  type: Schema.Literals(["oauth", "api"]),
+  label: Schema.String,
+  prompts: Schema.optional(Schema.Array(Prompt)),
+}) {
+  static readonly zod = zod(this)
+}
 
-      const decode = Schema.decodeUnknownSync(Methods)
-      const methods = Effect.fn("ProviderAuth.methods")(function* () {
-        const hooks = (yield* InstanceState.get(state)).hooks
-        return decode(
-          Record.map(hooks, (item) =>
-            item.methods.map((method) => ({
-              type: method.type,
-              label: method.label,
-              prompts: method.prompts?.map((prompt) => {
-                if (prompt.type === "select") {
-                  return {
-                    type: "select" as const,
-                    key: prompt.key,
-                    message: prompt.message,
-                    options: prompt.options,
-                    when: prompt.when,
-                  }
-                }
+export const Methods = Schema.Record(Schema.String, Schema.Array(Method)).pipe(withStatics((s) => ({ zod: zod(s) })))
+export type Methods = typeof Methods.Type
+
+export class Authorization extends Schema.Class<Authorization>("ProviderAuthAuthorization")({
+  url: Schema.String,
+  method: Schema.Literals(["auto", "code"]),
+  instructions: Schema.String,
+}) {
+  static readonly zod = zod(this)
+}
+
+export const OauthMissing = NamedError.create("ProviderAuthOauthMissing", z.object({ providerID: ProviderID.zod }))
+
+export const OauthCodeMissing = NamedError.create(
+  "ProviderAuthOauthCodeMissing",
+  z.object({ providerID: ProviderID.zod }),
+)
+
+export const OauthCallbackFailed = NamedError.create("ProviderAuthOauthCallbackFailed", z.object({}))
+
+export const ValidationFailed = NamedError.create(
+  "ProviderAuthValidationFailed",
+  z.object({
+    field: z.string(),
+    message: z.string(),
+  }),
+)
+
+export type Error =
+  | Auth.AuthError
+  | InstanceType<typeof OauthMissing>
+  | InstanceType<typeof OauthCodeMissing>
+  | InstanceType<typeof OauthCallbackFailed>
+  | InstanceType<typeof ValidationFailed>
+
+type Hook = NonNullable<Hooks["auth"]>
+
+export interface Interface {
+  readonly methods: () => Effect.Effect<Methods>
+  readonly authorize: (input: {
+    providerID: ProviderID
+    method: number
+    inputs?: Record<string, string>
+  }) => Effect.Effect<Authorization | undefined, Error>
+  readonly callback: (input: { providerID: ProviderID; method: number; code?: string }) => Effect.Effect<void, Error>
+}
+
+interface State {
+  hooks: Record<ProviderID, Hook>
+  pending: Map<ProviderID, AuthOAuthResult>
+}
+
+export class Service extends Context.Service<Service, Interface>()("@opencode/ProviderAuth") {}
+
+export const layer: Layer.Layer<Service, never, Auth.Service | Plugin.Service> = Layer.effect(
+  Service,
+  Effect.gen(function* () {
+    const auth = yield* Auth.Service
+    const plugin = yield* Plugin.Service
+    const state = yield* InstanceState.make<State>(
+      Effect.fn("ProviderAuth.state")(function* () {
+        const plugins = yield* plugin.list()
+        return {
+          hooks: Record.fromEntries(
+            Arr.filterMap(plugins, (x) =>
+              x.auth?.provider !== undefined
+                ? Result.succeed([ProviderID.make(x.auth.provider), x.auth] as const)
+                : Result.failVoid,
+            ),
+          ),
+          pending: new Map<ProviderID, AuthOAuthResult>(),
+        }
+      }),
+    )
+
+    const decode = Schema.decodeUnknownSync(Methods)
+    const methods = Effect.fn("ProviderAuth.methods")(function* () {
+      const hooks = (yield* InstanceState.get(state)).hooks
+      return decode(
+        Record.map(hooks, (item) =>
+          item.methods.map((method) => ({
+            type: method.type,
+            label: method.label,
+            prompts: method.prompts?.map((prompt) => {
+              if (prompt.type === "select") {
                 return {
-                  type: "text" as const,
+                  type: "select" as const,
                   key: prompt.key,
                   message: prompt.message,
-                  placeholder: prompt.placeholder,
+                  options: prompt.options,
                   when: prompt.when,
                 }
-              }),
-            })),
-          ),
-        )
-      })
-
-      const authorize = Effect.fn("ProviderAuth.authorize")(function* (input: {
-        providerID: ProviderID
-        method: number
-        inputs?: Record<string, string>
-      }) {
-        const { hooks, pending } = yield* InstanceState.get(state)
-        const method = hooks[input.providerID].methods[input.method]
-        if (method.type !== "oauth") return
-
-        if (method.prompts && input.inputs) {
-          for (const prompt of method.prompts) {
-            if (prompt.type === "text" && prompt.validate && input.inputs[prompt.key] !== undefined) {
-              const error = prompt.validate(input.inputs[prompt.key])
-              if (error) return yield* Effect.fail(new ValidationFailed({ field: prompt.key, message: error }))
-            }
-          }
-        }
-
-        const result = yield* Effect.promise(() => method.authorize(input.inputs))
-        pending.set(input.providerID, result)
-        return {
-          url: result.url,
-          method: result.method,
-          instructions: result.instructions,
-        }
-      })
-
-      const callback = Effect.fn("ProviderAuth.callback")(function* (input: {
-        providerID: ProviderID
-        method: number
-        code?: string
-      }) {
-        const pending = (yield* InstanceState.get(state)).pending
-        const match = pending.get(input.providerID)
-        if (!match) return yield* Effect.fail(new OauthMissing({ providerID: input.providerID }))
-        if (match.method === "code" && !input.code) {
-          return yield* Effect.fail(new OauthCodeMissing({ providerID: input.providerID }))
-        }
-
-        const result = yield* Effect.promise(() =>
-          match.method === "code" ? match.callback(input.code!) : match.callback(),
-        )
-        if (!result || result.type !== "success") return yield* Effect.fail(new OauthCallbackFailed({}))
-
-        if ("key" in result) {
-          yield* auth.set(input.providerID, {
-            type: "api",
-            key: result.key,
-          })
-        }
+              }
+              return {
+                type: "text" as const,
+                key: prompt.key,
+                message: prompt.message,
+                placeholder: prompt.placeholder,
+                when: prompt.when,
+              }
+            }),
+          })),
+        ),
+      )
+    })
 
-        if ("refresh" in result) {
-          const { type: _, provider: __, refresh, access, expires, ...extra } = result
-          yield* auth.set(input.providerID, {
-            type: "oauth",
-            access,
-            refresh,
-            expires,
-            ...extra,
-          })
+    const authorize = Effect.fn("ProviderAuth.authorize")(function* (input: {
+      providerID: ProviderID
+      method: number
+      inputs?: Record<string, string>
+    }) {
+      const { hooks, pending } = yield* InstanceState.get(state)
+      const method = hooks[input.providerID].methods[input.method]
+      if (method.type !== "oauth") return
+
+      if (method.prompts && input.inputs) {
+        for (const prompt of method.prompts) {
+          if (prompt.type === "text" && prompt.validate && input.inputs[prompt.key] !== undefined) {
+            const error = prompt.validate(input.inputs[prompt.key])
+            if (error) return yield* Effect.fail(new ValidationFailed({ field: prompt.key, message: error }))
+          }
         }
-      })
-
-      return Service.of({ methods, authorize, callback })
-    }),
-  )
-
-  export const defaultLayer = Layer.suspend(() =>
-    layer.pipe(Layer.provide(Auth.defaultLayer), Layer.provide(Plugin.defaultLayer)),
-  )
-}
+      }
+
+      const result = yield* Effect.promise(() => method.authorize(input.inputs))
+      pending.set(input.providerID, result)
+      return {
+        url: result.url,
+        method: result.method,
+        instructions: result.instructions,
+      }
+    })
+
+    const callback = Effect.fn("ProviderAuth.callback")(function* (input: {
+      providerID: ProviderID
+      method: number
+      code?: string
+    }) {
+      const pending = (yield* InstanceState.get(state)).pending
+      const match = pending.get(input.providerID)
+      if (!match) return yield* Effect.fail(new OauthMissing({ providerID: input.providerID }))
+      if (match.method === "code" && !input.code) {
+        return yield* Effect.fail(new OauthCodeMissing({ providerID: input.providerID }))
+      }
+
+      const result = yield* Effect.promise(() =>
+        match.method === "code" ? match.callback(input.code!) : match.callback(),
+      )
+      if (!result || result.type !== "success") return yield* Effect.fail(new OauthCallbackFailed({}))
+
+      if ("key" in result) {
+        yield* auth.set(input.providerID, {
+          type: "api",
+          key: result.key,
+        })
+      }
+
+      if ("refresh" in result) {
+        const { type: _, provider: __, refresh, access, expires, ...extra } = result
+        yield* auth.set(input.providerID, {
+          type: "oauth",
+          access,
+          refresh,
+          expires,
+          ...extra,
+        })
+      }
+    })
+
+    return Service.of({ methods, authorize, callback })
+  }),
+)
+
+export const defaultLayer = Layer.suspend(() =>
+  layer.pipe(Layer.provide(Auth.defaultLayer), Layer.provide(Plugin.defaultLayer)),
+)

+ 162 - 164
packages/opencode/src/provider/error.ts

@@ -3,195 +3,193 @@ import { STATUS_CODES } from "http"
 import { iife } from "@/util/iife"
 import type { ProviderID } from "./schema"
 
-export namespace ProviderError {
-  // Adapted from overflow detection patterns in:
-  // https://github.com/badlogic/pi-mono/blob/main/packages/ai/src/utils/overflow.ts
-  const OVERFLOW_PATTERNS = [
-    /prompt is too long/i, // Anthropic
-    /input is too long for requested model/i, // Amazon Bedrock
-    /exceeds the context window/i, // OpenAI (Completions + Responses API message text)
-    /input token count.*exceeds the maximum/i, // Google (Gemini)
-    /maximum prompt length is \d+/i, // xAI (Grok)
-    /reduce the length of the messages/i, // Groq
-    /maximum context length is \d+ tokens/i, // OpenRouter, DeepSeek, vLLM
-    /exceeds the limit of \d+/i, // GitHub Copilot
-    /exceeds the available context size/i, // llama.cpp server
-    /greater than the context length/i, // LM Studio
-    /context window exceeds limit/i, // MiniMax
-    /exceeded model token limit/i, // Kimi For Coding, Moonshot
-    /context[_ ]length[_ ]exceeded/i, // Generic fallback
-    /request entity too large/i, // HTTP 413
-    /context length is only \d+ tokens/i, // vLLM
-    /input length.*exceeds.*context length/i, // vLLM
-    /prompt too long; exceeded (?:max )?context length/i, // Ollama explicit overflow error
-    /too large for model with \d+ maximum context length/i, // Mistral
-    /model_context_window_exceeded/i, // z.ai non-standard finish_reason surfaced as error text
-  ]
-
-  function isOpenAiErrorRetryable(e: APICallError) {
-    const status = e.statusCode
-    if (!status) return e.isRetryable
-    // openai sometimes returns 404 for models that are actually available
-    return status === 404 || e.isRetryable
-  }
+// Adapted from overflow detection patterns in:
+// https://github.com/badlogic/pi-mono/blob/main/packages/ai/src/utils/overflow.ts
+const OVERFLOW_PATTERNS = [
+  /prompt is too long/i, // Anthropic
+  /input is too long for requested model/i, // Amazon Bedrock
+  /exceeds the context window/i, // OpenAI (Completions + Responses API message text)
+  /input token count.*exceeds the maximum/i, // Google (Gemini)
+  /maximum prompt length is \d+/i, // xAI (Grok)
+  /reduce the length of the messages/i, // Groq
+  /maximum context length is \d+ tokens/i, // OpenRouter, DeepSeek, vLLM
+  /exceeds the limit of \d+/i, // GitHub Copilot
+  /exceeds the available context size/i, // llama.cpp server
+  /greater than the context length/i, // LM Studio
+  /context window exceeds limit/i, // MiniMax
+  /exceeded model token limit/i, // Kimi For Coding, Moonshot
+  /context[_ ]length[_ ]exceeded/i, // Generic fallback
+  /request entity too large/i, // HTTP 413
+  /context length is only \d+ tokens/i, // vLLM
+  /input length.*exceeds.*context length/i, // vLLM
+  /prompt too long; exceeded (?:max )?context length/i, // Ollama explicit overflow error
+  /too large for model with \d+ maximum context length/i, // Mistral
+  /model_context_window_exceeded/i, // z.ai non-standard finish_reason surfaced as error text
+]
+
+function isOpenAiErrorRetryable(e: APICallError) {
+  const status = e.statusCode
+  if (!status) return e.isRetryable
+  // openai sometimes returns 404 for models that are actually available
+  return status === 404 || e.isRetryable
+}
 
-  // Providers not reliably handled in this function:
-  // - z.ai: can accept overflow silently (needs token-count/context-window checks)
-  function isOverflow(message: string) {
-    if (OVERFLOW_PATTERNS.some((p) => p.test(message))) return true
+// Providers not reliably handled in this function:
+// - z.ai: can accept overflow silently (needs token-count/context-window checks)
+function isOverflow(message: string) {
+  if (OVERFLOW_PATTERNS.some((p) => p.test(message))) return true
 
-    // Providers/status patterns handled outside of regex list:
-    // - Cerebras: often returns "400 (no body)" / "413 (no body)"
-    // - Mistral: often returns "400 (no body)" / "413 (no body)"
-    return /^4(00|13)\s*(status code)?\s*\(no body\)/i.test(message)
-  }
+  // Providers/status patterns handled outside of regex list:
+  // - Cerebras: often returns "400 (no body)" / "413 (no body)"
+  // - Mistral: often returns "400 (no body)" / "413 (no body)"
+  return /^4(00|13)\s*(status code)?\s*\(no body\)/i.test(message)
+}
 
-  function message(providerID: ProviderID, e: APICallError) {
-    return iife(() => {
-      const msg = e.message
-      if (msg === "") {
-        if (e.responseBody) return e.responseBody
-        if (e.statusCode) {
-          const err = STATUS_CODES[e.statusCode]
-          if (err) return err
-        }
-        return "Unknown error"
+function message(providerID: ProviderID, e: APICallError) {
+  return iife(() => {
+    const msg = e.message
+    if (msg === "") {
+      if (e.responseBody) return e.responseBody
+      if (e.statusCode) {
+        const err = STATUS_CODES[e.statusCode]
+        if (err) return err
       }
+      return "Unknown error"
+    }
 
-      if (!e.responseBody || (e.statusCode && msg !== STATUS_CODES[e.statusCode])) {
-        return msg
+    if (!e.responseBody || (e.statusCode && msg !== STATUS_CODES[e.statusCode])) {
+      return msg
+    }
+
+    try {
+      const body = JSON.parse(e.responseBody)
+      // try to extract common error message fields
+      const errMsg = body.message || body.error || body.error?.message
+      if (errMsg && typeof errMsg === "string") {
+        return `${msg}: ${errMsg}`
       }
+    } catch {}
 
-      try {
-        const body = JSON.parse(e.responseBody)
-        // try to extract common error message fields
-        const errMsg = body.message || body.error || body.error?.message
-        if (errMsg && typeof errMsg === "string") {
-          return `${msg}: ${errMsg}`
-        }
-      } catch {}
-
-      // If responseBody is HTML (e.g. from a gateway or proxy error page),
-      // provide a human-readable message instead of dumping raw markup
-      if (/^\s*<!doctype|^\s*<html/i.test(e.responseBody)) {
-        if (e.statusCode === 401) {
-          return "Unauthorized: request was blocked by a gateway or proxy. Your authentication token may be missing or expired — try running `opencode auth login <your provider URL>` to re-authenticate."
-        }
-        if (e.statusCode === 403) {
-          return "Forbidden: request was blocked by a gateway or proxy. You may not have permission to access this resource — check your account and provider settings."
-        }
-        return msg
+    // If responseBody is HTML (e.g. from a gateway or proxy error page),
+    // provide a human-readable message instead of dumping raw markup
+    if (/^\s*<!doctype|^\s*<html/i.test(e.responseBody)) {
+      if (e.statusCode === 401) {
+        return "Unauthorized: request was blocked by a gateway or proxy. Your authentication token may be missing or expired — try running `opencode auth login <your provider URL>` to re-authenticate."
       }
+      if (e.statusCode === 403) {
+        return "Forbidden: request was blocked by a gateway or proxy. You may not have permission to access this resource — check your account and provider settings."
+      }
+      return msg
+    }
 
-      return `${msg}: ${e.responseBody}`
-    }).trim()
+    return `${msg}: ${e.responseBody}`
+  }).trim()
+}
+
+function json(input: unknown) {
+  if (typeof input === "string") {
+    try {
+      const result = JSON.parse(input)
+      if (result && typeof result === "object") return result
+      return undefined
+    } catch {
+      return undefined
+    }
+  }
+  if (typeof input === "object" && input !== null) {
+    return input
   }
+  return undefined
+}
 
-  function json(input: unknown) {
-    if (typeof input === "string") {
-      try {
-        const result = JSON.parse(input)
-        if (result && typeof result === "object") return result
-        return undefined
-      } catch {
-        return undefined
-      }
+export type ParsedStreamError =
+  | {
+      type: "context_overflow"
+      message: string
+      responseBody: string
     }
-    if (typeof input === "object" && input !== null) {
-      return input
+  | {
+      type: "api_error"
+      message: string
+      isRetryable: false
+      responseBody: string
     }
-    return undefined
-  }
 
-  export type ParsedStreamError =
-    | {
-        type: "context_overflow"
-        message: string
-        responseBody: string
-      }
-    | {
-        type: "api_error"
-        message: string
-        isRetryable: false
-        responseBody: string
-      }
+export function parseStreamError(input: unknown): ParsedStreamError | undefined {
+  const body = json(input)
+  if (!body) return
 
-  export function parseStreamError(input: unknown): ParsedStreamError | undefined {
-    const body = json(input)
-    if (!body) return
-
-    const responseBody = JSON.stringify(body)
-    if (body.type !== "error") return
-
-    switch (body?.error?.code) {
-      case "context_length_exceeded":
-        return {
-          type: "context_overflow",
-          message: "Input exceeds context window of this model",
-          responseBody,
-        }
-      case "insufficient_quota":
-        return {
-          type: "api_error",
-          message: "Quota exceeded. Check your plan and billing details.",
-          isRetryable: false,
-          responseBody,
-        }
-      case "usage_not_included":
-        return {
-          type: "api_error",
-          message: "To use Codex with your ChatGPT plan, upgrade to Plus: https://chatgpt.com/explore/plus.",
-          isRetryable: false,
-          responseBody,
-        }
-      case "invalid_prompt":
-        return {
-          type: "api_error",
-          message: typeof body?.error?.message === "string" ? body?.error?.message : "Invalid prompt.",
-          isRetryable: false,
-          responseBody,
-        }
-    }
-  }
+  const responseBody = JSON.stringify(body)
+  if (body.type !== "error") return
 
-  export type ParsedAPICallError =
-    | {
-        type: "context_overflow"
-        message: string
-        responseBody?: string
+  switch (body?.error?.code) {
+    case "context_length_exceeded":
+      return {
+        type: "context_overflow",
+        message: "Input exceeds context window of this model",
+        responseBody,
       }
-    | {
-        type: "api_error"
-        message: string
-        statusCode?: number
-        isRetryable: boolean
-        responseHeaders?: Record<string, string>
-        responseBody?: string
-        metadata?: Record<string, string>
+    case "insufficient_quota":
+      return {
+        type: "api_error",
+        message: "Quota exceeded. Check your plan and billing details.",
+        isRetryable: false,
+        responseBody,
       }
-
-  export function parseAPICallError(input: { providerID: ProviderID; error: APICallError }): ParsedAPICallError {
-    const m = message(input.providerID, input.error)
-    const body = json(input.error.responseBody)
-    if (isOverflow(m) || input.error.statusCode === 413 || body?.error?.code === "context_length_exceeded") {
+    case "usage_not_included":
       return {
-        type: "context_overflow",
-        message: m,
-        responseBody: input.error.responseBody,
+        type: "api_error",
+        message: "To use Codex with your ChatGPT plan, upgrade to Plus: https://chatgpt.com/explore/plus.",
+        isRetryable: false,
+        responseBody,
       }
+    case "invalid_prompt":
+      return {
+        type: "api_error",
+        message: typeof body?.error?.message === "string" ? body?.error?.message : "Invalid prompt.",
+        isRetryable: false,
+        responseBody,
+      }
+  }
+}
+
+export type ParsedAPICallError =
+  | {
+      type: "context_overflow"
+      message: string
+      responseBody?: string
+    }
+  | {
+      type: "api_error"
+      message: string
+      statusCode?: number
+      isRetryable: boolean
+      responseHeaders?: Record<string, string>
+      responseBody?: string
+      metadata?: Record<string, string>
     }
 
-    const metadata = input.error.url ? { url: input.error.url } : undefined
+export function parseAPICallError(input: { providerID: ProviderID; error: APICallError }): ParsedAPICallError {
+  const m = message(input.providerID, input.error)
+  const body = json(input.error.responseBody)
+  if (isOverflow(m) || input.error.statusCode === 413 || body?.error?.code === "context_length_exceeded") {
     return {
-      type: "api_error",
+      type: "context_overflow",
       message: m,
-      statusCode: input.error.statusCode,
-      isRetryable: input.providerID.startsWith("openai")
-        ? isOpenAiErrorRetryable(input.error)
-        : input.error.isRetryable,
-      responseHeaders: input.error.responseHeaders,
       responseBody: input.error.responseBody,
-      metadata,
     }
   }
+
+  const metadata = input.error.url ? { url: input.error.url } : undefined
+  return {
+    type: "api_error",
+    message: m,
+    statusCode: input.error.statusCode,
+    isRetryable: input.providerID.startsWith("openai")
+      ? isOpenAiErrorRetryable(input.error)
+      : input.error.isRetryable,
+    responseHeaders: input.error.responseHeaders,
+    responseBody: input.error.responseBody,
+    metadata,
+  }
 }

+ 4 - 0
packages/opencode/src/provider/index.ts

@@ -1 +1,5 @@
 export * as Provider from "./provider"
+export * as ProviderAuth from "./auth"
+export * as ProviderError from "./error"
+export * as ModelsDev from "./models"
+export * as ProviderTransform from "./transform"

+ 146 - 148
packages/opencode/src/provider/models.ts

@@ -13,169 +13,167 @@ import { Hash } from "@opencode-ai/shared/util/hash"
 // Falls back to undefined in dev mode when snapshot doesn't exist
 /* @ts-ignore */
 
-export namespace ModelsDev {
-  const log = Log.create({ service: "models.dev" })
-  const source = url()
-  const filepath = path.join(
-    Global.Path.cache,
-    source === "https://models.dev" ? "models.json" : `models-${Hash.fast(source)}.json`,
-  )
-  const ttl = 5 * 60 * 1000
-
-  type JsonValue = string | number | boolean | null | { [key: string]: JsonValue } | JsonValue[]
-
-  const JsonValue: z.ZodType<JsonValue> = z.lazy(() =>
-    z.union([z.string(), z.number(), z.boolean(), z.null(), z.array(JsonValue), z.record(z.string(), JsonValue)]),
-  )
-
-  const Cost = z.object({
-    input: z.number(),
-    output: z.number(),
-    cache_read: z.number().optional(),
-    cache_write: z.number().optional(),
-    context_over_200k: z
-      .object({
-        input: z.number(),
-        output: z.number(),
-        cache_read: z.number().optional(),
-        cache_write: z.number().optional(),
-      })
-      .optional(),
-  })
-
-  export const Model = z.object({
-    id: z.string(),
-    name: z.string(),
-    family: z.string().optional(),
-    release_date: z.string(),
-    attachment: z.boolean(),
-    reasoning: z.boolean(),
-    temperature: z.boolean(),
-    tool_call: z.boolean(),
-    interleaved: z
-      .union([
-        z.literal(true),
-        z
-          .object({
-            field: z.enum(["reasoning_content", "reasoning_details"]),
-          })
-          .strict(),
-      ])
-      .optional(),
-    cost: Cost.optional(),
-    limit: z.object({
-      context: z.number(),
-      input: z.number().optional(),
+const log = Log.create({ service: "models.dev" })
+const source = url()
+const filepath = path.join(
+  Global.Path.cache,
+  source === "https://models.dev" ? "models.json" : `models-${Hash.fast(source)}.json`,
+)
+const ttl = 5 * 60 * 1000
+
+type JsonValue = string | number | boolean | null | { [key: string]: JsonValue } | JsonValue[]
+
+const JsonValue: z.ZodType<JsonValue> = z.lazy(() =>
+  z.union([z.string(), z.number(), z.boolean(), z.null(), z.array(JsonValue), z.record(z.string(), JsonValue)]),
+)
+
+const Cost = z.object({
+  input: z.number(),
+  output: z.number(),
+  cache_read: z.number().optional(),
+  cache_write: z.number().optional(),
+  context_over_200k: z
+    .object({
+      input: z.number(),
       output: z.number(),
-    }),
-    modalities: z
-      .object({
-        input: z.array(z.enum(["text", "audio", "image", "video", "pdf"])),
-        output: z.array(z.enum(["text", "audio", "image", "video", "pdf"])),
-      })
-      .optional(),
-    experimental: z
-      .object({
-        modes: z
-          .record(
-            z.string(),
-            z.object({
-              cost: Cost.optional(),
-              provider: z
-                .object({
-                  body: z.record(z.string(), JsonValue).optional(),
-                  headers: z.record(z.string(), z.string()).optional(),
-                })
-                .optional(),
-            }),
-          )
-          .optional(),
-      })
-      .optional(),
-    status: z.enum(["alpha", "beta", "deprecated"]).optional(),
-    provider: z.object({ npm: z.string().optional(), api: z.string().optional() }).optional(),
-  })
-  export type Model = z.infer<typeof Model>
-
-  export const Provider = z.object({
-    api: z.string().optional(),
-    name: z.string(),
-    env: z.array(z.string()),
-    id: z.string(),
-    npm: z.string().optional(),
-    models: z.record(z.string(), Model),
-  })
-
-  export type Provider = z.infer<typeof Provider>
-
-  function url() {
-    return Flag.OPENCODE_MODELS_URL || "https://models.dev"
-  }
+      cache_read: z.number().optional(),
+      cache_write: z.number().optional(),
+    })
+    .optional(),
+})
+
+export const Model = z.object({
+  id: z.string(),
+  name: z.string(),
+  family: z.string().optional(),
+  release_date: z.string(),
+  attachment: z.boolean(),
+  reasoning: z.boolean(),
+  temperature: z.boolean(),
+  tool_call: z.boolean(),
+  interleaved: z
+    .union([
+      z.literal(true),
+      z
+        .object({
+          field: z.enum(["reasoning_content", "reasoning_details"]),
+        })
+        .strict(),
+    ])
+    .optional(),
+  cost: Cost.optional(),
+  limit: z.object({
+    context: z.number(),
+    input: z.number().optional(),
+    output: z.number(),
+  }),
+  modalities: z
+    .object({
+      input: z.array(z.enum(["text", "audio", "image", "video", "pdf"])),
+      output: z.array(z.enum(["text", "audio", "image", "video", "pdf"])),
+    })
+    .optional(),
+  experimental: z
+    .object({
+      modes: z
+        .record(
+          z.string(),
+          z.object({
+            cost: Cost.optional(),
+            provider: z
+              .object({
+                body: z.record(z.string(), JsonValue).optional(),
+                headers: z.record(z.string(), z.string()).optional(),
+              })
+              .optional(),
+          }),
+        )
+        .optional(),
+    })
+    .optional(),
+  status: z.enum(["alpha", "beta", "deprecated"]).optional(),
+  provider: z.object({ npm: z.string().optional(), api: z.string().optional() }).optional(),
+})
+export type Model = z.infer<typeof Model>
+
+export const Provider = z.object({
+  api: z.string().optional(),
+  name: z.string(),
+  env: z.array(z.string()),
+  id: z.string(),
+  npm: z.string().optional(),
+  models: z.record(z.string(), Model),
+})
+
+export type Provider = z.infer<typeof Provider>
+
+function url() {
+  return Flag.OPENCODE_MODELS_URL || "https://models.dev"
+}
 
-  function fresh() {
-    return Date.now() - Number(Filesystem.stat(filepath)?.mtimeMs ?? 0) < ttl
-  }
+function fresh() {
+  return Date.now() - Number(Filesystem.stat(filepath)?.mtimeMs ?? 0) < ttl
+}
 
-  function skip(force: boolean) {
-    return !force && fresh()
-  }
+function skip(force: boolean) {
+  return !force && fresh()
+}
 
-  const fetchApi = async () => {
-    const result = await fetch(`${url()}/api.json`, {
-      headers: { "User-Agent": Installation.USER_AGENT },
-      signal: AbortSignal.timeout(10000),
-    })
-    return { ok: result.ok, text: await result.text() }
-  }
+const fetchApi = async () => {
+  const result = await fetch(`${url()}/api.json`, {
+    headers: { "User-Agent": Installation.USER_AGENT },
+    signal: AbortSignal.timeout(10000),
+  })
+  return { ok: result.ok, text: await result.text() }
+}
 
-  export const Data = lazy(async () => {
+export const Data = lazy(async () => {
+  const result = await Filesystem.readJson(Flag.OPENCODE_MODELS_PATH ?? filepath).catch(() => {})
+  if (result) return result
+  // @ts-ignore
+  const snapshot = await import("./models-snapshot.js")
+    .then((m) => m.snapshot as Record<string, unknown>)
+    .catch(() => undefined)
+  if (snapshot) return snapshot
+  if (Flag.OPENCODE_DISABLE_MODELS_FETCH) return {}
+  return Flock.withLock(`models-dev:${filepath}`, async () => {
     const result = await Filesystem.readJson(Flag.OPENCODE_MODELS_PATH ?? filepath).catch(() => {})
     if (result) return result
-    // @ts-ignore
-    const snapshot = await import("./models-snapshot.js")
-      .then((m) => m.snapshot as Record<string, unknown>)
-      .catch(() => undefined)
-    if (snapshot) return snapshot
-    if (Flag.OPENCODE_DISABLE_MODELS_FETCH) return {}
-    return Flock.withLock(`models-dev:${filepath}`, async () => {
-      const result = await Filesystem.readJson(Flag.OPENCODE_MODELS_PATH ?? filepath).catch(() => {})
-      if (result) return result
-      const result2 = await fetchApi()
-      if (result2.ok) {
-        await Filesystem.write(filepath, result2.text).catch((e) => {
-          log.error("Failed to write models cache", { error: e })
-        })
-      }
-      return JSON.parse(result2.text)
-    })
+    const result2 = await fetchApi()
+    if (result2.ok) {
+      await Filesystem.write(filepath, result2.text).catch((e) => {
+        log.error("Failed to write models cache", { error: e })
+      })
+    }
+    return JSON.parse(result2.text)
   })
+})
 
-  export async function get() {
-    const result = await Data()
-    return result as Record<string, Provider>
-  }
-
-  export async function refresh(force = false) {
-    if (skip(force)) return ModelsDev.Data.reset()
-    await Flock.withLock(`models-dev:${filepath}`, async () => {
-      if (skip(force)) return ModelsDev.Data.reset()
-      const result = await fetchApi()
-      if (!result.ok) return
-      await Filesystem.write(filepath, result.text)
-      ModelsDev.Data.reset()
-    }).catch((e) => {
-      log.error("Failed to fetch models.dev", {
-        error: e,
-      })
+export async function get() {
+  const result = await Data()
+  return result as Record<string, Provider>
+}
+
+export async function refresh(force = false) {
+  if (skip(force)) return Data.reset()
+  await Flock.withLock(`models-dev:${filepath}`, async () => {
+    if (skip(force)) return Data.reset()
+    const result = await fetchApi()
+    if (!result.ok) return
+    await Filesystem.write(filepath, result.text)
+    Data.reset()
+  }).catch((e) => {
+    log.error("Failed to fetch models.dev", {
+      error: e,
     })
-  }
+  })
 }
 
 if (!Flag.OPENCODE_DISABLE_MODELS_FETCH && !process.argv.includes("--get-yargs-completions")) {
-  void ModelsDev.refresh()
+  void refresh()
   setInterval(
     async () => {
-      await ModelsDev.refresh()
+      await refresh()
     },
     60 * 1000 * 60,
   ).unref()

+ 2 - 2
packages/opencode/src/provider/provider.ts

@@ -10,7 +10,7 @@ import { Hash } from "@opencode-ai/shared/util/hash"
 import { Plugin } from "../plugin"
 import { NamedError } from "@opencode-ai/shared/util/error"
 import { type LanguageModelV3 } from "@ai-sdk/provider"
-import { ModelsDev } from "./models"
+import * as ModelsDev from "./models"
 import { Auth } from "../auth"
 import { Env } from "../env"
 import { Instance } from "../project/instance"
@@ -55,7 +55,7 @@ import {
 } from "gitlab-ai-provider"
 import { fromNodeProviderChain } from "@aws-sdk/credential-providers"
 import { GoogleAuth } from "google-auth-library"
-import { ProviderTransform } from "./transform"
+import * as ProviderTransform from "./transform"
 import { Installation } from "../installation"
 import { ModelID, ProviderID } from "./schema"
 

+ 876 - 878
packages/opencode/src/provider/transform.ts

@@ -3,7 +3,7 @@ import { mergeDeep, unique } from "remeda"
 import type { JSONSchema7 } from "@ai-sdk/provider"
 import type { JSONSchema } from "zod/v4/core"
 import type * as Provider from "./provider"
-import type { ModelsDev } from "./models"
+import type * as ModelsDev from "./models"
 import { iife } from "@/util/iife"
 import { Flag } from "@/flag/flag"
 
@@ -17,570 +17,420 @@ function mimeToModality(mime: string): Modality | undefined {
   return undefined
 }
 
-export namespace ProviderTransform {
-  export const OUTPUT_TOKEN_MAX = Flag.OPENCODE_EXPERIMENTAL_OUTPUT_TOKEN_MAX || 32_000
-
-  // Maps npm package to the key the AI SDK expects for providerOptions
-  function sdkKey(npm: string): string | undefined {
-    switch (npm) {
-      case "@ai-sdk/github-copilot":
-        return "copilot"
-      case "@ai-sdk/azure":
-        return "azure"
-      case "@ai-sdk/openai":
-        return "openai"
-      case "@ai-sdk/amazon-bedrock":
-        return "bedrock"
-      case "@ai-sdk/anthropic":
-      case "@ai-sdk/google-vertex/anthropic":
-        return "anthropic"
-      case "@ai-sdk/google-vertex":
-        return "vertex"
-      case "@ai-sdk/google":
-        return "google"
-      case "@ai-sdk/gateway":
-        return "gateway"
-      case "@openrouter/ai-sdk-provider":
-        return "openrouter"
-    }
-    return undefined
+export const OUTPUT_TOKEN_MAX = Flag.OPENCODE_EXPERIMENTAL_OUTPUT_TOKEN_MAX || 32_000
+
+// Maps npm package to the key the AI SDK expects for providerOptions
+function sdkKey(npm: string): string | undefined {
+  switch (npm) {
+    case "@ai-sdk/github-copilot":
+      return "copilot"
+    case "@ai-sdk/azure":
+      return "azure"
+    case "@ai-sdk/openai":
+      return "openai"
+    case "@ai-sdk/amazon-bedrock":
+      return "bedrock"
+    case "@ai-sdk/anthropic":
+    case "@ai-sdk/google-vertex/anthropic":
+      return "anthropic"
+    case "@ai-sdk/google-vertex":
+      return "vertex"
+    case "@ai-sdk/google":
+      return "google"
+    case "@ai-sdk/gateway":
+      return "gateway"
+    case "@openrouter/ai-sdk-provider":
+      return "openrouter"
   }
+  return undefined
+}
 
-  function normalizeMessages(
-    msgs: ModelMessage[],
-    model: Provider.Model,
-    _options: Record<string, unknown>,
-  ): ModelMessage[] {
-    // Anthropic rejects messages with empty content - filter out empty string messages
-    // and remove empty text/reasoning parts from array content
-    if (model.api.npm === "@ai-sdk/anthropic" || model.api.npm === "@ai-sdk/amazon-bedrock") {
-      msgs = msgs
-        .map((msg) => {
-          if (typeof msg.content === "string") {
-            if (msg.content === "") return undefined
-            return msg
-          }
-          if (!Array.isArray(msg.content)) return msg
-          const filtered = msg.content.filter((part) => {
-            if (part.type === "text" || part.type === "reasoning") {
-              return part.text !== ""
-            }
-            return true
-          })
-          if (filtered.length === 0) return undefined
-          return { ...msg, content: filtered }
-        })
-        .filter((msg): msg is ModelMessage => msg !== undefined && msg.content !== "")
-    }
-
-    if (model.api.id.includes("claude")) {
-      const scrub = (id: string) => id.replace(/[^a-zA-Z0-9_-]/g, "_")
-      msgs = msgs.map((msg) => {
-        if (msg.role === "assistant" && Array.isArray(msg.content)) {
-          return {
-            ...msg,
-            content: msg.content.map((part) => {
-              if (part.type === "tool-call" || part.type === "tool-result") {
-                return { ...part, toolCallId: scrub(part.toolCallId) }
-              }
-              return part
-            }),
-          }
+function normalizeMessages(
+  msgs: ModelMessage[],
+  model: Provider.Model,
+  _options: Record<string, unknown>,
+): ModelMessage[] {
+  // Anthropic rejects messages with empty content - filter out empty string messages
+  // and remove empty text/reasoning parts from array content
+  if (model.api.npm === "@ai-sdk/anthropic" || model.api.npm === "@ai-sdk/amazon-bedrock") {
+    msgs = msgs
+      .map((msg) => {
+        if (typeof msg.content === "string") {
+          if (msg.content === "") return undefined
+          return msg
         }
-        if (msg.role === "tool" && Array.isArray(msg.content)) {
-          return {
-            ...msg,
-            content: msg.content.map((part) => {
-              if (part.type === "tool-result") {
-                return { ...part, toolCallId: scrub(part.toolCallId) }
-              }
-              return part
-            }),
+        if (!Array.isArray(msg.content)) return msg
+        const filtered = msg.content.filter((part) => {
+          if (part.type === "text" || part.type === "reasoning") {
+            return part.text !== ""
           }
-        }
-        return msg
-      })
-    }
-    if (["@ai-sdk/anthropic", "@ai-sdk/google-vertex/anthropic"].includes(model.api.npm)) {
-      // Anthropic rejects assistant turns where tool_use blocks are followed by non-tool
-      // content, e.g. [tool_use, tool_use, text], with:
-      // `tool_use` ids were found without `tool_result` blocks immediately after...
-      //
-      // Reorder that invalid shape into [text] + [tool_use, tool_use]. Consecutive
-      // assistant messages are later merged by the provider/SDK, so preserving the
-      // original [tool_use...] then [text] order still produces the invalid payload.
-      //
-      // The root cause appears to be somewhere upstream where the stream is originally
-      // processed. We were unable to locate an exact narrower reproduction elsewhere,
-      // so we keep this transform in place for the time being.
-      msgs = msgs.flatMap((msg) => {
-        if (msg.role !== "assistant" || !Array.isArray(msg.content)) return [msg]
-
-        const parts = msg.content
-        const first = parts.findIndex((part) => part.type === "tool-call")
-        if (first === -1) return [msg]
-        if (!parts.slice(first).some((part) => part.type !== "tool-call")) return [msg]
-        return [
-          { ...msg, content: parts.filter((part) => part.type !== "tool-call") },
-          { ...msg, content: parts.filter((part) => part.type === "tool-call") },
-        ]
+          return true
+        })
+        if (filtered.length === 0) return undefined
+        return { ...msg, content: filtered }
       })
-    }
-    if (
-      model.providerID === "mistral" ||
-      model.api.id.toLowerCase().includes("mistral") ||
-      model.api.id.toLocaleLowerCase().includes("devstral")
-    ) {
-      const scrub = (id: string) => {
-        return id
-          .replace(/[^a-zA-Z0-9]/g, "") // Remove non-alphanumeric characters
-          .substring(0, 9) // Take first 9 characters
-          .padEnd(9, "0") // Pad with zeros if less than 9 characters
-      }
-      const result: ModelMessage[] = []
-      for (let i = 0; i < msgs.length; i++) {
-        const msg = msgs[i]
-        const nextMsg = msgs[i + 1]
+      .filter((msg): msg is ModelMessage => msg !== undefined && msg.content !== "")
+  }
 
-        if (msg.role === "assistant" && Array.isArray(msg.content)) {
-          msg.content = msg.content.map((part) => {
+  if (model.api.id.includes("claude")) {
+    const scrub = (id: string) => id.replace(/[^a-zA-Z0-9_-]/g, "_")
+    msgs = msgs.map((msg) => {
+      if (msg.role === "assistant" && Array.isArray(msg.content)) {
+        return {
+          ...msg,
+          content: msg.content.map((part) => {
             if (part.type === "tool-call" || part.type === "tool-result") {
               return { ...part, toolCallId: scrub(part.toolCallId) }
             }
             return part
-          })
+          }),
         }
-        if (msg.role === "tool" && Array.isArray(msg.content)) {
-          msg.content = msg.content.map((part) => {
+      }
+      if (msg.role === "tool" && Array.isArray(msg.content)) {
+        return {
+          ...msg,
+          content: msg.content.map((part) => {
             if (part.type === "tool-result") {
               return { ...part, toolCallId: scrub(part.toolCallId) }
             }
             return part
-          })
+          }),
         }
-        result.push(msg)
+      }
+      return msg
+    })
+  }
+  if (["@ai-sdk/anthropic", "@ai-sdk/google-vertex/anthropic"].includes(model.api.npm)) {
+    // Anthropic rejects assistant turns where tool_use blocks are followed by non-tool
+    // content, e.g. [tool_use, tool_use, text], with:
+    // `tool_use` ids were found without `tool_result` blocks immediately after...
+    //
+    // Reorder that invalid shape into [text] + [tool_use, tool_use]. Consecutive
+    // assistant messages are later merged by the provider/SDK, so preserving the
+    // original [tool_use...] then [text] order still produces the invalid payload.
+    //
+    // The root cause appears to be somewhere upstream where the stream is originally
+    // processed. We were unable to locate an exact narrower reproduction elsewhere,
+    // so we keep this transform in place for the time being.
+    msgs = msgs.flatMap((msg) => {
+      if (msg.role !== "assistant" || !Array.isArray(msg.content)) return [msg]
+
+      const parts = msg.content
+      const first = parts.findIndex((part) => part.type === "tool-call")
+      if (first === -1) return [msg]
+      if (!parts.slice(first).some((part) => part.type !== "tool-call")) return [msg]
+      return [
+        { ...msg, content: parts.filter((part) => part.type !== "tool-call") },
+        { ...msg, content: parts.filter((part) => part.type === "tool-call") },
+      ]
+    })
+  }
+  if (
+    model.providerID === "mistral" ||
+    model.api.id.toLowerCase().includes("mistral") ||
+    model.api.id.toLocaleLowerCase().includes("devstral")
+  ) {
+    const scrub = (id: string) => {
+      return id
+        .replace(/[^a-zA-Z0-9]/g, "") // Remove non-alphanumeric characters
+        .substring(0, 9) // Take first 9 characters
+        .padEnd(9, "0") // Pad with zeros if less than 9 characters
+    }
+    const result: ModelMessage[] = []
+    for (let i = 0; i < msgs.length; i++) {
+      const msg = msgs[i]
+      const nextMsg = msgs[i + 1]
+
+      if (msg.role === "assistant" && Array.isArray(msg.content)) {
+        msg.content = msg.content.map((part) => {
+          if (part.type === "tool-call" || part.type === "tool-result") {
+            return { ...part, toolCallId: scrub(part.toolCallId) }
+          }
+          return part
+        })
+      }
+      if (msg.role === "tool" && Array.isArray(msg.content)) {
+        msg.content = msg.content.map((part) => {
+          if (part.type === "tool-result") {
+            return { ...part, toolCallId: scrub(part.toolCallId) }
+          }
+          return part
+        })
+      }
+      result.push(msg)
 
-        // Fix message sequence: tool messages cannot be followed by user messages
-        if (msg.role === "tool" && nextMsg?.role === "user") {
-          result.push({
-            role: "assistant",
-            content: [
-              {
-                type: "text",
-                text: "Done.",
-              },
-            ],
-          })
-        }
+      // Fix message sequence: tool messages cannot be followed by user messages
+      if (msg.role === "tool" && nextMsg?.role === "user") {
+        result.push({
+          role: "assistant",
+          content: [
+            {
+              type: "text",
+              text: "Done.",
+            },
+          ],
+        })
       }
-      return result
     }
+    return result
+  }
 
-    if (typeof model.capabilities.interleaved === "object" && model.capabilities.interleaved.field) {
-      const field = model.capabilities.interleaved.field
-      return msgs.map((msg) => {
-        if (msg.role === "assistant" && Array.isArray(msg.content)) {
-          const reasoningParts = msg.content.filter((part: any) => part.type === "reasoning")
-          const reasoningText = reasoningParts.map((part: any) => part.text).join("")
+  if (typeof model.capabilities.interleaved === "object" && model.capabilities.interleaved.field) {
+    const field = model.capabilities.interleaved.field
+    return msgs.map((msg) => {
+      if (msg.role === "assistant" && Array.isArray(msg.content)) {
+        const reasoningParts = msg.content.filter((part: any) => part.type === "reasoning")
+        const reasoningText = reasoningParts.map((part: any) => part.text).join("")
 
-          // Filter out reasoning parts from content
-          const filteredContent = msg.content.filter((part: any) => part.type !== "reasoning")
-
-          // Include reasoning_content | reasoning_details directly on the message for all assistant messages
-          if (reasoningText) {
-            return {
-              ...msg,
-              content: filteredContent,
-              providerOptions: {
-                ...msg.providerOptions,
-                openaiCompatible: {
-                  ...(msg.providerOptions as any)?.openaiCompatible,
-                  [field]: reasoningText,
-                },
-              },
-            }
-          }
+        // Filter out reasoning parts from content
+        const filteredContent = msg.content.filter((part: any) => part.type !== "reasoning")
 
+        // Include reasoning_content | reasoning_details directly on the message for all assistant messages
+        if (reasoningText) {
           return {
             ...msg,
             content: filteredContent,
+            providerOptions: {
+              ...msg.providerOptions,
+              openaiCompatible: {
+                ...(msg.providerOptions as any)?.openaiCompatible,
+                [field]: reasoningText,
+              },
+            },
           }
         }
 
-        return msg
-      })
-    }
+        return {
+          ...msg,
+          content: filteredContent,
+        }
+      }
 
-    return msgs
+      return msg
+    })
   }
 
-  function applyCaching(msgs: ModelMessage[], model: Provider.Model): ModelMessage[] {
-    const system = msgs.filter((msg) => msg.role === "system").slice(0, 2)
-    const final = msgs.filter((msg) => msg.role !== "system").slice(-2)
-
-    const providerOptions = {
-      anthropic: {
-        cacheControl: { type: "ephemeral" },
-      },
-      openrouter: {
-        cacheControl: { type: "ephemeral" },
-      },
-      bedrock: {
-        cachePoint: { type: "default" },
-      },
-      openaiCompatible: {
-        cache_control: { type: "ephemeral" },
-      },
-      copilot: {
-        copilot_cache_control: { type: "ephemeral" },
-      },
-      alibaba: {
-        cacheControl: { type: "ephemeral" },
-      },
-    }
+  return msgs
+}
 
-    for (const msg of unique([...system, ...final])) {
-      const useMessageLevelOptions =
-        model.providerID === "anthropic" ||
-        model.providerID.includes("bedrock") ||
-        model.api.npm === "@ai-sdk/amazon-bedrock"
-      const shouldUseContentOptions = !useMessageLevelOptions && Array.isArray(msg.content) && msg.content.length > 0
-
-      if (shouldUseContentOptions) {
-        const lastContent = msg.content[msg.content.length - 1]
-        if (
-          lastContent &&
-          typeof lastContent === "object" &&
-          lastContent.type !== "tool-approval-request" &&
-          lastContent.type !== "tool-approval-response"
-        ) {
-          lastContent.providerOptions = mergeDeep(lastContent.providerOptions ?? {}, providerOptions)
-          continue
-        }
-      }
+function applyCaching(msgs: ModelMessage[], model: Provider.Model): ModelMessage[] {
+  const system = msgs.filter((msg) => msg.role === "system").slice(0, 2)
+  const final = msgs.filter((msg) => msg.role !== "system").slice(-2)
+
+  const providerOptions = {
+    anthropic: {
+      cacheControl: { type: "ephemeral" },
+    },
+    openrouter: {
+      cacheControl: { type: "ephemeral" },
+    },
+    bedrock: {
+      cachePoint: { type: "default" },
+    },
+    openaiCompatible: {
+      cache_control: { type: "ephemeral" },
+    },
+    copilot: {
+      copilot_cache_control: { type: "ephemeral" },
+    },
+    alibaba: {
+      cacheControl: { type: "ephemeral" },
+    },
+  }
 
-      msg.providerOptions = mergeDeep(msg.providerOptions ?? {}, providerOptions)
+  for (const msg of unique([...system, ...final])) {
+    const useMessageLevelOptions =
+      model.providerID === "anthropic" ||
+      model.providerID.includes("bedrock") ||
+      model.api.npm === "@ai-sdk/amazon-bedrock"
+    const shouldUseContentOptions = !useMessageLevelOptions && Array.isArray(msg.content) && msg.content.length > 0
+
+    if (shouldUseContentOptions) {
+      const lastContent = msg.content[msg.content.length - 1]
+      if (
+        lastContent &&
+        typeof lastContent === "object" &&
+        lastContent.type !== "tool-approval-request" &&
+        lastContent.type !== "tool-approval-response"
+      ) {
+        lastContent.providerOptions = mergeDeep(lastContent.providerOptions ?? {}, providerOptions)
+        continue
+      }
     }
 
-    return msgs
+    msg.providerOptions = mergeDeep(msg.providerOptions ?? {}, providerOptions)
   }
 
-  function unsupportedParts(msgs: ModelMessage[], model: Provider.Model): ModelMessage[] {
-    return msgs.map((msg) => {
-      if (msg.role !== "user" || !Array.isArray(msg.content)) return msg
-
-      const filtered = msg.content.map((part) => {
-        if (part.type !== "file" && part.type !== "image") return part
-
-        // Check for empty base64 image data
-        if (part.type === "image") {
-          const imageStr = String(part.image)
-          if (imageStr.startsWith("data:")) {
-            const match = imageStr.match(/^data:([^;]+);base64,(.*)$/)
-            if (match && (!match[2] || match[2].length === 0)) {
-              return {
-                type: "text" as const,
-                text: "ERROR: Image file is empty or corrupted. Please provide a valid image.",
-              }
+  return msgs
+}
+
+function unsupportedParts(msgs: ModelMessage[], model: Provider.Model): ModelMessage[] {
+  return msgs.map((msg) => {
+    if (msg.role !== "user" || !Array.isArray(msg.content)) return msg
+
+    const filtered = msg.content.map((part) => {
+      if (part.type !== "file" && part.type !== "image") return part
+
+      // Check for empty base64 image data
+      if (part.type === "image") {
+        const imageStr = String(part.image)
+        if (imageStr.startsWith("data:")) {
+          const match = imageStr.match(/^data:([^;]+);base64,(.*)$/)
+          if (match && (!match[2] || match[2].length === 0)) {
+            return {
+              type: "text" as const,
+              text: "ERROR: Image file is empty or corrupted. Please provide a valid image.",
             }
           }
         }
+      }
 
-        const mime = part.type === "image" ? String(part.image).split(";")[0].replace("data:", "") : part.mediaType
-        const filename = part.type === "file" ? part.filename : undefined
-        const modality = mimeToModality(mime)
-        if (!modality) return part
-        if (model.capabilities.input[modality]) return part
-
-        const name = filename ? `"${filename}"` : modality
-        return {
-          type: "text" as const,
-          text: `ERROR: Cannot read ${name} (this model does not support ${modality} input). Inform the user.`,
-        }
-      })
+      const mime = part.type === "image" ? String(part.image).split(";")[0].replace("data:", "") : part.mediaType
+      const filename = part.type === "file" ? part.filename : undefined
+      const modality = mimeToModality(mime)
+      if (!modality) return part
+      if (model.capabilities.input[modality]) return part
 
-      return { ...msg, content: filtered }
+      const name = filename ? `"${filename}"` : modality
+      return {
+        type: "text" as const,
+        text: `ERROR: Cannot read ${name} (this model does not support ${modality} input). Inform the user.`,
+      }
     })
-  }
 
-  export function message(msgs: ModelMessage[], model: Provider.Model, options: Record<string, unknown>) {
-    msgs = unsupportedParts(msgs, model)
-    msgs = normalizeMessages(msgs, model, options)
-    if (
-      (model.providerID === "anthropic" ||
-        model.providerID === "google-vertex-anthropic" ||
-        model.api.id.includes("anthropic") ||
-        model.api.id.includes("claude") ||
-        model.id.includes("anthropic") ||
-        model.id.includes("claude") ||
-        model.api.npm === "@ai-sdk/anthropic" ||
-        model.api.npm === "@ai-sdk/alibaba") &&
-      model.api.npm !== "@ai-sdk/gateway"
-    ) {
-      msgs = applyCaching(msgs, model)
-    }
+    return { ...msg, content: filtered }
+  })
+}
 
-    // Remap providerOptions keys from stored providerID to expected SDK key
-    const key = sdkKey(model.api.npm)
-    if (key && key !== model.providerID) {
-      const remap = (opts: Record<string, any> | undefined) => {
-        if (!opts) return opts
-        if (!(model.providerID in opts)) return opts
-        const result = { ...opts }
-        result[key] = result[model.providerID]
-        delete result[model.providerID]
-        return result
-      }
+export function message(msgs: ModelMessage[], model: Provider.Model, options: Record<string, unknown>) {
+  msgs = unsupportedParts(msgs, model)
+  msgs = normalizeMessages(msgs, model, options)
+  if (
+    (model.providerID === "anthropic" ||
+      model.providerID === "google-vertex-anthropic" ||
+      model.api.id.includes("anthropic") ||
+      model.api.id.includes("claude") ||
+      model.id.includes("anthropic") ||
+      model.id.includes("claude") ||
+      model.api.npm === "@ai-sdk/anthropic" ||
+      model.api.npm === "@ai-sdk/alibaba") &&
+    model.api.npm !== "@ai-sdk/gateway"
+  ) {
+    msgs = applyCaching(msgs, model)
+  }
 
-      msgs = msgs.map((msg) => {
-        if (!Array.isArray(msg.content)) return { ...msg, providerOptions: remap(msg.providerOptions) }
-        return {
-          ...msg,
-          providerOptions: remap(msg.providerOptions),
-          content: msg.content.map((part) => {
-            if (part.type === "tool-approval-request" || part.type === "tool-approval-response") {
-              return { ...part }
-            }
-            return { ...part, providerOptions: remap(part.providerOptions) }
-          }),
-        } as typeof msg
-      })
+  // Remap providerOptions keys from stored providerID to expected SDK key
+  const key = sdkKey(model.api.npm)
+  if (key && key !== model.providerID) {
+    const remap = (opts: Record<string, any> | undefined) => {
+      if (!opts) return opts
+      if (!(model.providerID in opts)) return opts
+      const result = { ...opts }
+      result[key] = result[model.providerID]
+      delete result[model.providerID]
+      return result
     }
 
-    return msgs
+    msgs = msgs.map((msg) => {
+      if (!Array.isArray(msg.content)) return { ...msg, providerOptions: remap(msg.providerOptions) }
+      return {
+        ...msg,
+        providerOptions: remap(msg.providerOptions),
+        content: msg.content.map((part) => {
+          if (part.type === "tool-approval-request" || part.type === "tool-approval-response") {
+            return { ...part }
+          }
+          return { ...part, providerOptions: remap(part.providerOptions) }
+        }),
+      } as typeof msg
+    })
   }
 
-  export function temperature(model: Provider.Model) {
-    const id = model.id.toLowerCase()
-    if (id.includes("qwen")) return 0.55
-    if (id.includes("claude")) return undefined
-    if (id.includes("gemini")) return 1.0
-    if (id.includes("glm-4.6")) return 1.0
-    if (id.includes("glm-4.7")) return 1.0
-    if (id.includes("minimax-m2")) return 1.0
-    if (id.includes("kimi-k2")) {
-      // kimi-k2-thinking & kimi-k2.5 && kimi-k2p5 && kimi-k2-5
-      if (["thinking", "k2.", "k2p", "k2-5"].some((s) => id.includes(s))) {
-        return 1.0
-      }
-      return 0.6
-    }
-    return undefined
-  }
+  return msgs
+}
 
-  export function topP(model: Provider.Model) {
-    const id = model.id.toLowerCase()
-    if (id.includes("qwen")) return 1
-    if (["minimax-m2", "gemini", "kimi-k2.5", "kimi-k2p5", "kimi-k2-5"].some((s) => id.includes(s))) {
-      return 0.95
+export function temperature(model: Provider.Model) {
+  const id = model.id.toLowerCase()
+  if (id.includes("qwen")) return 0.55
+  if (id.includes("claude")) return undefined
+  if (id.includes("gemini")) return 1.0
+  if (id.includes("glm-4.6")) return 1.0
+  if (id.includes("glm-4.7")) return 1.0
+  if (id.includes("minimax-m2")) return 1.0
+  if (id.includes("kimi-k2")) {
+    // kimi-k2-thinking, kimi-k2.5, kimi-k2p5, kimi-k2-5
+    if (["thinking", "k2.", "k2p", "k2-5"].some((s) => id.includes(s))) {
+      return 1.0
     }
-    return undefined
+    return 0.6
   }
+  return undefined
+}
 
-  export function topK(model: Provider.Model) {
-    const id = model.id.toLowerCase()
-    if (id.includes("minimax-m2")) {
-      if (["m2.", "m25", "m21"].some((s) => id.includes(s))) return 40
-      return 20
-    }
-    if (id.includes("gemini")) return 64
-    return undefined
+export function topP(model: Provider.Model) {
+  const id = model.id.toLowerCase()
+  if (id.includes("qwen")) return 1
+  if (["minimax-m2", "gemini", "kimi-k2.5", "kimi-k2p5", "kimi-k2-5"].some((s) => id.includes(s))) {
+    return 0.95
   }
+  return undefined
+}
 
-  const WIDELY_SUPPORTED_EFFORTS = ["low", "medium", "high"]
-  const OPENAI_EFFORTS = ["none", "minimal", ...WIDELY_SUPPORTED_EFFORTS, "xhigh"]
-
-  export function variants(model: Provider.Model): Record<string, Record<string, any>> {
-    if (!model.capabilities.reasoning) return {}
+export function topK(model: Provider.Model) {
+  const id = model.id.toLowerCase()
+  if (id.includes("minimax-m2")) {
+    if (["m2.", "m25", "m21"].some((s) => id.includes(s))) return 40
+    return 20
+  }
+  if (id.includes("gemini")) return 64
+  return undefined
+}
 
-    const id = model.id.toLowerCase()
-    const isAnthropicAdaptive = ["opus-4-6", "opus-4.6", "sonnet-4-6", "sonnet-4.6"].some((v) =>
-      model.api.id.includes(v),
-    )
-    const adaptiveEfforts = ["low", "medium", "high", "max"]
-    if (
-      id.includes("deepseek") ||
-      id.includes("minimax") ||
-      id.includes("glm") ||
-      id.includes("mistral") ||
-      id.includes("kimi") ||
-      id.includes("k2p5") ||
-      id.includes("qwen") ||
-      id.includes("big-pickle")
-    )
-      return {}
+const WIDELY_SUPPORTED_EFFORTS = ["low", "medium", "high"]
+const OPENAI_EFFORTS = ["none", "minimal", ...WIDELY_SUPPORTED_EFFORTS, "xhigh"]
+
+export function variants(model: Provider.Model): Record<string, Record<string, any>> {
+  if (!model.capabilities.reasoning) return {}
+
+  const id = model.id.toLowerCase()
+  const isAnthropicAdaptive = ["opus-4-6", "opus-4.6", "sonnet-4-6", "sonnet-4.6"].some((v) =>
+    model.api.id.includes(v),
+  )
+  const adaptiveEfforts = ["low", "medium", "high", "max"]
+  if (
+    id.includes("deepseek") ||
+    id.includes("minimax") ||
+    id.includes("glm") ||
+    id.includes("mistral") ||
+    id.includes("kimi") ||
+    id.includes("k2p5") ||
+    id.includes("qwen") ||
+    id.includes("big-pickle")
+  )
+    return {}
 
-    // see: https://docs.x.ai/docs/guides/reasoning#control-how-hard-the-model-thinks
-    if (id.includes("grok") && id.includes("grok-3-mini")) {
-      if (model.api.npm === "@openrouter/ai-sdk-provider") {
-        return {
-          low: { reasoning: { effort: "low" } },
-          high: { reasoning: { effort: "high" } },
-        }
-      }
+  // see: https://docs.x.ai/docs/guides/reasoning#control-how-hard-the-model-thinks
+  if (id.includes("grok") && id.includes("grok-3-mini")) {
+    if (model.api.npm === "@openrouter/ai-sdk-provider") {
       return {
-        low: { reasoningEffort: "low" },
-        high: { reasoningEffort: "high" },
+        low: { reasoning: { effort: "low" } },
+        high: { reasoning: { effort: "high" } },
       }
     }
-    if (id.includes("grok")) return {}
-
-    switch (model.api.npm) {
-      case "@openrouter/ai-sdk-provider":
-        if (!model.id.includes("gpt") && !model.id.includes("gemini-3") && !model.id.includes("claude")) return {}
-        return Object.fromEntries(OPENAI_EFFORTS.map((effort) => [effort, { reasoning: { effort } }]))
-
-      case "@ai-sdk/gateway":
-        if (model.id.includes("anthropic")) {
-          if (isAnthropicAdaptive) {
-            return Object.fromEntries(
-              adaptiveEfforts.map((effort) => [
-                effort,
-                {
-                  thinking: {
-                    type: "adaptive",
-                  },
-                  effort,
-                },
-              ]),
-            )
-          }
-          return {
-            high: {
-              thinking: {
-                type: "enabled",
-                budgetTokens: 16000,
-              },
-            },
-            max: {
-              thinking: {
-                type: "enabled",
-                budgetTokens: 31999,
-              },
-            },
-          }
-        }
-        if (model.id.includes("google")) {
-          if (id.includes("2.5")) {
-            return {
-              high: {
-                thinkingConfig: {
-                  includeThoughts: true,
-                  thinkingBudget: 16000,
-                },
-              },
-              max: {
-                thinkingConfig: {
-                  includeThoughts: true,
-                  thinkingBudget: 24576,
-                },
-              },
-            }
-          }
-          return Object.fromEntries(
-            ["low", "high"].map((effort) => [
-              effort,
-              {
-                includeThoughts: true,
-                thinkingLevel: effort,
-              },
-            ]),
-          )
-        }
-        return Object.fromEntries(OPENAI_EFFORTS.map((effort) => [effort, { reasoningEffort: effort }]))
-
-      case "@ai-sdk/github-copilot":
-        if (model.id.includes("gemini")) {
-          // currently github copilot only returns thinking
-          return {}
-        }
-        if (model.id.includes("claude")) {
-          return Object.fromEntries(WIDELY_SUPPORTED_EFFORTS.map((effort) => [effort, { reasoningEffort: effort }]))
-        }
-        const copilotEfforts = iife(() => {
-          if (id.includes("5.1-codex-max") || id.includes("5.2") || id.includes("5.3"))
-            return [...WIDELY_SUPPORTED_EFFORTS, "xhigh"]
-          const arr = [...WIDELY_SUPPORTED_EFFORTS]
-          if (id.includes("gpt-5") && model.release_date >= "2025-12-04") arr.push("xhigh")
-          return arr
-        })
-        return Object.fromEntries(
-          copilotEfforts.map((effort) => [
-            effort,
-            {
-              reasoningEffort: effort,
-              reasoningSummary: "auto",
-              include: ["reasoning.encrypted_content"],
-            },
-          ]),
-        )
-
-      case "@ai-sdk/cerebras":
-      // https://v5.ai-sdk.dev/providers/ai-sdk-providers/cerebras
-      case "@ai-sdk/togetherai":
-      // https://v5.ai-sdk.dev/providers/ai-sdk-providers/togetherai
-      case "@ai-sdk/xai":
-      // https://v5.ai-sdk.dev/providers/ai-sdk-providers/xai
-      case "@ai-sdk/deepinfra":
-      // https://v5.ai-sdk.dev/providers/ai-sdk-providers/deepinfra
-      case "venice-ai-sdk-provider":
-      // https://docs.venice.ai/overview/guides/reasoning-models#reasoning-effort
-      case "@ai-sdk/openai-compatible":
-        return Object.fromEntries(WIDELY_SUPPORTED_EFFORTS.map((effort) => [effort, { reasoningEffort: effort }]))
-
-      case "@ai-sdk/azure":
-        // https://v5.ai-sdk.dev/providers/ai-sdk-providers/azure
-        if (id === "o1-mini") return {}
-        const azureEfforts = ["low", "medium", "high"]
-        if (id.includes("gpt-5-") || id === "gpt-5") {
-          azureEfforts.unshift("minimal")
-        }
-        return Object.fromEntries(
-          azureEfforts.map((effort) => [
-            effort,
-            {
-              reasoningEffort: effort,
-              reasoningSummary: "auto",
-              include: ["reasoning.encrypted_content"],
-            },
-          ]),
-        )
-      case "@ai-sdk/openai":
-        // https://v5.ai-sdk.dev/providers/ai-sdk-providers/openai
-        if (id === "gpt-5-pro") return {}
-        const openaiEfforts = iife(() => {
-          if (id.includes("codex")) {
-            if (id.includes("5.2") || id.includes("5.3")) return [...WIDELY_SUPPORTED_EFFORTS, "xhigh"]
-            return WIDELY_SUPPORTED_EFFORTS
-          }
-          const arr = [...WIDELY_SUPPORTED_EFFORTS]
-          if (id.includes("gpt-5-") || id === "gpt-5") {
-            arr.unshift("minimal")
-          }
-          if (model.release_date >= "2025-11-13") {
-            arr.unshift("none")
-          }
-          if (model.release_date >= "2025-12-04") {
-            arr.push("xhigh")
-          }
-          return arr
-        })
-        return Object.fromEntries(
-          openaiEfforts.map((effort) => [
-            effort,
-            {
-              reasoningEffort: effort,
-              reasoningSummary: "auto",
-              include: ["reasoning.encrypted_content"],
-            },
-          ]),
-        )
+    return {
+      low: { reasoningEffort: "low" },
+      high: { reasoningEffort: "high" },
+    }
+  }
+  if (id.includes("grok")) return {}
 
-      case "@ai-sdk/anthropic":
-      // https://v5.ai-sdk.dev/providers/ai-sdk-providers/anthropic
-      case "@ai-sdk/google-vertex/anthropic":
-        // https://v5.ai-sdk.dev/providers/ai-sdk-providers/google-vertex#anthropic-provider
+  switch (model.api.npm) {
+    case "@openrouter/ai-sdk-provider":
+      if (!model.id.includes("gpt") && !model.id.includes("gemini-3") && !model.id.includes("claude")) return {}
+      return Object.fromEntries(OPENAI_EFFORTS.map((effort) => [effort, { reasoning: { effort } }]))
 
+    case "@ai-sdk/gateway":
+      if (model.id.includes("anthropic")) {
         if (isAnthropicAdaptive) {
           return Object.fromEntries(
             adaptiveEfforts.map((effort) => [
@@ -594,499 +444,647 @@ export namespace ProviderTransform {
             ]),
           )
         }
-
         return {
           high: {
             thinking: {
               type: "enabled",
-              budgetTokens: Math.min(16_000, Math.floor(model.limit.output / 2 - 1)),
+              budgetTokens: 16000,
             },
           },
           max: {
             thinking: {
               type: "enabled",
-              budgetTokens: Math.min(31_999, model.limit.output - 1),
+              budgetTokens: 31999,
             },
           },
         }
-
-      case "@ai-sdk/amazon-bedrock":
-        // https://v5.ai-sdk.dev/providers/ai-sdk-providers/amazon-bedrock
-        if (isAnthropicAdaptive) {
-          return Object.fromEntries(
-            adaptiveEfforts.map((effort) => [
-              effort,
-              {
-                reasoningConfig: {
-                  type: "adaptive",
-                  maxReasoningEffort: effort,
-                },
-              },
-            ]),
-          )
-        }
-        // For Anthropic models on Bedrock, use reasoningConfig with budgetTokens
-        if (model.api.id.includes("anthropic")) {
+      }
+      if (model.id.includes("google")) {
+        if (id.includes("2.5")) {
           return {
             high: {
-              reasoningConfig: {
-                type: "enabled",
-                budgetTokens: 16000,
+              thinkingConfig: {
+                includeThoughts: true,
+                thinkingBudget: 16000,
               },
             },
             max: {
-              reasoningConfig: {
-                type: "enabled",
-                budgetTokens: 31999,
+              thinkingConfig: {
+                includeThoughts: true,
+                thinkingBudget: 24576,
               },
             },
           }
         }
-
-        // For Amazon Nova models, use reasoningConfig with maxReasoningEffort
         return Object.fromEntries(
-          WIDELY_SUPPORTED_EFFORTS.map((effort) => [
+          ["low", "high"].map((effort) => [
             effort,
             {
-              reasoningConfig: {
-                type: "enabled",
-                maxReasoningEffort: effort,
-              },
+              includeThoughts: true,
+              thinkingLevel: effort,
             },
           ]),
         )
+      }
+      return Object.fromEntries(OPENAI_EFFORTS.map((effort) => [effort, { reasoningEffort: effort }]))
 
-      case "@ai-sdk/google-vertex":
-      // https://v5.ai-sdk.dev/providers/ai-sdk-providers/google-vertex
-      case "@ai-sdk/google":
-        // https://v5.ai-sdk.dev/providers/ai-sdk-providers/google-generative-ai
-        if (id.includes("2.5")) {
-          return {
-            high: {
-              thinkingConfig: {
-                includeThoughts: true,
-                thinkingBudget: 16000,
-              },
-            },
-            max: {
-              thinkingConfig: {
-                includeThoughts: true,
-                thinkingBudget: 24576,
-              },
-            },
-          }
+    case "@ai-sdk/github-copilot":
+      if (model.id.includes("gemini")) {
+        // currently github copilot only returns thinking
+        return {}
+      }
+      if (model.id.includes("claude")) {
+        return Object.fromEntries(WIDELY_SUPPORTED_EFFORTS.map((effort) => [effort, { reasoningEffort: effort }]))
+      }
+      const copilotEfforts = iife(() => {
+        if (id.includes("5.1-codex-max") || id.includes("5.2") || id.includes("5.3"))
+          return [...WIDELY_SUPPORTED_EFFORTS, "xhigh"]
+        const arr = [...WIDELY_SUPPORTED_EFFORTS]
+        if (id.includes("gpt-5") && model.release_date >= "2025-12-04") arr.push("xhigh")
+        return arr
+      })
+      return Object.fromEntries(
+        copilotEfforts.map((effort) => [
+          effort,
+          {
+            reasoningEffort: effort,
+            reasoningSummary: "auto",
+            include: ["reasoning.encrypted_content"],
+          },
+        ]),
+      )
+
+    case "@ai-sdk/cerebras":
+    // https://v5.ai-sdk.dev/providers/ai-sdk-providers/cerebras
+    case "@ai-sdk/togetherai":
+    // https://v5.ai-sdk.dev/providers/ai-sdk-providers/togetherai
+    case "@ai-sdk/xai":
+    // https://v5.ai-sdk.dev/providers/ai-sdk-providers/xai
+    case "@ai-sdk/deepinfra":
+    // https://v5.ai-sdk.dev/providers/ai-sdk-providers/deepinfra
+    case "venice-ai-sdk-provider":
+    // https://docs.venice.ai/overview/guides/reasoning-models#reasoning-effort
+    case "@ai-sdk/openai-compatible":
+      return Object.fromEntries(WIDELY_SUPPORTED_EFFORTS.map((effort) => [effort, { reasoningEffort: effort }]))
+
+    case "@ai-sdk/azure":
+      // https://v5.ai-sdk.dev/providers/ai-sdk-providers/azure
+      if (id === "o1-mini") return {}
+      const azureEfforts = ["low", "medium", "high"]
+      if (id.includes("gpt-5-") || id === "gpt-5") {
+        azureEfforts.unshift("minimal")
+      }
+      return Object.fromEntries(
+        azureEfforts.map((effort) => [
+          effort,
+          {
+            reasoningEffort: effort,
+            reasoningSummary: "auto",
+            include: ["reasoning.encrypted_content"],
+          },
+        ]),
+      )
+    case "@ai-sdk/openai":
+      // https://v5.ai-sdk.dev/providers/ai-sdk-providers/openai
+      if (id === "gpt-5-pro") return {}
+      const openaiEfforts = iife(() => {
+        if (id.includes("codex")) {
+          if (id.includes("5.2") || id.includes("5.3")) return [...WIDELY_SUPPORTED_EFFORTS, "xhigh"]
+          return WIDELY_SUPPORTED_EFFORTS
         }
-        let levels = ["low", "high"]
-        if (id.includes("3.1")) {
-          levels = ["low", "medium", "high"]
+        const arr = [...WIDELY_SUPPORTED_EFFORTS]
+        if (id.includes("gpt-5-") || id === "gpt-5") {
+          arr.unshift("minimal")
         }
+        if (model.release_date >= "2025-11-13") {
+          arr.unshift("none")
+        }
+        if (model.release_date >= "2025-12-04") {
+          arr.push("xhigh")
+        }
+        return arr
+      })
+      return Object.fromEntries(
+        openaiEfforts.map((effort) => [
+          effort,
+          {
+            reasoningEffort: effort,
+            reasoningSummary: "auto",
+            include: ["reasoning.encrypted_content"],
+          },
+        ]),
+      )
 
+    case "@ai-sdk/anthropic":
+    // https://v5.ai-sdk.dev/providers/ai-sdk-providers/anthropic
+    case "@ai-sdk/google-vertex/anthropic":
+      // https://v5.ai-sdk.dev/providers/ai-sdk-providers/google-vertex#anthropic-provider
+
+      if (isAnthropicAdaptive) {
         return Object.fromEntries(
-          levels.map((effort) => [
+          adaptiveEfforts.map((effort) => [
             effort,
             {
-              thinkingConfig: {
-                includeThoughts: true,
-                thinkingLevel: effort,
+              thinking: {
+                type: "adaptive",
               },
+              effort,
             },
           ]),
         )
+      }
 
-      case "@ai-sdk/mistral":
-        // https://v5.ai-sdk.dev/providers/ai-sdk-providers/mistral
-        return {}
-
-      case "@ai-sdk/cohere":
-        // https://v5.ai-sdk.dev/providers/ai-sdk-providers/cohere
-        return {}
+      return {
+        high: {
+          thinking: {
+            type: "enabled",
+            budgetTokens: Math.min(16_000, Math.floor(model.limit.output / 2 - 1)),
+          },
+        },
+        max: {
+          thinking: {
+            type: "enabled",
+            budgetTokens: Math.min(31_999, model.limit.output - 1),
+          },
+        },
+      }
 
-      case "@ai-sdk/groq":
-        // https://v5.ai-sdk.dev/providers/ai-sdk-providers/groq
-        const groqEffort = ["none", ...WIDELY_SUPPORTED_EFFORTS]
+    case "@ai-sdk/amazon-bedrock":
+      // https://v5.ai-sdk.dev/providers/ai-sdk-providers/amazon-bedrock
+      if (isAnthropicAdaptive) {
         return Object.fromEntries(
-          groqEffort.map((effort) => [
+          adaptiveEfforts.map((effort) => [
             effort,
             {
-              reasoningEffort: effort,
+              reasoningConfig: {
+                type: "adaptive",
+                maxReasoningEffort: effort,
+              },
             },
           ]),
         )
+      }
+      // For Anthropic models on Bedrock, use reasoningConfig with budgetTokens
+      if (model.api.id.includes("anthropic")) {
+        return {
+          high: {
+            reasoningConfig: {
+              type: "enabled",
+              budgetTokens: 16000,
+            },
+          },
+          max: {
+            reasoningConfig: {
+              type: "enabled",
+              budgetTokens: 31999,
+            },
+          },
+        }
+      }
 
-      case "@ai-sdk/perplexity":
-        // https://v5.ai-sdk.dev/providers/ai-sdk-providers/perplexity
-        return {}
+      // For Amazon Nova models, use reasoningConfig with maxReasoningEffort
+      return Object.fromEntries(
+        WIDELY_SUPPORTED_EFFORTS.map((effort) => [
+          effort,
+          {
+            reasoningConfig: {
+              type: "enabled",
+              maxReasoningEffort: effort,
+            },
+          },
+        ]),
+      )
+
+    case "@ai-sdk/google-vertex":
+    // https://v5.ai-sdk.dev/providers/ai-sdk-providers/google-vertex
+    case "@ai-sdk/google":
+      // https://v5.ai-sdk.dev/providers/ai-sdk-providers/google-generative-ai
+      if (id.includes("2.5")) {
+        return {
+          high: {
+            thinkingConfig: {
+              includeThoughts: true,
+              thinkingBudget: 16000,
+            },
+          },
+          max: {
+            thinkingConfig: {
+              includeThoughts: true,
+              thinkingBudget: 24576,
+            },
+          },
+        }
+      }
+      let levels = ["low", "high"]
+      if (id.includes("3.1")) {
+        levels = ["low", "medium", "high"]
+      }
 
-      case "@jerome-benoit/sap-ai-provider-v2":
-        if (model.api.id.includes("anthropic")) {
-          if (isAnthropicAdaptive) {
-            return Object.fromEntries(
-              adaptiveEfforts.map((effort) => [
-                effort,
-                {
-                  thinking: {
-                    type: "adaptive",
-                  },
-                  effort,
+      return Object.fromEntries(
+        levels.map((effort) => [
+          effort,
+          {
+            thinkingConfig: {
+              includeThoughts: true,
+              thinkingLevel: effort,
+            },
+          },
+        ]),
+      )
+
+    case "@ai-sdk/mistral":
+      // https://v5.ai-sdk.dev/providers/ai-sdk-providers/mistral
+      return {}
+
+    case "@ai-sdk/cohere":
+      // https://v5.ai-sdk.dev/providers/ai-sdk-providers/cohere
+      return {}
+
+    case "@ai-sdk/groq":
+      // https://v5.ai-sdk.dev/providers/ai-sdk-providers/groq
+      const groqEffort = ["none", ...WIDELY_SUPPORTED_EFFORTS]
+      return Object.fromEntries(
+        groqEffort.map((effort) => [
+          effort,
+          {
+            reasoningEffort: effort,
+          },
+        ]),
+      )
+
+    case "@ai-sdk/perplexity":
+      // https://v5.ai-sdk.dev/providers/ai-sdk-providers/perplexity
+      return {}
+
+    case "@jerome-benoit/sap-ai-provider-v2":
+      if (model.api.id.includes("anthropic")) {
+        if (isAnthropicAdaptive) {
+          return Object.fromEntries(
+            adaptiveEfforts.map((effort) => [
+              effort,
+              {
+                thinking: {
+                  type: "adaptive",
                 },
-              ]),
-            )
-          }
-          return {
-            high: {
-              thinking: {
-                type: "enabled",
-                budgetTokens: 16000,
+                effort,
               },
+            ]),
+          )
+        }
+        return {
+          high: {
+            thinking: {
+              type: "enabled",
+              budgetTokens: 16000,
             },
-            max: {
-              thinking: {
-                type: "enabled",
-                budgetTokens: 31999,
-              },
+          },
+          max: {
+            thinking: {
+              type: "enabled",
+              budgetTokens: 31999,
             },
-          }
+          },
         }
-        if (model.api.id.includes("gemini") && id.includes("2.5")) {
-          return {
-            high: {
-              thinkingConfig: {
-                includeThoughts: true,
-                thinkingBudget: 16000,
-              },
+      }
+      if (model.api.id.includes("gemini") && id.includes("2.5")) {
+        return {
+          high: {
+            thinkingConfig: {
+              includeThoughts: true,
+              thinkingBudget: 16000,
             },
-            max: {
-              thinkingConfig: {
-                includeThoughts: true,
-                thinkingBudget: 24576,
-              },
+          },
+          max: {
+            thinkingConfig: {
+              includeThoughts: true,
+              thinkingBudget: 24576,
             },
-          }
-        }
-        if (model.api.id.includes("gpt") || /\bo[1-9]/.test(model.api.id)) {
-          return Object.fromEntries(WIDELY_SUPPORTED_EFFORTS.map((effort) => [effort, { reasoningEffort: effort }]))
+          },
         }
-        return {}
-    }
-    return {}
+      }
+      if (model.api.id.includes("gpt") || /\bo[1-9]/.test(model.api.id)) {
+        return Object.fromEntries(WIDELY_SUPPORTED_EFFORTS.map((effort) => [effort, { reasoningEffort: effort }]))
+      }
+      return {}
   }
+  return {}
+}
 
-  export function options(input: {
-    model: Provider.Model
-    sessionID: string
-    providerOptions?: Record<string, any>
-  }): Record<string, any> {
-    const result: Record<string, any> = {}
+export function options(input: {
+  model: Provider.Model
+  sessionID: string
+  providerOptions?: Record<string, any>
+}): Record<string, any> {
+  const result: Record<string, any> = {}
+
+  // openai and providers using openai package should set store to false by default.
+  if (
+    input.model.providerID === "openai" ||
+    input.model.api.npm === "@ai-sdk/openai" ||
+    input.model.api.npm === "@ai-sdk/github-copilot"
+  ) {
+    result["store"] = false
+  }
 
-    // openai and providers using openai package should set store to false by default.
-    if (
-      input.model.providerID === "openai" ||
-      input.model.api.npm === "@ai-sdk/openai" ||
-      input.model.api.npm === "@ai-sdk/github-copilot"
-    ) {
-      result["store"] = false
+  if (input.model.api.npm === "@openrouter/ai-sdk-provider") {
+    result["usage"] = {
+      include: true,
     }
-
-    if (input.model.api.npm === "@openrouter/ai-sdk-provider") {
-      result["usage"] = {
-        include: true,
-      }
-      if (input.model.api.id.includes("gemini-3")) {
-        result["reasoning"] = { effort: "high" }
-      }
+    if (input.model.api.id.includes("gemini-3")) {
+      result["reasoning"] = { effort: "high" }
     }
+  }
 
-    if (
-      input.model.providerID === "baseten" ||
-      (input.model.providerID === "opencode" && ["kimi-k2-thinking", "glm-4.6"].includes(input.model.api.id))
-    ) {
-      result["chat_template_args"] = { enable_thinking: true }
-    }
+  if (
+    input.model.providerID === "baseten" ||
+    (input.model.providerID === "opencode" && ["kimi-k2-thinking", "glm-4.6"].includes(input.model.api.id))
+  ) {
+    result["chat_template_args"] = { enable_thinking: true }
+  }
 
-    if (
-      ["zai", "zhipuai"].some((id) => input.model.providerID.includes(id)) &&
-      input.model.api.npm === "@ai-sdk/openai-compatible"
-    ) {
-      result["thinking"] = {
-        type: "enabled",
-        clear_thinking: false,
-      }
+  if (
+    ["zai", "zhipuai"].some((id) => input.model.providerID.includes(id)) &&
+    input.model.api.npm === "@ai-sdk/openai-compatible"
+  ) {
+    result["thinking"] = {
+      type: "enabled",
+      clear_thinking: false,
     }
+  }
 
-    if (input.model.providerID === "openai" || input.providerOptions?.setCacheKey) {
-      result["promptCacheKey"] = input.sessionID
-    }
+  if (input.model.providerID === "openai" || input.providerOptions?.setCacheKey) {
+    result["promptCacheKey"] = input.sessionID
+  }
 
-    if (input.model.api.npm === "@ai-sdk/google" || input.model.api.npm === "@ai-sdk/google-vertex") {
-      if (input.model.capabilities.reasoning) {
-        result["thinkingConfig"] = {
-          includeThoughts: true,
-        }
-        if (input.model.api.id.includes("gemini-3")) {
-          result["thinkingConfig"]["thinkingLevel"] = "high"
-        }
+  if (input.model.api.npm === "@ai-sdk/google" || input.model.api.npm === "@ai-sdk/google-vertex") {
+    if (input.model.capabilities.reasoning) {
+      result["thinkingConfig"] = {
+        includeThoughts: true,
       }
-    }
-
-    // Enable thinking by default for kimi-k2.5/k2p5 models using anthropic SDK
-    const modelId = input.model.api.id.toLowerCase()
-    if (
-      (input.model.api.npm === "@ai-sdk/anthropic" || input.model.api.npm === "@ai-sdk/google-vertex/anthropic") &&
-      (modelId.includes("k2p5") || modelId.includes("kimi-k2.5") || modelId.includes("kimi-k2p5"))
-    ) {
-      result["thinking"] = {
-        type: "enabled",
-        budgetTokens: Math.min(16_000, Math.floor(input.model.limit.output / 2 - 1)),
+      if (input.model.api.id.includes("gemini-3")) {
+        result["thinkingConfig"]["thinkingLevel"] = "high"
       }
     }
+  }
 
-    // Enable thinking for reasoning models on alibaba-cn (DashScope).
-    // DashScope's OpenAI-compatible API requires `enable_thinking: true` in the request body
-    // to return reasoning_content. Without it, models like kimi-k2.5, qwen-plus, qwen3, qwq,
-    // deepseek-r1, etc. never output thinking/reasoning tokens.
-    // Note: kimi-k2-thinking is excluded as it returns reasoning_content by default.
-    if (
-      input.model.providerID === "alibaba-cn" &&
-      input.model.capabilities.reasoning &&
-      input.model.api.npm === "@ai-sdk/openai-compatible" &&
-      !modelId.includes("kimi-k2-thinking")
-    ) {
-      result["enable_thinking"] = true
+  // Enable thinking by default for kimi-k2.5/k2p5 models using anthropic SDK
+  const modelId = input.model.api.id.toLowerCase()
+  if (
+    (input.model.api.npm === "@ai-sdk/anthropic" || input.model.api.npm === "@ai-sdk/google-vertex/anthropic") &&
+    (modelId.includes("k2p5") || modelId.includes("kimi-k2.5") || modelId.includes("kimi-k2p5"))
+  ) {
+    result["thinking"] = {
+      type: "enabled",
+      budgetTokens: Math.min(16_000, Math.floor(input.model.limit.output / 2 - 1)),
     }
+  }
 
-    if (input.model.api.id.includes("gpt-5") && !input.model.api.id.includes("gpt-5-chat")) {
-      if (!input.model.api.id.includes("gpt-5-pro")) {
-        result["reasoningEffort"] = "medium"
-        // Only inject reasoningSummary for providers that support it natively.
-        // @ai-sdk/openai-compatible proxies (e.g. LiteLLM) do not understand this
-        // parameter and return "Unknown parameter: 'reasoningSummary'".
-        if (
-          input.model.api.npm === "@ai-sdk/openai" ||
-          input.model.api.npm === "@ai-sdk/azure" ||
-          input.model.api.npm === "@ai-sdk/github-copilot"
-        ) {
-          result["reasoningSummary"] = "auto"
-        }
-      }
+  // Enable thinking for reasoning models on alibaba-cn (DashScope).
+  // DashScope's OpenAI-compatible API requires `enable_thinking: true` in the request body
+  // to return reasoning_content. Without it, models like kimi-k2.5, qwen-plus, qwen3, qwq,
+  // deepseek-r1, etc. never output thinking/reasoning tokens.
+  // Note: kimi-k2-thinking is excluded as it returns reasoning_content by default.
+  if (
+    input.model.providerID === "alibaba-cn" &&
+    input.model.capabilities.reasoning &&
+    input.model.api.npm === "@ai-sdk/openai-compatible" &&
+    !modelId.includes("kimi-k2-thinking")
+  ) {
+    result["enable_thinking"] = true
+  }
 
-      // Only set textVerbosity for non-chat gpt-5.x models
-      // Chat models (e.g. gpt-5.2-chat-latest) only support "medium" verbosity
+  if (input.model.api.id.includes("gpt-5") && !input.model.api.id.includes("gpt-5-chat")) {
+    if (!input.model.api.id.includes("gpt-5-pro")) {
+      result["reasoningEffort"] = "medium"
+      // Only inject reasoningSummary for providers that support it natively.
+      // @ai-sdk/openai-compatible proxies (e.g. LiteLLM) do not understand this
+      // parameter and return "Unknown parameter: 'reasoningSummary'".
       if (
-        input.model.api.id.includes("gpt-5.") &&
-        !input.model.api.id.includes("codex") &&
-        !input.model.api.id.includes("-chat") &&
-        input.model.providerID !== "azure"
+        input.model.api.npm === "@ai-sdk/openai" ||
+        input.model.api.npm === "@ai-sdk/azure" ||
+        input.model.api.npm === "@ai-sdk/github-copilot"
       ) {
-        result["textVerbosity"] = "low"
-      }
-
-      if (input.model.providerID.startsWith("opencode")) {
-        result["promptCacheKey"] = input.sessionID
-        result["include"] = ["reasoning.encrypted_content"]
         result["reasoningSummary"] = "auto"
       }
     }
 
-    if (input.model.providerID === "venice") {
-      result["promptCacheKey"] = input.sessionID
+    // Only set textVerbosity for non-chat gpt-5.x models
+    // Chat models (e.g. gpt-5.2-chat-latest) only support "medium" verbosity
+    if (
+      input.model.api.id.includes("gpt-5.") &&
+      !input.model.api.id.includes("codex") &&
+      !input.model.api.id.includes("-chat") &&
+      input.model.providerID !== "azure"
+    ) {
+      result["textVerbosity"] = "low"
     }
 
-    if (input.model.providerID === "openrouter") {
-      result["prompt_cache_key"] = input.sessionID
-    }
-    if (input.model.api.npm === "@ai-sdk/gateway") {
-      result["gateway"] = {
-        caching: "auto",
-      }
+    if (input.model.providerID.startsWith("opencode")) {
+      result["promptCacheKey"] = input.sessionID
+      result["include"] = ["reasoning.encrypted_content"]
+      result["reasoningSummary"] = "auto"
     }
+  }
 
-    return result
+  if (input.model.providerID === "venice") {
+    result["promptCacheKey"] = input.sessionID
   }
 
-  export function smallOptions(model: Provider.Model) {
-    if (
-      model.providerID === "openai" ||
-      model.api.npm === "@ai-sdk/openai" ||
-      model.api.npm === "@ai-sdk/github-copilot"
-    ) {
-      if (model.api.id.includes("gpt-5")) {
-        if (model.api.id.includes("5.")) {
-          return { store: false, reasoningEffort: "low" }
-        }
-        return { store: false, reasoningEffort: "minimal" }
-      }
-      return { store: false }
+  if (input.model.providerID === "openrouter") {
+    result["prompt_cache_key"] = input.sessionID
+  }
+  if (input.model.api.npm === "@ai-sdk/gateway") {
+    result["gateway"] = {
+      caching: "auto",
     }
-    if (model.providerID === "google") {
-      // gemini-3 uses thinkingLevel, gemini-2.5 uses thinkingBudget
-      if (model.api.id.includes("gemini-3")) {
-        return { thinkingConfig: { thinkingLevel: "minimal" } }
+  }
+
+  return result
+}
+
+export function smallOptions(model: Provider.Model) {
+  if (
+    model.providerID === "openai" ||
+    model.api.npm === "@ai-sdk/openai" ||
+    model.api.npm === "@ai-sdk/github-copilot"
+  ) {
+    if (model.api.id.includes("gpt-5")) {
+      if (model.api.id.includes("5.")) {
+        return { store: false, reasoningEffort: "low" }
       }
-      return { thinkingConfig: { thinkingBudget: 0 } }
+      return { store: false, reasoningEffort: "minimal" }
     }
-    if (model.providerID === "openrouter") {
-      if (model.api.id.includes("google")) {
-        return { reasoning: { enabled: false } }
-      }
-      return { reasoningEffort: "minimal" }
+    return { store: false }
+  }
+  if (model.providerID === "google") {
+    // gemini-3 uses thinkingLevel, gemini-2.5 uses thinkingBudget
+    if (model.api.id.includes("gemini-3")) {
+      return { thinkingConfig: { thinkingLevel: "minimal" } }
     }
-
-    if (model.providerID === "venice") {
-      return { veniceParameters: { disableThinking: true } }
+    return { thinkingConfig: { thinkingBudget: 0 } }
+  }
+  if (model.providerID === "openrouter") {
+    if (model.api.id.includes("google")) {
+      return { reasoning: { enabled: false } }
     }
-
-    return {}
+    return { reasoningEffort: "minimal" }
   }
 
-  // Maps model ID prefix to provider slug used in providerOptions.
-  // Example: "amazon/nova-2-lite" → "bedrock"
-  const SLUG_OVERRIDES: Record<string, string> = {
-    amazon: "bedrock",
+  if (model.providerID === "venice") {
+    return { veniceParameters: { disableThinking: true } }
   }
 
-  export function providerOptions(model: Provider.Model, options: { [x: string]: any }) {
-    if (model.api.npm === "@ai-sdk/gateway") {
-      // Gateway providerOptions are split across two namespaces:
-      // - `gateway`: gateway-native routing/caching controls (order, only, byok, etc.)
-      // - `<upstream slug>`: provider-specific model options (anthropic/openai/...)
-      // We keep `gateway` as-is and route every other top-level option under the
-      // model-derived upstream slug.
-      const i = model.api.id.indexOf("/")
-      const rawSlug = i > 0 ? model.api.id.slice(0, i) : undefined
-      const slug = rawSlug ? (SLUG_OVERRIDES[rawSlug] ?? rawSlug) : undefined
-      const gateway = options.gateway
-      const rest = Object.fromEntries(Object.entries(options).filter(([k]) => k !== "gateway"))
-      const has = Object.keys(rest).length > 0
-
-      const result: Record<string, any> = {}
-      if (gateway !== undefined) result.gateway = gateway
-
-      if (has) {
-        if (slug) {
-          // Route model-specific options under the provider slug
-          result[slug] = rest
-        } else if (gateway && typeof gateway === "object" && !Array.isArray(gateway)) {
-          result.gateway = { ...gateway, ...rest }
-        } else {
-          result.gateway = rest
-        }
-      }
+  return {}
+}
 
-      return result
-    }
+// Maps model ID prefix to provider slug used in providerOptions.
+// Example: "amazon/nova-2-lite" → "bedrock"
+const SLUG_OVERRIDES: Record<string, string> = {
+  amazon: "bedrock",
+}
 
-    const key = sdkKey(model.api.npm) ?? model.providerID
-    // @ai-sdk/azure delegates to OpenAIChatLanguageModel which reads from
-    // providerOptions["openai"], but OpenAIResponsesLanguageModel checks
-    // "azure" first. Pass both so model options work on either code path.
-    if (model.api.npm === "@ai-sdk/azure") {
-      return { openai: options, azure: options }
+export function providerOptions(model: Provider.Model, options: { [x: string]: any }) {
+  if (model.api.npm === "@ai-sdk/gateway") {
+    // Gateway providerOptions are split across two namespaces:
+    // - `gateway`: gateway-native routing/caching controls (order, only, byok, etc.)
+    // - `<upstream slug>`: provider-specific model options (anthropic/openai/...)
+    // We keep `gateway` as-is and route every other top-level option under the
+    // model-derived upstream slug.
+    const i = model.api.id.indexOf("/")
+    const rawSlug = i > 0 ? model.api.id.slice(0, i) : undefined
+    const slug = rawSlug ? (SLUG_OVERRIDES[rawSlug] ?? rawSlug) : undefined
+    const gateway = options.gateway
+    const rest = Object.fromEntries(Object.entries(options).filter(([k]) => k !== "gateway"))
+    const has = Object.keys(rest).length > 0
+
+    const result: Record<string, any> = {}
+    if (gateway !== undefined) result.gateway = gateway
+
+    if (has) {
+      if (slug) {
+        // Route model-specific options under the provider slug
+        result[slug] = rest
+      } else if (gateway && typeof gateway === "object" && !Array.isArray(gateway)) {
+        result.gateway = { ...gateway, ...rest }
+      } else {
+        result.gateway = rest
+      }
     }
-    return { [key]: options }
+
+    return result
   }
 
-  export function maxOutputTokens(model: Provider.Model): number {
-    return Math.min(model.limit.output, OUTPUT_TOKEN_MAX) || OUTPUT_TOKEN_MAX
+  const key = sdkKey(model.api.npm) ?? model.providerID
+  // @ai-sdk/azure delegates to OpenAIChatLanguageModel which reads from
+  // providerOptions["openai"], but OpenAIResponsesLanguageModel checks
+  // "azure" first. Pass both so model options work on either code path.
+  if (model.api.npm === "@ai-sdk/azure") {
+    return { openai: options, azure: options }
   }
+  return { [key]: options }
+}
 
-  export function schema(model: Provider.Model, schema: JSONSchema.BaseSchema | JSONSchema7): JSONSchema7 {
-    /*
-    if (["openai", "azure"].includes(providerID)) {
-      if (schema.type === "object" && schema.properties) {
-        for (const [key, value] of Object.entries(schema.properties)) {
-          if (schema.required?.includes(key)) continue
-          schema.properties[key] = {
-            anyOf: [
-              value as JSONSchema.JSONSchema,
-              {
-                type: "null",
-              },
-            ],
-          }
+export function maxOutputTokens(model: Provider.Model): number {
+  return Math.min(model.limit.output, OUTPUT_TOKEN_MAX) || OUTPUT_TOKEN_MAX
+}
+
+export function schema(model: Provider.Model, schema: JSONSchema.BaseSchema | JSONSchema7): JSONSchema7 {
+  /*
+  if (["openai", "azure"].includes(providerID)) {
+    if (schema.type === "object" && schema.properties) {
+      for (const [key, value] of Object.entries(schema.properties)) {
+        if (schema.required?.includes(key)) continue
+        schema.properties[key] = {
+          anyOf: [
+            value as JSONSchema.JSONSchema,
+            {
+              type: "null",
+            },
+          ],
         }
       }
     }
-    */
-
-    // Convert integer enums to string enums for Google/Gemini
-    if (model.providerID === "google" || model.api.id.includes("gemini")) {
-      const isPlainObject = (node: unknown): node is Record<string, any> =>
-        typeof node === "object" && node !== null && !Array.isArray(node)
-      const hasCombiner = (node: unknown) =>
-        isPlainObject(node) && (Array.isArray(node.anyOf) || Array.isArray(node.oneOf) || Array.isArray(node.allOf))
-      const hasSchemaIntent = (node: unknown) => {
-        if (!isPlainObject(node)) return false
-        if (hasCombiner(node)) return true
-        return [
-          "type",
-          "properties",
-          "items",
-          "prefixItems",
-          "enum",
-          "const",
-          "$ref",
-          "additionalProperties",
-          "patternProperties",
-          "required",
-          "not",
-          "if",
-          "then",
-          "else",
-        ].some((key) => key in node)
-      }
+  }
+  */
+
+  // Convert integer enums to string enums for Google/Gemini
+  if (model.providerID === "google" || model.api.id.includes("gemini")) {
+    const isPlainObject = (node: unknown): node is Record<string, any> =>
+      typeof node === "object" && node !== null && !Array.isArray(node)
+    const hasCombiner = (node: unknown) =>
+      isPlainObject(node) && (Array.isArray(node.anyOf) || Array.isArray(node.oneOf) || Array.isArray(node.allOf))
+    const hasSchemaIntent = (node: unknown) => {
+      if (!isPlainObject(node)) return false
+      if (hasCombiner(node)) return true
+      return [
+        "type",
+        "properties",
+        "items",
+        "prefixItems",
+        "enum",
+        "const",
+        "$ref",
+        "additionalProperties",
+        "patternProperties",
+        "required",
+        "not",
+        "if",
+        "then",
+        "else",
+      ].some((key) => key in node)
+    }
 
-      const sanitizeGemini = (obj: any): any => {
-        if (obj === null || typeof obj !== "object") {
-          return obj
-        }
+    const sanitizeGemini = (obj: any): any => {
+      if (obj === null || typeof obj !== "object") {
+        return obj
+      }
 
-        if (Array.isArray(obj)) {
-          return obj.map(sanitizeGemini)
-        }
+      if (Array.isArray(obj)) {
+        return obj.map(sanitizeGemini)
+      }
 
-        const result: any = {}
-        for (const [key, value] of Object.entries(obj)) {
-          if (key === "enum" && Array.isArray(value)) {
-            // Convert all enum values to strings
-            result[key] = value.map((v) => String(v))
-            // If we have integer type with enum, change type to string
-            if (result.type === "integer" || result.type === "number") {
-              result.type = "string"
-            }
-          } else if (typeof value === "object" && value !== null) {
-            result[key] = sanitizeGemini(value)
-          } else {
-            result[key] = value
+      const result: any = {}
+      for (const [key, value] of Object.entries(obj)) {
+        if (key === "enum" && Array.isArray(value)) {
+          // Convert all enum values to strings
+          result[key] = value.map((v) => String(v))
+          // If we have integer type with enum, change type to string
+          if (result.type === "integer" || result.type === "number") {
+            result.type = "string"
           }
+        } else if (typeof value === "object" && value !== null) {
+          result[key] = sanitizeGemini(value)
+        } else {
+          result[key] = value
         }
+      }
 
-        // Filter required array to only include fields that exist in properties
-        if (result.type === "object" && result.properties && Array.isArray(result.required)) {
-          result.required = result.required.filter((field: any) => field in result.properties)
-        }
+      // Filter required array to only include fields that exist in properties
+      if (result.type === "object" && result.properties && Array.isArray(result.required)) {
+        result.required = result.required.filter((field: any) => field in result.properties)
+      }
 
-        if (result.type === "array" && !hasCombiner(result)) {
-          if (result.items == null) {
-            result.items = {}
-          }
-          // Ensure items has a type only when it's still schema-empty.
-          if (isPlainObject(result.items) && !hasSchemaIntent(result.items)) {
-            result.items.type = "string"
-          }
+      if (result.type === "array" && !hasCombiner(result)) {
+        if (result.items == null) {
+          result.items = {}
         }
-
-        // Remove properties/required from non-object types (Gemini rejects these)
-        if (result.type && result.type !== "object" && !hasCombiner(result)) {
-          delete result.properties
-          delete result.required
+        // Ensure items has a type only when it's still schema-empty.
+        if (isPlainObject(result.items) && !hasSchemaIntent(result.items)) {
+          result.items.type = "string"
         }
+      }
 
-        return result
+      // Remove properties/required from non-object types (Gemini rejects these)
+      if (result.type && result.type !== "object" && !hasCombiner(result)) {
+        delete result.properties
+        delete result.required
       }
 
-      schema = sanitizeGemini(schema)
+      return result
     }
 
-    return schema as JSONSchema7
+    schema = sanitizeGemini(schema)
   }
+
+  return schema as JSONSchema7
 }

+ 1 - 1
packages/opencode/src/server/instance/httpapi/provider.ts

@@ -1,4 +1,4 @@
-import { ProviderAuth } from "@/provider/auth"
+import { ProviderAuth } from "@/provider"
 import { Effect, Layer } from "effect"
 import { HttpApi, HttpApiBuilder, HttpApiEndpoint, HttpApiGroup, OpenApi } from "effect/unstable/httpapi"
 

+ 2 - 2
packages/opencode/src/server/instance/provider.ts

@@ -3,8 +3,8 @@ import { describeRoute, validator, resolver } from "hono-openapi"
 import z from "zod"
 import { Config } from "../../config"
 import { Provider } from "../../provider"
-import { ModelsDev } from "../../provider/models"
-import { ProviderAuth } from "../../provider/auth"
+import { ModelsDev } from "../../provider"
+import { ProviderAuth } from "../../provider"
 import { ProviderID } from "../../provider/schema"
 import { AppRuntime } from "../../effect/app-runtime"
 import { mapValues } from "remeda"

+ 1 - 1
packages/opencode/src/session/llm.ts

@@ -5,7 +5,7 @@ import * as Stream from "effect/Stream"
 import { streamText, wrapLanguageModel, type ModelMessage, type Tool, tool, jsonSchema } from "ai"
 import { mergeDeep, pipe } from "remeda"
 import { GitLabWorkflowLanguageModel } from "gitlab-ai-provider"
-import { ProviderTransform } from "@/provider/transform"
+import { ProviderTransform } from "@/provider"
 import { Config } from "@/config"
 import { Instance } from "@/project/instance"
 import type { Agent } from "@/agent/agent"

+ 1 - 1
packages/opencode/src/session/message-v2.ts

@@ -8,7 +8,7 @@ import { Snapshot } from "@/snapshot"
 import { SyncEvent } from "../sync"
 import { Database, NotFoundError, and, desc, eq, inArray, lt, or } from "@/storage"
 import { MessageTable, PartTable, SessionTable } from "./session.sql"
-import { ProviderError } from "@/provider/error"
+import { ProviderError } from "@/provider"
 import { iife } from "@/util/iife"
 import { errorMessage } from "@/util/error"
 import type { SystemError } from "bun"

+ 1 - 1
packages/opencode/src/session/overflow.ts

@@ -1,6 +1,6 @@
 import type { Config } from "@/config"
 import type { Provider } from "@/provider"
-import { ProviderTransform } from "@/provider/transform"
+import { ProviderTransform } from "@/provider"
 import type { MessageV2 } from "./message-v2"
 
 const COMPACTION_BUFFER = 20_000

+ 1 - 1
packages/opencode/src/session/prompt.ts

@@ -12,7 +12,7 @@ import { ModelID, ProviderID } from "../provider/schema"
 import { type Tool as AITool, tool, jsonSchema, type ToolExecutionOptions, asSchema } from "ai"
 import { SessionCompaction } from "./compaction"
 import { Bus } from "../bus"
-import { ProviderTransform } from "../provider/transform"
+import { ProviderTransform } from "../provider"
 import { SystemPrompt } from "./system"
 import { Instruction } from "./instruction"
 import { Plugin } from "../plugin"

+ 1 - 1
packages/opencode/test/plugin/auth-override.test.ts

@@ -4,7 +4,7 @@ import fs from "fs/promises"
 import { Effect } from "effect"
 import { tmpdir } from "../fixture/fixture"
 import { Instance } from "../../src/project/instance"
-import { ProviderAuth } from "../../src/provider/auth"
+import { ProviderAuth } from "../../src/provider"
 import { ProviderID } from "../../src/provider/schema"
 
 describe("plugin.auth-override", () => {

+ 1 - 1
packages/opencode/test/provider/provider.test.ts

@@ -6,7 +6,7 @@ import { tmpdir } from "../fixture/fixture"
 import { Global } from "../../src/global"
 import { Instance } from "../../src/project/instance"
 import { Plugin } from "../../src/plugin/index"
-import { ModelsDev } from "../../src/provider/models"
+import { ModelsDev } from "../../src/provider"
 import { Provider } from "../../src/provider"
 import { ProviderID, ModelID } from "../../src/provider/schema"
 import { Filesystem } from "../../src/util"

+ 1 - 1
packages/opencode/test/provider/transform.test.ts

@@ -1,5 +1,5 @@
 import { describe, expect, test } from "bun:test"
-import { ProviderTransform } from "../../src/provider/transform"
+import { ProviderTransform } from "../../src/provider"
 import { ModelID, ProviderID } from "../../src/provider/schema"
 
 describe("ProviderTransform.options - setCacheKey", () => {

+ 2 - 2
packages/opencode/test/session/llm.test.ts

@@ -7,8 +7,8 @@ import { makeRuntime } from "../../src/effect/run-service"
 import { LLM } from "../../src/session/llm"
 import { Instance } from "../../src/project/instance"
 import { Provider } from "../../src/provider"
-import { ProviderTransform } from "../../src/provider/transform"
-import { ModelsDev } from "../../src/provider/models"
+import { ProviderTransform } from "../../src/provider"
+import { ModelsDev } from "../../src/provider"
 import { ProviderID, ModelID } from "../../src/provider/schema"
 import { Filesystem } from "../../src/util"
 import { tmpdir } from "../fixture/fixture"