import type { APICallError, ModelMessage } from "ai"
import { unique } from "remeda"
import type { JSONSchema } from "zod/v4/core"

export namespace ProviderTransform {
  function normalizeMessages(msgs: ModelMessage[], providerID: string, modelID: string): ModelMessage[] {
    if (modelID.includes("claude")) {
      return msgs.map((msg) => {
        if ((msg.role === "assistant" || msg.role === "tool") && Array.isArray(msg.content)) {
          msg.content = msg.content.map((part) => {
            if ((part.type === "tool-call" || part.type === "tool-result") && "toolCallId" in part) {
              return {
                ...part,
                toolCallId: part.toolCallId.replace(/[^a-zA-Z0-9_-]/g, "_"),
              }
            }
            return part
          })
        }
        return msg
      })
    }

    if (providerID === "mistral" || modelID.toLowerCase().includes("mistral")) {
      const result: ModelMessage[] = []
      for (let i = 0; i < msgs.length; i++) {
        const msg = msgs[i]
        const nextMsg = msgs[i + 1]

        if ((msg.role === "assistant" || msg.role === "tool") && Array.isArray(msg.content)) {
          msg.content = msg.content.map((part) => {
            if ((part.type === "tool-call" || part.type === "tool-result") && "toolCallId" in part) {
              // Mistral requires alphanumeric tool call IDs with exactly 9 characters
              const normalizedId = part.toolCallId
                .replace(/[^a-zA-Z0-9]/g, "") // Remove non-alphanumeric characters
                .substring(0, 9) // Take first 9 characters
                .padEnd(9, "0") // Pad with zeros if less than 9 characters

              return {
                ...part,
                toolCallId: normalizedId,
              }
            }
            return part
          })
        }

        result.push(msg)

        // Fix message sequence: tool messages cannot be followed by user messages
        if (msg.role === "tool" && nextMsg?.role === "user") {
          result.push({
            role: "assistant",
            content: [
              {
                type: "text",
                text: "Done.",
              },
            ],
          })
        }
      }
      return result
    }

    return msgs
  }

  function applyCaching(msgs: ModelMessage[], providerID: string): ModelMessage[] {
    const system = msgs.filter((msg) => msg.role === "system").slice(0, 2)
    const final = msgs.filter((msg) => msg.role !== "system").slice(-2)

    const providerOptions = {
      anthropic: {
        cacheControl: { type: "ephemeral" },
      },
      openrouter: {
        cache_control: { type: "ephemeral" },
      },
      bedrock: {
        cachePoint: { type: "ephemeral" },
      },
      openaiCompatible: {
        cache_control: { type: "ephemeral" },
      },
    }

    for (const msg of unique([...system, ...final])) {
      const shouldUseContentOptions = providerID !== "anthropic" && Array.isArray(msg.content) && msg.content.length > 0

      if (shouldUseContentOptions) {
        const lastContent = msg.content[msg.content.length - 1]
        if (lastContent && typeof lastContent === "object") {
          lastContent.providerOptions = {
            ...lastContent.providerOptions,
            ...providerOptions,
          }
          continue
        }
      }

      msg.providerOptions = {
        ...msg.providerOptions,
        ...providerOptions,
      }
    }

    return msgs
  }

  export function message(msgs: ModelMessage[], providerID: string, modelID: string) {
    msgs = normalizeMessages(msgs, providerID, modelID)
    if (providerID === "anthropic" || modelID.includes("anthropic") || modelID.includes("claude")) {
      msgs = applyCaching(msgs, providerID)
    }
    return msgs
  }

  export function temperature(_providerID: string, modelID: string) {
    if (modelID.toLowerCase().includes("qwen")) return 0.55
    if (modelID.toLowerCase().includes("claude")) return undefined
    if (modelID.toLowerCase().includes("gemini-3-pro")) return 1.0
    return 0
  }

  export function topP(_providerID: string, modelID: string) {
    if (modelID.toLowerCase().includes("qwen")) return 1
    return undefined
  }
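  // Assembles provider-specific request options from the npm package name, provider
  // ID, and model ID. The session ID doubles as a prompt cache key where supported.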
=== "@openrouter/ai-sdk-provider") { result["usage"] = { include: true, } } if (providerID === "openai" || providerOptions?.setCacheKey) { result["promptCacheKey"] = sessionID } if (providerID === "google" || (providerID.startsWith("opencode") && modelID.includes("gemini-3"))) { result["thinkingConfig"] = { includeThoughts: true, } } if (modelID.includes("gpt-5") && !modelID.includes("gpt-5-chat")) { if (modelID.includes("codex")) { result["store"] = false } if (!modelID.includes("codex") && !modelID.includes("gpt-5-pro")) { result["reasoningEffort"] = "medium" } if (modelID.endsWith("gpt-5.1") && providerID !== "azure") { result["textVerbosity"] = "low" } if (providerID.startsWith("opencode")) { result["promptCacheKey"] = sessionID result["include"] = ["reasoning.encrypted_content"] result["reasoningSummary"] = "auto" } } return result } export function smallOptions(input: { providerID: string; modelID: string }) { const options: Record = {} if (input.providerID === "openai" || input.modelID.includes("gpt-5")) { if (input.modelID.includes("5.1")) { options["reasoningEffort"] = "low" } else { options["reasoningEffort"] = "minimal" } } if (input.providerID === "google") { options["thinkingConfig"] = { thinkingBudget: 0, } } return options } export function providerOptions(npm: string | undefined, providerID: string, options: { [x: string]: any }) { switch (npm) { case "@ai-sdk/openai": case "@ai-sdk/azure": return { ["openai" as string]: options, } case "@ai-sdk/amazon-bedrock": return { ["bedrock" as string]: options, } case "@ai-sdk/anthropic": return { ["anthropic" as string]: options, } case "@ai-sdk/google": return { ["google" as string]: options, } case "@ai-sdk/gateway": return { ["gateway" as string]: options, } case "@openrouter/ai-sdk-provider": return { ["openrouter" as string]: options, } default: return { [providerID]: options, } } } export function maxOutputTokens( npm: string, options: Record, modelLimit: number, globalLimit: number, ): number { const modelCap = modelLimit || globalLimit const standardLimit = Math.min(modelCap, globalLimit) if (npm === "@ai-sdk/anthropic") { const thinking = options?.["thinking"] const budgetTokens = typeof thinking?.["budgetTokens"] === "number" ? thinking["budgetTokens"] : 0 const enabled = thinking?.["type"] === "enabled" if (enabled && budgetTokens > 0) { // Return text tokens so that text + thinking <= model cap, preferring 32k text when possible. 
  export function maxOutputTokens(
    npm: string,
    options: Record<string, any>,
    modelLimit: number,
    globalLimit: number,
  ): number {
    const modelCap = modelLimit || globalLimit
    const standardLimit = Math.min(modelCap, globalLimit)

    if (npm === "@ai-sdk/anthropic") {
      const thinking = options?.["thinking"]
      const budgetTokens = typeof thinking?.["budgetTokens"] === "number" ? thinking["budgetTokens"] : 0
      const enabled = thinking?.["type"] === "enabled"

      if (enabled && budgetTokens > 0) {
        // Return text tokens so that text + thinking <= model cap, preferring 32k text when possible.
        if (budgetTokens + standardLimit <= modelCap) {
          return standardLimit
        }
        return modelCap - budgetTokens
      }
    }

    return standardLimit
  }

  export function schema(providerID: string, modelID: string, schema: JSONSchema.BaseSchema) {
    /*
    if (["openai", "azure"].includes(providerID)) {
      if (schema.type === "object" && schema.properties) {
        for (const [key, value] of Object.entries(schema.properties)) {
          if (schema.required?.includes(key)) continue
          schema.properties[key] = {
            anyOf: [
              value as JSONSchema.JSONSchema,
              {
                type: "null",
              },
            ],
          }
        }
      }
    }
    */

    // Convert integer enums to string enums for Google/Gemini
    if (providerID === "google" || modelID.includes("gemini")) {
      const sanitizeGemini = (obj: any): any => {
        if (obj === null || typeof obj !== "object") {
          return obj
        }
        if (Array.isArray(obj)) {
          return obj.map(sanitizeGemini)
        }
        const result: any = {}
        for (const [key, value] of Object.entries(obj)) {
          if (key === "enum" && Array.isArray(value)) {
            // Convert all enum values to strings
            result[key] = value.map((v) => String(v))
            // If we have integer type with enum, change type to string
            if (result.type === "integer" || result.type === "number") {
              result.type = "string"
            }
          } else if (typeof value === "object" && value !== null) {
            result[key] = sanitizeGemini(value)
          } else {
            result[key] = value
          }
        }
        // Filter required array to only include fields that exist in properties
        if (result.type === "object" && result.properties && Array.isArray(result.required)) {
          result.required = result.required.filter((field: any) => field in result.properties)
        }
        return result
      }
      schema = sanitizeGemini(schema)
    }
    return schema
  }

  export function error(providerID: string, error: APICallError) {
    let message = error.message
    if (providerID === "github-copilot" && message.includes("The requested model is not supported")) {
      return (
        message +
        "\n\nMake sure the model is enabled in your copilot settings: https://github.com/settings/copilot/features"
      )
    }
    return message
  }
}
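// Example wiring (a sketch; `history`, `sessionID`, the model ID, and the token
// limits below are illustrative values, not part of this module):
//
//   const msgs = ProviderTransform.message(history, "anthropic", "claude-sonnet-4")
//   const opts = ProviderTransform.options("anthropic", "claude-sonnet-4", "@ai-sdk/anthropic", sessionID)
//   const limit = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", opts, 64000, 32000)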