transform.ts

import type { APICallError, ModelMessage } from "ai"
import { mergeDeep, unique } from "remeda"
import type { JSONSchema } from "zod/v4/core"
import type { Provider } from "./provider"
import type { ModelsDev } from "./models"
import { iife } from "@/util/iife"

type Modality = NonNullable<ModelsDev.Model["modalities"]>["input"][number]

function mimeToModality(mime: string): Modality | undefined {
  if (mime.startsWith("image/")) return "image"
  if (mime.startsWith("audio/")) return "audio"
  if (mime.startsWith("video/")) return "video"
  if (mime === "application/pdf") return "pdf"
  return undefined
}
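
// Example (derived from the mapping above): mimeToModality("image/png") === "image",
// mimeToModality("application/pdf") === "pdf", and an unmapped type such as "text/csv"
// falls through to undefined.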

export namespace ProviderTransform {
  // Maps npm package to the key the AI SDK expects for providerOptions
  function sdkKey(npm: string): string | undefined {
    switch (npm) {
      case "@ai-sdk/github-copilot":
      case "@ai-sdk/openai":
      case "@ai-sdk/azure":
        return "openai"
      case "@ai-sdk/amazon-bedrock":
        return "bedrock"
      case "@ai-sdk/anthropic":
      case "@ai-sdk/google-vertex/anthropic":
        return "anthropic"
      case "@ai-sdk/google-vertex":
      case "@ai-sdk/google":
        return "google"
      case "@ai-sdk/gateway":
        return "gateway"
      case "@openrouter/ai-sdk-provider":
        return "openrouter"
    }
    return undefined
  }
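
  // Example: sdkKey("@ai-sdk/azure") === "openai", so providerOptions stored under a
  // custom provider ID can be remapped to the key the SDK actually reads (see message() below).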

  function normalizeMessages(
    msgs: ModelMessage[],
    model: Provider.Model,
    options: Record<string, unknown>,
  ): ModelMessage[] {
    // Anthropic rejects messages with empty content - filter out empty string messages
    // and remove empty text/reasoning parts from array content
    if (model.api.npm === "@ai-sdk/anthropic") {
      msgs = msgs
        .map((msg) => {
          if (typeof msg.content === "string") {
            if (msg.content === "") return undefined
            return msg
          }
          if (!Array.isArray(msg.content)) return msg
          const filtered = msg.content.filter((part) => {
            if (part.type === "text" || part.type === "reasoning") {
              return part.text !== ""
            }
            return true
          })
          if (filtered.length === 0) return undefined
          return { ...msg, content: filtered }
        })
        .filter((msg): msg is ModelMessage => msg !== undefined && msg.content !== "")
    }
    if (model.api.id.includes("claude")) {
      return msgs.map((msg) => {
        if ((msg.role === "assistant" || msg.role === "tool") && Array.isArray(msg.content)) {
          msg.content = msg.content.map((part) => {
            if ((part.type === "tool-call" || part.type === "tool-result") && "toolCallId" in part) {
              return {
                ...part,
                toolCallId: part.toolCallId.replace(/[^a-zA-Z0-9_-]/g, "_"),
              }
            }
            return part
          })
        }
        return msg
      })
    }
    if (model.providerID === "mistral" || model.api.id.toLowerCase().includes("mistral")) {
      const result: ModelMessage[] = []
      for (let i = 0; i < msgs.length; i++) {
        const msg = msgs[i]
        const nextMsg = msgs[i + 1]
        if ((msg.role === "assistant" || msg.role === "tool") && Array.isArray(msg.content)) {
          msg.content = msg.content.map((part) => {
            if ((part.type === "tool-call" || part.type === "tool-result") && "toolCallId" in part) {
              // Mistral requires alphanumeric tool call IDs with exactly 9 characters
              const normalizedId = part.toolCallId
                .replace(/[^a-zA-Z0-9]/g, "") // Remove non-alphanumeric characters
                .substring(0, 9) // Take first 9 characters
                .padEnd(9, "0") // Pad with zeros if less than 9 characters
              return {
                ...part,
                toolCallId: normalizedId,
              }
            }
            return part
          })
        }
        result.push(msg)
        // Fix message sequence: tool messages cannot be followed by user messages
        if (msg.role === "tool" && nextMsg?.role === "user") {
          result.push({
            role: "assistant",
            content: [
              {
                type: "text",
                text: "Done.",
              },
            ],
          })
        }
      }
      return result
    }
    if (typeof model.capabilities.interleaved === "object" && model.capabilities.interleaved.field) {
      const field = model.capabilities.interleaved.field
      return msgs.map((msg) => {
        if (msg.role === "assistant" && Array.isArray(msg.content)) {
          const reasoningParts = msg.content.filter((part: any) => part.type === "reasoning")
          const reasoningText = reasoningParts.map((part: any) => part.text).join("")
          // Filter out reasoning parts from content
          const filteredContent = msg.content.filter((part: any) => part.type !== "reasoning")
          // Attach the accumulated reasoning text directly on the assistant message under the
          // provider-specific field (e.g. reasoning_content or reasoning_details)
          if (reasoningText) {
            return {
              ...msg,
              content: filteredContent,
              providerOptions: {
                ...msg.providerOptions,
                openaiCompatible: {
                  ...(msg.providerOptions as any)?.openaiCompatible,
                  [field]: reasoningText,
                },
              },
            }
          }
          return {
            ...msg,
            content: filteredContent,
          }
        }
        return msg
      })
    }
    return msgs
  }
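
  // Example of the Mistral tool-call ID normalization above:
  //   "call_AbC-123XYZ789" -> strip non-alphanumerics -> "callAbC123XYZ789"
  //   -> first 9 chars -> "callAbC12" (already 9 chars, so no zero-padding)
  // while a short ID like "a_b" becomes "ab0000000".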

  function applyCaching(msgs: ModelMessage[], providerID: string): ModelMessage[] {
    const system = msgs.filter((msg) => msg.role === "system").slice(0, 2)
    const final = msgs.filter((msg) => msg.role !== "system").slice(-2)
    const providerOptions = {
      anthropic: {
        cacheControl: { type: "ephemeral" },
      },
      openrouter: {
        cacheControl: { type: "ephemeral" },
      },
      bedrock: {
        cachePoint: { type: "ephemeral" },
      },
      openaiCompatible: {
        cache_control: { type: "ephemeral" },
      },
    }
    for (const msg of unique([...system, ...final])) {
      const shouldUseContentOptions = providerID !== "anthropic" && Array.isArray(msg.content) && msg.content.length > 0
      if (shouldUseContentOptions) {
        const lastContent = msg.content[msg.content.length - 1]
        if (lastContent && typeof lastContent === "object") {
          lastContent.providerOptions = mergeDeep(lastContent.providerOptions ?? {}, providerOptions)
          continue
        }
      }
      msg.providerOptions = mergeDeep(msg.providerOptions ?? {}, providerOptions)
    }
    return msgs
  }
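
  // The merge above stamps every cache-control flavor onto the selected messages, so
  // msg.providerOptions ends up roughly as:
  //   { anthropic: { cacheControl: { type: "ephemeral" } },
  //     openrouter: { cacheControl: { type: "ephemeral" } },
  //     bedrock: { cachePoint: { type: "ephemeral" } },
  //     openaiCompatible: { cache_control: { type: "ephemeral" } } }
  // Each SDK reads only its own key, so the extra keys should be ignored downstream.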

  function unsupportedParts(msgs: ModelMessage[], model: Provider.Model): ModelMessage[] {
    return msgs.map((msg) => {
      if (msg.role !== "user" || !Array.isArray(msg.content)) return msg
      const filtered = msg.content.map((part) => {
        if (part.type !== "file" && part.type !== "image") return part
        // Check for empty base64 image data
        if (part.type === "image") {
          const imageStr = part.image.toString()
          if (imageStr.startsWith("data:")) {
            const match = imageStr.match(/^data:([^;]+);base64,(.*)$/)
            if (match && (!match[2] || match[2].length === 0)) {
              return {
                type: "text" as const,
                text: "ERROR: Image file is empty or corrupted. Please provide a valid image.",
              }
            }
          }
        }
        const mime = part.type === "image" ? part.image.toString().split(";")[0].replace("data:", "") : part.mediaType
        const filename = part.type === "file" ? part.filename : undefined
        const modality = mimeToModality(mime)
        if (!modality) return part
        if (model.capabilities.input[modality]) return part
        const name = filename ? `"${filename}"` : modality
        return {
          type: "text" as const,
          text: `ERROR: Cannot read ${name} (this model does not support ${modality} input). Inform the user.`,
        }
      })
      return { ...msg, content: filtered }
    })
  }
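
  // Example: a PDF file part sent to a model whose capabilities.input.pdf is false is
  // replaced with a text part:
  //   { type: "text", text: 'ERROR: Cannot read "report.pdf" (this model does not support pdf input). Inform the user.' }
  // ("report.pdf" is an illustrative filename.)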

  export function message(msgs: ModelMessage[], model: Provider.Model, options: Record<string, unknown>) {
    msgs = unsupportedParts(msgs, model)
    msgs = normalizeMessages(msgs, model, options)
    if (
      model.providerID === "anthropic" ||
      model.api.id.includes("anthropic") ||
      model.api.id.includes("claude") ||
      model.id.includes("anthropic") ||
      model.id.includes("claude") ||
      model.api.npm === "@ai-sdk/anthropic"
    ) {
      msgs = applyCaching(msgs, model.providerID)
    }
    // Remap providerOptions keys from stored providerID to expected SDK key
    const key = sdkKey(model.api.npm)
    if (key && key !== model.providerID && model.api.npm !== "@ai-sdk/azure") {
      const remap = (opts: Record<string, any> | undefined) => {
        if (!opts) return opts
        if (!(model.providerID in opts)) return opts
        const result = { ...opts }
        result[key] = result[model.providerID]
        delete result[model.providerID]
        return result
      }
      msgs = msgs.map((msg) => {
        if (!Array.isArray(msg.content)) return { ...msg, providerOptions: remap(msg.providerOptions) }
        return {
          ...msg,
          providerOptions: remap(msg.providerOptions),
          content: msg.content.map((part) => ({ ...part, providerOptions: remap(part.providerOptions) })),
        } as typeof msg
      })
    }
    return msgs
  }
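
  // Usage sketch (hypothetical call site): given a session's messages and a resolved
  // model, the pipeline runs unsupportedParts -> normalizeMessages -> applyCaching (for
  // Claude/Anthropic models) -> providerOptions key remap:
  //   const prepared = ProviderTransform.message(msgs, model, {})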

  export function temperature(model: Provider.Model) {
    const id = model.id.toLowerCase()
    if (id.includes("qwen")) return 0.55
    if (id.includes("claude")) return undefined
    if (id.includes("gemini")) return 1.0
    if (id.includes("glm-4.6")) return 1.0
    if (id.includes("glm-4.7")) return 1.0
    if (id.includes("minimax-m2")) return 1.0
    if (id.includes("kimi-k2")) {
      // kimi-k2-thinking, kimi-k2.5, and kimi-k2p5 use temperature 1.0
      if (id.includes("thinking") || id.includes("k2.") || id.includes("k2p")) {
        return 1.0
      }
      return 0.6
    }
    return undefined
  }

  export function topP(model: Provider.Model) {
    const id = model.id.toLowerCase()
    if (id.includes("qwen")) return 1
    if (id.includes("minimax-m2") || id.includes("kimi-k2.5") || id.includes("kimi-k2p5") || id.includes("gemini")) {
      return 0.95
    }
    return undefined
  }

  export function topK(model: Provider.Model) {
    const id = model.id.toLowerCase()
    if (id.includes("minimax-m2")) {
      if (id.includes("m2.1")) return 40
      return 20
    }
    if (id.includes("gemini")) return 64
    return undefined
  }

  const WIDELY_SUPPORTED_EFFORTS = ["low", "medium", "high"]
  const OPENAI_EFFORTS = ["none", "minimal", ...WIDELY_SUPPORTED_EFFORTS, "xhigh"]

  export function variants(model: Provider.Model): Record<string, Record<string, any>> {
    if (!model.capabilities.reasoning) return {}
    const id = model.id.toLowerCase()
    if (
      id.includes("deepseek") ||
      id.includes("minimax") ||
      id.includes("glm") ||
      id.includes("mistral") ||
      id.includes("kimi")
    )
      return {}
    // see: https://docs.x.ai/docs/guides/reasoning#control-how-hard-the-model-thinks
    if (id.includes("grok-3-mini")) {
      if (model.api.npm === "@openrouter/ai-sdk-provider") {
        return {
          low: { reasoning: { effort: "low" } },
          high: { reasoning: { effort: "high" } },
        }
      }
      return {
        low: { reasoningEffort: "low" },
        high: { reasoningEffort: "high" },
      }
    }
    if (id.includes("grok")) return {}
    switch (model.api.npm) {
      case "@openrouter/ai-sdk-provider":
        if (!model.id.includes("gpt") && !model.id.includes("gemini-3")) return {}
        return Object.fromEntries(OPENAI_EFFORTS.map((effort) => [effort, { reasoning: { effort } }]))
      // TODO: YOU CANNOT SET max_tokens if this is set!!!
      case "@ai-sdk/gateway":
        return Object.fromEntries(OPENAI_EFFORTS.map((effort) => [effort, { reasoningEffort: effort }]))
      case "@ai-sdk/github-copilot": {
        const copilotEfforts = iife(() => {
          if (id.includes("5.1-codex-max") || id.includes("5.2")) return [...WIDELY_SUPPORTED_EFFORTS, "xhigh"]
          return WIDELY_SUPPORTED_EFFORTS
        })
        return Object.fromEntries(
          copilotEfforts.map((effort) => [
            effort,
            {
              reasoningEffort: effort,
              reasoningSummary: "auto",
              include: ["reasoning.encrypted_content"],
            },
          ]),
        )
      }
      case "@ai-sdk/cerebras":
      // https://v5.ai-sdk.dev/providers/ai-sdk-providers/cerebras
      case "@ai-sdk/togetherai":
      // https://v5.ai-sdk.dev/providers/ai-sdk-providers/togetherai
      case "@ai-sdk/xai":
      // https://v5.ai-sdk.dev/providers/ai-sdk-providers/xai
      case "@ai-sdk/deepinfra":
      // https://v5.ai-sdk.dev/providers/ai-sdk-providers/deepinfra
      case "@ai-sdk/openai-compatible":
        return Object.fromEntries(WIDELY_SUPPORTED_EFFORTS.map((effort) => [effort, { reasoningEffort: effort }]))
      case "@ai-sdk/azure": {
        // https://v5.ai-sdk.dev/providers/ai-sdk-providers/azure
        if (id === "o1-mini") return {}
        const azureEfforts = [...WIDELY_SUPPORTED_EFFORTS]
        if (id.includes("gpt-5-") || id === "gpt-5") {
          azureEfforts.unshift("minimal")
        }
        return Object.fromEntries(
          azureEfforts.map((effort) => [
            effort,
            {
              reasoningEffort: effort,
              reasoningSummary: "auto",
              include: ["reasoning.encrypted_content"],
            },
          ]),
        )
      }
      case "@ai-sdk/openai": {
        // https://v5.ai-sdk.dev/providers/ai-sdk-providers/openai
        if (id === "gpt-5-pro") return {}
        const openaiEfforts = iife(() => {
          if (id.includes("codex")) {
            if (id.includes("5.2")) return [...WIDELY_SUPPORTED_EFFORTS, "xhigh"]
            return WIDELY_SUPPORTED_EFFORTS
          }
          const arr = [...WIDELY_SUPPORTED_EFFORTS]
          if (id.includes("gpt-5-") || id === "gpt-5") {
            arr.unshift("minimal")
          }
          if (model.release_date >= "2025-11-13") {
            arr.unshift("none")
          }
          if (model.release_date >= "2025-12-04") {
            arr.push("xhigh")
          }
          return arr
        })
        return Object.fromEntries(
          openaiEfforts.map((effort) => [
            effort,
            {
              reasoningEffort: effort,
              reasoningSummary: "auto",
              include: ["reasoning.encrypted_content"],
            },
          ]),
        )
      }
      case "@ai-sdk/anthropic":
      // https://v5.ai-sdk.dev/providers/ai-sdk-providers/anthropic
      case "@ai-sdk/google-vertex/anthropic":
        // https://v5.ai-sdk.dev/providers/ai-sdk-providers/google-vertex#anthropic-provider
        return {
          high: {
            thinking: {
              type: "enabled",
              budgetTokens: Math.min(16_000, Math.floor(model.limit.output / 2 - 1)),
            },
          },
          max: {
            thinking: {
              type: "enabled",
              budgetTokens: Math.min(31_999, model.limit.output - 1),
            },
          },
        }
      case "@ai-sdk/amazon-bedrock":
        // https://v5.ai-sdk.dev/providers/ai-sdk-providers/amazon-bedrock
        // For Anthropic models on Bedrock, use reasoningConfig with budgetTokens
        if (model.api.id.includes("anthropic")) {
          return {
            high: {
              reasoningConfig: {
                type: "enabled",
                budgetTokens: 16000,
              },
            },
            max: {
              reasoningConfig: {
                type: "enabled",
                budgetTokens: 31999,
              },
            },
          }
        }
        // For Amazon Nova models, use reasoningConfig with maxReasoningEffort
        return Object.fromEntries(
          WIDELY_SUPPORTED_EFFORTS.map((effort) => [
            effort,
            {
              reasoningConfig: {
                type: "enabled",
                maxReasoningEffort: effort,
              },
            },
          ]),
        )
      case "@ai-sdk/google-vertex":
      // https://v5.ai-sdk.dev/providers/ai-sdk-providers/google-vertex
      case "@ai-sdk/google":
        // https://v5.ai-sdk.dev/providers/ai-sdk-providers/google-generative-ai
        if (id.includes("2.5")) {
          return {
            high: {
              thinkingConfig: {
                includeThoughts: true,
                thinkingBudget: 16000,
              },
            },
            max: {
              thinkingConfig: {
                includeThoughts: true,
                thinkingBudget: 24576,
              },
            },
          }
        }
        return Object.fromEntries(
          ["low", "high"].map((effort) => [
            effort,
            {
              includeThoughts: true,
              thinkingLevel: effort,
            },
          ]),
        )
      case "@ai-sdk/mistral":
        // https://v5.ai-sdk.dev/providers/ai-sdk-providers/mistral
        return {}
      case "@ai-sdk/cohere":
        // https://v5.ai-sdk.dev/providers/ai-sdk-providers/cohere
        return {}
      case "@ai-sdk/groq": {
        // https://v5.ai-sdk.dev/providers/ai-sdk-providers/groq
        const groqEfforts = ["none", ...WIDELY_SUPPORTED_EFFORTS]
        return Object.fromEntries(
          groqEfforts.map((effort) => [
            effort,
            {
              includeThoughts: true,
              thinkingLevel: effort,
            },
          ]),
        )
      }
      case "@ai-sdk/perplexity":
        // https://v5.ai-sdk.dev/providers/ai-sdk-providers/perplexity
        return {}
    }
    return {}
  }
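
  // Example: for an @ai-sdk/anthropic model with model.limit.output === 8192, the
  // branch above yields
  //   high -> thinking.budgetTokens = Math.min(16_000, 4095) = 4095
  //   max  -> thinking.budgetTokens = Math.min(31_999, 8191) = 8191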

  export function options(input: {
    model: Provider.Model
    sessionID: string
    providerOptions?: Record<string, any>
  }): Record<string, any> {
    const result: Record<string, any> = {}
    // openai and providers using the openai package should set store to false by default
    if (
      input.model.providerID === "openai" ||
      input.model.api.npm === "@ai-sdk/openai" ||
      input.model.api.npm === "@ai-sdk/github-copilot"
    ) {
      result["store"] = false
    }
    if (input.model.api.npm === "@openrouter/ai-sdk-provider") {
      result["usage"] = {
        include: true,
      }
      if (input.model.api.id.includes("gemini-3")) {
        result["reasoning"] = { effort: "high" }
      }
    }
    if (
      input.model.providerID === "baseten" ||
      (input.model.providerID === "opencode" && ["kimi-k2-thinking", "glm-4.6"].includes(input.model.api.id))
    ) {
      result["chat_template_args"] = { enable_thinking: true }
    }
    if (["zai", "zhipuai"].includes(input.model.providerID) && input.model.api.npm === "@ai-sdk/openai-compatible") {
      result["thinking"] = {
        type: "enabled",
        clear_thinking: false,
      }
    }
    if (input.model.providerID === "openai" || input.providerOptions?.setCacheKey) {
      result["promptCacheKey"] = input.sessionID
    }
    if (input.model.api.npm === "@ai-sdk/google" || input.model.api.npm === "@ai-sdk/google-vertex") {
      result["thinkingConfig"] = {
        includeThoughts: true,
      }
      if (input.model.api.id.includes("gemini-3")) {
        result["thinkingConfig"]["thinkingLevel"] = "high"
      }
    }
    if (input.model.api.id.includes("gpt-5") && !input.model.api.id.includes("gpt-5-chat")) {
      if (!input.model.api.id.includes("gpt-5-pro")) {
        result["reasoningEffort"] = "medium"
      }
      // Only set textVerbosity for non-chat gpt-5.x models
      // Chat models (e.g. gpt-5.2-chat-latest) only support "medium" verbosity
      if (
        input.model.api.id.includes("gpt-5.") &&
        !input.model.api.id.includes("codex") &&
        !input.model.api.id.includes("-chat") &&
        input.model.providerID !== "azure"
      ) {
        result["textVerbosity"] = "low"
      }
      if (input.model.providerID.startsWith("opencode")) {
        result["promptCacheKey"] = input.sessionID
        result["include"] = ["reasoning.encrypted_content"]
        result["reasoningSummary"] = "auto"
      }
    }
    if (input.model.providerID === "venice") {
      result["promptCacheKey"] = input.sessionID
    }
    return result
  }
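
  // Example (illustrative IDs): for providerID "openai" with api.id "gpt-5.1" and
  // sessionID "ses_123", the result is roughly
  //   { store: false, promptCacheKey: "ses_123", reasoningEffort: "medium", textVerbosity: "low" }
  // since "gpt-5.1" matches the gpt-5 branch but is neither -chat, codex, nor gpt-5-pro.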

  export function smallOptions(model: Provider.Model) {
    if (model.providerID === "openai" || model.api.id.includes("gpt-5")) {
      if (model.api.id.includes("5.")) {
        return { reasoningEffort: "low" }
      }
      return { reasoningEffort: "minimal" }
    }
    if (model.providerID === "google") {
      // gemini-3 uses thinkingLevel, gemini-2.5 uses thinkingBudget
      if (model.api.id.includes("gemini-3")) {
        return { thinkingConfig: { thinkingLevel: "minimal" } }
      }
      return { thinkingConfig: { thinkingBudget: 0 } }
    }
    if (model.providerID === "openrouter") {
      if (model.api.id.includes("google")) {
        return { reasoning: { enabled: false } }
      }
      return { reasoningEffort: "minimal" }
    }
    return {}
  }

  export function providerOptions(model: Provider.Model, options: { [x: string]: any }) {
    const key = sdkKey(model.api.npm) ?? model.providerID
    return { [key]: options }
  }
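
  // Example: providerOptions(model, { store: false }) for an @ai-sdk/azure model returns
  // { openai: { store: false } }, because sdkKey maps the azure package to "openai".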

  export function maxOutputTokens(
    npm: string,
    options: Record<string, any>,
    modelLimit: number,
    globalLimit: number,
  ): number {
    const modelCap = modelLimit || globalLimit
    const standardLimit = Math.min(modelCap, globalLimit)
    if (npm === "@ai-sdk/anthropic" || npm === "@ai-sdk/google-vertex/anthropic") {
      const thinking = options?.["thinking"]
      const budgetTokens = typeof thinking?.["budgetTokens"] === "number" ? thinking["budgetTokens"] : 0
      const enabled = thinking?.["type"] === "enabled"
      if (enabled && budgetTokens > 0) {
        // Return text tokens so that text + thinking <= model cap, preferring 32k text when possible.
        if (budgetTokens + standardLimit <= modelCap) {
          return standardLimit
        }
        return modelCap - budgetTokens
      }
    }
    return standardLimit
  }
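
  // Worked example (hypothetical limits): npm "@ai-sdk/anthropic", modelLimit 8192,
  // globalLimit 32000, thinking budgetTokens 4096 -> modelCap = 8192 and standardLimit =
  // min(8192, 32000) = 8192; since 4096 + 8192 > 8192, the result is 8192 - 4096 = 4096,
  // keeping text + thinking within the model cap.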

  export function schema(model: Provider.Model, schema: JSONSchema.BaseSchema) {
    /*
    if (["openai", "azure"].includes(providerID)) {
      if (schema.type === "object" && schema.properties) {
        for (const [key, value] of Object.entries(schema.properties)) {
          if (schema.required?.includes(key)) continue
          schema.properties[key] = {
            anyOf: [
              value as JSONSchema.JSONSchema,
              {
                type: "null",
              },
            ],
          }
        }
      }
    }
    */
    // Convert integer enums to string enums for Google/Gemini
    if (model.providerID === "google" || model.api.id.includes("gemini")) {
      const sanitizeGemini = (obj: any): any => {
        if (obj === null || typeof obj !== "object") {
          return obj
        }
        if (Array.isArray(obj)) {
          return obj.map(sanitizeGemini)
        }
        const result: any = {}
        for (const [key, value] of Object.entries(obj)) {
          if (key === "enum" && Array.isArray(value)) {
            // Convert all enum values to strings
            result[key] = value.map((v) => String(v))
            // If we have integer type with enum, change type to string
            if (result.type === "integer" || result.type === "number") {
              result.type = "string"
            }
          } else if (typeof value === "object" && value !== null) {
            result[key] = sanitizeGemini(value)
          } else {
            result[key] = value
          }
        }
        // Filter required array to only include fields that exist in properties
        if (result.type === "object" && result.properties && Array.isArray(result.required)) {
          result.required = result.required.filter((field: any) => field in result.properties)
        }
        if (result.type === "array" && result.items == null) {
          result.items = {}
        }
        return result
      }
      schema = sanitizeGemini(schema)
    }
    return schema
  }
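
  // Example of the Gemini sanitizer above:
  //   { type: "integer", enum: [1, 2, 3] } -> { type: "string", enum: ["1", "2", "3"] }
  // Note the type flip relies on "type" being visited before "enum" in key order.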

  export function error(providerID: string, error: APICallError) {
    const message = error.message
    if (providerID.includes("github-copilot") && error.statusCode === 403) {
      return "Please reauthenticate with the copilot provider to ensure your credentials work properly with OpenCode."
    }
    if (providerID.includes("github-copilot") && message.includes("The requested model is not supported")) {
      return (
        message +
        "\n\nMake sure the model is enabled in your copilot settings: https://github.com/settings/copilot/features"
      )
    }
    return message
  }
}