// transform.ts
import type { APICallError, ModelMessage } from "ai"
import { unique } from "remeda"
import type { JSONSchema } from "zod/v4/core"
import type { Provider } from "./provider"
import type { ModelsDev } from "./models"
import { iife } from "@/util/iife"

type Modality = NonNullable<ModelsDev.Model["modalities"]>["input"][number]

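// Maps a MIME type to the input modality it represents; returns undefined for
// types that need no capability check.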
function mimeToModality(mime: string): Modality | undefined {
  if (mime.startsWith("image/")) return "image"
  if (mime.startsWith("audio/")) return "audio"
  if (mime.startsWith("video/")) return "video"
  if (mime === "application/pdf") return "pdf"
  return undefined
}

export namespace ProviderTransform {
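  // Applies provider-specific message fixups: Anthropic's empty-content rules,
  // Claude and Mistral tool-call ID formats, Mistral's tool/user ordering, and
  // inlined reasoning_content for interleaved-reasoning models.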
  function normalizeMessages(msgs: ModelMessage[], model: Provider.Model): ModelMessage[] {
    // Anthropic rejects messages with empty content - filter out empty string messages
    // and remove empty text/reasoning parts from array content
    if (model.api.npm === "@ai-sdk/anthropic") {
      msgs = msgs
        .map((msg) => {
          if (typeof msg.content === "string") {
            if (msg.content === "") return undefined
            return msg
          }
          if (!Array.isArray(msg.content)) return msg
          const filtered = msg.content.filter((part) => {
            if (part.type === "text" || part.type === "reasoning") {
              return part.text !== ""
            }
            return true
          })
          if (filtered.length === 0) return undefined
          return { ...msg, content: filtered }
        })
        .filter((msg): msg is ModelMessage => msg !== undefined && msg.content !== "")
    }
    if (model.api.id.includes("claude")) {
      return msgs.map((msg) => {
        if ((msg.role === "assistant" || msg.role === "tool") && Array.isArray(msg.content)) {
          msg.content = msg.content.map((part) => {
            if ((part.type === "tool-call" || part.type === "tool-result") && "toolCallId" in part) {
              return {
                ...part,
                toolCallId: part.toolCallId.replace(/[^a-zA-Z0-9_-]/g, "_"),
              }
            }
            return part
          })
        }
        return msg
      })
    }
    if (model.providerID === "mistral" || model.api.id.toLowerCase().includes("mistral")) {
      const result: ModelMessage[] = []
      for (let i = 0; i < msgs.length; i++) {
        const msg = msgs[i]
        const nextMsg = msgs[i + 1]
        if ((msg.role === "assistant" || msg.role === "tool") && Array.isArray(msg.content)) {
          msg.content = msg.content.map((part) => {
            if ((part.type === "tool-call" || part.type === "tool-result") && "toolCallId" in part) {
              // Mistral requires alphanumeric tool call IDs with exactly 9 characters
              const normalizedId = part.toolCallId
                .replace(/[^a-zA-Z0-9]/g, "") // Remove non-alphanumeric characters
                .substring(0, 9) // Take first 9 characters
                .padEnd(9, "0") // Pad with zeros if less than 9 characters
              return {
                ...part,
                toolCallId: normalizedId,
              }
            }
            return part
          })
        }
        result.push(msg)
        // Fix message sequence: tool messages cannot be followed by user messages
        if (msg.role === "tool" && nextMsg?.role === "user") {
          result.push({
            role: "assistant",
            content: [
              {
                type: "text",
                text: "Done.",
              },
            ],
          })
        }
      }
      return result
    }
    if (
      model.capabilities.interleaved &&
      typeof model.capabilities.interleaved === "object" &&
      model.capabilities.interleaved.field === "reasoning_content"
    ) {
      return msgs.map((msg) => {
        if (msg.role === "assistant" && Array.isArray(msg.content)) {
          const reasoningParts = msg.content.filter((part: any) => part.type === "reasoning")
          const reasoningText = reasoningParts.map((part: any) => part.text).join("")
          // Filter out reasoning parts from content
          const filteredContent = msg.content.filter((part: any) => part.type !== "reasoning")
          // Include reasoning_content directly on the message for all assistant messages
          if (reasoningText) {
            return {
              ...msg,
              content: filteredContent,
              providerOptions: {
                ...msg.providerOptions,
                openaiCompatible: {
                  ...(msg.providerOptions as any)?.openaiCompatible,
                  reasoning_content: reasoningText,
                },
              },
            }
          }
          return {
            ...msg,
            content: filteredContent,
          }
        }
        return msg
      })
    }
    return msgs
  }

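  // Marks up to two system messages and the last two non-system messages as
  // cacheable, emitting each provider's flavor of the ephemeral cache flag.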
  function applyCaching(msgs: ModelMessage[], providerID: string): ModelMessage[] {
    const system = msgs.filter((msg) => msg.role === "system").slice(0, 2)
    const final = msgs.filter((msg) => msg.role !== "system").slice(-2)
    const providerOptions = {
      anthropic: {
        cacheControl: { type: "ephemeral" },
      },
      openrouter: {
        cacheControl: { type: "ephemeral" },
      },
      bedrock: {
        cachePoint: { type: "ephemeral" },
      },
      openaiCompatible: {
        cache_control: { type: "ephemeral" },
      },
    }
    for (const msg of unique([...system, ...final])) {
      const shouldUseContentOptions =
        providerID !== "anthropic" && Array.isArray(msg.content) && msg.content.length > 0
      if (shouldUseContentOptions) {
        const lastContent = msg.content[msg.content.length - 1]
        if (lastContent && typeof lastContent === "object") {
          lastContent.providerOptions = {
            ...lastContent.providerOptions,
            ...providerOptions,
          }
          continue
        }
      }
      msg.providerOptions = {
        ...msg.providerOptions,
        ...providerOptions,
      }
    }
    return msgs
  }

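  // Replaces attachments the model cannot ingest (and empty base64 images)
  // with explanatory text parts so the failure is surfaced to the model.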
  function unsupportedParts(msgs: ModelMessage[], model: Provider.Model): ModelMessage[] {
    return msgs.map((msg) => {
      if (msg.role !== "user" || !Array.isArray(msg.content)) return msg
      const filtered = msg.content.map((part) => {
        if (part.type !== "file" && part.type !== "image") return part
        // Check for empty base64 image data
        if (part.type === "image") {
          const imageStr = part.image.toString()
          if (imageStr.startsWith("data:")) {
            const match = imageStr.match(/^data:([^;]+);base64,(.*)$/)
            if (match && (!match[2] || match[2].length === 0)) {
              return {
                type: "text" as const,
                text: "ERROR: Image file is empty or corrupted. Please provide a valid image.",
              }
            }
          }
        }
        const mime = part.type === "image" ? part.image.toString().split(";")[0].replace("data:", "") : part.mediaType
        const filename = part.type === "file" ? part.filename : undefined
        const modality = mimeToModality(mime)
        if (!modality) return part
        if (model.capabilities.input[modality]) return part
        const name = filename ? `"${filename}"` : modality
        return {
          type: "text" as const,
          text: `ERROR: Cannot read ${name} (this model does not support ${modality} input). Inform the user.`,
        }
      })
      return { ...msg, content: filtered }
    })
  }

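  // Entry point for outgoing message preparation: strips unsupported parts,
  // applies provider-specific normalization, and enables prompt caching for
  // Anthropic-style APIs. A minimal usage sketch (history and model supplied
  // by the caller):
  //   const prepared = ProviderTransform.message(history, model)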
  export function message(msgs: ModelMessage[], model: Provider.Model) {
    msgs = unsupportedParts(msgs, model)
    msgs = normalizeMessages(msgs, model)
    if (
      model.providerID === "anthropic" ||
      model.api.id.includes("anthropic") ||
      model.api.id.includes("claude") ||
      model.api.npm === "@ai-sdk/anthropic"
    ) {
      msgs = applyCaching(msgs, model.providerID)
    }
    return msgs
  }

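  // Default sampling temperature per model family; undefined defers to the provider.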
  export function temperature(model: Provider.Model) {
    const id = model.id.toLowerCase()
    if (id.includes("qwen")) return 0.55
    if (id.includes("claude")) return undefined
    if (id.includes("gemini")) return 1.0
    if (id.includes("glm-4.6")) return 1.0
    if (id.includes("glm-4.7")) return 1.0
    if (id.includes("minimax-m2")) return 1.0
    if (id.includes("kimi-k2")) {
      if (id.includes("thinking")) return 1.0
      return 0.6
    }
    return undefined
  }

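  // Default nucleus-sampling (top-p) value per model family; undefined defers to the provider.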
  export function topP(model: Provider.Model) {
    const id = model.id.toLowerCase()
    if (id.includes("qwen")) return 1
    if (id.includes("minimax-m2")) {
      return 0.95
    }
    if (id.includes("gemini")) return 0.95
    return undefined
  }

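  // Default top-k value per model family; undefined defers to the provider.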
  export function topK(model: Provider.Model) {
    const id = model.id.toLowerCase()
    if (id.includes("minimax-m2")) {
      if (id.includes("m2.1")) return 40
      return 20
    }
    if (id.includes("gemini")) return 64
    return undefined
  }

  const WIDELY_SUPPORTED_EFFORTS = ["low", "medium", "high"]
  const OPENAI_EFFORTS = ["none", "minimal", ...WIDELY_SUPPORTED_EFFORTS, "xhigh"]

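  // Builds the reasoning-effort variants a model supports, keyed by effort
  // name, each value shaped for the given SDK's provider options.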
  export function variants(model: Provider.Model): Record<string, Record<string, any>> {
    if (!model.capabilities.reasoning) return {}
    const id = model.id.toLowerCase()
    if (id.includes("deepseek") || id.includes("minimax") || id.includes("glm") || id.includes("mistral")) return {}
    switch (model.api.npm) {
      case "@openrouter/ai-sdk-provider":
        if (!model.id.includes("gpt") && !model.id.includes("gemini-3") && !model.id.includes("grok-4")) return {}
        return Object.fromEntries(OPENAI_EFFORTS.map((effort) => [effort, { reasoning: { effort } }]))
      // TODO: YOU CANNOT SET max_tokens if this is set!!!
      case "@ai-sdk/gateway":
        return Object.fromEntries(OPENAI_EFFORTS.map((effort) => [effort, { reasoningEffort: effort }]))
      case "@ai-sdk/cerebras":
      // https://v5.ai-sdk.dev/providers/ai-sdk-providers/cerebras
      case "@ai-sdk/togetherai":
      // https://v5.ai-sdk.dev/providers/ai-sdk-providers/togetherai
      case "@ai-sdk/xai":
      // https://v5.ai-sdk.dev/providers/ai-sdk-providers/xai
      case "@ai-sdk/deepinfra":
      // https://v5.ai-sdk.dev/providers/ai-sdk-providers/deepinfra
      case "@ai-sdk/openai-compatible":
        return Object.fromEntries(WIDELY_SUPPORTED_EFFORTS.map((effort) => [effort, { reasoningEffort: effort }]))
      case "@ai-sdk/azure": {
        // https://v5.ai-sdk.dev/providers/ai-sdk-providers/azure
        if (id === "o1-mini") return {}
        const azureEfforts = ["low", "medium", "high"]
        if (id.includes("gpt-5-") || id === "gpt-5") {
          azureEfforts.unshift("minimal")
        }
        return Object.fromEntries(
          azureEfforts.map((effort) => [
            effort,
            {
              reasoningEffort: effort,
              reasoningSummary: "auto",
              include: ["reasoning.encrypted_content"],
            },
          ]),
        )
      }
      case "@ai-sdk/openai": {
        // https://v5.ai-sdk.dev/providers/ai-sdk-providers/openai
        if (id === "gpt-5-pro") return {}
        const openaiEfforts = iife(() => {
          if (id.includes("codex")) {
            if (id.includes("5.2")) return [...WIDELY_SUPPORTED_EFFORTS, "xhigh"]
            return WIDELY_SUPPORTED_EFFORTS
          }
          const arr = [...WIDELY_SUPPORTED_EFFORTS]
          if (id.includes("gpt-5-") || id === "gpt-5") {
            arr.unshift("minimal")
          }
          if (model.release_date >= "2025-11-13") {
            arr.unshift("none")
          }
          if (model.release_date >= "2025-12-04") {
            arr.push("xhigh")
          }
          return arr
        })
        return Object.fromEntries(
          openaiEfforts.map((effort) => [
            effort,
            {
              reasoningEffort: effort,
              reasoningSummary: "auto",
              include: ["reasoning.encrypted_content"],
            },
          ]),
        )
      }
      case "@ai-sdk/anthropic":
        // https://v5.ai-sdk.dev/providers/ai-sdk-providers/anthropic
        return {
          high: {
            thinking: {
              type: "enabled",
              budgetTokens: 16000,
            },
          },
          max: {
            thinking: {
              type: "enabled",
              budgetTokens: 31999,
            },
          },
        }
      case "@ai-sdk/amazon-bedrock":
        // https://v5.ai-sdk.dev/providers/ai-sdk-providers/amazon-bedrock
        // For Anthropic models on Bedrock, use reasoningConfig with budgetTokens
        if (model.api.id.includes("anthropic")) {
          return {
            high: {
              reasoningConfig: {
                type: "enabled",
                budgetTokens: 16000,
              },
            },
            max: {
              reasoningConfig: {
                type: "enabled",
                budgetTokens: 31999,
              },
            },
          }
        }
        // For Amazon Nova models, use reasoningConfig with maxReasoningEffort
        return Object.fromEntries(
          WIDELY_SUPPORTED_EFFORTS.map((effort) => [
            effort,
            {
              reasoningConfig: {
                type: "enabled",
                maxReasoningEffort: effort,
              },
            },
          ]),
        )
      case "@ai-sdk/google-vertex":
      // https://v5.ai-sdk.dev/providers/ai-sdk-providers/google-vertex
      case "@ai-sdk/google":
        // https://v5.ai-sdk.dev/providers/ai-sdk-providers/google-generative-ai
        if (id.includes("2.5")) {
          return {
            high: {
              thinkingConfig: {
                includeThoughts: true,
                thinkingBudget: 16000,
              },
            },
            max: {
              thinkingConfig: {
                includeThoughts: true,
                thinkingBudget: 24576,
              },
            },
          }
        }
        return Object.fromEntries(
          ["low", "high"].map((effort) => [
            effort,
            {
              includeThoughts: true,
              thinkingLevel: effort,
            },
          ]),
        )
      case "@ai-sdk/mistral":
        // https://v5.ai-sdk.dev/providers/ai-sdk-providers/mistral
        return {}
      case "@ai-sdk/cohere":
        // https://v5.ai-sdk.dev/providers/ai-sdk-providers/cohere
        return {}
      case "@ai-sdk/groq": {
        // https://v5.ai-sdk.dev/providers/ai-sdk-providers/groq
        const groqEffort = ["none", ...WIDELY_SUPPORTED_EFFORTS]
        return Object.fromEntries(
          groqEffort.map((effort) => [
            effort,
            {
              includeThoughts: true,
              thinkingLevel: effort,
            },
          ]),
        )
      }
      case "@ai-sdk/perplexity":
        // https://v5.ai-sdk.dev/providers/ai-sdk-providers/perplexity
        return {}
    }
    return {}
  }

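  // Builds provider-specific request options. sessionID doubles as the prompt
  // cache key for providers that support it.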
  export function options(
    model: Provider.Model,
    sessionID: string,
    providerOptions?: Record<string, any>,
  ): Record<string, any> {
    const result: Record<string, any> = {}
    if (model.api.npm === "@openrouter/ai-sdk-provider") {
      result["usage"] = {
        include: true,
      }
      if (model.api.id.includes("gemini-3")) {
        result["reasoning"] = { effort: "high" }
      }
    }
    if (
      model.providerID === "baseten" ||
      (model.providerID === "opencode" && ["kimi-k2-thinking", "glm-4.6"].includes(model.api.id))
    ) {
      result["chat_template_args"] = { enable_thinking: true }
    }
    if (["zai", "zhipuai"].includes(model.providerID) && model.api.npm === "@ai-sdk/openai-compatible") {
      result["thinking"] = {
        type: "enabled",
        clear_thinking: false,
      }
    }
    if (model.providerID === "openai" || providerOptions?.setCacheKey) {
      result["promptCacheKey"] = sessionID
    }
    if (model.api.npm === "@ai-sdk/google" || model.api.npm === "@ai-sdk/google-vertex") {
      result["thinkingConfig"] = {
        includeThoughts: true,
      }
      if (model.api.id.includes("gemini-3")) {
        result["thinkingConfig"]["thinkingLevel"] = "high"
      }
    }
    if (model.api.id.includes("gpt-5") && !model.api.id.includes("gpt-5-chat")) {
      if (model.providerID.includes("codex")) {
        result["store"] = false
      }
      if (!model.api.id.includes("codex") && !model.api.id.includes("gpt-5-pro")) {
        result["reasoningEffort"] = "medium"
      }
      if (model.api.id.endsWith("gpt-5.") && model.providerID !== "azure") {
        result["textVerbosity"] = "low"
      }
      if (model.providerID.startsWith("opencode")) {
        result["promptCacheKey"] = sessionID
        result["include"] = ["reasoning.encrypted_content"]
        result["reasoningSummary"] = "auto"
      }
    }
    return result
  }

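  // Options for the lightweight "small model" path: keep reasoning effort
  // minimal, or disable thinking entirely where the API allows it.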
  export function smallOptions(model: Provider.Model) {
    if (model.providerID === "openai" || model.api.id.includes("gpt-5")) {
      if (model.api.id.includes("5.")) {
        return { reasoningEffort: "low" }
      }
      return { reasoningEffort: "minimal" }
    }
    if (model.providerID === "google") {
      // gemini-3 uses thinkingLevel, gemini-2.5 uses thinkingBudget
      if (model.api.id.includes("gemini-3")) {
        return { thinkingConfig: { thinkingLevel: "minimal" } }
      }
      return { thinkingConfig: { thinkingBudget: 0 } }
    }
    if (model.providerID === "openrouter") {
      if (model.api.id.includes("google")) {
        return { reasoning: { enabled: false } }
      }
      return { reasoningEffort: "minimal" }
    }
    return {}
  }

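  // Nests raw options under the key the AI SDK package reads them from, e.g.
  // @ai-sdk/azure and @ai-sdk/github-copilot both read the "openai" key.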
  export function providerOptions(model: Provider.Model, options: { [x: string]: any }) {
    switch (model.api.npm) {
      case "@ai-sdk/github-copilot":
      case "@ai-sdk/openai":
      case "@ai-sdk/azure":
        return {
          ["openai" as string]: options,
        }
      case "@ai-sdk/amazon-bedrock":
        return {
          ["bedrock" as string]: options,
        }
      case "@ai-sdk/anthropic":
        return {
          ["anthropic" as string]: options,
        }
      case "@ai-sdk/google-vertex":
      case "@ai-sdk/google":
        return {
          ["google" as string]: options,
        }
      case "@ai-sdk/gateway":
        return {
          ["gateway" as string]: options,
        }
      case "@openrouter/ai-sdk-provider":
        return {
          ["openrouter" as string]: options,
        }
      default:
        return {
          [model.providerID]: options,
        }
    }
  }

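  // Computes the output-token limit. For Anthropic with extended thinking
  // enabled, text + thinking must fit within the model cap, so the text
  // allotment shrinks once the budget no longer fits. Worked example:
  // modelLimit 32000, globalLimit 32000, budgetTokens 16000 ->
  // standardLimit = 32000, and 16000 + 32000 > 32000, so 16000 text tokens remain.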
  export function maxOutputTokens(
    npm: string,
    options: Record<string, any>,
    modelLimit: number,
    globalLimit: number,
  ): number {
    const modelCap = modelLimit || globalLimit
    const standardLimit = Math.min(modelCap, globalLimit)
    if (npm === "@ai-sdk/anthropic") {
      const thinking = options?.["thinking"]
      const budgetTokens = typeof thinking?.["budgetTokens"] === "number" ? thinking["budgetTokens"] : 0
      const enabled = thinking?.["type"] === "enabled"
      if (enabled && budgetTokens > 0) {
        // Return text tokens so that text + thinking <= model cap, preferring 32k text when possible.
        if (budgetTokens + standardLimit <= modelCap) {
          return standardLimit
        }
        return modelCap - budgetTokens
      }
    }
    return standardLimit
  }

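  // Sanitizes a JSON schema for the target model. For Google/Gemini this
  // stringifies enum values (switching numeric enum types to string), prunes
  // required entries that are missing from properties, and gives untyped
  // arrays an empty items schema.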
  export function schema(model: Provider.Model, schema: JSONSchema.BaseSchema) {
    /*
    if (["openai", "azure"].includes(providerID)) {
      if (schema.type === "object" && schema.properties) {
        for (const [key, value] of Object.entries(schema.properties)) {
          if (schema.required?.includes(key)) continue
          schema.properties[key] = {
            anyOf: [
              value as JSONSchema.JSONSchema,
              {
                type: "null",
              },
            ],
          }
        }
      }
    }
    */
    // Convert integer enums to string enums for Google/Gemini
    if (model.providerID === "google" || model.api.id.includes("gemini")) {
      const sanitizeGemini = (obj: any): any => {
        if (obj === null || typeof obj !== "object") {
          return obj
        }
        if (Array.isArray(obj)) {
          return obj.map(sanitizeGemini)
        }
        const result: any = {}
        for (const [key, value] of Object.entries(obj)) {
          if (key === "enum" && Array.isArray(value)) {
            // Convert all enum values to strings
            result[key] = value.map((v) => String(v))
          } else if (typeof value === "object" && value !== null) {
            result[key] = sanitizeGemini(value)
          } else {
            result[key] = value
          }
        }
        // If an integer/number type carries an enum, switch the type to string
        // to match the stringified values (checked against obj, after the loop,
        // so the enum/type key order cannot skip or undo the conversion)
        if (Array.isArray(obj.enum) && (obj.type === "integer" || obj.type === "number")) {
          result.type = "string"
        }
        // Filter required array to only include fields that exist in properties
        if (result.type === "object" && result.properties && Array.isArray(result.required)) {
          result.required = result.required.filter((field: any) => field in result.properties)
        }
        if (result.type === "array" && result.items == null) {
          result.items = {}
        }
        return result
      }
      schema = sanitizeGemini(schema)
    }
    return schema
  }

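  // Maps provider API errors to friendlier messages, special-casing GitHub
  // Copilot authentication (403) and model-enablement failures.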
  export function error(providerID: string, error: APICallError) {
    const message = error.message
    if (providerID.includes("github-copilot") && error.statusCode === 403) {
      return "Please reauthenticate with the copilot provider to ensure your credentials work properly with OpenCode."
    }
    if (providerID.includes("github-copilot") && message.includes("The requested model is not supported")) {
      return (
        message +
        "\n\nMake sure the model is enabled in your copilot settings: https://github.com/settings/copilot/features"
      )
    }
    return message
  }
}