provider.ts 37 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097
  1. import z from "zod"
  2. import fuzzysort from "fuzzysort"
  3. import { Config } from "../config/config"
  4. import { mapValues, mergeDeep, omit, pickBy, sortBy } from "remeda"
  5. import { NoSuchModelError, type Provider as SDK } from "ai"
  6. import { Log } from "../util/log"
  7. import { BunProc } from "../bun"
  8. import { Plugin } from "../plugin"
  9. import { ModelsDev } from "./models"
  10. import { NamedError } from "@opencode-ai/util/error"
  11. import { Auth } from "../auth"
  12. import { Env } from "../env"
  13. import { Instance } from "../project/instance"
  14. import { Flag } from "../flag/flag"
  15. import { iife } from "@/util/iife"
  16. // Direct imports for bundled providers
  17. import { createAmazonBedrock } from "@ai-sdk/amazon-bedrock"
  18. import { createAnthropic } from "@ai-sdk/anthropic"
  19. import { createAzure } from "@ai-sdk/azure"
  20. import { createGoogleGenerativeAI } from "@ai-sdk/google"
  21. import { createVertex } from "@ai-sdk/google-vertex"
  22. import { createVertexAnthropic } from "@ai-sdk/google-vertex/anthropic"
  23. import { createOpenAI } from "@ai-sdk/openai"
  24. import { createOpenAICompatible } from "@ai-sdk/openai-compatible"
  25. import { createOpenRouter, type LanguageModelV2 } from "@openrouter/ai-sdk-provider"
  26. import { createOpenaiCompatible as createGitHubCopilotOpenAICompatible } from "./sdk/openai-compatible/src"
  27. import { createXai } from "@ai-sdk/xai"
  28. import { createMistral } from "@ai-sdk/mistral"
  29. import { createGroq } from "@ai-sdk/groq"
  30. import { createDeepInfra } from "@ai-sdk/deepinfra"
  31. import { createCerebras } from "@ai-sdk/cerebras"
  32. import { createCohere } from "@ai-sdk/cohere"
  33. import { createGateway } from "@ai-sdk/gateway"
  34. import { createTogetherAI } from "@ai-sdk/togetherai"
  35. import { createPerplexity } from "@ai-sdk/perplexity"
  36. import { createVercel } from "@ai-sdk/vercel"
  37. import { ProviderTransform } from "./transform"
  38. export namespace Provider {
  39. const log = Log.create({ service: "provider" })
  40. const BUNDLED_PROVIDERS: Record<string, (options: any) => SDK> = {
  41. "@ai-sdk/amazon-bedrock": createAmazonBedrock,
  42. "@ai-sdk/anthropic": createAnthropic,
  43. "@ai-sdk/azure": createAzure,
  44. "@ai-sdk/google": createGoogleGenerativeAI,
  45. "@ai-sdk/google-vertex": createVertex,
  46. "@ai-sdk/google-vertex/anthropic": createVertexAnthropic,
  47. "@ai-sdk/openai": createOpenAI,
  48. "@ai-sdk/openai-compatible": createOpenAICompatible,
  49. "@openrouter/ai-sdk-provider": createOpenRouter,
  50. "@ai-sdk/xai": createXai,
  51. "@ai-sdk/mistral": createMistral,
  52. "@ai-sdk/groq": createGroq,
  53. "@ai-sdk/deepinfra": createDeepInfra,
  54. "@ai-sdk/cerebras": createCerebras,
  55. "@ai-sdk/cohere": createCohere,
  56. "@ai-sdk/gateway": createGateway,
  57. "@ai-sdk/togetherai": createTogetherAI,
  58. "@ai-sdk/perplexity": createPerplexity,
  59. "@ai-sdk/vercel": createVercel,
  60. // @ts-ignore (TODO: kill this code so we dont have to maintain it)
  61. "@ai-sdk/github-copilot": createGitHubCopilotOpenAICompatible,
  62. }
  63. type CustomModelLoader = (sdk: any, modelID: string, options?: Record<string, any>) => Promise<any>
  64. type CustomLoader = (provider: Info) => Promise<{
  65. autoload: boolean
  66. getModel?: CustomModelLoader
  67. options?: Record<string, any>
  68. }>
  69. const CUSTOM_LOADERS: Record<string, CustomLoader> = {
  70. async anthropic() {
  71. return {
  72. autoload: false,
  73. options: {
  74. headers: {
  75. "anthropic-beta":
  76. "claude-code-20250219,interleaved-thinking-2025-05-14,fine-grained-tool-streaming-2025-05-14",
  77. },
  78. },
  79. }
  80. },
  81. async opencode(input) {
  82. const hasKey = await (async () => {
  83. const env = Env.all()
  84. if (input.env.some((item) => env[item])) return true
  85. if (await Auth.get(input.id)) return true
  86. const config = await Config.get()
  87. if (config.provider?.["opencode"]?.options?.apiKey) return true
  88. return false
  89. })()
  90. if (!hasKey) {
  91. for (const [key, value] of Object.entries(input.models)) {
  92. if (value.cost.input === 0) continue
  93. delete input.models[key]
  94. }
  95. }
  96. return {
  97. autoload: Object.keys(input.models).length > 0,
  98. options: hasKey ? {} : { apiKey: "public" },
  99. }
  100. },
  101. openai: async () => {
  102. return {
  103. autoload: false,
  104. async getModel(sdk: any, modelID: string, _options?: Record<string, any>) {
  105. return sdk.responses(modelID)
  106. },
  107. options: {},
  108. }
  109. },
  110. "github-copilot": async () => {
  111. return {
  112. autoload: false,
  113. async getModel(sdk: any, modelID: string, _options?: Record<string, any>) {
  114. if (modelID.includes("codex")) {
  115. return sdk.responses(modelID)
  116. }
  117. return sdk.chat(modelID)
  118. },
  119. options: {},
  120. }
  121. },
  122. "github-copilot-enterprise": async () => {
  123. return {
  124. autoload: false,
  125. async getModel(sdk: any, modelID: string, _options?: Record<string, any>) {
  126. if (modelID.includes("codex")) {
  127. return sdk.responses(modelID)
  128. }
  129. return sdk.chat(modelID)
  130. },
  131. options: {},
  132. }
  133. },
  134. azure: async () => {
  135. return {
  136. autoload: false,
  137. async getModel(sdk: any, modelID: string, options?: Record<string, any>) {
  138. if (options?.["useCompletionUrls"]) {
  139. return sdk.chat(modelID)
  140. } else {
  141. return sdk.responses(modelID)
  142. }
  143. },
  144. options: {},
  145. }
  146. },
  147. "azure-cognitive-services": async () => {
  148. const resourceName = Env.get("AZURE_COGNITIVE_SERVICES_RESOURCE_NAME")
  149. return {
  150. autoload: false,
  151. async getModel(sdk: any, modelID: string, options?: Record<string, any>) {
  152. if (options?.["useCompletionUrls"]) {
  153. return sdk.chat(modelID)
  154. } else {
  155. return sdk.responses(modelID)
  156. }
  157. },
  158. options: {
  159. baseURL: resourceName ? `https://${resourceName}.cognitiveservices.azure.com/openai` : undefined,
  160. },
  161. }
  162. },
  163. "amazon-bedrock": async () => {
  164. const auth = await Auth.get("amazon-bedrock")
  165. const awsProfile = Env.get("AWS_PROFILE")
  166. const awsAccessKeyId = Env.get("AWS_ACCESS_KEY_ID")
  167. const awsRegion = Env.get("AWS_REGION")
  168. const awsBearerToken = iife(() => {
  169. const envToken = Env.get("AWS_BEARER_TOKEN_BEDROCK")
  170. if (envToken) return envToken
  171. if (auth?.type === "api") {
  172. Env.set("AWS_BEARER_TOKEN_BEDROCK", auth.key)
  173. return auth.key
  174. }
  175. return undefined
  176. })
  177. if (!awsProfile && !awsAccessKeyId && !awsBearerToken) return { autoload: false }
  178. const defaultRegion = awsRegion ?? "us-east-1"
  179. const { fromNodeProviderChain } = await import(await BunProc.install("@aws-sdk/credential-providers"))
  180. return {
  181. autoload: true,
  182. options: {
  183. region: defaultRegion,
  184. credentialProvider: fromNodeProviderChain(),
  185. },
  186. async getModel(sdk: any, modelID: string, options?: Record<string, any>) {
  187. // Skip region prefixing if model already has global prefix
  188. if (modelID.startsWith("global.")) {
  189. return sdk.languageModel(modelID)
  190. }
  191. // Region resolution precedence (highest to lowest):
  192. // 1. options.region from opencode.json provider config
  193. // 2. defaultRegion from AWS_REGION environment variable
  194. // 3. Default "us-east-1" (baked into defaultRegion)
  195. const region = options?.region ?? defaultRegion
  196. let regionPrefix = region.split("-")[0]
  197. switch (regionPrefix) {
  198. case "us": {
  199. const modelRequiresPrefix = [
  200. "nova-micro",
  201. "nova-lite",
  202. "nova-pro",
  203. "nova-premier",
  204. "claude",
  205. "deepseek",
  206. ].some((m) => modelID.includes(m))
  207. const isGovCloud = region.startsWith("us-gov")
  208. if (modelRequiresPrefix && !isGovCloud) {
  209. modelID = `${regionPrefix}.${modelID}`
  210. }
  211. break
  212. }
  213. case "eu": {
  214. const regionRequiresPrefix = [
  215. "eu-west-1",
  216. "eu-west-2",
  217. "eu-west-3",
  218. "eu-north-1",
  219. "eu-central-1",
  220. "eu-south-1",
  221. "eu-south-2",
  222. ].some((r) => region.includes(r))
  223. const modelRequiresPrefix = ["claude", "nova-lite", "nova-micro", "llama3", "pixtral"].some((m) =>
  224. modelID.includes(m),
  225. )
  226. if (regionRequiresPrefix && modelRequiresPrefix) {
  227. modelID = `${regionPrefix}.${modelID}`
  228. }
  229. break
  230. }
  231. case "ap": {
  232. const isAustraliaRegion = ["ap-southeast-2", "ap-southeast-4"].includes(region)
  233. if (
  234. isAustraliaRegion &&
  235. ["anthropic.claude-sonnet-4-5", "anthropic.claude-haiku"].some((m) => modelID.includes(m))
  236. ) {
  237. regionPrefix = "au"
  238. modelID = `${regionPrefix}.${modelID}`
  239. } else {
  240. const modelRequiresPrefix = ["claude", "nova-lite", "nova-micro", "nova-pro"].some((m) =>
  241. modelID.includes(m),
  242. )
  243. if (modelRequiresPrefix) {
  244. regionPrefix = "apac"
  245. modelID = `${regionPrefix}.${modelID}`
  246. }
  247. }
  248. break
  249. }
  250. }
  251. return sdk.languageModel(modelID)
  252. },
  253. }
  254. },
  255. openrouter: async () => {
  256. return {
  257. autoload: false,
  258. options: {
  259. headers: {
  260. "HTTP-Referer": "https://opencode.ai/",
  261. "X-Title": "opencode",
  262. },
  263. },
  264. }
  265. },
  266. vercel: async () => {
  267. return {
  268. autoload: false,
  269. options: {
  270. headers: {
  271. "http-referer": "https://opencode.ai/",
  272. "x-title": "opencode",
  273. },
  274. },
  275. }
  276. },
  277. "google-vertex": async () => {
  278. const project = Env.get("GOOGLE_CLOUD_PROJECT") ?? Env.get("GCP_PROJECT") ?? Env.get("GCLOUD_PROJECT")
  279. const location = Env.get("GOOGLE_CLOUD_LOCATION") ?? Env.get("VERTEX_LOCATION") ?? "us-east5"
  280. const autoload = Boolean(project)
  281. if (!autoload) return { autoload: false }
  282. return {
  283. autoload: true,
  284. options: {
  285. project,
  286. location,
  287. },
  288. async getModel(sdk: any, modelID: string) {
  289. const id = String(modelID).trim()
  290. return sdk.languageModel(id)
  291. },
  292. }
  293. },
  294. "google-vertex-anthropic": async () => {
  295. const project = Env.get("GOOGLE_CLOUD_PROJECT") ?? Env.get("GCP_PROJECT") ?? Env.get("GCLOUD_PROJECT")
  296. const location = Env.get("GOOGLE_CLOUD_LOCATION") ?? Env.get("VERTEX_LOCATION") ?? "global"
  297. const autoload = Boolean(project)
  298. if (!autoload) return { autoload: false }
  299. return {
  300. autoload: true,
  301. options: {
  302. project,
  303. location,
  304. },
  305. async getModel(sdk: any, modelID) {
  306. const id = String(modelID).trim()
  307. return sdk.languageModel(id)
  308. },
  309. }
  310. },
  311. "sap-ai-core": async () => {
  312. const auth = await Auth.get("sap-ai-core")
  313. const envServiceKey = iife(() => {
  314. const envAICoreServiceKey = Env.get("AICORE_SERVICE_KEY")
  315. if (envAICoreServiceKey) return envAICoreServiceKey
  316. if (auth?.type === "api") {
  317. Env.set("AICORE_SERVICE_KEY", auth.key)
  318. return auth.key
  319. }
  320. return undefined
  321. })
  322. const deploymentId = Env.get("AICORE_DEPLOYMENT_ID")
  323. const resourceGroup = Env.get("AICORE_RESOURCE_GROUP")
  324. return {
  325. autoload: !!envServiceKey,
  326. options: envServiceKey ? { deploymentId, resourceGroup } : {},
  327. async getModel(sdk: any, modelID: string) {
  328. return sdk(modelID)
  329. },
  330. }
  331. },
  332. zenmux: async () => {
  333. return {
  334. autoload: false,
  335. options: {
  336. headers: {
  337. "HTTP-Referer": "https://opencode.ai/",
  338. "X-Title": "opencode",
  339. },
  340. },
  341. }
  342. },
  343. "cloudflare-ai-gateway": async (input) => {
  344. const accountId = Env.get("CLOUDFLARE_ACCOUNT_ID")
  345. const gateway = Env.get("CLOUDFLARE_GATEWAY_ID")
  346. if (!accountId || !gateway) return { autoload: false }
  347. // Get API token from env or auth prompt
  348. const apiToken = await (async () => {
  349. const envToken = Env.get("CLOUDFLARE_API_TOKEN")
  350. if (envToken) return envToken
  351. const auth = await Auth.get(input.id)
  352. if (auth?.type === "api") return auth.key
  353. return undefined
  354. })()
  355. return {
  356. autoload: true,
  357. async getModel(sdk: any, modelID: string, _options?: Record<string, any>) {
  358. return sdk.chat(modelID)
  359. },
  360. options: {
  361. baseURL: `https://gateway.ai.cloudflare.com/v1/${accountId}/${gateway}/compat`,
  362. headers: {
  363. // Cloudflare AI Gateway uses cf-aig-authorization for authenticated gateways
  364. // This enables Unified Billing where Cloudflare handles upstream provider auth
  365. ...(apiToken ? { "cf-aig-authorization": `Bearer ${apiToken}` } : {}),
  366. "HTTP-Referer": "https://opencode.ai/",
  367. "X-Title": "opencode",
  368. },
  369. // Custom fetch to strip Authorization header - AI Gateway uses cf-aig-authorization instead
  370. // Sending Authorization header with invalid value causes auth errors
  371. fetch: async (input: RequestInfo | URL, init?: RequestInit) => {
  372. const headers = new Headers(init?.headers)
  373. headers.delete("Authorization")
  374. return fetch(input, { ...init, headers })
  375. },
  376. },
  377. }
  378. },
  379. cerebras: async () => {
  380. return {
  381. autoload: false,
  382. options: {
  383. headers: {
  384. "X-Cerebras-3rd-Party-Integration": "opencode",
  385. },
  386. },
  387. }
  388. },
  389. }
  390. export const Model = z
  391. .object({
  392. id: z.string(),
  393. providerID: z.string(),
  394. api: z.object({
  395. id: z.string(),
  396. url: z.string(),
  397. npm: z.string(),
  398. }),
  399. name: z.string(),
  400. family: z.string().optional(),
  401. capabilities: z.object({
  402. temperature: z.boolean(),
  403. reasoning: z.boolean(),
  404. attachment: z.boolean(),
  405. toolcall: z.boolean(),
  406. input: z.object({
  407. text: z.boolean(),
  408. audio: z.boolean(),
  409. image: z.boolean(),
  410. video: z.boolean(),
  411. pdf: z.boolean(),
  412. }),
  413. output: z.object({
  414. text: z.boolean(),
  415. audio: z.boolean(),
  416. image: z.boolean(),
  417. video: z.boolean(),
  418. pdf: z.boolean(),
  419. }),
  420. interleaved: z.union([
  421. z.boolean(),
  422. z.object({
  423. field: z.enum(["reasoning_content", "reasoning_details"]),
  424. }),
  425. ]),
  426. }),
  427. cost: z.object({
  428. input: z.number(),
  429. output: z.number(),
  430. cache: z.object({
  431. read: z.number(),
  432. write: z.number(),
  433. }),
  434. experimentalOver200K: z
  435. .object({
  436. input: z.number(),
  437. output: z.number(),
  438. cache: z.object({
  439. read: z.number(),
  440. write: z.number(),
  441. }),
  442. })
  443. .optional(),
  444. }),
  445. limit: z.object({
  446. context: z.number(),
  447. output: z.number(),
  448. }),
  449. status: z.enum(["alpha", "beta", "deprecated", "active"]),
  450. options: z.record(z.string(), z.any()),
  451. headers: z.record(z.string(), z.string()),
  452. release_date: z.string(),
  453. variants: z.record(z.string(), z.record(z.string(), z.any())).optional(),
  454. })
  455. .meta({
  456. ref: "Model",
  457. })
  458. export type Model = z.infer<typeof Model>
  459. export const Info = z
  460. .object({
  461. id: z.string(),
  462. name: z.string(),
  463. source: z.enum(["env", "config", "custom", "api"]),
  464. env: z.string().array(),
  465. key: z.string().optional(),
  466. options: z.record(z.string(), z.any()),
  467. models: z.record(z.string(), Model),
  468. })
  469. .meta({
  470. ref: "Provider",
  471. })
  472. export type Info = z.infer<typeof Info>
  473. function fromModelsDevModel(provider: ModelsDev.Provider, model: ModelsDev.Model): Model {
  474. const m: Model = {
  475. id: model.id,
  476. providerID: provider.id,
  477. name: model.name,
  478. family: model.family,
  479. api: {
  480. id: model.id,
  481. url: provider.api!,
  482. npm: model.provider?.npm ?? provider.npm ?? "@ai-sdk/openai-compatible",
  483. },
  484. status: model.status ?? "active",
  485. headers: model.headers ?? {},
  486. options: model.options ?? {},
  487. cost: {
  488. input: model.cost?.input ?? 0,
  489. output: model.cost?.output ?? 0,
  490. cache: {
  491. read: model.cost?.cache_read ?? 0,
  492. write: model.cost?.cache_write ?? 0,
  493. },
  494. experimentalOver200K: model.cost?.context_over_200k
  495. ? {
  496. cache: {
  497. read: model.cost.context_over_200k.cache_read ?? 0,
  498. write: model.cost.context_over_200k.cache_write ?? 0,
  499. },
  500. input: model.cost.context_over_200k.input,
  501. output: model.cost.context_over_200k.output,
  502. }
  503. : undefined,
  504. },
  505. limit: {
  506. context: model.limit.context,
  507. output: model.limit.output,
  508. },
  509. capabilities: {
  510. temperature: model.temperature,
  511. reasoning: model.reasoning,
  512. attachment: model.attachment,
  513. toolcall: model.tool_call,
  514. input: {
  515. text: model.modalities?.input?.includes("text") ?? false,
  516. audio: model.modalities?.input?.includes("audio") ?? false,
  517. image: model.modalities?.input?.includes("image") ?? false,
  518. video: model.modalities?.input?.includes("video") ?? false,
  519. pdf: model.modalities?.input?.includes("pdf") ?? false,
  520. },
  521. output: {
  522. text: model.modalities?.output?.includes("text") ?? false,
  523. audio: model.modalities?.output?.includes("audio") ?? false,
  524. image: model.modalities?.output?.includes("image") ?? false,
  525. video: model.modalities?.output?.includes("video") ?? false,
  526. pdf: model.modalities?.output?.includes("pdf") ?? false,
  527. },
  528. interleaved: model.interleaved ?? false,
  529. },
  530. release_date: model.release_date,
  531. variants: {},
  532. }
  533. m.variants = mapValues(ProviderTransform.variants(m), (v) => v)
  534. return m
  535. }
  536. export function fromModelsDevProvider(provider: ModelsDev.Provider): Info {
  537. return {
  538. id: provider.id,
  539. source: "custom",
  540. name: provider.name,
  541. env: provider.env ?? [],
  542. options: {},
  543. models: mapValues(provider.models, (model) => fromModelsDevModel(provider, model)),
  544. }
  545. }
  546. const state = Instance.state(async () => {
  547. using _ = log.time("state")
  548. const config = await Config.get()
  549. const modelsDev = await ModelsDev.get()
  550. const database = mapValues(modelsDev, fromModelsDevProvider)
  551. const disabled = new Set(config.disabled_providers ?? [])
  552. const enabled = config.enabled_providers ? new Set(config.enabled_providers) : null
  553. function isProviderAllowed(providerID: string): boolean {
  554. if (enabled && !enabled.has(providerID)) return false
  555. if (disabled.has(providerID)) return false
  556. return true
  557. }
  558. const providers: { [providerID: string]: Info } = {}
  559. const languages = new Map<string, LanguageModelV2>()
  560. const modelLoaders: {
  561. [providerID: string]: CustomModelLoader
  562. } = {}
  563. const sdk = new Map<number, SDK>()
  564. log.info("init")
  565. const configProviders = Object.entries(config.provider ?? {})
  566. // Add GitHub Copilot Enterprise provider that inherits from GitHub Copilot
  567. if (database["github-copilot"]) {
  568. const githubCopilot = database["github-copilot"]
  569. database["github-copilot-enterprise"] = {
  570. ...githubCopilot,
  571. id: "github-copilot-enterprise",
  572. name: "GitHub Copilot Enterprise",
  573. models: mapValues(githubCopilot.models, (model) => ({
  574. ...model,
  575. providerID: "github-copilot-enterprise",
  576. })),
  577. }
  578. }
  579. function mergeProvider(providerID: string, provider: Partial<Info>) {
  580. const existing = providers[providerID]
  581. if (existing) {
  582. // @ts-expect-error
  583. providers[providerID] = mergeDeep(existing, provider)
  584. return
  585. }
  586. const match = database[providerID]
  587. if (!match) return
  588. // @ts-expect-error
  589. providers[providerID] = mergeDeep(match, provider)
  590. }
  591. // extend database from config
  592. for (const [providerID, provider] of configProviders) {
  593. const existing = database[providerID]
  594. const parsed: Info = {
  595. id: providerID,
  596. name: provider.name ?? existing?.name ?? providerID,
  597. env: provider.env ?? existing?.env ?? [],
  598. options: mergeDeep(existing?.options ?? {}, provider.options ?? {}),
  599. source: "config",
  600. models: existing?.models ?? {},
  601. }
  602. for (const [modelID, model] of Object.entries(provider.models ?? {})) {
  603. const existingModel = parsed.models[model.id ?? modelID]
  604. const name = iife(() => {
  605. if (model.name) return model.name
  606. if (model.id && model.id !== modelID) return modelID
  607. return existingModel?.name ?? modelID
  608. })
  609. const parsedModel: Model = {
  610. id: modelID,
  611. api: {
  612. id: model.id ?? existingModel?.api.id ?? modelID,
  613. npm:
  614. model.provider?.npm ??
  615. provider.npm ??
  616. existingModel?.api.npm ??
  617. modelsDev[providerID]?.npm ??
  618. "@ai-sdk/openai-compatible",
  619. url: provider?.api ?? existingModel?.api.url ?? modelsDev[providerID]?.api,
  620. },
  621. status: model.status ?? existingModel?.status ?? "active",
  622. name,
  623. providerID,
  624. capabilities: {
  625. temperature: model.temperature ?? existingModel?.capabilities.temperature ?? false,
  626. reasoning: model.reasoning ?? existingModel?.capabilities.reasoning ?? false,
  627. attachment: model.attachment ?? existingModel?.capabilities.attachment ?? false,
  628. toolcall: model.tool_call ?? existingModel?.capabilities.toolcall ?? true,
  629. input: {
  630. text: model.modalities?.input?.includes("text") ?? existingModel?.capabilities.input.text ?? true,
  631. audio: model.modalities?.input?.includes("audio") ?? existingModel?.capabilities.input.audio ?? false,
  632. image: model.modalities?.input?.includes("image") ?? existingModel?.capabilities.input.image ?? false,
  633. video: model.modalities?.input?.includes("video") ?? existingModel?.capabilities.input.video ?? false,
  634. pdf: model.modalities?.input?.includes("pdf") ?? existingModel?.capabilities.input.pdf ?? false,
  635. },
  636. output: {
  637. text: model.modalities?.output?.includes("text") ?? existingModel?.capabilities.output.text ?? true,
  638. audio: model.modalities?.output?.includes("audio") ?? existingModel?.capabilities.output.audio ?? false,
  639. image: model.modalities?.output?.includes("image") ?? existingModel?.capabilities.output.image ?? false,
  640. video: model.modalities?.output?.includes("video") ?? existingModel?.capabilities.output.video ?? false,
  641. pdf: model.modalities?.output?.includes("pdf") ?? existingModel?.capabilities.output.pdf ?? false,
  642. },
  643. interleaved: model.interleaved ?? false,
  644. },
  645. cost: {
  646. input: model?.cost?.input ?? existingModel?.cost?.input ?? 0,
  647. output: model?.cost?.output ?? existingModel?.cost?.output ?? 0,
  648. cache: {
  649. read: model?.cost?.cache_read ?? existingModel?.cost?.cache.read ?? 0,
  650. write: model?.cost?.cache_write ?? existingModel?.cost?.cache.write ?? 0,
  651. },
  652. },
  653. options: mergeDeep(existingModel?.options ?? {}, model.options ?? {}),
  654. limit: {
  655. context: model.limit?.context ?? existingModel?.limit?.context ?? 0,
  656. output: model.limit?.output ?? existingModel?.limit?.output ?? 0,
  657. },
  658. headers: mergeDeep(existingModel?.headers ?? {}, model.headers ?? {}),
  659. family: model.family ?? existingModel?.family ?? "",
  660. release_date: model.release_date ?? existingModel?.release_date ?? "",
  661. variants: {},
  662. }
  663. const merged = mergeDeep(ProviderTransform.variants(parsedModel), model.variants ?? {})
  664. parsedModel.variants = mapValues(
  665. pickBy(merged, (v) => !v.disabled),
  666. (v) => omit(v, ["disabled"]),
  667. )
  668. parsed.models[modelID] = parsedModel
  669. }
  670. database[providerID] = parsed
  671. }
  672. // load env
  673. const env = Env.all()
  674. for (const [providerID, provider] of Object.entries(database)) {
  675. if (disabled.has(providerID)) continue
  676. const apiKey = provider.env.map((item) => env[item]).find(Boolean)
  677. if (!apiKey) continue
  678. mergeProvider(providerID, {
  679. source: "env",
  680. key: provider.env.length === 1 ? apiKey : undefined,
  681. })
  682. }
  683. // load apikeys
  684. for (const [providerID, provider] of Object.entries(await Auth.all())) {
  685. if (disabled.has(providerID)) continue
  686. if (provider.type === "api") {
  687. mergeProvider(providerID, {
  688. source: "api",
  689. key: provider.key,
  690. })
  691. }
  692. }
  693. for (const plugin of await Plugin.list()) {
  694. if (!plugin.auth) continue
  695. const providerID = plugin.auth.provider
  696. if (disabled.has(providerID)) continue
  697. // For github-copilot plugin, check if auth exists for either github-copilot or github-copilot-enterprise
  698. let hasAuth = false
  699. const auth = await Auth.get(providerID)
  700. if (auth) hasAuth = true
  701. // Special handling for github-copilot: also check for enterprise auth
  702. if (providerID === "github-copilot" && !hasAuth) {
  703. const enterpriseAuth = await Auth.get("github-copilot-enterprise")
  704. if (enterpriseAuth) hasAuth = true
  705. }
  706. if (!hasAuth) continue
  707. if (!plugin.auth.loader) continue
  708. // Load for the main provider if auth exists
  709. if (auth) {
  710. const options = await plugin.auth.loader(() => Auth.get(providerID) as any, database[plugin.auth.provider])
  711. mergeProvider(plugin.auth.provider, {
  712. source: "custom",
  713. options: options,
  714. })
  715. }
  716. // If this is github-copilot plugin, also register for github-copilot-enterprise if auth exists
  717. if (providerID === "github-copilot") {
  718. const enterpriseProviderID = "github-copilot-enterprise"
  719. if (!disabled.has(enterpriseProviderID)) {
  720. const enterpriseAuth = await Auth.get(enterpriseProviderID)
  721. if (enterpriseAuth) {
  722. const enterpriseOptions = await plugin.auth.loader(
  723. () => Auth.get(enterpriseProviderID) as any,
  724. database[enterpriseProviderID],
  725. )
  726. mergeProvider(enterpriseProviderID, {
  727. source: "custom",
  728. options: enterpriseOptions,
  729. })
  730. }
  731. }
  732. }
  733. }
  734. for (const [providerID, fn] of Object.entries(CUSTOM_LOADERS)) {
  735. if (disabled.has(providerID)) continue
  736. const result = await fn(database[providerID])
  737. if (result && (result.autoload || providers[providerID])) {
  738. if (result.getModel) modelLoaders[providerID] = result.getModel
  739. mergeProvider(providerID, {
  740. source: "custom",
  741. options: result.options,
  742. })
  743. }
  744. }
  745. // load config
  746. for (const [providerID, provider] of configProviders) {
  747. const partial: Partial<Info> = { source: "config" }
  748. if (provider.env) partial.env = provider.env
  749. if (provider.name) partial.name = provider.name
  750. if (provider.options) partial.options = provider.options
  751. mergeProvider(providerID, partial)
  752. }
  753. for (const [providerID, provider] of Object.entries(providers)) {
  754. if (!isProviderAllowed(providerID)) {
  755. delete providers[providerID]
  756. continue
  757. }
  758. if (providerID === "github-copilot" || providerID === "github-copilot-enterprise") {
  759. provider.models = mapValues(provider.models, (model) => ({
  760. ...model,
  761. api: {
  762. ...model.api,
  763. npm: "@ai-sdk/github-copilot",
  764. },
  765. }))
  766. }
  767. const configProvider = config.provider?.[providerID]
  768. for (const [modelID, model] of Object.entries(provider.models)) {
  769. model.api.id = model.api.id ?? model.id ?? modelID
  770. if (modelID === "gpt-5-chat-latest" || (providerID === "openrouter" && modelID === "openai/gpt-5-chat"))
  771. delete provider.models[modelID]
  772. if (model.status === "alpha" && !Flag.OPENCODE_ENABLE_EXPERIMENTAL_MODELS) delete provider.models[modelID]
  773. if (
  774. (configProvider?.blacklist && configProvider.blacklist.includes(modelID)) ||
  775. (configProvider?.whitelist && !configProvider.whitelist.includes(modelID))
  776. )
  777. delete provider.models[modelID]
  778. // Filter out disabled variants from config
  779. const configVariants = configProvider?.models?.[modelID]?.variants
  780. if (configVariants && model.variants) {
  781. const merged = mergeDeep(model.variants, configVariants)
  782. model.variants = mapValues(
  783. pickBy(merged, (v) => !v.disabled),
  784. (v) => omit(v, ["disabled"]),
  785. )
  786. }
  787. }
  788. if (Object.keys(provider.models).length === 0) {
  789. delete providers[providerID]
  790. continue
  791. }
  792. log.info("found", { providerID })
  793. }
  794. return {
  795. models: languages,
  796. providers,
  797. sdk,
  798. modelLoaders,
  799. }
  800. })
  801. export async function list() {
  802. return state().then((state) => state.providers)
  803. }
  804. async function getSDK(model: Model) {
  805. try {
  806. using _ = log.time("getSDK", {
  807. providerID: model.providerID,
  808. })
  809. const s = await state()
  810. const provider = s.providers[model.providerID]
  811. const options = { ...provider.options }
  812. if (model.api.npm.includes("@ai-sdk/openai-compatible") && options["includeUsage"] !== false) {
  813. options["includeUsage"] = true
  814. }
  815. if (!options["baseURL"]) options["baseURL"] = model.api.url
  816. if (options["apiKey"] === undefined && provider.key) options["apiKey"] = provider.key
  817. if (model.headers)
  818. options["headers"] = {
  819. ...options["headers"],
  820. ...model.headers,
  821. }
  822. const key = Bun.hash.xxHash32(JSON.stringify({ npm: model.api.npm, options }))
  823. const existing = s.sdk.get(key)
  824. if (existing) return existing
  825. const customFetch = options["fetch"]
  826. options["fetch"] = async (input: any, init?: BunFetchRequestInit) => {
  827. // Preserve custom fetch if it exists, wrap it with timeout logic
  828. const fetchFn = customFetch ?? fetch
  829. const opts = init ?? {}
  830. if (options["timeout"] !== undefined && options["timeout"] !== null) {
  831. const signals: AbortSignal[] = []
  832. if (opts.signal) signals.push(opts.signal)
  833. if (options["timeout"] !== false) signals.push(AbortSignal.timeout(options["timeout"]))
  834. const combined = signals.length > 1 ? AbortSignal.any(signals) : signals[0]
  835. opts.signal = combined
  836. }
  837. return fetchFn(input, {
  838. ...opts,
  839. // @ts-ignore see here: https://github.com/oven-sh/bun/issues/16682
  840. timeout: false,
  841. })
  842. }
  843. // Special case: google-vertex-anthropic uses a subpath import
  844. const bundledKey =
  845. model.providerID === "google-vertex-anthropic" ? "@ai-sdk/google-vertex/anthropic" : model.api.npm
  846. const bundledFn = BUNDLED_PROVIDERS[bundledKey]
  847. if (bundledFn) {
  848. log.info("using bundled provider", { providerID: model.providerID, pkg: bundledKey })
  849. const loaded = bundledFn({
  850. name: model.providerID,
  851. ...options,
  852. })
  853. s.sdk.set(key, loaded)
  854. return loaded as SDK
  855. }
  856. let installedPath: string
  857. if (!model.api.npm.startsWith("file://")) {
  858. installedPath = await BunProc.install(model.api.npm, "latest")
  859. } else {
  860. log.info("loading local provider", { pkg: model.api.npm })
  861. installedPath = model.api.npm
  862. }
  863. const mod = await import(installedPath)
  864. const fn = mod[Object.keys(mod).find((key) => key.startsWith("create"))!]
  865. const loaded = fn({
  866. name: model.providerID,
  867. ...options,
  868. })
  869. s.sdk.set(key, loaded)
  870. return loaded as SDK
  871. } catch (e) {
  872. throw new InitError({ providerID: model.providerID }, { cause: e })
  873. }
  874. }
  875. export async function getProvider(providerID: string) {
  876. return state().then((s) => s.providers[providerID])
  877. }
  878. export async function getModel(providerID: string, modelID: string) {
  879. const s = await state()
  880. const provider = s.providers[providerID]
  881. if (!provider) {
  882. const availableProviders = Object.keys(s.providers)
  883. const matches = fuzzysort.go(providerID, availableProviders, { limit: 3, threshold: -10000 })
  884. const suggestions = matches.map((m) => m.target)
  885. throw new ModelNotFoundError({ providerID, modelID, suggestions })
  886. }
  887. const info = provider.models[modelID]
  888. if (!info) {
  889. const availableModels = Object.keys(provider.models)
  890. const matches = fuzzysort.go(modelID, availableModels, { limit: 3, threshold: -10000 })
  891. const suggestions = matches.map((m) => m.target)
  892. throw new ModelNotFoundError({ providerID, modelID, suggestions })
  893. }
  894. return info
  895. }
  896. export async function getLanguage(model: Model): Promise<LanguageModelV2> {
  897. const s = await state()
  898. const key = `${model.providerID}/${model.id}`
  899. if (s.models.has(key)) return s.models.get(key)!
  900. const provider = s.providers[model.providerID]
  901. const sdk = await getSDK(model)
  902. try {
  903. const language = s.modelLoaders[model.providerID]
  904. ? await s.modelLoaders[model.providerID](sdk, model.api.id, provider.options)
  905. : sdk.languageModel(model.api.id)
  906. s.models.set(key, language)
  907. return language
  908. } catch (e) {
  909. if (e instanceof NoSuchModelError)
  910. throw new ModelNotFoundError(
  911. {
  912. modelID: model.id,
  913. providerID: model.providerID,
  914. },
  915. { cause: e },
  916. )
  917. throw e
  918. }
  919. }
  920. export async function closest(providerID: string, query: string[]) {
  921. const s = await state()
  922. const provider = s.providers[providerID]
  923. if (!provider) return undefined
  924. for (const item of query) {
  925. for (const modelID of Object.keys(provider.models)) {
  926. if (modelID.includes(item))
  927. return {
  928. providerID,
  929. modelID,
  930. }
  931. }
  932. }
  933. }
  934. export async function getSmallModel(providerID: string) {
  935. const cfg = await Config.get()
  936. if (cfg.small_model) {
  937. const parsed = parseModel(cfg.small_model)
  938. return getModel(parsed.providerID, parsed.modelID)
  939. }
  940. const provider = await state().then((state) => state.providers[providerID])
  941. if (provider) {
  942. let priority = [
  943. "claude-haiku-4-5",
  944. "claude-haiku-4.5",
  945. "3-5-haiku",
  946. "3.5-haiku",
  947. "gemini-3-flash",
  948. "gemini-2.5-flash",
  949. "gpt-5-nano",
  950. ]
  951. if (providerID.startsWith("opencode")) {
  952. priority = ["gpt-5-nano"]
  953. }
  954. for (const item of priority) {
  955. for (const model of Object.keys(provider.models)) {
  956. if (model.includes(item)) return getModel(providerID, model)
  957. }
  958. }
  959. }
  960. // Check if opencode provider is available before using it
  961. const opencodeProvider = await state().then((state) => state.providers["opencode"])
  962. if (opencodeProvider && opencodeProvider.models["gpt-5-nano"]) {
  963. return getModel("opencode", "gpt-5-nano")
  964. }
  965. return undefined
  966. }
  967. const priority = ["gpt-5", "claude-sonnet-4", "big-pickle", "gemini-3-pro"]
  968. export function sort(models: Model[]) {
  969. return sortBy(
  970. models,
  971. [(model) => priority.findIndex((filter) => model.id.includes(filter)), "desc"],
  972. [(model) => (model.id.includes("latest") ? 0 : 1), "asc"],
  973. [(model) => model.id, "desc"],
  974. )
  975. }
  976. export async function defaultModel() {
  977. const cfg = await Config.get()
  978. if (cfg.model) return parseModel(cfg.model)
  979. const provider = await list()
  980. .then((val) => Object.values(val))
  981. .then((x) => x.find((p) => !cfg.provider || Object.keys(cfg.provider).includes(p.id)))
  982. if (!provider) throw new Error("no providers found")
  983. const [model] = sort(Object.values(provider.models))
  984. if (!model) throw new Error("no models found")
  985. return {
  986. providerID: provider.id,
  987. modelID: model.id,
  988. }
  989. }
  990. export function parseModel(model: string) {
  991. const [providerID, ...rest] = model.split("/")
  992. return {
  993. providerID: providerID,
  994. modelID: rest.join("/"),
  995. }
  996. }
  997. export const ModelNotFoundError = NamedError.create(
  998. "ProviderModelNotFoundError",
  999. z.object({
  1000. providerID: z.string(),
  1001. modelID: z.string(),
  1002. suggestions: z.array(z.string()).optional(),
  1003. }),
  1004. )
  1005. export const InitError = NamedError.create(
  1006. "ProviderInitError",
  1007. z.object({
  1008. providerID: z.string(),
  1009. }),
  1010. )
  1011. }