Просмотр исходного кода

feat: use anthropic compat messages api for anthropic models through copilot

Aiden Cline 3 недели назад
Родитель
Commit
ac53a372b0

+ 16 - 6
packages/opencode/src/plugin/copilot.ts

@@ -26,6 +26,9 @@ export async function CopilotAuthPlugin(input: PluginInput): Promise<Hooks> {
         const info = await getAuth()
         if (!info || info.type !== "oauth") return {}
 
+        const enterpriseUrl = info.enterpriseUrl
+        const baseURL = enterpriseUrl ? `https://copilot-api.${normalizeDomain(enterpriseUrl)}` : undefined
+
         if (provider && provider.models) {
           for (const model of Object.values(provider.models)) {
             model.cost = {
@@ -36,16 +39,23 @@ export async function CopilotAuthPlugin(input: PluginInput): Promise<Hooks> {
                 write: 0,
               },
             }
+
+            // TODO: move some of this hacky-ness to models.dev presets once we have better grasp of things here...
+            const base = baseURL ?? model.api.url
+            const claude = model.id.includes("claude")
+            const url = iife(() => {
+              if (!claude) return base
+              if (base.endsWith("/v1")) return base
+              if (base.endsWith("/")) return `${base}v1`
+              return `${base}/v1`
+            })
+
+            model.api.url = url
+            model.api.npm = claude ? "@ai-sdk/anthropic" : "@ai-sdk/github-copilot"
           }
         }
 
-        const enterpriseUrl = info.enterpriseUrl
-        const baseURL = enterpriseUrl
-          ? `https://copilot-api.${normalizeDomain(enterpriseUrl)}`
-          : "https://api.githubcopilot.com"
-
         return {
-          baseURL,
           apiKey: "",
           async fetch(request: RequestInfo | URL, init?: RequestInit) {
             const info = await getAuth()

+ 5 - 4
packages/opencode/src/provider/provider.ts

@@ -132,6 +132,7 @@ export namespace Provider {
       return {
         autoload: false,
         async getModel(sdk: any, modelID: string, _options?: Record<string, any>) {
+          if (sdk.responses === undefined && sdk.chat === undefined) return sdk.languageModel(modelID)
           return shouldUseCopilotResponsesApi(modelID) ? sdk.responses(modelID) : sdk.chat(modelID)
         },
         options: {},
@@ -141,6 +142,7 @@ export namespace Provider {
       return {
         autoload: false,
         async getModel(sdk: any, modelID: string, _options?: Record<string, any>) {
+          if (sdk.responses === undefined && sdk.chat === undefined) return sdk.languageModel(modelID)
           return shouldUseCopilotResponsesApi(modelID) ? sdk.responses(modelID) : sdk.chat(modelID)
         },
         options: {},
@@ -601,10 +603,7 @@ export namespace Provider {
       api: {
         id: model.id,
         url: provider.api!,
-        npm: iife(() => {
-          if (provider.id.startsWith("github-copilot")) return "@ai-sdk/github-copilot"
-          return model.provider?.npm ?? provider.npm ?? "@ai-sdk/openai-compatible"
-        }),
+        npm: model.provider?.npm ?? provider.npm ?? "@ai-sdk/openai-compatible",
       },
       status: model.status ?? "active",
       headers: model.headers ?? {},
@@ -924,6 +923,8 @@ export namespace Provider {
         )
           delete provider.models[modelID]
 
+        model.variants = mapValues(ProviderTransform.variants(model), (v) => v)
+
         // Filter out disabled variants from config
         const configVariants = configProvider?.models?.[modelID]?.variants
         if (configVariants && model.variants) {

+ 14 - 8
packages/opencode/src/session/llm.ts

@@ -150,14 +150,20 @@ export namespace LLM {
       },
     )
 
-    const maxOutputTokens = isCodex
-      ? undefined
-      : ProviderTransform.maxOutputTokens(
-          input.model.api.npm,
-          params.options,
-          input.model.limit.output,
-          OUTPUT_TOKEN_MAX,
-        )
+    const maxOutputTokens = isCodex ? undefined : undefined
+    log.info("max_output_tokens", {
+      tokens: ProviderTransform.maxOutputTokens(
+        input.model.api.npm,
+        params.options,
+        input.model.limit.output,
+        OUTPUT_TOKEN_MAX,
+      ),
+      modelOptions: params.options,
+      outputLimit: input.model.limit.output,
+    })
+    // tokens = 32000
+    // outputLimit = 64000
+    // modelOptions={"reasoningEffort":"minimal"}
 
     const tools = await resolveTools(input)