
feat: adding default headers and testing for litellm fetcher (#5242)

* chore: adding x-title header and testing for litellm

* chore: indentation fix and headers order fix

* chore: spacing fix

* chore: removed white space

* fix: allow user headers to override default headers and clean up formatting

- Reorder header spread in router-provider.ts so user-provided openAiHeaders can override DEFAULT_HEADERS
- Remove unnecessary blank lines after imports for consistency
- This matches the pattern used in openai.ts where DEFAULT_HEADERS come first
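
For reference, the precedence rule here is just object-spread order: later spreads overwrite earlier keys. A minimal TypeScript sketch (header values are hypothetical stand-ins, not the real DEFAULT_HEADERS contents):

	const defaults = { "X-Title": "Roo Code" } // stand-in for DEFAULT_HEADERS
	const user = { "X-Title": "My Title" }     // stand-in for openAiHeaders

	const userWins = { ...defaults, ...user }    // { "X-Title": "My Title" }
	const defaultsWin = { ...user, ...defaults } // { "X-Title": "Roo Code" }

Spreading DEFAULT_HEADERS first is what lets openAiHeaders win in router-provider.ts.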

---------

Co-authored-by: Brendan-Z <[email protected]>
Co-authored-by: Daniel Riccio <[email protected]>
Andrew Shu, 6 months ago
commit 569b276d93

+ 4 - 0
src/api/providers/fetchers/__tests__/litellm.spec.ts

@@ -4,6 +4,7 @@ vi.mock("axios")
 import type { Mock } from "vitest"
 import axios from "axios"
 import { getLiteLLMModels } from "../litellm"
+import { DEFAULT_HEADERS } from "../../constants"
 
 const mockedAxios = axios as typeof axios & {
 	get: Mock
@@ -32,6 +33,7 @@ describe("getLiteLLMModels", () => {
 			headers: {
 				Authorization: "Bearer test-api-key",
 				"Content-Type": "application/json",
+				...DEFAULT_HEADERS,
 			},
 			timeout: 5000,
 		})
@@ -83,6 +85,7 @@ describe("getLiteLLMModels", () => {
 			headers: {
 				Authorization: "Bearer test-api-key",
 				"Content-Type": "application/json",
+				...DEFAULT_HEADERS,
 			},
 			timeout: 5000,
 		})
@@ -125,6 +128,7 @@ describe("getLiteLLMModels", () => {
 		expect(mockedAxios.get).toHaveBeenCalledWith("http://localhost:4000/v1/model/info", {
 			headers: {
 				"Content-Type": "application/json",
+				...DEFAULT_HEADERS,
 			},
 			timeout: 5000,
 		})

+ 2 - 0
src/api/providers/fetchers/litellm.ts

@@ -4,6 +4,7 @@ import { LITELLM_COMPUTER_USE_MODELS } from "@roo-code/types"
 
 import type { ModelRecord } from "../../../shared/api"
 
+import { DEFAULT_HEADERS } from "../constants"
 /**
  * Fetches available models from a LiteLLM server
  *
@@ -16,6 +17,7 @@ export async function getLiteLLMModels(apiKey: string, baseUrl: string): Promise
 	try {
 		const headers: Record<string, string> = {
 			"Content-Type": "application/json",
+			...DEFAULT_HEADERS,
 		}
 
 		if (apiKey) {

+ 10 - 1
src/api/providers/router-provider.ts

@@ -7,6 +7,8 @@ import { ApiHandlerOptions, RouterName, ModelRecord } from "../../shared/api"
 import { BaseProvider } from "./base-provider"
 import { getModels } from "./fetchers/modelCache"
 
+import { DEFAULT_HEADERS } from "./constants"
+
 type RouterProviderOptions = {
 	name: RouterName
 	baseURL: string
@@ -43,7 +45,14 @@ export abstract class RouterProvider extends BaseProvider {
 		this.defaultModelId = defaultModelId
 		this.defaultModelInfo = defaultModelInfo
 
-		this.client = new OpenAI({ baseURL, apiKey })
+		this.client = new OpenAI({
+			baseURL,
+			apiKey,
+			defaultHeaders: {
+				...DEFAULT_HEADERS,
+				...(options.openAiHeaders || {}),
+			},
+		})
 	}
 
 	public async fetchModel() {
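
Here defaultHeaders is the standard option on the openai npm client for headers attached to every request, so the merge happens once at construction time. A minimal usage sketch (the baseURL, apiKey, and custom header are hypothetical examples):

	import OpenAI from "openai"
	import { DEFAULT_HEADERS } from "./constants"

	const openAiHeaders = { "X-Title": "My Fork" } // hypothetical user-supplied headers

	const client = new OpenAI({
		baseURL: "http://localhost:4000",
		apiKey: "test-api-key",
		// Defaults first, user headers last: on a key collision the user value wins.
		defaultHeaders: { ...DEFAULT_HEADERS, ...(openAiHeaders || {}) },
	})
	// Every request made through `client` now carries the merged headers.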