Răsfoiți Sursa

Merge pull request #686 from shpigunov/origin/feature/support-o3-mini

support o3-mini
Matt Rubens 1 an în urmă
părinte
comite
d1f23d2520
2 fișiere modificate cu 23 adăugiri și 0 ștergeri
  1. 15 0
      src/api/providers/__tests__/openai-native.test.ts
  2. 8 0
      src/shared/api.ts

+ 15 - 0
src/api/providers/__tests__/openai-native.test.ts

@@ -289,6 +289,21 @@ describe("OpenAiNativeHandler", () => {
 			})
 		})
 
+		it("should complete prompt successfully with o3-mini model", async () => {
+			handler = new OpenAiNativeHandler({
+				apiModelId: "o3-mini",
+				openAiNativeApiKey: "test-api-key",
+			})
+
+			const result = await handler.completePrompt("Test prompt")
+			expect(result).toBe("Test response")
+			expect(mockCreate).toHaveBeenCalledWith({
+				model: "o3-mini",
+				messages: [{ role: "user", content: "Test prompt" }],
+				temperature: 0,
+			})
+		})
+
 		it("should handle API errors", async () => {
 			mockCreate.mockRejectedValueOnce(new Error("API Error"))
 			await expect(handler.completePrompt("Test prompt")).rejects.toThrow(

+ 8 - 0
src/shared/api.ts

@@ -510,6 +510,14 @@ export type OpenAiNativeModelId = keyof typeof openAiNativeModels
 export const openAiNativeDefaultModelId: OpenAiNativeModelId = "gpt-4o"
 export const openAiNativeModels = {
 	// don't support tool use yet
+	"o3-mini": {
+		maxTokens: 100_000,
+		contextWindow: 200_000,
+		supportsImages: false,
+		supportsPromptCache: false,
+		inputPrice: 1.1,
+		outputPrice: 4.4,
+	},
 	o1: {
 		maxTokens: 100_000,
 		contextWindow: 200_000,