Преглед изворног кода

Add OpenAI GPT-5.4 mini and nano models (#11946)

Peter Dave Hello пре 3 недеље
родитељ
комит
137d3f4fd8

+ 6 - 0
.changeset/add-openai-gpt-5-4-mini-nano.md

@@ -0,0 +1,6 @@
+---
+"roo-cline": patch
+"@roo-code/types": patch
+---
+
+Add support for OpenAI `gpt-5.4-mini` and `gpt-5.4-nano` models.

+ 15 - 0
packages/types/src/providers/openai-codex.ts

@@ -187,6 +187,21 @@ export const openAiCodexModels = {
 		supportsTemperature: false,
 		description: "GPT-5.4: Most capable model via ChatGPT subscription",
 	},
+	"gpt-5.4-mini": {
+		maxTokens: 128000,
+		contextWindow: 400000,
+		includedTools: ["apply_patch"],
+		excludedTools: ["apply_diff", "write_to_file"],
+		supportsImages: true,
+		supportsPromptCache: true,
+		supportsReasoningEffort: ["none", "low", "medium", "high", "xhigh"],
+		reasoningEffort: "none",
+		inputPrice: 0,
+		outputPrice: 0,
+		supportsVerbosity: true,
+		supportsTemperature: false,
+		description: "GPT-5.4 Mini: Lower-cost GPT-5.4 model via ChatGPT subscription",
+	},
 	"gpt-5.2": {
 		maxTokens: 128000,
 		contextWindow: 400000,

+ 37 - 0
packages/types/src/providers/openai.ts

@@ -50,6 +50,43 @@ export const openAiNativeModels = {
 		],
 		description: "GPT-5.4: Our most capable model for professional work",
 	},
+	"gpt-5.4-mini": {
+		maxTokens: 128000,
+		contextWindow: 400000,
+		includedTools: ["apply_patch"],
+		excludedTools: ["apply_diff", "write_to_file"],
+		supportsImages: true,
+		supportsPromptCache: true,
+		supportsReasoningEffort: ["none", "low", "medium", "high", "xhigh"],
+		reasoningEffort: "none",
+		inputPrice: 0.75,
+		outputPrice: 4.5,
+		cacheReadsPrice: 0.075,
+		supportsVerbosity: true,
+		supportsTemperature: false,
+		tiers: [
+			{ name: "flex", contextWindow: 400000, inputPrice: 0.375, outputPrice: 2.25, cacheReadsPrice: 0.0375 },
+			{ name: "priority", contextWindow: 400000, inputPrice: 1.5, outputPrice: 9.0, cacheReadsPrice: 0.15 },
+		],
+		description: "GPT-5.4 Mini: A faster, lower-cost GPT-5.4 model for coding and agentic workflows",
+	},
+	"gpt-5.4-nano": {
+		maxTokens: 128000,
+		contextWindow: 400000,
+		includedTools: ["apply_patch"],
+		excludedTools: ["apply_diff", "write_to_file"],
+		supportsImages: true,
+		supportsPromptCache: true,
+		supportsReasoningEffort: ["none", "low", "medium", "high", "xhigh"],
+		reasoningEffort: "none",
+		inputPrice: 0.2,
+		outputPrice: 1.25,
+		cacheReadsPrice: 0.02,
+		supportsVerbosity: true,
+		supportsTemperature: false,
+		tiers: [{ name: "flex", contextWindow: 400000, inputPrice: 0.1, outputPrice: 0.625, cacheReadsPrice: 0.01 }],
+		description: "GPT-5.4 Nano: The smallest GPT-5.4 model for high-volume, low-latency tasks",
+	},
 	"gpt-5.2": {
 		maxTokens: 128000,
 		contextWindow: 400000,

+ 8 - 0
src/api/providers/__tests__/openai-codex.spec.ts

@@ -33,4 +33,12 @@ describe("OpenAiCodexHandler.getModel", () => {
 		expect(model.info.maxTokens).toBe(8192)
 		expect(model.info.supportsImages).toBe(false)
 	})
+
+	it("should use GPT-5.4 Mini capabilities when selected", () => {
+		const handler = new OpenAiCodexHandler({ apiModelId: "gpt-5.4-mini" })
+		const model = handler.getModel()
+
+		expect(model.id).toBe("gpt-5.4-mini")
+		expect(model.info).toBeDefined()
+	})
 })

+ 39 - 0
src/api/providers/__tests__/openai-native.spec.ts

@@ -264,6 +264,45 @@ describe("OpenAiNativeHandler", () => {
 			expect(modelInfo.info.reasoningEffort).toBe("none")
 		})
 
+		it("should return GPT-5.4 Mini model info when selected", () => {
+			const gpt54MiniHandler = new OpenAiNativeHandler({
+				...mockOptions,
+				apiModelId: "gpt-5.4-mini",
+			})
+
+			const modelInfo = gpt54MiniHandler.getModel()
+			expect(modelInfo.id).toBe("gpt-5.4-mini")
+			expect(modelInfo.info.maxTokens).toBe(128000)
+			expect(modelInfo.info.contextWindow).toBe(400000)
+			expect(modelInfo.info.supportsVerbosity).toBe(true)
+			expect(modelInfo.info.supportsReasoningEffort).toEqual(["none", "low", "medium", "high", "xhigh"])
+			expect(modelInfo.info.reasoningEffort).toBe("none")
+			expect(modelInfo.info.longContextPricing).toBeUndefined()
+		})
+
+		it("should return GPT-5.4 Nano model info when selected", () => {
+			const gpt54NanoHandler = new OpenAiNativeHandler({
+				...mockOptions,
+				apiModelId: "gpt-5.4-nano",
+			})
+
+			const modelInfo = gpt54NanoHandler.getModel()
+			expect(modelInfo.id).toBe("gpt-5.4-nano")
+			expect(modelInfo.info.maxTokens).toBe(128000)
+			expect(modelInfo.info.contextWindow).toBe(400000)
+			expect(modelInfo.info.supportsVerbosity).toBe(true)
+			expect(modelInfo.info.supportsReasoningEffort).toEqual(["none", "low", "medium", "high", "xhigh"])
+			expect(modelInfo.info.reasoningEffort).toBe("none")
+			expect(modelInfo.info.outputPrice).toBe(1.25)
+			expect(modelInfo.info.longContextPricing).toBeUndefined()
+			expect(modelInfo.info.tiers).toEqual([
+				expect.objectContaining({
+					name: "flex",
+					outputPrice: 0.625,
+				}),
+			])
+		})
+
 		it("should return GPT-5.3 Chat model info when selected", () => {
 			const chatHandler = new OpenAiNativeHandler({
 				...mockOptions,