Explore the Code

Fix: Remove temperature parameter for Azure OpenAI reasoning models (#5116)

* Fix temperature parameter error for Azure OpenAI reasoning models

* Fix tests: Update O3 family model tests to expect temperature: undefined

- Updated failing tests in openai.spec.ts to expect temperature: undefined for O3 models
- This aligns with the PR changes that remove temperature parameter for Azure OpenAI o1, o3, and o4 models
- All 4 previously failing tests now pass

---------

Co-authored-by: Daniel Riccio <[email protected]>
Committed by ExactDoug 6 months ago — parent commit 889e92518b
2 files changed, 7 insertions and 7 deletions
  1. +4 −4
      src/api/providers/__tests__/openai.spec.ts
  2. +3 −3
      src/api/providers/openai.ts

+ 4 - 4
src/api/providers/__tests__/openai.spec.ts

@@ -599,7 +599,7 @@ describe("OpenAiHandler", () => {
 					stream: true,
 					stream_options: { include_usage: true },
 					reasoning_effort: "medium",
-					temperature: 0.5,
+					temperature: undefined,
 					// O3 models do not support deprecated max_tokens but do support max_completion_tokens
 					max_completion_tokens: 32000,
 				}),
@@ -640,7 +640,7 @@ describe("OpenAiHandler", () => {
 					stream: true,
 					stream_options: { include_usage: true },
 					reasoning_effort: "medium",
-					temperature: 0.7,
+					temperature: undefined,
 				}),
 				{},
 			)
@@ -682,7 +682,7 @@ describe("OpenAiHandler", () => {
 						{ role: "user", content: "Hello!" },
 					],
 					reasoning_effort: "medium",
-					temperature: 0.3,
+					temperature: undefined,
 					// O3 models do not support deprecated max_tokens but do support max_completion_tokens
 					max_completion_tokens: 65536, // Using default maxTokens from o3Options
 				}),
@@ -712,7 +712,7 @@ describe("OpenAiHandler", () => {

 			expect(mockCreate).toHaveBeenCalledWith(
 				expect.objectContaining({
-					temperature: 0, // Default temperature
+					temperature: undefined, // Temperature is not supported for O3 models
 				}),
 				{},
 			)

+ 3 - 3
src/api/providers/openai.ts

@@ -86,7 +86,7 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
 		const deepseekReasoner = modelId.includes("deepseek-reasoner") || enabledR1Format
 		const ark = modelUrl.includes(".volces.com")

-		if (modelId.startsWith("o3-mini")) {
+		if (modelId.includes("o1") || modelId.includes("o3") || modelId.includes("o4")) {
 			yield* this.handleO3FamilyMessage(modelId, systemPrompt, messages)
 			return
 		}
@@ -306,7 +306,7 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
 				stream: true,
 				...(isGrokXAI ? {} : { stream_options: { include_usage: true } }),
 				reasoning_effort: modelInfo.reasoningEffort,
-				temperature: this.options.modelTemperature ?? 0,
+				temperature: undefined,
 			}

 			// O3 family models do not support the deprecated max_tokens parameter
@@ -331,7 +331,7 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
 					...convertToOpenAiMessages(messages),
 				],
 				reasoning_effort: modelInfo.reasoningEffort,
-				temperature: this.options.modelTemperature ?? 0,
+				temperature: undefined,
 			}

 			// O3 family models do not support the deprecated max_tokens parameter