
Merge pull request #693 from RooVetGit/fix_o3_streaming

Streaming version of o3-mini
Matt Rubens, 11 months ago
Parent
Commit 0c795ec555
2 changed files with 34 additions and 2 deletions
  1. .changeset/gold-pillows-fix.md (+5 −0)
  2. src/api/providers/openai-native.ts (+29 −2)

.changeset/gold-pillows-fix.md (+5 −0)

@@ -0,0 +1,5 @@
+---
+"roo-cline": patch
+---
+
+Streaming version of o3-mini

src/api/providers/openai-native.ts (+29 −2)

@@ -27,8 +27,7 @@ export class OpenAiNativeHandler implements ApiHandler, SingleCompletionHandler
 		switch (modelId) {
 			case "o1":
 			case "o1-preview":
-			case "o1-mini":
-			case "o3-mini": {
+			case "o1-mini": {
 				// o1-preview and o1-mini don't support streaming, non-1 temp, or system prompt
 				// o1 doesn't support streaming or non-1 temp, but does support a developer prompt
 				const response = await this.client.chat.completions.create({
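
For context, the o1 branch above issues a single non-streaming request and returns the whole completion at once. A minimal standalone sketch of that pattern with the OpenAI Node SDK follows; the client setup and prompt contents are illustrative assumptions, not the handler's actual code:

```typescript
import OpenAI from "openai"

const client = new OpenAI() // reads OPENAI_API_KEY from the environment

// o1-preview and o1-mini reject system prompts, streaming, and non-1 temperature,
// so the instructions travel as a plain user message and the response is awaited whole.
const response = await client.chat.completions.create({
	model: "o1-mini",
	messages: [{ role: "user", content: "You are a helpful assistant.\n\nSay hello." }],
})

console.log(response.choices[0]?.message?.content ?? "")
```
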
@@ -49,6 +48,34 @@ export class OpenAiNativeHandler implements ApiHandler, SingleCompletionHandler
 				}
 				break
 			}
+			case "o3-mini": {
+				const stream = await this.client.chat.completions.create({
+					model: this.getModel().id,
+					messages: [{ role: "developer", content: systemPrompt }, ...convertToOpenAiMessages(messages)],
+					stream: true,
+					stream_options: { include_usage: true },
+				})
+
+				for await (const chunk of stream) {
+					const delta = chunk.choices[0]?.delta
+					if (delta?.content) {
+						yield {
+							type: "text",
+							text: delta.content,
+						}
+					}
+
+					// chunk.usage is null for every chunk except the last, which carries the token usage statistics for the entire request
+					if (chunk.usage) {
+						yield {
+							type: "usage",
+							inputTokens: chunk.usage.prompt_tokens || 0,
+							outputTokens: chunk.usage.completion_tokens || 0,
+						}
+					}
+				}
+				break
+			}
 			default: {
 				const stream = await this.client.chat.completions.create({
 					model: this.getModel().id,