phantomreactor 9 months ago
parent
commit
2f8984fadb

+ 1 - 1
go.mod

@@ -26,7 +26,7 @@ require (
 	github.com/muesli/reflow v0.3.0
 	github.com/muesli/termenv v0.16.0
 	github.com/ncruces/go-sqlite3 v0.25.0
-	github.com/openai/openai-go v0.1.0-beta.2
+	github.com/openai/openai-go v0.1.0-beta.10
 	github.com/pressly/goose/v3 v3.24.2
 	github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3
 	github.com/spf13/cobra v1.9.1

+ 3 - 6
go.sum

@@ -68,8 +68,6 @@ github.com/bmatcuk/doublestar/v4 v4.8.1 h1:54Bopc5c2cAvhLRAzqOGCYHYyhcDHsFF4wWIR
 github.com/bmatcuk/doublestar/v4 v4.8.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc=
 github.com/catppuccin/go v0.3.0 h1:d+0/YicIq+hSTo5oPuRi5kOpqkVA5tAsU6dNhvRu+aY=
 github.com/catppuccin/go v0.3.0/go.mod h1:8IHJuMGaUUjQM82qBrGNBv7LFq6JI3NnQCF6MOlZjpc=
-github.com/charmbracelet/bubbles v0.20.0 h1:jSZu6qD8cRQ6k9OMfR1WlM+ruM8fkPWkHvQWD9LIutE=
-github.com/charmbracelet/bubbles v0.20.0/go.mod h1:39slydyswPy+uVOHZ5x/GjwVAFkCsV8IIVy+4MhzwwU=
 github.com/charmbracelet/bubbles v0.21.0 h1:9TdC97SdRVg/1aaXNVWfFH3nnLAwOXr8Fn6u6mfQdFs=
 github.com/charmbracelet/bubbles v0.21.0/go.mod h1:HF+v6QUR4HkEpz62dx7ym2xc71/KBHg+zKwJtMw+qtg=
 github.com/charmbracelet/bubbletea v1.3.4 h1:kCg7B+jSCFPLYRA52SDZjr51kG/fMUEoPoZrkaDHyoI=
@@ -86,9 +84,8 @@ github.com/charmbracelet/x/ansi v0.8.0 h1:9GTq3xq9caJW8ZrBTe0LIe2fvfLR/bYXKTx2ll
 github.com/charmbracelet/x/ansi v0.8.0/go.mod h1:wdYl/ONOLHLIVmQaxbIYEC/cRKOQyjTkowiI4blgS9Q=
 github.com/charmbracelet/x/cellbuf v0.0.13-0.20250311204145-2c3ea96c31dd h1:vy0GVL4jeHEwG5YOXDmi86oYw2yuYUGqz6a8sLwg0X8=
 github.com/charmbracelet/x/cellbuf v0.0.13-0.20250311204145-2c3ea96c31dd/go.mod h1:xe0nKWGd3eJgtqZRaN9RjMtK7xUYchjzPr7q6kcvCCs=
-github.com/charmbracelet/x/exp/golden v0.0.0-20240815200342-61de596daa2b h1:MnAMdlwSltxJyULnrYbkZpp4k58Co7Tah3ciKhSNo0Q=
-github.com/charmbracelet/x/exp/golden v0.0.0-20240815200342-61de596daa2b/go.mod h1:wDlXFlCrmJ8J+swcL/MnGUuYnqgQdW9rhSD61oNMb6U=
 github.com/charmbracelet/x/exp/golden v0.0.0-20241011142426-46044092ad91 h1:payRxjMjKgx2PaCWLZ4p3ro9y97+TVLZNaRZgJwSVDQ=
+github.com/charmbracelet/x/exp/golden v0.0.0-20241011142426-46044092ad91/go.mod h1:wDlXFlCrmJ8J+swcL/MnGUuYnqgQdW9rhSD61oNMb6U=
 github.com/charmbracelet/x/term v0.2.1 h1:AQeHeLZ1OqSXhrAWpYUtZyX1T3zVxfpZuEQMIQaGIAQ=
 github.com/charmbracelet/x/term v0.2.1/go.mod h1:oQ4enTYFV7QN4m0i9mzHrViD7TQKvNEEkHUMCmsxdUg=
 github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
@@ -182,8 +179,8 @@ github.com/ncruces/go-strftime v0.1.9 h1:bY0MQC28UADQmHmaF5dgpLmImcShSi2kHU9XLdh
 github.com/ncruces/go-strftime v0.1.9/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls=
 github.com/ncruces/julianday v1.0.0 h1:fH0OKwa7NWvniGQtxdJRxAgkBMolni2BjDHaWTxqt7M=
 github.com/ncruces/julianday v1.0.0/go.mod h1:Dusn2KvZrrovOMJuOt0TNXL6tB7U2E8kvza5fFc9G7g=
-github.com/openai/openai-go v0.1.0-beta.2 h1:Ra5nCFkbEl9w+UJwAciC4kqnIBUCcJazhmMA0/YN894=
-github.com/openai/openai-go v0.1.0-beta.2/go.mod h1:g461MYGXEXBVdV5SaR/5tNzNbSfwTBBefwc+LlDCK0Y=
+github.com/openai/openai-go v0.1.0-beta.10 h1:CknhGXe8aXQMRuqg255PFnWzgRY9nEryMxoNIBBM9tU=
+github.com/openai/openai-go v0.1.0-beta.10/go.mod h1:g461MYGXEXBVdV5SaR/5tNzNbSfwTBBefwc+LlDCK0Y=
 github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M=
 github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc=
 github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=

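The manifest and checksum changes above are what the module tooling generates for this bump; from the repository root, something like:

	go get github.com/openai/openai-go@v0.1.0-beta.10
	go mod tidy

go mod tidy also accounts for the pruned go.sum entries: the older charmbracelet/bubbles and x/exp/golden hashes belonged to superseded versions and drop out once nothing requires them.
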
+ 1 - 11
internal/llm/agent/agent.go

@@ -4,12 +4,11 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"log/slog"
 	"strings"
 	"sync"
 	"time"
 
-	"log/slog"
-
 	"github.com/sst/opencode/internal/config"
 	"github.com/sst/opencode/internal/llm/models"
 	"github.com/sst/opencode/internal/llm/prompt"
@@ -522,15 +521,6 @@ func (a *agent) processEvent(ctx context.Context, sessionID string, assistantMsg
 		assistantMsg.AddToolCall(*event.ToolCall)
 		_, err := a.messages.Update(ctx, *assistantMsg)
 		return err
-	// TODO: see how to handle this
-	// case provider.EventToolUseDelta:
-	// 	tm := time.Unix(assistantMsg.UpdatedAt, 0)
-	// 	assistantMsg.AppendToolCallInput(event.ToolCall.ID, event.ToolCall.Input)
-	// 	if time.Since(tm) > 1000*time.Millisecond {
-	// 		err := a.messages.Update(ctx, *assistantMsg)
-	// 		assistantMsg.UpdatedAt = time.Now().Unix()
-	// 		return err
-	// 	}
 	case provider.EventToolUseStop:
 		assistantMsg.FinishToolCall(event.ToolCall.ID)
 		_, err := a.messages.Update(ctx, *assistantMsg)

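The deleted block in processEvent was a commented-out sketch for throttling tool-call input updates: append each EventToolUseDelta, but persist the message at most once per second. Reconstructed from the removed comment (an outline, not live code):

	case provider.EventToolUseDelta:
		assistantMsg.AppendToolCallInput(event.ToolCall.ID, event.ToolCall.Input)
		if time.Since(time.Unix(assistantMsg.UpdatedAt, 0)) > time.Second {
			assistantMsg.UpdatedAt = time.Now().Unix()
			_, err := a.messages.Update(ctx, *assistantMsg)
			return err
		}

With the new Responses streaming path emitting EventToolUseDelta (see openai_response.go below), a handler along these lines may become relevant again.
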
+ 2 - 0
internal/llm/models/groq.go

@@ -41,6 +41,7 @@ var GroqModels = map[ModelID]Model{
 		CostPer1MInCached:   0,
 		CostPer1MOutCached:  0,
 		CostPer1MOut:        0.34,
+		DefaultMaxTokens:    8192,
 		ContextWindow:       128_000, // 10M when?
 		SupportsAttachments: true,
 	},
@@ -54,6 +55,7 @@ var GroqModels = map[ModelID]Model{
 		CostPer1MInCached:   0,
 		CostPer1MOutCached:  0,
 		CostPer1MOut:        0.20,
+		DefaultMaxTokens:    8192,
 		ContextWindow:       128_000,
 		SupportsAttachments: true,
 	},

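DefaultMaxTokens gives these Groq models an output cap to fall back on when none is configured. The consuming side is outside this diff; a hypothetical fallback would look like:

	// hypothetical call site; agentCfg and its fields are assumptions,
	// only Model.DefaultMaxTokens comes from this commit
	maxTokens := agentCfg.MaxTokens
	if maxTokens <= 0 {
		maxTokens = model.DefaultMaxTokens
	}
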
+ 15 - 0
internal/llm/models/openai.go

@@ -3,6 +3,7 @@ package models
 const (
 	ProviderOpenAI ModelProvider = "openai"
 
+	CodexMini    ModelID = "codex-mini"
 	GPT41        ModelID = "gpt-4.1"
 	GPT41Mini    ModelID = "gpt-4.1-mini"
 	GPT41Nano    ModelID = "gpt-4.1-nano"
@@ -18,6 +19,20 @@ const (
 )
 
 var OpenAIModels = map[ModelID]Model{
+	CodexMini: {
+		ID:                  CodexMini,
+		Name:                "Codex Mini",
+		Provider:            ProviderOpenAI,
+		APIModel:            "codex-mini-latest",
+		CostPer1MIn:         1.50,
+		CostPer1MInCached:   0.375,
+		CostPer1MOutCached:  0.0,
+		CostPer1MOut:        6.00,
+		ContextWindow:       200_000,
+		DefaultMaxTokens:    100_000,
+		CanReason:           true,
+		SupportsAttachments: true,
+	},
 	GPT41: {
 		ID:                  GPT41,
 		Name:                "GPT 4.1",

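For a sense of scale, at the listed Codex Mini rates a request with 10,000 input tokens (2,000 of them cache reads) and 1,000 output tokens costs roughly:

	 8,000 × 1.50  / 1M = $0.01200   (uncached input)
	 2,000 × 0.375 / 1M = $0.00075   (cached input reads)
	 1,000 × 6.00  / 1M = $0.00600   (output)
	              total ≈ $0.01875

This lines up with the usage accounting below, which reports uncached input (prompt tokens minus cached tokens) and cached reads separately.
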
+ 10 - 297
internal/llm/provider/openai.go

@@ -2,21 +2,14 @@ package provider
 
 import (
 	"context"
-	"encoding/json"
 	"errors"
 	"fmt"
-	"io"
-	"time"
-
+	"log/slog"
 	"github.com/openai/openai-go"
 	"github.com/openai/openai-go/option"
-	"github.com/openai/openai-go/shared"
-	"github.com/sst/opencode/internal/config"
 	"github.com/sst/opencode/internal/llm/models"
 	"github.com/sst/opencode/internal/llm/tools"
 	"github.com/sst/opencode/internal/message"
-	"github.com/sst/opencode/internal/status"
-	"log/slog"
 )
 
 type openaiOptions struct {
@@ -66,87 +59,21 @@ func newOpenAIClient(opts providerClientOptions) OpenAIClient {
 	}
 }
 
-func (o *openaiClient) convertMessages(messages []message.Message) (openaiMessages []openai.ChatCompletionMessageParamUnion) {
-	// Add system message first
-	openaiMessages = append(openaiMessages, openai.SystemMessage(o.providerOptions.systemMessage))
-
-	for _, msg := range messages {
-		switch msg.Role {
-		case message.User:
-			var content []openai.ChatCompletionContentPartUnionParam
-			textBlock := openai.ChatCompletionContentPartTextParam{Text: msg.Content().String()}
-			content = append(content, openai.ChatCompletionContentPartUnionParam{OfText: &textBlock})
-			for _, binaryContent := range msg.BinaryContent() {
-				imageURL := openai.ChatCompletionContentPartImageImageURLParam{URL: binaryContent.String(models.ProviderOpenAI)}
-				imageBlock := openai.ChatCompletionContentPartImageParam{ImageURL: imageURL}
-
-				content = append(content, openai.ChatCompletionContentPartUnionParam{OfImageURL: &imageBlock})
-			}
-
-			openaiMessages = append(openaiMessages, openai.UserMessage(content))
-
-		case message.Assistant:
-			assistantMsg := openai.ChatCompletionAssistantMessageParam{
-				Role: "assistant",
-			}
-
-			if msg.Content().String() != "" {
-				assistantMsg.Content = openai.ChatCompletionAssistantMessageParamContentUnion{
-					OfString: openai.String(msg.Content().String()),
-				}
-			}
-
-			if len(msg.ToolCalls()) > 0 {
-				assistantMsg.ToolCalls = make([]openai.ChatCompletionMessageToolCallParam, len(msg.ToolCalls()))
-				for i, call := range msg.ToolCalls() {
-					assistantMsg.ToolCalls[i] = openai.ChatCompletionMessageToolCallParam{
-						ID:   call.ID,
-						Type: "function",
-						Function: openai.ChatCompletionMessageToolCallFunctionParam{
-							Name:      call.Name,
-							Arguments: call.Input,
-						},
-					}
-				}
-			}
-
-			openaiMessages = append(openaiMessages, openai.ChatCompletionMessageParamUnion{
-				OfAssistant: &assistantMsg,
-			})
-
-		case message.Tool:
-			for _, result := range msg.ToolResults() {
-				openaiMessages = append(openaiMessages,
-					openai.ToolMessage(result.Content, result.ToolCallID),
-				)
-			}
-		}
+func (o *openaiClient) send(ctx context.Context, messages []message.Message, tools []tools.BaseTool) (response *ProviderResponse, err error) {
+	if o.providerOptions.model.ID == models.OpenAIModels[models.CodexMini].ID || o.providerOptions.model.ID == models.OpenAIModels[models.O1Pro].ID {
+		return o.sendResponseMessages(ctx, messages, tools)
 	}
-
-	return
+	return o.sendChatcompletionMessage(ctx, messages, tools)
 }
 
-func (o *openaiClient) convertTools(tools []tools.BaseTool) []openai.ChatCompletionToolParam {
-	openaiTools := make([]openai.ChatCompletionToolParam, len(tools))
-
-	for i, tool := range tools {
-		info := tool.Info()
-		openaiTools[i] = openai.ChatCompletionToolParam{
-			Function: openai.FunctionDefinitionParam{
-				Name:        info.Name,
-				Description: openai.String(info.Description),
-				Parameters: openai.FunctionParameters{
-					"type":       "object",
-					"properties": info.Parameters,
-					"required":   info.Required,
-				},
-			},
-		}
+func (o *openaiClient) stream(ctx context.Context, messages []message.Message, tools []tools.BaseTool) <-chan ProviderEvent {
+	if o.providerOptions.model.ID == models.OpenAIModels[models.CodexMini].ID || o.providerOptions.model.ID == models.OpenAIModels[models.O1Pro].ID {
+		return o.streamResponseMessages(ctx, messages, tools)
 	}
-
-	return openaiTools
+	return o.streamChatCompletionMessages(ctx, messages, tools)
 }
 
+
 func (o *openaiClient) finishReason(reason string) message.FinishReason {
 	switch reason {
 	case "stop":
@@ -160,190 +87,6 @@ func (o *openaiClient) finishReason(reason string) message.FinishReason {
 	}
 }
 
-func (o *openaiClient) preparedParams(messages []openai.ChatCompletionMessageParamUnion, tools []openai.ChatCompletionToolParam) openai.ChatCompletionNewParams {
-	params := openai.ChatCompletionNewParams{
-		Model:    openai.ChatModel(o.providerOptions.model.APIModel),
-		Messages: messages,
-		Tools:    tools,
-	}
-
-	if o.providerOptions.model.CanReason == true {
-		params.MaxCompletionTokens = openai.Int(o.providerOptions.maxTokens)
-		switch o.options.reasoningEffort {
-		case "low":
-			params.ReasoningEffort = shared.ReasoningEffortLow
-		case "medium":
-			params.ReasoningEffort = shared.ReasoningEffortMedium
-		case "high":
-			params.ReasoningEffort = shared.ReasoningEffortHigh
-		default:
-			params.ReasoningEffort = shared.ReasoningEffortMedium
-		}
-	} else {
-		params.MaxTokens = openai.Int(o.providerOptions.maxTokens)
-	}
-
-	if o.providerOptions.model.Provider == models.ProviderOpenRouter {
-		params.WithExtraFields(map[string]any{
-			"provider": map[string]any{
-				"require_parameters": true,
-			},
-		})
-	}
-
-	return params
-}
-
-func (o *openaiClient) send(ctx context.Context, messages []message.Message, tools []tools.BaseTool) (response *ProviderResponse, err error) {
-	params := o.preparedParams(o.convertMessages(messages), o.convertTools(tools))
-	cfg := config.Get()
-	if cfg.Debug {
-		jsonData, _ := json.Marshal(params)
-		slog.Debug("Prepared messages", "messages", string(jsonData))
-	}
-	attempts := 0
-	for {
-		attempts++
-		openaiResponse, err := o.client.Chat.Completions.New(
-			ctx,
-			params,
-		)
-		// If there is an error we are going to see if we can retry the call
-		if err != nil {
-			retry, after, retryErr := o.shouldRetry(attempts, err)
-			duration := time.Duration(after) * time.Millisecond
-			if retryErr != nil {
-				return nil, retryErr
-			}
-			if retry {
-				status.Warn(fmt.Sprintf("Retrying due to rate limit... attempt %d of %d", attempts, maxRetries), status.WithDuration(duration))
-				select {
-				case <-ctx.Done():
-					return nil, ctx.Err()
-				case <-time.After(duration):
-					continue
-				}
-			}
-			return nil, retryErr
-		}
-
-		content := ""
-		if openaiResponse.Choices[0].Message.Content != "" {
-			content = openaiResponse.Choices[0].Message.Content
-		}
-
-		toolCalls := o.toolCalls(*openaiResponse)
-		finishReason := o.finishReason(string(openaiResponse.Choices[0].FinishReason))
-
-		if len(toolCalls) > 0 {
-			finishReason = message.FinishReasonToolUse
-		}
-
-		return &ProviderResponse{
-			Content:      content,
-			ToolCalls:    toolCalls,
-			Usage:        o.usage(*openaiResponse),
-			FinishReason: finishReason,
-		}, nil
-	}
-}
-
-func (o *openaiClient) stream(ctx context.Context, messages []message.Message, tools []tools.BaseTool) <-chan ProviderEvent {
-	params := o.preparedParams(o.convertMessages(messages), o.convertTools(tools))
-	params.StreamOptions = openai.ChatCompletionStreamOptionsParam{
-		IncludeUsage: openai.Bool(true),
-	}
-
-	cfg := config.Get()
-	if cfg.Debug {
-		jsonData, _ := json.Marshal(params)
-		slog.Debug("Prepared messages", "messages", string(jsonData))
-	}
-
-	attempts := 0
-	eventChan := make(chan ProviderEvent)
-
-	go func() {
-		for {
-			attempts++
-			openaiStream := o.client.Chat.Completions.NewStreaming(
-				ctx,
-				params,
-			)
-
-			acc := openai.ChatCompletionAccumulator{}
-			currentContent := ""
-			toolCalls := make([]message.ToolCall, 0)
-
-			for openaiStream.Next() {
-				chunk := openaiStream.Current()
-				acc.AddChunk(chunk)
-
-				for _, choice := range chunk.Choices {
-					if choice.Delta.Content != "" {
-						eventChan <- ProviderEvent{
-							Type:    EventContentDelta,
-							Content: choice.Delta.Content,
-						}
-						currentContent += choice.Delta.Content
-					}
-				}
-			}
-
-			err := openaiStream.Err()
-			if err == nil || errors.Is(err, io.EOF) {
-				// Stream completed successfully
-				finishReason := o.finishReason(string(acc.ChatCompletion.Choices[0].FinishReason))
-				if len(acc.ChatCompletion.Choices[0].Message.ToolCalls) > 0 {
-					toolCalls = append(toolCalls, o.toolCalls(acc.ChatCompletion)...)
-				}
-				if len(toolCalls) > 0 {
-					finishReason = message.FinishReasonToolUse
-				}
-
-				eventChan <- ProviderEvent{
-					Type: EventComplete,
-					Response: &ProviderResponse{
-						Content:      currentContent,
-						ToolCalls:    toolCalls,
-						Usage:        o.usage(acc.ChatCompletion),
-						FinishReason: finishReason,
-					},
-				}
-				close(eventChan)
-				return
-			}
-
-			// If there is an error we are going to see if we can retry the call
-			retry, after, retryErr := o.shouldRetry(attempts, err)
-			duration := time.Duration(after) * time.Millisecond
-			if retryErr != nil {
-				eventChan <- ProviderEvent{Type: EventError, Error: retryErr}
-				close(eventChan)
-				return
-			}
-			if retry {
-				status.Warn(fmt.Sprintf("Retrying due to rate limit... attempt %d of %d", attempts, maxRetries), status.WithDuration(duration))
-				select {
-				case <-ctx.Done():
-					// context cancelled
-					if ctx.Err() == nil {
-						eventChan <- ProviderEvent{Type: EventError, Error: ctx.Err()}
-					}
-					close(eventChan)
-					return
-				case <-time.After(duration):
-					continue
-				}
-			}
-			eventChan <- ProviderEvent{Type: EventError, Error: retryErr}
-			close(eventChan)
-			return
-		}
-	}()
-
-	return eventChan
-}
 
 func (o *openaiClient) shouldRetry(attempts int, err error) (bool, int64, error) {
 	var apierr *openai.Error
@@ -373,36 +116,6 @@ func (o *openaiClient) shouldRetry(attempts int, err error) (bool, int64, error)
 	return true, int64(retryMs), nil
 }
 
-func (o *openaiClient) toolCalls(completion openai.ChatCompletion) []message.ToolCall {
-	var toolCalls []message.ToolCall
-
-	if len(completion.Choices) > 0 && len(completion.Choices[0].Message.ToolCalls) > 0 {
-		for _, call := range completion.Choices[0].Message.ToolCalls {
-			toolCall := message.ToolCall{
-				ID:       call.ID,
-				Name:     call.Function.Name,
-				Input:    call.Function.Arguments,
-				Type:     "function",
-				Finished: true,
-			}
-			toolCalls = append(toolCalls, toolCall)
-		}
-	}
-
-	return toolCalls
-}
-
-func (o *openaiClient) usage(completion openai.ChatCompletion) TokenUsage {
-	cachedTokens := completion.Usage.PromptTokensDetails.CachedTokens
-	inputTokens := completion.Usage.PromptTokens - cachedTokens
-
-	return TokenUsage{
-		InputTokens:         inputTokens,
-		OutputTokens:        completion.Usage.CompletionTokens,
-		CacheCreationTokens: 0, // OpenAI doesn't provide this directly
-		CacheReadTokens:     cachedTokens,
-	}
-}
 
 func WithOpenAIBaseURL(baseURL string) OpenAIOption {
 	return func(options *openaiOptions) {

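send and stream now route CodexMini and O1Pro to the Responses API and every other model to Chat Completions. The model check is duplicated between the two methods; a small predicate would keep them in sync (a sketch, not part of the commit):

	// sketch: one place to decide which OpenAI API a model needs
	func (o *openaiClient) usesResponsesAPI() bool {
		switch o.providerOptions.model.ID {
		case models.CodexMini, models.O1Pro:
			return true
		}
		return false
	}
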
+ 317 - 0
internal/llm/provider/openai_completion.go

@@ -0,0 +1,317 @@
+package provider
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"log/slog"
+	"time"
+
+	"github.com/openai/openai-go"
+	"github.com/openai/openai-go/shared"
+	"github.com/sst/opencode/internal/config"
+	"github.com/sst/opencode/internal/llm/models"
+	"github.com/sst/opencode/internal/llm/tools"
+	"github.com/sst/opencode/internal/message"
+	"github.com/sst/opencode/internal/status"
+)
+
+func (o *openaiClient) convertMessagesToChatCompletionMessages(messages []message.Message) (openaiMessages []openai.ChatCompletionMessageParamUnion) {
+	// Add system message first
+	openaiMessages = append(openaiMessages, openai.SystemMessage(o.providerOptions.systemMessage))
+
+	for _, msg := range messages {
+		switch msg.Role {
+		case message.User:
+			var content []openai.ChatCompletionContentPartUnionParam
+			textBlock := openai.ChatCompletionContentPartTextParam{Text: msg.Content().String()}
+			content = append(content, openai.ChatCompletionContentPartUnionParam{OfText: &textBlock})
+			for _, binaryContent := range msg.BinaryContent() {
+				imageURL := openai.ChatCompletionContentPartImageImageURLParam{URL: binaryContent.String(models.ProviderOpenAI)}
+				imageBlock := openai.ChatCompletionContentPartImageParam{ImageURL: imageURL}
+
+				content = append(content, openai.ChatCompletionContentPartUnionParam{OfImageURL: &imageBlock})
+			}
+
+			openaiMessages = append(openaiMessages, openai.UserMessage(content))
+
+		case message.Assistant:
+			assistantMsg := openai.ChatCompletionAssistantMessageParam{
+				Role: "assistant",
+			}
+
+			if msg.Content().String() != "" {
+				assistantMsg.Content = openai.ChatCompletionAssistantMessageParamContentUnion{
+					OfString: openai.String(msg.Content().String()),
+				}
+			}
+
+			if len(msg.ToolCalls()) > 0 {
+				assistantMsg.ToolCalls = make([]openai.ChatCompletionMessageToolCallParam, len(msg.ToolCalls()))
+				for i, call := range msg.ToolCalls() {
+					assistantMsg.ToolCalls[i] = openai.ChatCompletionMessageToolCallParam{
+						ID:   call.ID,
+						Type: "function",
+						Function: openai.ChatCompletionMessageToolCallFunctionParam{
+							Name:      call.Name,
+							Arguments: call.Input,
+						},
+					}
+				}
+			}
+
+			openaiMessages = append(openaiMessages, openai.ChatCompletionMessageParamUnion{
+				OfAssistant: &assistantMsg,
+			})
+
+		case message.Tool:
+			for _, result := range msg.ToolResults() {
+				openaiMessages = append(openaiMessages,
+					openai.ToolMessage(result.Content, result.ToolCallID),
+				)
+			}
+		}
+	}
+
+	return
+}
+
+func (o *openaiClient) convertToChatCompletionTools(tools []tools.BaseTool) []openai.ChatCompletionToolParam {
+	openaiTools := make([]openai.ChatCompletionToolParam, len(tools))
+
+	for i, tool := range tools {
+		info := tool.Info()
+		openaiTools[i] = openai.ChatCompletionToolParam{
+			Function: openai.FunctionDefinitionParam{
+				Name:        info.Name,
+				Description: openai.String(info.Description),
+				Parameters: openai.FunctionParameters{
+					"type":       "object",
+					"properties": info.Parameters,
+					"required":   info.Required,
+				},
+			},
+		}
+	}
+
+	return openaiTools
+}
+
+func (o *openaiClient) preparedChatCompletionParams(messages []openai.ChatCompletionMessageParamUnion, tools []openai.ChatCompletionToolParam) openai.ChatCompletionNewParams {
+	params := openai.ChatCompletionNewParams{
+		Model:    openai.ChatModel(o.providerOptions.model.APIModel),
+		Messages: messages,
+		Tools:    tools,
+	}
+	if o.providerOptions.model.CanReason == true {
+		params.MaxCompletionTokens = openai.Int(o.providerOptions.maxTokens)
+		switch o.options.reasoningEffort {
+		case "low":
+			params.ReasoningEffort = shared.ReasoningEffortLow
+		case "medium":
+			params.ReasoningEffort = shared.ReasoningEffortMedium
+		case "high":
+			params.ReasoningEffort = shared.ReasoningEffortHigh
+		default:
+			params.ReasoningEffort = shared.ReasoningEffortMedium
+		}
+	} else {
+		params.MaxTokens = openai.Int(o.providerOptions.maxTokens)
+	}
+
+	if o.providerOptions.model.Provider == models.ProviderOpenRouter {
+		params.WithExtraFields(map[string]any{
+			"provider": map[string]any{
+				"require_parameters": true,
+			},
+		})
+	}
+
+	return params
+}
+
+func (o *openaiClient) sendChatcompletionMessage(ctx context.Context, messages []message.Message, tools []tools.BaseTool) (response *ProviderResponse, err error) {
+	params := o.preparedChatCompletionParams(o.convertMessagesToChatCompletionMessages(messages), o.convertToChatCompletionTools(tools))
+	cfg := config.Get()
+	if cfg.Debug {
+		jsonData, _ := json.Marshal(params)
+		slog.Debug("Prepared messages", "messages", string(jsonData))
+	}
+	attempts := 0
+	for {
+		attempts++
+		openaiResponse, err := o.client.Chat.Completions.New(
+			ctx,
+			params,
+		)
+		// If there is an error we are going to see if we can retry the call
+		if err != nil {
+			retry, after, retryErr := o.shouldRetry(attempts, err)
+			duration := time.Duration(after) * time.Millisecond
+			if retryErr != nil {
+				return nil, retryErr
+			}
+			if retry {
+				status.Warn(fmt.Sprintf("Retrying due to rate limit... attempt %d of %d", attempts, maxRetries), status.WithDuration(duration))
+				select {
+				case <-ctx.Done():
+					return nil, ctx.Err()
+				case <-time.After(duration):
+					continue
+				}
+			}
+			return nil, retryErr
+		}
+
+		content := ""
+		if openaiResponse.Choices[0].Message.Content != "" {
+			content = openaiResponse.Choices[0].Message.Content
+		}
+
+		toolCalls := o.chatCompletionToolCalls(*openaiResponse)
+		finishReason := o.finishReason(string(openaiResponse.Choices[0].FinishReason))
+
+		if len(toolCalls) > 0 {
+			finishReason = message.FinishReasonToolUse
+		}
+
+		return &ProviderResponse{
+			Content:      content,
+			ToolCalls:    toolCalls,
+			Usage:        o.usage(*openaiResponse),
+			FinishReason: finishReason,
+		}, nil
+	}
+}
+
+func (o *openaiClient) streamChatCompletionMessages(ctx context.Context, messages []message.Message, tools []tools.BaseTool) <-chan ProviderEvent {
+	params := o.preparedChatCompletionParams(o.convertMessagesToChatCompletionMessages(messages), o.convertToChatCompletionTools(tools))
+	params.StreamOptions = openai.ChatCompletionStreamOptionsParam{
+		IncludeUsage: openai.Bool(true),
+	}
+
+	cfg := config.Get()
+	if cfg.Debug {
+		jsonData, _ := json.Marshal(params)
+		slog.Debug("Prepared messages", "messages", string(jsonData))
+	}
+
+	attempts := 0
+	eventChan := make(chan ProviderEvent)
+
+	go func() {
+		for {
+			attempts++
+			openaiStream := o.client.Chat.Completions.NewStreaming(
+				ctx,
+				params,
+			)
+
+			acc := openai.ChatCompletionAccumulator{}
+			currentContent := ""
+			toolCalls := make([]message.ToolCall, 0)
+
+			for openaiStream.Next() {
+				chunk := openaiStream.Current()
+				acc.AddChunk(chunk)
+
+				for _, choice := range chunk.Choices {
+					if choice.Delta.Content != "" {
+						eventChan <- ProviderEvent{
+							Type:    EventContentDelta,
+							Content: choice.Delta.Content,
+						}
+						currentContent += choice.Delta.Content
+					}
+				}
+			}
+
+			err := openaiStream.Err()
+			if err == nil || errors.Is(err, io.EOF) {
+				// Stream completed successfully
+				finishReason := o.finishReason(string(acc.ChatCompletion.Choices[0].FinishReason))
+				if len(acc.ChatCompletion.Choices[0].Message.ToolCalls) > 0 {
+					toolCalls = append(toolCalls, o.chatCompletionToolCalls(acc.ChatCompletion)...)
+				}
+				if len(toolCalls) > 0 {
+					finishReason = message.FinishReasonToolUse
+				}
+
+				eventChan <- ProviderEvent{
+					Type: EventComplete,
+					Response: &ProviderResponse{
+						Content:      currentContent,
+						ToolCalls:    toolCalls,
+						Usage:        o.usage(acc.ChatCompletion),
+						FinishReason: finishReason,
+					},
+				}
+				close(eventChan)
+				return
+			}
+
+			// If there is an error we are going to see if we can retry the call
+			retry, after, retryErr := o.shouldRetry(attempts, err)
+			duration := time.Duration(after) * time.Millisecond
+			if retryErr != nil {
+				eventChan <- ProviderEvent{Type: EventError, Error: retryErr}
+				close(eventChan)
+				return
+			}
+			if retry {
+				status.Warn(fmt.Sprintf("Retrying due to rate limit... attempt %d of %d", attempts, maxRetries), status.WithDuration(duration))
+				select {
+				case <-ctx.Done():
+					// context cancelled
+					if ctx.Err() == nil {
+						eventChan <- ProviderEvent{Type: EventError, Error: ctx.Err()}
+					}
+					close(eventChan)
+					return
+				case <-time.After(duration):
+					continue
+				}
+			}
+			eventChan <- ProviderEvent{Type: EventError, Error: retryErr}
+			close(eventChan)
+			return
+		}
+	}()
+
+	return eventChan
+}
+
+
+func (o *openaiClient) chatCompletionToolCalls(completion openai.ChatCompletion) []message.ToolCall {
+	var toolCalls []message.ToolCall
+
+	if len(completion.Choices) > 0 && len(completion.Choices[0].Message.ToolCalls) > 0 {
+		for _, call := range completion.Choices[0].Message.ToolCalls {
+			toolCall := message.ToolCall{
+				ID:       call.ID,
+				Name:     call.Function.Name,
+				Input:    call.Function.Arguments,
+				Type:     "function",
+				Finished: true,
+			}
+			toolCalls = append(toolCalls, toolCall)
+		}
+	}
+
+	return toolCalls
+}
+
+func (o *openaiClient) usage(completion openai.ChatCompletion) TokenUsage {
+	cachedTokens := completion.Usage.PromptTokensDetails.CachedTokens
+	inputTokens := completion.Usage.PromptTokens - cachedTokens
+
+	return TokenUsage{
+		InputTokens:         inputTokens,
+		OutputTokens:        completion.Usage.CompletionTokens,
+		CacheCreationTokens: 0, // OpenAI doesn't provide this directly
+		CacheReadTokens:     cachedTokens,
+	}
+}
+

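Both streaming paths hand back a receive-only channel of ProviderEvent that the producer goroutine closes on every exit path, so callers can simply range over it. A minimal consumer sketch (msgs and toolset are placeholders, and the method is package-internal):

	for event := range o.streamChatCompletionMessages(ctx, msgs, toolset) {
		switch event.Type {
		case EventContentDelta:
			fmt.Print(event.Content) // incremental assistant text
		case EventComplete:
			_ = event.Response // full content, tool calls, token usage
		case EventError:
			slog.Error("stream failed", "error", event.Error)
		}
	}
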
+ 393 - 0
internal/llm/provider/openai_response.go

@@ -0,0 +1,393 @@
+package provider
+
+
+import (
+	"github.com/openai/openai-go"
+	"github.com/openai/openai-go/responses"
+	"github.com/sst/opencode/internal/llm/models"
+	"github.com/sst/opencode/internal/llm/tools"
+	"github.com/sst/opencode/internal/message"
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"time"
+
+	"log/slog"
+
+	"github.com/openai/openai-go/shared"
+	"github.com/sst/opencode/internal/config"
+	"github.com/sst/opencode/internal/status"
+)
+
+func (o *openaiClient) convertMessagesToResponseParams(messages []message.Message) responses.ResponseInputParam {
+	inputItems := responses.ResponseInputParam{}
+
+	inputItems = append(inputItems, responses.ResponseInputItemUnionParam{
+		OfMessage: &responses.EasyInputMessageParam{
+			Content: responses.EasyInputMessageContentUnionParam{OfString: openai.String(o.providerOptions.systemMessage)},
+			Role:    responses.EasyInputMessageRoleSystem,
+		},
+	})
+
+	for _, msg := range messages {
+		switch msg.Role {
+		case message.User:
+			inputItemContentList := responses.ResponseInputMessageContentListParam{
+				responses.ResponseInputContentUnionParam{
+					OfInputText: &responses.ResponseInputTextParam{
+						Text: msg.Content().String(),
+					},
+				},
+			}
+
+			for _, binaryContent := range msg.BinaryContent() {
+				inputItemContentList = append(inputItemContentList, responses.ResponseInputContentUnionParam{
+					OfInputImage: &responses.ResponseInputImageParam{
+						ImageURL: openai.String(binaryContent.String(models.ProviderOpenAI)),
+					},
+				})
+			}
+
+			userMsg := responses.ResponseInputItemUnionParam{
+				OfInputMessage: &responses.ResponseInputItemMessageParam{
+					Content: inputItemContentList,
+					Role:    string(responses.ResponseInputMessageItemRoleUser),
+				},
+			}
+			inputItems = append(inputItems, userMsg)
+
+		case message.Assistant:
+			if msg.Content().String() != "" {
+				assistantMsg := responses.ResponseInputItemUnionParam{
+					OfOutputMessage: &responses.ResponseOutputMessageParam{
+						Content: []responses.ResponseOutputMessageContentUnionParam{{
+							OfOutputText: &responses.ResponseOutputTextParam{
+								Text: msg.Content().String(),
+							},
+						}},
+					},
+				}
+				inputItems = append(inputItems, assistantMsg)
+			}
+
+			if len(msg.ToolCalls()) > 0 {
+				for _, call := range msg.ToolCalls() {
+					toolMsg := responses.ResponseInputItemUnionParam{
+						OfFunctionCall: &responses.ResponseFunctionToolCallParam{
+							CallID:    call.ID,
+							Name:      call.Name,
+							Arguments: call.Input,
+						},
+					}
+					inputItems = append(inputItems, toolMsg)
+				}
+			}
+
+		case message.Tool:
+			for _, result := range msg.ToolResults() {
+				toolMsg := responses.ResponseInputItemUnionParam{
+					OfFunctionCallOutput: &responses.ResponseInputItemFunctionCallOutputParam{
+						Output: result.Content,
+						CallID: result.ToolCallID,
+					},
+				}
+				inputItems = append(inputItems, toolMsg)
+			}
+		}
+	}
+
+	return inputItems
+}
+
+func (o *openaiClient) convertToResponseTools(tools []tools.BaseTool) []responses.ToolUnionParam {
+	outputTools := make([]responses.ToolUnionParam, len(tools))
+
+	for i, tool := range tools {
+		info := tool.Info()
+		outputTools[i] = responses.ToolUnionParam{
+			OfFunction: &responses.FunctionToolParam{
+				Name:        info.Name,
+				Description: openai.String(info.Description),
+				Parameters: map[string]any{
+					"type":       "object",
+					"properties": info.Parameters,
+					"required":   info.Required,
+				},
+			},
+		}
+	}
+
+	return outputTools
+}
+
+
+func (o *openaiClient) preparedResponseParams(input responses.ResponseInputParam, tools []responses.ToolUnionParam) responses.ResponseNewParams {
+	params := responses.ResponseNewParams{
+		Model: shared.ResponsesModel(o.providerOptions.model.APIModel),
+		Input: responses.ResponseNewParamsInputUnion{OfInputItemList: input},
+		Tools: tools,
+	}
+
+	params.MaxOutputTokens = openai.Int(o.providerOptions.maxTokens)
+
+	if o.providerOptions.model.CanReason == true {
+		switch o.options.reasoningEffort {
+		case "low":
+			params.Reasoning.Effort = shared.ReasoningEffortLow
+		case "medium":
+			params.Reasoning.Effort = shared.ReasoningEffortMedium
+		case "high":
+			params.Reasoning.Effort = shared.ReasoningEffortHigh
+		default:
+			params.Reasoning.Effort = shared.ReasoningEffortMedium
+		}
+	}
+
+	if o.providerOptions.model.Provider == models.ProviderOpenRouter {
+		params.WithExtraFields(map[string]any{
+			"provider": map[string]any{
+				"require_parameters": true,
+			},
+		})
+	}
+
+	return params
+}
+
+func (o *openaiClient) sendResponseMessages(ctx context.Context, messages []message.Message, tools []tools.BaseTool) (response *ProviderResponse, err error) {
+	params := o.preparedResponseParams(o.convertMessagesToResponseParams(messages), o.convertToResponseTools(tools))
+	cfg := config.Get()
+	if cfg.Debug {
+		jsonData, _ := json.Marshal(params)
+		slog.Debug("Prepared messages", "messages", string(jsonData))
+	}
+	attempts := 0
+	for {
+		attempts++
+		openaiResponse, err := o.client.Responses.New(
+			ctx,
+			params,
+		)
+		// If there is an error we are going to see if we can retry the call
+		if err != nil {
+			retry, after, retryErr := o.shouldRetry(attempts, err)
+			duration := time.Duration(after) * time.Millisecond
+			if retryErr != nil {
+				return nil, retryErr
+			}
+			if retry {
+				status.Warn(fmt.Sprintf("Retrying due to rate limit... attempt %d of %d", attempts, maxRetries), status.WithDuration(duration))
+				select {
+				case <-ctx.Done():
+					return nil, ctx.Err()
+				case <-time.After(duration):
+					continue
+				}
+			}
+			return nil, retryErr
+		}
+
+		content := ""
+		if openaiResponse.OutputText() != "" {
+			content = openaiResponse.OutputText()
+		}
+
+		toolCalls := o.responseToolCalls(*openaiResponse)
+		finishReason := o.finishReason("stop")
+
+		if len(toolCalls) > 0 {
+			finishReason = message.FinishReasonToolUse
+		}
+
+		return &ProviderResponse{
+			Content:      content,
+			ToolCalls:    toolCalls,
+			Usage:        o.responseUsage(*openaiResponse),
+			FinishReason: finishReason,
+		}, nil
+	}
+}
+
+func (o *openaiClient) streamResponseMessages(ctx context.Context, messages []message.Message, tools []tools.BaseTool) <-chan ProviderEvent {
+	eventChan := make(chan ProviderEvent)
+
+	params := o.preparedResponseParams(o.convertMessagesToResponseParams(messages), o.convertToResponseTools(tools))
+
+	cfg := config.Get()
+	if cfg.Debug {
+		jsonData, _ := json.Marshal(params)
+		slog.Debug("Prepared messages", "messages", string(jsonData))
+	}
+
+	attempts := 0
+
+	go func() {
+		for {
+			attempts++
+			stream := o.client.Responses.NewStreaming(ctx, params)
+			outputText := ""
+			currentToolCallID := ""
+			for stream.Next() {
+				event := stream.Current()
+
+				switch event := event.AsAny().(type) {
+				case responses.ResponseCompletedEvent:
+					toolCalls := o.responseToolCalls(event.Response)
+					finishReason := o.finishReason("stop")
+
+					if len(toolCalls) > 0 {
+						finishReason = message.FinishReasonToolUse
+					}
+
+					eventChan <- ProviderEvent{
+						Type: EventComplete,
+						Response: &ProviderResponse{
+							Content:      outputText,
+							ToolCalls:    toolCalls,
+							Usage:        o.responseUsage(event.Response),
+							FinishReason: finishReason,
+						},
+					}
+					close(eventChan)
+					return
+
+				case responses.ResponseTextDeltaEvent:
+					outputText += event.Delta
+					eventChan <- ProviderEvent{
+						Type:    EventContentDelta,
+						Content: event.Delta,
+					}
+
+				case responses.ResponseTextDoneEvent:
+					eventChan <- ProviderEvent{
+						Type:    EventContentStop,
+						Content: outputText,
+					}
+					close(eventChan)
+					return
+
+				case responses.ResponseOutputItemAddedEvent:
+					if event.Item.Type == "function_call" {
+						currentToolCallID = event.Item.ID
+						eventChan <- ProviderEvent{
+							Type: EventToolUseStart,
+							ToolCall: &message.ToolCall{
+								ID:       event.Item.ID,
+								Name:     event.Item.Name,
+								Finished: false,
+							},
+						}
+					}
+
+				case responses.ResponseFunctionCallArgumentsDeltaEvent:
+					if event.ItemID == currentToolCallID {
+						eventChan <- ProviderEvent{
+							Type: EventToolUseDelta,
+							ToolCall: &message.ToolCall{
+								ID:       currentToolCallID,
+								Finished: false,
+								Input:    event.Delta,
+							},
+						}
+					}
+
+				case responses.ResponseFunctionCallArgumentsDoneEvent:
+					if event.ItemID == currentToolCallID {
+						eventChan <- ProviderEvent{
+							Type: EventToolUseStop,
+							ToolCall: &message.ToolCall{
+								ID:    currentToolCallID,
+								Input: event.Arguments,
+							},
+						}
+						currentToolCallID = ""
+					}
+
+				case responses.ResponseOutputItemDoneEvent:
+					if event.Item.Type == "function_call" {
+						eventChan <- ProviderEvent{
+							Type: EventToolUseStop,
+							ToolCall: &message.ToolCall{
+								ID:       event.Item.ID,
+								Name:     event.Item.Name,
+								Input:    event.Item.Arguments,
+								Finished: true,
+							},
+						}
+						currentToolCallID = ""
+					}
+
+				}
+			}
+
+			err := stream.Err()
+			if err == nil || errors.Is(err, io.EOF) {
+				close(eventChan)
+				return
+			}
+
+			// If there is an error we are going to see if we can retry the call
+			retry, after, retryErr := o.shouldRetry(attempts, err)
+			duration := time.Duration(after) * time.Millisecond
+			if retryErr != nil {
+				eventChan <- ProviderEvent{Type: EventError, Error: retryErr}
+				close(eventChan)
+				return
+			}
+			if retry {
+				status.Warn(fmt.Sprintf("Retrying due to rate limit... attempt %d of %d", attempts, maxRetries), status.WithDuration(duration))
+				select {
+				case <-ctx.Done():
+					// context cancelled
+					if ctx.Err() == nil {
+						eventChan <- ProviderEvent{Type: EventError, Error: ctx.Err()}
+					}
+					close(eventChan)
+					return
+				case <-time.After(duration):
+					continue
+				}
+			}
+			eventChan <- ProviderEvent{Type: EventError, Error: retryErr}
+			close(eventChan)
+			return
+		}
+	}()
+
+	return eventChan
+}
+
+
+func (o *openaiClient) responseToolCalls(response responses.Response) []message.ToolCall {
+	var toolCalls []message.ToolCall
+
+	for _, output := range response.Output {
+		if output.Type == "function_call" {
+			call := output.AsFunctionCall()
+			toolCall := message.ToolCall{
+				ID:       call.ID,
+				Name:     call.Name,
+				Input:    call.Arguments,
+				Type:     "function",
+				Finished: true,
+			}
+			toolCalls = append(toolCalls, toolCall)
+		}
+	}
+
+	return toolCalls
+}
+
+func (o *openaiClient) responseUsage(response responses.Response) TokenUsage {
+	cachedTokens := response.Usage.InputTokensDetails.CachedTokens
+	inputTokens := response.Usage.InputTokens - cachedTokens
+
+	return TokenUsage{
+		InputTokens:         inputTokens,
+		OutputTokens:        response.Usage.OutputTokens,
+		CacheCreationTokens: 0, // OpenAI doesn't provide this directly
+		CacheReadTokens:     cachedTokens,
+	}
+}

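Unlike the chat-completion stream, which only reports tool calls once the accumulator is complete, the Responses stream surfaces them incrementally: EventToolUseStart, zero or more EventToolUseDelta chunks, then EventToolUseStop carrying the full argument JSON. A consumer sketch (events is the channel returned above; strings is imported):

	var args strings.Builder
	for event := range events {
		switch event.Type {
		case EventToolUseStart:
			args.Reset() // new call: ToolCall.ID and Name are set
		case EventToolUseDelta:
			args.WriteString(event.ToolCall.Input) // partial arguments
		case EventToolUseStop:
			// event.ToolCall.Input already holds the complete arguments,
			// so accumulating the deltas is optional
		}
	}
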
+ 1 - 1
internal/llm/provider/provider.go

@@ -3,11 +3,11 @@ package provider
 import (
 	"context"
 	"fmt"
+	"log/slog"
 
 	"github.com/sst/opencode/internal/llm/models"
 	"github.com/sst/opencode/internal/llm/tools"
 	"github.com/sst/opencode/internal/message"
-	"log/slog"
 )
 
 type EventType string