| 123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149 |
- package provider
- import (
- "context"
- "errors"
- "fmt"
- "log/slog"
- "github.com/openai/openai-go"
- "github.com/openai/openai-go/option"
- "github.com/sst/opencode/internal/llm/models"
- "github.com/sst/opencode/internal/llm/tools"
- "github.com/sst/opencode/internal/message"
- )
// openaiOptions holds OpenAI-specific client configuration, populated via
// the OpenAIOption functional options below before the SDK client is built.
type openaiOptions struct {
	// baseURL overrides the SDK's default API endpoint when non-empty.
	baseURL string
	// disableCache is set by WithOpenAIDisableCache.
	// NOTE(review): presumably disables prompt caching — its consumer is not
	// visible in this chunk; confirm where it is read.
	disableCache bool
	// reasoningEffort is one of "low", "medium", or "high"; newOpenAIClient
	// defaults it to "medium".
	reasoningEffort string
	// extraHeaders are added to every outgoing request; may be nil.
	extraHeaders map[string]string
}
- type OpenAIOption func(*openaiOptions)
// openaiClient implements the provider client against the OpenAI API,
// dispatching between the Chat Completions and Responses endpoints per model.
type openaiClient struct {
	// providerOptions carries the provider-agnostic settings (model, API key, ...).
	providerOptions providerClientOptions
	// options carries the OpenAI-specific settings resolved from OpenAIOption values.
	options openaiOptions
	// client is the configured openai-go SDK client.
	client openai.Client
}
- type OpenAIClient ProviderClient
- func newOpenAIClient(opts providerClientOptions) OpenAIClient {
- openaiOpts := openaiOptions{
- reasoningEffort: "medium",
- }
- for _, o := range opts.openaiOptions {
- o(&openaiOpts)
- }
- openaiClientOptions := []option.RequestOption{}
- if opts.apiKey != "" {
- openaiClientOptions = append(openaiClientOptions, option.WithAPIKey(opts.apiKey))
- }
- if openaiOpts.baseURL != "" {
- openaiClientOptions = append(openaiClientOptions, option.WithBaseURL(openaiOpts.baseURL))
- }
- if openaiOpts.extraHeaders != nil {
- for key, value := range openaiOpts.extraHeaders {
- openaiClientOptions = append(openaiClientOptions, option.WithHeader(key, value))
- }
- }
- client := openai.NewClient(openaiClientOptions...)
- return &openaiClient{
- providerOptions: opts,
- options: openaiOpts,
- client: client,
- }
- }
- func (o *openaiClient) send(ctx context.Context, messages []message.Message, tools []tools.BaseTool) (response *ProviderResponse, err error) {
- if o.providerOptions.model.ID == models.OpenAIModels[models.CodexMini].ID || o.providerOptions.model.ID == models.OpenAIModels[models.O1Pro].ID {
- return o.sendResponseMessages(ctx, messages, tools)
- }
- return o.sendChatcompletionMessage(ctx, messages, tools)
- }
- func (o *openaiClient) stream(ctx context.Context, messages []message.Message, tools []tools.BaseTool) <-chan ProviderEvent {
- if o.providerOptions.model.ID == models.OpenAIModels[models.CodexMini].ID || o.providerOptions.model.ID == models.OpenAIModels[models.O1Pro].ID {
- return o.streamResponseMessages(ctx, messages, tools)
- }
- return o.streamChatCompletionMessages(ctx, messages, tools)
- }
- func (o *openaiClient) finishReason(reason string) message.FinishReason {
- switch reason {
- case "stop":
- return message.FinishReasonEndTurn
- case "length":
- return message.FinishReasonMaxTokens
- case "tool_calls":
- return message.FinishReasonToolUse
- default:
- return message.FinishReasonUnknown
- }
- }
- func (o *openaiClient) shouldRetry(attempts int, err error) (bool, int64, error) {
- var apierr *openai.Error
- if !errors.As(err, &apierr) {
- return false, 0, err
- }
- if apierr.StatusCode != 429 && apierr.StatusCode != 500 {
- return false, 0, err
- }
- if attempts > maxRetries {
- return false, 0, fmt.Errorf("maximum retry attempts reached for rate limit: %d retries", maxRetries)
- }
- retryMs := 0
- retryAfterValues := apierr.Response.Header.Values("Retry-After")
- backoffMs := 2000 * (1 << (attempts - 1))
- jitterMs := int(float64(backoffMs) * 0.2)
- retryMs = backoffMs + jitterMs
- if len(retryAfterValues) > 0 {
- if _, err := fmt.Sscanf(retryAfterValues[0], "%d", &retryMs); err == nil {
- retryMs = retryMs * 1000
- }
- }
- return true, int64(retryMs), nil
- }
- func WithOpenAIBaseURL(baseURL string) OpenAIOption {
- return func(options *openaiOptions) {
- options.baseURL = baseURL
- }
- }
- func WithOpenAIExtraHeaders(headers map[string]string) OpenAIOption {
- return func(options *openaiOptions) {
- options.extraHeaders = headers
- }
- }
- func WithOpenAIDisableCache() OpenAIOption {
- return func(options *openaiOptions) {
- options.disableCache = true
- }
- }
- func WithReasoningEffort(effort string) OpenAIOption {
- return func(options *openaiOptions) {
- defaultReasoningEffort := "medium"
- switch effort {
- case "low", "medium", "high":
- defaultReasoningEffort = effort
- default:
- slog.Warn("Invalid reasoning effort, using default: medium")
- }
- options.reasoningEffort = defaultReasoningEffort
- }
- }
|