adamdottv 9 месяцев назад
Родитель
Commit
ddbb217d0d

+ 6 - 4
internal/llm/provider/anthropic.go

@@ -224,15 +224,16 @@ func (a *anthropicClient) send(ctx context.Context, messages []message.Message,
 		if err != nil {
 			slog.Error("Error in Anthropic API call", "error", err)
 			retry, after, retryErr := a.shouldRetry(attempts, err)
+			duration := time.Duration(after) * time.Millisecond
 			if retryErr != nil {
 				return nil, retryErr
 			}
 			if retry {
-				status.Warn(fmt.Sprintf("Retrying due to rate limit... attempt %d of %d", attempts, maxRetries))
+				status.Warn(fmt.Sprintf("Retrying due to rate limit... attempt %d of %d", attempts, maxRetries), status.WithDuration(duration))
 				select {
 				case <-ctx.Done():
 					return nil, ctx.Err()
-				case <-time.After(time.Duration(after) * time.Millisecond):
+				case <-time.After(duration):
 					continue
 				}
 			}
@@ -360,13 +361,14 @@ func (a *anthropicClient) stream(ctx context.Context, messages []message.Message
 			}
 			// If there is an error we are going to see if we can retry the call
 			retry, after, retryErr := a.shouldRetry(attempts, err)
+			duration := time.Duration(after) * time.Millisecond
 			if retryErr != nil {
 				eventChan <- ProviderEvent{Type: EventError, Error: retryErr}
 				close(eventChan)
 				return
 			}
 			if retry {
-				status.Warn(fmt.Sprintf("Retrying due to rate limit... attempt %d of %d", attempts, maxRetries))
+				status.Warn(fmt.Sprintf("Retrying due to rate limit... attempt %d of %d", attempts, maxRetries), status.WithDuration(duration))
 				select {
 				case <-ctx.Done():
 					// context cancelled
@@ -375,7 +377,7 @@ func (a *anthropicClient) stream(ctx context.Context, messages []message.Message
 					}
 					close(eventChan)
 					return
-				case <-time.After(time.Duration(after) * time.Millisecond):
+				case <-time.After(duration):
 					continue
 				}
 			}

+ 6 - 4
internal/llm/provider/gemini.go

@@ -197,15 +197,16 @@ func (g *geminiClient) send(ctx context.Context, messages []message.Message, too
 		// If there is an error we are going to see if we can retry the call
 		if err != nil {
 			retry, after, retryErr := g.shouldRetry(attempts, err)
+			duration := time.Duration(after) * time.Millisecond
 			if retryErr != nil {
 				return nil, retryErr
 			}
 			if retry {
-				status.Warn(fmt.Sprintf("Retrying due to rate limit... attempt %d of %d", attempts, maxRetries))
+				status.Warn(fmt.Sprintf("Retrying due to rate limit... attempt %d of %d", attempts, maxRetries), status.WithDuration(duration))
 				select {
 				case <-ctx.Done():
 					return nil, ctx.Err()
-				case <-time.After(time.Duration(after) * time.Millisecond):
+				case <-time.After(duration):
 					continue
 				}
 			}
@@ -292,12 +293,13 @@ func (g *geminiClient) stream(ctx context.Context, messages []message.Message, t
 			for resp, err := range chat.SendMessageStream(ctx, lastMsgParts...) {
 				if err != nil {
 					retry, after, retryErr := g.shouldRetry(attempts, err)
+					duration := time.Duration(after) * time.Millisecond
 					if retryErr != nil {
 						eventChan <- ProviderEvent{Type: EventError, Error: retryErr}
 						return
 					}
 					if retry {
-						status.Warn(fmt.Sprintf("Retrying due to rate limit... attempt %d of %d", attempts, maxRetries))
+						status.Warn(fmt.Sprintf("Retrying due to rate limit... attempt %d of %d", attempts, maxRetries), status.WithDuration(duration))
 						select {
 						case <-ctx.Done():
 							if ctx.Err() != nil {
@@ -305,7 +307,7 @@ func (g *geminiClient) stream(ctx context.Context, messages []message.Message, t
 							}
 
 							return
-						case <-time.After(time.Duration(after) * time.Millisecond):
+						case <-time.After(duration):
 							break
 						}
 					} else {

+ 6 - 4
internal/llm/provider/openai.go

@@ -211,15 +211,16 @@ func (o *openaiClient) send(ctx context.Context, messages []message.Message, too
 		// If there is an error we are going to see if we can retry the call
 		if err != nil {
 			retry, after, retryErr := o.shouldRetry(attempts, err)
+			duration := time.Duration(after) * time.Millisecond
 			if retryErr != nil {
 				return nil, retryErr
 			}
 			if retry {
-				status.Warn(fmt.Sprintf("Retrying due to rate limit... attempt %d of %d", attempts, maxRetries))
+				status.Warn(fmt.Sprintf("Retrying due to rate limit... attempt %d of %d", attempts, maxRetries), status.WithDuration(duration))
 				select {
 				case <-ctx.Done():
 					return nil, ctx.Err()
-				case <-time.After(time.Duration(after) * time.Millisecond):
+				case <-time.After(duration):
 					continue
 				}
 			}
@@ -315,13 +316,14 @@ func (o *openaiClient) stream(ctx context.Context, messages []message.Message, t
 
 			// If there is an error we are going to see if we can retry the call
 			retry, after, retryErr := o.shouldRetry(attempts, err)
+			duration := time.Duration(after) * time.Millisecond
 			if retryErr != nil {
 				eventChan <- ProviderEvent{Type: EventError, Error: retryErr}
 				close(eventChan)
 				return
 			}
 			if retry {
-				status.Warn(fmt.Sprintf("Retrying due to rate limit... attempt %d of %d", attempts, maxRetries))
+				status.Warn(fmt.Sprintf("Retrying due to rate limit... attempt %d of %d", attempts, maxRetries), status.WithDuration(duration))
 				select {
 				case <-ctx.Done():
 					// context cancelled
@@ -330,7 +332,7 @@ func (o *openaiClient) stream(ctx context.Context, messages []message.Message, t
 					}
 					close(eventChan)
 					return
-				case <-time.After(time.Duration(after) * time.Millisecond):
+				case <-time.After(duration):
 					continue
 				}
 			}

+ 49 - 24
internal/status/status.go

@@ -20,9 +20,28 @@ const (
 )
 
 type StatusMessage struct {
-	Level     Level     `json:"level"`
-	Message   string    `json:"message"`
-	Timestamp time.Time `json:"timestamp"`
+	Level     Level         `json:"level"`
+	Message   string        `json:"message"`
+	Timestamp time.Time     `json:"timestamp"`
+	Critical  bool          `json:"critical"`
+	Duration  time.Duration `json:"duration"`
+}
+
+// StatusOption is a function that configures a status message
+type StatusOption func(*StatusMessage)
+
+// WithCritical marks a status message as critical, causing it to be displayed immediately
+func WithCritical(critical bool) StatusOption {
+	return func(msg *StatusMessage) {
+		msg.Critical = critical
+	}
+}
+
+// WithDuration sets a custom display duration for a status message
+func WithDuration(duration time.Duration) StatusOption {
+	return func(msg *StatusMessage) {
+		msg.Duration = duration
+	}
 }
 
 const (
@@ -32,10 +51,10 @@ const (
 type Service interface {
 	pubsub.Subscriber[StatusMessage]
 
-	Info(message string)
-	Warn(message string)
-	Error(message string)
-	Debug(message string)
+	Info(message string, opts ...StatusOption)
+	Warn(message string, opts ...StatusOption)
+	Error(message string, opts ...StatusOption)
+	Debug(message string, opts ...StatusOption)
 }
 
 type service struct {
@@ -63,32 +82,38 @@ func GetService() Service {
 	return globalStatusService
 }
 
-func (s *service) Info(message string) {
-	s.publish(LevelInfo, message)
+func (s *service) Info(message string, opts ...StatusOption) {
+	s.publish(LevelInfo, message, opts...)
 	slog.Info(message)
 }
 
-func (s *service) Warn(message string) {
-	s.publish(LevelWarn, message)
+func (s *service) Warn(message string, opts ...StatusOption) {
+	s.publish(LevelWarn, message, opts...)
 	slog.Warn(message)
 }
 
-func (s *service) Error(message string) {
-	s.publish(LevelError, message)
+func (s *service) Error(message string, opts ...StatusOption) {
+	s.publish(LevelError, message, opts...)
 	slog.Error(message)
 }
 
-func (s *service) Debug(message string) {
-	s.publish(LevelDebug, message)
+func (s *service) Debug(message string, opts ...StatusOption) {
+	s.publish(LevelDebug, message, opts...)
 	slog.Debug(message)
 }
 
-func (s *service) publish(level Level, messageText string) {
+func (s *service) publish(level Level, messageText string, opts ...StatusOption) {
 	statusMsg := StatusMessage{
 		Level:     level,
 		Message:   messageText,
 		Timestamp: time.Now(),
 	}
+
+	// Apply all options
+	for _, opt := range opts {
+		opt(&statusMsg)
+	}
+
 	s.broker.Publish(EventStatusPublished, statusMsg)
 }
 
@@ -96,20 +121,20 @@ func (s *service) Subscribe(ctx context.Context) <-chan pubsub.Event[StatusMessa
 	return s.broker.Subscribe(ctx)
 }
 
-func Info(message string) {
-	GetService().Info(message)
+func Info(message string, opts ...StatusOption) {
+	GetService().Info(message, opts...)
 }
 
-func Warn(message string) {
-	GetService().Warn(message)
+func Warn(message string, opts ...StatusOption) {
+	GetService().Warn(message, opts...)
 }
 
-func Error(message string) {
-	GetService().Error(message)
+func Error(message string, opts ...StatusOption) {
+	GetService().Error(message, opts...)
 }
 
-func Debug(message string) {
-	GetService().Debug(message)
+func Debug(message string, opts ...StatusOption) {
+	GetService().Debug(message, opts...)
 }
 
 func Subscribe(ctx context.Context) <-chan pubsub.Event[StatusMessage] {

+ 92 - 35
internal/tui/components/core/status.go

@@ -24,17 +24,11 @@ type StatusCmp interface {
 }
 
 type statusCmp struct {
-	app            *app.App
-	statusMessages []statusMessage
-	width          int
-	messageTTL     time.Duration
-}
-
-type statusMessage struct {
-	Level     status.Level
-	Message   string
-	Timestamp time.Time
-	ExpiresAt time.Time
+	app         *app.App
+	queue       []status.StatusMessage
+	width       int
+	messageTTL  time.Duration
+	activeUntil time.Time
 }
 
 // clearMessageCmd is a command that clears status messages after a timeout
@@ -60,23 +54,50 @@ func (m statusCmp) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
 		return m, nil
 	case pubsub.Event[status.StatusMessage]:
 		if msg.Type == status.EventStatusPublished {
-			statusMsg := statusMessage{
-				Level:     msg.Payload.Level,
-				Message:   msg.Payload.Message,
-				Timestamp: msg.Payload.Timestamp,
-				ExpiresAt: msg.Payload.Timestamp.Add(m.messageTTL),
+			// If this is a critical message, move it to the front of the queue
+			if msg.Payload.Critical {
+				// Insert at the front of the queue
+				m.queue = append([]status.StatusMessage{msg.Payload}, m.queue...)
+
+				// Reset active time to show critical message immediately
+				m.activeUntil = time.Time{}
+			} else {
+				// Otherwise, just add it to the queue
+				m.queue = append(m.queue, msg.Payload)
+
+				// If this is the first message and nothing is active, activate it immediately
+				if len(m.queue) == 1 && m.activeUntil.IsZero() {
+					now := time.Now()
+					duration := m.messageTTL
+					if msg.Payload.Duration > 0 {
+						duration = msg.Payload.Duration
+					}
+					m.activeUntil = now.Add(duration)
+				}
 			}
-			m.statusMessages = append(m.statusMessages, statusMsg)
 		}
 	case statusCleanupMsg:
-		// Remove expired messages
-		var activeMessages []statusMessage
-		for _, sm := range m.statusMessages {
-			if sm.ExpiresAt.After(msg.time) {
-				activeMessages = append(activeMessages, sm)
+		now := msg.time
+
+		// If the active message has expired, remove it and activate the next one
+		if !m.activeUntil.IsZero() && m.activeUntil.Before(now) {
+			// Current message expired, remove it if we have one
+			if len(m.queue) > 0 {
+				m.queue = m.queue[1:]
 			}
+			m.activeUntil = time.Time{}
 		}
-		m.statusMessages = activeMessages
+
+		// If we have messages in queue but none are active, activate the first one
+		if len(m.queue) > 0 && m.activeUntil.IsZero() {
+			// Use custom duration if specified, otherwise use default
+			duration := m.messageTTL
+			if m.queue[0].Duration > 0 {
+				duration = m.queue[0].Duration
+			}
+			m.activeUntil = now.Add(duration)
+		}
+
 		return m, m.clearMessageCmd()
 	}
 	return m, nil
@@ -155,12 +176,14 @@ func (m statusCmp) View() string {
 			lipgloss.Width(diagnostics),
 	)
 
+	const minInlineWidth = 30
+
 	// Display the first status message if available
-	if len(m.statusMessages) > 0 {
-		sm := m.statusMessages[0]
+	var statusMessage string
+	if len(m.queue) > 0 {
+		sm := m.queue[0]
 		infoStyle := styles.Padded().
-			Foreground(t.Background()).
-			Width(statusWidth)
+			Foreground(t.Background())
 
 		switch sm.Level {
 		case "info":
@@ -176,11 +199,27 @@ func (m statusCmp) View() string {
 		// Truncate message if it's longer than available width
 		msg := sm.Message
 		availWidth := statusWidth - 10
-		if len(msg) > availWidth && availWidth > 0 {
-			msg = msg[:availWidth] + "..."
-		}
 
-		status += infoStyle.Render(msg)
+		// If we have enough space, show inline
+		if availWidth >= minInlineWidth {
+			if len(msg) > availWidth && availWidth > 0 {
+				msg = msg[:availWidth] + "..."
+			}
+			status += infoStyle.Width(statusWidth).Render(msg)
+		} else {
+			// Otherwise, prepare a full-width message to show above
+			if len(msg) > m.width-10 && m.width > 10 {
+				msg = msg[:m.width-10] + "..."
+			}
+			statusMessage = infoStyle.Width(m.width).Render(msg)
+
+			// Add empty space in the status bar
+			status += styles.Padded().
+				Foreground(t.Text()).
+				Background(t.BackgroundSecondary()).
+				Width(statusWidth).
+				Render("")
+		}
 	} else {
 		status += styles.Padded().
 			Foreground(t.Text()).
@@ -191,7 +230,14 @@ func (m statusCmp) View() string {
 
 	status += diagnostics
 	status += modelName
-	return status
+
+	// If we have a separate status message, prepend it
+	if statusMessage != "" {
+		return statusMessage + "\n" + status
+	} else {
+		blank := styles.BaseStyle().Background(t.Background()).Width(m.width).Render("")
+		return blank + "\n" + status
+	}
 }
 
 func (m *statusCmp) projectDiagnostics() string {
@@ -234,6 +280,16 @@ func (m *statusCmp) projectDiagnostics() string {
 		}
 	}
 
+	if len(errorDiagnostics) == 0 &&
+		len(warnDiagnostics) == 0 &&
+		len(infoDiagnostics) == 0 &&
+		len(hintDiagnostics) == 0 {
+		return styles.ForceReplaceBackgroundWithLipgloss(
+			styles.Padded().Render("No diagnostics"),
+			t.BackgroundDarker(),
+		)
+	}
+
 	diagnostics := []string{}
 
 	errStr := lipgloss.NewStyle().
@@ -293,9 +349,10 @@ func NewStatusCmp(app *app.App) StatusCmp {
 	helpWidget = getHelpWidget("")
 
 	statusComponent := &statusCmp{
-		app:            app,
-		statusMessages: []statusMessage{},
-		messageTTL:     4 * time.Second,
+		app:         app,
+		queue:       []status.StatusMessage{},
+		messageTTL:  4 * time.Second,
+		activeUntil: time.Time{},
 	}
 
 	return statusComponent