Browse Source

feat: dont save detail when hours < 0 (#410)

* feat: dont save detail when hours < 0

* fix: pre clean retry logs

* chore: shrink detail size

* fix: clean detail

* fix: ci lint

* feat: ignore migrate log error when use double database

* fix: upstream 429 no need save detail
zijiren 4 months ago
parent
commit
4f7d682f4e
4 changed files with 127 additions and 13 deletions
  1. 3 3
      core/common/config/config.go
  2. 9 1
      core/model/batch.go
  3. 16 2
      core/model/log.go
  4. 99 7
      core/model/main.go

+ 3 - 3
core/common/config/config.go

@@ -14,9 +14,9 @@ var (
 	logStorageHours              int64 // default 0 means no limit
 	retryLogStorageHours         int64 // default 0 means no limit
 	saveAllLogDetail             atomic.Bool
-	logDetailRequestBodyMaxSize  int64 = 128 * 1024 // 128KB
-	logDetailResponseBodyMaxSize int64 = 128 * 1024 // 128KB
-	logDetailStorageHours        int64 = 3 * 24     // 3 days
+	logDetailRequestBodyMaxSize  int64 = 8 * 1024 // 8KB
+	logDetailResponseBodyMaxSize int64 = 8 * 1024 // 8KB
+	logDetailStorageHours        int64 = 3 * 24   // 3 days
 	cleanLogBatchSize            int64 = 5000
 	notifyNote                   atomic.Value
 	ipGroupsThreshold            int64

+ 9 - 1
core/model/batch.go

@@ -2,6 +2,7 @@ package model
 
 import (
 	"context"
+	"net/http"
 	"sync"
 	"time"
 
@@ -323,6 +324,12 @@ func BatchRecordLogs(
 		now = time.Now()
 	}
 
+	if code == http.StatusTooManyRequests ||
+		config.GetLogDetailStorageHours() < 0 ||
+		config.GetLogStorageHours() < 0 {
+		requestDetail = nil
+	}
+
 	if downstreamResult {
 		if config.GetLogStorageHours() >= 0 {
 			err = RecordConsumeLog(
@@ -351,7 +358,8 @@ func BatchRecordLogs(
 			)
 		}
 	} else {
-		if config.GetRetryLogStorageHours() >= 0 {
+		if config.GetLogStorageHours() >= 0 &&
+			config.GetRetryLogStorageHours() >= 0 {
 			err = RecordRetryLog(
 				requestID,
 				now,

+ 16 - 2
core/model/log.go

@@ -28,12 +28,18 @@ func (d *RequestDetail) BeforeSave(_ *gorm.DB) (err error) {
 		int64(len(d.RequestBody)) > reqMax {
 		d.RequestBody = common.TruncateByRune(d.RequestBody, int(reqMax)) + "..."
 		d.RequestBodyTruncated = true
+	} else if reqMax < 0 {
+		d.RequestBody = ""
+		d.RequestBodyTruncated = true
 	}
 
 	if respMax := config.GetLogDetailResponseBodyMaxSize(); respMax > 0 &&
 		int64(len(d.ResponseBody)) > respMax {
 		d.ResponseBody = common.TruncateByRune(d.ResponseBody, int(respMax)) + "..."
 		d.ResponseBodyTruncated = true
+	} else if respMax < 0 {
+		d.ResponseBody = ""
+		d.ResponseBodyTruncated = true
 	}
 
 	return err
@@ -117,7 +123,7 @@ func CreateLogIndexes(db *gorm.DB) error {
 }
 
 const (
-	contentMaxSize = 2 * 1024 // 2KB
+	contentMaxSize = 1024 // 1KB
 )
 
 func (l *Log) BeforeCreate(_ *gorm.DB) (err error) {
@@ -236,6 +242,10 @@ func cleanLog(batchSize int) error {
 	}
 
 	retryLogStorageHours := config.GetRetryLogStorageHours()
+	if retryLogStorageHours == 0 {
+		retryLogStorageHours = logStorageHours
+	}
+
 	if retryLogStorageHours != 0 {
 		subQuery := LogDB.
 			Model(&RetryLog{}).
@@ -273,7 +283,11 @@ func optimizeLog() error {
 
 func cleanLogDetail(batchSize int) error {
 	detailStorageHours := config.GetLogDetailStorageHours()
-	if detailStorageHours <= 0 {
+	if detailStorageHours == 0 {
+		detailStorageHours = config.GetLogStorageHours()
+	}
+
+	if detailStorageHours == 0 {
 		return nil
 	}
 

+ 99 - 7
core/model/main.go

@@ -180,17 +180,40 @@ func InitLogDB(batchSize int) error {
 
 	log.Info("log database migration started")
 
-	err := migrateLOGDB(batchSize)
+	err := migrateLogDB(batchSize)
 	if err != nil {
-		return fmt.Errorf("failed to migrate log database: %w", err)
-	}
+		// ignore migrate log error when use double database
+		if LogDB == DB {
+			return fmt.Errorf("failed to migrate log database: %w", err)
+		}
+
+		log.Errorf("failed to migrate log database: %v", err)
+		log.Warn("log database migration with backend started")
 
-	log.Info("log database migrated")
+		go migrateLogDBBackend(batchSize)
+	} else {
+		log.Info("log database migrated")
+	}
 
 	return nil
 }
 
-func migrateLOGDB(batchSize int) error {
+func migrateLogDBBackend(batchSize int) {
+	ticker := time.NewTicker(time.Minute)
+	defer ticker.Stop()
+
+	for range ticker.C {
+		err := migrateLogDB(batchSize)
+		if err == nil {
+			return
+		}
+
+		log.Errorf("failed to migrate log database: %v", err)
+		ticker.Reset(time.Minute)
+	}
+}
+
+func migrateLogDB(batchSize int) error {
 	// Pre-migration cleanup to remove expired data
 	err := preMigrationCleanup(batchSize)
 	if err != nil {
@@ -325,6 +348,15 @@ func preMigrationCleanup(batchSize int) error {
 		return fmt.Errorf("failed to cleanup logs: %w", err)
 	}
 
+	// Clean up retry logs
+	err = preMigrationCleanupRetryLogs(batchSize)
+	// A missing retry-log table is benign, but must not abort the whole
+	// cleanup — the request-details cleanup below still has to run.
+	if err != nil && !ignoreNoSuchTable(err) {
+		return fmt.Errorf("failed to cleanup retry logs: %w", err)
+	}
+
 	// Clean up request details
 	err = preMigrationCleanupRequestDetails(batchSize)
 	if err != nil {
@@ -342,7 +374,7 @@ func preMigrationCleanup(batchSize int) error {
 // preMigrationCleanupLogs cleans up expired logs using ID-based batch deletion
 func preMigrationCleanupLogs(batchSize int) error {
 	logStorageHours := config.GetLogStorageHours()
-	if logStorageHours <= 0 {
+	if logStorageHours == 0 {
 		return nil
 	}
 
@@ -391,10 +423,70 @@ func preMigrationCleanupLogs(batchSize int) error {
 	return nil
 }
 
+// preMigrationCleanupRetryLogs cleans up expired retry logs using ID-based batch deletion
+func preMigrationCleanupRetryLogs(batchSize int) error {
+	logStorageHours := config.GetRetryLogStorageHours()
+	if logStorageHours == 0 {
+		logStorageHours = config.GetLogStorageHours()
+	}
+
+	if logStorageHours == 0 {
+		return nil
+	}
+
+	if batchSize <= 0 {
+		batchSize = defaultCleanLogBatchSize
+	}
+
+	cutoffTime := time.Now().Add(-time.Duration(logStorageHours) * time.Hour)
+
+	// First, get the IDs to delete
+	ids := make([]int, 0, batchSize)
+
+	for {
+		ids = ids[:0]
+
+		err := LogDB.Model(&RetryLog{}).
+			Select("id").
+			Where("created_at < ?", cutoffTime).
+			Limit(batchSize).
+			Find(&ids).Error
+		if err != nil {
+			return err
+		}
+
+		// If no IDs found, we're done
+		if len(ids) == 0 {
+			break
+		}
+
+		// Delete by IDs
+		err = LogDB.Where("id IN (?)", ids).
+			Session(&gorm.Session{SkipDefaultTransaction: true}).
+			Delete(&RetryLog{}).Error
+		if err != nil {
+			return err
+		}
+
+		log.Infof("deleted %d expired retry log records", len(ids))
+
+		// If we got less than batchSize, we're done
+		if len(ids) < batchSize {
+			break
+		}
+	}
+
+	return nil
+}
+
 // preMigrationCleanupRequestDetails cleans up expired request details using ID-based batch deletion
 func preMigrationCleanupRequestDetails(batchSize int) error {
 	detailStorageHours := config.GetLogDetailStorageHours()
-	if detailStorageHours <= 0 {
+	if detailStorageHours == 0 {
+		detailStorageHours = config.GetLogStorageHours()
+	}
+
+	if detailStorageHours == 0 {
 		return nil
 	}