
feat: add new GPT-4.5 preview model ratios

[email protected] 10 months ago
parent
commit
d6fd50e382

+ 26 - 21
common/model-ratio.go

@@ -50,24 +50,26 @@ var defaultModelRatio = map[string]float64{
 	"gpt-4o-realtime-preview-2024-12-17":      2.5,
 	"gpt-4o-mini-realtime-preview":            0.3,
 	"gpt-4o-mini-realtime-preview-2024-12-17": 0.3,
-	"o1":                        7.5,
-	"o1-2024-12-17":             7.5,
-	"o1-preview":                7.5,
-	"o1-preview-2024-09-12":     7.5,
-	"o1-mini":                   0.55,
-	"o1-mini-2024-09-12":        0.55,
-	"o3-mini":                   0.55,
-	"o3-mini-2025-01-31":        0.55,
-	"o3-mini-high":              0.55,
-	"o3-mini-2025-01-31-high":   0.55,
-	"o3-mini-low":               0.55,
-	"o3-mini-2025-01-31-low":    0.55,
-	"o3-mini-medium":            0.55,
-	"o3-mini-2025-01-31-medium": 0.55,
-	"gpt-4o-mini":               0.075,
-	"gpt-4o-mini-2024-07-18":    0.075,
-	"gpt-4-turbo":               5, // $0.01 / 1K tokens
-	"gpt-4-turbo-2024-04-09":    5, // $0.01 / 1K tokens
+	"o1":                         7.5,
+	"o1-2024-12-17":              7.5,
+	"o1-preview":                 7.5,
+	"o1-preview-2024-09-12":      7.5,
+	"o1-mini":                    0.55,
+	"o1-mini-2024-09-12":         0.55,
+	"o3-mini":                    0.55,
+	"o3-mini-2025-01-31":         0.55,
+	"o3-mini-high":               0.55,
+	"o3-mini-2025-01-31-high":    0.55,
+	"o3-mini-low":                0.55,
+	"o3-mini-2025-01-31-low":     0.55,
+	"o3-mini-medium":             0.55,
+	"o3-mini-2025-01-31-medium":  0.55,
+	"gpt-4o-mini":                0.075,
+	"gpt-4o-mini-2024-07-18":     0.075,
+	"gpt-4-turbo":                5, // $0.01 / 1K tokens
+	"gpt-4-turbo-2024-04-09":     5, // $0.01 / 1K tokens
+	"gpt-4.5-preview":            37.5,
+	"gpt-4.5-preview-2025-02-27": 37.5,
 	//"gpt-3.5-turbo-0301":           0.75, //deprecated
 	"gpt-3.5-turbo":          0.25,
 	"gpt-3.5-turbo-0613":     0.75,
@@ -315,7 +317,7 @@ func UpdateModelRatioByJSONString(jsonStr string) error {
 	return json.Unmarshal([]byte(jsonStr), &modelRatioMap)
 }
 
-func GetModelRatio(name string) float64 {
+func GetModelRatio(name string) (float64, bool) {
 	GetModelRatioMap()
 	if strings.HasPrefix(name, "gpt-4-gizmo") {
 		name = "gpt-4-gizmo-*"
@@ -323,9 +325,9 @@ func GetModelRatio(name string) float64 {
 	ratio, ok := modelRatioMap[name]
 	if !ok {
 		SysError("model ratio not found: " + name)
-		return 30
+		return 37.5, false
 	}
-	return ratio
+	return ratio, true
 }
 
 func DefaultModelRatio2JSONString() string {
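The lookup now follows the comma-ok convention, and the not-found fallback moves from 30 to 37.5 with an explicit false flag. A minimal caller sketch (a hypothetical helper, not part of this commit; assumes the usual fmt and one-api/common imports):

// requireModelRatio is a hypothetical wrapper showing the new contract: callers
// that must not guess (like testChannel below) can turn a missing ratio into an
// error instead of silently billing at the 37.5 fallback.
func requireModelRatio(name string) (float64, error) {
	ratio, ok := common.GetModelRatio(name)
	if !ok {
		return 0, fmt.Errorf("model %s ratio not found", name)
	}
	return ratio, nil
}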
@@ -387,6 +389,9 @@ func GetCompletionRatio(name string) float64 {
 			}
 			return 4
 		}
+		if strings.HasPrefix(name, "gpt-4.5") {
+			return 2
+		}
 		if strings.HasPrefix(name, "gpt-4-turbo") || strings.HasSuffix(name, "preview") {
 			return 3
 		}
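Note the placement: "gpt-4.5-preview" also ends in "preview", so it would otherwise fall into the branch directly below and get a completion ratio of 3; the gpt-4.5 prefix check has to run first for the 2x ratio to apply. A standalone illustration:

package main

import (
	"fmt"
	"strings"
)

func main() {
	name := "gpt-4.5-preview"
	fmt.Println(strings.HasPrefix(name, "gpt-4.5")) // true: matched first, so completion ratio is 2
	fmt.Println(strings.HasSuffix(name, "preview")) // also true: would have returned 3
}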

+ 4 - 1
controller/channel-test.go

@@ -146,7 +146,10 @@ func testChannel(channel *model.Channel, testModel string) (err error, openAIErr
 		return err, nil
 	}
 	modelPrice, usePrice := common.GetModelPrice(testModel, false)
-	modelRatio := common.GetModelRatio(testModel)
+	modelRatio, success := common.GetModelRatio(testModel)
+	if !success {
+		return fmt.Errorf("模型 %s 倍率未设置", testModel), nil
+	}
 	completionRatio := common.GetCompletionRatio(testModel)
 	ratio := modelRatio
 	quota := 0

+ 2 - 1
model/pricing.go

@@ -69,7 +69,8 @@ func updatePricing() {
 			pricing.ModelPrice = modelPrice
 			pricing.QuotaType = 1
 		} else {
-			pricing.ModelRatio = common.GetModelRatio(model)
+			modelRatio, _ := common.GetModelRatio(model)
+			pricing.ModelRatio = modelRatio
 			pricing.CompletionRatio = common.GetCompletionRatio(model)
 			pricing.QuotaType = 0
 		}

+ 7 - 3
relay/helper/price.go

@@ -1,6 +1,7 @@
 package helper
 
 import (
+	"fmt"
 	"github.com/gin-gonic/gin"
 	"one-api/common"
 	relaycommon "one-api/relay/common"
@@ -15,7 +16,7 @@ type PriceData struct {
 	ShouldPreConsumedQuota int
 }
 
-func ModelPriceHelper(c *gin.Context, info *relaycommon.RelayInfo, promptTokens int, maxTokens int) PriceData {
+func ModelPriceHelper(c *gin.Context, info *relaycommon.RelayInfo, promptTokens int, maxTokens int) (PriceData, error) {
 	modelPrice, usePrice := common.GetModelPrice(info.OriginModelName, false)
 	groupRatio := setting.GetGroupRatio(info.Group)
 	var preConsumedQuota int
@@ -25,7 +26,10 @@ func ModelPriceHelper(c *gin.Context, info *relaycommon.RelayInfo, promptTokens
 		if maxTokens != 0 {
 			preConsumedTokens = promptTokens + maxTokens
 		}
-		modelRatio = common.GetModelRatio(info.OriginModelName)
+	var success bool
+	if modelRatio, success = common.GetModelRatio(info.OriginModelName); !success {
+		return PriceData{}, fmt.Errorf("model %s ratio not found", info.OriginModelName)
+	}
 		ratio := modelRatio * groupRatio
 		preConsumedQuota = int(float64(preConsumedTokens) * ratio)
 	} else {
@@ -37,5 +41,5 @@ func ModelPriceHelper(c *gin.Context, info *relaycommon.RelayInfo, promptTokens
 		GroupRatio:             groupRatio,
 		UsePrice:               usePrice,
 		ShouldPreConsumedQuota: preConsumedQuota,
-	}
+	}, nil
 }

+ 4 - 1
relay/relay-audio.go

@@ -75,7 +75,10 @@ func AudioHelper(c *gin.Context) (openaiErr *dto.OpenAIErrorWithStatusCode) {
 		relayInfo.PromptTokens = promptTokens
 	}
 
-	priceData := helper.ModelPriceHelper(c, relayInfo, preConsumedTokens, 0)
+	priceData, err := helper.ModelPriceHelper(c, relayInfo, preConsumedTokens, 0)
+	if err != nil {
+		return service.OpenAIErrorWrapperLocal(err, "model_price_error", http.StatusInternalServerError)
+	}
 
 	userQuota, err := model.GetUserQuota(relayInfo.UserId, false)
 	if err != nil {

+ 4 - 1
relay/relay-image.go

@@ -86,7 +86,10 @@ func ImageHelper(c *gin.Context) *dto.OpenAIErrorWithStatusCode {
 
 	imageRequest.Model = relayInfo.UpstreamModelName
 
-	priceData := helper.ModelPriceHelper(c, relayInfo, 0, 0)
+	priceData, err := helper.ModelPriceHelper(c, relayInfo, 0, 0)
+	if err != nil {
+		return service.OpenAIErrorWrapperLocal(err, "model_price_error", http.StatusInternalServerError)
+	}
 	if !priceData.UsePrice {
 		// modelRatio 16 = modelPrice $0.04
 		// per 1 modelRatio = $0.04 / 16

+ 4 - 2
relay/relay-text.go

@@ -106,8 +106,10 @@ func TextHelper(c *gin.Context) (openaiErr *dto.OpenAIErrorWithStatusCode) {
 		c.Set("prompt_tokens", promptTokens)
 	}
 
-	priceData := helper.ModelPriceHelper(c, relayInfo, promptTokens, int(textRequest.MaxTokens))
-
+	priceData, err := helper.ModelPriceHelper(c, relayInfo, promptTokens, int(textRequest.MaxTokens))
+	if err != nil {
+		return service.OpenAIErrorWrapperLocal(err, "model_price_error", http.StatusInternalServerError)
+	}
 	// pre-consume quota 预消耗配额
 	preConsumedQuota, userQuota, openaiErr := preConsumeQuota(c, priceData.ShouldPreConsumedQuota, relayInfo)
 	if openaiErr != nil {

+ 4 - 2
relay/relay_embedding.go

@@ -57,8 +57,10 @@ func EmbeddingHelper(c *gin.Context) (openaiErr *dto.OpenAIErrorWithStatusCode)
 	promptToken := getEmbeddingPromptToken(*embeddingRequest)
 	relayInfo.PromptTokens = promptToken
 
-	priceData := helper.ModelPriceHelper(c, relayInfo, promptToken, 0)
-
+	priceData, err := helper.ModelPriceHelper(c, relayInfo, promptToken, 0)
+	if err != nil {
+		return service.OpenAIErrorWrapperLocal(err, "model_price_error", http.StatusInternalServerError)
+	}
 	// pre-consume quota 预消耗配额
 	preConsumedQuota, userQuota, openaiErr := preConsumeQuota(c, priceData.ShouldPreConsumedQuota, relayInfo)
 	if openaiErr != nil {

+ 4 - 2
relay/relay_rerank.go

@@ -50,8 +50,10 @@ func RerankHelper(c *gin.Context, relayMode int) (openaiErr *dto.OpenAIErrorWith
 	promptToken := getRerankPromptToken(*rerankRequest)
 	relayInfo.PromptTokens = promptToken
 
-	priceData := helper.ModelPriceHelper(c, relayInfo, promptToken, 0)
-
+	priceData, err := helper.ModelPriceHelper(c, relayInfo, promptToken, 0)
+	if err != nil {
+		return service.OpenAIErrorWrapperLocal(err, "model_price_error", http.StatusInternalServerError)
+	}
 	// pre-consume quota 预消耗配额
 	preConsumedQuota, userQuota, openaiErr := preConsumeQuota(c, priceData.ShouldPreConsumedQuota, relayInfo)
 	if openaiErr != nil {

+ 1 - 1
relay/websocket.go

@@ -65,7 +65,7 @@ func WssHelper(c *gin.Context, ws *websocket.Conn) (openaiErr *dto.OpenAIErrorWi
 		//if realtimeEvent.Session.MaxResponseOutputTokens != 0 {
 		//	preConsumedTokens = promptTokens + int(realtimeEvent.Session.MaxResponseOutputTokens)
 		//}
-		modelRatio = common.GetModelRatio(relayInfo.UpstreamModelName)
+		modelRatio, _ = common.GetModelRatio(relayInfo.UpstreamModelName)
 		ratio = modelRatio * groupRatio
 		preConsumedQuota = int(float64(preConsumedTokens) * ratio)
 	} else {

+ 1 - 1
service/quota.go

@@ -75,7 +75,7 @@ func PreWssConsumeQuota(ctx *gin.Context, relayInfo *relaycommon.RelayInfo, usag
 	audioInputTokens := usage.InputTokenDetails.AudioTokens
 	audioOutTokens := usage.OutputTokenDetails.AudioTokens
 	groupRatio := setting.GetGroupRatio(relayInfo.Group)
-	modelRatio := common.GetModelRatio(modelName)
+	modelRatio, _ := common.GetModelRatio(modelName)
 
 	quotaInfo := QuotaInfo{
 		InputDetails: TokenDetails{
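Like updatePricing and WssHelper above, PreWssConsumeQuota discards the success flag, so an unknown model here is still billed at the 37.5 fallback rather than rejected; only the channel test and ModelPriceHelper treat a missing ratio as an error.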