
refactor: update billing related code

JustSong 2 years ago
Parent
Commit
fe8f216dd9
1 changed file with 14 additions and 14 deletions
  1. controller/relay-text.go  +14 -14

+ 14 - 14
controller/relay-text.go

@@ -306,7 +306,6 @@ func relayTextHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
 	}
 
 	var textResponse TextResponse
-	var streamResponseText string
 
 	defer func() {
 		if consumeQuota {
@@ -318,16 +317,10 @@ func relayTextHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
 			if strings.HasPrefix(textRequest.Model, "gpt-4") {
 				completionRatio = 2
 			}
-			if isStream && apiType != APITypeBaidu && apiType != APITypeZhipu && apiType != APITypeAli {
-				completionTokens = countTokenText(streamResponseText, textRequest.Model)
-			} else {
-				promptTokens = textResponse.Usage.PromptTokens
-				completionTokens = textResponse.Usage.CompletionTokens
-				if apiType == APITypeZhipu {
-					// zhipu's API does not return prompt tokens & completion tokens
-					promptTokens = textResponse.Usage.TotalTokens
-				}
-			}
+
+			promptTokens = textResponse.Usage.PromptTokens
+			completionTokens = textResponse.Usage.CompletionTokens
+
 			quota = promptTokens + int(float64(completionTokens)*completionRatio)
 			quota = int(float64(quota) * ratio)
 			if ratio != 0 && quota <= 0 {
@@ -365,7 +358,8 @@ func relayTextHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
 			if err != nil {
 				return err
 			}
-			streamResponseText = responseText
+			textResponse.Usage.PromptTokens = promptTokens
+			textResponse.Usage.CompletionTokens = countTokenText(responseText, textRequest.Model)
 			return nil
 		} else {
 			err, usage := openaiHandler(c, resp, consumeQuota)
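
Each streaming branch in this commit follows the same pattern: instead of stashing the concatenated stream text in the removed streamResponseText local, the handler writes usage directly into textResponse.Usage, so the deferred billing code has a single source of truth. A hedged, self-contained sketch of that pattern; Usage and countTokenTextApprox are stand-ins for the file's own types and its countTokenText helper.

	package main

	import (
		"fmt"
		"strings"
	)

	// Usage mirrors the shape of textResponse.Usage; field names follow the OpenAI usage object.
	type Usage struct {
		PromptTokens     int
		CompletionTokens int
		TotalTokens      int
	}

	// countTokenTextApprox stands in for countTokenText from the repository;
	// a real implementation would use a model-specific tokenizer.
	func countTokenTextApprox(text, model string) int {
		return len(strings.Fields(text))
	}

	func main() {
		promptTokens := 12
		streamedText := "hello from a streamed completion"

		var usage Usage
		// Same move as the streaming branches above: fill usage directly
		// instead of carrying the stream text in a separate local variable.
		usage.PromptTokens = promptTokens
		usage.CompletionTokens = countTokenTextApprox(streamedText, "gpt-3.5-turbo")
		usage.TotalTokens = usage.PromptTokens + usage.CompletionTokens

		fmt.Printf("%+v\n", usage)
	}
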
@@ -383,7 +377,8 @@ func relayTextHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
 			if err != nil {
 				return err
 			}
-			streamResponseText = responseText
+			textResponse.Usage.PromptTokens = promptTokens
+			textResponse.Usage.CompletionTokens = countTokenText(responseText, textRequest.Model)
 			return nil
 		} else {
 			err, usage := claudeHandler(c, resp, promptTokens, textRequest.Model)
@@ -428,7 +423,8 @@ func relayTextHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
 			if err != nil {
 				return err
 			}
-			streamResponseText = responseText
+			textResponse.Usage.PromptTokens = promptTokens
+			textResponse.Usage.CompletionTokens = countTokenText(responseText, textRequest.Model)
 			return nil
 		} else {
 			err, usage := palmHandler(c, resp, promptTokens, textRequest.Model)
@@ -449,6 +445,8 @@ func relayTextHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
 			if usage != nil {
 				textResponse.Usage = *usage
 			}
+			// zhipu's API does not return prompt tokens & completion tokens
+			textResponse.Usage.PromptTokens = textResponse.Usage.TotalTokens
 			return nil
 		} else {
 			err, usage := zhipuHandler(c, resp)
@@ -458,6 +456,8 @@ func relayTextHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
 			if usage != nil {
 				textResponse.Usage = *usage
 			}
+			// zhipu's API does not return prompt tokens & completion tokens
+			textResponse.Usage.PromptTokens = textResponse.Usage.TotalTokens
 			return nil
 		}
 	case APITypeAli:
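
Per the comment carried over from the old code, Zhipu's API reports only total tokens, so both zhipu branches copy TotalTokens into PromptTokens before the shared billing path runs. A small illustration of the effect, assuming CompletionTokens comes back as zero for this provider and using an illustrative ratio:

	package main

	import "fmt"

	// Usage mirrors textResponse.Usage; per the diff's comment, Zhipu's
	// response effectively carries only TotalTokens.
	type Usage struct {
		PromptTokens     int
		CompletionTokens int
		TotalTokens      int
	}

	func main() {
		// What the zhipu handler hands back, assuming only total_tokens is set.
		usage := Usage{TotalTokens: 230}

		// Same adjustment as both zhipu branches above: bill total tokens as
		// prompt tokens so the shared billing path still produces a charge.
		usage.PromptTokens = usage.TotalTokens

		ratio := 1.0 // illustrative channel/model ratio
		// completionRatio is omitted here because CompletionTokens is zero in this example
		quota := int(float64(usage.PromptTokens+usage.CompletionTokens) * ratio)
		fmt.Println(quota) // 230
	}
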