gemini_handler.go

package relay

import (
    "bytes"
    "encoding/json"
    "errors"
    "fmt"
    "net/http"
    "one-api/common"
    "one-api/dto"
    "one-api/relay/channel/gemini"
    relaycommon "one-api/relay/common"
    "one-api/relay/helper"
    "one-api/service"
    "one-api/setting"
    "one-api/setting/model_setting"
    "strings"

    "github.com/gin-gonic/gin"
)
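
// getAndValidateGeminiRequest unmarshals the Gemini request from the reusable
// request body and rejects requests that contain no contents.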
func getAndValidateGeminiRequest(c *gin.Context) (*gemini.GeminiChatRequest, error) {
    request := &gemini.GeminiChatRequest{}
    err := common.UnmarshalBodyReusable(c, request)
    if err != nil {
        return nil, err
    }
    if len(request.Contents) == 0 {
        return nil, errors.New("contents is required")
    }
    return request, nil
}

// checkGeminiStreamMode marks the relay as streaming when the client requests SSE
// via the alt=sse query parameter, e.g.
// /v1beta/models/gemini-2.0-flash:streamGenerateContent?alt=sse&key=xxx
func checkGeminiStreamMode(c *gin.Context, relayInfo *relaycommon.RelayInfo) {
    if c.Query("alt") == "sse" {
        relayInfo.IsStream = true
    }
    // if strings.Contains(c.Request.URL.Path, "streamGenerateContent") {
    //     relayInfo.IsStream = true
    // }
}
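
// checkGeminiInputSensitive collects every text part from the request contents
// and runs it through the sensitive-word check, returning any matched words.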
func checkGeminiInputSensitive(textRequest *gemini.GeminiChatRequest) ([]string, error) {
    var inputTexts []string
    for _, content := range textRequest.Contents {
        for _, part := range content.Parts {
            if part.Text != "" {
                inputTexts = append(inputTexts, part.Text)
            }
        }
    }
    if len(inputTexts) == 0 {
        return nil, nil
    }
    sensitiveWords, err := service.CheckSensitiveInput(inputTexts)
    return sensitiveWords, err
}
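
// getGeminiInputTokens counts prompt tokens over the request's text parts,
// stores the result on relayInfo, and returns it.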
func getGeminiInputTokens(req *gemini.GeminiChatRequest, info *relaycommon.RelayInfo) int {
    // count input tokens over the concatenated text parts
    var inputTexts []string
    for _, content := range req.Contents {
        for _, part := range content.Parts {
            if part.Text != "" {
                inputTexts = append(inputTexts, part.Text)
            }
        }
    }
    inputText := strings.Join(inputTexts, "\n")
    inputTokens := service.CountTokenInput(inputText, info.UpstreamModelName)
    info.PromptTokens = inputTokens
    return inputTokens
}
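
// isNoThinkingRequest reports whether the request explicitly disables thinking,
// i.e. its ThinkingConfig carries a thinking budget of zero or less.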
func isNoThinkingRequest(req *gemini.GeminiChatRequest) bool {
    if req.GenerationConfig.ThinkingConfig != nil && req.GenerationConfig.ThinkingConfig.ThinkingBudget != nil {
        return *req.GenerationConfig.ThinkingConfig.ThinkingBudget <= 0
    }
    return false
}
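
// trimModelThinking normalizes a model name by removing a trailing "-nothinking"
// or "-thinking" suffix, or by reducing a "-thinking-<n>" suffix to plain "-thinking".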
func trimModelThinking(modelName string) string {
    // strip a trailing "-nothinking" suffix
    if strings.HasSuffix(modelName, "-nothinking") {
        return strings.TrimSuffix(modelName, "-nothinking")
    }
    // strip a trailing "-thinking" suffix
    if strings.HasSuffix(modelName, "-thinking") {
        return strings.TrimSuffix(modelName, "-thinking")
    }
    // reduce a "-thinking-<number>" suffix to plain "-thinking"
    if strings.Contains(modelName, "-thinking-") {
        parts := strings.Split(modelName, "-thinking-")
        if len(parts) > 1 {
            return parts[0] + "-thinking"
        }
    }
    return modelName
}
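
// GeminiHelper relays a native Gemini chat request end to end: it validates the
// request, detects streaming, applies model mapping and the thinking adapter,
// pre-consumes quota, forwards the request through the channel adaptor, and
// settles the final quota from the usage reported in the response.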
func GeminiHelper(c *gin.Context) (openaiErr *dto.OpenAIErrorWithStatusCode) {
    req, err := getAndValidateGeminiRequest(c)
    if err != nil {
        common.LogError(c, fmt.Sprintf("getAndValidateGeminiRequest error: %s", err.Error()))
        return service.OpenAIErrorWrapperLocal(err, "invalid_gemini_request", http.StatusBadRequest)
    }

    relayInfo := relaycommon.GenRelayInfoGemini(c)

    // detect Gemini streaming mode (alt=sse)
    checkGeminiStreamMode(c, relayInfo)

    if setting.ShouldCheckPromptSensitive() {
        sensitiveWords, err := checkGeminiInputSensitive(req)
        if err != nil {
            common.LogWarn(c, fmt.Sprintf("user sensitive words detected: %s", strings.Join(sensitiveWords, ", ")))
            return service.OpenAIErrorWrapperLocal(err, "check_request_sensitive_error", http.StatusBadRequest)
        }
    }

    // apply model mapping
    err = helper.ModelMappedHelper(c, relayInfo, req)
    if err != nil {
        return service.OpenAIErrorWrapperLocal(err, "model_mapped_error", http.StatusBadRequest)
    }
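
    // Reuse a prompt token count already stored on the context if present;
    // otherwise count tokens from the request text and cache the result.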
    if value, exists := c.Get("prompt_tokens"); exists {
        promptTokens := value.(int)
        relayInfo.SetPromptTokens(promptTokens)
    } else {
        promptTokens := getGeminiInputTokens(req, relayInfo)
        c.Set("prompt_tokens", promptTokens)
    }
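
    // With the thinking adapter enabled: bill zero-budget ("no thinking") requests
    // as the "-nothinking" model variant when such a price is configured, and apply
    // gemini.ThinkingAdaptor to requests that carry no ThinkingConfig at all.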
    if model_setting.GetGeminiSettings().ThinkingAdapterEnabled {
        if isNoThinkingRequest(req) {
            // only rename if the model is not already a -nothinking variant
            if !strings.Contains(relayInfo.OriginModelName, "-nothinking") {
                // switch to the -nothinking model name only if a price or ratio is configured for it
                noThinkingModelName := relayInfo.OriginModelName + "-nothinking"
                containPrice := helper.ContainPriceOrRatio(noThinkingModelName)
                if containPrice {
                    relayInfo.OriginModelName = noThinkingModelName
                    relayInfo.UpstreamModelName = noThinkingModelName
                }
            }
        }
        if req.GenerationConfig.ThinkingConfig == nil {
            gemini.ThinkingAdaptor(req, relayInfo)
        }
    }

    priceData, err := helper.ModelPriceHelper(c, relayInfo, relayInfo.PromptTokens, int(req.GenerationConfig.MaxOutputTokens))
    if err != nil {
        return service.OpenAIErrorWrapperLocal(err, "model_price_error", http.StatusInternalServerError)
    }

    // pre-consume quota
    preConsumedQuota, userQuota, openaiErr := preConsumeQuota(c, priceData.ShouldPreConsumedQuota, relayInfo)
    if openaiErr != nil {
        return openaiErr
    }
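
    // Refund the pre-consumed quota if the relay ends with an error.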
    defer func() {
        if openaiErr != nil {
            returnPreConsumedQuota(c, relayInfo, userQuota, preConsumedQuota)
        }
    }()

    adaptor := GetAdaptor(relayInfo.ApiType)
    if adaptor == nil {
        return service.OpenAIErrorWrapperLocal(fmt.Errorf("invalid api type: %d", relayInfo.ApiType), "invalid_api_type", http.StatusBadRequest)
    }
    adaptor.Init(relayInfo)

    // Clean up empty system instruction
    if req.SystemInstructions != nil {
        hasContent := false
        for _, part := range req.SystemInstructions.Parts {
            if part.Text != "" {
                hasContent = true
                break
            }
        }
        if !hasContent {
            req.SystemInstructions = nil
        }
    }

    requestBody, err := json.Marshal(req)
    if err != nil {
        return service.OpenAIErrorWrapperLocal(err, "marshal_text_request_failed", http.StatusInternalServerError)
    }
    if common.DebugEnabled {
        fmt.Printf("Gemini request body: %s\n", string(requestBody))
    }

    resp, err := adaptor.DoRequest(c, relayInfo, bytes.NewReader(requestBody))
    if err != nil {
        common.LogError(c, "Do gemini request failed: "+err.Error())
        return service.OpenAIErrorWrapper(err, "do_request_failed", http.StatusInternalServerError)
    }

    statusCodeMappingStr := c.GetString("status_code_mapping")
    var httpResp *http.Response
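    // Detect upstream SSE responses and translate non-200 upstream status codes
    // through the channel's status code mapping before returning the error.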
    if resp != nil {
        httpResp = resp.(*http.Response)
        relayInfo.IsStream = relayInfo.IsStream || strings.HasPrefix(httpResp.Header.Get("Content-Type"), "text/event-stream")
        if httpResp.StatusCode != http.StatusOK {
            openaiErr = service.RelayErrorHandler(httpResp, false)
            // reset the status code according to the channel's status code mapping
            service.ResetStatusCode(openaiErr, statusCodeMappingStr)
            return openaiErr
        }
    }
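
    // Let the adaptor parse the upstream response, then settle the final quota
    // from the reported token usage.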
    usage, openaiErr := adaptor.DoResponse(c, resp.(*http.Response), relayInfo)
    if openaiErr != nil {
        service.ResetStatusCode(openaiErr, statusCodeMappingStr)
        return openaiErr
    }

    postConsumeQuota(c, relayInfo, usage.(*dto.Usage), preConsumedQuota, userQuota, priceData, "")
    return nil
}