relay-utils.go

package controller

import (
	"encoding/json"
	"fmt"
	"github.com/gin-gonic/gin"
	"github.com/pkoukk/tiktoken-go"
	"image"
	_ "image/gif"
	_ "image/jpeg"
	_ "image/png"
	"io"
	"log"
	"math"
	"net/http"
	"one-api/common"
	"strconv"
	"strings"
	"unicode/utf8"
)

var stopFinishReason = "stop"

// tokenEncoderMap won't grow after initialization
var tokenEncoderMap = map[string]*tiktoken.Tiktoken{}
var defaultTokenEncoder *tiktoken.Tiktoken
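
// InitTokenEncoders pre-loads the tiktoken encoders for the gpt-3.5 and gpt-4
// model families and registers them for every model listed in common.ModelRatio.
// Models outside those families get a nil entry and are resolved lazily in
// getTokenEncoder. Call this once at startup, before any token counting.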
func InitTokenEncoders() {
	common.SysLog("initializing token encoders")
	gpt35TokenEncoder, err := tiktoken.EncodingForModel("gpt-3.5-turbo")
	if err != nil {
		common.FatalLog(fmt.Sprintf("failed to get gpt-3.5-turbo token encoder: %s", err.Error()))
	}
	defaultTokenEncoder = gpt35TokenEncoder
	gpt4TokenEncoder, err := tiktoken.EncodingForModel("gpt-4")
	if err != nil {
		common.FatalLog(fmt.Sprintf("failed to get gpt-4 token encoder: %s", err.Error()))
	}
	for model := range common.ModelRatio {
		if strings.HasPrefix(model, "gpt-3.5") {
			tokenEncoderMap[model] = gpt35TokenEncoder
		} else if strings.HasPrefix(model, "gpt-4") {
			tokenEncoderMap[model] = gpt4TokenEncoder
		} else {
			tokenEncoderMap[model] = nil
		}
	}
	common.SysLog("token encoders initialized")
}
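
// getTokenEncoder returns the encoder registered for the given model. Models that
// were registered with a nil encoder are resolved via tiktoken on first use and
// cached; unknown models and lookup failures fall back to the gpt-3.5-turbo encoder.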
func getTokenEncoder(model string) *tiktoken.Tiktoken {
	tokenEncoder, ok := tokenEncoderMap[model]
	if ok && tokenEncoder != nil {
		return tokenEncoder
	}
	if ok {
		tokenEncoder, err := tiktoken.EncodingForModel(model)
		if err != nil {
			common.SysError(fmt.Sprintf("failed to get token encoder for model %s: %s, using encoder for gpt-3.5-turbo", model, err.Error()))
			tokenEncoder = defaultTokenEncoder
		}
		tokenEncoderMap[model] = tokenEncoder
		return tokenEncoder
	}
	return defaultTokenEncoder
}
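
// getTokenNum returns the number of tokens the encoder produces for the given text.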
func getTokenNum(tokenEncoder *tiktoken.Tiktoken, text string) int {
	return len(tokenEncoder.Encode(text, nil, nil))
}
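
// getImageToken estimates the vision token cost of an image using the 512px-tile
// scheme implemented below: a flat 85 tokens for "low" detail, otherwise 85 plus
// 170 per 512px tile after scaling the shorter side down to at most 768px.
// Example: a 1024x2048 image scales to 768x1536, which covers 2x3 = 6 tiles and
// costs 6*170 + 85 = 1105 tokens. Note that the officially documented OpenAI
// algorithm also first fits the image inside a 2048x2048 square; that step is
// not applied here (see the TODO below).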
func getImageToken(imageUrl *MessageImageUrl) (int, error) {
	if imageUrl.Detail == "low" {
		return 85, nil
	}
	var config image.Config
	var err error
	if strings.HasPrefix(imageUrl.Url, "http") {
		common.SysLog(fmt.Sprintf("downloading image: %s", imageUrl.Url))
		config, err = common.DecodeUrlImageData(imageUrl.Url)
	} else {
		common.SysLog("decoding image")
		config, err = common.DecodeBase64ImageData(imageUrl.Url)
	}
	if err != nil {
		return 0, err
	}
	if config.Width == 0 || config.Height == 0 {
		return 0, fmt.Errorf("failed to decode image config: %s", imageUrl.Url)
	}
	// TODO: match the official "auto" detail billing behavior
	if config.Width < 512 && config.Height < 512 {
		if imageUrl.Detail == "auto" || imageUrl.Detail == "" {
			// images smaller than 512x512 are forced to "low" detail
			imageUrl.Detail = "low"
			return 85, nil
		}
	}
	shortSide := config.Width
	otherSide := config.Height
	log.Printf("width: %d, height: %d", config.Width, config.Height)
	// scale factor
	scale := 1.0
	if config.Height < shortSide {
		shortSide = config.Height
		otherSide = config.Width
	}
	// if the shorter side exceeds 768, scale it down to 768
	if shortSide > 768 {
		scale = float64(shortSide) / 768
		shortSide = 768
	}
	// scale the other side by the same factor, rounding up
	otherSide = int(math.Ceil(float64(otherSide) / scale))
	log.Printf("shortSide: %d, otherSide: %d, scale: %f", shortSide, otherSide, scale)
	// count 512px tiles: each side divided by 512, rounded up
	tiles := (shortSide + 511) / 512 * ((otherSide + 511) / 512)
	log.Printf("tiles: %d", tiles)
	return tiles*170 + 85, nil
}
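
// countTokenMessages counts the prompt tokens of a chat completion request,
// following the OpenAI cookbook recipe referenced below: a fixed per-message
// overhead (3 tokens, or 4 for gpt-3.5-turbo-0301), plus the tokens of the role,
// the content (text and/or images) and the optional name, plus 3 tokens that
// prime the assistant reply.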
func countTokenMessages(messages []Message, model string) (int, error) {
	tokenEncoder := getTokenEncoder(model)
	// Reference:
	// https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
	// https://github.com/pkoukk/tiktoken-go/issues/6
	//
	// Every message follows <|start|>{role/name}\n{content}<|end|>\n
	var tokensPerMessage int
	var tokensPerName int
	if model == "gpt-3.5-turbo-0301" {
		tokensPerMessage = 4
		tokensPerName = -1 // If there's a name, the role is omitted
	} else {
		tokensPerMessage = 3
		tokensPerName = 1
	}
	tokenNum := 0
	for _, message := range messages {
		tokenNum += tokensPerMessage
		tokenNum += getTokenNum(tokenEncoder, message.Role)
		var arrayContent []MediaMessage
		if err := json.Unmarshal(message.Content, &arrayContent); err != nil {
			// content is not an array of parts, so it must be a plain string
			var stringContent string
			if err := json.Unmarshal(message.Content, &stringContent); err != nil {
				return 0, err
			} else {
				tokenNum += getTokenNum(tokenEncoder, stringContent)
				if message.Name != nil {
					tokenNum += tokensPerName
					tokenNum += getTokenNum(tokenEncoder, *message.Name)
				}
			}
		} else {
			// multimodal content: count text parts and image parts separately
			for _, m := range arrayContent {
				if m.Type == "image_url" {
					imageTokenNum, err := getImageToken(&m.ImageUrl)
					if err != nil {
						return 0, err
					}
					tokenNum += imageTokenNum
					log.Printf("image token num: %d", imageTokenNum)
				} else {
					tokenNum += getTokenNum(tokenEncoder, m.Text)
				}
			}
		}
	}
	tokenNum += 3 // Every reply is primed with <|start|>assistant<|message|>
	return tokenNum, nil
}
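
// countTokenInput counts tokens for embedding/completion style inputs, which may
// be a single string or a slice of strings; unsupported input types count as 0.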
func countTokenInput(input any, model string) int {
	switch v := input.(type) {
	case string:
		return countTokenText(v, model)
	case []string:
		text := ""
		for _, s := range v {
			text += s
		}
		return countTokenText(text, model)
	}
	return 0
}
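
// countAudioToken bills tts models by input character (rune) count and all other
// audio models by token count.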
func countAudioToken(text string, model string) int {
	if strings.HasPrefix(model, "tts") {
		return utf8.RuneCountInString(text)
	} else {
		return countTokenText(text, model)
	}
}
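
// countTokenText counts the tokens of a plain text string for the given model.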
func countTokenText(text string, model string) int {
	tokenEncoder := getTokenEncoder(model)
	return getTokenNum(tokenEncoder, text)
}
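
// errorWrapper converts an internal error into an OpenAI-style error response with
// the given code and HTTP status. Errors from upstream POST requests are replaced
// with a generic message so internal endpoint details are not leaked to clients.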
func errorWrapper(err error, code string, statusCode int) *OpenAIErrorWithStatusCode {
	text := err.Error()
	// mask upstream request failures so the upstream address is not exposed
	if strings.Contains(text, "Post") {
		common.SysLog(fmt.Sprintf("error: %s", text))
		text = "failed to request the upstream address"
	}
	// avoid exposing internal error details
	openAIError := OpenAIError{
		Message: text,
		Type:    "new_api_error",
		Code:    code,
	}
	return &OpenAIErrorWithStatusCode{
		OpenAIError: openAIError,
		StatusCode:  statusCode,
	}
}
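
// shouldDisableChannel reports whether an upstream error is severe enough to
// automatically disable the channel: a 401 response, an exhausted quota, or an
// invalid/deactivated account. It always returns false when automatic disabling
// is turned off in the settings.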
func shouldDisableChannel(err *OpenAIError, statusCode int) bool {
	if !common.AutomaticDisableChannelEnabled {
		return false
	}
	if err == nil {
		return false
	}
	if statusCode == http.StatusUnauthorized {
		return true
	}
	if err.Type == "insufficient_quota" || err.Code == "invalid_api_key" || err.Code == "account_deactivated" || err.Code == "billing_not_active" {
		return true
	}
	return false
}
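
// setEventStreamHeaders prepares the response for server-sent events: it marks the
// body as text/event-stream, disables caching and proxy buffering, and keeps the
// connection open so chunks can be flushed to the client as they arrive.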
func setEventStreamHeaders(c *gin.Context) {
	c.Writer.Header().Set("Content-Type", "text/event-stream")
	c.Writer.Header().Set("Cache-Control", "no-cache")
	c.Writer.Header().Set("Connection", "keep-alive")
	c.Writer.Header().Set("Transfer-Encoding", "chunked")
	c.Writer.Header().Set("X-Accel-Buffering", "no")
}
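
// relayErrorHandler turns a non-2xx upstream response into an OpenAIErrorWithStatusCode.
// It starts with a generic "bad_response_status_code" error and, if the upstream body
// can be read and parsed as an OpenAI error payload, replaces it with the upstream error.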
func relayErrorHandler(resp *http.Response) (openAIErrorWithStatusCode *OpenAIErrorWithStatusCode) {
	openAIErrorWithStatusCode = &OpenAIErrorWithStatusCode{
		StatusCode: resp.StatusCode,
		OpenAIError: OpenAIError{
			Message: fmt.Sprintf("bad response status code %d", resp.StatusCode),
			Type:    "upstream_error",
			Code:    "bad_response_status_code",
			Param:   strconv.Itoa(resp.StatusCode),
		},
	}
	responseBody, err := io.ReadAll(resp.Body)
	if err != nil {
		return
	}
	err = resp.Body.Close()
	if err != nil {
		return
	}
	var textResponse TextResponse
	err = json.Unmarshal(responseBody, &textResponse)
	if err != nil {
		return
	}
	openAIErrorWithStatusCode.OpenAIError = textResponse.Error
	return
}
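
// getFullRequestURL joins the channel base URL and the relative request path. For
// OpenAI channels proxied through Cloudflare's AI Gateway the "/v1" prefix is dropped,
// e.g. a baseURL of "https://gateway.ai.cloudflare.com/..." plus "/v1/chat/completions"
// becomes "<baseURL>/chat/completions".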
func getFullRequestURL(baseURL string, requestURL string, channelType int) string {
	fullRequestURL := fmt.Sprintf("%s%s", baseURL, requestURL)
	if channelType == common.ChannelTypeOpenAI {
		if strings.HasPrefix(baseURL, "https://gateway.ai.cloudflare.com") {
			fullRequestURL = fmt.Sprintf("%s%s", baseURL, strings.TrimPrefix(requestURL, "/v1"))
		}
	}
	return fullRequestURL
}