relay-utils.go

package controller

import (
	"bytes"
	"encoding/json"
	"fmt"
	"github.com/chai2010/webp"
	"github.com/gin-gonic/gin"
	"github.com/pkoukk/tiktoken-go"
	"image"
	_ "image/gif"
	_ "image/jpeg"
	_ "image/png"
	"io"
	"log"
	"math"
	"net/http"
	"one-api/common"
	"strconv"
	"strings"
	"unicode/utf8"
)
var stopFinishReason = "stop"

// tokenEncoderMap won't grow after initialization
var tokenEncoderMap = map[string]*tiktoken.Tiktoken{}
var defaultTokenEncoder *tiktoken.Tiktoken
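
// InitTokenEncoders preloads the tiktoken encoders for GPT-3.5 and GPT-4 and
// maps every model in common.ModelRatio to one of them; other model families
// get a nil entry and are resolved lazily by getTokenEncoder.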
func InitTokenEncoders() {
	common.SysLog("initializing token encoders")
	gpt35TokenEncoder, err := tiktoken.EncodingForModel("gpt-3.5-turbo")
	if err != nil {
		common.FatalLog(fmt.Sprintf("failed to get gpt-3.5-turbo token encoder: %s", err.Error()))
	}
	defaultTokenEncoder = gpt35TokenEncoder
	gpt4TokenEncoder, err := tiktoken.EncodingForModel("gpt-4")
	if err != nil {
		common.FatalLog(fmt.Sprintf("failed to get gpt-4 token encoder: %s", err.Error()))
	}
	for model := range common.ModelRatio {
		if strings.HasPrefix(model, "gpt-3.5") {
			tokenEncoderMap[model] = gpt35TokenEncoder
		} else if strings.HasPrefix(model, "gpt-4") {
			tokenEncoderMap[model] = gpt4TokenEncoder
		} else {
			tokenEncoderMap[model] = nil
		}
	}
	common.SysLog("token encoders initialized")
}
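
// getTokenEncoder returns the cached encoder for the given model. A nil entry
// in tokenEncoderMap means the model is known but has no preloaded encoder, so
// one is resolved via tiktoken and cached; unknown models fall back to the
// gpt-3.5-turbo encoder.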
func getTokenEncoder(model string) *tiktoken.Tiktoken {
	tokenEncoder, ok := tokenEncoderMap[model]
	if ok && tokenEncoder != nil {
		return tokenEncoder
	}
	if ok {
		tokenEncoder, err := tiktoken.EncodingForModel(model)
		if err != nil {
			common.SysError(fmt.Sprintf("failed to get token encoder for model %s: %s, using encoder for gpt-3.5-turbo", model, err.Error()))
			tokenEncoder = defaultTokenEncoder
		}
		tokenEncoderMap[model] = tokenEncoder
		return tokenEncoder
	}
	return defaultTokenEncoder
}
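
// getTokenNum returns the number of tokens produced by encoding text with the
// given encoder.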
func getTokenNum(tokenEncoder *tiktoken.Tiktoken, text string) int {
	return len(tokenEncoder.Encode(text, nil, nil))
}
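
// getImageToken estimates the token cost of an image: "low" detail costs a
// flat 85 tokens; otherwise the image header is downloaded to get the
// dimensions, the shorter side is scaled down to at most 768, and the cost is
// 85 plus 170 per 512x512 tile. Small images in "auto" detail also cost 85.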
func getImageToken(imageUrl MessageImageUrl) (int, error) {
	if imageUrl.Detail == "low" {
		return 85, nil
	}
	response, err := http.Get(imageUrl.Url)
	if err != nil {
		fmt.Println("Error: Failed to get the URL")
		return 0, err
	}
	defer response.Body.Close()
	// Limit how many bytes are read so the whole image is not downloaded;
	// the header is enough to determine the dimensions.
	headerBytes, err := io.ReadAll(io.LimitReader(response.Body, 8192))
	if err != nil {
		return 0, err
	}
	// Decode only the image header to get the dimensions (gif, jpg, png first, then webp).
	config, _, err := image.DecodeConfig(bytes.NewReader(headerBytes))
	if err != nil {
		common.SysLog(fmt.Sprintf("fail to decode image config (gif, jpg, png): %s", err.Error()))
		config, err = webp.DecodeConfig(bytes.NewReader(headerBytes))
		if err != nil {
			common.SysLog(fmt.Sprintf("fail to decode image config (webp): %s", err.Error()))
		}
	}
	if config.Width == 0 || config.Height == 0 {
		return 0, fmt.Errorf("fail to decode image config: %v", err)
	}
	if config.Width < 512 && config.Height < 512 {
		if imageUrl.Detail == "auto" || imageUrl.Detail == "" {
			return 85, nil
		}
	}
	shortSide := config.Width
	otherSide := config.Height
	log.Printf("width: %d, height: %d", config.Width, config.Height)
	// scale factor
	scale := 1.0
	if config.Height < shortSide {
		shortSide = config.Height
		otherSide = config.Width
	}
	// Scale the shorter side down to at most 768; if it is larger, shrink it to 768.
	if shortSide > 768 {
		scale = float64(shortSide) / 768
		shortSide = 768
	}
	// Scale the other side by the same factor, rounding up.
	otherSide = int(math.Ceil(float64(otherSide) / scale))
	log.Printf("shortSide: %d, otherSide: %d, scale: %f", shortSide, otherSide, scale)
	// Count the 512x512 tiles (each side divided by 512, rounded up).
	tiles := (shortSide + 511) / 512 * ((otherSide + 511) / 512)
	log.Printf("tiles: %d", tiles)
	return tiles*170 + 85, nil
}
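
// countTokenMessages counts the prompt tokens for a list of chat messages,
// following the tiktoken cookbook accounting: a fixed per-message overhead plus
// tokens for the role, content, and optional name; image parts are priced via
// getImageToken and every reply is primed with 3 extra tokens.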
func countTokenMessages(messages []Message, model string) (int, error) {
	tokenEncoder := getTokenEncoder(model)
	// Reference:
	// https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
	// https://github.com/pkoukk/tiktoken-go/issues/6
	//
	// Every message follows <|start|>{role/name}\n{content}<|end|>\n
	var tokensPerMessage int
	var tokensPerName int
	if model == "gpt-3.5-turbo-0301" {
		tokensPerMessage = 4
		tokensPerName = -1 // If there's a name, the role is omitted
	} else {
		tokensPerMessage = 3
		tokensPerName = 1
	}
	tokenNum := 0
	for _, message := range messages {
		tokenNum += tokensPerMessage
		tokenNum += getTokenNum(tokenEncoder, message.Role)
		// Content is either an array of media parts or a plain string.
		var arrayContent []MediaMessage
		if err := json.Unmarshal(message.Content, &arrayContent); err != nil {
			var stringContent string
			if err := json.Unmarshal(message.Content, &stringContent); err != nil {
				return 0, err
			} else {
				tokenNum += getTokenNum(tokenEncoder, stringContent)
				if message.Name != nil {
					tokenNum += tokensPerName
					tokenNum += getTokenNum(tokenEncoder, *message.Name)
				}
			}
		} else {
			for _, m := range arrayContent {
				if m.Type == "image_url" {
					imageTokenNum, err := getImageToken(m.ImageUrl)
					if err != nil {
						return 0, err
					}
					tokenNum += imageTokenNum
					log.Printf("image token num: %d", imageTokenNum)
				} else {
					tokenNum += getTokenNum(tokenEncoder, m.Text)
				}
			}
		}
	}
	tokenNum += 3 // Every reply is primed with <|start|>assistant<|message|>
	return tokenNum, nil
}
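
// countTokenInput counts tokens for an input that may be a single string or a
// list of strings; unsupported types count as zero.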
func countTokenInput(input any, model string) int {
	switch v := input.(type) {
	case string:
		return countTokenText(v, model)
	case []string:
		text := ""
		for _, s := range v {
			text += s
		}
		return countTokenText(text, model)
	}
	return 0
}
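
// countAudioToken counts TTS model usage by input characters (runes) and all
// other audio models by text tokens.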
func countAudioToken(text string, model string) int {
	if strings.HasPrefix(model, "tts") {
		return utf8.RuneCountInString(text)
	} else {
		return countTokenText(text, model)
	}
}
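
// countTokenText counts the tokens in text using the encoder for the given model.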
func countTokenText(text string, model string) int {
	tokenEncoder := getTokenEncoder(model)
	return getTokenNum(tokenEncoder, text)
}
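
// errorWrapper wraps an internal error into an OpenAI-style error response with
// the given code and HTTP status, masking upstream request failures so that
// internal details are not leaked to the client.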
func errorWrapper(err error, code string, statusCode int) *OpenAIErrorWithStatusCode {
	text := err.Error()
	// Mask errors from outgoing requests to avoid exposing internal details such as the upstream URL.
	if strings.Contains(text, "Post") {
		text = "请求上游地址失败" // "failed to request the upstream address"
	}
	openAIError := OpenAIError{
		Message: text,
		Type:    "one_api_error",
		Code:    code,
	}
	return &OpenAIErrorWithStatusCode{
		OpenAIError: openAIError,
		StatusCode:  statusCode,
	}
}
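
// shouldDisableChannel reports whether an upstream error should automatically
// disable the channel: a 401 status, or quota/key/account errors, when the
// automatic-disable option is enabled.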
func shouldDisableChannel(err *OpenAIError, statusCode int) bool {
	if !common.AutomaticDisableChannelEnabled {
		return false
	}
	if err == nil {
		return false
	}
	if statusCode == http.StatusUnauthorized {
		return true
	}
	if err.Type == "insufficient_quota" || err.Code == "invalid_api_key" || err.Code == "account_deactivated" {
		return true
	}
	return false
}
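
// setEventStreamHeaders sets the response headers required for server-sent
// events (SSE) streaming, including disabling proxy buffering.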
func setEventStreamHeaders(c *gin.Context) {
	c.Writer.Header().Set("Content-Type", "text/event-stream")
	c.Writer.Header().Set("Cache-Control", "no-cache")
	c.Writer.Header().Set("Connection", "keep-alive")
	c.Writer.Header().Set("Transfer-Encoding", "chunked")
	c.Writer.Header().Set("X-Accel-Buffering", "no")
}
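
// relayErrorHandler converts a bad upstream response into an
// OpenAIErrorWithStatusCode, using the upstream error body when it can be
// parsed and a generic "bad_response_status_code" error otherwise.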
func relayErrorHandler(resp *http.Response) (openAIErrorWithStatusCode *OpenAIErrorWithStatusCode) {
	openAIErrorWithStatusCode = &OpenAIErrorWithStatusCode{
		StatusCode: resp.StatusCode,
		OpenAIError: OpenAIError{
			Message: fmt.Sprintf("bad response status code %d", resp.StatusCode),
			Type:    "upstream_error",
			Code:    "bad_response_status_code",
			Param:   strconv.Itoa(resp.StatusCode),
		},
	}
	responseBody, err := io.ReadAll(resp.Body)
	if err != nil {
		return
	}
	err = resp.Body.Close()
	if err != nil {
		return
	}
	var textResponse TextResponse
	err = json.Unmarshal(responseBody, &textResponse)
	if err != nil {
		return
	}
	openAIErrorWithStatusCode.OpenAIError = textResponse.Error
	return
}
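
// getFullRequestURL joins the channel base URL and the relative request URL;
// for OpenAI channels whose base URL points at Cloudflare's AI Gateway, the
// "/v1" prefix is stripped from the request path.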
func getFullRequestURL(baseURL string, requestURL string, channelType int) string {
	fullRequestURL := fmt.Sprintf("%s%s", baseURL, requestURL)
	if channelType == common.ChannelTypeOpenAI {
		if strings.HasPrefix(baseURL, "https://gateway.ai.cloudflare.com") {
			fullRequestURL = fmt.Sprintf("%s%s", baseURL, strings.TrimPrefix(requestURL, "/v1"))
		}
	}
	return fullRequestURL
}