Explorar o código

Merge branch 'alpha' into mutil_key_channel

# Conflicts:
#	controller/channel.go
#	docker-compose.yml
#	web/src/components/table/ChannelsTable.js
#	web/src/pages/Channel/EditChannel.js
CaIon hai 7 meses
pai
achega
fa2cd85007
Modificáronse 100 ficheiros con 4414 adicións e 1045 borrados
  1. 10 2
      .env.example
  2. 19 0
      .github/PULL_REQUEST_TEMPLATE/pull_request_template.md
  3. 1 0
      .github/workflows/macos-release.yml
  4. 21 0
      .github/workflows/pr-target-branch-check.yml
  5. 1 2
      Dockerfile
  6. 1 1
      README.en.md
  7. 1 4
      README.md
  8. 71 0
      common/api_type.go
  9. 0 104
      common/constants.go
  10. 41 0
      common/endpoint_type.go
  11. 35 2
      common/gin.go
  12. 57 0
      common/http.go
  13. 23 1
      common/init.go
  14. 8 4
      common/json.go
  15. 42 0
      common/model.go
  16. 62 0
      common/page_info.go
  17. 9 1
      common/redis.go
  18. 45 2
      common/utils.go
  19. 26 0
      constant/README.md
  20. 34 0
      constant/api_type.go
  21. 0 9
      constant/cache_key.go
  22. 109 0
      constant/channel.go
  23. 31 6
      constant/context_key.go
  24. 16 0
      constant/endpoint_type.go
  25. 0 40
      constant/env.go
  26. 4 0
      constant/midjourney.go
  27. 5 0
      constant/task.go
  28. 49 10
      controller/channel-billing.go
  29. 19 13
      controller/channel-test.go
  30. 156 28
      controller/channel.go
  31. 11 3
      controller/group.go
  32. 41 39
      controller/misc.go
  33. 68 93
      controller/model.go
  34. 2 1
      controller/option.go
  35. 14 4
      controller/playground.go
  36. 6 6
      controller/pricing.go
  37. 24 0
      controller/ratio_config.go
  38. 474 0
      controller/ratio_sync.go
  39. 7 7
      controller/relay.go
  40. 2 0
      controller/task.go
  41. 138 0
      controller/task_video.go
  42. 29 0
      controller/token.go
  43. 6 8
      controller/topup.go
  44. 19 15
      controller/user.go
  45. 2 1
      docker-compose.yml
  46. 8 1
      dto/claude.go
  47. 1 0
      dto/dalle.go
  48. 6 0
      dto/midjourney.go
  49. 5 1
      dto/openai_request.go
  50. 3 1
      dto/openai_response.go
  51. 6 21
      dto/pricing.go
  52. 38 0
      dto/ratio_sync.go
  53. 1 1
      dto/rerank.go
  54. 47 0
      dto/video.go
  55. 1041 0
      i18n/zh-cn.json
  56. 56 33
      main.go
  57. 1 1
      middleware/auth.go
  58. 49 22
      middleware/distributor.go
  59. 47 0
      middleware/kling_adapter.go
  60. 2 2
      middleware/model-rate-limit.go
  61. 20 5
      model/ability.go
  62. 40 1
      model/cache.go
  63. 36 0
      model/channel.go
  64. 9 0
      model/main.go
  65. 2 0
      model/midjourney.go
  66. 26 13
      model/option.go
  67. 82 36
      model/pricing.go
  68. 34 0
      model/token.go
  69. 1 1
      model/token_cache.go
  70. 4 2
      model/user.go
  71. 7 7
      model/user_cache.go
  72. 19 2
      model/utils.go
  73. 3 8
      relay/audio_handler.go
  74. 2 0
      relay/channel/adapter.go
  75. 2 2
      relay/channel/ali/adaptor.go
  76. 1 4
      relay/channel/ali/image.go
  77. 2 4
      relay/channel/ali/rerank.go
  78. 5 27
      relay/channel/ali/text.go
  79. 4 13
      relay/channel/baidu/relay-baidu.go
  80. 58 46
      relay/channel/claude/relay-claude.go
  81. 6 15
      relay/channel/cloudflare/relay_cloudflare.go
  82. 4 11
      relay/channel/cohere/relay-cohere.go
  83. 6 11
      relay/channel/coze/relay-coze.go
  84. 3 13
      relay/channel/dify/relay-dify.go
  85. 6 3
      relay/channel/gemini/adaptor.go
  86. 1 0
      relay/channel/gemini/dto.go
  87. 25 33
      relay/channel/gemini/relay-gemini-native.go
  88. 101 66
      relay/channel/gemini/relay-gemini.go
  89. 1 0
      relay/channel/jina/constant.go
  90. 3 6
      relay/channel/mokaai/relay-mokaai.go
  91. 3 30
      relay/channel/ollama/relay-ollama.go
  92. 52 41
      relay/channel/openai/adaptor.go
  93. 50 110
      relay/channel/openai/relay-openai.go
  94. 8 24
      relay/channel/openai/relay_responses.go
  95. 1 1
      relay/channel/palm/adaptor.go
  96. 5 17
      relay/channel/palm/relay-palm.go
  97. 2 4
      relay/channel/siliconflow/relay-siliconflow.go
  98. 380 0
      relay/channel/task/jimeng/adaptor.go
  99. 346 0
      relay/channel/task/kling/adaptor.go
  100. 4 0
      relay/channel/task/suno/adaptor.go

+ 10 - 2
.env.example

@@ -7,6 +7,8 @@
 # 调试相关配置
 # 启用pprof
 # ENABLE_PPROF=true
+# 启用调试模式
+# DEBUG=true
 
 # 数据库相关配置
 # 数据库连接字符串
@@ -41,6 +43,14 @@
 # 更新任务启用
 # UPDATE_TASK=true
 
+# 对话超时设置
+# 所有请求超时时间,单位秒,默认为0,表示不限制
+# RELAY_TIMEOUT=0
+# 流模式无响应超时时间,单位秒,如果出现空补全可以尝试改为更大值
+# STREAMING_TIMEOUT=120
+
+# Gemini 识别图片 最大图片数量
+# GEMINI_VISION_MAX_IMAGE_NUM=16
 
 # 会话密钥
 # SESSION_SECRET=random_string
@@ -58,8 +68,6 @@
 # GET_MEDIA_TOKEN_NOT_STREAM=true
 # 设置 Dify 渠道是否输出工作流和节点信息到客户端
 # DIFY_DEBUG=true
-# 设置流式一次回复的超时时间
-# STREAMING_TIMEOUT=90
 
 
 # 节点类型

+ 19 - 0
.github/PULL_REQUEST_TEMPLATE/pull_request_template.md

@@ -0,0 +1,19 @@
+### PR 类型
+
+- [ ] Bug 修复
+- [ ] 新功能
+- [ ] 文档更新
+- [ ] 其他
+
+### PR 是否包含破坏性更新?
+
+- [ ] 是
+- [ ] 否
+
+### PR 描述
+
+**请在下方详细描述您的 PR,包括目的、实现细节等。**
+
+### **重要提示**
+
+**所有 PR 都必须提交到 `alpha` 分支。请确保您的 PR 目标分支是 `alpha`。**

+ 1 - 0
.github/workflows/macos-release.yml

@@ -26,6 +26,7 @@ jobs:
       - name: Build Frontend
         env:
           CI: ""
+          NODE_OPTIONS: "--max-old-space-size=4096"
         run: |
           cd web
           bun install

+ 21 - 0
.github/workflows/pr-target-branch-check.yml

@@ -0,0 +1,21 @@
+name: Check PR Branching Strategy
+on:
+  pull_request:
+    types: [opened, synchronize, reopened, edited]
+
+jobs:
+  check-branching-strategy:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Enforce branching strategy
+        run: |
+          if [[ "${{ github.base_ref }}" == "main" ]]; then
+            if [[ "${{ github.head_ref }}" != "alpha" ]]; then
+              echo "Error: Pull requests to 'main' are only allowed from the 'alpha' branch."
+              exit 1
+            fi
+          elif [[ "${{ github.base_ref }}" != "alpha" ]]; then
+            echo "Error: Pull requests must be targeted to the 'alpha' or 'main' branch."
+            exit 1
+          fi
+          echo "Branching strategy check passed."

+ 1 - 2
Dockerfile

@@ -24,8 +24,7 @@ RUN go build -ldflags "-s -w -X 'one-api/common.Version=$(cat VERSION)'" -o one-
 
 FROM alpine
 
-RUN apk update \
-    && apk upgrade \
+RUN apk upgrade --no-cache \
     && apk add --no-cache ca-certificates tzdata ffmpeg \
     && update-ca-certificates
 

+ 1 - 1
README.en.md

@@ -100,7 +100,7 @@ This version supports multiple models, please refer to [API Documentation-Relay
 For detailed configuration instructions, please refer to [Installation Guide-Environment Variables Configuration](https://docs.newapi.pro/installation/environment-variables):
 
 - `GENERATE_DEFAULT_TOKEN`: Whether to generate initial tokens for newly registered users, default is `false`
-- `STREAMING_TIMEOUT`: Streaming response timeout, default is 60 seconds
+- `STREAMING_TIMEOUT`: Streaming response timeout, default is 120 seconds
 - `DIFY_DEBUG`: Whether to output workflow and node information for Dify channels, default is `true`
 - `FORCE_STREAM_OPTION`: Whether to override client stream_options parameter, default is `true`
 - `GET_MEDIA_TOKEN`: Whether to count image tokens, default is `true`

+ 1 - 4
README.md

@@ -27,9 +27,6 @@
   <a href="https://goreportcard.com/report/github.com/Calcium-Ion/new-api">
     <img src="https://goreportcard.com/badge/github.com/Calcium-Ion/new-api" alt="GoReportCard">
   </a>
-  <a href="https://coderabbit.ai">
-    <img src="https://img.shields.io/coderabbit/prs/github/QuantumNous/new-api?utm_source=oss&utm_medium=github&utm_campaign=QuantumNous%2Fnew-api&labelColor=171717&color=FF570A&link=https%3A%2F%2Fcoderabbit.ai&label=CodeRabbit+Reviews" alt="CodeRabbit Pull Request Reviews">
-  </a>
 </p>
 </div>
 
@@ -103,7 +100,7 @@ New API提供了丰富的功能,详细特性请参考[特性说明](https://do
 详细配置说明请参考[安装指南-环境变量配置](https://docs.newapi.pro/installation/environment-variables):
 
 - `GENERATE_DEFAULT_TOKEN`:是否为新注册用户生成初始令牌,默认为 `false`
-- `STREAMING_TIMEOUT`:流式回复超时时间,默认60秒
+- `STREAMING_TIMEOUT`:流式回复超时时间,默认120秒
 - `DIFY_DEBUG`:Dify渠道是否输出工作流和节点信息,默认 `true`
 - `FORCE_STREAM_OPTION`:是否覆盖客户端stream_options参数,默认 `true`
 - `GET_MEDIA_TOKEN`:是否统计图片token,默认 `true`

+ 71 - 0
common/api_type.go

@@ -0,0 +1,71 @@
+package common
+
+import "one-api/constant"
+
+func ChannelType2APIType(channelType int) (int, bool) {
+	apiType := -1
+	switch channelType {
+	case constant.ChannelTypeOpenAI:
+		apiType = constant.APITypeOpenAI
+	case constant.ChannelTypeAnthropic:
+		apiType = constant.APITypeAnthropic
+	case constant.ChannelTypeBaidu:
+		apiType = constant.APITypeBaidu
+	case constant.ChannelTypePaLM:
+		apiType = constant.APITypePaLM
+	case constant.ChannelTypeZhipu:
+		apiType = constant.APITypeZhipu
+	case constant.ChannelTypeAli:
+		apiType = constant.APITypeAli
+	case constant.ChannelTypeXunfei:
+		apiType = constant.APITypeXunfei
+	case constant.ChannelTypeAIProxyLibrary:
+		apiType = constant.APITypeAIProxyLibrary
+	case constant.ChannelTypeTencent:
+		apiType = constant.APITypeTencent
+	case constant.ChannelTypeGemini:
+		apiType = constant.APITypeGemini
+	case constant.ChannelTypeZhipu_v4:
+		apiType = constant.APITypeZhipuV4
+	case constant.ChannelTypeOllama:
+		apiType = constant.APITypeOllama
+	case constant.ChannelTypePerplexity:
+		apiType = constant.APITypePerplexity
+	case constant.ChannelTypeAws:
+		apiType = constant.APITypeAws
+	case constant.ChannelTypeCohere:
+		apiType = constant.APITypeCohere
+	case constant.ChannelTypeDify:
+		apiType = constant.APITypeDify
+	case constant.ChannelTypeJina:
+		apiType = constant.APITypeJina
+	case constant.ChannelCloudflare:
+		apiType = constant.APITypeCloudflare
+	case constant.ChannelTypeSiliconFlow:
+		apiType = constant.APITypeSiliconFlow
+	case constant.ChannelTypeVertexAi:
+		apiType = constant.APITypeVertexAi
+	case constant.ChannelTypeMistral:
+		apiType = constant.APITypeMistral
+	case constant.ChannelTypeDeepSeek:
+		apiType = constant.APITypeDeepSeek
+	case constant.ChannelTypeMokaAI:
+		apiType = constant.APITypeMokaAI
+	case constant.ChannelTypeVolcEngine:
+		apiType = constant.APITypeVolcEngine
+	case constant.ChannelTypeBaiduV2:
+		apiType = constant.APITypeBaiduV2
+	case constant.ChannelTypeOpenRouter:
+		apiType = constant.APITypeOpenRouter
+	case constant.ChannelTypeXinference:
+		apiType = constant.APITypeXinference
+	case constant.ChannelTypeXai:
+		apiType = constant.APITypeXai
+	case constant.ChannelTypeCoze:
+		apiType = constant.APITypeCoze
+	}
+	if apiType == -1 {
+		return constant.APITypeOpenAI, false
+	}
+	return apiType, true
+}

+ 0 - 104
common/constants.go

@@ -193,107 +193,3 @@ const (
 	ChannelStatusManuallyDisabled = 2 // also don't use 0
 	ChannelStatusAutoDisabled     = 3
 )
-
-const (
-	ChannelTypeUnknown        = 0
-	ChannelTypeOpenAI         = 1
-	ChannelTypeMidjourney     = 2
-	ChannelTypeAzure          = 3
-	ChannelTypeOllama         = 4
-	ChannelTypeMidjourneyPlus = 5
-	ChannelTypeOpenAIMax      = 6
-	ChannelTypeOhMyGPT        = 7
-	ChannelTypeCustom         = 8
-	ChannelTypeAILS           = 9
-	ChannelTypeAIProxy        = 10
-	ChannelTypePaLM           = 11
-	ChannelTypeAPI2GPT        = 12
-	ChannelTypeAIGC2D         = 13
-	ChannelTypeAnthropic      = 14
-	ChannelTypeBaidu          = 15
-	ChannelTypeZhipu          = 16
-	ChannelTypeAli            = 17
-	ChannelTypeXunfei         = 18
-	ChannelType360            = 19
-	ChannelTypeOpenRouter     = 20
-	ChannelTypeAIProxyLibrary = 21
-	ChannelTypeFastGPT        = 22
-	ChannelTypeTencent        = 23
-	ChannelTypeGemini         = 24
-	ChannelTypeMoonshot       = 25
-	ChannelTypeZhipu_v4       = 26
-	ChannelTypePerplexity     = 27
-	ChannelTypeLingYiWanWu    = 31
-	ChannelTypeAws            = 33
-	ChannelTypeCohere         = 34
-	ChannelTypeMiniMax        = 35
-	ChannelTypeSunoAPI        = 36
-	ChannelTypeDify           = 37
-	ChannelTypeJina           = 38
-	ChannelCloudflare         = 39
-	ChannelTypeSiliconFlow    = 40
-	ChannelTypeVertexAi       = 41
-	ChannelTypeMistral        = 42
-	ChannelTypeDeepSeek       = 43
-	ChannelTypeMokaAI         = 44
-	ChannelTypeVolcEngine     = 45
-	ChannelTypeBaiduV2        = 46
-	ChannelTypeXinference     = 47
-	ChannelTypeXai            = 48
-	ChannelTypeCoze           = 49
-	ChannelTypeDummy          // this one is only for count, do not add any channel after this
-
-)
-
-var ChannelBaseURLs = []string{
-	"",                                    // 0
-	"https://api.openai.com",              // 1
-	"https://oa.api2d.net",                // 2
-	"",                                    // 3
-	"http://localhost:11434",              // 4
-	"https://api.openai-sb.com",           // 5
-	"https://api.openaimax.com",           // 6
-	"https://api.ohmygpt.com",             // 7
-	"",                                    // 8
-	"https://api.caipacity.com",           // 9
-	"https://api.aiproxy.io",              // 10
-	"",                                    // 11
-	"https://api.api2gpt.com",             // 12
-	"https://api.aigc2d.com",              // 13
-	"https://api.anthropic.com",           // 14
-	"https://aip.baidubce.com",            // 15
-	"https://open.bigmodel.cn",            // 16
-	"https://dashscope.aliyuncs.com",      // 17
-	"",                                    // 18
-	"https://api.360.cn",                  // 19
-	"https://openrouter.ai/api",           // 20
-	"https://api.aiproxy.io",              // 21
-	"https://fastgpt.run/api/openapi",     // 22
-	"https://hunyuan.tencentcloudapi.com", //23
-	"https://generativelanguage.googleapis.com", //24
-	"https://api.moonshot.cn",                   //25
-	"https://open.bigmodel.cn",                  //26
-	"https://api.perplexity.ai",                 //27
-	"",                                          //28
-	"",                                          //29
-	"",                                          //30
-	"https://api.lingyiwanwu.com",               //31
-	"",                                          //32
-	"",                                          //33
-	"https://api.cohere.ai",                     //34
-	"https://api.minimax.chat",                  //35
-	"",                                          //36
-	"https://api.dify.ai",                       //37
-	"https://api.jina.ai",                       //38
-	"https://api.cloudflare.com",                //39
-	"https://api.siliconflow.cn",                //40
-	"",                                          //41
-	"https://api.mistral.ai",                    //42
-	"https://api.deepseek.com",                  //43
-	"https://api.moka.ai",                       //44
-	"https://ark.cn-beijing.volces.com",         //45
-	"https://qianfan.baidubce.com",              //46
-	"",                                          //47
-	"https://api.x.ai",                          //48
-	"https://api.coze.cn",                       //49
-}

+ 41 - 0
common/endpoint_type.go

@@ -0,0 +1,41 @@
+package common
+
+import "one-api/constant"
+
+// GetEndpointTypesByChannelType 获取渠道最优先端点类型(所有的渠道都支持 OpenAI 端点)
+func GetEndpointTypesByChannelType(channelType int, modelName string) []constant.EndpointType {
+	var endpointTypes []constant.EndpointType
+	switch channelType {
+	case constant.ChannelTypeJina:
+		endpointTypes = []constant.EndpointType{constant.EndpointTypeJinaRerank}
+	//case constant.ChannelTypeMidjourney, constant.ChannelTypeMidjourneyPlus:
+	//	endpointTypes = []constant.EndpointType{constant.EndpointTypeMidjourney}
+	//case constant.ChannelTypeSunoAPI:
+	//	endpointTypes = []constant.EndpointType{constant.EndpointTypeSuno}
+	//case constant.ChannelTypeKling:
+	//	endpointTypes = []constant.EndpointType{constant.EndpointTypeKling}
+	//case constant.ChannelTypeJimeng:
+	//	endpointTypes = []constant.EndpointType{constant.EndpointTypeJimeng}
+	case constant.ChannelTypeAws:
+		fallthrough
+	case constant.ChannelTypeAnthropic:
+		endpointTypes = []constant.EndpointType{constant.EndpointTypeAnthropic, constant.EndpointTypeOpenAI}
+	case constant.ChannelTypeVertexAi:
+		fallthrough
+	case constant.ChannelTypeGemini:
+		endpointTypes = []constant.EndpointType{constant.EndpointTypeGemini, constant.EndpointTypeOpenAI}
+	case constant.ChannelTypeOpenRouter: // OpenRouter 只支持 OpenAI 端点
+		endpointTypes = []constant.EndpointType{constant.EndpointTypeOpenAI}
+	default:
+		if IsOpenAIResponseOnlyModel(modelName) {
+			endpointTypes = []constant.EndpointType{constant.EndpointTypeOpenAIResponse}
+		} else {
+			endpointTypes = []constant.EndpointType{constant.EndpointTypeOpenAI}
+		}
+	}
+	if IsImageGenerationModel(modelName) {
+		// add to first
+		endpointTypes = append([]constant.EndpointType{constant.EndpointTypeImageGeneration}, endpointTypes...)
+	}
+	return endpointTypes
+}

+ 35 - 2
common/gin.go

@@ -2,10 +2,11 @@ package common
 
 import (
 	"bytes"
-	"encoding/json"
 	"github.com/gin-gonic/gin"
 	"io"
+	"one-api/constant"
 	"strings"
+	"time"
 )
 
 const KeyRequestBody = "key_request_body"
@@ -31,7 +32,7 @@ func UnmarshalBodyReusable(c *gin.Context, v any) error {
 	}
 	contentType := c.Request.Header.Get("Content-Type")
 	if strings.HasPrefix(contentType, "application/json") {
-		err = json.Unmarshal(requestBody, &v)
+		err = UnmarshalJson(requestBody, &v)
 	} else {
 		// skip for now
 		// TODO: someday non json request have variant model, we will need to implementation this
@@ -43,3 +44,35 @@ func UnmarshalBodyReusable(c *gin.Context, v any) error {
 	c.Request.Body = io.NopCloser(bytes.NewBuffer(requestBody))
 	return nil
 }
+
+func SetContextKey(c *gin.Context, key constant.ContextKey, value any) {
+	c.Set(string(key), value)
+}
+
+func GetContextKey(c *gin.Context, key constant.ContextKey) (any, bool) {
+	return c.Get(string(key))
+}
+
+func GetContextKeyString(c *gin.Context, key constant.ContextKey) string {
+	return c.GetString(string(key))
+}
+
+func GetContextKeyInt(c *gin.Context, key constant.ContextKey) int {
+	return c.GetInt(string(key))
+}
+
+func GetContextKeyBool(c *gin.Context, key constant.ContextKey) bool {
+	return c.GetBool(string(key))
+}
+
+func GetContextKeyStringSlice(c *gin.Context, key constant.ContextKey) []string {
+	return c.GetStringSlice(string(key))
+}
+
+func GetContextKeyStringMap(c *gin.Context, key constant.ContextKey) map[string]any {
+	return c.GetStringMap(string(key))
+}
+
+func GetContextKeyTime(c *gin.Context, key constant.ContextKey) time.Time {
+	return c.GetTime(string(key))
+}

+ 57 - 0
common/http.go

@@ -0,0 +1,57 @@
+package common
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"net/http"
+
+	"github.com/gin-gonic/gin"
+)
+
+func CloseResponseBodyGracefully(httpResponse *http.Response) {
+	if httpResponse == nil || httpResponse.Body == nil {
+		return
+	}
+	err := httpResponse.Body.Close()
+	if err != nil {
+		SysError("failed to close response body: " + err.Error())
+	}
+}
+
+func IOCopyBytesGracefully(c *gin.Context, src *http.Response, data []byte) {
+	if c.Writer == nil {
+		return
+	}
+
+	body := io.NopCloser(bytes.NewBuffer(data))
+
+	// We shouldn't set the header before we parse the response body, because the parse part may fail.
+	// And then we will have to send an error response, but in this case, the header has already been set.
+	// So the httpClient will be confused by the response.
+	// For example, Postman will report error, and we cannot check the response at all.
+	if src != nil {
+		for k, v := range src.Header {
+			// avoid setting Content-Length
+			if k == "Content-Length" {
+				continue
+			}
+			c.Writer.Header().Set(k, v[0])
+		}
+	}
+
+	// set Content-Length header manually BEFORE calling WriteHeader
+	c.Writer.Header().Set("Content-Length", fmt.Sprintf("%d", len(data)))
+
+	// Write header with status code (this sends the headers)
+	if src != nil {
+		c.Writer.WriteHeader(src.StatusCode)
+	} else {
+		c.Writer.WriteHeader(http.StatusOK)
+	}
+
+	_, err := io.Copy(c.Writer, body)
+	if err != nil {
+		LogError(c, fmt.Sprintf("failed to copy response body: %s", err.Error()))
+	}
+}

+ 23 - 1
common/init.go

@@ -4,6 +4,7 @@ import (
 	"flag"
 	"fmt"
 	"log"
+	"one-api/constant"
 	"os"
 	"path/filepath"
 	"strconv"
@@ -24,7 +25,7 @@ func printHelp() {
 	fmt.Println("Usage: one-api [--port <port>] [--log-dir <log directory>] [--version] [--help]")
 }
 
-func LoadEnv() {
+func InitEnv() {
 	flag.Parse()
 
 	if *PrintVersion {
@@ -95,4 +96,25 @@ func LoadEnv() {
 	GlobalWebRateLimitEnable = GetEnvOrDefaultBool("GLOBAL_WEB_RATE_LIMIT_ENABLE", true)
 	GlobalWebRateLimitNum = GetEnvOrDefault("GLOBAL_WEB_RATE_LIMIT", 60)
 	GlobalWebRateLimitDuration = int64(GetEnvOrDefault("GLOBAL_WEB_RATE_LIMIT_DURATION", 180))
+
+	initConstantEnv()
+}
+
+func initConstantEnv() {
+	constant.StreamingTimeout = GetEnvOrDefault("STREAMING_TIMEOUT", 120)
+	constant.DifyDebug = GetEnvOrDefaultBool("DIFY_DEBUG", true)
+	constant.MaxFileDownloadMB = GetEnvOrDefault("MAX_FILE_DOWNLOAD_MB", 20)
+	// ForceStreamOption 覆盖请求参数,强制返回usage信息
+	constant.ForceStreamOption = GetEnvOrDefaultBool("FORCE_STREAM_OPTION", true)
+	constant.GetMediaToken = GetEnvOrDefaultBool("GET_MEDIA_TOKEN", true)
+	constant.GetMediaTokenNotStream = GetEnvOrDefaultBool("GET_MEDIA_TOKEN_NOT_STREAM", true)
+	constant.UpdateTask = GetEnvOrDefaultBool("UPDATE_TASK", true)
+	constant.AzureDefaultAPIVersion = GetEnvOrDefaultString("AZURE_DEFAULT_API_VERSION", "2025-04-01-preview")
+	constant.GeminiVisionMaxImageNum = GetEnvOrDefault("GEMINI_VISION_MAX_IMAGE_NUM", 16)
+	constant.NotifyLimitCount = GetEnvOrDefault("NOTIFY_LIMIT_COUNT", 2)
+	constant.NotificationLimitDurationMinute = GetEnvOrDefault("NOTIFICATION_LIMIT_DURATION_MINUTE", 10)
+	// GenerateDefaultToken 是否生成初始令牌,默认关闭。
+	constant.GenerateDefaultToken = GetEnvOrDefaultBool("GENERATE_DEFAULT_TOKEN", false)
+	// 是否启用错误日志
+	constant.ErrorLogEnabled = GetEnvOrDefaultBool("ERROR_LOG_ENABLED", false)
 }

+ 8 - 4
common/json.go

@@ -5,12 +5,16 @@ import (
 	"encoding/json"
 )
 
-func DecodeJson(data []byte, v any) error {
-	return json.NewDecoder(bytes.NewReader(data)).Decode(v)
+func UnmarshalJson(data []byte, v any) error {
+	return json.Unmarshal(data, v)
 }
 
-func DecodeJsonStr(data string, v any) error {
-	return DecodeJson(StringToByteSlice(data), v)
+func UnmarshalJsonStr(data string, v any) error {
+	return json.Unmarshal(StringToByteSlice(data), v)
+}
+
+func DecodeJson(reader *bytes.Reader, v any) error {
+	return json.NewDecoder(reader).Decode(v)
 }
 
 func EncodeJson(v any) ([]byte, error) {

+ 42 - 0
common/model.go

@@ -0,0 +1,42 @@
+package common
+
+import "strings"
+
+var (
+	// OpenAIResponseOnlyModels is a list of models that are only available for OpenAI responses.
+	OpenAIResponseOnlyModels = []string{
+		"o3-pro",
+		"o3-deep-research",
+		"o4-mini-deep-research",
+	}
+	ImageGenerationModels = []string{
+		"dall-e-3",
+		"dall-e-2",
+		"gpt-image-1",
+		"prefix:imagen-",
+		"flux-",
+		"flux.1-",
+	}
+)
+
+func IsOpenAIResponseOnlyModel(modelName string) bool {
+	for _, m := range OpenAIResponseOnlyModels {
+		if strings.Contains(modelName, m) {
+			return true
+		}
+	}
+	return false
+}
+
+func IsImageGenerationModel(modelName string) bool {
+	modelName = strings.ToLower(modelName)
+	for _, m := range ImageGenerationModels {
+		if strings.Contains(modelName, m) {
+			return true
+		}
+		if strings.HasPrefix(m, "prefix:") && strings.HasPrefix(modelName, strings.TrimPrefix(m, "prefix:")) {
+			return true
+		}
+	}
+	return false
+}

+ 62 - 0
common/page_info.go

@@ -0,0 +1,62 @@
+package common
+
+import (
+	"github.com/gin-gonic/gin"
+	"strconv"
+)
+
+type PageInfo struct {
+	Page           int   `json:"page"`            // page num 页码
+	PageSize       int   `json:"page_size"`       // page size 页大小
+	StartTimestamp int64 `json:"start_timestamp"` // 秒级
+	EndTimestamp   int64 `json:"end_timestamp"`   // 秒级
+
+	Total int `json:"total"` // 总条数,后设置
+	Items any `json:"items"` // 数据,后设置
+}
+
+func (p *PageInfo) GetStartIdx() int {
+	return (p.Page - 1) * p.PageSize
+}
+
+func (p *PageInfo) GetEndIdx() int {
+	return p.Page * p.PageSize
+}
+
+func (p *PageInfo) GetPageSize() int {
+	return p.PageSize
+}
+
+func (p *PageInfo) GetPage() int {
+	return p.Page
+}
+
+func (p *PageInfo) SetTotal(total int) {
+	p.Total = total
+}
+
+func (p *PageInfo) SetItems(items any) {
+	p.Items = items
+}
+
+func GetPageQuery(c *gin.Context) (*PageInfo, error) {
+	pageInfo := &PageInfo{}
+	err := c.BindQuery(pageInfo)
+	if err != nil {
+		return nil, err
+	}
+	if pageInfo.Page < 1 {
+		// 兼容
+		page, _ := strconv.Atoi(c.Query("p"))
+		if page != 0 {
+			pageInfo.Page = page
+		} else {
+			pageInfo.Page = 1
+		}
+	}
+
+	if pageInfo.PageSize == 0 {
+		pageInfo.PageSize = ItemsPerPage
+	}
+	return pageInfo, nil
+}

+ 9 - 1
common/redis.go

@@ -16,6 +16,10 @@ import (
 var RDB *redis.Client
 var RedisEnabled = true
 
+func RedisKeyCacheSeconds() int {
+	return SyncFrequency
+}
+
 // InitRedisClient This function is called after init()
 func InitRedisClient() (err error) {
 	if os.Getenv("REDIS_CONN_STRING") == "" {
@@ -141,7 +145,11 @@ func RedisHSetObj(key string, obj interface{}, expiration time.Duration) error {
 
 	txn := RDB.TxPipeline()
 	txn.HSet(ctx, key, data)
-	txn.Expire(ctx, key, expiration)
+
+	// 只有在 expiration 大于 0 时才设置过期时间
+	if expiration > 0 {
+		txn.Expire(ctx, key, expiration)
+	}
 
 	_, err := txn.Exec(ctx)
 	if err != nil {

+ 45 - 2
common/utils.go

@@ -13,6 +13,7 @@ import (
 	"math/big"
 	"math/rand"
 	"net"
+	"net/url"
 	"os"
 	"os/exec"
 	"runtime"
@@ -249,13 +250,55 @@ func SaveTmpFile(filename string, data io.Reader) (string, error) {
 }
 
 // GetAudioDuration returns the duration of an audio file in seconds.
-func GetAudioDuration(ctx context.Context, filename string) (float64, error) {
+func GetAudioDuration(ctx context.Context, filename string, ext string) (float64, error) {
 	// ffprobe -v error -show_entries format=duration -of default=noprint_wrappers=1:nokey=1 {{input}}
 	c := exec.CommandContext(ctx, "ffprobe", "-v", "error", "-show_entries", "format=duration", "-of", "default=noprint_wrappers=1:nokey=1", filename)
 	output, err := c.Output()
 	if err != nil {
 		return 0, errors.Wrap(err, "failed to get audio duration")
 	}
+  durationStr := string(bytes.TrimSpace(output))
+  if durationStr == "N/A" {
+    // Create a temporary output file name
+    tmpFp, err := os.CreateTemp("", "audio-*"+ext)
+    if err != nil {
+      return 0, errors.Wrap(err, "failed to create temporary file")
+    }
+    tmpName := tmpFp.Name()
+    // Close immediately so ffmpeg can open the file on Windows.
+    _ = tmpFp.Close()
+    defer os.Remove(tmpName)
+
+    // ffmpeg -y -i filename -vcodec copy -acodec copy <tmpName>
+    ffmpegCmd := exec.CommandContext(ctx, "ffmpeg", "-y", "-i", filename, "-vcodec", "copy", "-acodec", "copy", tmpName)
+    if err := ffmpegCmd.Run(); err != nil {
+      return 0, errors.Wrap(err, "failed to run ffmpeg")
+    }
+
+    // Recalculate the duration of the new file
+    c = exec.CommandContext(ctx, "ffprobe", "-v", "error", "-show_entries", "format=duration", "-of", "default=noprint_wrappers=1:nokey=1", tmpName)
+    output, err := c.Output()
+    if err != nil {
+      return 0, errors.Wrap(err, "failed to get audio duration after ffmpeg")
+    }
+    durationStr = string(bytes.TrimSpace(output))
+  }
+	return strconv.ParseFloat(durationStr, 64)
+}
 
-	return strconv.ParseFloat(string(bytes.TrimSpace(output)), 64)
+// BuildURL concatenates base and endpoint, returns the complete url string
+func BuildURL(base string, endpoint string) string {
+	u, err := url.Parse(base)
+	if err != nil {
+		return base + endpoint
+	}
+	end := endpoint
+	if end == "" {
+		end = "/"
+	}
+	ref, err := url.Parse(end)
+	if err != nil {
+		return base + endpoint
+	}
+	return u.ResolveReference(ref).String()
 }

+ 26 - 0
constant/README.md

@@ -0,0 +1,26 @@
+# constant 包 (`/constant`)
+
+该目录仅用于放置全局可复用的**常量定义**,不包含任何业务逻辑或依赖关系。
+
+## 当前文件
+
+| 文件                   | 说明                                                                  |
+|----------------------|---------------------------------------------------------------------|
+| `azure.go`           | 定义与 Azure 相关的全局常量,如 `AzureNoRemoveDotTime`(控制删除 `.` 的截止时间)。         |
+| `cache_key.go`       | 缓存键格式字符串及 Token 相关字段常量,统一缓存命名规则。                                    |
+| `channel_setting.go` | Channel 级别的设置键,如 `proxy`、`force_format` 等。                          |
+| `context_key.go`     | 定义 `ContextKey` 类型以及在整个项目中使用的上下文键常量(请求时间、Token/Channel/User 相关信息等)。 |
+| `env.go`             | 环境配置相关的全局变量,在启动阶段根据配置文件或环境变量注入。                                     |
+| `finish_reason.go`   | OpenAI/GPT 请求返回的 `finish_reason` 字符串常量集合。                           |
+| `midjourney.go`      | Midjourney 相关错误码及动作(Action)常量与模型到动作的映射表。                            |
+| `setup.go`           | 标识项目是否已完成初始化安装 (`Setup` 布尔值)。                                       |
+| `task.go`            | 各种任务(Task)平台、动作常量及模型与动作映射表,如 Suno、Midjourney 等。                     |
+| `user_setting.go`    | 用户设置相关键常量以及通知类型(Email/Webhook)等。                                    |
+
+## 使用约定
+
+1. `constant` 包**只能被其他包引用**(import),**禁止在此包中引用项目内的其他自定义包**。如确有需要,仅允许引用 **Go 标准库**。
+2. 不允许在此目录内编写任何与业务流程、数据库操作、第三方服务调用等相关的逻辑代码。
+3. 新增类型时,请保持命名语义清晰,并在本 README 的 **当前文件** 表格中补充说明,确保团队成员能够快速了解其用途。
+
+> ⚠️ 违反以上约定将导致包之间产生不必要的耦合,影响代码可维护性与可测试性。请在提交代码前自行检查。

+ 34 - 0
constant/api_type.go

@@ -0,0 +1,34 @@
+package constant
+
+const (
+	APITypeOpenAI = iota
+	APITypeAnthropic
+	APITypePaLM
+	APITypeBaidu
+	APITypeZhipu
+	APITypeAli
+	APITypeXunfei
+	APITypeAIProxyLibrary
+	APITypeTencent
+	APITypeGemini
+	APITypeZhipuV4
+	APITypeOllama
+	APITypePerplexity
+	APITypeAws
+	APITypeCohere
+	APITypeDify
+	APITypeJina
+	APITypeCloudflare
+	APITypeSiliconFlow
+	APITypeVertexAi
+	APITypeMistral
+	APITypeDeepSeek
+	APITypeMokaAI
+	APITypeVolcEngine
+	APITypeBaiduV2
+	APITypeOpenRouter
+	APITypeXinference
+	APITypeXai
+	APITypeCoze
+	APITypeDummy // this one is only for count, do not add any channel after this
+)

+ 0 - 9
constant/cache_key.go

@@ -1,14 +1,5 @@
 package constant
 
-import "one-api/common"
-
-var (
-	TokenCacheSeconds         = common.SyncFrequency
-	UserId2GroupCacheSeconds  = common.SyncFrequency
-	UserId2QuotaCacheSeconds  = common.SyncFrequency
-	UserId2StatusCacheSeconds = common.SyncFrequency
-)
-
 // Cache keys
 const (
 	UserGroupKeyFmt    = "user_group:%d"

+ 109 - 0
constant/channel.go

@@ -0,0 +1,109 @@
+package constant
+
+const (
+	ChannelTypeUnknown        = 0
+	ChannelTypeOpenAI         = 1
+	ChannelTypeMidjourney     = 2
+	ChannelTypeAzure          = 3
+	ChannelTypeOllama         = 4
+	ChannelTypeMidjourneyPlus = 5
+	ChannelTypeOpenAIMax      = 6
+	ChannelTypeOhMyGPT        = 7
+	ChannelTypeCustom         = 8
+	ChannelTypeAILS           = 9
+	ChannelTypeAIProxy        = 10
+	ChannelTypePaLM           = 11
+	ChannelTypeAPI2GPT        = 12
+	ChannelTypeAIGC2D         = 13
+	ChannelTypeAnthropic      = 14
+	ChannelTypeBaidu          = 15
+	ChannelTypeZhipu          = 16
+	ChannelTypeAli            = 17
+	ChannelTypeXunfei         = 18
+	ChannelType360            = 19
+	ChannelTypeOpenRouter     = 20
+	ChannelTypeAIProxyLibrary = 21
+	ChannelTypeFastGPT        = 22
+	ChannelTypeTencent        = 23
+	ChannelTypeGemini         = 24
+	ChannelTypeMoonshot       = 25
+	ChannelTypeZhipu_v4       = 26
+	ChannelTypePerplexity     = 27
+	ChannelTypeLingYiWanWu    = 31
+	ChannelTypeAws            = 33
+	ChannelTypeCohere         = 34
+	ChannelTypeMiniMax        = 35
+	ChannelTypeSunoAPI        = 36
+	ChannelTypeDify           = 37
+	ChannelTypeJina           = 38
+	ChannelCloudflare         = 39
+	ChannelTypeSiliconFlow    = 40
+	ChannelTypeVertexAi       = 41
+	ChannelTypeMistral        = 42
+	ChannelTypeDeepSeek       = 43
+	ChannelTypeMokaAI         = 44
+	ChannelTypeVolcEngine     = 45
+	ChannelTypeBaiduV2        = 46
+	ChannelTypeXinference     = 47
+	ChannelTypeXai            = 48
+	ChannelTypeCoze           = 49
+	ChannelTypeKling          = 50
+	ChannelTypeJimeng         = 51
+	ChannelTypeDummy          // this one is only for counting; do not add any channel type after this
+
+)
+
+var ChannelBaseURLs = []string{
+	"",                                    // 0
+	"https://api.openai.com",              // 1
+	"https://oa.api2d.net",                // 2
+	"",                                    // 3
+	"http://localhost:11434",              // 4
+	"https://api.openai-sb.com",           // 5
+	"https://api.openaimax.com",           // 6
+	"https://api.ohmygpt.com",             // 7
+	"",                                    // 8
+	"https://api.caipacity.com",           // 9
+	"https://api.aiproxy.io",              // 10
+	"",                                    // 11
+	"https://api.api2gpt.com",             // 12
+	"https://api.aigc2d.com",              // 13
+	"https://api.anthropic.com",           // 14
+	"https://aip.baidubce.com",            // 15
+	"https://open.bigmodel.cn",            // 16
+	"https://dashscope.aliyuncs.com",      // 17
+	"",                                    // 18
+	"https://api.360.cn",                  // 19
+	"https://openrouter.ai/api",           // 20
+	"https://api.aiproxy.io",              // 21
+	"https://fastgpt.run/api/openapi",     // 22
+	"https://hunyuan.tencentcloudapi.com", //23
+	"https://generativelanguage.googleapis.com", //24
+	"https://api.moonshot.cn",                   //25
+	"https://open.bigmodel.cn",                  //26
+	"https://api.perplexity.ai",                 //27
+	"",                                          //28
+	"",                                          //29
+	"",                                          //30
+	"https://api.lingyiwanwu.com",               //31
+	"",                                          //32
+	"",                                          //33
+	"https://api.cohere.ai",                     //34
+	"https://api.minimax.chat",                  //35
+	"",                                          //36
+	"https://api.dify.ai",                       //37
+	"https://api.jina.ai",                       //38
+	"https://api.cloudflare.com",                //39
+	"https://api.siliconflow.cn",                //40
+	"",                                          //41
+	"https://api.mistral.ai",                    //42
+	"https://api.deepseek.com",                  //43
+	"https://api.moka.ai",                       //44
+	"https://ark.cn-beijing.volces.com",         //45
+	"https://qianfan.baidubce.com",              //46
+	"",                                          //47
+	"https://api.x.ai",                          //48
+	"https://api.coze.cn",                       //49
+	"https://api.klingai.com",                   //50
+	"https://visual.volcengineapi.com",          //51
+}

+ 31 - 6
constant/context_key.go

@@ -1,10 +1,35 @@
 package constant
 
+type ContextKey string
+
 const (
-	ContextKeyRequestStartTime = "request_start_time"
-	ContextKeyUserSetting      = "user_setting"
-	ContextKeyUserQuota        = "user_quota"
-	ContextKeyUserStatus       = "user_status"
-	ContextKeyUserEmail        = "user_email"
-	ContextKeyUserGroup        = "user_group"
+	ContextKeyOriginalModel    ContextKey = "original_model"
+	ContextKeyRequestStartTime ContextKey = "request_start_time"
+
+	/* token related keys */
+	ContextKeyTokenUnlimited         ContextKey = "token_unlimited_quota"
+	ContextKeyTokenKey               ContextKey = "token_key"
+	ContextKeyTokenId                ContextKey = "token_id"
+	ContextKeyTokenGroup             ContextKey = "token_group"
+	ContextKeyTokenAllowIps          ContextKey = "allow_ips"
+	ContextKeyTokenSpecificChannelId ContextKey = "specific_channel_id"
+	ContextKeyTokenModelLimitEnabled ContextKey = "token_model_limit_enabled"
+	ContextKeyTokenModelLimit        ContextKey = "token_model_limit"
+
+	/* channel related keys */
+	ContextKeyBaseUrl        ContextKey = "base_url"
+	ContextKeyChannelType    ContextKey = "channel_type"
+	ContextKeyChannelId      ContextKey = "channel_id"
+	ContextKeyChannelSetting ContextKey = "channel_setting"
+	ContextKeyParamOverride  ContextKey = "param_override"
+
+	/* user related keys */
+	ContextKeyUserId      ContextKey = "id"
+	ContextKeyUserSetting ContextKey = "user_setting"
+	ContextKeyUserQuota   ContextKey = "user_quota"
+	ContextKeyUserStatus  ContextKey = "user_status"
+	ContextKeyUserEmail   ContextKey = "user_email"
+	ContextKeyUserGroup   ContextKey = "user_group"
+	ContextKeyUsingGroup  ContextKey = "group"
+	ContextKeyUserName    ContextKey = "username"
 )

+ 16 - 0
constant/endpoint_type.go

@@ -0,0 +1,16 @@
+package constant
+
+type EndpointType string
+
+const (
+	EndpointTypeOpenAI          EndpointType = "openai"
+	EndpointTypeOpenAIResponse  EndpointType = "openai-response"
+	EndpointTypeAnthropic       EndpointType = "anthropic"
+	EndpointTypeGemini          EndpointType = "gemini"
+	EndpointTypeJinaRerank      EndpointType = "jina-rerank"
+	EndpointTypeImageGeneration EndpointType = "image-generation"
+	//EndpointTypeMidjourney     EndpointType = "midjourney-proxy"
+	//EndpointTypeSuno           EndpointType = "suno-proxy"
+	//EndpointTypeKling          EndpointType = "kling"
+	//EndpointTypeJimeng         EndpointType = "jimeng"
+)

+ 0 - 40
constant/env.go

@@ -1,9 +1,5 @@
 package constant
 
-import (
-	"one-api/common"
-)
-
 var StreamingTimeout int
 var DifyDebug bool
 var MaxFileDownloadMB int
@@ -17,39 +13,3 @@ var NotifyLimitCount int
 var NotificationLimitDurationMinute int
 var GenerateDefaultToken bool
 var ErrorLogEnabled bool
-
-//var GeminiModelMap = map[string]string{
-//	"gemini-1.0-pro": "v1",
-//}
-
-func InitEnv() {
-	StreamingTimeout = common.GetEnvOrDefault("STREAMING_TIMEOUT", 60)
-	DifyDebug = common.GetEnvOrDefaultBool("DIFY_DEBUG", true)
-	MaxFileDownloadMB = common.GetEnvOrDefault("MAX_FILE_DOWNLOAD_MB", 20)
-	// ForceStreamOption 覆盖请求参数,强制返回usage信息
-	ForceStreamOption = common.GetEnvOrDefaultBool("FORCE_STREAM_OPTION", true)
-	GetMediaToken = common.GetEnvOrDefaultBool("GET_MEDIA_TOKEN", true)
-	GetMediaTokenNotStream = common.GetEnvOrDefaultBool("GET_MEDIA_TOKEN_NOT_STREAM", true)
-	UpdateTask = common.GetEnvOrDefaultBool("UPDATE_TASK", true)
-	AzureDefaultAPIVersion = common.GetEnvOrDefaultString("AZURE_DEFAULT_API_VERSION", "2025-04-01-preview")
-	GeminiVisionMaxImageNum = common.GetEnvOrDefault("GEMINI_VISION_MAX_IMAGE_NUM", 16)
-	NotifyLimitCount = common.GetEnvOrDefault("NOTIFY_LIMIT_COUNT", 2)
-	NotificationLimitDurationMinute = common.GetEnvOrDefault("NOTIFICATION_LIMIT_DURATION_MINUTE", 10)
-	// GenerateDefaultToken 是否生成初始令牌,默认关闭。
-	GenerateDefaultToken = common.GetEnvOrDefaultBool("GENERATE_DEFAULT_TOKEN", false)
-	// 是否启用错误日志
-	ErrorLogEnabled = common.GetEnvOrDefaultBool("ERROR_LOG_ENABLED", false)
-
-	//modelVersionMapStr := strings.TrimSpace(os.Getenv("GEMINI_MODEL_MAP"))
-	//if modelVersionMapStr == "" {
-	//	return
-	//}
-	//for _, pair := range strings.Split(modelVersionMapStr, ",") {
-	//	parts := strings.Split(pair, ":")
-	//	if len(parts) == 2 {
-	//		GeminiModelMap[parts[0]] = parts[1]
-	//	} else {
-	//		common.SysError(fmt.Sprintf("invalid model version map: %s", pair))
-	//	}
-	//}
-}

+ 4 - 0
constant/midjourney.go

@@ -22,6 +22,8 @@ const (
 	MjActionPan           = "PAN"
 	MjActionSwapFace      = "SWAP_FACE"
 	MjActionUpload        = "UPLOAD"
+	MjActionVideo         = "VIDEO"
+	MjActionEdits         = "EDITS"
 )
 
 var MidjourneyModel2Action = map[string]string{
@@ -41,4 +43,6 @@ var MidjourneyModel2Action = map[string]string{
 	"mj_pan":            MjActionPan,
 	"swap_face":         MjActionSwapFace,
 	"mj_upload":         MjActionUpload,
+	"mj_video":          MjActionVideo,
+	"mj_edits":          MjActionEdits,
 }

+ 5 - 0
constant/task.go

@@ -5,11 +5,16 @@ type TaskPlatform string
 const (
 	TaskPlatformSuno       TaskPlatform = "suno"
 	TaskPlatformMidjourney              = "mj"
+	TaskPlatformKling      TaskPlatform = "kling"
+	TaskPlatformJimeng     TaskPlatform = "jimeng"
 )
 
 const (
 	SunoActionMusic  = "MUSIC"
 	SunoActionLyrics = "LYRICS"
+
+	TaskActionGenerate     = "generate"
+	TaskActionTextGenerate = "textGenerate"
 )
 
 var SunoModel2Action = map[string]string{

+ 49 - 10
controller/channel-billing.go

@@ -4,11 +4,14 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
+	"github.com/shopspring/decimal"
 	"io"
 	"net/http"
 	"one-api/common"
+	"one-api/constant"
 	"one-api/model"
 	"one-api/service"
+	"one-api/setting"
 	"strconv"
 	"time"
 
@@ -304,34 +307,70 @@ func updateChannelOpenRouterBalance(channel *model.Channel) (float64, error) {
 	return balance, nil
 }
 
+func updateChannelMoonshotBalance(channel *model.Channel) (float64, error) {
+	url := "https://api.moonshot.cn/v1/users/me/balance"
+	body, err := GetResponseBody("GET", url, channel, GetAuthHeader(channel.Key))
+	if err != nil {
+		return 0, err
+	}
+
+	type MoonshotBalanceData struct {
+		AvailableBalance float64 `json:"available_balance"`
+		VoucherBalance   float64 `json:"voucher_balance"`
+		CashBalance      float64 `json:"cash_balance"`
+	}
+
+	type MoonshotBalanceResponse struct {
+		Code   int                 `json:"code"`
+		Data   MoonshotBalanceData `json:"data"`
+		Scode  string              `json:"scode"`
+		Status bool                `json:"status"`
+	}
+
+	response := MoonshotBalanceResponse{}
+	err = json.Unmarshal(body, &response)
+	if err != nil {
+		return 0, err
+	}
+	if !response.Status || response.Code != 0 {
+		return 0, fmt.Errorf("failed to update moonshot balance, status: %v, code: %d, scode: %s", response.Status, response.Code, response.Scode)
+	}
+	availableBalanceCny := response.Data.AvailableBalance
+	availableBalanceUsd := decimal.NewFromFloat(availableBalanceCny).Div(decimal.NewFromFloat(setting.Price)).InexactFloat64()
+	channel.UpdateBalance(availableBalanceUsd)
+	return availableBalanceUsd, nil
+}
+
 func updateChannelBalance(channel *model.Channel) (float64, error) {
-	baseURL := common.ChannelBaseURLs[channel.Type]
+	baseURL := constant.ChannelBaseURLs[channel.Type]
 	if channel.GetBaseURL() == "" {
 		channel.BaseURL = &baseURL
 	}
 	switch channel.Type {
-	case common.ChannelTypeOpenAI:
+	case constant.ChannelTypeOpenAI:
 		if channel.GetBaseURL() != "" {
 			baseURL = channel.GetBaseURL()
 		}
-	case common.ChannelTypeAzure:
+	case constant.ChannelTypeAzure:
 		return 0, errors.New("尚未实现")
-	case common.ChannelTypeCustom:
+	case constant.ChannelTypeCustom:
 		baseURL = channel.GetBaseURL()
 	//case common.ChannelTypeOpenAISB:
 	//	return updateChannelOpenAISBBalance(channel)
-	case common.ChannelTypeAIProxy:
+	case constant.ChannelTypeAIProxy:
 		return updateChannelAIProxyBalance(channel)
-	case common.ChannelTypeAPI2GPT:
+	case constant.ChannelTypeAPI2GPT:
 		return updateChannelAPI2GPTBalance(channel)
-	case common.ChannelTypeAIGC2D:
+	case constant.ChannelTypeAIGC2D:
 		return updateChannelAIGC2DBalance(channel)
-	case common.ChannelTypeSiliconFlow:
+	case constant.ChannelTypeSiliconFlow:
 		return updateChannelSiliconFlowBalance(channel)
-	case common.ChannelTypeDeepSeek:
+	case constant.ChannelTypeDeepSeek:
 		return updateChannelDeepSeekBalance(channel)
-	case common.ChannelTypeOpenRouter:
+	case constant.ChannelTypeOpenRouter:
 		return updateChannelOpenRouterBalance(channel)
+	case constant.ChannelTypeMoonshot:
+		return updateChannelMoonshotBalance(channel)
 	default:
 		return 0, errors.New("尚未实现")
 	}

+ 19 - 13
controller/channel-test.go

@@ -11,12 +11,12 @@ import (
 	"net/http/httptest"
 	"net/url"
 	"one-api/common"
+	"one-api/constant"
 	"one-api/dto"
 	"one-api/middleware"
 	"one-api/model"
 	"one-api/relay"
 	relaycommon "one-api/relay/common"
-	"one-api/relay/constant"
 	"one-api/relay/helper"
 	"one-api/service"
 	"strconv"
@@ -31,15 +31,21 @@ import (
 
 func testChannel(channel *model.Channel, testModel string) (err error, openAIErrorWithStatusCode *dto.OpenAIErrorWithStatusCode) {
 	tik := time.Now()
-	if channel.Type == common.ChannelTypeMidjourney {
+	if channel.Type == constant.ChannelTypeMidjourney {
 		return errors.New("midjourney channel test is not supported"), nil
 	}
-	if channel.Type == common.ChannelTypeMidjourneyPlus {
-		return errors.New("midjourney plus channel test is not supported!!!"), nil
+	if channel.Type == constant.ChannelTypeMidjourneyPlus {
+		return errors.New("midjourney plus channel test is not supported"), nil
 	}
-	if channel.Type == common.ChannelTypeSunoAPI {
+	if channel.Type == constant.ChannelTypeSunoAPI {
 		return errors.New("suno channel test is not supported"), nil
 	}
+	if channel.Type == constant.ChannelTypeKling {
+		return errors.New("kling channel test is not supported"), nil
+	}
+	if channel.Type == constant.ChannelTypeJimeng {
+		return errors.New("jimeng channel test is not supported"), nil
+	}
 	w := httptest.NewRecorder()
 	c, _ := gin.CreateTestContext(w)
 
@@ -50,7 +56,7 @@ func testChannel(channel *model.Channel, testModel string) (err error, openAIErr
 		strings.HasPrefix(testModel, "m3e") || // m3e 系列模型
 		strings.Contains(testModel, "bge-") || // bge 系列模型
 		strings.Contains(testModel, "embed") ||
-		channel.Type == common.ChannelTypeMokaAI { // 其他 embedding 模型
+		channel.Type == constant.ChannelTypeMokaAI { // 其他 embedding 模型
 		requestPath = "/v1/embeddings" // 修改请求路径
 	}
 
@@ -90,13 +96,13 @@ func testChannel(channel *model.Channel, testModel string) (err error, openAIErr
 
 	info := relaycommon.GenRelayInfo(c)
 
-	err = helper.ModelMappedHelper(c, info)
+	err = helper.ModelMappedHelper(c, info, nil)
 	if err != nil {
 		return err, nil
 	}
 	testModel = info.UpstreamModelName
 
-	apiType, _ := constant.ChannelType2APIType(channel.Type)
+	apiType, _ := common.ChannelType2APIType(channel.Type)
 	adaptor := relay.GetAdaptor(apiType)
 	if adaptor == nil {
 		return fmt.Errorf("invalid api type: %d, adaptor is nil", apiType), nil
@@ -165,10 +171,10 @@ func testChannel(channel *model.Channel, testModel string) (err error, openAIErr
 	tok := time.Now()
 	milliseconds := tok.Sub(tik).Milliseconds()
 	consumedTime := float64(milliseconds) / 1000.0
-	other := service.GenerateTextOtherInfo(c, info, priceData.ModelRatio, priceData.GroupRatio, priceData.CompletionRatio,
-		usage.PromptTokensDetails.CachedTokens, priceData.CacheRatio, priceData.ModelPrice, priceData.UserGroupRatio)
+	other := service.GenerateTextOtherInfo(c, info, priceData.ModelRatio, priceData.GroupRatioInfo.GroupRatio, priceData.CompletionRatio,
+		usage.PromptTokensDetails.CachedTokens, priceData.CacheRatio, priceData.ModelPrice, priceData.GroupRatioInfo.GroupSpecialRatio)
 	model.RecordConsumeLog(c, 1, channel.Id, usage.PromptTokens, usage.CompletionTokens, info.OriginModelName, "模型测试",
-		quota, "模型测试", 0, quota, int(consumedTime), false, info.Group, other)
+		quota, "模型测试", 0, quota, int(consumedTime), false, info.UsingGroup, other)
 	common.SysLog(fmt.Sprintf("testing channel #%d, response: \n%s", channel.Id, string(respBody)))
 	return nil, nil
 }
@@ -196,7 +202,7 @@ func buildTestRequest(model string) *dto.GeneralOpenAIRequest {
 			testRequest.MaxTokens = 50
 		}
 	} else if strings.Contains(model, "gemini") {
-		testRequest.MaxTokens = 300
+		testRequest.MaxTokens = 3000
 	} else {
 		testRequest.MaxTokens = 10
 	}
@@ -312,7 +318,7 @@ func testAllChannels(notify bool) error {
 			channel.UpdateResponseTime(milliseconds)
 			time.Sleep(common.RequestInterval)
 		}
-		
+
 		if notify {
 			service.NotifyRootUser(dto.NotifyTypeChannelTest, "通道测试完成", "所有通道测试已完成")
 		}

+ 156 - 28
controller/channel.go

@@ -5,6 +5,7 @@ import (
 	"fmt"
 	"net/http"
 	"one-api/common"
+	"one-api/constant"
 	"one-api/model"
 	"strconv"
 	"strings"
@@ -40,6 +41,17 @@ type OpenAIModelsResponse struct {
 	Success bool          `json:"success"`
 }
 
+func parseStatusFilter(statusParam string) int {
+	switch strings.ToLower(statusParam) {
+	case "enabled", "1":
+		return common.ChannelStatusEnabled
+	case "disabled", "0":
+		return 0
+	default:
+		return -1
+	}
+}
+
 func GetAllChannels(c *gin.Context) {
 	p, _ := strconv.Atoi(c.Query("p"))
 	pageSize, _ := strconv.Atoi(c.Query("page_size"))
@@ -52,44 +64,100 @@ func GetAllChannels(c *gin.Context) {
 	channelData := make([]*model.Channel, 0)
 	idSort, _ := strconv.ParseBool(c.Query("id_sort"))
 	enableTagMode, _ := strconv.ParseBool(c.Query("tag_mode"))
+	statusParam := c.Query("status")
+	// statusFilter: -1 all, 1 enabled, 0 disabled (includes both auto-disabled and manually disabled)
+	statusFilter := parseStatusFilter(statusParam)
+	// type filter
+	typeStr := c.Query("type")
+	typeFilter := -1
+	if typeStr != "" {
+		if t, err := strconv.Atoi(typeStr); err == nil {
+			typeFilter = t
+		}
+	}
 
 	var total int64
 
 	if enableTagMode {
-		// tag 分页:先分页 tag,再取各 tag 下 channels
 		tags, err := model.GetPaginatedTags((p-1)*pageSize, pageSize)
 		if err != nil {
 			c.JSON(http.StatusOK, gin.H{"success": false, "message": err.Error()})
 			return
 		}
 		for _, tag := range tags {
-			if tag != nil && *tag != "" {
-				tagChannel, err := model.GetChannelsByTag(*tag, idSort)
-				if err == nil {
-					channelData = append(channelData, tagChannel...)
+			if tag == nil || *tag == "" {
+				continue
+			}
+			tagChannels, err := model.GetChannelsByTag(*tag, idSort)
+			if err != nil {
+				continue
+			}
+			filtered := make([]*model.Channel, 0)
+			for _, ch := range tagChannels {
+				if statusFilter == common.ChannelStatusEnabled && ch.Status != common.ChannelStatusEnabled {
+					continue
 				}
+				if statusFilter == 0 && ch.Status == common.ChannelStatusEnabled {
+					continue
+				}
+				if typeFilter >= 0 && ch.Type != typeFilter {
+					continue
+				}
+				filtered = append(filtered, ch)
 			}
+			channelData = append(channelData, filtered...)
 		}
-		// 计算 tag 总数用于分页
 		total, _ = model.CountAllTags()
 	} else {
-		channels, err := model.GetAllChannels((p-1)*pageSize, pageSize, false, idSort)
+		baseQuery := model.DB.Model(&model.Channel{})
+		if typeFilter >= 0 {
+			baseQuery = baseQuery.Where("type = ?", typeFilter)
+		}
+		if statusFilter == common.ChannelStatusEnabled {
+			baseQuery = baseQuery.Where("status = ?", common.ChannelStatusEnabled)
+		} else if statusFilter == 0 {
+			baseQuery = baseQuery.Where("status != ?", common.ChannelStatusEnabled)
+		}
+
+		baseQuery.Count(&total)
+
+		order := "priority desc"
+		if idSort {
+			order = "id desc"
+		}
+
+		err := baseQuery.Order(order).Limit(pageSize).Offset((p - 1) * pageSize).Omit("key").Find(&channelData).Error
 		if err != nil {
 			c.JSON(http.StatusOK, gin.H{"success": false, "message": err.Error()})
 			return
 		}
-		channelData = channels
-		total, _ = model.CountAllChannels()
+	}
+
+	countQuery := model.DB.Model(&model.Channel{})
+	if statusFilter == common.ChannelStatusEnabled {
+		countQuery = countQuery.Where("status = ?", common.ChannelStatusEnabled)
+	} else if statusFilter == 0 {
+		countQuery = countQuery.Where("status != ?", common.ChannelStatusEnabled)
+	}
+	var results []struct {
+		Type  int64
+		Count int64
+	}
+	_ = countQuery.Select("type, count(*) as count").Group("type").Find(&results).Error
+	typeCounts := make(map[int64]int64)
+	for _, r := range results {
+		typeCounts[r.Type] = r.Count
 	}
 
 	c.JSON(http.StatusOK, gin.H{
 		"success": true,
 		"message": "",
 		"data": gin.H{
-			"items":     channelData,
-			"total":     total,
-			"page":      p,
-			"page_size": pageSize,
+			"items":       channelData,
+			"total":       total,
+			"page":        p,
+			"page_size":   pageSize,
+			"type_counts": typeCounts,
 		},
 	})
 	return
@@ -114,22 +182,15 @@ func FetchUpstreamModels(c *gin.Context) {
 		return
 	}
 
-	//if channel.Type != common.ChannelTypeOpenAI {
-	//	c.JSON(http.StatusOK, gin.H{
-	//		"success": false,
-	//		"message": "仅支持 OpenAI 类型渠道",
-	//	})
-	//	return
-	//}
-	baseURL := common.ChannelBaseURLs[channel.Type]
+	baseURL := constant.ChannelBaseURLs[channel.Type]
 	if channel.GetBaseURL() != "" {
 		baseURL = channel.GetBaseURL()
 	}
 	url := fmt.Sprintf("%s/v1/models", baseURL)
 	switch channel.Type {
-	case common.ChannelTypeGemini:
+	case constant.ChannelTypeGemini:
 		url = fmt.Sprintf("%s/v1beta/openai/models", baseURL)
-	case common.ChannelTypeAli:
+	case constant.ChannelTypeAli:
 		url = fmt.Sprintf("%s/compatible-mode/v1/models", baseURL)
 	}
 	body, err := GetResponseBody("GET", url, channel, GetAuthHeader(channel.Key))
@@ -153,7 +214,7 @@ func FetchUpstreamModels(c *gin.Context) {
 	var ids []string
 	for _, model := range result.Data {
 		id := model.ID
-		if channel.Type == common.ChannelTypeGemini {
+		if channel.Type == constant.ChannelTypeGemini {
 			id = strings.TrimPrefix(id, "models/")
 		}
 		ids = append(ids, id)
@@ -186,6 +247,8 @@ func SearchChannels(c *gin.Context) {
 	keyword := c.Query("keyword")
 	group := c.Query("group")
 	modelKeyword := c.Query("model")
+	statusParam := c.Query("status")
+	statusFilter := parseStatusFilter(statusParam)
 	idSort, _ := strconv.ParseBool(c.Query("id_sort"))
 	enableTagMode, _ := strconv.ParseBool(c.Query("tag_mode"))
 	channelData := make([]*model.Channel, 0)
@@ -217,10 +280,74 @@ func SearchChannels(c *gin.Context) {
 		}
 		channelData = channels
 	}
+
+	if statusFilter == common.ChannelStatusEnabled || statusFilter == 0 {
+		filtered := make([]*model.Channel, 0, len(channelData))
+		for _, ch := range channelData {
+			if statusFilter == common.ChannelStatusEnabled && ch.Status != common.ChannelStatusEnabled {
+				continue
+			}
+			if statusFilter == 0 && ch.Status == common.ChannelStatusEnabled {
+				continue
+			}
+			filtered = append(filtered, ch)
+		}
+		channelData = filtered
+	}
+
+	// calculate type counts for search results
+	typeCounts := make(map[int64]int64)
+	for _, channel := range channelData {
+		typeCounts[int64(channel.Type)]++
+	}
+
+	typeParam := c.Query("type")
+	typeFilter := -1
+	if typeParam != "" {
+		if tp, err := strconv.Atoi(typeParam); err == nil {
+			typeFilter = tp
+		}
+	}
+
+	if typeFilter >= 0 {
+		filtered := make([]*model.Channel, 0, len(channelData))
+		for _, ch := range channelData {
+			if ch.Type == typeFilter {
+				filtered = append(filtered, ch)
+			}
+		}
+		channelData = filtered
+	}
+
+	page, _ := strconv.Atoi(c.DefaultQuery("p", "1"))
+	pageSize, _ := strconv.Atoi(c.DefaultQuery("page_size", "20"))
+	if page < 1 {
+		page = 1
+	}
+	if pageSize <= 0 {
+		pageSize = 20
+	}
+
+	total := len(channelData)
+	startIdx := (page - 1) * pageSize
+	if startIdx > total {
+		startIdx = total
+	}
+	endIdx := startIdx + pageSize
+	if endIdx > total {
+		endIdx = total
+	}
+
+	pagedData := channelData[startIdx:endIdx]
+
 	c.JSON(http.StatusOK, gin.H{
 		"success": true,
 		"message": "",
-		"data":    channelData,
+		"data": gin.H{
+			"items":       pagedData,
+			"total":       total,
+			"type_counts": typeCounts,
+		},
 	})
 	return
 }
@@ -283,7 +410,7 @@ func AddChannel(c *gin.Context) {
 			return
 		}
 	}
-	if addChannelRequest.Channel.Type == common.ChannelTypeVertexAi {
+	if addChannelRequest.Channel.Type == constant.ChannelTypeVertexAi {
 		if addChannelRequest.Channel.Other == "" {
 			c.JSON(http.StatusOK, gin.H{
 				"success": false,
@@ -566,7 +693,7 @@ func UpdateChannel(c *gin.Context) {
 		})
 		return
 	}
-	if channel.Type == common.ChannelTypeVertexAi {
+	if channel.Type == constant.ChannelTypeVertexAi {
 		if channel.Other == "" {
 			c.JSON(http.StatusOK, gin.H{
 				"success": false,
@@ -595,6 +722,7 @@ func UpdateChannel(c *gin.Context) {
 		})
 		return
 	}
+	channel.Key = ""
 	c.JSON(http.StatusOK, gin.H{
 		"success": true,
 		"message": "",
@@ -620,7 +748,7 @@ func FetchModels(c *gin.Context) {
 
 	baseURL := req.BaseURL
 	if baseURL == "" {
-		baseURL = common.ChannelBaseURLs[req.Type]
+		baseURL = constant.ChannelBaseURLs[req.Type]
 	}
 
 	client := &http.Client{}

+ 11 - 3
controller/group.go

@@ -1,15 +1,17 @@
 package controller
 
 import (
-	"github.com/gin-gonic/gin"
 	"net/http"
 	"one-api/model"
 	"one-api/setting"
+	"one-api/setting/ratio_setting"
+
+	"github.com/gin-gonic/gin"
 )
 
 func GetGroups(c *gin.Context) {
 	groupNames := make([]string, 0)
-	for groupName, _ := range setting.GetGroupRatioCopy() {
+	for groupName := range ratio_setting.GetGroupRatioCopy() {
 		groupNames = append(groupNames, groupName)
 	}
 	c.JSON(http.StatusOK, gin.H{
@@ -24,7 +26,7 @@ func GetUserGroups(c *gin.Context) {
 	userGroup := ""
 	userId := c.GetInt("id")
 	userGroup, _ = model.GetUserGroup(userId, false)
-	for groupName, ratio := range setting.GetGroupRatioCopy() {
+	for groupName, ratio := range ratio_setting.GetGroupRatioCopy() {
 		// UserUsableGroups contains the groups that the user can use
 		userUsableGroups := setting.GetUserUsableGroups(userGroup)
 		if desc, ok := userUsableGroups[groupName]; ok {
@@ -34,6 +36,12 @@ func GetUserGroups(c *gin.Context) {
 			}
 		}
 	}
+	if setting.GroupInUserUsableGroups("auto") {
+		usableGroups["auto"] = map[string]interface{}{
+			"ratio": "自动",
+			"desc":  setting.GetUsableGroupDescription("auto"),
+		}
+	}
 	c.JSON(http.StatusOK, gin.H{
 		"success": true,
 		"message": "",

+ 41 - 39
controller/misc.go

@@ -9,9 +9,9 @@ import (
 	"one-api/middleware"
 	"one-api/model"
 	"one-api/setting"
+	"one-api/setting/console_setting"
 	"one-api/setting/operation_setting"
 	"one-api/setting/system_setting"
-	"one-api/setting/console_setting"
 	"strings"
 
 	"github.com/gin-gonic/gin"
@@ -41,46 +41,48 @@ func GetStatus(c *gin.Context) {
 	cs := console_setting.GetConsoleSetting()
 
 	data := gin.H{
-		"version":                     common.Version,
-		"start_time":                  common.StartTime,
-		"email_verification":          common.EmailVerificationEnabled,
-		"github_oauth":                common.GitHubOAuthEnabled,
-		"github_client_id":            common.GitHubClientId,
-		"linuxdo_oauth":               common.LinuxDOOAuthEnabled,
-		"linuxdo_client_id":           common.LinuxDOClientId,
-		"telegram_oauth":              common.TelegramOAuthEnabled,
-		"telegram_bot_name":           common.TelegramBotName,
-		"system_name":                 common.SystemName,
-		"logo":                        common.Logo,
-		"footer_html":                 common.Footer,
-		"wechat_qrcode":               common.WeChatAccountQRCodeImageURL,
-		"wechat_login":                common.WeChatAuthEnabled,
-		"server_address":              setting.ServerAddress,
-		"price":                       setting.Price,
-		"min_topup":                   setting.MinTopUp,
-		"turnstile_check":             common.TurnstileCheckEnabled,
-		"turnstile_site_key":          common.TurnstileSiteKey,
-		"top_up_link":                 common.TopUpLink,
-		"docs_link":                   operation_setting.GetGeneralSetting().DocsLink,
-		"quota_per_unit":              common.QuotaPerUnit,
-		"display_in_currency":         common.DisplayInCurrencyEnabled,
-		"enable_batch_update":         common.BatchUpdateEnabled,
-		"enable_drawing":              common.DrawingEnabled,
-		"enable_task":                 common.TaskEnabled,
-		"enable_data_export":          common.DataExportEnabled,
-		"data_export_default_time":    common.DataExportDefaultTime,
-		"default_collapse_sidebar":    common.DefaultCollapseSidebar,
-		"enable_online_topup":         setting.PayAddress != "" && setting.EpayId != "" && setting.EpayKey != "",
-		"mj_notify_enabled":           setting.MjNotifyEnabled,
-		"chats":                       setting.Chats,
-		"demo_site_enabled":           operation_setting.DemoSiteEnabled,
-		"self_use_mode_enabled":       operation_setting.SelfUseModeEnabled,
+		"version":                  common.Version,
+		"start_time":               common.StartTime,
+		"email_verification":       common.EmailVerificationEnabled,
+		"github_oauth":             common.GitHubOAuthEnabled,
+		"github_client_id":         common.GitHubClientId,
+		"linuxdo_oauth":            common.LinuxDOOAuthEnabled,
+		"linuxdo_client_id":        common.LinuxDOClientId,
+		"telegram_oauth":           common.TelegramOAuthEnabled,
+		"telegram_bot_name":        common.TelegramBotName,
+		"system_name":              common.SystemName,
+		"logo":                     common.Logo,
+		"footer_html":              common.Footer,
+		"wechat_qrcode":            common.WeChatAccountQRCodeImageURL,
+		"wechat_login":             common.WeChatAuthEnabled,
+		"server_address":           setting.ServerAddress,
+		"price":                    setting.Price,
+		"min_topup":                setting.MinTopUp,
+		"turnstile_check":          common.TurnstileCheckEnabled,
+		"turnstile_site_key":       common.TurnstileSiteKey,
+		"top_up_link":              common.TopUpLink,
+		"docs_link":                operation_setting.GetGeneralSetting().DocsLink,
+		"quota_per_unit":           common.QuotaPerUnit,
+		"display_in_currency":      common.DisplayInCurrencyEnabled,
+		"enable_batch_update":      common.BatchUpdateEnabled,
+		"enable_drawing":           common.DrawingEnabled,
+		"enable_task":              common.TaskEnabled,
+		"enable_data_export":       common.DataExportEnabled,
+		"data_export_default_time": common.DataExportDefaultTime,
+		"default_collapse_sidebar": common.DefaultCollapseSidebar,
+		"enable_online_topup":      setting.PayAddress != "" && setting.EpayId != "" && setting.EpayKey != "",
+		"mj_notify_enabled":        setting.MjNotifyEnabled,
+		"chats":                    setting.Chats,
+		"demo_site_enabled":        operation_setting.DemoSiteEnabled,
+		"self_use_mode_enabled":    operation_setting.SelfUseModeEnabled,
+		"default_use_auto_group":   setting.DefaultUseAutoGroup,
+		"pay_methods":              setting.PayMethods,
 
 		// 面板启用开关
-		"api_info_enabled":            cs.ApiInfoEnabled,
-		"uptime_kuma_enabled":         cs.UptimeKumaEnabled,
-		"announcements_enabled":       cs.AnnouncementsEnabled,
-		"faq_enabled":                 cs.FAQEnabled,
+		"api_info_enabled":      cs.ApiInfoEnabled,
+		"uptime_kuma_enabled":   cs.UptimeKumaEnabled,
+		"announcements_enabled": cs.AnnouncementsEnabled,
+		"faq_enabled":           cs.FAQEnabled,
 
 		"oidc_enabled":                system_setting.GetOIDCSettings().Enabled,
 		"oidc_client_id":              system_setting.GetOIDCSettings().ClientId,

+ 68 - 93
controller/model.go

@@ -3,6 +3,7 @@ package controller
 import (
 	"fmt"
 	"github.com/gin-gonic/gin"
+	"github.com/samber/lo"
 	"net/http"
 	"one-api/common"
 	"one-api/constant"
@@ -14,7 +15,7 @@ import (
 	"one-api/relay/channel/minimax"
 	"one-api/relay/channel/moonshot"
 	relaycommon "one-api/relay/common"
-	relayconstant "one-api/relay/constant"
+	"one-api/setting"
 )
 
 // https://platform.openai.com/docs/api-reference/models/list
@@ -23,30 +24,10 @@ var openAIModels []dto.OpenAIModels
 var openAIModelsMap map[string]dto.OpenAIModels
 var channelId2Models map[int][]string
 
-func getPermission() []dto.OpenAIModelPermission {
-	var permission []dto.OpenAIModelPermission
-	permission = append(permission, dto.OpenAIModelPermission{
-		Id:                 "modelperm-LwHkVFn8AcMItP432fKKDIKJ",
-		Object:             "model_permission",
-		Created:            1626777600,
-		AllowCreateEngine:  true,
-		AllowSampling:      true,
-		AllowLogprobs:      true,
-		AllowSearchIndices: false,
-		AllowView:          true,
-		AllowFineTuning:    false,
-		Organization:       "*",
-		Group:              nil,
-		IsBlocking:         false,
-	})
-	return permission
-}
-
 func init() {
 	// https://platform.openai.com/docs/models/model-endpoint-compatibility
-	permission := getPermission()
-	for i := 0; i < relayconstant.APITypeDummy; i++ {
-		if i == relayconstant.APITypeAIProxyLibrary {
+	for i := 0; i < constant.APITypeDummy; i++ {
+		if i == constant.APITypeAIProxyLibrary {
 			continue
 		}
 		adaptor := relay.GetAdaptor(i)
@@ -54,69 +35,51 @@ func init() {
 		modelNames := adaptor.GetModelList()
 		for _, modelName := range modelNames {
 			openAIModels = append(openAIModels, dto.OpenAIModels{
-				Id:         modelName,
-				Object:     "model",
-				Created:    1626777600,
-				OwnedBy:    channelName,
-				Permission: permission,
-				Root:       modelName,
-				Parent:     nil,
+				Id:      modelName,
+				Object:  "model",
+				Created: 1626777600,
+				OwnedBy: channelName,
 			})
 		}
 	}
 	for _, modelName := range ai360.ModelList {
 		openAIModels = append(openAIModels, dto.OpenAIModels{
-			Id:         modelName,
-			Object:     "model",
-			Created:    1626777600,
-			OwnedBy:    ai360.ChannelName,
-			Permission: permission,
-			Root:       modelName,
-			Parent:     nil,
+			Id:      modelName,
+			Object:  "model",
+			Created: 1626777600,
+			OwnedBy: ai360.ChannelName,
 		})
 	}
 	for _, modelName := range moonshot.ModelList {
 		openAIModels = append(openAIModels, dto.OpenAIModels{
-			Id:         modelName,
-			Object:     "model",
-			Created:    1626777600,
-			OwnedBy:    moonshot.ChannelName,
-			Permission: permission,
-			Root:       modelName,
-			Parent:     nil,
+			Id:      modelName,
+			Object:  "model",
+			Created: 1626777600,
+			OwnedBy: moonshot.ChannelName,
 		})
 	}
 	for _, modelName := range lingyiwanwu.ModelList {
 		openAIModels = append(openAIModels, dto.OpenAIModels{
-			Id:         modelName,
-			Object:     "model",
-			Created:    1626777600,
-			OwnedBy:    lingyiwanwu.ChannelName,
-			Permission: permission,
-			Root:       modelName,
-			Parent:     nil,
+			Id:      modelName,
+			Object:  "model",
+			Created: 1626777600,
+			OwnedBy: lingyiwanwu.ChannelName,
 		})
 	}
 	for _, modelName := range minimax.ModelList {
 		openAIModels = append(openAIModels, dto.OpenAIModels{
-			Id:         modelName,
-			Object:     "model",
-			Created:    1626777600,
-			OwnedBy:    minimax.ChannelName,
-			Permission: permission,
-			Root:       modelName,
-			Parent:     nil,
+			Id:      modelName,
+			Object:  "model",
+			Created: 1626777600,
+			OwnedBy: minimax.ChannelName,
 		})
 	}
 	for modelName, _ := range constant.MidjourneyModel2Action {
 		openAIModels = append(openAIModels, dto.OpenAIModels{
-			Id:         modelName,
-			Object:     "model",
-			Created:    1626777600,
-			OwnedBy:    "midjourney",
-			Permission: permission,
-			Root:       modelName,
-			Parent:     nil,
+			Id:      modelName,
+			Object:  "model",
+			Created: 1626777600,
+			OwnedBy: "midjourney",
 		})
 	}
 	openAIModelsMap = make(map[string]dto.OpenAIModels)
@@ -124,9 +87,9 @@ func init() {
 		openAIModelsMap[aiModel.Id] = aiModel
 	}
 	channelId2Models = make(map[int][]string)
-	for i := 1; i <= common.ChannelTypeDummy; i++ {
-		apiType, success := relayconstant.ChannelType2APIType(i)
-		if !success || apiType == relayconstant.APITypeAIProxyLibrary {
+	for i := 1; i <= constant.ChannelTypeDummy; i++ {
+		apiType, success := common.ChannelType2APIType(i)
+		if !success || apiType == constant.APITypeAIProxyLibrary {
 			continue
 		}
 		meta := &relaycommon.RelayInfo{ChannelType: i}
@@ -134,15 +97,17 @@ func init() {
 		adaptor.Init(meta)
 		channelId2Models[i] = adaptor.GetModelList()
 	}
+	openAIModels = lo.UniqBy(openAIModels, func(m dto.OpenAIModels) string {
+		return m.Id
+	})
 }
 
 func ListModels(c *gin.Context) {
 	userOpenAiModels := make([]dto.OpenAIModels, 0)
-	permission := getPermission()
 
-	modelLimitEnable := c.GetBool("token_model_limit_enabled")
+	modelLimitEnable := common.GetContextKeyBool(c, constant.ContextKeyTokenModelLimitEnabled)
 	if modelLimitEnable {
-		s, ok := c.Get("token_model_limit")
+		s, ok := common.GetContextKey(c, constant.ContextKeyTokenModelLimit)
 		var tokenModelLimit map[string]bool
 		if ok {
 			tokenModelLimit = s.(map[string]bool)
@@ -150,23 +115,22 @@ func ListModels(c *gin.Context) {
 			tokenModelLimit = map[string]bool{}
 		}
 		for allowModel, _ := range tokenModelLimit {
-			if _, ok := openAIModelsMap[allowModel]; ok {
-				userOpenAiModels = append(userOpenAiModels, openAIModelsMap[allowModel])
+			if oaiModel, ok := openAIModelsMap[allowModel]; ok {
+				oaiModel.SupportedEndpointTypes = model.GetModelSupportEndpointTypes(allowModel)
+				userOpenAiModels = append(userOpenAiModels, oaiModel)
 			} else {
 				userOpenAiModels = append(userOpenAiModels, dto.OpenAIModels{
-					Id:         allowModel,
-					Object:     "model",
-					Created:    1626777600,
-					OwnedBy:    "custom",
-					Permission: permission,
-					Root:       allowModel,
-					Parent:     nil,
+					Id:                     allowModel,
+					Object:                 "model",
+					Created:                1626777600,
+					OwnedBy:                "custom",
+					SupportedEndpointTypes: model.GetModelSupportEndpointTypes(allowModel),
 				})
 			}
 		}
 	} else {
 		userId := c.GetInt("id")
-		userGroup, err := model.GetUserGroup(userId, true)
+		userGroup, err := model.GetUserGroup(userId, false)
 		if err != nil {
 			c.JSON(http.StatusOK, gin.H{
 				"success": false,
@@ -175,23 +139,34 @@ func ListModels(c *gin.Context) {
 			return
 		}
 		group := userGroup
-		tokenGroup := c.GetString("token_group")
+		tokenGroup := common.GetContextKeyString(c, constant.ContextKeyTokenGroup)
 		if tokenGroup != "" {
 			group = tokenGroup
 		}
-		models := model.GetGroupModels(group)
-		for _, s := range models {
-			if _, ok := openAIModelsMap[s]; ok {
-				userOpenAiModels = append(userOpenAiModels, openAIModelsMap[s])
+		var models []string
+		if tokenGroup == "auto" {
+			for _, autoGroup := range setting.AutoGroups {
+				groupModels := model.GetGroupEnabledModels(autoGroup)
+				for _, g := range groupModels {
+					if !common.StringsContains(models, g) {
+						models = append(models, g)
+					}
+				}
+			}
+		} else {
+			models = model.GetGroupEnabledModels(group)
+		}
+		for _, modelName := range models {
+			if oaiModel, ok := openAIModelsMap[modelName]; ok {
+				oaiModel.SupportedEndpointTypes = model.GetModelSupportEndpointTypes(modelName)
+				userOpenAiModels = append(userOpenAiModels, oaiModel)
 			} else {
 				userOpenAiModels = append(userOpenAiModels, dto.OpenAIModels{
-					Id:         s,
-					Object:     "model",
-					Created:    1626777600,
-					OwnedBy:    "custom",
-					Permission: permission,
-					Root:       s,
-					Parent:     nil,
+					Id:                     modelName,
+					Object:                 "model",
+					Created:                1626777600,
+					OwnedBy:                "custom",
+					SupportedEndpointTypes: model.GetModelSupportEndpointTypes(modelName),
 				})
 			}
 		}

+ 2 - 1
controller/option.go

@@ -7,6 +7,7 @@ import (
 	"one-api/model"
 	"one-api/setting"
 	"one-api/setting/console_setting"
+	"one-api/setting/ratio_setting"
 	"one-api/setting/system_setting"
 	"strings"
 
@@ -103,7 +104,7 @@ func UpdateOption(c *gin.Context) {
 			return
 		}
 	case "GroupRatio":
-		err = setting.CheckGroupRatio(option.Value)
+		err = ratio_setting.CheckGroupRatio(option.Value)
 		if err != nil {
 			c.JSON(http.StatusOK, gin.H{
 				"success": false,

+ 14 - 4
controller/playground.go

@@ -3,7 +3,6 @@ package controller
 import (
 	"errors"
 	"fmt"
-	"github.com/gin-gonic/gin"
 	"net/http"
 	"one-api/common"
 	"one-api/constant"
@@ -13,6 +12,8 @@ import (
 	"one-api/service"
 	"one-api/setting"
 	"time"
+
+	"github.com/gin-gonic/gin"
 )
 
 func Playground(c *gin.Context) {
@@ -57,13 +58,22 @@ func Playground(c *gin.Context) {
 		c.Set("group", group)
 	}
 	c.Set("token_name", "playground-"+group)
-	channel, err := model.CacheGetRandomSatisfiedChannel(group, playgroundRequest.Model, 0)
+	channel, finalGroup, err := model.CacheGetRandomSatisfiedChannel(c, group, playgroundRequest.Model, 0)
 	if err != nil {
-		message := fmt.Sprintf("当前分组 %s 下对于模型 %s 无可用渠道", group, playgroundRequest.Model)
+		message := fmt.Sprintf("当前分组 %s 下对于模型 %s 无可用渠道", finalGroup, playgroundRequest.Model)
 		openaiErr = service.OpenAIErrorWrapperLocal(errors.New(message), "get_playground_channel_failed", http.StatusInternalServerError)
 		return
 	}
 	middleware.SetupContextForSelectedChannel(c, channel, playgroundRequest.Model)
-	c.Set(constant.ContextKeyRequestStartTime, time.Now())
+	common.SetContextKey(c, constant.ContextKeyRequestStartTime, time.Now())
+
+	// Write user context to ensure acceptUnsetRatio is available
+	userId := c.GetInt("id")
+	userCache, err := model.GetUserCache(userId)
+	if err != nil {
+		openaiErr = service.OpenAIErrorWrapperLocal(err, "get_user_cache_failed", http.StatusInternalServerError)
+		return
+	}
+	userCache.WriteContext(c)
 	Relay(c)
 }

+ 6 - 6
controller/pricing.go

@@ -3,7 +3,7 @@ package controller
 import (
 	"one-api/model"
 	"one-api/setting"
-	"one-api/setting/operation_setting"
+	"one-api/setting/ratio_setting"
 
 	"github.com/gin-gonic/gin"
 )
@@ -13,7 +13,7 @@ func GetPricing(c *gin.Context) {
 	userId, exists := c.Get("id")
 	usableGroup := map[string]string{}
 	groupRatio := map[string]float64{}
-	for s, f := range setting.GetGroupRatioCopy() {
+	for s, f := range ratio_setting.GetGroupRatioCopy() {
 		groupRatio[s] = f
 	}
 	var group string
@@ -22,7 +22,7 @@ func GetPricing(c *gin.Context) {
 		if err == nil {
 			group = user.Group
 			for g := range groupRatio {
-				ratio, ok := setting.GetGroupGroupRatio(group, g)
+				ratio, ok := ratio_setting.GetGroupGroupRatio(group, g)
 				if ok {
 					groupRatio[g] = ratio
 				}
@@ -32,7 +32,7 @@ func GetPricing(c *gin.Context) {
 
 	usableGroup = setting.GetUserUsableGroups(group)
 	// check groupRatio contains usableGroup
-	for group := range setting.GetGroupRatioCopy() {
+	for group := range ratio_setting.GetGroupRatioCopy() {
 		if _, ok := usableGroup[group]; !ok {
 			delete(groupRatio, group)
 		}
@@ -47,7 +47,7 @@ func GetPricing(c *gin.Context) {
 }
 
 func ResetModelRatio(c *gin.Context) {
-	defaultStr := operation_setting.DefaultModelRatio2JSONString()
+	defaultStr := ratio_setting.DefaultModelRatio2JSONString()
 	err := model.UpdateOption("ModelRatio", defaultStr)
 	if err != nil {
 		c.JSON(200, gin.H{
@@ -56,7 +56,7 @@ func ResetModelRatio(c *gin.Context) {
 		})
 		return
 	}
-	err = operation_setting.UpdateModelRatioByJSONString(defaultStr)
+	err = ratio_setting.UpdateModelRatioByJSONString(defaultStr)
 	if err != nil {
 		c.JSON(200, gin.H{
 			"success": false,

+ 24 - 0
controller/ratio_config.go

@@ -0,0 +1,24 @@
+package controller
+
+import (
+    "net/http"
+    "one-api/setting/ratio_setting"
+
+    "github.com/gin-gonic/gin"
+)
+
+func GetRatioConfig(c *gin.Context) {
+    if !ratio_setting.IsExposeRatioEnabled() {
+        c.JSON(http.StatusForbidden, gin.H{
+            "success": false,
+            "message": "倍率配置接口未启用",
+        })
+        return
+    }
+
+    c.JSON(http.StatusOK, gin.H{
+        "success": true,
+        "message": "",
+        "data":    ratio_setting.GetExposedData(),
+    })
+} 

+ 474 - 0
controller/ratio_sync.go

@@ -0,0 +1,474 @@
+package controller
+
+import (
+    "context"
+    "encoding/json"
+    "fmt"
+    "net/http"
+    "strings"
+    "sync"
+    "time"
+
+    "one-api/common"
+    "one-api/dto"
+    "one-api/model"
+    "one-api/setting/ratio_setting"
+
+    "github.com/gin-gonic/gin"
+)
+
+const (
+    defaultTimeoutSeconds  = 10
+    defaultEndpoint        = "/api/ratio_config"
+    maxConcurrentFetches   = 8
+)
+
+var ratioTypes = []string{"model_ratio", "completion_ratio", "cache_ratio", "model_price"}
+
+type upstreamResult struct {
+    Name string                 `json:"name"`
+    Data map[string]any         `json:"data,omitempty"`
+    Err  string                 `json:"err,omitempty"`
+}
+
+func FetchUpstreamRatios(c *gin.Context) {
+    var req dto.UpstreamRequest
+    if err := c.ShouldBindJSON(&req); err != nil {
+        c.JSON(http.StatusBadRequest, gin.H{"success": false, "message": err.Error()})
+        return
+    }
+
+    if req.Timeout <= 0 {
+        req.Timeout = defaultTimeoutSeconds
+    }
+
+    var upstreams []dto.UpstreamDTO
+
+    if len(req.Upstreams) > 0 {
+        for _, u := range req.Upstreams {
+            if strings.HasPrefix(u.BaseURL, "http") {
+                if u.Endpoint == "" {
+                    u.Endpoint = defaultEndpoint
+                }
+                u.BaseURL = strings.TrimRight(u.BaseURL, "/")
+                upstreams = append(upstreams, u)
+            }
+        }
+    } else if len(req.ChannelIDs) > 0 {
+        intIds := make([]int, 0, len(req.ChannelIDs))
+        for _, id64 := range req.ChannelIDs {
+            intIds = append(intIds, int(id64))
+        }
+        dbChannels, err := model.GetChannelsByIds(intIds)
+        if err != nil {
+            common.LogError(c.Request.Context(), "failed to query channels: "+err.Error())
+            c.JSON(http.StatusInternalServerError, gin.H{"success": false, "message": "查询渠道失败"})
+            return
+        }
+        for _, ch := range dbChannels {
+            if base := ch.GetBaseURL(); strings.HasPrefix(base, "http") {
+                upstreams = append(upstreams, dto.UpstreamDTO{
+                    ID:       ch.Id,
+                    Name:     ch.Name,
+                    BaseURL:  strings.TrimRight(base, "/"),
+                    Endpoint: "",
+                })
+            }
+        }
+    }
+
+    if len(upstreams) == 0 {
+        c.JSON(http.StatusOK, gin.H{"success": false, "message": "无有效上游渠道"})
+        return
+    }
+
+    var wg sync.WaitGroup
+    ch := make(chan upstreamResult, len(upstreams))
+
+    sem := make(chan struct{}, maxConcurrentFetches)
+
+    client := &http.Client{Transport: &http.Transport{MaxIdleConns: 100, IdleConnTimeout: 90 * time.Second, TLSHandshakeTimeout: 10 * time.Second, ExpectContinueTimeout: 1 * time.Second}}
+
+    for _, chn := range upstreams {
+        wg.Add(1)
+        go func(chItem dto.UpstreamDTO) {
+            defer wg.Done()
+
+            sem <- struct{}{}
+            defer func() { <-sem }()
+
+            endpoint := chItem.Endpoint
+            if endpoint == "" {
+                endpoint = defaultEndpoint
+            } else if !strings.HasPrefix(endpoint, "/") {
+                endpoint = "/" + endpoint
+            }
+            fullURL := chItem.BaseURL + endpoint
+
+            uniqueName := chItem.Name
+            if chItem.ID != 0 {
+                uniqueName = fmt.Sprintf("%s(%d)", chItem.Name, chItem.ID)
+            }
+
+            ctx, cancel := context.WithTimeout(c.Request.Context(), time.Duration(req.Timeout)*time.Second)
+            defer cancel()
+
+            httpReq, err := http.NewRequestWithContext(ctx, http.MethodGet, fullURL, nil)
+            if err != nil {
+                common.LogWarn(c.Request.Context(), "build request failed: "+err.Error())
+                ch <- upstreamResult{Name: uniqueName, Err: err.Error()}
+                return
+            }
+
+            resp, err := client.Do(httpReq)
+            if err != nil {
+                common.LogWarn(c.Request.Context(), "http error on "+chItem.Name+": "+err.Error())
+                ch <- upstreamResult{Name: uniqueName, Err: err.Error()}
+                return
+            }
+            defer resp.Body.Close()
+            if resp.StatusCode != http.StatusOK {
+                common.LogWarn(c.Request.Context(), "non-200 from "+chItem.Name+": "+resp.Status)
+                ch <- upstreamResult{Name: uniqueName, Err: resp.Status}
+                return
+            }
+            // 兼容两种上游接口格式:
+            //  type1: /api/ratio_config -> data 为 map[string]any,包含 model_ratio/completion_ratio/cache_ratio/model_price
+            //  type2: /api/pricing      -> data 为 []Pricing 列表,需要转换为与 type1 相同的 map 格式
+            var body struct {
+                Success bool            `json:"success"`
+                Data    json.RawMessage `json:"data"`
+                Message string          `json:"message"`
+            }
+
+            if err := json.NewDecoder(resp.Body).Decode(&body); err != nil {
+                common.LogWarn(c.Request.Context(), "json decode failed from "+chItem.Name+": "+err.Error())
+                ch <- upstreamResult{Name: uniqueName, Err: err.Error()}
+                return
+            }
+
+            if !body.Success {
+                ch <- upstreamResult{Name: uniqueName, Err: body.Message}
+                return
+            }
+
+            // 尝试按 type1 解析
+            var type1Data map[string]any
+            if err := json.Unmarshal(body.Data, &type1Data); err == nil {
+                // 如果包含至少一个 ratioTypes 字段,则认为是 type1
+                isType1 := false
+                for _, rt := range ratioTypes {
+                    if _, ok := type1Data[rt]; ok {
+                        isType1 = true
+                        break
+                    }
+                }
+                if isType1 {
+                    ch <- upstreamResult{Name: uniqueName, Data: type1Data}
+                    return
+                }
+            }
+
+            // 如果不是 type1,则尝试按 type2 (/api/pricing) 解析
+            var pricingItems []struct {
+                ModelName       string  `json:"model_name"`
+                QuotaType       int     `json:"quota_type"`
+                ModelRatio      float64 `json:"model_ratio"`
+                ModelPrice      float64 `json:"model_price"`
+                CompletionRatio float64 `json:"completion_ratio"`
+            }
+            if err := json.Unmarshal(body.Data, &pricingItems); err != nil {
+                common.LogWarn(c.Request.Context(), "unrecognized data format from "+chItem.Name+": "+err.Error())
+                ch <- upstreamResult{Name: uniqueName, Err: "无法解析上游返回数据"}
+                return
+            }
+
+            modelRatioMap := make(map[string]float64)
+            completionRatioMap := make(map[string]float64)
+            modelPriceMap := make(map[string]float64)
+
+            for _, item := range pricingItems {
+                if item.QuotaType == 1 {
+                    modelPriceMap[item.ModelName] = item.ModelPrice
+                } else {
+                    modelRatioMap[item.ModelName] = item.ModelRatio
+                    // completionRatio 可能为 0,此时也直接赋值,保持与上游一致
+                    completionRatioMap[item.ModelName] = item.CompletionRatio
+                }
+            }
+
+            converted := make(map[string]any)
+
+            if len(modelRatioMap) > 0 {
+                ratioAny := make(map[string]any, len(modelRatioMap))
+                for k, v := range modelRatioMap {
+                    ratioAny[k] = v
+                }
+                converted["model_ratio"] = ratioAny
+            }
+
+            if len(completionRatioMap) > 0 {
+                compAny := make(map[string]any, len(completionRatioMap))
+                for k, v := range completionRatioMap {
+                    compAny[k] = v
+                }
+                converted["completion_ratio"] = compAny
+            }
+
+            if len(modelPriceMap) > 0 {
+                priceAny := make(map[string]any, len(modelPriceMap))
+                for k, v := range modelPriceMap {
+                    priceAny[k] = v
+                }
+                converted["model_price"] = priceAny
+            }
+
+            ch <- upstreamResult{Name: uniqueName, Data: converted}
+        }(chn)
+    }
+
+    wg.Wait()
+    close(ch)
+
+    localData := ratio_setting.GetExposedData()
+
+    var testResults []dto.TestResult
+    var successfulChannels []struct {
+        name string
+        data map[string]any
+    }
+
+    for r := range ch {
+        if r.Err != "" {
+            testResults = append(testResults, dto.TestResult{
+                Name:   r.Name,
+                Status: "error",
+                Error:  r.Err,
+            })
+        } else {
+            testResults = append(testResults, dto.TestResult{
+                Name:   r.Name,
+                Status: "success",
+            })
+            successfulChannels = append(successfulChannels, struct {
+                name string
+                data map[string]any
+            }{name: r.Name, data: r.Data})
+        }
+    }
+
+    differences := buildDifferences(localData, successfulChannels)
+
+    c.JSON(http.StatusOK, gin.H{
+        "success": true,
+        "data": gin.H{
+            "differences":  differences,
+            "test_results": testResults,
+        },
+    })
+}
+
+func buildDifferences(localData map[string]any, successfulChannels []struct {
+    name string
+    data map[string]any
+}) map[string]map[string]dto.DifferenceItem {
+    differences := make(map[string]map[string]dto.DifferenceItem)
+
+    allModels := make(map[string]struct{})
+    
+    for _, ratioType := range ratioTypes {
+        if localRatioAny, ok := localData[ratioType]; ok {
+            if localRatio, ok := localRatioAny.(map[string]float64); ok {
+                for modelName := range localRatio {
+                    allModels[modelName] = struct{}{}
+                }
+            }
+        }
+    }
+    
+    for _, channel := range successfulChannels {
+        for _, ratioType := range ratioTypes {
+            if upstreamRatio, ok := channel.data[ratioType].(map[string]any); ok {
+                for modelName := range upstreamRatio {
+                    allModels[modelName] = struct{}{}
+                }
+            }
+        }
+    }
+
+    confidenceMap := make(map[string]map[string]bool)
+    
+    // 预处理阶段:检查pricing接口的可信度
+    for _, channel := range successfulChannels {
+        confidenceMap[channel.name] = make(map[string]bool)
+        
+        modelRatios, hasModelRatio := channel.data["model_ratio"].(map[string]any)
+        completionRatios, hasCompletionRatio := channel.data["completion_ratio"].(map[string]any)
+        
+        if hasModelRatio && hasCompletionRatio {
+            // 遍历所有模型,检查是否满足不可信条件
+            for modelName := range allModels {
+                // 默认为可信
+                confidenceMap[channel.name][modelName] = true
+                
+                // 检查是否满足不可信条件:model_ratio为37.5且completion_ratio为1
+                if modelRatioVal, ok := modelRatios[modelName]; ok {
+                    if completionRatioVal, ok := completionRatios[modelName]; ok {
+                        // 转换为float64进行比较
+                        if modelRatioFloat, ok := modelRatioVal.(float64); ok {
+                            if completionRatioFloat, ok := completionRatioVal.(float64); ok {
+                                if modelRatioFloat == 37.5 && completionRatioFloat == 1.0 {
+                                    confidenceMap[channel.name][modelName] = false
+                                }
+                            }
+                        }
+                    }
+                }
+            }
+        } else {
+            // 如果不是从pricing接口获取的数据,则全部标记为可信
+            for modelName := range allModels {
+                confidenceMap[channel.name][modelName] = true
+            }
+        }
+    }
+
+    for modelName := range allModels {
+        for _, ratioType := range ratioTypes {
+            var localValue interface{} = nil
+            if localRatioAny, ok := localData[ratioType]; ok {
+                if localRatio, ok := localRatioAny.(map[string]float64); ok {
+                    if val, exists := localRatio[modelName]; exists {
+                        localValue = val
+                    }
+                }
+            }
+
+            upstreamValues := make(map[string]interface{})
+            confidenceValues := make(map[string]bool)
+            hasUpstreamValue := false
+            hasDifference := false
+
+            for _, channel := range successfulChannels {
+                var upstreamValue interface{} = nil
+                
+                if upstreamRatio, ok := channel.data[ratioType].(map[string]any); ok {
+                    if val, exists := upstreamRatio[modelName]; exists {
+                        upstreamValue = val
+                        hasUpstreamValue = true
+                        
+                        if localValue != nil && localValue != val {
+                            hasDifference = true
+                        } else if localValue == val {
+                            upstreamValue = "same"
+                        }
+                    }
+                }
+                if upstreamValue == nil && localValue == nil {
+                    upstreamValue = "same"
+                }
+                
+                if localValue == nil && upstreamValue != nil && upstreamValue != "same" {
+                    hasDifference = true
+                }
+                
+                upstreamValues[channel.name] = upstreamValue
+                
+                confidenceValues[channel.name] = confidenceMap[channel.name][modelName]
+            }
+
+            shouldInclude := false
+            
+            if localValue != nil {
+                if hasDifference {
+                    shouldInclude = true
+                }
+            } else {
+                if hasUpstreamValue {
+                    shouldInclude = true
+                }
+            }
+
+            if shouldInclude {
+                if differences[modelName] == nil {
+                    differences[modelName] = make(map[string]dto.DifferenceItem)
+                }
+                differences[modelName][ratioType] = dto.DifferenceItem{
+                    Current:   localValue,
+                    Upstreams: upstreamValues,
+                    Confidence: confidenceValues,
+                }
+            }
+        }
+    }
+
+    channelHasDiff := make(map[string]bool)
+    for _, ratioMap := range differences {
+        for _, item := range ratioMap {
+            for chName, val := range item.Upstreams {
+                if val != nil && val != "same" {
+                    channelHasDiff[chName] = true
+                }
+            }
+        }
+    }
+
+    for modelName, ratioMap := range differences {
+        for ratioType, item := range ratioMap {
+            for chName := range item.Upstreams {
+                if !channelHasDiff[chName] {
+                    delete(item.Upstreams, chName)
+                    delete(item.Confidence, chName)
+                }
+            }
+
+            allSame := true
+            for _, v := range item.Upstreams {
+                if v != "same" {
+                    allSame = false
+                    break
+                }
+            }
+            if len(item.Upstreams) == 0 || allSame {
+                delete(ratioMap, ratioType)
+            } else {
+                differences[modelName][ratioType] = item
+            }
+        }
+
+        if len(ratioMap) == 0 {
+            delete(differences, modelName)
+        }
+    }
+
+    return differences
+}
+
+func GetSyncableChannels(c *gin.Context) {
+    channels, err := model.GetAllChannels(0, 0, true, false)
+    if err != nil {
+        c.JSON(http.StatusOK, gin.H{
+            "success": false,
+            "message": err.Error(),
+        })
+        return
+    }
+
+    var syncableChannels []dto.SyncableChannel
+    for _, channel := range channels {
+        if channel.GetBaseURL() != "" {
+            syncableChannels = append(syncableChannels, dto.SyncableChannel{
+                ID:      channel.Id,
+                Name:    channel.Name,
+                BaseURL: channel.GetBaseURL(),
+                Status:  channel.Status,
+            })
+        }
+    }
+
+    c.JSON(http.StatusOK, gin.H{
+        "success": true,
+        "message": "",
+        "data":    syncableChannels,
+    })
+} 

+ 7 - 7
controller/relay.go

@@ -8,12 +8,12 @@ import (
 	"log"
 	"net/http"
 	"one-api/common"
+	"one-api/constant"
 	constant2 "one-api/constant"
 	"one-api/dto"
 	"one-api/middleware"
 	"one-api/model"
 	"one-api/relay"
-	"one-api/relay/constant"
 	relayconstant "one-api/relay/constant"
 	"one-api/relay/helper"
 	"one-api/service"
@@ -69,7 +69,7 @@ func relayHandler(c *gin.Context, relayMode int) *dto.OpenAIErrorWithStatusCode
 }
 
 func Relay(c *gin.Context) {
-	relayMode := constant.Path2RelayMode(c.Request.URL.Path)
+	relayMode := relayconstant.Path2RelayMode(c.Request.URL.Path)
 	requestId := c.GetString(common.RequestIdKey)
 	group := c.GetString("group")
 	originalModel := c.GetString("original_model")
@@ -132,7 +132,7 @@ func WssRelay(c *gin.Context) {
 		return
 	}
 
-	relayMode := constant.Path2RelayMode(c.Request.URL.Path)
+	relayMode := relayconstant.Path2RelayMode(c.Request.URL.Path)
 	requestId := c.GetString(common.RequestIdKey)
 	group := c.GetString("group")
 	//wss://api.openai.com/v1/realtime?model=gpt-4o-realtime-preview-2024-10-01
@@ -259,7 +259,7 @@ func getChannel(c *gin.Context, group, originalModel string, retryCount int) (*m
 			AutoBan: &autoBanInt,
 		}, nil
 	}
-	channel, err := model.CacheGetRandomSatisfiedChannel(group, originalModel, retryCount)
+	channel, _, err := model.CacheGetRandomSatisfiedChannel(c, group, originalModel, retryCount)
 	if err != nil {
 		return nil, errors.New(fmt.Sprintf("获取重试渠道失败: %s", err.Error()))
 	}
@@ -295,7 +295,7 @@ func shouldRetry(c *gin.Context, openaiErr *dto.OpenAIErrorWithStatusCode, retry
 	}
 	if openaiErr.StatusCode == http.StatusBadRequest {
 		channelType := c.GetInt("channel_type")
-		if channelType == common.ChannelTypeAnthropic {
+		if channelType == constant.ChannelTypeAnthropic {
 			return true
 		}
 		return false
@@ -388,7 +388,7 @@ func RelayTask(c *gin.Context) {
 		retryTimes = 0
 	}
 	for i := 0; shouldRetryTaskRelay(c, channelId, taskErr, retryTimes) && i < retryTimes; i++ {
-		channel, err := model.CacheGetRandomSatisfiedChannel(group, originalModel, i)
+		channel, _, err := model.CacheGetRandomSatisfiedChannel(c, group, originalModel, i)
 		if err != nil {
 			common.LogError(c, fmt.Sprintf("CacheGetRandomSatisfiedChannel failed: %s", err.Error()))
 			break
@@ -420,7 +420,7 @@ func RelayTask(c *gin.Context) {
 func taskRelayHandler(c *gin.Context, relayMode int) *dto.TaskError {
 	var err *dto.TaskError
 	switch relayMode {
-	case relayconstant.RelayModeSunoFetch, relayconstant.RelayModeSunoFetchByID:
+	case relayconstant.RelayModeSunoFetch, relayconstant.RelayModeSunoFetchByID, relayconstant.RelayModeKlingFetchByID:
 		err = relay.RelayTaskFetch(c, relayMode)
 	default:
 		err = relay.RelayTaskSubmit(c, relayMode)

+ 2 - 0
controller/task.go

@@ -74,6 +74,8 @@ func UpdateTaskByPlatform(platform constant.TaskPlatform, taskChannelM map[int][
 		//_ = UpdateMidjourneyTaskAll(context.Background(), tasks)
 	case constant.TaskPlatformSuno:
 		_ = UpdateSunoTaskAll(context.Background(), taskChannelM, taskM)
+	case constant.TaskPlatformKling, constant.TaskPlatformJimeng:
+		_ = UpdateVideoTaskAll(context.Background(), platform, taskChannelM, taskM)
 	default:
 		common.SysLog("未知平台")
 	}

+ 138 - 0
controller/task_video.go

@@ -0,0 +1,138 @@
+package controller
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"one-api/common"
+	"one-api/constant"
+	"one-api/model"
+	"one-api/relay"
+	"one-api/relay/channel"
+	"time"
+)
+
+func UpdateVideoTaskAll(ctx context.Context, platform constant.TaskPlatform, taskChannelM map[int][]string, taskM map[string]*model.Task) error {
+	for channelId, taskIds := range taskChannelM {
+		if err := updateVideoTaskAll(ctx, platform, channelId, taskIds, taskM); err != nil {
+			common.LogError(ctx, fmt.Sprintf("Channel #%d failed to update video async tasks: %s", channelId, err.Error()))
+		}
+	}
+	return nil
+}
+
+func updateVideoTaskAll(ctx context.Context, platform constant.TaskPlatform, channelId int, taskIds []string, taskM map[string]*model.Task) error {
+	common.LogInfo(ctx, fmt.Sprintf("Channel #%d pending video tasks: %d", channelId, len(taskIds)))
+	if len(taskIds) == 0 {
+		return nil
+	}
+	cacheGetChannel, err := model.CacheGetChannel(channelId)
+	if err != nil {
+		errUpdate := model.TaskBulkUpdate(taskIds, map[string]any{
+			"fail_reason": fmt.Sprintf("Failed to get channel info, channel ID: %d", channelId),
+			"status":      "FAILURE",
+			"progress":    "100%",
+		})
+		if errUpdate != nil {
+			common.SysError(fmt.Sprintf("UpdateVideoTask error: %v", errUpdate))
+		}
+		return fmt.Errorf("CacheGetChannel failed: %w", err)
+	}
+	adaptor := relay.GetTaskAdaptor(platform)
+	if adaptor == nil {
+		return fmt.Errorf("video adaptor not found")
+	}
+	for _, taskId := range taskIds {
+		if err := updateVideoSingleTask(ctx, adaptor, cacheGetChannel, taskId, taskM); err != nil {
+			common.LogError(ctx, fmt.Sprintf("Failed to update video task %s: %s", taskId, err.Error()))
+		}
+	}
+	return nil
+}
+
+func updateVideoSingleTask(ctx context.Context, adaptor channel.TaskAdaptor, channel *model.Channel, taskId string, taskM map[string]*model.Task) error {
+	baseURL := constant.ChannelBaseURLs[channel.Type]
+	if channel.GetBaseURL() != "" {
+		baseURL = channel.GetBaseURL()
+	}
+
+	task := taskM[taskId]
+	if task == nil {
+		common.LogError(ctx, fmt.Sprintf("Task %s not found in taskM", taskId))
+		return fmt.Errorf("task %s not found", taskId)
+	}
+	resp, err := adaptor.FetchTask(baseURL, channel.Key, map[string]any{
+		"task_id": taskId,
+		"action":  task.Action,
+	})
+	if err != nil {
+		return fmt.Errorf("fetchTask failed for task %s: %w", taskId, err)
+	}
+	//if resp.StatusCode != http.StatusOK {
+	//	return fmt.Errorf("get Video Task status code: %d", resp.StatusCode)
+	//}
+	defer resp.Body.Close()
+	responseBody, err := io.ReadAll(resp.Body)
+	if err != nil {
+		return fmt.Errorf("readAll failed for task %s: %w", taskId, err)
+	}
+
+	taskResult, err := adaptor.ParseTaskResult(responseBody)
+	if err != nil {
+		return fmt.Errorf("parseTaskResult failed for task %s: %w", taskId, err)
+	}
+	//if taskResult.Code != 0 {
+	//	return fmt.Errorf("video task fetch failed for task %s", taskId)
+	//}
+
+	now := time.Now().Unix()
+	if taskResult.Status == "" {
+		return fmt.Errorf("task %s status is empty", taskId)
+	}
+	task.Status = model.TaskStatus(taskResult.Status)
+	switch taskResult.Status {
+	case model.TaskStatusSubmitted:
+		task.Progress = "10%"
+	case model.TaskStatusQueued:
+		task.Progress = "20%"
+	case model.TaskStatusInProgress:
+		task.Progress = "30%"
+		if task.StartTime == 0 {
+			task.StartTime = now
+		}
+	case model.TaskStatusSuccess:
+		task.Progress = "100%"
+		if task.FinishTime == 0 {
+			task.FinishTime = now
+		}
+		task.FailReason = taskResult.Url
+	case model.TaskStatusFailure:
+		task.Status = model.TaskStatusFailure
+		task.Progress = "100%"
+		if task.FinishTime == 0 {
+			task.FinishTime = now
+		}
+		task.FailReason = taskResult.Reason
+		common.LogInfo(ctx, fmt.Sprintf("Task %s failed: %s", task.TaskID, task.FailReason))
+		quota := task.Quota
+		if quota != 0 {
+			if err := model.IncreaseUserQuota(task.UserId, quota, false); err != nil {
+				common.LogError(ctx, "Failed to increase user quota: "+err.Error())
+			}
+			logContent := fmt.Sprintf("Video async task failed %s, refund %s", task.TaskID, common.LogQuota(quota))
+			model.RecordLog(task.UserId, model.LogTypeSystem, logContent)
+		}
+	default:
+		return fmt.Errorf("unknown task status %s for task %s", taskResult.Status, taskId)
+	}
+	if taskResult.Progress != "" {
+		task.Progress = taskResult.Progress
+	}
+
+	task.Data = responseBody
+	if err := task.Update(); err != nil {
+		common.SysError("UpdateVideoTask task error: " + err.Error())
+	}
+
+	return nil
+}

+ 29 - 0
controller/token.go

@@ -258,3 +258,32 @@ func UpdateToken(c *gin.Context) {
 	})
 	return
 }
+
+type TokenBatch struct {
+	Ids []int `json:"ids"`
+}
+
+func DeleteTokenBatch(c *gin.Context) {
+	tokenBatch := TokenBatch{}
+	if err := c.ShouldBindJSON(&tokenBatch); err != nil || len(tokenBatch.Ids) == 0 {
+		c.JSON(http.StatusOK, gin.H{
+			"success": false,
+			"message": "参数错误",
+		})
+		return
+	}
+	userId := c.GetInt("id")
+	count, err := model.BatchDeleteTokens(tokenBatch.Ids, userId)
+	if err != nil {
+		c.JSON(http.StatusOK, gin.H{
+			"success": false,
+			"message": err.Error(),
+		})
+		return
+	}
+	c.JSON(http.StatusOK, gin.H{
+		"success": true,
+		"message": "",
+		"data":    count,
+	})
+}

+ 6 - 8
controller/topup.go

@@ -97,14 +97,12 @@ func RequestEpay(c *gin.Context) {
 		c.JSON(200, gin.H{"message": "error", "data": "充值金额过低"})
 		return
 	}
-	payType := "wxpay"
-	if req.PaymentMethod == "zfb" {
-		payType = "alipay"
-	}
-	if req.PaymentMethod == "wx" {
-		req.PaymentMethod = "wxpay"
-		payType = "wxpay"
+
+	if !setting.ContainsPayMethod(req.PaymentMethod) {
+		c.JSON(200, gin.H{"message": "error", "data": "支付方式不存在"})
+		return
 	}
+
 	callBackAddress := service.GetCallbackAddress()
 	returnUrl, _ := url.Parse(setting.ServerAddress + "/console/log")
 	notifyUrl, _ := url.Parse(callBackAddress + "/api/user/epay/notify")
@@ -116,7 +114,7 @@ func RequestEpay(c *gin.Context) {
 		return
 	}
 	uri, params, err := client.Purchase(&epay.PurchaseArgs{
-		Type:           payType,
+		Type:           req.PaymentMethod,
 		ServiceTradeNo: tradeNo,
 		Name:           fmt.Sprintf("TUC%d", req.Amount),
 		Money:          strconv.FormatFloat(payMoney, 'f', 2, 64),

+ 19 - 15
controller/user.go

@@ -226,6 +226,9 @@ func Register(c *gin.Context) {
 			UnlimitedQuota:     true,
 			ModelLimitsEnabled: false,
 		}
+		if setting.DefaultUseAutoGroup {
+			token.Group = "auto"
+		}
 		if err := token.Insert(); err != nil {
 			c.JSON(http.StatusOK, gin.H{
 				"success": false,
@@ -243,15 +246,15 @@ func Register(c *gin.Context) {
 }
 
 func GetAllUsers(c *gin.Context) {
-	p, _ := strconv.Atoi(c.Query("p"))
-	pageSize, _ := strconv.Atoi(c.Query("page_size"))
-	if p < 1 {
-		p = 1
-	}
-	if pageSize < 0 {
-		pageSize = common.ItemsPerPage
+	pageInfo, err := common.GetPageQuery(c)
+	if err != nil {
+		c.JSON(http.StatusOK, gin.H{
+			"success": false,
+			"message": "parse page query failed",
+		})
+		return
 	}
-	users, total, err := model.GetAllUsers((p-1)*pageSize, pageSize)
+	users, total, err := model.GetAllUsers(pageInfo)
 	if err != nil {
 		c.JSON(http.StatusOK, gin.H{
 			"success": false,
@@ -259,15 +262,13 @@ func GetAllUsers(c *gin.Context) {
 		})
 		return
 	}
+
+	pageInfo.SetTotal(int(total))
+	pageInfo.SetItems(users)
 	c.JSON(http.StatusOK, gin.H{
 		"success": true,
 		"message": "",
-		"data": gin.H{
-			"items":     users,
-			"total":     total,
-			"page":      p,
-			"page_size": pageSize,
-		},
+		"data":    pageInfo,
 	})
 	return
 }
@@ -459,6 +460,9 @@ func GetSelf(c *gin.Context) {
 		})
 		return
 	}
+	// Hide admin remarks: set to empty to trigger omitempty tag, ensuring the remark field is not included in JSON returned to regular users
+	user.Remark = ""
+
 	c.JSON(http.StatusOK, gin.H{
 		"success": true,
 		"message": "",
@@ -483,7 +487,7 @@ func GetUserModels(c *gin.Context) {
 	groups := setting.GetUserUsableGroups(user.Group)
 	var models []string
 	for group := range groups {
-		for _, g := range model.GetGroupModels(group) {
+		for _, g := range model.GetGroupEnabledModels(group) {
 			if !common.StringsContains(models, g) {
 				models = append(models, g)
 			}

+ 2 - 1
docker-compose.yml

@@ -1,4 +1,4 @@
-version: '3.4' # 兼容旧版docker-compose
+version: '3.4'
 
 services:
   new-api:
@@ -16,6 +16,7 @@ services:
       - REDIS_CONN_STRING=redis://redis
       - TZ=Asia/Shanghai
       - ERROR_LOG_ENABLED=true # 是否启用错误日志记录
+    #      - STREAMING_TIMEOUT=120  # 流模式无响应超时时间,单位秒,默认120秒,如果出现空补全可以尝试改为更大值
     #      - SESSION_SECRET=random_string  # 多机部署时设置,必须修改这个随机字符串!!!!!!!
     #      - NODE_TYPE=slave  # Uncomment for slave node in multi-node deployment
     #      - SYNC_FREQUENCY=60  # Uncomment if regular database syncing is needed

+ 8 - 1
dto/claude.go

@@ -178,7 +178,14 @@ type ClaudeRequest struct {
 
 type Thinking struct {
 	Type         string `json:"type"`
-	BudgetTokens int    `json:"budget_tokens"`
+	BudgetTokens *int   `json:"budget_tokens,omitempty"`
+}
+
+func (c *Thinking) GetBudgetTokens() int {
+	if c.BudgetTokens == nil {
+		return 0
+	}
+	return *c.BudgetTokens
 }
 
 func (c *ClaudeRequest) IsStringSystem() bool {

+ 1 - 0
dto/dalle.go

@@ -15,6 +15,7 @@ type ImageRequest struct {
 	Background     string          `json:"background,omitempty"`
 	Moderation     string          `json:"moderation,omitempty"`
 	OutputFormat   string          `json:"output_format,omitempty"`
+	Watermark      *bool           `json:"watermark,omitempty"`
 }
 
 type ImageResponse struct {

+ 6 - 0
dto/midjourney.go

@@ -57,6 +57,8 @@ type MidjourneyDto struct {
 	StartTime   int64       `json:"startTime"`
 	FinishTime  int64       `json:"finishTime"`
 	ImageUrl    string      `json:"imageUrl"`
+	VideoUrl    string      `json:"videoUrl"`
+	VideoUrls   []ImgUrls   `json:"videoUrls"`
 	Status      string      `json:"status"`
 	Progress    string      `json:"progress"`
 	FailReason  string      `json:"failReason"`
@@ -65,6 +67,10 @@ type MidjourneyDto struct {
 	Properties  *Properties `json:"properties"`
 }
 
+type ImgUrls struct {
+	Url string `json:"url"`
+}
+
 type MidjourneyStatus struct {
 	Status int `json:"status"`
 }

+ 5 - 1
dto/openai_request.go

@@ -53,9 +53,11 @@ type GeneralOpenAIRequest struct {
 	Modalities          json.RawMessage   `json:"modalities,omitempty"`
 	Audio               json.RawMessage   `json:"audio,omitempty"`
 	EnableThinking      any               `json:"enable_thinking,omitempty"` // ali
+	THINKING            json.RawMessage   `json:"thinking,omitempty"`        // doubao
 	ExtraBody           json.RawMessage   `json:"extra_body,omitempty"`
 	WebSearchOptions    *WebSearchOptions `json:"web_search_options,omitempty"`
 	// OpenRouter Params
+	Usage     json.RawMessage `json:"usage,omitempty"`
 	Reasoning json.RawMessage `json:"reasoning,omitempty"`
 	// Ali Qwen Params
 	VlHighResolutionImages json.RawMessage `json:"vl_high_resolution_images,omitempty"`
@@ -64,7 +66,7 @@ type GeneralOpenAIRequest struct {
 func (r *GeneralOpenAIRequest) ToMap() map[string]any {
 	result := make(map[string]any)
 	data, _ := common.EncodeJson(r)
-	_ = common.DecodeJson(data, &result)
+	_ = common.UnmarshalJson(data, &result)
 	return result
 }
 
@@ -644,4 +646,6 @@ type ResponsesToolsCall struct {
 	Name        string          `json:"name,omitempty"`
 	Description string          `json:"description,omitempty"`
 	Parameters  json.RawMessage `json:"parameters,omitempty"`
+	Function    json.RawMessage `json:"function,omitempty"`
+	Container   json.RawMessage `json:"container,omitempty"`
 }

+ 3 - 1
dto/openai_response.go

@@ -26,7 +26,7 @@ type OpenAITextResponse struct {
 	Id      string                     `json:"id"`
 	Model   string                     `json:"model"`
 	Object  string                     `json:"object"`
-	Created int64                      `json:"created"`
+	Created any                        `json:"created"`
 	Choices []OpenAITextResponseChoice `json:"choices"`
 	Error   *OpenAIError               `json:"error,omitempty"`
 	Usage   `json:"usage"`
@@ -178,6 +178,8 @@ type Usage struct {
 	InputTokens            int                `json:"input_tokens"`
 	OutputTokens           int                `json:"output_tokens"`
 	InputTokensDetails     *InputTokenDetails `json:"input_tokens_details"`
+	// OpenRouter Params
+	Cost float64 `json:"cost,omitempty"`
 }
 
 type InputTokenDetails struct {

+ 6 - 21
dto/pricing.go

@@ -1,26 +1,11 @@
 package dto
 
-type OpenAIModelPermission struct {
-	Id                 string  `json:"id"`
-	Object             string  `json:"object"`
-	Created            int     `json:"created"`
-	AllowCreateEngine  bool    `json:"allow_create_engine"`
-	AllowSampling      bool    `json:"allow_sampling"`
-	AllowLogprobs      bool    `json:"allow_logprobs"`
-	AllowSearchIndices bool    `json:"allow_search_indices"`
-	AllowView          bool    `json:"allow_view"`
-	AllowFineTuning    bool    `json:"allow_fine_tuning"`
-	Organization       string  `json:"organization"`
-	Group              *string `json:"group"`
-	IsBlocking         bool    `json:"is_blocking"`
-}
+import "one-api/constant"
 
 type OpenAIModels struct {
-	Id         string                  `json:"id"`
-	Object     string                  `json:"object"`
-	Created    int                     `json:"created"`
-	OwnedBy    string                  `json:"owned_by"`
-	Permission []OpenAIModelPermission `json:"permission"`
-	Root       string                  `json:"root"`
-	Parent     *string                 `json:"parent"`
+	Id                     string                  `json:"id"`
+	Object                 string                  `json:"object"`
+	Created                int                     `json:"created"`
+	OwnedBy                string                  `json:"owned_by"`
+	SupportedEndpointTypes []constant.EndpointType `json:"supported_endpoint_types"`
 }

+ 38 - 0
dto/ratio_sync.go

@@ -0,0 +1,38 @@
+package dto
+
+type UpstreamDTO struct {
+    ID       int    `json:"id,omitempty"`
+    Name     string `json:"name" binding:"required"`
+    BaseURL  string `json:"base_url" binding:"required"`
+    Endpoint string `json:"endpoint"`
+}
+
+type UpstreamRequest struct {
+    ChannelIDs []int64 `json:"channel_ids"`
+    Upstreams   []UpstreamDTO `json:"upstreams"`
+    Timeout    int     `json:"timeout"`
+}
+
+// TestResult 上游测试连通性结果
+type TestResult struct {
+    Name   string `json:"name"`
+    Status string `json:"status"`
+    Error  string `json:"error,omitempty"`
+}
+
+// DifferenceItem 差异项
+// Current 为本地值,可能为 nil
+// Upstreams 为各渠道的上游值,具体数值 / "same" / nil
+
+type DifferenceItem struct {
+    Current   interface{}            `json:"current"`
+    Upstreams map[string]interface{} `json:"upstreams"`
+    Confidence map[string]bool       `json:"confidence"`
+}
+
+type SyncableChannel struct {
+    ID      int    `json:"id"`
+    Name    string `json:"name"`
+    BaseURL string `json:"base_url"`
+    Status  int    `json:"status"`
+} 

+ 1 - 1
dto/rerank.go

@@ -4,7 +4,7 @@ type RerankRequest struct {
 	Documents       []any  `json:"documents"`
 	Query           string `json:"query"`
 	Model           string `json:"model"`
-	TopN            int    `json:"top_n"`
+	TopN            int    `json:"top_n,omitempty"`
 	ReturnDocuments *bool  `json:"return_documents,omitempty"`
 	MaxChunkPerDoc  int    `json:"max_chunk_per_doc,omitempty"`
 	OverLapTokens   int    `json:"overlap_tokens,omitempty"`

+ 47 - 0
dto/video.go

@@ -0,0 +1,47 @@
+package dto
+
+type VideoRequest struct {
+	Model          string         `json:"model,omitempty" example:"kling-v1"`                                                                                                                                    // Model/style ID
+	Prompt         string         `json:"prompt,omitempty" example:"宇航员站起身走了"`                                                                                                                                   // Text prompt
+	Image          string         `json:"image,omitempty" example:"https://h2.inkwai.com/bs2/upload-ylab-stunt/se/ai_portal_queue_mmu_image_upscale_aiweb/3214b798-e1b4-4b00-b7af-72b5b0417420_raw_image_0.jpg"` // Image input (URL/Base64)
+	Duration       float64        `json:"duration" example:"5.0"`                                                                                                                                                // Video duration (seconds)
+	Width          int            `json:"width" example:"512"`                                                                                                                                                   // Video width
+	Height         int            `json:"height" example:"512"`                                                                                                                                                  // Video height
+	Fps            int            `json:"fps,omitempty" example:"30"`                                                                                                                                            // Video frame rate
+	Seed           int            `json:"seed,omitempty" example:"20231234"`                                                                                                                                     // Random seed
+	N              int            `json:"n,omitempty" example:"1"`                                                                                                                                               // Number of videos to generate
+	ResponseFormat string         `json:"response_format,omitempty" example:"url"`                                                                                                                               // Response format
+	User           string         `json:"user,omitempty" example:"user-1234"`                                                                                                                                    // User identifier
+	Metadata       map[string]any `json:"metadata,omitempty"`                                                                                                                                                    // Vendor-specific/custom params (e.g. negative_prompt, style, quality_level, etc.)
+}
+
+// VideoResponse 视频生成提交任务后的响应
+type VideoResponse struct {
+	TaskId string `json:"task_id"`
+	Status string `json:"status"`
+}
+
+// VideoTaskResponse 查询视频生成任务状态的响应
+type VideoTaskResponse struct {
+	TaskId   string             `json:"task_id" example:"abcd1234efgh"` // 任务ID
+	Status   string             `json:"status" example:"succeeded"`     // 任务状态
+	Url      string             `json:"url,omitempty"`                  // 视频资源URL(成功时)
+	Format   string             `json:"format,omitempty" example:"mp4"` // 视频格式
+	Metadata *VideoTaskMetadata `json:"metadata,omitempty"`             // 结果元数据
+	Error    *VideoTaskError    `json:"error,omitempty"`                // 错误信息(失败时)
+}
+
+// VideoTaskMetadata 视频任务元数据
+type VideoTaskMetadata struct {
+	Duration float64 `json:"duration" example:"5.0"`  // 实际生成的视频时长
+	Fps      int     `json:"fps" example:"30"`        // 实际帧率
+	Width    int     `json:"width" example:"512"`     // 实际宽度
+	Height   int     `json:"height" example:"512"`    // 实际高度
+	Seed     int     `json:"seed" example:"20231234"` // 使用的随机种子
+}
+
+// VideoTaskError 视频任务错误信息
+type VideoTaskError struct {
+	Code    int    `json:"code"`
+	Message string `json:"message"`
+}

+ 1041 - 0
i18n/zh-cn.json

@@ -0,0 +1,1041 @@
+{
+  "未登录或登录已过期,请重新登录": "未登录或登录已过期,请重新登录",
+  "登 录": "登 录",
+  "使用 微信 继续": "使用 微信 继续",
+  "使用 GitHub 继续": "使用 GitHub 继续",
+  "使用 LinuxDO 继续": "使用 LinuxDO 继续",
+  "使用 邮箱或用户名 登录": "使用 邮箱或用户名 登录",
+  "没有账户?": "没有账户?",
+  "用户名或邮箱": "用户名或邮箱",
+  "请输入您的用户名或邮箱地址": "请输入您的用户名或邮箱地址",
+  "请输入您的密码": "请输入您的密码",
+  "继续": "继续",
+  "忘记密码?": "忘记密码?",
+  "其他登录选项": "其他登录选项",
+  "微信扫码登录": "微信扫码登录",
+  "登录": "登录",
+  "微信扫码关注公众号,输入「验证码」获取验证码(三分钟内有效)": "微信扫码关注公众号,输入「验证码」获取验证码(三分钟内有效)",
+  "验证码": "验证码",
+  "处理中...": "处理中...",
+  "绑定成功!": "绑定成功!",
+  "登录成功!": "登录成功!",
+  "操作失败,重定向至登录界面中...": "操作失败,重定向至登录界面中...",
+  "出现错误,第 ${count} 次重试中...": "出现错误,第 ${count} 次重试中...",
+  "无效的重置链接,请重新发起密码重置请求": "无效的重置链接,请重新发起密码重置请求",
+  "密码已重置并已复制到剪贴板:": "密码已重置并已复制到剪贴板:",
+  "密码重置确认": "密码重置确认",
+  "等待获取邮箱信息...": "等待获取邮箱信息...",
+  "新密码": "新密码",
+  "密码已复制到剪贴板:": "密码已复制到剪贴板:",
+  "密码重置完成": "密码重置完成",
+  "确认重置密码": "确认重置密码",
+  "返回登录": "返回登录",
+  "请输入邮箱地址": "请输入邮箱地址",
+  "请稍后几秒重试,Turnstile 正在检查用户环境!": "请稍后几秒重试,Turnstile 正在检查用户环境!",
+  "重置邮件发送成功,请检查邮箱!": "重置邮件发送成功,请检查邮箱!",
+  "密码重置": "密码重置",
+  "请输入您的邮箱地址": "请输入您的邮箱地址",
+  "重试": "重试",
+  "想起来了?": "想起来了?",
+  "注 册": "注 册",
+  "使用 用户名 注册": "使用 用户名 注册",
+  "已有账户?": "已有账户?",
+  "用户名": "用户名",
+  "请输入用户名": "请输入用户名",
+  "输入密码,最短 8 位,最长 20 位": "输入密码,最短 8 位,最长 20 位",
+  "确认密码": "确认密码",
+  "输入邮箱地址": "输入邮箱地址",
+  "获取验证码": "获取验证码",
+  "输入验证码": "输入验证码",
+  "或": "或",
+  "其他注册选项": "其他注册选项",
+  "加载中...": "加载中...",
+  "复制代码": "复制代码",
+  "代码已复制到剪贴板": "代码已复制到剪贴板",
+  "复制失败,请手动复制": "复制失败,请手动复制",
+  "显示更多": "显示更多",
+  "关于我们": "关于我们",
+  "关于项目": "关于项目",
+  "联系我们": "联系我们",
+  "功能特性": "功能特性",
+  "快速开始": "快速开始",
+  "安装指南": "安装指南",
+  "API 文档": "API 文档",
+  "基于New API的项目": "基于New API的项目",
+  "版权所有": "版权所有",
+  "设计与开发由": "设计与开发由",
+  "首页": "首页",
+  "控制台": "控制台",
+  "文档": "文档",
+  "关于": "关于",
+  "注销成功!": "注销成功!",
+  "个人设置": "个人设置",
+  "API令牌": "API令牌",
+  "退出": "退出",
+  "关闭侧边栏": "关闭侧边栏",
+  "打开侧边栏": "打开侧边栏",
+  "关闭菜单": "关闭菜单",
+  "打开菜单": "打开菜单",
+  "演示站点": "演示站点",
+  "自用模式": "自用模式",
+  "系统公告": "系统公告",
+  "切换主题": "切换主题",
+  "切换语言": "切换语言",
+  "暂无公告": "暂无公告",
+  "暂无系统公告": "暂无系统公告",
+  "今日关闭": "今日关闭",
+  "关闭公告": "关闭公告",
+  "数据看板": "数据看板",
+  "绘图日志": "绘图日志",
+  "任务日志": "任务日志",
+  "渠道": "渠道",
+  "兑换码": "兑换码",
+  "用户管理": "用户管理",
+  "操练场": "操练场",
+  "聊天": "聊天",
+  "管理员": "管理员",
+  "个人中心": "个人中心",
+  "展开侧边栏": "展开侧边栏",
+  "AI 对话": "AI 对话",
+  "选择模型开始对话": "选择模型开始对话",
+  "显示调试": "显示调试",
+  "请输入您的问题...": "请输入您的问题...",
+  "已复制到剪贴板": "已复制到剪贴板",
+  "复制失败": "复制失败",
+  "正在构造请求体预览...": "正在构造请求体预览...",
+  "暂无请求数据": "暂无请求数据",
+  "暂无响应数据": "暂无响应数据",
+  "内容较大,已启用性能优化模式": "内容较大,已启用性能优化模式",
+  "内容较大,部分功能可能受限": "内容较大,部分功能可能受限",
+  "已复制": "已复制",
+  "正在处理大内容...": "正在处理大内容...",
+  "显示完整内容": "显示完整内容",
+  "收起": "收起",
+  "配置已导出到下载文件夹": "配置已导出到下载文件夹",
+  "导出配置失败: ": "导出配置失败: ",
+  "确认导入配置": "确认导入配置",
+  "导入的配置将覆盖当前设置,是否继续?": "导入的配置将覆盖当前设置,是否继续?",
+  "取消": "取消",
+  "配置导入成功": "配置导入成功",
+  "导入配置失败: ": "导入配置失败: ",
+  "重置配置": "重置配置",
+  "将清除所有保存的配置并恢复默认设置,此操作不可撤销。是否继续?": "将清除所有保存的配置并恢复默认设置,此操作不可撤销。是否继续?",
+  "重置选项": "重置选项",
+  "是否同时重置对话消息?选择\"是\"将清空所有对话记录并恢复默认示例;选择\"否\"将保留当前对话记录。": "是否同时重置对话消息?选择\"是\"将清空所有对话记录并恢复默认示例;选择\"否\"将保留当前对话记录。",
+  "同时重置消息": "同时重置消息",
+  "仅重置配置": "仅重置配置",
+  "配置和消息已全部重置": "配置和消息已全部重置",
+  "配置已重置,对话消息已保留": "配置已重置,对话消息已保留",
+  "已有保存的配置": "已有保存的配置",
+  "暂无保存的配置": "暂无保存的配置",
+  "导出配置": "导出配置",
+  "导入配置": "导入配置",
+  "导出": "导出",
+  "导入": "导入",
+  "调试信息": "调试信息",
+  "预览请求体": "预览请求体",
+  "实际请求体": "实际请求体",
+  "预览更新": "预览更新",
+  "最后请求": "最后请求",
+  "操作暂时被禁用": "操作暂时被禁用",
+  "复制": "复制",
+  "编辑": "编辑",
+  "切换为System角色": "切换为System角色",
+  "切换为Assistant角色": "切换为Assistant角色",
+  "删除": "删除",
+  "请求发生错误": "请求发生错误",
+  "系统消息": "系统消息",
+  "请输入消息内容...": "请输入消息内容...",
+  "保存": "保存",
+  "模型配置": "模型配置",
+  "分组": "分组",
+  "请选择分组": "请选择分组",
+  "请选择模型": "请选择模型",
+  "思考中...": "思考中...",
+  "思考过程": "思考过程",
+  "选择同步渠道": "选择同步渠道",
+  "搜索渠道名称或地址": "搜索渠道名称或地址",
+  "暂无渠道": "暂无渠道",
+  "暂无选择": "暂无选择",
+  "无搜索结果": "无搜索结果",
+  "公告已更新": "公告已更新",
+  "公告更新失败": "公告更新失败",
+  "系统名称已更新": "系统名称已更新",
+  "系统名称更新失败": "系统名称更新失败",
+  "系统信息": "系统信息",
+  "当前版本": "当前版本",
+  "检查更新": "检查更新",
+  "启动时间": "启动时间",
+  "通用设置": "通用设置",
+  "设置公告": "设置公告",
+  "个性化设置": "个性化设置",
+  "系统名称": "系统名称",
+  "在此输入系统名称": "在此输入系统名称",
+  "设置系统名称": "设置系统名称",
+  "Logo 图片地址": "Logo 图片地址",
+  "在此输入 Logo 图片地址": "在此输入 Logo 图片地址",
+  "首页内容": "首页内容",
+  "设置首页内容": "设置首页内容",
+  "设置关于": "设置关于",
+  "页脚": "页脚",
+  "设置页脚": "设置页脚",
+  "详情": "详情",
+  "刷新失败": "刷新失败",
+  "令牌已重置并已复制到剪贴板": "令牌已重置并已复制到剪贴板",
+  "加载模型列表失败": "加载模型列表失败",
+  "系统令牌已复制到剪切板": "系统令牌已复制到剪切板",
+  "请输入你的账户名以确认删除!": "请输入你的账户名以确认删除!",
+  "账户已删除!": "账户已删除!",
+  "微信账户绑定成功!": "微信账户绑定成功!",
+  "请输入原密码!": "请输入原密码!",
+  "请输入新密码!": "请输入新密码!",
+  "新密码需要和原密码不一致!": "新密码需要和原密码不一致!",
+  "两次输入的密码不一致!": "两次输入的密码不一致!",
+  "密码修改成功!": "密码修改成功!",
+  "验证码发送成功,请检查邮箱!": "验证码发送成功,请检查邮箱!",
+  "请输入邮箱验证码!": "请输入邮箱验证码!",
+  "邮箱账户绑定成功!": "邮箱账户绑定成功!",
+  "无法复制到剪贴板,请手动复制": "无法复制到剪贴板,请手动复制",
+  "设置保存成功": "设置保存成功",
+  "设置保存失败": "设置保存失败",
+  "超级管理员": "超级管理员",
+  "普通用户": "普通用户",
+  "当前余额": "当前余额",
+  "历史消耗": "历史消耗",
+  "请求次数": "请求次数",
+  "默认": "默认",
+  "可用模型": "可用模型",
+  "模型列表": "模型列表",
+  "点击模型名称可复制": "点击模型名称可复制",
+  "没有可用模型": "没有可用模型",
+  "该分类下没有可用模型": "该分类下没有可用模型",
+  "更多": "更多",
+  "个模型": "个模型",
+  "账户绑定": "账户绑定",
+  "未绑定": "未绑定",
+  "修改绑定": "修改绑定",
+  "微信": "微信",
+  "已绑定": "已绑定",
+  "未启用": "未启用",
+  "绑定": "绑定",
+  "安全设置": "安全设置",
+  "系统访问令牌": "系统访问令牌",
+  "用于API调用的身份验证令牌,请妥善保管": "用于API调用的身份验证令牌,请妥善保管",
+  "生成令牌": "生成令牌",
+  "密码管理": "密码管理",
+  "定期更改密码可以提高账户安全性": "定期更改密码可以提高账户安全性",
+  "修改密码": "修改密码",
+  "此操作不可逆,所有数据将被永久删除": "此操作不可逆,所有数据将被永久删除",
+  "删除账户": "删除账户",
+  "其他设置": "其他设置",
+  "通知设置": "通知设置",
+  "邮件通知": "邮件通知",
+  "通过邮件接收通知": "通过邮件接收通知",
+  "Webhook通知": "Webhook通知",
+  "通过HTTP请求接收通知": "通过HTTP请求接收通知",
+  "请输入Webhook地址,例如: https://example.com/webhook": "请输入Webhook地址,例如: https://example.com/webhook",
+  "只支持https,系统将以 POST 方式发送通知,请确保地址可以接收 POST 请求": "只支持https,系统将以 POST 方式发送通知,请确保地址可以接收 POST 请求",
+  "接口凭证(可选)": "接口凭证(可选)",
+  "请输入密钥": "请输入密钥",
+  "密钥将以 Bearer 方式添加到请求头中,用于验证webhook请求的合法性": "密钥将以 Bearer 方式添加到请求头中,用于验证webhook请求的合法性",
+  "通知邮箱": "通知邮箱",
+  "留空则使用账号绑定的邮箱": "留空则使用账号绑定的邮箱",
+  "设置用于接收额度预警的邮箱地址,不填则使用账号绑定的邮箱": "设置用于接收额度预警的邮箱地址,不填则使用账号绑定的邮箱",
+  "额度预警阈值": "额度预警阈值",
+  "请输入预警额度": "请输入预警额度",
+  "当剩余额度低于此数值时,系统将通过选择的方式发送通知": "当剩余额度低于此数值时,系统将通过选择的方式发送通知",
+  "接受未设置价格模型": "接受未设置价格模型",
+  "当模型没有设置价格时仍接受调用,仅当您信任该网站时使用,可能会产生高额费用": "当模型没有设置价格时仍接受调用,仅当您信任该网站时使用,可能会产生高额费用",
+  "IP记录": "IP记录",
+  "记录请求与错误日志 IP": "记录请求与错误日志 IP",
+  "开启后,仅“消费”和“错误”日志将记录您的客户端 IP 地址": "开启后,仅“消费”和“错误”日志将记录您的客户端 IP 地址",
+  "绑定邮箱地址": "绑定邮箱地址",
+  "重新发送": "重新发送",
+  "绑定微信账户": "绑定微信账户",
+  "删除账户确认": "删除账户确认",
+  "您正在删除自己的帐户,将清空所有数据且不可恢复": "您正在删除自己的帐户,将清空所有数据且不可恢复",
+  "请输入您的用户名以确认删除": "请输入您的用户名以确认删除",
+  "输入你的账户名{{username}}以确认删除": "输入你的账户名{{username}}以确认删除",
+  "原密码": "原密码",
+  "请输入原密码": "请输入原密码",
+  "请输入新密码": "请输入新密码",
+  "确认新密码": "确认新密码",
+  "请再次输入新密码": "请再次输入新密码",
+  "模型倍率设置": "模型倍率设置",
+  "可视化倍率设置": "可视化倍率设置",
+  "未设置倍率模型": "未设置倍率模型",
+  "上游倍率同步": "上游倍率同步",
+  "未知类型": "未知类型",
+  "标签聚合": "标签聚合",
+  "已启用": "已启用",
+  "自动禁用": "自动禁用",
+  "未知状态": "未知状态",
+  "未测试": "未测试",
+  "名称": "名称",
+  "类型": "类型",
+  "状态": "状态",
+  ",时间:": ",时间:",
+  "响应时间": "响应时间",
+  "已用/剩余": "已用/剩余",
+  "剩余额度$": "剩余额度$",
+  ",点击更新": ",点击更新",
+  "已用额度": "已用额度",
+  "修改子渠道优先级": "修改子渠道优先级",
+  "确定要修改所有子渠道优先级为 ": "确定要修改所有子渠道优先级为 ",
+  "权重": "权重",
+  "修改子渠道权重": "修改子渠道权重",
+  "确定要修改所有子渠道权重为 ": "确定要修改所有子渠道权重为 ",
+  "确定是否要删除此渠道?": "确定是否要删除此渠道?",
+  "此修改将不可逆": "此修改将不可逆",
+  "确定是否要复制此渠道?": "确定是否要复制此渠道?",
+  "复制渠道的所有信息": "复制渠道的所有信息",
+  "测试单个渠道操作项目组": "测试单个渠道操作项目组",
+  "禁用": "禁用",
+  "启用": "启用",
+  "启用全部": "启用全部",
+  "禁用全部": "禁用全部",
+  "重置": "重置",
+  "全选": "全选",
+  "_复制": "_复制",
+  "渠道未找到,请刷新页面后重试。": "渠道未找到,请刷新页面后重试。",
+  "渠道复制成功": "渠道复制成功",
+  "渠道复制失败: ": "渠道复制失败: ",
+  "操作成功完成!": "操作成功完成!",
+  "通道 ${name} 测试成功,耗时 ${time.toFixed(2)} 秒。": "通道 ${name} 测试成功,耗时 ${time.toFixed(2)} 秒。",
+  "已停止测试": "已停止测试",
+  "全部": "全部",
+  "请先选择要设置标签的渠道!": "请先选择要设置标签的渠道!",
+  "标签不能为空!": "标签不能为空!",
+  "已为 ${count} 个渠道设置标签!": "已为 ${count} 个渠道设置标签!",
+  "已成功开始测试所有已启用通道,请刷新页面查看结果。": "已成功开始测试所有已启用通道,请刷新页面查看结果。",
+  "已删除所有禁用渠道,共计 ${data} 个": "已删除所有禁用渠道,共计 ${data} 个",
+  "已更新完毕所有已启用通道余额!": "已更新完毕所有已启用通道余额!",
+  "通道 ${name} 余额更新成功!": "通道 ${name} 余额更新成功!",
+  "已删除 ${data} 个通道!": "已删除 ${data} 个通道!",
+  "已修复 ${data} 个通道!": "已修复 ${data} 个通道!",
+  "确定是否要删除所选通道?": "确定是否要删除所选通道?",
+  "删除所选通道": "删除所选通道",
+  "批量设置标签": "批量设置标签",
+  "确定要测试所有通道吗?": "确定要测试所有通道吗?",
+  "测试所有通道": "测试所有通道",
+  "确定要更新所有已启用通道余额吗?": "确定要更新所有已启用通道余额吗?",
+  "更新所有已启用通道余额": "更新所有已启用通道余额",
+  "确定是否要删除禁用通道?": "确定是否要删除禁用通道?",
+  "删除禁用通道": "删除禁用通道",
+  "确定是否要修复数据库一致性?": "确定是否要修复数据库一致性?",
+  "进行该操作时,可能导致渠道访问错误,请仅在数据库出现问题时使用": "进行该操作时,可能导致渠道访问错误,请仅在数据库出现问题时使用",
+  "批量操作": "批量操作",
+  "使用ID排序": "使用ID排序",
+  "开启批量操作": "开启批量操作",
+  "标签聚合模式": "标签聚合模式",
+  "刷新": "刷新",
+  "列设置": "列设置",
+  "搜索渠道的 ID,名称,密钥和API地址 ...": "搜索渠道的 ID,名称,密钥和API地址 ...",
+  "模型关键字": "模型关键字",
+  "选择分组": "选择分组",
+  "查询": "查询",
+  "第 {{start}} - {{end}} 条,共 {{total}} 条": "第 {{start}} - {{end}} 条,共 {{total}} 条",
+  "搜索无结果": "搜索无结果",
+  "请输入要设置的标签名称": "请输入要设置的标签名称",
+  "请输入标签名称": "请输入标签名称",
+  "已选择 ${count} 个渠道": "已选择 ${count} 个渠道",
+  "共": "共",
+  "停止测试": "停止测试",
+  "测试中...": "测试中...",
+  "批量测试${count}个模型": "批量测试${count}个模型",
+  "搜索模型...": "搜索模型...",
+  "模型名称": "模型名称",
+  "测试中": "测试中",
+  "未开始": "未开始",
+  "失败": "失败",
+  "请求时长: ${time}s": "请求时长: ${time}s",
+  "充值": "充值",
+  "消费": "消费",
+  "系统": "系统",
+  "错误": "错误",
+  "流": "流",
+  "非流": "非流",
+  "请求并计费模型": "请求并计费模型",
+  "实际模型": "实际模型",
+  "用户": "用户",
+  "用时/首字": "用时/首字",
+  "提示": "提示",
+  "花费": "花费",
+  "只有当用户设置开启IP记录时,才会进行请求和错误类型日志的IP记录": "只有当用户设置开启IP记录时,才会进行请求和错误类型日志的IP记录",
+  "确定": "确定",
+  "用户信息": "用户信息",
+  "渠道信息": "渠道信息",
+  "语音输入": "语音输入",
+  "文字输入": "文字输入",
+  "文字输出": "文字输出",
+  "缓存创建 Tokens": "缓存创建 Tokens",
+  "日志详情": "日志详情",
+  "消耗额度": "消耗额度",
+  "开始时间": "开始时间",
+  "结束时间": "结束时间",
+  "用户名称": "用户名称",
+  "日志类型": "日志类型",
+  "绘图": "绘图",
+  "放大": "放大",
+  "变换": "变换",
+  "强变换": "强变换",
+  "平移": "平移",
+  "图生文": "图生文",
+  "图混合": "图混合",
+  "重绘": "重绘",
+  "局部重绘-提交": "局部重绘-提交",
+  "自定义变焦-提交": "自定义变焦-提交",
+  "窗口处理": "窗口处理",
+  "未知": "未知",
+  "已提交": "已提交",
+  "等待中": "等待中",
+  "重复提交": "重复提交",
+  "成功": "成功",
+  "未启动": "未启动",
+  "执行中": "执行中",
+  "窗口等待": "窗口等待",
+  "秒": "秒",
+  "提交时间": "提交时间",
+  "花费时间": "花费时间",
+  "任务ID": "任务ID",
+  "提交结果": "提交结果",
+  "任务状态": "任务状态",
+  "结果图片": "结果图片",
+  "查看图片": "查看图片",
+  "无": "无",
+  "失败原因": "失败原因",
+  "已复制:": "已复制:",
+  "当前未开启Midjourney回调,部分项目可能无法获得绘图结果,可在运营设置中开启。": "当前未开启Midjourney回调,部分项目可能无法获得绘图结果,可在运营设置中开启。",
+  "Midjourney 任务记录": "Midjourney 任务记录",
+  "任务 ID": "任务 ID",
+  "按次计费": "按次计费",
+  "按量计费": "按量计费",
+  "您的分组可以使用该模型": "您的分组可以使用该模型",
+  "可用性": "可用性",
+  "计费类型": "计费类型",
+  "当前查看的分组为:{{group}},倍率为:{{ratio}}": "当前查看的分组为:{{group}},倍率为:{{ratio}}",
+  "倍率": "倍率",
+  "倍率是为了方便换算不同价格的模型": "倍率是为了方便换算不同价格的模型",
+  "模型倍率": "模型倍率",
+  "补全倍率": "补全倍率",
+  "分组倍率": "分组倍率",
+  "模型价格": "模型价格",
+  "补全": "补全",
+  "模糊搜索模型名称": "模糊搜索模型名称",
+  "复制选中模型": "复制选中模型",
+  "模型定价": "模型定价",
+  "当前分组": "当前分组",
+  "未登录,使用默认分组倍率": "未登录,使用默认分组倍率",
+  "按量计费费用 = 分组倍率 × 模型倍率 × (提示token数 + 补全token数 × 补全倍率)/ 500000 (单位:美元)": "按量计费费用 = 分组倍率 × 模型倍率 × (提示token数 + 补全token数 × 补全倍率)/ 500000 (单位:美元)",
+  "已过期": "已过期",
+  "未使用": "未使用",
+  "已禁用": "已禁用",
+  "创建时间": "创建时间",
+  "过期时间": "过期时间",
+  "永不过期": "永不过期",
+  "确定是否要删除此兑换码?": "确定是否要删除此兑换码?",
+  "查看": "查看",
+  "已复制到剪贴板!": "已复制到剪贴板!",
+  "兑换码可以批量生成和分发,适合用于推广活动或批量充值。": "兑换码可以批量生成和分发,适合用于推广活动或批量充值。",
+  "添加兑换码": "添加兑换码",
+  "请至少选择一个兑换码!": "请至少选择一个兑换码!",
+  "复制所选兑换码到剪贴板": "复制所选兑换码到剪贴板",
+  "确定清除所有失效兑换码?": "确定清除所有失效兑换码?",
+  "将删除已使用、已禁用及过期的兑换码,此操作不可撤销。": "将删除已使用、已禁用及过期的兑换码,此操作不可撤销。",
+  "已删除 {{count}} 条失效兑换码": "已删除 {{count}} 条失效兑换码",
+  "关键字(id或者名称)": "关键字(id或者名称)",
+  "生成音乐": "生成音乐",
+  "生成歌词": "生成歌词",
+  "生成视频": "生成视频",
+  "排队中": "排队中",
+  "正在提交": "正在提交",
+  "平台": "平台",
+  "点击预览视频": "点击预览视频",
+  "任务记录": "任务记录",
+  "渠道 ID": "渠道 ID",
+  "已启用:限制模型": "已启用:限制模型",
+  "已耗尽": "已耗尽",
+  "剩余额度": "剩余额度",
+  "聊天链接配置错误,请联系管理员": "聊天链接配置错误,请联系管理员",
+  "令牌详情": "令牌详情",
+  "确定是否要删除此令牌?": "确定是否要删除此令牌?",
+  "项目操作按钮组": "项目操作按钮组",
+  "请联系管理员配置聊天链接": "请联系管理员配置聊天链接",
+  "令牌用于API访问认证,可以设置额度限制和模型权限。": "令牌用于API访问认证,可以设置额度限制和模型权限。",
+  "添加令牌": "添加令牌",
+  "请至少选择一个令牌!": "请至少选择一个令牌!",
+  "复制所选令牌到剪贴板": "复制所选令牌到剪贴板",
+  "搜索关键字": "搜索关键字",
+  "未知身份": "未知身份",
+  "已封禁": "已封禁",
+  "统计信息": "统计信息",
+  "剩余": "剩余",
+  "调用": "调用",
+  "邀请信息": "邀请信息",
+  "收益": "收益",
+  "无邀请人": "无邀请人",
+  "已注销": "已注销",
+  "确定要提升此用户吗?": "确定要提升此用户吗?",
+  "此操作将提升用户的权限级别": "此操作将提升用户的权限级别",
+  "确定要降级此用户吗?": "确定要降级此用户吗?",
+  "此操作将降低用户的权限级别": "此操作将降低用户的权限级别",
+  "确定是否要注销此用户?": "确定是否要注销此用户?",
+  "相当于删除用户,此修改将不可逆": "相当于删除用户,此修改将不可逆",
+  "用户管理页面,可以查看和管理所有注册用户的信息、权限和状态。": "用户管理页面,可以查看和管理所有注册用户的信息、权限和状态。",
+  "添加用户": "添加用户",
+  "支持搜索用户的 ID、用户名、显示名称和邮箱地址": "支持搜索用户的 ID、用户名、显示名称和邮箱地址",
+  "全部模型": "全部模型",
+  "智谱": "智谱",
+  "通义千问": "通义千问",
+  "文心一言": "文心一言",
+  "腾讯混元": "腾讯混元",
+  "360智脑": "360智脑",
+  "豆包": "豆包",
+  "用户分组": "用户分组",
+  "专属倍率": "专属倍率",
+  "输入价格:${{price}} / 1M tokens{{audioPrice}}": "输入价格:${{price}} / 1M tokens{{audioPrice}}",
+  "Web搜索价格:${{price}} / 1K 次": "Web搜索价格:${{price}} / 1K 次",
+  "文件搜索价格:${{price}} / 1K 次": "文件搜索价格:${{price}} / 1K 次",
+  "仅供参考,以实际扣费为准": "仅供参考,以实际扣费为准",
+  "价格:${{price}} * {{ratioType}}:{{ratio}}": "价格:${{price}} * {{ratioType}}:{{ratio}}",
+  "模型: {{ratio}} * {{ratioType}}:{{groupRatio}}": "模型: {{ratio}} * {{ratioType}}:{{groupRatio}}",
+  "提示价格:${{price}} / 1M tokens": "提示价格:${{price}} / 1M tokens",
+  "模型价格 ${{price}},{{ratioType}} {{ratio}}": "模型价格 ${{price}},{{ratioType}} {{ratio}}",
+  "模型: {{ratio}} * {{ratioType}}: {{groupRatio}}": "模型: {{ratio}} * {{ratioType}}: {{groupRatio}}",
+  "不是合法的 JSON 字符串": "不是合法的 JSON 字符串",
+  "请求发生错误: ": "请求发生错误: ",
+  "解析响应数据时发生错误": "解析响应数据时发生错误",
+  "连接已断开": "连接已断开",
+  "建立连接时发生错误": "建立连接时发生错误",
+  "加载模型失败": "加载模型失败",
+  "加载分组失败": "加载分组失败",
+  "消息已复制到剪贴板": "消息已复制到剪贴板",
+  "确认删除": "确认删除",
+  "确定要删除这条消息吗?": "确定要删除这条消息吗?",
+  "已删除消息及其回复": "已删除消息及其回复",
+  "消息已删除": "消息已删除",
+  "消息已编辑": "消息已编辑",
+  "检测到该消息后有AI回复,是否删除后续回复并重新生成?": "检测到该消息后有AI回复,是否删除后续回复并重新生成?",
+  "重新生成": "重新生成",
+  "消息已更新": "消息已更新",
+  "加载关于内容失败...": "加载关于内容失败...",
+  "可在设置页面设置关于内容,支持 HTML & Markdown": "可在设置页面设置关于内容,支持 HTML & Markdown",
+  "New API项目仓库地址:": "New API项目仓库地址:",
+  "| 基于": "| 基于",
+  "本项目根据": "本项目根据",
+  "MIT许可证": "MIT许可证",
+  "授权,需在遵守": "授权,需在遵守",
+  "Apache-2.0协议": "Apache-2.0协议",
+  "管理员暂时未设置任何关于内容": "管理员暂时未设置任何关于内容",
+  "仅支持 OpenAI 接口格式": "仅支持 OpenAI 接口格式",
+  "请填写密钥": "请填写密钥",
+  "获取模型列表成功": "获取模型列表成功",
+  "获取模型列表失败": "获取模型列表失败",
+  "请填写渠道名称和渠道密钥!": "请填写渠道名称和渠道密钥!",
+  "请至少选择一个模型!": "请至少选择一个模型!",
+  "模型映射必须是合法的 JSON 格式!": "模型映射必须是合法的 JSON 格式!",
+  "提交失败,请勿重复提交!": "提交失败,请勿重复提交!",
+  "渠道创建成功!": "渠道创建成功!",
+  "已新增 {{count}} 个模型:{{list}}": "已新增 {{count}} 个模型:{{list}}",
+  "未发现新增模型": "未发现新增模型",
+  "新建": "新建",
+  "更新渠道信息": "更新渠道信息",
+  "创建新的渠道": "创建新的渠道",
+  "基本信息": "基本信息",
+  "渠道的基本配置信息": "渠道的基本配置信息",
+  "请选择渠道类型": "请选择渠道类型",
+  "请为渠道命名": "请为渠道命名",
+  "请输入密钥,一行一个": "请输入密钥,一行一个",
+  "批量创建": "批量创建",
+  "API 配置": "API 配置",
+  "API 地址和相关配置": "API 地址和相关配置",
+  "2025年5月10日后添加的渠道,不需要再在部署的时候移除模型名称中的\".\"": "2025年5月10日后添加的渠道,不需要再在部署的时候移除模型名称中的\".\"",
+  "请输入 AZURE_OPENAI_ENDPOINT,例如:https://docs-test-001.openai.azure.com": "请输入 AZURE_OPENAI_ENDPOINT,例如:https://docs-test-001.openai.azure.com",
+  "请输入默认 API 版本,例如:2025-04-01-preview": "请输入默认 API 版本,例如:2025-04-01-preview",
+  "如果你对接的是上游One API或者New API等转发项目,请使用OpenAI类型,不要使用此类型,除非你知道你在做什么。": "如果你对接的是上游One API或者New API等转发项目,请使用OpenAI类型,不要使用此类型,除非你知道你在做什么。",
+  "完整的 Base URL,支持变量{model}": "完整的 Base URL,支持变量{model}",
+  "请输入完整的URL,例如:https://api.openai.com/v1/chat/completions": "请输入完整的URL,例如:https://api.openai.com/v1/chat/completions",
+  "Dify渠道只适配chatflow和agent,并且agent不支持图片!": "Dify渠道只适配chatflow和agent,并且agent不支持图片!",
+  "此项可选,用于通过自定义API地址来进行 API 调用,末尾不要带/v1和/": "此项可选,用于通过自定义API地址来进行 API 调用,末尾不要带/v1和/",
+  "对于官方渠道,new-api已经内置地址,除非是第三方代理站点或者Azure的特殊接入地址,否则不需要填写": "对于官方渠道,new-api已经内置地址,除非是第三方代理站点或者Azure的特殊接入地址,否则不需要填写",
+  "私有部署地址": "私有部署地址",
+  "请输入私有部署地址,格式为:https://fastgpt.run/api/openapi": "请输入私有部署地址,格式为:https://fastgpt.run/api/openapi",
+  "注意非Chat API,请务必填写正确的API地址,否则可能导致无法使用": "注意非Chat API,请务必填写正确的API地址,否则可能导致无法使用",
+  "请输入到 /suno 前的路径,通常就是域名,例如:https://api.example.com": "请输入到 /suno 前的路径,通常就是域名,例如:https://api.example.com",
+  "模型选择和映射设置": "模型选择和映射设置",
+  "模型": "模型",
+  "请选择该渠道所支持的模型": "请选择该渠道所支持的模型",
+  "填入相关模型": "填入相关模型",
+  "填入所有模型": "填入所有模型",
+  "获取模型列表": "获取模型列表",
+  "清除所有模型": "清除所有模型",
+  "输入自定义模型名称": "输入自定义模型名称",
+  "模型重定向": "模型重定向",
+  "此项可选,用于修改请求体中的模型名称,为一个 JSON 字符串,键为请求中模型名称,值为要替换的模型名称,例如:": "此项可选,用于修改请求体中的模型名称,为一个 JSON 字符串,键为请求中模型名称,值为要替换的模型名称,例如:",
+  "填入模板": "填入模板",
+  "默认测试模型": "默认测试模型",
+  "不填则为模型列表第一个": "不填则为模型列表第一个",
+  "渠道的高级配置选项": "渠道的高级配置选项",
+  "请选择可以使用该渠道的分组": "请选择可以使用该渠道的分组",
+  "请在系统设置页面编辑分组倍率以添加新的分组:": "请在系统设置页面编辑分组倍率以添加新的分组:",
+  "部署地区": "部署地区",
+  "知识库 ID": "知识库 ID",
+  "渠道标签": "渠道标签",
+  "渠道优先级": "渠道优先级",
+  "渠道权重": "渠道权重",
+  "渠道额外设置": "渠道额外设置",
+  "此项可选,用于配置渠道特定设置,为一个 JSON 字符串,例如:": "此项可选,用于配置渠道特定设置,为一个 JSON 字符串,例如:",
+  "参数覆盖": "参数覆盖",
+  "此项可选,用于覆盖请求参数。不支持覆盖 stream 参数。为一个 JSON 字符串,例如:": "此项可选,用于覆盖请求参数。不支持覆盖 stream 参数。为一个 JSON 字符串,例如:",
+  "请输入组织org-xxx": "请输入组织org-xxx",
+  "组织,可选,不填则为默认组织": "组织,可选,不填则为默认组织",
+  "是否自动禁用(仅当自动禁用开启时有效),关闭后不会自动禁用该渠道": "是否自动禁用(仅当自动禁用开启时有效),关闭后不会自动禁用该渠道",
+  "状态码复写(仅影响本地判断,不修改返回到上游的状态码)": "状态码复写(仅影响本地判断,不修改返回到上游的状态码)",
+  "此项可选,用于复写返回的状态码,比如将claude渠道的400错误复写为500(用于重试),请勿滥用该功能,例如:": "此项可选,用于复写返回的状态码,比如将claude渠道的400错误复写为500(用于重试),请勿滥用该功能,例如:",
+  "编辑标签": "编辑标签",
+  "标签信息": "标签信息",
+  "标签的基本配置": "标签的基本配置",
+  "所有编辑均为覆盖操作,留空则不更改": "所有编辑均为覆盖操作,留空则不更改",
+  "标签名称": "标签名称",
+  "请输入新标签,留空则解散标签": "请输入新标签,留空则解散标签",
+  "当前模型列表为该标签下所有渠道模型列表最长的一个,并非所有渠道的并集,请注意可能导致某些渠道模型丢失。": "当前模型列表为该标签下所有渠道模型列表最长的一个,并非所有渠道的并集,请注意可能导致某些渠道模型丢失。",
+  "请选择该渠道所支持的模型,留空则不更改": "请选择该渠道所支持的模型,留空则不更改",
+  "此项可选,用于修改请求体中的模型名称,为一个 JSON 字符串,键为请求中模型名称,值为要替换的模型名称,留空则不更改": "此项可选,用于修改请求体中的模型名称,为一个 JSON 字符串,键为请求中模型名称,值为要替换的模型名称,留空则不更改",
+  "清空重定向": "清空重定向",
+  "分组设置": "分组设置",
+  "用户分组配置": "用户分组配置",
+  "请选择可以使用该渠道的分组,留空则不更改": "请选择可以使用该渠道的分组,留空则不更改",
+  "正在跳转...": "正在跳转...",
+  "小时": "小时",
+  "周": "周",
+  "模型调用次数占比": "模型调用次数占比",
+  "模型消耗分布": "模型消耗分布",
+  "总计": "总计",
+  "早上好": "早上好",
+  "中午好": "中午好",
+  "下午好": "下午好",
+  "账户数据": "账户数据",
+  "使用统计": "使用统计",
+  "统计次数": "统计次数",
+  "资源消耗": "资源消耗",
+  "统计额度": "统计额度",
+  "性能指标": "性能指标",
+  "平均RPM": "平均RPM",
+  "复制成功": "复制成功",
+  "进行中": "进行中",
+  "异常": "异常",
+  "正常": "正常",
+  "可用率": "可用率",
+  "有异常": "有异常",
+  "高延迟": "高延迟",
+  "维护中": "维护中",
+  "暂无监控数据": "暂无监控数据",
+  "搜索条件": "搜索条件",
+  "时间粒度": "时间粒度",
+  "模型数据分析": "模型数据分析",
+  "消耗分布": "消耗分布",
+  "调用次数分布": "调用次数分布",
+  "API信息": "API信息",
+  "暂无API信息": "暂无API信息",
+  "请联系管理员在系统设置中配置API信息": "请联系管理员在系统设置中配置API信息",
+  "显示最新20条": "显示最新20条",
+  "请联系管理员在系统设置中配置公告信息": "请联系管理员在系统设置中配置公告信息",
+  "暂无常见问答": "暂无常见问答",
+  "请联系管理员在系统设置中配置常见问答": "请联系管理员在系统设置中配置常见问答",
+  "服务可用性": "服务可用性",
+  "请联系管理员在系统设置中配置Uptime": "请联系管理员在系统设置中配置Uptime",
+  "加载首页内容失败...": "加载首页内容失败...",
+  "统一的大模型接口网关": "统一的大模型接口网关",
+  "更好的价格,更好的稳定性,无需订阅": "更好的价格,更好的稳定性,无需订阅",
+  "开始使用": "开始使用",
+  "支持众多的大模型供应商": "支持众多的大模型供应商",
+  "页面未找到,请检查您的浏览器地址是否正确": "页面未找到,请检查您的浏览器地址是否正确",
+  "登录过期,请重新登录!": "登录过期,请重新登录!",
+  "兑换码更新成功!": "兑换码更新成功!",
+  "兑换码创建成功!": "兑换码创建成功!",
+  "兑换码创建成功": "兑换码创建成功",
+  "兑换码创建成功,是否下载兑换码?": "兑换码创建成功,是否下载兑换码?",
+  "兑换码将以文本文件的形式下载,文件名为兑换码的名称。": "兑换码将以文本文件的形式下载,文件名为兑换码的名称。",
+  "更新兑换码信息": "更新兑换码信息",
+  "创建新的兑换码": "创建新的兑换码",
+  "设置兑换码的基本信息": "设置兑换码的基本信息",
+  "请输入名称": "请输入名称",
+  "选择过期时间(可选,留空为永久)": "选择过期时间(可选,留空为永久)",
+  "额度设置": "额度设置",
+  "设置兑换码的额度和数量": "设置兑换码的额度和数量",
+  "请输入额度": "请输入额度",
+  "生成数量": "生成数量",
+  "请输入生成数量": "请输入生成数量",
+  "你似乎并没有修改什么": "你似乎并没有修改什么",
+  "部分保存失败,请重试": "部分保存失败,请重试",
+  "保存成功": "保存成功",
+  "保存失败,请重试": "保存失败,请重试",
+  "请检查输入": "请检查输入",
+  "聊天配置": "聊天配置",
+  "为一个 JSON 文本": "为一个 JSON 文本",
+  "保存聊天设置": "保存聊天设置",
+  "设置已保存": "设置已保存",
+  "API地址": "API地址",
+  "说明": "说明",
+  "颜色": "颜色",
+  "API信息管理,可以配置多个API地址用于状态展示和负载均衡(最多50个)": "API信息管理,可以配置多个API地址用于状态展示和负载均衡(最多50个)",
+  "批量删除": "批量删除",
+  "保存设置": "保存设置",
+  "添加API": "添加API",
+  "请输入API地址": "请输入API地址",
+  "如:香港线路": "如:香港线路",
+  "请输入线路描述": "请输入线路描述",
+  "如:大带宽批量分析图片推荐": "如:大带宽批量分析图片推荐",
+  "请输入说明": "请输入说明",
+  "标识颜色": "标识颜色",
+  "确定要删除此API信息吗?": "确定要删除此API信息吗?",
+  "警告": "警告",
+  "发布时间": "发布时间",
+  "操作": "操作",
+  "系统公告管理,可以发布系统通知和重要消息(最多100个,前端显示最新20条)": "系统公告管理,可以发布系统通知和重要消息(最多100个,前端显示最新20条)",
+  "添加公告": "添加公告",
+  "编辑公告": "编辑公告",
+  "公告内容": "公告内容",
+  "请输入公告内容": "请输入公告内容",
+  "请选择发布日期": "请选择发布日期",
+  "公告类型": "公告类型",
+  "说明信息": "说明信息",
+  "可选,公告的补充说明": "可选,公告的补充说明",
+  "确定要删除此公告吗?": "确定要删除此公告吗?",
+  "数据看板设置": "数据看板设置",
+  "启用数据看板(实验性)": "启用数据看板(实验性)",
+  "数据看板更新间隔": "数据看板更新间隔",
+  "设置过短会影响数据库性能": "设置过短会影响数据库性能",
+  "数据看板默认时间粒度": "数据看板默认时间粒度",
+  "仅修改展示粒度,统计精确到小时": "仅修改展示粒度,统计精确到小时",
+  "保存数据看板设置": "保存数据看板设置",
+  "问题标题": "问题标题",
+  "回答内容": "回答内容",
+  "常见问答管理,为用户提供常见问题的答案(最多50个,前端显示最新20条)": "常见问答管理,为用户提供常见问题的答案(最多50个,前端显示最新20条)",
+  "添加问答": "添加问答",
+  "编辑问答": "编辑问答",
+  "请输入问题标题": "请输入问题标题",
+  "请输入回答内容": "请输入回答内容",
+  "确定要删除此问答吗?": "确定要删除此问答吗?",
+  "分类名称": "分类名称",
+  "Uptime Kuma地址": "Uptime Kuma地址",
+  "Uptime Kuma监控分类管理,可以配置多个监控分类用于服务状态展示(最多20个)": "Uptime Kuma监控分类管理,可以配置多个监控分类用于服务状态展示(最多20个)",
+  "编辑分类": "编辑分类",
+  "添加分类": "添加分类",
+  "请输入分类名称,如:OpenAI、Claude等": "请输入分类名称,如:OpenAI、Claude等",
+  "请输入分类名称": "请输入分类名称",
+  "请输入Uptime Kuma服务地址,如:https://status.example.com": "请输入Uptime Kuma服务地址,如:https://status.example.com",
+  "请输入Uptime Kuma地址": "请输入Uptime Kuma地址",
+  "请输入状态页面的Slug,如:my-status": "请输入状态页面的Slug,如:my-status",
+  "请输入状态页面Slug": "请输入状态页面Slug",
+  "确定要删除此分类吗?": "确定要删除此分类吗?",
+  "绘图设置": "绘图设置",
+  "启用绘图功能": "启用绘图功能",
+  "允许回调(会泄露服务器 IP 地址)": "允许回调(会泄露服务器 IP 地址)",
+  "允许 AccountFilter 参数": "允许 AccountFilter 参数",
+  "开启之后会清除用户提示词中的": "开启之后会清除用户提示词中的",
+  "以及": "以及",
+  "检测必须等待绘图成功才能进行放大等操作": "检测必须等待绘图成功才能进行放大等操作",
+  "保存绘图设置": "保存绘图设置",
+  "Claude设置": "Claude设置",
+  "Claude请求头覆盖": "Claude请求头覆盖",
+  "为一个 JSON 文本,例如:": "为一个 JSON 文本,例如:",
+  "缺省 MaxTokens": "缺省 MaxTokens",
+  "启用Claude思考适配(-thinking后缀)": "启用Claude思考适配(-thinking后缀)",
+  "思考适配 BudgetTokens 百分比": "思考适配 BudgetTokens 百分比",
+  "0.1-1之间的小数": "0.1-1之间的小数",
+  "Gemini设置": "Gemini设置",
+  "Gemini安全设置": "Gemini安全设置",
+  "default为默认设置,可单独设置每个模型的版本": "default为默认设置,可单独设置每个模型的版本",
+  "例如:": "例如:",
+  "Gemini思考适配设置": "Gemini思考适配设置",
+  "启用Gemini思考后缀适配": "启用Gemini思考后缀适配",
+  "适配 -thinking、-thinking-预算数字 和 -nothinking 后缀": "适配 -thinking、-thinking-预算数字 和 -nothinking 后缀",
+  "0.002-1之间的小数": "0.002-1之间的小数",
+  "全局设置": "全局设置",
+  "启用请求透传": "启用请求透传",
+  "连接保活设置": "连接保活设置",
+  "启用Ping间隔": "启用Ping间隔",
+  "Ping间隔(秒)": "Ping间隔(秒)",
+  "新用户初始额度": "新用户初始额度",
+  "请求预扣费额度": "请求预扣费额度",
+  "请求结束后多退少补": "请求结束后多退少补",
+  "邀请新用户奖励额度": "邀请新用户奖励额度",
+  "新用户使用邀请码奖励额度": "新用户使用邀请码奖励额度",
+  "例如:1000": "例如:1000",
+  "保存额度设置": "保存额度设置",
+  "例如发卡网站的购买链接": "例如发卡网站的购买链接",
+  "文档地址": "文档地址",
+  "单位美元额度": "单位美元额度",
+  "一单位货币能兑换的额度": "一单位货币能兑换的额度",
+  "失败重试次数": "失败重试次数",
+  "以货币形式显示额度": "以货币形式显示额度",
+  "额度查询接口返回令牌额度而非用户额度": "额度查询接口返回令牌额度而非用户额度",
+  "默认折叠侧边栏": "默认折叠侧边栏",
+  "开启后不限制:必须设置模型倍率": "开启后不限制:必须设置模型倍率",
+  "保存通用设置": "保存通用设置",
+  "请选择日志记录时间": "请选择日志记录时间",
+  "条日志已清理!": "条日志已清理!",
+  "日志清理失败:": "日志清理失败:",
+  "启用额度消费日志记录": "启用额度消费日志记录",
+  "日志记录时间": "日志记录时间",
+  "清除历史日志": "清除历史日志",
+  "保存日志设置": "保存日志设置",
+  "监控设置": "监控设置",
+  "测试所有渠道的最长响应时间": "测试所有渠道的最长响应时间",
+  "额度提醒阈值": "额度提醒阈值",
+  "低于此额度时将发送邮件提醒用户": "低于此额度时将发送邮件提醒用户",
+  "失败时自动禁用通道": "失败时自动禁用通道",
+  "成功时自动启用通道": "成功时自动启用通道",
+  "自动禁用关键词": "自动禁用关键词",
+  "一行一个,不区分大小写": "一行一个,不区分大小写",
+  "屏蔽词过滤设置": "屏蔽词过滤设置",
+  "启用屏蔽词过滤功能": "启用屏蔽词过滤功能",
+  "启用 Prompt 检查": "启用 Prompt 检查",
+  "一行一个屏蔽词,不需要符号分割": "一行一个屏蔽词,不需要符号分割",
+  "保存屏蔽词过滤设置": "保存屏蔽词过滤设置",
+  "更新成功": "更新成功",
+  "更新失败": "更新失败",
+  "服务器地址": "服务器地址",
+  "更新服务器地址": "更新服务器地址",
+  "请先填写服务器地址": "请先填写服务器地址",
+  "充值分组倍率不是合法的 JSON 字符串": "充值分组倍率不是合法的 JSON 字符串",
+  "充值方式设置不是合法的 JSON 字符串": "充值方式设置不是合法的 JSON 字符串",
+  "支付设置": "支付设置",
+  "(当前仅支持易支付接口,默认使用上方服务器地址作为回调地址!)": "(当前仅支持易支付接口,默认使用上方服务器地址作为回调地址!)",
+  "例如:https://yourdomain.com": "例如:https://yourdomain.com",
+  "易支付商户ID": "易支付商户ID",
+  "易支付商户密钥": "易支付商户密钥",
+  "敏感信息不会发送到前端显示": "敏感信息不会发送到前端显示",
+  "回调地址": "回调地址",
+  "充值价格(x元/美金)": "充值价格(x元/美金)",
+  "例如:7,就是7元/美金": "例如:7,就是7元/美金",
+  "最低充值美元数量": "最低充值美元数量",
+  "例如:2,就是最低充值2$": "例如:2,就是最低充值2$",
+  "为一个 JSON 文本,键为组名称,值为倍率": "为一个 JSON 文本,键为组名称,值为倍率",
+  "充值方式设置": "充值方式设置",
+  "更新支付设置": "更新支付设置",
+  "模型请求速率限制": "模型请求速率限制",
+  "启用用户模型请求速率限制(可能会影响高并发性能)": "启用用户模型请求速率限制(可能会影响高并发性能)",
+  "分钟": "分钟",
+  "频率限制的周期(分钟)": "频率限制的周期(分钟)",
+  "用户每周期最多请求次数": "用户每周期最多请求次数",
+  "包括失败请求的次数,0代表不限制": "包括失败请求的次数,0代表不限制",
+  "用户每周期最多请求完成次数": "用户每周期最多请求完成次数",
+  "只包括请求成功的次数": "只包括请求成功的次数",
+  "分组速率限制": "分组速率限制",
+  "使用 JSON 对象格式,格式为:{\"组名\": [最多请求次数, 最多请求完成次数]}": "使用 JSON 对象格式,格式为:{\"组名\": [最多请求次数, 最多请求完成次数]}",
+  "示例:{\"default\": [200, 100], \"vip\": [0, 1000]}。": "示例:{\"default\": [200, 100], \"vip\": [0, 1000]}。",
+  "[最多请求次数]必须大于等于0,[最多请求完成次数]必须大于等于1。": "[最多请求次数]必须大于等于0,[最多请求完成次数]必须大于等于1。",
+  "分组速率配置优先级高于全局速率限制。": "分组速率配置优先级高于全局速率限制。",
+  "限制周期统一使用上方配置的“限制周期”值。": "限制周期统一使用上方配置的“限制周期”值。",
+  "保存模型速率限制": "保存模型速率限制",
+  "保存失败": "保存失败",
+  "为一个 JSON 文本,键为分组名称,值为倍率": "为一个 JSON 文本,键为分组名称,值为倍率",
+  "用户可选分组": "用户可选分组",
+  "为一个 JSON 文本,键为分组名称,值为分组描述": "为一个 JSON 文本,键为分组名称,值为分组描述",
+  "自动分组auto,从第一个开始选择": "自动分组auto,从第一个开始选择",
+  "必须是有效的 JSON 字符串数组,例如:[\"g1\",\"g2\"]": "必须是有效的 JSON 字符串数组,例如:[\"g1\",\"g2\"]",
+  "模型固定价格": "模型固定价格",
+  "一次调用消耗多少刀,优先级大于模型倍率": "一次调用消耗多少刀,优先级大于模型倍率",
+  "为一个 JSON 文本,键为模型名称,值为倍率": "为一个 JSON 文本,键为模型名称,值为倍率",
+  "模型补全倍率(仅对自定义模型有效)": "模型补全倍率(仅对自定义模型有效)",
+  "仅对自定义模型有效": "仅对自定义模型有效",
+  "保存模型倍率设置": "保存模型倍率设置",
+  "确定重置模型倍率吗?": "确定重置模型倍率吗?",
+  "重置模型倍率": "重置模型倍率",
+  "获取启用模型失败:": "获取启用模型失败:",
+  "获取启用模型失败": "获取启用模型失败",
+  "JSON解析错误:": "JSON解析错误:",
+  "保存失败:": "保存失败:",
+  "输入模型倍率": "输入模型倍率",
+  "输入补全倍率": "输入补全倍率",
+  "请输入数字": "请输入数字",
+  "模型名称已存在": "模型名称已存在",
+  "请先选择需要批量设置的模型": "请先选择需要批量设置的模型",
+  "请输入模型倍率和补全倍率": "请输入模型倍率和补全倍率",
+  "请输入有效的数字": "请输入有效的数字",
+  "请输入填充值": "请输入填充值",
+  "批量设置成功": "批量设置成功",
+  "已为 {{count}} 个模型设置{{type}}": "已为 {{count}} 个模型设置{{type}}",
+  "模型倍率和补全倍率": "模型倍率和补全倍率",
+  "添加模型": "添加模型",
+  "批量设置": "批量设置",
+  "应用更改": "应用更改",
+  "搜索模型名称": "搜索模型名称",
+  "此页面仅显示未设置价格或倍率的模型,设置后将自动从列表中移除": "此页面仅显示未设置价格或倍率的模型,设置后将自动从列表中移除",
+  "定价模式": "定价模式",
+  "固定价格": "固定价格",
+  "固定价格(每次)": "固定价格(每次)",
+  "输入每次价格": "输入每次价格",
+  "输入补全价格": "输入补全价格",
+  "批量设置模型参数": "批量设置模型参数",
+  "设置类型": "设置类型",
+  "模型倍率和补全倍率同时设置": "模型倍率和补全倍率同时设置",
+  "模型倍率值": "模型倍率值",
+  "请输入模型倍率": "请输入模型倍率",
+  "补全倍率值": "补全倍率值",
+  "请输入补全倍率": "请输入补全倍率",
+  "请输入数值": "请输入数值",
+  "将为选中的 ": "将为选中的 ",
+  " 个模型设置相同的值": " 个模型设置相同的值",
+  "当前设置类型: ": "当前设置类型: ",
+  "默认补全倍率": "默认补全倍率",
+  "添加成功": "添加成功",
+  "价格设置方式": "价格设置方式",
+  "按倍率设置": "按倍率设置",
+  "按价格设置": "按价格设置",
+  "输入价格": "输入价格",
+  "输出价格": "输出价格",
+  "获取渠道失败:": "获取渠道失败:",
+  "请至少选择一个渠道": "请至少选择一个渠道",
+  "后端请求失败": "后端请求失败",
+  "部分渠道测试失败:": "部分渠道测试失败:",
+  "未找到差异化倍率,无需同步": "未找到差异化倍率,无需同步",
+  "请求后端接口失败:": "请求后端接口失败:",
+  "同步成功": "同步成功",
+  "部分保存失败": "部分保存失败",
+  "未找到匹配的模型": "未找到匹配的模型",
+  "暂无差异化倍率显示": "暂无差异化倍率显示",
+  "请先选择同步渠道": "请先选择同步渠道",
+  "倍率类型": "倍率类型",
+  "缓存倍率": "缓存倍率",
+  "当前值": "当前值",
+  "未设置": "未设置",
+  "与本地相同": "与本地相同",
+  "运营设置": "运营设置",
+  "聊天设置": "聊天设置",
+  "速率限制设置": "速率限制设置",
+  "模型相关设置": "模型相关设置",
+  "系统设置": "系统设置",
+  "仪表盘设置": "仪表盘设置",
+  "获取初始化状态失败": "获取初始化状态失败",
+  "表单引用错误,请刷新页面重试": "表单引用错误,请刷新页面重试",
+  "请输入管理员用户名": "请输入管理员用户名",
+  "密码长度至少为8个字符": "密码长度至少为8个字符",
+  "两次输入的密码不一致": "两次输入的密码不一致",
+  "系统初始化成功,正在跳转...": "系统初始化成功,正在跳转...",
+  "初始化失败,请重试": "初始化失败,请重试",
+  "系统初始化失败,请重试": "系统初始化失败,请重试",
+  "系统初始化": "系统初始化",
+  "欢迎使用,请完成以下设置以开始使用系统": "欢迎使用,请完成以下设置以开始使用系统",
+  "数据库信息": "数据库信息",
+  "管理员账号": "管理员账号",
+  "设置系统管理员的登录信息": "设置系统管理员的登录信息",
+  "管理员账号已经初始化过,请继续设置其他参数": "管理员账号已经初始化过,请继续设置其他参数",
+  "密码": "密码",
+  "请输入管理员密码": "请输入管理员密码",
+  "请确认管理员密码": "请确认管理员密码",
+  "选择适合您使用场景的模式": "选择适合您使用场景的模式",
+  "对外运营模式": "对外运营模式",
+  "适用于为多个用户提供服务的场景": "适用于为多个用户提供服务的场景",
+  "默认模式": "默认模式",
+  "适用于个人使用的场景,不需要设置模型价格": "适用于个人使用的场景,不需要设置模型价格",
+  "无需计费": "无需计费",
+  "演示站点模式": "演示站点模式",
+  "适用于展示系统功能的场景,提供基础功能演示": "适用于展示系统功能的场景,提供基础功能演示",
+  "初始化系统": "初始化系统",
+  "使用模式说明": "使用模式说明",
+  "我已了解": "我已了解",
+  "默认模式,适用于为多个用户提供服务的场景。": "默认模式,适用于为多个用户提供服务的场景。",
+  "此模式下,系统将计算每次调用的用量,您需要对每个模型都设置价格,如果没有设置价格,用户将无法使用该模型。": "此模式下,系统将计算每次调用的用量,您需要对每个模型都设置价格,如果没有设置价格,用户将无法使用该模型。",
+  "多用户支持": "多用户支持",
+  "适用于个人使用的场景。": "适用于个人使用的场景。",
+  "不需要设置模型价格,系统将弱化用量计算,您可专注于使用模型。": "不需要设置模型价格,系统将弱化用量计算,您可专注于使用模型。",
+  "个人使用": "个人使用",
+  "适用于展示系统功能的场景。": "适用于展示系统功能的场景。",
+  "提供基础功能演示,方便用户了解系统特性。": "提供基础功能演示,方便用户了解系统特性。",
+  "体验试用": "体验试用",
+  "自动选择": "自动选择",
+  "过期时间格式错误!": "过期时间格式错误!",
+  "令牌更新成功!": "令牌更新成功!",
+  "令牌创建成功,请在列表页面点击复制获取令牌!": "令牌创建成功,请在列表页面点击复制获取令牌!",
+  "更新令牌信息": "更新令牌信息",
+  "创建新的令牌": "创建新的令牌",
+  "设置令牌的基本信息": "设置令牌的基本信息",
+  "请选择过期时间": "请选择过期时间",
+  "一天": "一天",
+  "一个月": "一个月",
+  "设置令牌可用额度和数量": "设置令牌可用额度和数量",
+  "新建数量": "新建数量",
+  "请选择或输入创建令牌的数量": "请选择或输入创建令牌的数量",
+  "20个": "20个",
+  "100个": "100个",
+  "取消无限额度": "取消无限额度",
+  "设为无限额度": "设为无限额度",
+  "设置令牌的访问限制": "设置令牌的访问限制",
+  "IP白名单": "IP白名单",
+  "允许的IP,一行一个,不填写则不限制": "允许的IP,一行一个,不填写则不限制",
+  "请勿过度信任此功能,IP可能被伪造": "请勿过度信任此功能,IP可能被伪造",
+  "勾选启用模型限制后可选择": "勾选启用模型限制后可选择",
+  "非必要,不建议启用模型限制": "非必要,不建议启用模型限制",
+  "分组信息": "分组信息",
+  "设置令牌的分组": "设置令牌的分组",
+  "令牌分组,默认为用户的分组": "令牌分组,默认为用户的分组",
+  "管理员未设置用户可选分组": "管理员未设置用户可选分组",
+  "请输入兑换码!": "请输入兑换码!",
+  "兑换成功!": "兑换成功!",
+  "成功兑换额度:": "成功兑换额度:",
+  "请求失败": "请求失败",
+  "超级管理员未设置充值链接!": "超级管理员未设置充值链接!",
+  "管理员未开启在线充值!": "管理员未开启在线充值!",
+  "充值数量不能小于": "充值数量不能小于",
+  "支付请求失败": "支付请求失败",
+  "划转金额最低为": "划转金额最低为",
+  "邀请链接已复制到剪切板": "邀请链接已复制到剪切板",
+  "支付方式配置错误, 请联系管理员": "支付方式配置错误, 请联系管理员",
+  "划转邀请额度": "划转邀请额度",
+  "可用邀请额度": "可用邀请额度",
+  "划转额度": "划转额度",
+  "充值确认": "充值确认",
+  "充值数量": "充值数量",
+  "实付金额": "实付金额",
+  "支付方式": "支付方式",
+  "在线充值": "在线充值",
+  "快速方便的充值方式": "快速方便的充值方式",
+  "选择充值额度": "选择充值额度",
+  "实付": "实付",
+  "或输入自定义金额": "或输入自定义金额",
+  "充值数量,最低 ": "充值数量,最低 ",
+  "选择支付方式": "选择支付方式",
+  "处理中": "处理中",
+  "兑换码充值": "兑换码充值",
+  "使用兑换码快速充值": "使用兑换码快速充值",
+  "请输入兑换码": "请输入兑换码",
+  "兑换中...": "兑换中...",
+  "兑换": "兑换",
+  "邀请奖励": "邀请奖励",
+  "邀请好友获得额外奖励": "邀请好友获得额外奖励",
+  "待使用收益": "待使用收益",
+  "总收益": "总收益",
+  "邀请人数": "邀请人数",
+  "邀请链接": "邀请链接",
+  "邀请好友注册,好友充值后您可获得相应奖励": "邀请好友注册,好友充值后您可获得相应奖励",
+  "通过划转功能将奖励额度转入到您的账户余额中": "通过划转功能将奖励额度转入到您的账户余额中",
+  "邀请的好友越多,获得的奖励越多": "邀请的好友越多,获得的奖励越多",
+  "用户名和密码不能为空!": "用户名和密码不能为空!",
+  "用户账户创建成功!": "用户账户创建成功!",
+  "提交": "提交",
+  "创建新用户账户": "创建新用户账户",
+  "请输入显示名称": "请输入显示名称",
+  "请输入密码": "请输入密码",
+  "请输入备注(仅管理员可见)": "请输入备注(仅管理员可见)",
+  "编辑用户": "编辑用户",
+  "用户的基本账户信息": "用户的基本账户信息",
+  "请输入新的用户名": "请输入新的用户名",
+  "请输入新的密码,最短 8 位": "请输入新的密码,最短 8 位",
+  "显示名称": "显示名称",
+  "请输入新的显示名称": "请输入新的显示名称",
+  "权限设置": "权限设置",
+  "用户分组和额度管理": "用户分组和额度管理",
+  "请输入新的剩余额度": "请输入新的剩余额度",
+  "添加额度": "添加额度",
+  "第三方账户绑定状态(只读)": "第三方账户绑定状态(只读)",
+  "已绑定的 GitHub 账户": "已绑定的 GitHub 账户",
+  "已绑定的 OIDC 账户": "已绑定的 OIDC 账户",
+  "已绑定的微信账户": "已绑定的微信账户",
+  "已绑定的邮箱账户": "已绑定的邮箱账户",
+  "已绑定的 Telegram 账户": "已绑定的 Telegram 账户",
+  "新额度": "新额度",
+  "需要添加的额度(支持负数)": "需要添加的额度(支持负数)"
+}

+ 56 - 33
main.go

@@ -12,7 +12,7 @@ import (
 	"one-api/model"
 	"one-api/router"
 	"one-api/service"
-	"one-api/setting/operation_setting"
+	"one-api/setting/ratio_setting"
 	"os"
 	"strconv"
 
@@ -32,13 +32,13 @@ var buildFS embed.FS
 var indexPage []byte
 
 func main() {
-	err := godotenv.Load(".env")
+
+	err := InitResources()
 	if err != nil {
-		common.SysLog("Support for .env file is disabled: " + err.Error())
+		common.FatalLog("failed to initialize resources: " + err.Error())
+		return
 	}
 
-	common.LoadEnv()
-
 	common.SetupLogger()
 	common.SysLog("New API " + common.Version + " started")
 	if os.Getenv("GIN_MODE") != "debug" {
@@ -47,19 +47,7 @@ func main() {
 	if common.DebugEnabled {
 		common.SysLog("running in debug mode")
 	}
-	// Initialize SQL Database
-	err = model.InitDB()
-	if err != nil {
-		common.FatalLog("failed to initialize database: " + err.Error())
-	}
 
-	model.CheckSetup()
-
-	// Initialize SQL Database
-	err = model.InitLogDB()
-	if err != nil {
-		common.FatalLog("failed to initialize database: " + err.Error())
-	}
 	defer func() {
 		err := model.CloseDB()
 		if err != nil {
@@ -67,21 +55,6 @@ func main() {
 		}
 	}()
 
-	// Initialize Redis
-	err = common.InitRedisClient()
-	if err != nil {
-		common.FatalLog("failed to initialize Redis: " + err.Error())
-	}
-
-	// Initialize model settings
-	operation_setting.InitRatioSettings()
-	// Initialize constants
-	constant.InitEnv()
-	// Initialize options
-	model.InitOptionMap()
-
-	service.InitTokenEncoders()
-
 	if common.RedisEnabled {
 		// for compatibility with old versions
 		common.MemoryCacheEnabled = true
@@ -105,10 +78,12 @@ func main() {
 			model.InitChannelCache()
 		}()
 
-		go model.SyncOptions(common.SyncFrequency)
 		go model.SyncChannelCache(common.SyncFrequency)
 	}
 
+	// 热更新配置
+	go model.SyncOptions(common.SyncFrequency)
+
 	// 数据看板
 	go model.UpdateQuotaData()
 
@@ -184,3 +159,51 @@ func main() {
 		common.FatalLog("failed to start HTTP server: " + err.Error())
 	}
 }
+
+func InitResources() error {
+	// Initialize resources here if needed
+	// This is a placeholder function for future resource initialization
+	err := godotenv.Load(".env")
+	if err != nil {
+		common.SysLog("未找到 .env 文件,使用默认环境变量,如果需要,请创建 .env 文件并设置相关变量")
+		common.SysLog("No .env file found, using default environment variables. If needed, please create a .env file and set the relevant variables.")
+	}
+
+	// 加载环境变量
+	common.InitEnv()
+
+	// Initialize model settings
+	ratio_setting.InitRatioSettings()
+
+	service.InitHttpClient()
+
+	service.InitTokenEncoders()
+
+	// Initialize SQL Database
+	err = model.InitDB()
+	if err != nil {
+		common.FatalLog("failed to initialize database: " + err.Error())
+		return err
+	}
+
+	model.CheckSetup()
+
+	// Initialize options, should after model.InitDB()
+	model.InitOptionMap()
+
+	// 初始化模型
+	model.GetPricing()
+
+	// Initialize SQL Database
+	err = model.InitLogDB()
+	if err != nil {
+		return err
+	}
+
+	// Initialize Redis
+	err = common.InitRedisClient()
+	if err != nil {
+		return err
+	}
+	return nil
+}

+ 1 - 1
middleware/auth.go

@@ -184,7 +184,7 @@ func TokenAuth() func(c *gin.Context) {
 			}
 		}
 		// gemini api 从query中获取key
-		if strings.HasPrefix(c.Request.URL.Path, "/v1beta/models/") {
+		if strings.HasPrefix(c.Request.URL.Path, "/v1beta/models/") || strings.HasPrefix(c.Request.URL.Path, "/v1/models/") {
 			skKey := c.Query("key")
 			if skKey != "" {
 				c.Request.Header.Set("Authorization", "Bearer "+skKey)

+ 49 - 22
middleware/distributor.go

@@ -11,6 +11,7 @@ import (
 	relayconstant "one-api/relay/constant"
 	"one-api/service"
 	"one-api/setting"
+	"one-api/setting/ratio_setting"
 	"strconv"
 	"strings"
 	"time"
@@ -24,7 +25,7 @@ type ModelRequest struct {
 
 func Distribute() func(c *gin.Context) {
 	return func(c *gin.Context) {
-		allowIpsMap := c.GetStringMap("allow_ips")
+		allowIpsMap := common.GetContextKeyStringMap(c, constant.ContextKeyTokenAllowIps)
 		if len(allowIpsMap) != 0 {
 			clientIp := c.ClientIP()
 			if _, ok := allowIpsMap[clientIp]; !ok {
@@ -33,14 +34,14 @@ func Distribute() func(c *gin.Context) {
 			}
 		}
 		var channel *model.Channel
-		channelId, ok := c.Get("specific_channel_id")
+		channelId, ok := common.GetContextKey(c, constant.ContextKeyTokenSpecificChannelId)
 		modelRequest, shouldSelectChannel, err := getModelRequest(c)
 		if err != nil {
 			abortWithOpenAiMessage(c, http.StatusBadRequest, "Invalid request, "+err.Error())
 			return
 		}
-		userGroup := c.GetString(constant.ContextKeyUserGroup)
-		tokenGroup := c.GetString("token_group")
+		userGroup := common.GetContextKeyString(c, constant.ContextKeyUserGroup)
+		tokenGroup := common.GetContextKeyString(c, constant.ContextKeyTokenGroup)
 		if tokenGroup != "" {
 			// check common.UserUsableGroups[userGroup]
 			if _, ok := setting.GetUserUsableGroups(userGroup)[tokenGroup]; !ok {
@@ -48,13 +49,15 @@ func Distribute() func(c *gin.Context) {
 				return
 			}
 			// check group in common.GroupRatio
-			if !setting.ContainsGroupRatio(tokenGroup) {
-				abortWithOpenAiMessage(c, http.StatusForbidden, fmt.Sprintf("分组 %s 已被弃用", tokenGroup))
-				return
+			if !ratio_setting.ContainsGroupRatio(tokenGroup) {
+				if tokenGroup != "auto" {
+					abortWithOpenAiMessage(c, http.StatusForbidden, fmt.Sprintf("分组 %s 已被弃用", tokenGroup))
+					return
+				}
 			}
 			userGroup = tokenGroup
 		}
-		c.Set("group", userGroup)
+		common.SetContextKey(c, constant.ContextKeyUsingGroup, userGroup)
 		if ok {
 			id, err := strconv.Atoi(channelId.(string))
 			if err != nil {
@@ -73,9 +76,9 @@ func Distribute() func(c *gin.Context) {
 		} else {
 			// Select a channel for the user
 			// check token model mapping
-			modelLimitEnable := c.GetBool("token_model_limit_enabled")
+			modelLimitEnable := common.GetContextKeyBool(c, constant.ContextKeyTokenModelLimitEnabled)
 			if modelLimitEnable {
-				s, ok := c.Get("token_model_limit")
+				s, ok := common.GetContextKey(c, constant.ContextKeyTokenModelLimit)
 				var tokenModelLimit map[string]bool
 				if ok {
 					tokenModelLimit = s.(map[string]bool)
@@ -95,9 +98,14 @@ func Distribute() func(c *gin.Context) {
 			}
 
 			if shouldSelectChannel {
-				channel, err = model.CacheGetRandomSatisfiedChannel(userGroup, modelRequest.Model, 0)
+				var selectGroup string
+				channel, selectGroup, err = model.CacheGetRandomSatisfiedChannel(c, userGroup, modelRequest.Model, 0)
 				if err != nil {
-					message := fmt.Sprintf("当前分组 %s 下对于模型 %s 无可用渠道", userGroup, modelRequest.Model)
+					showGroup := userGroup
+					if userGroup == "auto" {
+						showGroup = fmt.Sprintf("auto(%s)", selectGroup)
+					}
+					message := fmt.Sprintf("当前分组 %s 下对于模型 %s 无可用渠道", showGroup, modelRequest.Model)
 					// 如果错误,但是渠道不为空,说明是数据库一致性问题
 					if channel != nil {
 						common.SysError(fmt.Sprintf("渠道不存在:%d", channel.Id))
@@ -113,7 +121,7 @@ func Distribute() func(c *gin.Context) {
 				}
 			}
 		}
-		c.Set(constant.ContextKeyRequestStartTime, time.Now())
+		common.SetContextKey(c, constant.ContextKeyRequestStartTime, time.Now())
 		SetupContextForSelectedChannel(c, channel, modelRequest.Model)
 		c.Next()
 	}
@@ -162,7 +170,26 @@ func getModelRequest(c *gin.Context) (*ModelRequest, bool, error) {
 		}
 		c.Set("platform", string(constant.TaskPlatformSuno))
 		c.Set("relay_mode", relayMode)
-	} else if strings.HasPrefix(c.Request.URL.Path, "/v1beta/models/") {
+	} else if strings.Contains(c.Request.URL.Path, "/v1/video/generations") {
+		err = common.UnmarshalBodyReusable(c, &modelRequest)
+		var platform string
+		var relayMode int
+		if strings.HasPrefix(modelRequest.Model, "jimeng") {
+			platform = string(constant.TaskPlatformJimeng)
+			relayMode = relayconstant.Path2RelayJimeng(c.Request.Method, c.Request.URL.Path)
+			if relayMode == relayconstant.RelayModeJimengFetchByID {
+				shouldSelectChannel = false
+			}
+		} else {
+			platform = string(constant.TaskPlatformKling)
+			relayMode = relayconstant.Path2RelayKling(c.Request.Method, c.Request.URL.Path)
+			if relayMode == relayconstant.RelayModeKlingFetchByID {
+				shouldSelectChannel = false
+			}
+		}
+		c.Set("platform", platform)
+		c.Set("relay_mode", relayMode)
+	} else if strings.HasPrefix(c.Request.URL.Path, "/v1beta/models/") || strings.HasPrefix(c.Request.URL.Path, "/v1/models/") {
 		// Gemini API 路径处理: /v1beta/models/gemini-2.0-flash:generateContent
 		relayMode := relayconstant.RelayModeGemini
 		modelName := extractModelNameFromGeminiPath(c.Request.URL.Path)
@@ -234,21 +261,21 @@ func SetupContextForSelectedChannel(c *gin.Context, channel *model.Channel, mode
 	c.Set("base_url", channel.GetBaseURL())
 	// TODO: api_version统一
 	switch channel.Type {
-	case common.ChannelTypeAzure:
+	case constant.ChannelTypeAzure:
 		c.Set("api_version", channel.Other)
-	case common.ChannelTypeVertexAi:
+	case constant.ChannelTypeVertexAi:
 		c.Set("region", channel.Other)
-	case common.ChannelTypeXunfei:
+	case constant.ChannelTypeXunfei:
 		c.Set("api_version", channel.Other)
-	case common.ChannelTypeGemini:
+	case constant.ChannelTypeGemini:
 		c.Set("api_version", channel.Other)
-	case common.ChannelTypeAli:
+	case constant.ChannelTypeAli:
 		c.Set("plugin", channel.Other)
-	case common.ChannelCloudflare:
+	case constant.ChannelCloudflare:
 		c.Set("api_version", channel.Other)
-	case common.ChannelTypeMokaAI:
+	case constant.ChannelTypeMokaAI:
 		c.Set("api_version", channel.Other)
-	case common.ChannelTypeCoze:
+	case constant.ChannelTypeCoze:
 		c.Set("bot_id", channel.Other)
 	}
 }

+ 47 - 0
middleware/kling_adapter.go

@@ -0,0 +1,47 @@
+package middleware
+
+import (
+	"bytes"
+	"encoding/json"
+	"io"
+	"one-api/common"
+	"one-api/constant"
+
+	"github.com/gin-gonic/gin"
+)
+
+func KlingRequestConvert() func(c *gin.Context) {
+	return func(c *gin.Context) {
+		var originalReq map[string]interface{}
+		if err := common.UnmarshalBodyReusable(c, &originalReq); err != nil {
+			c.Next()
+			return
+		}
+
+		model, _ := originalReq["model"].(string)
+		prompt, _ := originalReq["prompt"].(string)
+
+		unifiedReq := map[string]interface{}{
+			"model":    model,
+			"prompt":   prompt,
+			"metadata": originalReq,
+		}
+
+		jsonData, err := json.Marshal(unifiedReq)
+		if err != nil {
+			c.Next()
+			return
+		}
+
+		// Rewrite request body and path
+		c.Request.Body = io.NopCloser(bytes.NewBuffer(jsonData))
+		c.Request.URL.Path = "/v1/video/generations"
+		if image := originalReq["image"]; image == "" {
+			c.Set("action", constant.TaskActionTextGenerate)
+		}
+
+		// We have to reset the request body for the next handlers
+		c.Set(common.KeyRequestBody, jsonData)
+		c.Next()
+	}
+}

+ 2 - 2
middleware/model-rate-limit.go

@@ -177,9 +177,9 @@ func ModelRequestRateLimit() func(c *gin.Context) {
 		successMaxCount := setting.ModelRequestRateLimitSuccessCount
 
 		// 获取分组
-		group := c.GetString("token_group")
+		group := common.GetContextKeyString(c, constant.ContextKeyTokenGroup)
 		if group == "" {
-			group = c.GetString(constant.ContextKeyUserGroup)
+			group = common.GetContextKeyString(c, constant.ContextKeyUserGroup)
 		}
 
 		//获取分组的限流配置

+ 20 - 5
model/ability.go

@@ -21,7 +21,22 @@ type Ability struct {
 	Tag       *string `json:"tag" gorm:"index"`
 }
 
-func GetGroupModels(group string) []string {
+type AbilityWithChannel struct {
+	Ability
+	ChannelType int `json:"channel_type"`
+}
+
+func GetAllEnableAbilityWithChannels() ([]AbilityWithChannel, error) {
+	var abilities []AbilityWithChannel
+	err := DB.Table("abilities").
+		Select("abilities.*, channels.type as channel_type").
+		Joins("left join channels on abilities.channel_id = channels.id").
+		Where("abilities.enabled = ?", true).
+		Scan(&abilities).Error
+	return abilities, err
+}
+
+func GetGroupEnabledModels(group string) []string {
 	var models []string
 	// Find distinct models
 	DB.Table("abilities").Where(commonGroupCol+" = ? and enabled = ?", group, true).Distinct("model").Pluck("model", &models)
@@ -46,7 +61,7 @@ func getPriority(group string, model string, retry int) (int, error) {
 	var priorities []int
 	err := DB.Model(&Ability{}).
 		Select("DISTINCT(priority)").
-		Where(commonGroupCol+" = ? and model = ? and enabled = ?", group, model, commonTrueVal).
+		Where(commonGroupCol+" = ? and model = ? and enabled = ?", group, model, true).
 		Order("priority DESC").              // 按优先级降序排序
 		Pluck("priority", &priorities).Error // Pluck用于将查询的结果直接扫描到一个切片中
 
@@ -72,14 +87,14 @@ func getPriority(group string, model string, retry int) (int, error) {
 }
 
 func getChannelQuery(group string, model string, retry int) *gorm.DB {
-	maxPrioritySubQuery := DB.Model(&Ability{}).Select("MAX(priority)").Where(commonGroupCol+" = ? and model = ? and enabled = ?", group, model, commonTrueVal)
-	channelQuery := DB.Where(commonGroupCol+" = ? and model = ? and enabled = ? and priority = (?)", group, model, commonTrueVal, maxPrioritySubQuery)
+	maxPrioritySubQuery := DB.Model(&Ability{}).Select("MAX(priority)").Where(commonGroupCol+" = ? and model = ? and enabled = ?", group, model, true)
+	channelQuery := DB.Where(commonGroupCol+" = ? and model = ? and enabled = ? and priority = (?)", group, model, true, maxPrioritySubQuery)
 	if retry != 0 {
 		priority, err := getPriority(group, model, retry)
 		if err != nil {
 			common.SysError(fmt.Sprintf("Get priority failed: %s", err.Error()))
 		} else {
-			channelQuery = DB.Where(commonGroupCol+" = ? and model = ? and enabled = ? and priority = ?", group, model, commonTrueVal, priority)
+			channelQuery = DB.Where(commonGroupCol+" = ? and model = ? and enabled = ? and priority = ?", group, model, true, priority)
 		}
 	}
 

+ 40 - 1
model/cache.go

@@ -5,10 +5,13 @@ import (
 	"fmt"
 	"math/rand"
 	"one-api/common"
+	"one-api/setting"
 	"sort"
 	"strings"
 	"sync"
 	"time"
+
+	"github.com/gin-gonic/gin"
 )
 
 var group2model2channels map[string]map[string][]*Channel
@@ -75,7 +78,43 @@ func SyncChannelCache(frequency int) {
 	}
 }
 
-func CacheGetRandomSatisfiedChannel(group string, model string, retry int) (*Channel, error) {
+func CacheGetRandomSatisfiedChannel(c *gin.Context, group string, model string, retry int) (*Channel, string, error) {
+	var channel *Channel
+	var err error
+	selectGroup := group
+	if group == "auto" {
+		if len(setting.AutoGroups) == 0 {
+			return nil, selectGroup, errors.New("auto groups is not enabled")
+		}
+		for _, autoGroup := range setting.AutoGroups {
+			if common.DebugEnabled {
+				println("autoGroup:", autoGroup)
+			}
+			channel, _ = getRandomSatisfiedChannel(autoGroup, model, retry)
+			if channel == nil {
+				continue
+			} else {
+				c.Set("auto_group", autoGroup)
+				selectGroup = autoGroup
+				if common.DebugEnabled {
+					println("selectGroup:", selectGroup)
+				}
+				break
+			}
+		}
+	} else {
+		channel, err = getRandomSatisfiedChannel(group, model, retry)
+		if err != nil {
+			return nil, group, err
+		}
+	}
+	if channel == nil {
+		return nil, group, errors.New("channel not found")
+	}
+	return channel, selectGroup, nil
+}
+
+func getRandomSatisfiedChannel(group string, model string, retry int) (*Channel, error) {
 	if strings.HasPrefix(model, "gpt-4-gizmo") {
 		model = "gpt-4-gizmo-*"
 	}

+ 36 - 0
model/channel.go

@@ -617,3 +617,39 @@ func CountAllTags() (int64, error) {
 	err := DB.Model(&Channel{}).Where("tag is not null AND tag != ''").Distinct("tag").Count(&total).Error
 	return total, err
 }
+
+// Get channels of specified type with pagination
+func GetChannelsByType(startIdx int, num int, idSort bool, channelType int) ([]*Channel, error) {
+	var channels []*Channel
+	order := "priority desc"
+	if idSort {
+		order = "id desc"
+	}
+	err := DB.Where("type = ?", channelType).Order(order).Limit(num).Offset(startIdx).Omit("key").Find(&channels).Error
+	return channels, err
+}
+
+// Count channels of specific type
+func CountChannelsByType(channelType int) (int64, error) {
+	var count int64
+	err := DB.Model(&Channel{}).Where("type = ?", channelType).Count(&count).Error
+	return count, err
+}
+
+// Return map[type]count for all channels
+func CountChannelsGroupByType() (map[int64]int64, error) {
+	type result struct {
+		Type  int64 `gorm:"column:type"`
+		Count int64 `gorm:"column:count"`
+	}
+	var results []result
+	err := DB.Model(&Channel{}).Select("type, count(*) as count").Group("type").Find(&results).Error
+	if err != nil {
+		return nil, err
+	}
+	counts := make(map[int64]int64)
+	for _, r := range results {
+		counts[r.Type] = r.Count
+	}
+	return counts, nil
+}

+ 9 - 0
model/main.go

@@ -46,6 +46,15 @@ func initCol() {
 			logGroupCol = commonGroupCol
 			logKeyCol = commonKeyCol
 		}
+	} else {
+		// LOG_SQL_DSN 为空时,日志数据库与主数据库相同
+		if common.UsingPostgreSQL {
+			logGroupCol = `"group"`
+			logKeyCol = `"key"`
+		} else {
+			logGroupCol = commonGroupCol
+			logKeyCol = commonKeyCol
+		}
 	}
 	// log sql type and database type
 	//common.SysLog("Using Log SQL Type: " + common.LogSqlType)

+ 2 - 0
model/midjourney.go

@@ -14,6 +14,8 @@ type Midjourney struct {
 	StartTime   int64  `json:"start_time" gorm:"index"`
 	FinishTime  int64  `json:"finish_time" gorm:"index"`
 	ImageUrl    string `json:"image_url"`
+	VideoUrl    string `json:"video_url"`
+	VideoUrls   string `json:"video_urls"`
 	Status      string `json:"status" gorm:"type:varchar(20);index"`
 	Progress    string `json:"progress" gorm:"type:varchar(30);index"`
 	FailReason  string `json:"fail_reason"`

+ 26 - 13
model/option.go

@@ -5,6 +5,7 @@ import (
 	"one-api/setting"
 	"one-api/setting/config"
 	"one-api/setting/operation_setting"
+	"one-api/setting/ratio_setting"
 	"strconv"
 	"strings"
 	"time"
@@ -76,6 +77,9 @@ func InitOptionMap() {
 	common.OptionMap["MinTopUp"] = strconv.Itoa(setting.MinTopUp)
 	common.OptionMap["TopupGroupRatio"] = common.TopupGroupRatio2JSONString()
 	common.OptionMap["Chats"] = setting.Chats2JsonString()
+	common.OptionMap["AutoGroups"] = setting.AutoGroups2JsonString()
+	common.OptionMap["DefaultUseAutoGroup"] = strconv.FormatBool(setting.DefaultUseAutoGroup)
+	common.OptionMap["PayMethods"] = setting.PayMethods2JsonString()
 	common.OptionMap["GitHubClientId"] = ""
 	common.OptionMap["GitHubClientSecret"] = ""
 	common.OptionMap["TelegramBotToken"] = ""
@@ -94,13 +98,13 @@ func InitOptionMap() {
 	common.OptionMap["ModelRequestRateLimitDurationMinutes"] = strconv.Itoa(setting.ModelRequestRateLimitDurationMinutes)
 	common.OptionMap["ModelRequestRateLimitSuccessCount"] = strconv.Itoa(setting.ModelRequestRateLimitSuccessCount)
 	common.OptionMap["ModelRequestRateLimitGroup"] = setting.ModelRequestRateLimitGroup2JSONString()
-	common.OptionMap["ModelRatio"] = operation_setting.ModelRatio2JSONString()
-	common.OptionMap["ModelPrice"] = operation_setting.ModelPrice2JSONString()
-	common.OptionMap["CacheRatio"] = operation_setting.CacheRatio2JSONString()
-	common.OptionMap["GroupRatio"] = setting.GroupRatio2JSONString()
-	common.OptionMap["GroupGroupRatio"] = setting.GroupGroupRatio2JSONString()
+	common.OptionMap["ModelRatio"] = ratio_setting.ModelRatio2JSONString()
+	common.OptionMap["ModelPrice"] = ratio_setting.ModelPrice2JSONString()
+	common.OptionMap["CacheRatio"] = ratio_setting.CacheRatio2JSONString()
+	common.OptionMap["GroupRatio"] = ratio_setting.GroupRatio2JSONString()
+	common.OptionMap["GroupGroupRatio"] = ratio_setting.GroupGroupRatio2JSONString()
 	common.OptionMap["UserUsableGroups"] = setting.UserUsableGroups2JSONString()
-	common.OptionMap["CompletionRatio"] = operation_setting.CompletionRatio2JSONString()
+	common.OptionMap["CompletionRatio"] = ratio_setting.CompletionRatio2JSONString()
 	common.OptionMap["TopUpLink"] = common.TopUpLink
 	//common.OptionMap["ChatLink"] = common.ChatLink
 	//common.OptionMap["ChatLink2"] = common.ChatLink2
@@ -123,6 +127,7 @@ func InitOptionMap() {
 	common.OptionMap["SensitiveWords"] = setting.SensitiveWordsToString()
 	common.OptionMap["StreamCacheQueueLength"] = strconv.Itoa(setting.StreamCacheQueueLength)
 	common.OptionMap["AutomaticDisableKeywords"] = operation_setting.AutomaticDisableKeywordsToString()
+	common.OptionMap["ExposeRatioEnabled"] = strconv.FormatBool(ratio_setting.IsExposeRatioEnabled())
 
 	// 自动添加所有注册的模型配置
 	modelConfigs := config.GlobalConfig.ExportAllConfigs()
@@ -192,7 +197,7 @@ func updateOptionMap(key string, value string) (err error) {
 			common.ImageDownloadPermission = intValue
 		}
 	}
-	if strings.HasSuffix(key, "Enabled") || key == "DefaultCollapseSidebar" {
+	if strings.HasSuffix(key, "Enabled") || key == "DefaultCollapseSidebar" || key == "DefaultUseAutoGroup" {
 		boolValue := value == "true"
 		switch key {
 		case "PasswordRegisterEnabled":
@@ -261,6 +266,10 @@ func updateOptionMap(key string, value string) (err error) {
 			common.SMTPSSLEnabled = boolValue
 		case "WorkerAllowHttpImageRequestEnabled":
 			setting.WorkerAllowHttpImageRequestEnabled = boolValue
+		case "DefaultUseAutoGroup":
+			setting.DefaultUseAutoGroup = boolValue
+		case "ExposeRatioEnabled":
+			ratio_setting.SetExposeRatioEnabled(boolValue)
 		}
 	}
 	switch key {
@@ -287,6 +296,8 @@ func updateOptionMap(key string, value string) (err error) {
 		setting.PayAddress = value
 	case "Chats":
 		err = setting.UpdateChatsByJsonString(value)
+	case "AutoGroups":
+		err = setting.UpdateAutoGroupsByJsonString(value)
 	case "CustomCallbackAddress":
 		setting.CustomCallbackAddress = value
 	case "EpayId":
@@ -352,19 +363,19 @@ func updateOptionMap(key string, value string) (err error) {
 	case "DataExportDefaultTime":
 		common.DataExportDefaultTime = value
 	case "ModelRatio":
-		err = operation_setting.UpdateModelRatioByJSONString(value)
+		err = ratio_setting.UpdateModelRatioByJSONString(value)
 	case "GroupRatio":
-		err = setting.UpdateGroupRatioByJSONString(value)
+		err = ratio_setting.UpdateGroupRatioByJSONString(value)
 	case "GroupGroupRatio":
-		err = setting.UpdateGroupGroupRatioByJSONString(value)
+		err = ratio_setting.UpdateGroupGroupRatioByJSONString(value)
 	case "UserUsableGroups":
 		err = setting.UpdateUserUsableGroupsByJSONString(value)
 	case "CompletionRatio":
-		err = operation_setting.UpdateCompletionRatioByJSONString(value)
+		err = ratio_setting.UpdateCompletionRatioByJSONString(value)
 	case "ModelPrice":
-		err = operation_setting.UpdateModelPriceByJSONString(value)
+		err = ratio_setting.UpdateModelPriceByJSONString(value)
 	case "CacheRatio":
-		err = operation_setting.UpdateCacheRatioByJSONString(value)
+		err = ratio_setting.UpdateCacheRatioByJSONString(value)
 	case "TopUpLink":
 		common.TopUpLink = value
 	//case "ChatLink":
@@ -381,6 +392,8 @@ func updateOptionMap(key string, value string) (err error) {
 		operation_setting.AutomaticDisableKeywordsFromString(value)
 	case "StreamCacheQueueLength":
 		setting.StreamCacheQueueLength, _ = strconv.Atoi(value)
+	case "PayMethods":
+		err = setting.UpdatePayMethodsByJsonString(value)
 	}
 	return err
 }

+ 82 - 36
model/pricing.go

@@ -1,20 +1,24 @@
 package model
 
 import (
+	"fmt"
 	"one-api/common"
-	"one-api/setting/operation_setting"
+	"one-api/constant"
+	"one-api/setting/ratio_setting"
+	"one-api/types"
 	"sync"
 	"time"
 )
 
 type Pricing struct {
-	ModelName       string   `json:"model_name"`
-	QuotaType       int      `json:"quota_type"`
-	ModelRatio      float64  `json:"model_ratio"`
-	ModelPrice      float64  `json:"model_price"`
-	OwnerBy         string   `json:"owner_by"`
-	CompletionRatio float64  `json:"completion_ratio"`
-	EnableGroup     []string `json:"enable_groups,omitempty"`
+	ModelName              string                  `json:"model_name"`
+	QuotaType              int                     `json:"quota_type"`
+	ModelRatio             float64                 `json:"model_ratio"`
+	ModelPrice             float64                 `json:"model_price"`
+	OwnerBy                string                  `json:"owner_by"`
+	CompletionRatio        float64                 `json:"completion_ratio"`
+	EnableGroup            []string                `json:"enable_groups"`
+	SupportedEndpointTypes []constant.EndpointType `json:"supported_endpoint_types"`
 }
 
 var (
@@ -23,56 +27,98 @@ var (
 	updatePricingLock  sync.Mutex
 )
 
-func GetPricing() []Pricing {
-	updatePricingLock.Lock()
-	defer updatePricingLock.Unlock()
+var (
+	modelSupportEndpointTypes = make(map[string][]constant.EndpointType)
+	modelSupportEndpointsLock = sync.RWMutex{}
+)
 
+func GetPricing() []Pricing {
 	if time.Since(lastGetPricingTime) > time.Minute*1 || len(pricingMap) == 0 {
-		updatePricing()
+		updatePricingLock.Lock()
+		defer updatePricingLock.Unlock()
+		// Double check after acquiring the lock
+		if time.Since(lastGetPricingTime) > time.Minute*1 || len(pricingMap) == 0 {
+			modelSupportEndpointsLock.Lock()
+			defer modelSupportEndpointsLock.Unlock()
+			updatePricing()
+		}
 	}
-	//if group != "" {
-	//	userPricingMap := make([]Pricing, 0)
-	//	models := GetGroupModels(group)
-	//	for _, pricing := range pricingMap {
-	//		if !common.StringsContains(models, pricing.ModelName) {
-	//			pricing.Available = false
-	//		}
-	//		userPricingMap = append(userPricingMap, pricing)
-	//	}
-	//	return userPricingMap
-	//}
 	return pricingMap
 }
 
+func GetModelSupportEndpointTypes(model string) []constant.EndpointType {
+	if model == "" {
+		return make([]constant.EndpointType, 0)
+	}
+	modelSupportEndpointsLock.RLock()
+	defer modelSupportEndpointsLock.RUnlock()
+	if endpoints, ok := modelSupportEndpointTypes[model]; ok {
+		return endpoints
+	}
+	return make([]constant.EndpointType, 0)
+}
+
 func updatePricing() {
 	//modelRatios := common.GetModelRatios()
-	enableAbilities := GetAllEnableAbilities()
-	modelGroupsMap := make(map[string][]string)
+	enableAbilities, err := GetAllEnableAbilityWithChannels()
+	if err != nil {
+		common.SysError(fmt.Sprintf("GetAllEnableAbilityWithChannels error: %v", err))
+		return
+	}
+	modelGroupsMap := make(map[string]*types.Set[string])
+
 	for _, ability := range enableAbilities {
-		groups := modelGroupsMap[ability.Model]
-		if groups == nil {
-			groups = make([]string, 0)
+		groups, ok := modelGroupsMap[ability.Model]
+		if !ok {
+			groups = types.NewSet[string]()
+			modelGroupsMap[ability.Model] = groups
 		}
-		if !common.StringsContains(groups, ability.Group) {
-			groups = append(groups, ability.Group)
+		groups.Add(ability.Group)
+	}
+
+	//这里使用切片而不是Set,因为一个模型可能支持多个端点类型,并且第一个端点是优先使用端点
+	modelSupportEndpointsStr := make(map[string][]string)
+
+	for _, ability := range enableAbilities {
+		endpoints, ok := modelSupportEndpointsStr[ability.Model]
+		if !ok {
+			endpoints = make([]string, 0)
+			modelSupportEndpointsStr[ability.Model] = endpoints
+		}
+		channelTypes := common.GetEndpointTypesByChannelType(ability.ChannelType, ability.Model)
+		for _, channelType := range channelTypes {
+			if !common.StringsContains(endpoints, string(channelType)) {
+				endpoints = append(endpoints, string(channelType))
+			}
+		}
+		modelSupportEndpointsStr[ability.Model] = endpoints
+	}
+
+	modelSupportEndpointTypes = make(map[string][]constant.EndpointType)
+	for model, endpoints := range modelSupportEndpointsStr {
+		supportedEndpoints := make([]constant.EndpointType, 0)
+		for _, endpointStr := range endpoints {
+			endpointType := constant.EndpointType(endpointStr)
+			supportedEndpoints = append(supportedEndpoints, endpointType)
 		}
-		modelGroupsMap[ability.Model] = groups
+		modelSupportEndpointTypes[model] = supportedEndpoints
 	}
 
 	pricingMap = make([]Pricing, 0)
 	for model, groups := range modelGroupsMap {
 		pricing := Pricing{
-			ModelName:   model,
-			EnableGroup: groups,
+			ModelName:              model,
+			EnableGroup:            groups.Items(),
+			SupportedEndpointTypes: modelSupportEndpointTypes[model],
 		}
-		modelPrice, findPrice := operation_setting.GetModelPrice(model, false)
+		modelPrice, findPrice := ratio_setting.GetModelPrice(model, false)
 		if findPrice {
 			pricing.ModelPrice = modelPrice
 			pricing.QuotaType = 1
 		} else {
-			modelRatio, _ := operation_setting.GetModelRatio(model)
+			modelRatio, _ := ratio_setting.GetModelRatio(model)
 			pricing.ModelRatio = modelRatio
-			pricing.CompletionRatio = operation_setting.GetCompletionRatio(model)
+			pricing.CompletionRatio = ratio_setting.GetCompletionRatio(model)
 			pricing.QuotaType = 0
 		}
 		pricingMap = append(pricingMap, pricing)

+ 34 - 0
model/token.go

@@ -327,3 +327,37 @@ func CountUserTokens(userId int) (int64, error) {
 	err := DB.Model(&Token{}).Where("user_id = ?", userId).Count(&total).Error
 	return total, err
 }
+
+// BatchDeleteTokens 删除指定用户的一组令牌,返回成功删除数量
+func BatchDeleteTokens(ids []int, userId int) (int, error) {
+	if len(ids) == 0 {
+		return 0, errors.New("ids 不能为空!")
+	}
+
+	tx := DB.Begin()
+
+	var tokens []Token
+	if err := tx.Where("user_id = ? AND id IN (?)", userId, ids).Find(&tokens).Error; err != nil {
+		tx.Rollback()
+		return 0, err
+	}
+
+	if err := tx.Where("user_id = ? AND id IN (?)", userId, ids).Delete(&Token{}).Error; err != nil {
+		tx.Rollback()
+		return 0, err
+	}
+
+	if err := tx.Commit().Error; err != nil {
+		return 0, err
+	}
+
+	if common.RedisEnabled {
+		gopool.Go(func() {
+			for _, t := range tokens {
+				_ = cacheDeleteToken(t.Key)
+			}
+		})
+	}
+
+	return len(tokens), nil
+}

+ 1 - 1
model/token_cache.go

@@ -10,7 +10,7 @@ import (
 func cacheSetToken(token Token) error {
 	key := common.GenerateHMAC(token.Key)
 	token.Clean()
-	err := common.RedisHSetObj(fmt.Sprintf("token:%s", key), &token, time.Duration(constant.TokenCacheSeconds)*time.Second)
+	err := common.RedisHSetObj(fmt.Sprintf("token:%s", key), &token, time.Duration(common.RedisKeyCacheSeconds())*time.Second)
 	if err != nil {
 		return err
 	}

+ 4 - 2
model/user.go

@@ -41,6 +41,7 @@ type User struct {
 	DeletedAt        gorm.DeletedAt `gorm:"index"`
 	LinuxDOId        string         `json:"linux_do_id" gorm:"column:linux_do_id;index"`
 	Setting          string         `json:"setting" gorm:"type:text;column:setting"`
+	Remark           string         `json:"remark,omitempty" gorm:"type:varchar(255)" validate:"max=255"`
 }
 
 func (user *User) ToBaseUser() *UserBase {
@@ -113,7 +114,7 @@ func GetMaxUserId() int {
 	return user.Id
 }
 
-func GetAllUsers(startIdx int, num int) (users []*User, total int64, err error) {
+func GetAllUsers(pageInfo *common.PageInfo) (users []*User, total int64, err error) {
 	// Start transaction
 	tx := DB.Begin()
 	if tx.Error != nil {
@@ -133,7 +134,7 @@ func GetAllUsers(startIdx int, num int) (users []*User, total int64, err error)
 	}
 
 	// Get paginated users within same transaction
-	err = tx.Unscoped().Order("id desc").Limit(num).Offset(startIdx).Omit("password").Find(&users).Error
+	err = tx.Unscoped().Order("id desc").Limit(pageInfo.GetPageSize()).Offset(pageInfo.GetStartIdx()).Omit("password").Find(&users).Error
 	if err != nil {
 		tx.Rollback()
 		return nil, 0, err
@@ -366,6 +367,7 @@ func (user *User) Edit(updatePassword bool) error {
 		"display_name": newUser.DisplayName,
 		"group":        newUser.Group,
 		"quota":        newUser.Quota,
+		"remark":       newUser.Remark,
 	}
 	if updatePassword {
 		updates["password"] = newUser.Password

+ 7 - 7
model/user_cache.go

@@ -24,12 +24,12 @@ type UserBase struct {
 }
 
 func (user *UserBase) WriteContext(c *gin.Context) {
-	c.Set(constant.ContextKeyUserGroup, user.Group)
-	c.Set(constant.ContextKeyUserQuota, user.Quota)
-	c.Set(constant.ContextKeyUserStatus, user.Status)
-	c.Set(constant.ContextKeyUserEmail, user.Email)
-	c.Set("username", user.Username)
-	c.Set(constant.ContextKeyUserSetting, user.GetSetting())
+	common.SetContextKey(c, constant.ContextKeyUserGroup, user.Group)
+	common.SetContextKey(c, constant.ContextKeyUserQuota, user.Quota)
+	common.SetContextKey(c, constant.ContextKeyUserStatus, user.Status)
+	common.SetContextKey(c, constant.ContextKeyUserEmail, user.Email)
+	common.SetContextKey(c, constant.ContextKeyUserName, user.Username)
+	common.SetContextKey(c, constant.ContextKeyUserSetting, user.GetSetting())
 }
 
 func (user *UserBase) GetSetting() map[string]interface{} {
@@ -70,7 +70,7 @@ func updateUserCache(user User) error {
 	return common.RedisHSetObj(
 		getUserCacheKey(user.Id),
 		user.ToBaseUser(),
-		time.Duration(constant.UserId2QuotaCacheSeconds)*time.Second,
+		time.Duration(common.RedisKeyCacheSeconds())*time.Second,
 	)
 }
 

+ 19 - 2
model/utils.go

@@ -2,11 +2,12 @@ package model
 
 import (
 	"errors"
-	"github.com/bytedance/gopkg/util/gopool"
-	"gorm.io/gorm"
 	"one-api/common"
 	"sync"
 	"time"
+
+	"github.com/bytedance/gopkg/util/gopool"
+	"gorm.io/gorm"
 )
 
 const (
@@ -48,6 +49,22 @@ func addNewRecord(type_ int, id int, value int) {
 }
 
 func batchUpdate() {
+	// check if there's any data to update
+	hasData := false
+	for i := 0; i < BatchUpdateTypeCount; i++ {
+		batchUpdateLocks[i].Lock()
+		if len(batchUpdateStores[i]) > 0 {
+			hasData = true
+			batchUpdateLocks[i].Unlock()
+			break
+		}
+		batchUpdateLocks[i].Unlock()
+	}
+
+	if !hasData {
+		return
+	}
+
 	common.SysLog("batch update started")
 	for i := 0; i < BatchUpdateTypeCount; i++ {
 		batchUpdateLocks[i].Lock()

+ 3 - 8
relay/relay-audio.go → relay/audio_handler.go

@@ -55,7 +55,7 @@ func getAndValidAudioRequest(c *gin.Context, info *relaycommon.RelayInfo) (*dto.
 }
 
 func AudioHelper(c *gin.Context) (openaiErr *dto.OpenAIErrorWithStatusCode) {
-	relayInfo := relaycommon.GenRelayInfo(c)
+	relayInfo := relaycommon.GenRelayInfoOpenAIAudio(c)
 	audioRequest, err := getAndValidAudioRequest(c, relayInfo)
 
 	if err != nil {
@@ -66,10 +66,7 @@ func AudioHelper(c *gin.Context) (openaiErr *dto.OpenAIErrorWithStatusCode) {
 	promptTokens := 0
 	preConsumedTokens := common.PreConsumedQuota
 	if relayInfo.RelayMode == relayconstant.RelayModeAudioSpeech {
-		promptTokens, err = service.CountTTSToken(audioRequest.Input, audioRequest.Model)
-		if err != nil {
-			return service.OpenAIErrorWrapper(err, "count_audio_token_failed", http.StatusInternalServerError)
-		}
+		promptTokens = service.CountTTSToken(audioRequest.Input, audioRequest.Model)
 		preConsumedTokens = promptTokens
 		relayInfo.PromptTokens = promptTokens
 	}
@@ -89,13 +86,11 @@ func AudioHelper(c *gin.Context) (openaiErr *dto.OpenAIErrorWithStatusCode) {
 		}
 	}()
 
-	err = helper.ModelMappedHelper(c, relayInfo)
+	err = helper.ModelMappedHelper(c, relayInfo, audioRequest)
 	if err != nil {
 		return service.OpenAIErrorWrapperLocal(err, "model_mapped_error", http.StatusInternalServerError)
 	}
 
-	audioRequest.Model = relayInfo.UpstreamModelName
-
 	adaptor := GetAdaptor(relayInfo.ApiType)
 	if adaptor == nil {
 		return service.OpenAIErrorWrapperLocal(fmt.Errorf("invalid api type: %d", relayInfo.ApiType), "invalid_api_type", http.StatusBadRequest)

+ 2 - 0
relay/channel/adapter.go

@@ -44,4 +44,6 @@ type TaskAdaptor interface {
 
 	// FetchTask
 	FetchTask(baseUrl, key string, body map[string]any) (*http.Response, error)
+
+	ParseTaskResult(respBody []byte) (*relaycommon.TaskInfo, error)
 }

+ 2 - 2
relay/channel/ali/adaptor.go

@@ -30,7 +30,7 @@ func (a *Adaptor) GetRequestURL(info *relaycommon.RelayInfo) (string, error) {
 	var fullRequestURL string
 	switch info.RelayMode {
 	case constant.RelayModeEmbeddings:
-		fullRequestURL = fmt.Sprintf("%s/api/v1/services/embeddings/text-embedding/text-embedding", info.BaseUrl)
+		fullRequestURL = fmt.Sprintf("%s/compatible-mode/v1/embeddings", info.BaseUrl)
 	case constant.RelayModeRerank:
 		fullRequestURL = fmt.Sprintf("%s/api/v1/services/rerank/text-rerank/text-rerank", info.BaseUrl)
 	case constant.RelayModeImagesGenerations:
@@ -82,7 +82,7 @@ func (a *Adaptor) ConvertRerankRequest(c *gin.Context, relayMode int, request dt
 }
 
 func (a *Adaptor) ConvertEmbeddingRequest(c *gin.Context, info *relaycommon.RelayInfo, request dto.EmbeddingRequest) (any, error) {
-	return embeddingRequestOpenAI2Ali(request), nil
+	return request, nil
 }
 
 func (a *Adaptor) ConvertAudioRequest(c *gin.Context, info *relaycommon.RelayInfo, request dto.AudioRequest) (io.Reader, error) {

+ 1 - 4
relay/channel/ali/image.go

@@ -132,10 +132,7 @@ func aliImageHandler(c *gin.Context, resp *http.Response, info *relaycommon.Rela
 	if err != nil {
 		return service.OpenAIErrorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil
 	}
-	err = resp.Body.Close()
-	if err != nil {
-		return service.OpenAIErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
-	}
+	common.CloseResponseBodyGracefully(resp)
 	err = json.Unmarshal(responseBody, &aliTaskResponse)
 	if err != nil {
 		return service.OpenAIErrorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil

+ 2 - 4
relay/channel/ali/rerank.go

@@ -4,6 +4,7 @@ import (
 	"encoding/json"
 	"io"
 	"net/http"
+	"one-api/common"
 	"one-api/dto"
 	relaycommon "one-api/relay/common"
 	"one-api/service"
@@ -35,10 +36,7 @@ func RerankHandler(c *gin.Context, resp *http.Response, info *relaycommon.RelayI
 	if err != nil {
 		return service.OpenAIErrorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil
 	}
-	err = resp.Body.Close()
-	if err != nil {
-		return service.OpenAIErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
-	}
+	common.CloseResponseBodyGracefully(resp)
 
 	var aliResponse AliRerankResponse
 	err = json.Unmarshal(responseBody, &aliResponse)

+ 5 - 27
relay/channel/ali/text.go

@@ -39,34 +39,18 @@ func embeddingRequestOpenAI2Ali(request dto.EmbeddingRequest) *AliEmbeddingReque
 }
 
 func aliEmbeddingHandler(c *gin.Context, resp *http.Response) (*dto.OpenAIErrorWithStatusCode, *dto.Usage) {
-	var aliResponse AliEmbeddingResponse
-	err := json.NewDecoder(resp.Body).Decode(&aliResponse)
+	var fullTextResponse dto.OpenAIEmbeddingResponse
+	err := json.NewDecoder(resp.Body).Decode(&fullTextResponse)
 	if err != nil {
 		return service.OpenAIErrorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil
 	}
 
-	err = resp.Body.Close()
-	if err != nil {
-		return service.OpenAIErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
-	}
-
-	if aliResponse.Code != "" {
-		return &dto.OpenAIErrorWithStatusCode{
-			Error: dto.OpenAIError{
-				Message: aliResponse.Message,
-				Type:    aliResponse.Code,
-				Param:   aliResponse.RequestId,
-				Code:    aliResponse.Code,
-			},
-			StatusCode: resp.StatusCode,
-		}, nil
-	}
+	common.CloseResponseBodyGracefully(resp)
 
 	model := c.GetString("model")
 	if model == "" {
 		model = "text-embedding-v4"
 	}
-	fullTextResponse := embeddingResponseAli2OpenAI(&aliResponse, model)
 	jsonResponse, err := json.Marshal(fullTextResponse)
 	if err != nil {
 		return service.OpenAIErrorWrapper(err, "marshal_response_body_failed", http.StatusInternalServerError), nil
@@ -186,10 +170,7 @@ func aliStreamHandler(c *gin.Context, resp *http.Response) (*dto.OpenAIErrorWith
 			return false
 		}
 	})
-	err := resp.Body.Close()
-	if err != nil {
-		return service.OpenAIErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
-	}
+	common.CloseResponseBodyGracefully(resp)
 	return nil, &usage
 }
 
@@ -199,10 +180,7 @@ func aliHandler(c *gin.Context, resp *http.Response) (*dto.OpenAIErrorWithStatus
 	if err != nil {
 		return service.OpenAIErrorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil
 	}
-	err = resp.Body.Close()
-	if err != nil {
-		return service.OpenAIErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
-	}
+	common.CloseResponseBodyGracefully(resp)
 	err = json.Unmarshal(responseBody, &aliResponse)
 	if err != nil {
 		return service.OpenAIErrorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil

+ 4 - 13
relay/channel/baidu/relay-baidu.go

@@ -166,10 +166,7 @@ func baiduStreamHandler(c *gin.Context, resp *http.Response) (*dto.OpenAIErrorWi
 			return false
 		}
 	})
-	err := resp.Body.Close()
-	if err != nil {
-		return service.OpenAIErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
-	}
+	common.CloseResponseBodyGracefully(resp)
 	return nil, &usage
 }
 
@@ -179,10 +176,7 @@ func baiduHandler(c *gin.Context, resp *http.Response) (*dto.OpenAIErrorWithStat
 	if err != nil {
 		return service.OpenAIErrorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil
 	}
-	err = resp.Body.Close()
-	if err != nil {
-		return service.OpenAIErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
-	}
+	common.CloseResponseBodyGracefully(resp)
 	err = json.Unmarshal(responseBody, &baiduResponse)
 	if err != nil {
 		return service.OpenAIErrorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil
@@ -215,10 +209,7 @@ func baiduEmbeddingHandler(c *gin.Context, resp *http.Response) (*dto.OpenAIErro
 	if err != nil {
 		return service.OpenAIErrorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil
 	}
-	err = resp.Body.Close()
-	if err != nil {
-		return service.OpenAIErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
-	}
+	common.CloseResponseBodyGracefully(resp)
 	err = json.Unmarshal(responseBody, &baiduResponse)
 	if err != nil {
 		return service.OpenAIErrorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil
@@ -280,7 +271,7 @@ func getBaiduAccessTokenHelper(apiKey string) (*BaiduAccessToken, error) {
 	}
 	req.Header.Add("Content-Type", "application/json")
 	req.Header.Add("Accept", "application/json")
-	res, err := service.GetImpatientHttpClient().Do(req)
+	res, err := service.GetHttpClient().Do(req)
 	if err != nil {
 		return nil, err
 	}

+ 58 - 46
relay/channel/claude/relay-claude.go

@@ -7,6 +7,7 @@ import (
 	"net/http"
 	"one-api/common"
 	"one-api/dto"
+	"one-api/relay/channel/openrouter"
 	relaycommon "one-api/relay/common"
 	"one-api/relay/helper"
 	"one-api/service"
@@ -113,7 +114,7 @@ func RequestOpenAI2ClaudeMessage(textRequest dto.GeneralOpenAIRequest) (*dto.Cla
 		// BudgetTokens 为 max_tokens 的 80%
 		claudeRequest.Thinking = &dto.Thinking{
 			Type:         "enabled",
-			BudgetTokens: int(float64(claudeRequest.MaxTokens) * model_setting.GetClaudeSettings().ThinkingAdapterBudgetTokensPercentage),
+			BudgetTokens: common.GetPointer[int](int(float64(claudeRequest.MaxTokens) * model_setting.GetClaudeSettings().ThinkingAdapterBudgetTokensPercentage)),
 		}
 		// TODO: 临时处理
 		// https://docs.anthropic.com/en/docs/build-with-claude/extended-thinking#important-considerations-when-using-extended-thinking
@@ -122,6 +123,21 @@ func RequestOpenAI2ClaudeMessage(textRequest dto.GeneralOpenAIRequest) (*dto.Cla
 		claudeRequest.Model = strings.TrimSuffix(textRequest.Model, "-thinking")
 	}
 
+	if textRequest.Reasoning != nil {
+		var reasoning openrouter.RequestReasoning
+		if err := common.UnmarshalJson(textRequest.Reasoning, &reasoning); err != nil {
+			return nil, err
+		}
+
+		budgetTokens := reasoning.MaxTokens
+		if budgetTokens > 0 {
+			claudeRequest.Thinking = &dto.Thinking{
+				Type:         "enabled",
+				BudgetTokens: &budgetTokens,
+			}
+		}
+	}
+
 	if textRequest.Stop != nil {
 		// stop maybe string/array string, convert to array string
 		switch textRequest.Stop.(type) {
@@ -454,6 +470,7 @@ type ClaudeResponseInfo struct {
 	Model        string
 	ResponseText strings.Builder
 	Usage        *dto.Usage
+	Done         bool
 }
 
 func FormatClaudeResponseInfo(requestMode int, claudeResponse *dto.ClaudeResponse, oaiResponse *dto.ChatCompletionsStreamResponse, claudeInfo *ClaudeResponseInfo) bool {
@@ -461,20 +478,32 @@ func FormatClaudeResponseInfo(requestMode int, claudeResponse *dto.ClaudeRespons
 		claudeInfo.ResponseText.WriteString(claudeResponse.Completion)
 	} else {
 		if claudeResponse.Type == "message_start" {
-			// message_start, 获取usage
 			claudeInfo.ResponseId = claudeResponse.Message.Id
 			claudeInfo.Model = claudeResponse.Message.Model
+
+			// message_start, 获取usage
 			claudeInfo.Usage.PromptTokens = claudeResponse.Message.Usage.InputTokens
+			claudeInfo.Usage.PromptTokensDetails.CachedTokens = claudeResponse.Message.Usage.CacheReadInputTokens
+			claudeInfo.Usage.PromptTokensDetails.CachedCreationTokens = claudeResponse.Message.Usage.CacheCreationInputTokens
+			claudeInfo.Usage.CompletionTokens = claudeResponse.Message.Usage.OutputTokens
 		} else if claudeResponse.Type == "content_block_delta" {
 			if claudeResponse.Delta.Text != nil {
 				claudeInfo.ResponseText.WriteString(*claudeResponse.Delta.Text)
 			}
+			if claudeResponse.Delta.Thinking != "" {
+				claudeInfo.ResponseText.WriteString(claudeResponse.Delta.Thinking)
+			}
 		} else if claudeResponse.Type == "message_delta" {
-			claudeInfo.Usage.CompletionTokens = claudeResponse.Usage.OutputTokens
+			// 最终的usage获取
 			if claudeResponse.Usage.InputTokens > 0 {
+				// 不叠加,只取最新的
 				claudeInfo.Usage.PromptTokens = claudeResponse.Usage.InputTokens
 			}
-			claudeInfo.Usage.TotalTokens = claudeInfo.Usage.PromptTokens + claudeResponse.Usage.OutputTokens
+			claudeInfo.Usage.CompletionTokens = claudeResponse.Usage.OutputTokens
+			claudeInfo.Usage.TotalTokens = claudeInfo.Usage.PromptTokens + claudeInfo.Usage.CompletionTokens
+
+			// 判断是否完整
+			claudeInfo.Done = true
 		} else if claudeResponse.Type == "content_block_start" {
 		} else {
 			return false
@@ -490,7 +519,7 @@ func FormatClaudeResponseInfo(requestMode int, claudeResponse *dto.ClaudeRespons
 
 func HandleStreamResponseData(c *gin.Context, info *relaycommon.RelayInfo, claudeInfo *ClaudeResponseInfo, data string, requestMode int) *dto.OpenAIErrorWithStatusCode {
 	var claudeResponse dto.ClaudeResponse
-	err := common.DecodeJsonStr(data, &claudeResponse)
+	err := common.UnmarshalJsonStr(data, &claudeResponse)
 	if err != nil {
 		common.SysError("error unmarshalling stream response: " + err.Error())
 		return service.OpenAIErrorWrapper(err, "stream_response_error", http.StatusInternalServerError)
@@ -506,25 +535,15 @@ func HandleStreamResponseData(c *gin.Context, info *relaycommon.RelayInfo, claud
 		}
 	}
 	if info.RelayFormat == relaycommon.RelayFormatClaude {
+		FormatClaudeResponseInfo(requestMode, &claudeResponse, nil, claudeInfo)
+
 		if requestMode == RequestModeCompletion {
-			claudeInfo.ResponseText.WriteString(claudeResponse.Completion)
 		} else {
 			if claudeResponse.Type == "message_start" {
 				// message_start, 获取usage
 				info.UpstreamModelName = claudeResponse.Message.Model
-				claudeInfo.Usage.PromptTokens = claudeResponse.Message.Usage.InputTokens
-				claudeInfo.Usage.PromptTokensDetails.CachedTokens = claudeResponse.Message.Usage.CacheReadInputTokens
-				claudeInfo.Usage.PromptTokensDetails.CachedCreationTokens = claudeResponse.Message.Usage.CacheCreationInputTokens
-				claudeInfo.Usage.CompletionTokens = claudeResponse.Message.Usage.OutputTokens
 			} else if claudeResponse.Type == "content_block_delta" {
-				claudeInfo.ResponseText.WriteString(claudeResponse.Delta.GetText())
 			} else if claudeResponse.Type == "message_delta" {
-				if claudeResponse.Usage.InputTokens > 0 {
-					// 不叠加,只取最新的
-					claudeInfo.Usage.PromptTokens = claudeResponse.Usage.InputTokens
-				}
-				claudeInfo.Usage.CompletionTokens = claudeResponse.Usage.OutputTokens
-				claudeInfo.Usage.TotalTokens = claudeInfo.Usage.PromptTokens + claudeInfo.Usage.CompletionTokens
 			}
 		}
 		helper.ClaudeChunkData(c, claudeResponse, data)
@@ -544,29 +563,25 @@ func HandleStreamResponseData(c *gin.Context, info *relaycommon.RelayInfo, claud
 }
 
 func HandleStreamFinalResponse(c *gin.Context, info *relaycommon.RelayInfo, claudeInfo *ClaudeResponseInfo, requestMode int) {
-	if info.RelayFormat == relaycommon.RelayFormatClaude {
-		if requestMode == RequestModeCompletion {
-			claudeInfo.Usage, _ = service.ResponseText2Usage(claudeInfo.ResponseText.String(), info.UpstreamModelName, info.PromptTokens)
-		} else {
-			// 说明流模式建立失败,可能为官方出错
-			if claudeInfo.Usage.PromptTokens == 0 {
-				//usage.PromptTokens = info.PromptTokens
-			}
-			if claudeInfo.Usage.CompletionTokens == 0 {
-				claudeInfo.Usage, _ = service.ResponseText2Usage(claudeInfo.ResponseText.String(), info.UpstreamModelName, claudeInfo.Usage.PromptTokens)
-			}
+
+	if requestMode == RequestModeCompletion {
+		claudeInfo.Usage = service.ResponseText2Usage(claudeInfo.ResponseText.String(), info.UpstreamModelName, info.PromptTokens)
+	} else {
+		if claudeInfo.Usage.PromptTokens == 0 {
+			//上游出错
 		}
-	} else if info.RelayFormat == relaycommon.RelayFormatOpenAI {
-		if requestMode == RequestModeCompletion {
-			claudeInfo.Usage, _ = service.ResponseText2Usage(claudeInfo.ResponseText.String(), info.UpstreamModelName, info.PromptTokens)
-		} else {
-			if claudeInfo.Usage.PromptTokens == 0 {
-				//上游出错
-			}
-			if claudeInfo.Usage.CompletionTokens == 0 {
-				claudeInfo.Usage, _ = service.ResponseText2Usage(claudeInfo.ResponseText.String(), info.UpstreamModelName, claudeInfo.Usage.PromptTokens)
+		if claudeInfo.Usage.CompletionTokens == 0 || !claudeInfo.Done {
+			if common.DebugEnabled {
+				common.SysError("claude response usage is not complete, maybe upstream error")
 			}
+			claudeInfo.Usage = service.ResponseText2Usage(claudeInfo.ResponseText.String(), info.UpstreamModelName, claudeInfo.Usage.PromptTokens)
 		}
+	}
+
+	if info.RelayFormat == relaycommon.RelayFormatClaude {
+		//
+	} else if info.RelayFormat == relaycommon.RelayFormatOpenAI {
+
 		if info.ShouldIncludeUsage {
 			response := helper.GenerateFinalUsageResponse(claudeInfo.ResponseId, claudeInfo.Created, info.UpstreamModelName, *claudeInfo.Usage)
 			err := helper.ObjectData(c, response)
@@ -604,7 +619,7 @@ func ClaudeStreamHandler(c *gin.Context, resp *http.Response, info *relaycommon.
 
 func HandleClaudeResponseData(c *gin.Context, info *relaycommon.RelayInfo, claudeInfo *ClaudeResponseInfo, data []byte, requestMode int) *dto.OpenAIErrorWithStatusCode {
 	var claudeResponse dto.ClaudeResponse
-	err := common.DecodeJson(data, &claudeResponse)
+	err := common.UnmarshalJson(data, &claudeResponse)
 	if err != nil {
 		return service.OpenAIErrorWrapper(err, "unmarshal_claude_response_failed", http.StatusInternalServerError)
 	}
@@ -619,10 +634,7 @@ func HandleClaudeResponseData(c *gin.Context, info *relaycommon.RelayInfo, claud
 		}
 	}
 	if requestMode == RequestModeCompletion {
-		completionTokens, err := service.CountTextToken(claudeResponse.Completion, info.OriginModelName)
-		if err != nil {
-			return service.OpenAIErrorWrapper(err, "count_token_text_failed", http.StatusInternalServerError)
-		}
+		completionTokens := service.CountTextToken(claudeResponse.Completion, info.OriginModelName)
 		claudeInfo.Usage.PromptTokens = info.PromptTokens
 		claudeInfo.Usage.CompletionTokens = completionTokens
 		claudeInfo.Usage.TotalTokens = info.PromptTokens + completionTokens
@@ -645,13 +657,14 @@ func HandleClaudeResponseData(c *gin.Context, info *relaycommon.RelayInfo, claud
 	case relaycommon.RelayFormatClaude:
 		responseData = data
 	}
-	c.Writer.Header().Set("Content-Type", "application/json")
-	c.Writer.WriteHeader(http.StatusOK)
-	_, err = c.Writer.Write(responseData)
+
+	common.IOCopyBytesGracefully(c, nil, responseData)
 	return nil
 }
 
 func ClaudeHandler(c *gin.Context, resp *http.Response, requestMode int, info *relaycommon.RelayInfo) (*dto.OpenAIErrorWithStatusCode, *dto.Usage) {
+	defer common.CloseResponseBodyGracefully(resp)
+
 	claudeInfo := &ClaudeResponseInfo{
 		ResponseId:   fmt.Sprintf("chatcmpl-%s", common.GetUUID()),
 		Created:      common.GetTimestamp(),
@@ -663,7 +676,6 @@ func ClaudeHandler(c *gin.Context, resp *http.Response, requestMode int, info *r
 	if err != nil {
 		return service.OpenAIErrorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil
 	}
-	resp.Body.Close()
 	if common.DebugEnabled {
 		println("responseBody: ", string(responseBody))
 	}

+ 6 - 15
relay/channel/cloudflare/relay_cloudflare.go

@@ -71,7 +71,7 @@ func cfStreamHandler(c *gin.Context, resp *http.Response, info *relaycommon.Rela
 	if err := scanner.Err(); err != nil {
 		common.LogError(c, "error_scanning_stream_response: "+err.Error())
 	}
-	usage, _ := service.ResponseText2Usage(responseText, info.UpstreamModelName, info.PromptTokens)
+	usage := service.ResponseText2Usage(responseText, info.UpstreamModelName, info.PromptTokens)
 	if info.ShouldIncludeUsage {
 		response := helper.GenerateFinalUsageResponse(id, info.StartTime.Unix(), info.UpstreamModelName, *usage)
 		err := helper.ObjectData(c, response)
@@ -81,10 +81,7 @@ func cfStreamHandler(c *gin.Context, resp *http.Response, info *relaycommon.Rela
 	}
 	helper.Done(c)
 
-	err := resp.Body.Close()
-	if err != nil {
-		common.LogError(c, "close_response_body_failed: "+err.Error())
-	}
+	common.CloseResponseBodyGracefully(resp)
 
 	return nil, usage
 }
@@ -94,10 +91,7 @@ func cfHandler(c *gin.Context, resp *http.Response, info *relaycommon.RelayInfo)
 	if err != nil {
 		return service.OpenAIErrorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil
 	}
-	err = resp.Body.Close()
-	if err != nil {
-		return service.OpenAIErrorWrapperLocal(err, "close_response_body_failed", http.StatusInternalServerError), nil
-	}
+	common.CloseResponseBodyGracefully(resp)
 	var response dto.TextResponse
 	err = json.Unmarshal(responseBody, &response)
 	if err != nil {
@@ -108,7 +102,7 @@ func cfHandler(c *gin.Context, resp *http.Response, info *relaycommon.RelayInfo)
 	for _, choice := range response.Choices {
 		responseText += choice.Message.StringContent()
 	}
-	usage, _ := service.ResponseText2Usage(responseText, info.UpstreamModelName, info.PromptTokens)
+	usage := service.ResponseText2Usage(responseText, info.UpstreamModelName, info.PromptTokens)
 	response.Usage = *usage
 	response.Id = helper.GetResponseID(c)
 	jsonResponse, err := json.Marshal(response)
@@ -127,10 +121,7 @@ func cfSTTHandler(c *gin.Context, resp *http.Response, info *relaycommon.RelayIn
 	if err != nil {
 		return service.OpenAIErrorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil
 	}
-	err = resp.Body.Close()
-	if err != nil {
-		return service.OpenAIErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
-	}
+	common.CloseResponseBodyGracefully(resp)
 	err = json.Unmarshal(responseBody, &cfResp)
 	if err != nil {
 		return service.OpenAIErrorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil
@@ -150,7 +141,7 @@ func cfSTTHandler(c *gin.Context, resp *http.Response, info *relaycommon.RelayIn
 
 	usage := &dto.Usage{}
 	usage.PromptTokens = info.PromptTokens
-	usage.CompletionTokens, _ = service.CountTextToken(cfResp.Result.Text, info.UpstreamModelName)
+	usage.CompletionTokens = service.CountTextToken(cfResp.Result.Text, info.UpstreamModelName)
 	usage.TotalTokens = usage.PromptTokens + usage.CompletionTokens
 
 	return nil, usage

+ 4 - 11
relay/channel/cohere/relay-cohere.go

@@ -3,7 +3,6 @@ package cohere
 import (
 	"bufio"
 	"encoding/json"
-	"fmt"
 	"github.com/gin-gonic/gin"
 	"io"
 	"net/http"
@@ -78,7 +77,7 @@ func stopReasonCohere2OpenAI(reason string) string {
 }
 
 func cohereStreamHandler(c *gin.Context, resp *http.Response, info *relaycommon.RelayInfo) (*dto.OpenAIErrorWithStatusCode, *dto.Usage) {
-	responseId := fmt.Sprintf("chatcmpl-%s", common.GetUUID())
+	responseId := helper.GetResponseID(c)
 	createdTime := common.GetTimestamp()
 	usage := &dto.Usage{}
 	responseText := ""
@@ -163,7 +162,7 @@ func cohereStreamHandler(c *gin.Context, resp *http.Response, info *relaycommon.
 		}
 	})
 	if usage.PromptTokens == 0 {
-		usage, _ = service.ResponseText2Usage(responseText, info.UpstreamModelName, info.PromptTokens)
+		usage = service.ResponseText2Usage(responseText, info.UpstreamModelName, info.PromptTokens)
 	}
 	return nil, usage
 }
@@ -174,10 +173,7 @@ func cohereHandler(c *gin.Context, resp *http.Response, modelName string, prompt
 	if err != nil {
 		return service.OpenAIErrorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil
 	}
-	err = resp.Body.Close()
-	if err != nil {
-		return service.OpenAIErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
-	}
+	common.CloseResponseBodyGracefully(resp)
 	var cohereResp CohereResponseResult
 	err = json.Unmarshal(responseBody, &cohereResp)
 	if err != nil {
@@ -218,10 +214,7 @@ func cohereRerankHandler(c *gin.Context, resp *http.Response, info *relaycommon.
 	if err != nil {
 		return service.OpenAIErrorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil
 	}
-	err = resp.Body.Close()
-	if err != nil {
-		return service.OpenAIErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
-	}
+	common.CloseResponseBodyGracefully(resp)
 	var cohereResp CohereRerankResponseResult
 	err = json.Unmarshal(responseBody, &cohereResp)
 	if err != nil {

+ 6 - 11
relay/channel/coze/relay-coze.go

@@ -48,10 +48,7 @@ func cozeChatHandler(c *gin.Context, resp *http.Response, info *relaycommon.Rela
 	if err != nil {
 		return service.OpenAIErrorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil
 	}
-	err = resp.Body.Close()
-	if err != nil {
-		return service.OpenAIErrorWrapperLocal(err, "close_response_body_failed", http.StatusInternalServerError), nil
-	}
+	common.CloseResponseBodyGracefully(resp)
 	// convert coze response to openai response
 	var response dto.TextResponse
 	var cozeResponse CozeChatDetailResponse
@@ -106,7 +103,7 @@ func cozeChatStreamHandler(c *gin.Context, resp *http.Response, info *relaycommo
 
 	var currentEvent string
 	var currentData string
-	var usage dto.Usage
+	var usage = &dto.Usage{}
 
 	for scanner.Scan() {
 		line := scanner.Text()
@@ -114,7 +111,7 @@ func cozeChatStreamHandler(c *gin.Context, resp *http.Response, info *relaycommo
 		if line == "" {
 			if currentEvent != "" && currentData != "" {
 				// handle last event
-				handleCozeEvent(c, currentEvent, currentData, &responseText, &usage, id, info)
+				handleCozeEvent(c, currentEvent, currentData, &responseText, usage, id, info)
 				currentEvent = ""
 				currentData = ""
 			}
@@ -134,7 +131,7 @@ func cozeChatStreamHandler(c *gin.Context, resp *http.Response, info *relaycommo
 
 	// Last event
 	if currentEvent != "" && currentData != "" {
-		handleCozeEvent(c, currentEvent, currentData, &responseText, &usage, id, info)
+		handleCozeEvent(c, currentEvent, currentData, &responseText, usage, id, info)
 	}
 
 	if err := scanner.Err(); err != nil {
@@ -143,12 +140,10 @@ func cozeChatStreamHandler(c *gin.Context, resp *http.Response, info *relaycommo
 	helper.Done(c)
 
 	if usage.TotalTokens == 0 {
-		usage.PromptTokens = info.PromptTokens
-		usage.CompletionTokens, _ = service.CountTextToken("gpt-3.5-turbo", responseText)
-		usage.TotalTokens = usage.PromptTokens + usage.CompletionTokens
+		usage = service.ResponseText2Usage(responseText, info.UpstreamModelName, c.GetInt("coze_input_count"))
 	}
 
-	return nil, &usage
+	return nil, usage
 }
 
 func handleCozeEvent(c *gin.Context, event string, data string, responseText *string, usage *dto.Usage, id string, info *relaycommon.RelayInfo) {

+ 3 - 13
relay/channel/dify/relay-dify.go

@@ -95,7 +95,7 @@ func uploadDifyFile(c *gin.Context, info *relaycommon.RelayInfo, user string, me
 		req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", info.ApiKey))
 
 		// Send request
-		client := service.GetImpatientHttpClient()
+		client := service.GetHttpClient()
 		resp, err := client.Do(req)
 		if err != nil {
 			common.SysError("failed to send request: " + err.Error())
@@ -243,15 +243,8 @@ func difyStreamHandler(c *gin.Context, resp *http.Response, info *relaycommon.Re
 		return true
 	})
 	helper.Done(c)
-	err := resp.Body.Close()
-	if err != nil {
-		// return service.OpenAIErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
-		common.SysError("close_response_body_failed: " + err.Error())
-	}
 	if usage.TotalTokens == 0 {
-		usage.PromptTokens = info.PromptTokens
-		usage.CompletionTokens, _ = service.CountTextToken("gpt-3.5-turbo", responseText)
-		usage.TotalTokens = usage.PromptTokens + usage.CompletionTokens
+		usage = service.ResponseText2Usage(responseText, info.UpstreamModelName, info.PromptTokens)
 	}
 	usage.CompletionTokens += nodeToken
 	return nil, usage
@@ -264,10 +257,7 @@ func difyHandler(c *gin.Context, resp *http.Response, info *relaycommon.RelayInf
 	if err != nil {
 		return service.OpenAIErrorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil
 	}
-	err = resp.Body.Close()
-	if err != nil {
-		return service.OpenAIErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
-	}
+	common.CloseResponseBodyGracefully(resp)
 	err = json.Unmarshal(responseBody, &difyResponse)
 	if err != nil {
 		return service.OpenAIErrorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil

+ 6 - 3
relay/channel/gemini/adaptor.go

@@ -72,10 +72,13 @@ func (a *Adaptor) Init(info *relaycommon.RelayInfo) {
 func (a *Adaptor) GetRequestURL(info *relaycommon.RelayInfo) (string, error) {
 
 	if model_setting.GetGeminiSettings().ThinkingAdapterEnabled {
-		// suffix -thinking and -nothinking
-		if strings.HasSuffix(info.OriginModelName, "-thinking") {
+		// 新增逻辑:处理 -thinking-<budget> 格式
+		if strings.Contains(info.UpstreamModelName, "-thinking-") {
+			parts := strings.Split(info.UpstreamModelName, "-thinking-")
+			info.UpstreamModelName = parts[0]
+		} else if strings.HasSuffix(info.UpstreamModelName, "-thinking") { // 旧的适配
 			info.UpstreamModelName = strings.TrimSuffix(info.UpstreamModelName, "-thinking")
-		} else if strings.HasSuffix(info.OriginModelName, "-nothinking") {
+		} else if strings.HasSuffix(info.UpstreamModelName, "-nothinking") {
 			info.UpstreamModelName = strings.TrimSuffix(info.UpstreamModelName, "-nothinking")
 		}
 	}

+ 1 - 0
relay/channel/gemini/dto.go

@@ -140,6 +140,7 @@ type GeminiChatGenerationConfig struct {
 	Seed               int64                 `json:"seed,omitempty"`
 	ResponseModalities []string              `json:"responseModalities,omitempty"`
 	ThinkingConfig     *GeminiThinkingConfig `json:"thinkingConfig,omitempty"`
+	SpeechConfig       json.RawMessage       `json:"speechConfig,omitempty"` // RawMessage to allow flexible speech config
 }
 
 type GeminiChatCandidate struct {

+ 25 - 33
relay/channel/gemini/relay-gemini-native.go

@@ -1,7 +1,6 @@
 package gemini
 
 import (
-	"encoding/json"
 	"io"
 	"net/http"
 	"one-api/common"
@@ -9,20 +8,19 @@ import (
 	relaycommon "one-api/relay/common"
 	"one-api/relay/helper"
 	"one-api/service"
+	"strings"
 
 	"github.com/gin-gonic/gin"
 )
 
 func GeminiTextGenerationHandler(c *gin.Context, resp *http.Response, info *relaycommon.RelayInfo) (*dto.Usage, *dto.OpenAIErrorWithStatusCode) {
+	defer common.CloseResponseBodyGracefully(resp)
+
 	// 读取响应体
 	responseBody, err := io.ReadAll(resp.Body)
 	if err != nil {
 		return nil, service.OpenAIErrorWrapper(err, "read_response_body_failed", http.StatusInternalServerError)
 	}
-	err = resp.Body.Close()
-	if err != nil {
-		return nil, service.OpenAIErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError)
-	}
 
 	if common.DebugEnabled {
 		println(string(responseBody))
@@ -30,28 +28,15 @@ func GeminiTextGenerationHandler(c *gin.Context, resp *http.Response, info *rela
 
 	// 解析为 Gemini 原生响应格式
 	var geminiResponse GeminiChatResponse
-	err = common.DecodeJson(responseBody, &geminiResponse)
+	err = common.UnmarshalJson(responseBody, &geminiResponse)
 	if err != nil {
 		return nil, service.OpenAIErrorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError)
 	}
 
-	// 检查是否有候选响应
-	if len(geminiResponse.Candidates) == 0 {
-		return nil, &dto.OpenAIErrorWithStatusCode{
-			Error: dto.OpenAIError{
-				Message: "No candidates returned",
-				Type:    "server_error",
-				Param:   "",
-				Code:    500,
-			},
-			StatusCode: resp.StatusCode,
-		}
-	}
-
 	// 计算使用量(基于 UsageMetadata)
 	usage := dto.Usage{
 		PromptTokens:     geminiResponse.UsageMetadata.PromptTokenCount,
-		CompletionTokens: geminiResponse.UsageMetadata.CandidatesTokenCount,
+		CompletionTokens: geminiResponse.UsageMetadata.CandidatesTokenCount + geminiResponse.UsageMetadata.ThoughtsTokenCount,
 		TotalTokens:      geminiResponse.UsageMetadata.TotalTokenCount,
 	}
 
@@ -66,18 +51,12 @@ func GeminiTextGenerationHandler(c *gin.Context, resp *http.Response, info *rela
 	}
 
 	// 直接返回 Gemini 原生格式的 JSON 响应
-	jsonResponse, err := json.Marshal(geminiResponse)
+	jsonResponse, err := common.EncodeJson(geminiResponse)
 	if err != nil {
 		return nil, service.OpenAIErrorWrapper(err, "marshal_response_body_failed", http.StatusInternalServerError)
 	}
 
-	// 设置响应头并写入响应
-	c.Writer.Header().Set("Content-Type", "application/json")
-	c.Writer.WriteHeader(resp.StatusCode)
-	_, err = c.Writer.Write(jsonResponse)
-	if err != nil {
-		return nil, service.OpenAIErrorWrapper(err, "write_response_failed", http.StatusInternalServerError)
-	}
+	common.IOCopyBytesGracefully(c, resp, jsonResponse)
 
 	return &usage, nil
 }
@@ -88,9 +67,11 @@ func GeminiTextGenerationStreamHandler(c *gin.Context, resp *http.Response, info
 
 	helper.SetEventStreamHeaders(c)
 
+	responseText := strings.Builder{}
+
 	helper.StreamScannerHandler(c, resp, info, func(data string) bool {
 		var geminiResponse GeminiChatResponse
-		err := common.DecodeJsonStr(data, &geminiResponse)
+		err := common.UnmarshalJsonStr(data, &geminiResponse)
 		if err != nil {
 			common.LogError(c, "error unmarshalling stream response: "+err.Error())
 			return false
@@ -102,13 +83,16 @@ func GeminiTextGenerationStreamHandler(c *gin.Context, resp *http.Response, info
 				if part.InlineData != nil && part.InlineData.MimeType != "" {
 					imageCount++
 				}
+				if part.Text != "" {
+					responseText.WriteString(part.Text)
+				}
 			}
 		}
 
 		// 更新使用量统计
 		if geminiResponse.UsageMetadata.TotalTokenCount != 0 {
 			usage.PromptTokens = geminiResponse.UsageMetadata.PromptTokenCount
-			usage.CompletionTokens = geminiResponse.UsageMetadata.CandidatesTokenCount
+			usage.CompletionTokens = geminiResponse.UsageMetadata.CandidatesTokenCount + geminiResponse.UsageMetadata.ThoughtsTokenCount
 			usage.TotalTokens = geminiResponse.UsageMetadata.TotalTokenCount
 			usage.CompletionTokenDetails.ReasoningTokens = geminiResponse.UsageMetadata.ThoughtsTokenCount
 			for _, detail := range geminiResponse.UsageMetadata.PromptTokensDetails {
@@ -121,7 +105,7 @@ func GeminiTextGenerationStreamHandler(c *gin.Context, resp *http.Response, info
 		}
 
 		// 直接发送 GeminiChatResponse 响应
-		err = helper.ObjectData(c, geminiResponse)
+		err = helper.StringData(c, data)
 		if err != nil {
 			common.LogError(c, err.Error())
 		}
@@ -135,8 +119,16 @@ func GeminiTextGenerationStreamHandler(c *gin.Context, resp *http.Response, info
 		}
 	}
 
-	// 计算最终使用量
-	usage.CompletionTokens = usage.TotalTokens - usage.PromptTokens
+	// 如果usage.CompletionTokens为0,则使用本地统计的completion tokens
+	if usage.CompletionTokens == 0 {
+		str := responseText.String()
+		if len(str) > 0 {
+			usage = service.ResponseText2Usage(responseText.String(), info.UpstreamModelName, info.PromptTokens)
+		} else {
+			// 空补全,不需要使用量
+			usage = &dto.Usage{}
+		}
+	}
 
 	// 移除流式响应结尾的[Done],因为Gemini API没有发送Done的行为
 	//helper.Done(c)

+ 101 - 66
relay/channel/gemini/relay-gemini.go

@@ -12,6 +12,7 @@ import (
 	"one-api/relay/helper"
 	"one-api/service"
 	"one-api/setting/model_setting"
+	"strconv"
 	"strings"
 	"unicode/utf8"
 
@@ -36,37 +37,73 @@ var geminiSupportedMimeTypes = map[string]bool{
 	"video/flv":       true,
 }
 
-// Setting safety to the lowest possible values since Gemini is already powerless enough
-func CovertGemini2OpenAI(textRequest dto.GeneralOpenAIRequest, info *relaycommon.RelayInfo) (*GeminiChatRequest, error) {
+// Gemini 允许的思考预算范围
+const (
+	pro25MinBudget       = 128
+	pro25MaxBudget       = 32768
+	flash25MaxBudget     = 24576
+	flash25LiteMinBudget = 512
+	flash25LiteMaxBudget = 24576
+)
 
-	geminiRequest := GeminiChatRequest{
-		Contents: make([]GeminiChatContent, 0, len(textRequest.Messages)),
-		GenerationConfig: GeminiChatGenerationConfig{
-			Temperature:     textRequest.Temperature,
-			TopP:            textRequest.TopP,
-			MaxOutputTokens: textRequest.MaxTokens,
-			Seed:            int64(textRequest.Seed),
-		},
-	}
+// clampThinkingBudget 根据模型名称将预算限制在允许的范围内
+func clampThinkingBudget(modelName string, budget int) int {
+	isNew25Pro := strings.HasPrefix(modelName, "gemini-2.5-pro") &&
+		!strings.HasPrefix(modelName, "gemini-2.5-pro-preview-05-06") &&
+		!strings.HasPrefix(modelName, "gemini-2.5-pro-preview-03-25")
+	is25FlashLite := strings.HasPrefix(modelName, "gemini-2.5-flash-lite")
 
-	if model_setting.IsGeminiModelSupportImagine(info.UpstreamModelName) {
-		geminiRequest.GenerationConfig.ResponseModalities = []string{
-			"TEXT",
-			"IMAGE",
+	if is25FlashLite {
+		if budget < flash25LiteMinBudget {
+			return flash25LiteMinBudget
+		}
+		if budget > flash25LiteMaxBudget {
+			return flash25LiteMaxBudget
+		}
+	} else if isNew25Pro {
+		if budget < pro25MinBudget {
+			return pro25MinBudget
+		}
+		if budget > pro25MaxBudget {
+			return pro25MaxBudget
+		}
+	} else { // 其他模型
+		if budget < 0 {
+			return 0
+		}
+		if budget > flash25MaxBudget {
+			return flash25MaxBudget
 		}
 	}
+	return budget
+}
 
+func ThinkingAdaptor(geminiRequest *GeminiChatRequest, info *relaycommon.RelayInfo) {
 	if model_setting.GetGeminiSettings().ThinkingAdapterEnabled {
-		if strings.HasSuffix(info.OriginModelName, "-thinking") {
-			// 硬编码不支持 ThinkingBudget 的旧模型
+		modelName := info.UpstreamModelName
+		isNew25Pro := strings.HasPrefix(modelName, "gemini-2.5-pro") &&
+			!strings.HasPrefix(modelName, "gemini-2.5-pro-preview-05-06") &&
+			!strings.HasPrefix(modelName, "gemini-2.5-pro-preview-03-25")
+
+		if strings.Contains(modelName, "-thinking-") {
+			parts := strings.SplitN(modelName, "-thinking-", 2)
+			if len(parts) == 2 && parts[1] != "" {
+				if budgetTokens, err := strconv.Atoi(parts[1]); err == nil {
+					clampedBudget := clampThinkingBudget(modelName, budgetTokens)
+					geminiRequest.GenerationConfig.ThinkingConfig = &GeminiThinkingConfig{
+						ThinkingBudget:  common.GetPointer(clampedBudget),
+						IncludeThoughts: true,
+					}
+				}
+			}
+		} else if strings.HasSuffix(modelName, "-thinking") {
 			unsupportedModels := []string{
 				"gemini-2.5-pro-preview-05-06",
 				"gemini-2.5-pro-preview-03-25",
 			}
-
 			isUnsupported := false
 			for _, unsupportedModel := range unsupportedModels {
-				if strings.HasPrefix(info.OriginModelName, unsupportedModel) {
+				if strings.HasPrefix(modelName, unsupportedModel) {
 					isUnsupported = true
 					break
 				}
@@ -77,46 +114,46 @@ func CovertGemini2OpenAI(textRequest dto.GeneralOpenAIRequest, info *relaycommon
 					IncludeThoughts: true,
 				}
 			} else {
-				budgetTokens := model_setting.GetGeminiSettings().ThinkingAdapterBudgetTokensPercentage * float64(geminiRequest.GenerationConfig.MaxOutputTokens)
-
-				// 检查是否为新的2.5pro模型(支持ThinkingBudget但有特殊范围)
-				isNew25Pro := strings.HasPrefix(info.OriginModelName, "gemini-2.5-pro") &&
-					!strings.HasPrefix(info.OriginModelName, "gemini-2.5-pro-preview-05-06") &&
-					!strings.HasPrefix(info.OriginModelName, "gemini-2.5-pro-preview-03-25")
-
-				if isNew25Pro {
-					// 新的2.5pro模型:ThinkingBudget范围为128-32768
-					if budgetTokens == 0 || budgetTokens < 128 {
-						budgetTokens = 128
-					} else if budgetTokens > 32768 {
-						budgetTokens = 32768
-					}
-				} else {
-					// 其他模型:ThinkingBudget范围为0-24576
-					if budgetTokens == 0 || budgetTokens > 24576 {
-						budgetTokens = 24576
-					}
-				}
-
 				geminiRequest.GenerationConfig.ThinkingConfig = &GeminiThinkingConfig{
-					ThinkingBudget:  common.GetPointer(int(budgetTokens)),
 					IncludeThoughts: true,
 				}
+				if geminiRequest.GenerationConfig.MaxOutputTokens > 0 {
+					budgetTokens := model_setting.GetGeminiSettings().ThinkingAdapterBudgetTokensPercentage * float64(geminiRequest.GenerationConfig.MaxOutputTokens)
+					clampedBudget := clampThinkingBudget(modelName, int(budgetTokens))
+					geminiRequest.GenerationConfig.ThinkingConfig.ThinkingBudget = common.GetPointer(clampedBudget)
+				}
 			}
-		} else if strings.HasSuffix(info.OriginModelName, "-nothinking") {
-			// 检查是否为新的2.5pro模型(不支持-nothinking,因为最低值只能为128)
-			isNew25Pro := strings.HasPrefix(info.OriginModelName, "gemini-2.5-pro") &&
-				!strings.HasPrefix(info.OriginModelName, "gemini-2.5-pro-preview-05-06") &&
-				!strings.HasPrefix(info.OriginModelName, "gemini-2.5-pro-preview-03-25")
-
+		} else if strings.HasSuffix(modelName, "-nothinking") {
 			if !isNew25Pro {
-				// 只有非新2.5pro模型才支持-nothinking
 				geminiRequest.GenerationConfig.ThinkingConfig = &GeminiThinkingConfig{
 					ThinkingBudget: common.GetPointer(0),
 				}
 			}
 		}
 	}
+}
+
+// Setting safety to the lowest possible values since Gemini is already powerless enough
+func CovertGemini2OpenAI(textRequest dto.GeneralOpenAIRequest, info *relaycommon.RelayInfo) (*GeminiChatRequest, error) {
+
+	geminiRequest := GeminiChatRequest{
+		Contents: make([]GeminiChatContent, 0, len(textRequest.Messages)),
+		GenerationConfig: GeminiChatGenerationConfig{
+			Temperature:     textRequest.Temperature,
+			TopP:            textRequest.TopP,
+			MaxOutputTokens: textRequest.MaxTokens,
+			Seed:            int64(textRequest.Seed),
+		},
+	}
+
+	if model_setting.IsGeminiModelSupportImagine(info.UpstreamModelName) {
+		geminiRequest.GenerationConfig.ResponseModalities = []string{
+			"TEXT",
+			"IMAGE",
+		}
+	}
+
+	ThinkingAdaptor(&geminiRequest, info)
 
 	safetySettings := make([]GeminiChatSafetySettings, 0, len(SafetySettingList))
 	for _, category := range SafetySettingList {
@@ -283,7 +320,8 @@ func CovertGemini2OpenAI(textRequest dto.GeneralOpenAIRequest, info *relaycommon
 
 					// 校验 MimeType 是否在 Gemini 支持的白名单中
 					if _, ok := geminiSupportedMimeTypes[strings.ToLower(fileData.MimeType)]; !ok {
-						return nil, fmt.Errorf("MIME type '%s' from URL '%s' is not supported by Gemini. Supported types are: %v", fileData.MimeType, part.GetImageMedia().Url, getSupportedMimeTypesList())
+						url := part.GetImageMedia().Url
+						return nil, fmt.Errorf("mime type is not supported by Gemini: '%s', url: '%s', supported types are: %v", fileData.MimeType, url, getSupportedMimeTypesList())
 					}
 
 					parts = append(parts, GeminiPart{
@@ -341,7 +379,9 @@ func CovertGemini2OpenAI(textRequest dto.GeneralOpenAIRequest, info *relaycommon
 		if content.Role == "assistant" {
 			content.Role = "model"
 		}
-		geminiRequest.Contents = append(geminiRequest.Contents, content)
+		if len(content.Parts) > 0 {
+			geminiRequest.Contents = append(geminiRequest.Contents, content)
+		}
 	}
 
 	if len(system_content) > 0 {
@@ -611,9 +651,9 @@ func getResponseToolCall(item *GeminiPart) *dto.ToolCallResponse {
 	}
 }
 
-func responseGeminiChat2OpenAI(response *GeminiChatResponse) *dto.OpenAITextResponse {
+func responseGeminiChat2OpenAI(c *gin.Context, response *GeminiChatResponse) *dto.OpenAITextResponse {
 	fullTextResponse := dto.OpenAITextResponse{
-		Id:      fmt.Sprintf("chatcmpl-%s", common.GetUUID()),
+		Id:      helper.GetResponseID(c),
 		Object:  "chat.completion",
 		Created: common.GetTimestamp(),
 		Choices: make([]dto.OpenAITextResponseChoice, 0, len(response.Candidates)),
@@ -754,14 +794,14 @@ func streamResponseGeminiChat2OpenAI(geminiResponse *GeminiChatResponse) (*dto.C
 
 func GeminiChatStreamHandler(c *gin.Context, resp *http.Response, info *relaycommon.RelayInfo) (*dto.OpenAIErrorWithStatusCode, *dto.Usage) {
 	// responseText := ""
-	id := fmt.Sprintf("chatcmpl-%s", common.GetUUID())
+	id := helper.GetResponseID(c)
 	createAt := common.GetTimestamp()
 	var usage = &dto.Usage{}
 	var imageCount int
 
 	helper.StreamScannerHandler(c, resp, info, func(data string) bool {
 		var geminiResponse GeminiChatResponse
-		err := common.DecodeJsonStr(data, &geminiResponse)
+		err := common.UnmarshalJsonStr(data, &geminiResponse)
 		if err != nil {
 			common.LogError(c, "error unmarshalling stream response: "+err.Error())
 			return false
@@ -826,15 +866,12 @@ func GeminiChatHandler(c *gin.Context, resp *http.Response, info *relaycommon.Re
 	if err != nil {
 		return service.OpenAIErrorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil
 	}
-	err = resp.Body.Close()
-	if err != nil {
-		return service.OpenAIErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
-	}
+	common.CloseResponseBodyGracefully(resp)
 	if common.DebugEnabled {
 		println(string(responseBody))
 	}
 	var geminiResponse GeminiChatResponse
-	err = common.DecodeJson(responseBody, &geminiResponse)
+	err = common.UnmarshalJson(responseBody, &geminiResponse)
 	if err != nil {
 		return service.OpenAIErrorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil
 	}
@@ -849,7 +886,7 @@ func GeminiChatHandler(c *gin.Context, resp *http.Response, info *relaycommon.Re
 			StatusCode: resp.StatusCode,
 		}, nil
 	}
-	fullTextResponse := responseGeminiChat2OpenAI(&geminiResponse)
+	fullTextResponse := responseGeminiChat2OpenAI(c, &geminiResponse)
 	fullTextResponse.Model = info.UpstreamModelName
 	usage := dto.Usage{
 		PromptTokens:     geminiResponse.UsageMetadata.PromptTokenCount,
@@ -880,11 +917,12 @@ func GeminiChatHandler(c *gin.Context, resp *http.Response, info *relaycommon.Re
 }
 
 func GeminiEmbeddingHandler(c *gin.Context, resp *http.Response, info *relaycommon.RelayInfo) (usage any, err *dto.OpenAIErrorWithStatusCode) {
+	defer common.CloseResponseBodyGracefully(resp)
+
 	responseBody, readErr := io.ReadAll(resp.Body)
 	if readErr != nil {
 		return nil, service.OpenAIErrorWrapper(readErr, "read_response_body_failed", http.StatusInternalServerError)
 	}
-	_ = resp.Body.Close()
 
 	var geminiResponse GeminiEmbeddingResponse
 	if jsonErr := json.Unmarshal(responseBody, &geminiResponse); jsonErr != nil {
@@ -916,14 +954,11 @@ func GeminiEmbeddingHandler(c *gin.Context, resp *http.Response, info *relaycomm
 	}
 	openAIResponse.Usage = *usage.(*dto.Usage)
 
-	jsonResponse, jsonErr := json.Marshal(openAIResponse)
+	jsonResponse, jsonErr := common.EncodeJson(openAIResponse)
 	if jsonErr != nil {
 		return nil, service.OpenAIErrorWrapper(jsonErr, "marshal_response_failed", http.StatusInternalServerError)
 	}
 
-	c.Writer.Header().Set("Content-Type", "application/json")
-	c.Writer.WriteHeader(resp.StatusCode)
-	_, _ = c.Writer.Write(jsonResponse)
-
+	common.IOCopyBytesGracefully(c, resp, jsonResponse)
 	return usage, nil
 }

+ 1 - 0
relay/channel/jina/constant.go

@@ -3,6 +3,7 @@ package jina
 var ModelList = []string{
 	"jina-clip-v1",
 	"jina-reranker-v2-base-multilingual",
+	"jina-reranker-m0",
 }
 
 var ChannelName = "jina"

+ 3 - 6
relay/channel/mokaai/relay-mokaai.go

@@ -5,6 +5,7 @@ import (
 	"github.com/gin-gonic/gin"
 	"io"
 	"net/http"
+	"one-api/common"
 	"one-api/dto"
 	"one-api/service"
 )
@@ -26,7 +27,7 @@ func embeddingRequestOpenAI2Moka(request dto.GeneralOpenAIRequest) *dto.Embeddin
 	}
 	return &dto.EmbeddingRequest{
 		Input: input,
-		Model:  request.Model,
+		Model: request.Model,
 	}
 }
 
@@ -53,10 +54,7 @@ func mokaEmbeddingHandler(c *gin.Context, resp *http.Response) (*dto.OpenAIError
 	if err != nil {
 		return service.OpenAIErrorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil
 	}
-	err = resp.Body.Close()
-	if err != nil {
-		return service.OpenAIErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
-	}
+	common.CloseResponseBodyGracefully(resp)
 	err = json.Unmarshal(responseBody, &baiduResponse)
 	if err != nil {
 		return service.OpenAIErrorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil
@@ -80,4 +78,3 @@ func mokaEmbeddingHandler(c *gin.Context, resp *http.Response) (*dto.OpenAIError
 	_, err = c.Writer.Write(jsonResponse)
 	return nil, &fullTextResponse.Usage
 }
-

+ 3 - 30
relay/channel/ollama/relay-ollama.go

@@ -1,12 +1,12 @@
 package ollama
 
 import (
-	"bytes"
 	"encoding/json"
 	"fmt"
 	"github.com/gin-gonic/gin"
 	"io"
 	"net/http"
+	"one-api/common"
 	"one-api/dto"
 	"one-api/service"
 	"strings"
@@ -88,10 +88,7 @@ func ollamaEmbeddingHandler(c *gin.Context, resp *http.Response, promptTokens in
 	if err != nil {
 		return service.OpenAIErrorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil
 	}
-	err = resp.Body.Close()
-	if err != nil {
-		return service.OpenAIErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
-	}
+	common.CloseResponseBodyGracefully(resp)
 	err = json.Unmarshal(responseBody, &ollamaEmbeddingResponse)
 	if err != nil {
 		return service.OpenAIErrorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil
@@ -120,31 +117,7 @@ func ollamaEmbeddingHandler(c *gin.Context, resp *http.Response, promptTokens in
 	if err != nil {
 		return service.OpenAIErrorWrapper(err, "marshal_response_body_failed", http.StatusInternalServerError), nil
 	}
-	resp.Body = io.NopCloser(bytes.NewBuffer(doResponseBody))
-	// We shouldn't set the header before we parse the response body, because the parse part may fail.
-	// And then we will have to send an error response, but in this case, the header has already been set.
-	// So the httpClient will be confused by the response.
-	// For example, Postman will report error, and we cannot check the response at all.
-	// Copy headers
-	for k, v := range resp.Header {
-		// 删除任何现有的相同头部,以防止重复添加头部
-		c.Writer.Header().Del(k)
-		for _, vv := range v {
-			c.Writer.Header().Add(k, vv)
-		}
-	}
-	// reset content length
-	c.Writer.Header().Del("Content-Length")
-	c.Writer.Header().Set("Content-Length", fmt.Sprintf("%d", len(doResponseBody)))
-	c.Writer.WriteHeader(resp.StatusCode)
-	_, err = io.Copy(c.Writer, resp.Body)
-	if err != nil {
-		return service.OpenAIErrorWrapper(err, "copy_response_body_failed", http.StatusInternalServerError), nil
-	}
-	err = resp.Body.Close()
-	if err != nil {
-		return service.OpenAIErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
-	}
+	common.IOCopyBytesGracefully(c, resp, doResponseBody)
 	return nil, usage
 }
 

+ 52 - 41
relay/channel/openai/adaptor.go

@@ -9,8 +9,7 @@ import (
 	"mime/multipart"
 	"net/http"
 	"net/textproto"
-	"one-api/common"
-	constant2 "one-api/constant"
+	"one-api/constant"
 	"one-api/dto"
 	"one-api/relay/channel"
 	"one-api/relay/channel/ai360"
@@ -21,7 +20,7 @@ import (
 	"one-api/relay/channel/xinference"
 	relaycommon "one-api/relay/common"
 	"one-api/relay/common_handler"
-	"one-api/relay/constant"
+	relayconstant "one-api/relay/constant"
 	"one-api/service"
 	"path/filepath"
 	"strings"
@@ -54,7 +53,7 @@ func (a *Adaptor) Init(info *relaycommon.RelayInfo) {
 	a.ChannelType = info.ChannelType
 
 	// initialize ThinkingContentInfo when thinking_to_content is enabled
-	if think2Content, ok := info.ChannelSetting[constant2.ChannelSettingThinkingToContent].(bool); ok && think2Content {
+	if think2Content, ok := info.ChannelSetting[constant.ChannelSettingThinkingToContent].(bool); ok && think2Content {
 		info.ThinkingContentInfo = relaycommon.ThinkingContentInfo{
 			IsFirstThinkingContent:  true,
 			SendLastThinkingContent: false,
@@ -67,7 +66,7 @@ func (a *Adaptor) GetRequestURL(info *relaycommon.RelayInfo) (string, error) {
 	if info.RelayFormat == relaycommon.RelayFormatClaude {
 		return fmt.Sprintf("%s/v1/chat/completions", info.BaseUrl), nil
 	}
-	if info.RelayMode == constant.RelayModeRealtime {
+	if info.RelayMode == relayconstant.RelayModeRealtime {
 		if strings.HasPrefix(info.BaseUrl, "https://") {
 			baseUrl := strings.TrimPrefix(info.BaseUrl, "https://")
 			baseUrl = "wss://" + baseUrl
@@ -79,29 +78,36 @@ func (a *Adaptor) GetRequestURL(info *relaycommon.RelayInfo) (string, error) {
 		}
 	}
 	switch info.ChannelType {
-	case common.ChannelTypeAzure:
+	case constant.ChannelTypeAzure:
 		apiVersion := info.ApiVersion
 		if apiVersion == "" {
-			apiVersion = constant2.AzureDefaultAPIVersion
+			apiVersion = constant.AzureDefaultAPIVersion
 		}
 		// https://learn.microsoft.com/en-us/azure/cognitive-services/openai/chatgpt-quickstart?pivots=rest-api&tabs=command-line#rest-api
 		requestURL := strings.Split(info.RequestURLPath, "?")[0]
 		requestURL = fmt.Sprintf("%s?api-version=%s", requestURL, apiVersion)
 		task := strings.TrimPrefix(requestURL, "/v1/")
+
+		// 特殊处理 responses API
+		if info.RelayMode == relayconstant.RelayModeResponses {
+			requestURL = fmt.Sprintf("/openai/v1/responses?api-version=preview")
+			return relaycommon.GetFullRequestURL(info.BaseUrl, requestURL, info.ChannelType), nil
+		}
+
 		model_ := info.UpstreamModelName
 		// 2025年5月10日后创建的渠道不移除.
-		if info.ChannelCreateTime < constant2.AzureNoRemoveDotTime {
+		if info.ChannelCreateTime < constant.AzureNoRemoveDotTime {
 			model_ = strings.Replace(model_, ".", "", -1)
 		}
 		// https://github.com/songquanpeng/one-api/issues/67
 		requestURL = fmt.Sprintf("/openai/deployments/%s/%s", model_, task)
-		if info.RelayMode == constant.RelayModeRealtime {
+		if info.RelayMode == relayconstant.RelayModeRealtime {
 			requestURL = fmt.Sprintf("/openai/realtime?deployment=%s&api-version=%s", model_, apiVersion)
 		}
 		return relaycommon.GetFullRequestURL(info.BaseUrl, requestURL, info.ChannelType), nil
-	case common.ChannelTypeMiniMax:
+	case constant.ChannelTypeMiniMax:
 		return minimax.GetRequestURL(info)
-	case common.ChannelTypeCustom:
+	case constant.ChannelTypeCustom:
 		url := info.BaseUrl
 		url = strings.Replace(url, "{model}", info.UpstreamModelName, -1)
 		return url, nil
@@ -112,14 +118,14 @@ func (a *Adaptor) GetRequestURL(info *relaycommon.RelayInfo) (string, error) {
 
 func (a *Adaptor) SetupRequestHeader(c *gin.Context, header *http.Header, info *relaycommon.RelayInfo) error {
 	channel.SetupApiRequestHeader(info, c, header)
-	if info.ChannelType == common.ChannelTypeAzure {
+	if info.ChannelType == constant.ChannelTypeAzure {
 		header.Set("api-key", info.ApiKey)
 		return nil
 	}
-	if info.ChannelType == common.ChannelTypeOpenAI && "" != info.Organization {
+	if info.ChannelType == constant.ChannelTypeOpenAI && "" != info.Organization {
 		header.Set("OpenAI-Organization", info.Organization)
 	}
-	if info.RelayMode == constant.RelayModeRealtime {
+	if info.RelayMode == relayconstant.RelayModeRealtime {
 		swp := c.Request.Header.Get("Sec-WebSocket-Protocol")
 		if swp != "" {
 			items := []string{
@@ -138,7 +144,7 @@ func (a *Adaptor) SetupRequestHeader(c *gin.Context, header *http.Header, info *
 	} else {
 		header.Set("Authorization", "Bearer "+info.ApiKey)
 	}
-	if info.ChannelType == common.ChannelTypeOpenRouter {
+	if info.ChannelType == constant.ChannelTypeOpenRouter {
 		header.Set("HTTP-Referer", "https://github.com/Calcium-Ion/new-api")
 		header.Set("X-Title", "New API")
 	}
@@ -149,9 +155,14 @@ func (a *Adaptor) ConvertOpenAIRequest(c *gin.Context, info *relaycommon.RelayIn
 	if request == nil {
 		return nil, errors.New("request is nil")
 	}
-	if info.ChannelType != common.ChannelTypeOpenAI && info.ChannelType != common.ChannelTypeAzure {
+	if info.ChannelType != constant.ChannelTypeOpenAI && info.ChannelType != constant.ChannelTypeAzure {
 		request.StreamOptions = nil
 	}
+	if info.ChannelType == constant.ChannelTypeOpenRouter {
+		if len(request.Usage) == 0 {
+			request.Usage = json.RawMessage(`{"include":true}`)
+		}
+	}
 	if strings.HasPrefix(request.Model, "o") {
 		if request.MaxCompletionTokens == 0 && request.MaxTokens != 0 {
 			request.MaxCompletionTokens = request.MaxTokens
@@ -193,7 +204,7 @@ func (a *Adaptor) ConvertEmbeddingRequest(c *gin.Context, info *relaycommon.Rela
 
 func (a *Adaptor) ConvertAudioRequest(c *gin.Context, info *relaycommon.RelayInfo, request dto.AudioRequest) (io.Reader, error) {
 	a.ResponseFormat = request.ResponseFormat
-	if info.RelayMode == constant.RelayModeAudioSpeech {
+	if info.RelayMode == relayconstant.RelayModeAudioSpeech {
 		jsonData, err := json.Marshal(request)
 		if err != nil {
 			return nil, fmt.Errorf("error marshalling object: %w", err)
@@ -242,7 +253,7 @@ func (a *Adaptor) ConvertAudioRequest(c *gin.Context, info *relaycommon.RelayInf
 
 func (a *Adaptor) ConvertImageRequest(c *gin.Context, info *relaycommon.RelayInfo, request dto.ImageRequest) (any, error) {
 	switch info.RelayMode {
-	case constant.RelayModeImagesEdits:
+	case relayconstant.RelayModeImagesEdits:
 
 		var requestBody bytes.Buffer
 		writer := multipart.NewWriter(&requestBody)
@@ -399,11 +410,11 @@ func (a *Adaptor) ConvertOpenAIResponsesRequest(c *gin.Context, info *relaycommo
 }
 
 func (a *Adaptor) DoRequest(c *gin.Context, info *relaycommon.RelayInfo, requestBody io.Reader) (any, error) {
-	if info.RelayMode == constant.RelayModeAudioTranscription ||
-		info.RelayMode == constant.RelayModeAudioTranslation ||
-		info.RelayMode == constant.RelayModeImagesEdits {
+	if info.RelayMode == relayconstant.RelayModeAudioTranscription ||
+		info.RelayMode == relayconstant.RelayModeAudioTranslation ||
+		info.RelayMode == relayconstant.RelayModeImagesEdits {
 		return channel.DoFormRequest(a, c, info, requestBody)
-	} else if info.RelayMode == constant.RelayModeRealtime {
+	} else if info.RelayMode == relayconstant.RelayModeRealtime {
 		return channel.DoWssRequest(a, c, info, requestBody)
 	} else {
 		return channel.DoApiRequest(a, c, info, requestBody)
@@ -412,19 +423,19 @@ func (a *Adaptor) DoRequest(c *gin.Context, info *relaycommon.RelayInfo, request
 
 func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, info *relaycommon.RelayInfo) (usage any, err *dto.OpenAIErrorWithStatusCode) {
 	switch info.RelayMode {
-	case constant.RelayModeRealtime:
+	case relayconstant.RelayModeRealtime:
 		err, usage = OpenaiRealtimeHandler(c, info)
-	case constant.RelayModeAudioSpeech:
+	case relayconstant.RelayModeAudioSpeech:
 		err, usage = OpenaiTTSHandler(c, resp, info)
-	case constant.RelayModeAudioTranslation:
+	case relayconstant.RelayModeAudioTranslation:
 		fallthrough
-	case constant.RelayModeAudioTranscription:
+	case relayconstant.RelayModeAudioTranscription:
 		err, usage = OpenaiSTTHandler(c, resp, info, a.ResponseFormat)
-	case constant.RelayModeImagesGenerations, constant.RelayModeImagesEdits:
+	case relayconstant.RelayModeImagesGenerations, relayconstant.RelayModeImagesEdits:
 		err, usage = OpenaiHandlerWithUsage(c, resp, info)
-	case constant.RelayModeRerank:
+	case relayconstant.RelayModeRerank:
 		err, usage = common_handler.RerankHandler(c, info, resp)
-	case constant.RelayModeResponses:
+	case relayconstant.RelayModeResponses:
 		if info.IsStream {
 			err, usage = OaiResponsesStreamHandler(c, resp, info)
 		} else {
@@ -442,17 +453,17 @@ func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, info *relaycom
 
 func (a *Adaptor) GetModelList() []string {
 	switch a.ChannelType {
-	case common.ChannelType360:
+	case constant.ChannelType360:
 		return ai360.ModelList
-	case common.ChannelTypeMoonshot:
+	case constant.ChannelTypeMoonshot:
 		return moonshot.ModelList
-	case common.ChannelTypeLingYiWanWu:
+	case constant.ChannelTypeLingYiWanWu:
 		return lingyiwanwu.ModelList
-	case common.ChannelTypeMiniMax:
+	case constant.ChannelTypeMiniMax:
 		return minimax.ModelList
-	case common.ChannelTypeXinference:
+	case constant.ChannelTypeXinference:
 		return xinference.ModelList
-	case common.ChannelTypeOpenRouter:
+	case constant.ChannelTypeOpenRouter:
 		return openrouter.ModelList
 	default:
 		return ModelList
@@ -461,17 +472,17 @@ func (a *Adaptor) GetModelList() []string {
 
 func (a *Adaptor) GetChannelName() string {
 	switch a.ChannelType {
-	case common.ChannelType360:
+	case constant.ChannelType360:
 		return ai360.ChannelName
-	case common.ChannelTypeMoonshot:
+	case constant.ChannelTypeMoonshot:
 		return moonshot.ChannelName
-	case common.ChannelTypeLingYiWanWu:
+	case constant.ChannelTypeLingYiWanWu:
 		return lingyiwanwu.ChannelName
-	case common.ChannelTypeMiniMax:
+	case constant.ChannelTypeMiniMax:
 		return minimax.ChannelName
-	case common.ChannelTypeXinference:
+	case constant.ChannelTypeXinference:
 		return xinference.ChannelName
-	case common.ChannelTypeOpenRouter:
+	case constant.ChannelTypeOpenRouter:
 		return openrouter.ChannelName
 	default:
 		return ChannelName

+ 50 - 110
relay/channel/openai/relay-openai.go

@@ -2,7 +2,6 @@ package openai
 
 import (
 	"bytes"
-	"encoding/json"
 	"fmt"
 	"io"
 	"math"
@@ -15,6 +14,7 @@ import (
 	"one-api/relay/helper"
 	"one-api/service"
 	"os"
+	"path/filepath"
 	"strings"
 
 	"github.com/bytedance/gopkg/util/gopool"
@@ -33,7 +33,7 @@ func sendStreamData(c *gin.Context, info *relaycommon.RelayInfo, data string, fo
 	}
 
 	var lastStreamResponse dto.ChatCompletionsStreamResponse
-	if err := common.DecodeJsonStr(data, &lastStreamResponse); err != nil {
+	if err := common.UnmarshalJsonStr(data, &lastStreamResponse); err != nil {
 		return err
 	}
 
@@ -110,12 +110,13 @@ func OaiStreamHandler(c *gin.Context, resp *http.Response, info *relaycommon.Rel
 		return service.OpenAIErrorWrapper(fmt.Errorf("invalid response"), "invalid_response", http.StatusInternalServerError), nil
 	}
 
-	containStreamUsage := false
+	defer common.CloseResponseBodyGracefully(resp)
+
+	model := info.UpstreamModelName
 	var responseId string
 	var createAt int64 = 0
 	var systemFingerprint string
-	model := info.UpstreamModelName
-
+	var containStreamUsage bool
 	var responseTextBuilder strings.Builder
 	var toolCount int
 	var usage = &dto.Usage{}
@@ -147,31 +148,15 @@ func OaiStreamHandler(c *gin.Context, resp *http.Response, info *relaycommon.Rel
 		return true
 	})
 
+	// 处理最后的响应
 	shouldSendLastResp := true
-	var lastStreamResponse dto.ChatCompletionsStreamResponse
-	err := common.DecodeJsonStr(lastStreamData, &lastStreamResponse)
-	if err == nil {
-		responseId = lastStreamResponse.Id
-		createAt = lastStreamResponse.Created
-		systemFingerprint = lastStreamResponse.GetSystemFingerprint()
-		model = lastStreamResponse.Model
-		if service.ValidUsage(lastStreamResponse.Usage) {
-			containStreamUsage = true
-			usage = lastStreamResponse.Usage
-			if !info.ShouldIncludeUsage {
-				shouldSendLastResp = false
-			}
-		}
-		for _, choice := range lastStreamResponse.Choices {
-			if choice.FinishReason != nil {
-				shouldSendLastResp = true
-			}
-		}
+	if err := handleLastResponse(lastStreamData, &responseId, &createAt, &systemFingerprint, &model, &usage,
+		&containStreamUsage, info, &shouldSendLastResp); err != nil {
+		common.SysError("error handling last response: " + err.Error())
 	}
 
-	if shouldSendLastResp {
-		sendStreamData(c, info, lastStreamData, forceFormat, thinkToContent)
-		//err = handleStreamFormat(c, info, lastStreamData, forceFormat, thinkToContent)
+	if shouldSendLastResp && info.RelayFormat == relaycommon.RelayFormatOpenAI {
+		_ = sendStreamData(c, info, lastStreamData, forceFormat, thinkToContent)
 	}
 
 	// 处理token计算
@@ -180,10 +165,10 @@ func OaiStreamHandler(c *gin.Context, resp *http.Response, info *relaycommon.Rel
 	}
 
 	if !containStreamUsage {
-		usage, _ = service.ResponseText2Usage(responseTextBuilder.String(), info.UpstreamModelName, info.PromptTokens)
+		usage = service.ResponseText2Usage(responseTextBuilder.String(), info.UpstreamModelName, info.PromptTokens)
 		usage.CompletionTokens += toolCount * 7
 	} else {
-		if info.ChannelType == common.ChannelTypeDeepSeek {
+		if info.ChannelType == constant.ChannelTypeDeepSeek {
 			if usage.PromptCacheHitTokens != 0 {
 				usage.PromptTokensDetails.CachedTokens = usage.PromptCacheHitTokens
 			}
@@ -196,16 +181,14 @@ func OaiStreamHandler(c *gin.Context, resp *http.Response, info *relaycommon.Rel
 }
 
 func OpenaiHandler(c *gin.Context, resp *http.Response, info *relaycommon.RelayInfo) (*dto.OpenAIErrorWithStatusCode, *dto.Usage) {
+	defer common.CloseResponseBodyGracefully(resp)
+
 	var simpleResponse dto.OpenAITextResponse
 	responseBody, err := io.ReadAll(resp.Body)
 	if err != nil {
 		return service.OpenAIErrorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil
 	}
-	err = resp.Body.Close()
-	if err != nil {
-		return service.OpenAIErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
-	}
-	err = common.DecodeJson(responseBody, &simpleResponse)
+	err = common.UnmarshalJson(responseBody, &simpleResponse)
 	if err != nil {
 		return service.OpenAIErrorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil
 	}
@@ -215,7 +198,7 @@ func OpenaiHandler(c *gin.Context, resp *http.Response, info *relaycommon.RelayI
 			StatusCode: resp.StatusCode,
 		}, nil
 	}
-	
+
 	forceFormat := false
 	if forceFmt, ok := info.ChannelSetting[constant.ForceFormat].(bool); ok {
 		forceFormat = forceFmt
@@ -224,7 +207,7 @@ func OpenaiHandler(c *gin.Context, resp *http.Response, info *relaycommon.RelayI
 	if simpleResponse.Usage.TotalTokens == 0 || (simpleResponse.Usage.PromptTokens == 0 && simpleResponse.Usage.CompletionTokens == 0) {
 		completionTokens := 0
 		for _, choice := range simpleResponse.Choices {
-			ctkm, _ := service.CountTextToken(choice.Message.StringContent()+choice.Message.ReasoningContent+choice.Message.Reasoning, info.UpstreamModelName)
+			ctkm := service.CountTextToken(choice.Message.StringContent()+choice.Message.ReasoningContent+choice.Message.Reasoning, info.UpstreamModelName)
 			completionTokens += ctkm
 		}
 		simpleResponse.Usage = dto.Usage{
@@ -237,7 +220,7 @@ func OpenaiHandler(c *gin.Context, resp *http.Response, info *relaycommon.RelayI
 	switch info.RelayFormat {
 	case relaycommon.RelayFormatOpenAI:
 		if forceFormat {
-			responseBody, err = json.Marshal(simpleResponse)
+			responseBody, err = common.EncodeJson(simpleResponse)
 			if err != nil {
 				return service.OpenAIErrorWrapper(err, "marshal_response_body_failed", http.StatusInternalServerError), nil
 			}
@@ -246,40 +229,26 @@ func OpenaiHandler(c *gin.Context, resp *http.Response, info *relaycommon.RelayI
 		}
 	case relaycommon.RelayFormatClaude:
 		claudeResp := service.ResponseOpenAI2Claude(&simpleResponse, info)
-		claudeRespStr, err := json.Marshal(claudeResp)
+		claudeRespStr, err := common.EncodeJson(claudeResp)
 		if err != nil {
 			return service.OpenAIErrorWrapper(err, "marshal_response_body_failed", http.StatusInternalServerError), nil
 		}
 		responseBody = claudeRespStr
 	}
 
-	// Reset response body
-	resp.Body = io.NopCloser(bytes.NewBuffer(responseBody))
-	// We shouldn't set the header before we parse the response body, because the parse part may fail.
-	// And then we will have to send an error response, but in this case, the header has already been set.
-	// So the httpClient will be confused by the response.
-	// For example, Postman will report error, and we cannot check the response at all.
-	for k, v := range resp.Header {
-		c.Writer.Header().Set(k, v[0])
-	}
-	c.Writer.WriteHeader(resp.StatusCode)
-	_, err = io.Copy(c.Writer, resp.Body)
-	if err != nil {
-		//return service.OpenAIErrorWrapper(err, "copy_response_body_failed", http.StatusInternalServerError), nil
-		common.SysError("error copying response body: " + err.Error())
-	}
-	resp.Body.Close()
+	common.IOCopyBytesGracefully(c, resp, responseBody)
+
 	return nil, &simpleResponse.Usage
 }
 
 func OpenaiTTSHandler(c *gin.Context, resp *http.Response, info *relaycommon.RelayInfo) (*dto.OpenAIErrorWithStatusCode, *dto.Usage) {
 	// the status code has been judged before, if there is a body reading failure,
 	// it should be regarded as a non-recoverable error, so it should not return err for external retry.
-	// Analogous to nginx's load balancing, it will only retry if it can't be requested or 
-	// if the upstream returns a specific status code, once the upstream has already written the header, 
-	// the subsequent failure of the response body should be regarded as a non-recoverable error, 
+	// Analogous to nginx's load balancing, it will only retry if it can't be requested or
+	// if the upstream returns a specific status code, once the upstream has already written the header,
+	// the subsequent failure of the response body should be regarded as a non-recoverable error,
 	// and can be terminated directly.
-	defer resp.Body.Close()
+	defer common.CloseResponseBodyGracefully(resp)
 	usage := &dto.Usage{}
 	usage.PromptTokens = info.PromptTokens
 	usage.TotalTokens = info.PromptTokens
@@ -296,6 +265,8 @@ func OpenaiTTSHandler(c *gin.Context, resp *http.Response, info *relaycommon.Rel
 }
 
 func OpenaiSTTHandler(c *gin.Context, resp *http.Response, info *relaycommon.RelayInfo, responseFormat string) (*dto.OpenAIErrorWithStatusCode, *dto.Usage) {
+	defer common.CloseResponseBodyGracefully(resp)
+
 	// count tokens by audio file duration
 	audioTokens, err := countAudioTokens(c)
 	if err != nil {
@@ -305,25 +276,8 @@ func OpenaiSTTHandler(c *gin.Context, resp *http.Response, info *relaycommon.Rel
 	if err != nil {
 		return service.OpenAIErrorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil
 	}
-	err = resp.Body.Close()
-	if err != nil {
-		return service.OpenAIErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
-	}
-	// Reset response body
-	resp.Body = io.NopCloser(bytes.NewBuffer(responseBody))
-	// We shouldn't set the header before we parse the response body, because the parse part may fail.
-	// And then we will have to send an error response, but in this case, the header has already been set.
-	// So the httpClient will be confused by the response.
-	// For example, Postman will report error, and we cannot check the response at all.
-	for k, v := range resp.Header {
-		c.Writer.Header().Set(k, v[0])
-	}
-	c.Writer.WriteHeader(resp.StatusCode)
-	_, err = io.Copy(c.Writer, resp.Body)
-	if err != nil {
-		return service.OpenAIErrorWrapper(err, "copy_response_body_failed", http.StatusInternalServerError), nil
-	}
-	resp.Body.Close()
+	// 写入新的 response body
+	common.IOCopyBytesGracefully(c, resp, responseBody)
 
 	usage := &dto.Usage{}
 	usage.PromptTokens = audioTokens
@@ -345,13 +299,14 @@ func countAudioTokens(c *gin.Context) (int, error) {
 	if err = c.ShouldBind(&reqBody); err != nil {
 		return 0, errors.WithStack(err)
 	}
-
+	ext := filepath.Ext(reqBody.File.Filename) // 获取文件扩展名
 	reqFp, err := reqBody.File.Open()
 	if err != nil {
 		return 0, errors.WithStack(err)
 	}
+	defer reqFp.Close()
 
-	tmpFp, err := os.CreateTemp("", "audio-*")
+	tmpFp, err := os.CreateTemp("", "audio-*"+ext)
 	if err != nil {
 		return 0, errors.WithStack(err)
 	}
@@ -365,7 +320,7 @@ func countAudioTokens(c *gin.Context) (int, error) {
 		return 0, errors.WithStack(err)
 	}
 
-	duration, err := common.GetAudioDuration(c.Request.Context(), tmpFp.Name())
+	duration, err := common.GetAudioDuration(c.Request.Context(), tmpFp.Name(), ext)
 	if err != nil {
 		return 0, errors.WithStack(err)
 	}
@@ -413,7 +368,7 @@ func OpenaiRealtimeHandler(c *gin.Context, info *relaycommon.RelayInfo) (*dto.Op
 				}
 
 				realtimeEvent := &dto.RealtimeEvent{}
-				err = json.Unmarshal(message, realtimeEvent)
+				err = common.UnmarshalJson(message, realtimeEvent)
 				if err != nil {
 					errChan <- fmt.Errorf("error unmarshalling message: %v", err)
 					return
@@ -473,7 +428,7 @@ func OpenaiRealtimeHandler(c *gin.Context, info *relaycommon.RelayInfo) (*dto.Op
 				}
 				info.SetFirstResponseTime()
 				realtimeEvent := &dto.RealtimeEvent{}
-				err = json.Unmarshal(message, realtimeEvent)
+				err = common.UnmarshalJson(message, realtimeEvent)
 				if err != nil {
 					errChan <- fmt.Errorf("error unmarshalling message: %v", err)
 					return
@@ -520,9 +475,9 @@ func OpenaiRealtimeHandler(c *gin.Context, info *relaycommon.RelayInfo) (*dto.Op
 						localUsage = &dto.RealtimeUsage{}
 						// print now usage
 					}
-					//common.LogInfo(c, fmt.Sprintf("realtime streaming sumUsage: %v", sumUsage))
-					//common.LogInfo(c, fmt.Sprintf("realtime streaming localUsage: %v", localUsage))
-					//common.LogInfo(c, fmt.Sprintf("realtime streaming localUsage: %v", localUsage))
+					common.LogInfo(c, fmt.Sprintf("realtime streaming sumUsage: %v", sumUsage))
+					common.LogInfo(c, fmt.Sprintf("realtime streaming localUsage: %v", localUsage))
+					common.LogInfo(c, fmt.Sprintf("realtime streaming localUsage: %v", localUsage))
 
 				} else if realtimeEvent.Type == dto.RealtimeEventTypeSessionUpdated || realtimeEvent.Type == dto.RealtimeEventTypeSessionCreated {
 					realtimeSession := realtimeEvent.Session
@@ -599,40 +554,25 @@ func preConsumeUsage(ctx *gin.Context, info *relaycommon.RelayInfo, usage *dto.R
 }
 
 func OpenaiHandlerWithUsage(c *gin.Context, resp *http.Response, info *relaycommon.RelayInfo) (*dto.OpenAIErrorWithStatusCode, *dto.Usage) {
+	defer common.CloseResponseBodyGracefully(resp)
+
 	responseBody, err := io.ReadAll(resp.Body)
 	if err != nil {
 		return service.OpenAIErrorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil
 	}
-	err = resp.Body.Close()
-	if err != nil {
-		return service.OpenAIErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
-	}
-	// Reset response body
-	resp.Body = io.NopCloser(bytes.NewBuffer(responseBody))
-	// We shouldn't set the header before we parse the response body, because the parse part may fail.
-	// And then we will have to send an error response, but in this case, the header has already been set.
-	// So the httpClient will be confused by the response.
-	// For example, Postman will report error, and we cannot check the response at all.
-	for k, v := range resp.Header {
-		c.Writer.Header().Set(k, v[0])
-	}
-	// reset content length
-	c.Writer.Header().Set("Content-Length", fmt.Sprintf("%d", len(responseBody)))
-	c.Writer.WriteHeader(resp.StatusCode)
-	_, err = io.Copy(c.Writer, resp.Body)
-	if err != nil {
-		return service.OpenAIErrorWrapper(err, "copy_response_body_failed", http.StatusInternalServerError), nil
-	}
-	err = resp.Body.Close()
-	if err != nil {
-		return service.OpenAIErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
-	}
 
 	var usageResp dto.SimpleResponse
-	err = json.Unmarshal(responseBody, &usageResp)
+	err = common.UnmarshalJson(responseBody, &usageResp)
 	if err != nil {
 		return service.OpenAIErrorWrapper(err, "parse_response_body_failed", http.StatusInternalServerError), nil
 	}
+
+	// 写入新的 response body
+	common.IOCopyBytesGracefully(c, resp, responseBody)
+
+	// Once we've written to the client, we should not return errors anymore
+	// because the upstream has already consumed resources and returned content
+	// We should still perform billing even if parsing fails
 	// format
 	if usageResp.InputTokens > 0 {
 		usageResp.PromptTokens += usageResp.InputTokens

+ 8 - 24
relay/channel/openai/relay_responses.go

@@ -1,7 +1,6 @@
 package openai
 
 import (
-	"bytes"
 	"fmt"
 	"io"
 	"net/http"
@@ -16,17 +15,15 @@ import (
 )
 
 func OaiResponsesHandler(c *gin.Context, resp *http.Response, info *relaycommon.RelayInfo) (*dto.OpenAIErrorWithStatusCode, *dto.Usage) {
+	defer common.CloseResponseBodyGracefully(resp)
+
 	// read response body
 	var responsesResponse dto.OpenAIResponsesResponse
 	responseBody, err := io.ReadAll(resp.Body)
 	if err != nil {
 		return service.OpenAIErrorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil
 	}
-	err = resp.Body.Close()
-	if err != nil {
-		return service.OpenAIErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
-	}
-	err = common.DecodeJson(responseBody, &responsesResponse)
+	err = common.UnmarshalJson(responseBody, &responsesResponse)
 	if err != nil {
 		return service.OpenAIErrorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil
 	}
@@ -41,22 +38,9 @@ func OaiResponsesHandler(c *gin.Context, resp *http.Response, info *relaycommon.
 		}, nil
 	}
 
-	// reset response body
-	resp.Body = io.NopCloser(bytes.NewBuffer(responseBody))
-	// We shouldn't set the header before we parse the response body, because the parse part may fail.
-	// And then we will have to send an error response, but in this case, the header has already been set.
-	// So the httpClient will be confused by the response.
-	// For example, Postman will report error, and we cannot check the response at all.
-	for k, v := range resp.Header {
-		c.Writer.Header().Set(k, v[0])
-	}
-	c.Writer.WriteHeader(resp.StatusCode)
-	// copy response body
-	_, err = io.Copy(c.Writer, resp.Body)
-	if err != nil {
-		common.SysError("error copying response body: " + err.Error())
-	}
-	resp.Body.Close()
+	// 写入新的 response body
+	common.IOCopyBytesGracefully(c, resp, responseBody)
+
 	// compute usage
 	usage := dto.Usage{}
 	usage.PromptTokens = responsesResponse.Usage.InputTokens
@@ -82,7 +66,7 @@ func OaiResponsesStreamHandler(c *gin.Context, resp *http.Response, info *relayc
 
 		// 检查当前数据是否包含 completed 状态和 usage 信息
 		var streamResponse dto.ResponsesStreamResponse
-		if err := common.DecodeJsonStr(data, &streamResponse); err == nil {
+		if err := common.UnmarshalJsonStr(data, &streamResponse); err == nil {
 			sendResponsesStreamData(c, streamResponse, data)
 			switch streamResponse.Type {
 			case "response.completed":
@@ -110,7 +94,7 @@ func OaiResponsesStreamHandler(c *gin.Context, resp *http.Response, info *relayc
 		tempStr := responseTextBuilder.String()
 		if len(tempStr) > 0 {
 			// 非正常结束,使用输出文本的 token 数量
-			completionTokens, _ := service.CountTextToken(tempStr, info.UpstreamModelName)
+			completionTokens := service.CountTextToken(tempStr, info.UpstreamModelName)
 			usage.CompletionTokens = completionTokens
 		}
 	}

+ 1 - 1
relay/channel/palm/adaptor.go

@@ -74,7 +74,7 @@ func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, info *relaycom
 	if info.IsStream {
 		var responseText string
 		err, responseText = palmStreamHandler(c, resp)
-		usage, _ = service.ResponseText2Usage(responseText, info.UpstreamModelName, info.PromptTokens)
+		usage = service.ResponseText2Usage(responseText, info.UpstreamModelName, info.PromptTokens)
 	} else {
 		err, usage = palmHandler(c, resp, info.PromptTokens, info.UpstreamModelName)
 	}

+ 5 - 17
relay/channel/palm/relay-palm.go

@@ -2,7 +2,6 @@ package palm
 
 import (
 	"encoding/json"
-	"fmt"
 	"github.com/gin-gonic/gin"
 	"io"
 	"net/http"
@@ -73,7 +72,7 @@ func streamResponsePaLM2OpenAI(palmResponse *PaLMChatResponse) *dto.ChatCompleti
 
 func palmStreamHandler(c *gin.Context, resp *http.Response) (*dto.OpenAIErrorWithStatusCode, string) {
 	responseText := ""
-	responseId := fmt.Sprintf("chatcmpl-%s", common.GetUUID())
+	responseId := helper.GetResponseID(c)
 	createdTime := common.GetTimestamp()
 	dataChan := make(chan string)
 	stopChan := make(chan bool)
@@ -84,12 +83,7 @@ func palmStreamHandler(c *gin.Context, resp *http.Response) (*dto.OpenAIErrorWit
 			stopChan <- true
 			return
 		}
-		err = resp.Body.Close()
-		if err != nil {
-			common.SysError("error closing stream response: " + err.Error())
-			stopChan <- true
-			return
-		}
+		common.CloseResponseBodyGracefully(resp)
 		var palmResponse PaLMChatResponse
 		err = json.Unmarshal(responseBody, &palmResponse)
 		if err != nil {
@@ -123,10 +117,7 @@ func palmStreamHandler(c *gin.Context, resp *http.Response) (*dto.OpenAIErrorWit
 			return false
 		}
 	})
-	err := resp.Body.Close()
-	if err != nil {
-		return service.OpenAIErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), ""
-	}
+	common.CloseResponseBodyGracefully(resp)
 	return nil, responseText
 }
 
@@ -135,10 +126,7 @@ func palmHandler(c *gin.Context, resp *http.Response, promptTokens int, model st
 	if err != nil {
 		return service.OpenAIErrorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil
 	}
-	err = resp.Body.Close()
-	if err != nil {
-		return service.OpenAIErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
-	}
+	common.CloseResponseBodyGracefully(resp)
 	var palmResponse PaLMChatResponse
 	err = json.Unmarshal(responseBody, &palmResponse)
 	if err != nil {
@@ -156,7 +144,7 @@ func palmHandler(c *gin.Context, resp *http.Response, promptTokens int, model st
 		}, nil
 	}
 	fullTextResponse := responsePaLM2OpenAI(&palmResponse)
-	completionTokens, _ := service.CountTextToken(palmResponse.Candidates[0].Content, model)
+	completionTokens := service.CountTextToken(palmResponse.Candidates[0].Content, model)
 	usage := dto.Usage{
 		PromptTokens:     promptTokens,
 		CompletionTokens: completionTokens,

+ 2 - 4
relay/channel/siliconflow/relay-siliconflow.go

@@ -5,6 +5,7 @@ import (
 	"github.com/gin-gonic/gin"
 	"io"
 	"net/http"
+	"one-api/common"
 	"one-api/dto"
 	"one-api/service"
 )
@@ -14,10 +15,7 @@ func siliconflowRerankHandler(c *gin.Context, resp *http.Response) (*dto.OpenAIE
 	if err != nil {
 		return service.OpenAIErrorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil
 	}
-	err = resp.Body.Close()
-	if err != nil {
-		return service.OpenAIErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
-	}
+	common.CloseResponseBodyGracefully(resp)
 	var siliconflowResp SFRerankResponse
 	err = json.Unmarshal(responseBody, &siliconflowResp)
 	if err != nil {

+ 380 - 0
relay/channel/task/jimeng/adaptor.go

@@ -0,0 +1,380 @@
+package jimeng
+
+import (
+	"bytes"
+	"crypto/hmac"
+	"crypto/sha256"
+	"encoding/hex"
+	"encoding/json"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"one-api/model"
+	"sort"
+	"strings"
+	"time"
+
+	"github.com/gin-gonic/gin"
+	"github.com/pkg/errors"
+
+	"one-api/common"
+	"one-api/constant"
+	"one-api/dto"
+	"one-api/relay/channel"
+	relaycommon "one-api/relay/common"
+	"one-api/service"
+)
+
+// ============================
+// Request / Response structures
+// ============================
+
+// requestPayload is the request body for the Jimeng (Volcengine CV)
+// CVSync2AsyncSubmitTask endpoint.
+type requestPayload struct {
+	ReqKey           string   `json:"req_key"`
+	BinaryDataBase64 []string `json:"binary_data_base64,omitempty"`
+	ImageUrls        []string `json:"image_urls,omitempty"`
+	Prompt           string   `json:"prompt,omitempty"`
+	Seed             int64    `json:"seed"`
+	AspectRatio      string   `json:"aspect_ratio"`
+}
+
+// responsePayload is the submit-task response; Code 10000 signals success
+// (see DoResponse).
+type responsePayload struct {
+	Code      int    `json:"code"`
+	Message   string `json:"message"`
+	RequestId string `json:"request_id"`
+	Data      struct {
+		TaskID string `json:"task_id"`
+	} `json:"data"`
+}
+
+// responseTask is the CVSync2AsyncGetResult response describing the state of
+// a previously submitted task (see ParseTaskResult).
+type responseTask struct {
+	Code int `json:"code"`
+	Data struct {
+		BinaryDataBase64 []interface{} `json:"binary_data_base64"`
+		ImageUrls        interface{}   `json:"image_urls"`
+		RespData         string        `json:"resp_data"`
+		Status           string        `json:"status"`
+		VideoUrl         string        `json:"video_url"`
+	} `json:"data"`
+	Message     string `json:"message"`
+	RequestId   string `json:"request_id"`
+	Status      int    `json:"status"`
+	TimeElapsed string `json:"time_elapsed"`
+}
+
+// ============================
+// Adaptor implementation
+// ============================
+
+// TaskAdaptor implements the task-channel adaptor for Jimeng video generation.
+type TaskAdaptor struct {
+	ChannelType int
+	accessKey   string
+	secretKey   string
+	baseURL     string
+}
+
+// Init copies channel settings from the relay info. The channel key is
+// expected in the form "access_key|secret_key"; any other shape leaves the
+// keys empty (signing will then produce an invalid signature).
+func (a *TaskAdaptor) Init(info *relaycommon.TaskRelayInfo) {
+	a.ChannelType = info.ChannelType
+	a.baseURL = info.BaseUrl
+
+	// apiKey format: "access_key|secret_key"
+	keyParts := strings.Split(info.ApiKey, "|")
+	if len(keyParts) == 2 {
+		a.accessKey = strings.TrimSpace(keyParts[0])
+		a.secretKey = strings.TrimSpace(keyParts[1])
+	}
+}
+
+// ValidateRequestAndSetAction parses body, validates fields and sets default action.
+// It rejects requests with an empty prompt and stashes the parsed request in
+// the gin context under "task_request" for BuildRequestBody to pick up.
+func (a *TaskAdaptor) ValidateRequestAndSetAction(c *gin.Context, info *relaycommon.TaskRelayInfo) (taskErr *dto.TaskError) {
+	// Accept only POST /v1/video/generations as "generate" action.
+	action := constant.TaskActionGenerate
+	info.Action = action
+
+	req := relaycommon.TaskSubmitReq{}
+	if err := common.UnmarshalBodyReusable(c, &req); err != nil {
+		taskErr = service.TaskErrorWrapperLocal(err, "invalid_request", http.StatusBadRequest)
+		return
+	}
+	if strings.TrimSpace(req.Prompt) == "" {
+		taskErr = service.TaskErrorWrapperLocal(fmt.Errorf("prompt is required"), "invalid_request", http.StatusBadRequest)
+		return
+	}
+
+	// Store into context for later usage
+	c.Set("task_request", req)
+	return nil
+}
+
+// BuildRequestURL constructs the upstream URL.
+// The action/version pair is fixed by the Volcengine CV API.
+func (a *TaskAdaptor) BuildRequestURL(info *relaycommon.TaskRelayInfo) (string, error) {
+	return fmt.Sprintf("%s/?Action=CVSync2AsyncSubmitTask&Version=2022-08-31", a.baseURL), nil
+}
+
+// BuildRequestHeader sets required headers.
+// The request is signed in place with the channel's access/secret key pair.
+func (a *TaskAdaptor) BuildRequestHeader(c *gin.Context, req *http.Request, info *relaycommon.TaskRelayInfo) error {
+	req.Header.Set("Content-Type", "application/json")
+	req.Header.Set("Accept", "application/json")
+	return a.signRequest(req, a.accessKey, a.secretKey)
+}
+
+// BuildRequestBody converts request into Jimeng specific format.
+// It reads the request stashed by ValidateRequestAndSetAction; the type
+// assertion is safe only because that method always stores a TaskSubmitReq.
+func (a *TaskAdaptor) BuildRequestBody(c *gin.Context, info *relaycommon.TaskRelayInfo) (io.Reader, error) {
+	v, exists := c.Get("task_request")
+	if !exists {
+		return nil, fmt.Errorf("request not found in context")
+	}
+	req := v.(relaycommon.TaskSubmitReq)
+
+	body, err := a.convertToRequestPayload(&req)
+	if err != nil {
+		return nil, errors.Wrap(err, "convert request payload failed")
+	}
+	data, err := json.Marshal(body)
+	if err != nil {
+		return nil, err
+	}
+	return bytes.NewReader(data), nil
+}
+
+// DoRequest delegates to common helper.
+func (a *TaskAdaptor) DoRequest(c *gin.Context, info *relaycommon.TaskRelayInfo, requestBody io.Reader) (*http.Response, error) {
+	return channel.DoTaskApiRequest(a, c, info, requestBody)
+}
+
+// DoResponse handles upstream response, returns taskID etc.
+// It parses the Jimeng submit response, relays the task id to the client and
+// returns it (plus the raw body) for bookkeeping.
+func (a *TaskAdaptor) DoResponse(c *gin.Context, resp *http.Response, info *relaycommon.TaskRelayInfo) (taskID string, taskData []byte, taskErr *dto.TaskError) {
+	// Deferred close also covers the early ReadAll-error return, which the
+	// previous inline Close missed; matches the pattern used elsewhere.
+	defer common.CloseResponseBodyGracefully(resp)
+
+	responseBody, err := io.ReadAll(resp.Body)
+	if err != nil {
+		taskErr = service.TaskErrorWrapper(err, "read_response_body_failed", http.StatusInternalServerError)
+		return
+	}
+
+	// Parse Jimeng response
+	var jResp responsePayload
+	if err := json.Unmarshal(responseBody, &jResp); err != nil {
+		taskErr = service.TaskErrorWrapper(errors.Wrapf(err, "body: %s", responseBody), "unmarshal_response_body_failed", http.StatusInternalServerError)
+		return
+	}
+
+	// Code 10000 is the documented success code; anything else is an upstream error.
+	if jResp.Code != 10000 {
+		// errors.New instead of fmt.Errorf(jResp.Message): the message is not a
+		// format string (go vet non-constant format string; '%' would misrender).
+		taskErr = service.TaskErrorWrapper(errors.New(jResp.Message), fmt.Sprintf("%d", jResp.Code), http.StatusInternalServerError)
+		return
+	}
+
+	c.JSON(http.StatusOK, gin.H{"task_id": jResp.Data.TaskID})
+	return jResp.Data.TaskID, responseBody, nil
+}
+
+// FetchTask fetch task status.
+// It queries CVSync2AsyncGetResult for the given task id, re-signing from the
+// raw channel key ("ak|sk") because polling runs outside the adaptor's
+// per-request lifecycle (Init is not called on this path).
+func (a *TaskAdaptor) FetchTask(baseUrl, key string, body map[string]any) (*http.Response, error) {
+	taskID, ok := body["task_id"].(string)
+	if !ok {
+		return nil, fmt.Errorf("invalid task_id")
+	}
+
+	uri := fmt.Sprintf("%s/?Action=CVSync2AsyncGetResult&Version=2022-08-31", baseUrl)
+	payload := map[string]string{
+		// NOTE(review): submit uses req_key "jimeng_vgfm_i2v_l20" while this
+		// polls with "jimeng_vgfm_t2v_l20" — confirm both values are intended.
+		"req_key": "jimeng_vgfm_t2v_l20", // This is fixed value from doc: https://www.volcengine.com/docs/85621/1544774
+		"task_id": taskID,
+	}
+	payloadBytes, err := json.Marshal(payload)
+	if err != nil {
+		return nil, errors.Wrap(err, "marshal fetch task payload failed")
+	}
+
+	req, err := http.NewRequest(http.MethodPost, uri, bytes.NewBuffer(payloadBytes))
+	if err != nil {
+		return nil, err
+	}
+
+	req.Header.Set("Accept", "application/json")
+	req.Header.Set("Content-Type", "application/json")
+
+	keyParts := strings.Split(key, "|")
+	if len(keyParts) != 2 {
+		return nil, fmt.Errorf("invalid api key format for jimeng: expected 'ak|sk'")
+	}
+	accessKey := strings.TrimSpace(keyParts[0])
+	secretKey := strings.TrimSpace(keyParts[1])
+
+	if err := a.signRequest(req, accessKey, secretKey); err != nil {
+		return nil, errors.Wrap(err, "sign request failed")
+	}
+
+	return service.GetHttpClient().Do(req)
+}
+
+// GetModelList lists the models this adaptor advertises.
+func (a *TaskAdaptor) GetModelList() []string {
+	return []string{"jimeng_vgfm_t2v_l20"}
+}
+
+// GetChannelName identifies this adaptor in logs and routing.
+func (a *TaskAdaptor) GetChannelName() string {
+	return "jimeng"
+}
+
+// signRequest signs req in place using the Volcengine V4-style HMAC-SHA256
+// scheme (fixed region "cn-north-1", service "cv"): hash the body, build a
+// canonical request from method/path/query/headers, derive a signing key from
+// the secret key and date, and set the Authorization header.
+func (a *TaskAdaptor) signRequest(req *http.Request, accessKey, secretKey string) error {
+	var bodyBytes []byte
+	var err error
+
+	// Read the body for hashing, then rewind it so the transport can still send it.
+	if req.Body != nil {
+		bodyBytes, err = io.ReadAll(req.Body)
+		if err != nil {
+			return errors.Wrap(err, "read request body failed")
+		}
+		_ = req.Body.Close()
+		req.Body = io.NopCloser(bytes.NewBuffer(bodyBytes)) // Rewind
+	} else {
+		bodyBytes = []byte{}
+	}
+
+	payloadHash := sha256.Sum256(bodyBytes)
+	hexPayloadHash := hex.EncodeToString(payloadHash[:])
+
+	t := time.Now().UTC()
+	xDate := t.Format("20060102T150405Z")
+	shortDate := t.Format("20060102")
+
+	req.Header.Set("Host", req.URL.Host)
+	req.Header.Set("X-Date", xDate)
+	req.Header.Set("X-Content-Sha256", hexPayloadHash)
+
+	// Sort and encode query parameters to create canonical query string
+	queryParams := req.URL.Query()
+	sortedKeys := make([]string, 0, len(queryParams))
+	for k := range queryParams {
+		sortedKeys = append(sortedKeys, k)
+	}
+	sort.Strings(sortedKeys)
+	var queryParts []string
+	for _, k := range sortedKeys {
+		values := queryParams[k]
+		sort.Strings(values)
+		for _, v := range values {
+			queryParts = append(queryParts, fmt.Sprintf("%s=%s", url.QueryEscape(k), url.QueryEscape(v)))
+		}
+	}
+	canonicalQueryString := strings.Join(queryParts, "&")
+
+	// Canonical headers: lowercase names, sorted, values trimmed, one per line.
+	headersToSign := map[string]string{
+		"host":             req.URL.Host,
+		"x-date":           xDate,
+		"x-content-sha256": hexPayloadHash,
+	}
+	if req.Header.Get("Content-Type") != "" {
+		headersToSign["content-type"] = req.Header.Get("Content-Type")
+	}
+
+	var signedHeaderKeys []string
+	for k := range headersToSign {
+		signedHeaderKeys = append(signedHeaderKeys, k)
+	}
+	sort.Strings(signedHeaderKeys)
+
+	var canonicalHeaders strings.Builder
+	for _, k := range signedHeaderKeys {
+		canonicalHeaders.WriteString(k)
+		canonicalHeaders.WriteString(":")
+		canonicalHeaders.WriteString(strings.TrimSpace(headersToSign[k]))
+		canonicalHeaders.WriteString("\n")
+	}
+	signedHeaders := strings.Join(signedHeaderKeys, ";")
+
+	canonicalRequest := fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s",
+		req.Method,
+		req.URL.Path,
+		canonicalQueryString,
+		canonicalHeaders.String(),
+		signedHeaders,
+		hexPayloadHash,
+	)
+
+	hashedCanonicalRequest := sha256.Sum256([]byte(canonicalRequest))
+	hexHashedCanonicalRequest := hex.EncodeToString(hashedCanonicalRequest[:])
+
+	// The string-to-sign binds the timestamp and credential scope to the
+	// canonical request hash.
+	region := "cn-north-1"
+	serviceName := "cv"
+	credentialScope := fmt.Sprintf("%s/%s/%s/request", shortDate, region, serviceName)
+	stringToSign := fmt.Sprintf("HMAC-SHA256\n%s\n%s\n%s",
+		xDate,
+		credentialScope,
+		hexHashedCanonicalRequest,
+	)
+
+	// Derive the signing key by chained HMACs: date -> region -> service -> "request".
+	kDate := hmacSHA256([]byte(secretKey), []byte(shortDate))
+	kRegion := hmacSHA256(kDate, []byte(region))
+	kService := hmacSHA256(kRegion, []byte(serviceName))
+	kSigning := hmacSHA256(kService, []byte("request"))
+	signature := hex.EncodeToString(hmacSHA256(kSigning, []byte(stringToSign)))
+
+	authorization := fmt.Sprintf("HMAC-SHA256 Credential=%s/%s, SignedHeaders=%s, Signature=%s",
+		accessKey,
+		credentialScope,
+		signedHeaders,
+		signature,
+	)
+	req.Header.Set("Authorization", authorization)
+	return nil
+}
+
+// hmacSHA256 returns HMAC-SHA256(key, data).
+func hmacSHA256(key []byte, data []byte) []byte {
+	h := hmac.New(sha256.New, key)
+	h.Write(data)
+	return h.Sum(nil)
+}
+
+// convertToRequestPayload maps the generic submit request onto the Jimeng
+// image-to-video payload with defaults, then lets req.Metadata override any
+// field (req_key, aspect_ratio, seed, ...) via a JSON round-trip.
+func (a *TaskAdaptor) convertToRequestPayload(req *relaycommon.TaskSubmitReq) (*requestPayload, error) {
+	r := requestPayload{
+		ReqKey:      "jimeng_vgfm_i2v_l20",
+		Prompt:      req.Prompt,
+		AspectRatio: "16:9", // Default aspect ratio
+		Seed:        -1,     // Default to random
+	}
+
+	// Handle one-of image_urls or binary_data_base64
+	if req.Image != "" {
+		if strings.HasPrefix(req.Image, "http") {
+			r.ImageUrls = []string{req.Image}
+		} else {
+			r.BinaryDataBase64 = []string{req.Image}
+		}
+	}
+	metadata := req.Metadata
+	medaBytes, err := json.Marshal(metadata)
+	if err != nil {
+		// fixed duplicated word in the wrap message ("metadata marshal metadata")
+		return nil, errors.Wrap(err, "marshal metadata failed")
+	}
+	err = json.Unmarshal(medaBytes, &r)
+	if err != nil {
+		return nil, errors.Wrap(err, "unmarshal metadata failed")
+	}
+	return &r, nil
+}
+
+// ParseTaskResult converts a Jimeng poll response into the internal TaskInfo.
+// Code 10000 means the API call itself succeeded; Data.Status then carries the
+// task state ("in_queue" / "done" — other values leave Status/Progress unset).
+func (a *TaskAdaptor) ParseTaskResult(respBody []byte) (*relaycommon.TaskInfo, error) {
+	resTask := responseTask{}
+	if err := json.Unmarshal(respBody, &resTask); err != nil {
+		return nil, errors.Wrap(err, "unmarshal task result failed")
+	}
+	taskResult := relaycommon.TaskInfo{}
+	if resTask.Code == 10000 {
+		taskResult.Code = 0
+	} else {
+		taskResult.Code = resTask.Code // todo uni code
+		taskResult.Reason = resTask.Message
+		taskResult.Status = model.TaskStatusFailure
+		taskResult.Progress = "100%"
+	}
+	// NOTE(review): a non-10000 response that still carries an "in_queue"/"done"
+	// Data.Status would overwrite the failure status set above — presumably the
+	// upstream never mixes those; confirm against the API docs.
+	switch resTask.Data.Status {
+	case "in_queue":
+		taskResult.Status = model.TaskStatusQueued
+		taskResult.Progress = "10%"
+	case "done":
+		taskResult.Status = model.TaskStatusSuccess
+		taskResult.Progress = "100%"
+	}
+	taskResult.Url = resTask.Data.VideoUrl
+	return &taskResult, nil
+}

+ 346 - 0
relay/channel/task/kling/adaptor.go

@@ -0,0 +1,346 @@
+package kling
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"github.com/samber/lo"
+	"io"
+	"net/http"
+	"one-api/model"
+	"strings"
+	"time"
+
+	"github.com/gin-gonic/gin"
+	"github.com/golang-jwt/jwt"
+	"github.com/pkg/errors"
+
+	"one-api/common"
+	"one-api/constant"
+	"one-api/dto"
+	"one-api/relay/channel"
+	relaycommon "one-api/relay/common"
+	"one-api/service"
+)
+
+// ============================
+// Request / Response structures
+// ============================
+
+// SubmitReq is the public video-generation request accepted by this adaptor;
+// Metadata may carry extra upstream fields that override the defaults.
+type SubmitReq struct {
+	Prompt   string                 `json:"prompt"`
+	Model    string                 `json:"model,omitempty"`
+	Mode     string                 `json:"mode,omitempty"`
+	Image    string                 `json:"image,omitempty"`
+	Size     string                 `json:"size,omitempty"`
+	Duration int                    `json:"duration,omitempty"`
+	Metadata map[string]interface{} `json:"metadata,omitempty"`
+}
+
+// requestPayload is the body sent to the Kling video endpoints.
+type requestPayload struct {
+	Prompt      string  `json:"prompt,omitempty"`
+	Image       string  `json:"image,omitempty"`
+	Mode        string  `json:"mode,omitempty"`
+	Duration    string  `json:"duration,omitempty"`
+	AspectRatio string  `json:"aspect_ratio,omitempty"`
+	ModelName   string  `json:"model_name,omitempty"`
+	CfgScale    float64 `json:"cfg_scale,omitempty"`
+}
+
+// responsePayload is the Kling response envelope for both submit and poll
+// calls; Code 0 signals success.
+type responsePayload struct {
+	Code      int    `json:"code"`
+	Message   string `json:"message"`
+	RequestId string `json:"request_id"`
+	Data      struct {
+		TaskId        string `json:"task_id"`
+		TaskStatus    string `json:"task_status"`
+		TaskStatusMsg string `json:"task_status_msg"`
+		TaskResult    struct {
+			Videos []struct {
+				Id       string `json:"id"`
+				Url      string `json:"url"`
+				Duration string `json:"duration"`
+			} `json:"videos"`
+		} `json:"task_result"`
+		CreatedAt int64 `json:"created_at"`
+		UpdatedAt int64 `json:"updated_at"`
+	} `json:"data"`
+}
+
+// ============================
+// Adaptor implementation
+// ============================
+
+// TaskAdaptor implements the task-channel adaptor for Kling video generation.
+type TaskAdaptor struct {
+	ChannelType int
+	accessKey   string
+	secretKey   string
+	baseURL     string
+}
+
+// Init copies channel settings from the relay info. The channel key is
+// expected in the form "access_key|secret_key"; any other shape leaves the
+// keys empty (JWT creation will then fail in BuildRequestHeader).
+func (a *TaskAdaptor) Init(info *relaycommon.TaskRelayInfo) {
+	a.ChannelType = info.ChannelType
+	a.baseURL = info.BaseUrl
+
+	// apiKey format: "access_key|secret_key"
+	keyParts := strings.Split(info.ApiKey, "|")
+	if len(keyParts) == 2 {
+		a.accessKey = strings.TrimSpace(keyParts[0])
+		a.secretKey = strings.TrimSpace(keyParts[1])
+	}
+}
+
+// ValidateRequestAndSetAction parses body, validates fields and sets default action.
+// It rejects requests with an empty prompt and stashes the parsed request in
+// the gin context under "task_request" for BuildRequestBody to pick up.
+func (a *TaskAdaptor) ValidateRequestAndSetAction(c *gin.Context, info *relaycommon.TaskRelayInfo) (taskErr *dto.TaskError) {
+	// Accept only POST /v1/video/generations as "generate" action.
+	action := constant.TaskActionGenerate
+	info.Action = action
+
+	var req SubmitReq
+	if err := common.UnmarshalBodyReusable(c, &req); err != nil {
+		taskErr = service.TaskErrorWrapperLocal(err, "invalid_request", http.StatusBadRequest)
+		return
+	}
+	if strings.TrimSpace(req.Prompt) == "" {
+		taskErr = service.TaskErrorWrapperLocal(fmt.Errorf("prompt is required"), "invalid_request", http.StatusBadRequest)
+		return
+	}
+
+	// Store into context for later usage
+	c.Set("task_request", req)
+	return nil
+}
+
+// BuildRequestURL constructs the upstream URL.
+// NOTE(review): the "generate" action maps to the image2video endpoint and
+// everything else to text2video — confirm this matches the intended routing.
+func (a *TaskAdaptor) BuildRequestURL(info *relaycommon.TaskRelayInfo) (string, error) {
+	path := lo.Ternary(info.Action == constant.TaskActionGenerate, "/v1/videos/image2video", "/v1/videos/text2video")
+	return fmt.Sprintf("%s%s", a.baseURL, path), nil
+}
+
+// BuildRequestHeader sets required headers.
+// Kling authenticates with a short-lived HS256 JWT derived from "ak|sk".
+func (a *TaskAdaptor) BuildRequestHeader(c *gin.Context, req *http.Request, info *relaycommon.TaskRelayInfo) error {
+	token, err := a.createJWTToken()
+	if err != nil {
+		return fmt.Errorf("failed to create JWT token: %w", err)
+	}
+
+	req.Header.Set("Content-Type", "application/json")
+	req.Header.Set("Accept", "application/json")
+	req.Header.Set("Authorization", "Bearer "+token)
+	req.Header.Set("User-Agent", "kling-sdk/1.0")
+	return nil
+}
+
+// BuildRequestBody converts request into Kling specific format.
+// It reads the request stashed by ValidateRequestAndSetAction; the type
+// assertion is safe only because that method always stores a SubmitReq.
+func (a *TaskAdaptor) BuildRequestBody(c *gin.Context, info *relaycommon.TaskRelayInfo) (io.Reader, error) {
+	v, exists := c.Get("task_request")
+	if !exists {
+		return nil, fmt.Errorf("request not found in context")
+	}
+	req := v.(SubmitReq)
+
+	body, err := a.convertToRequestPayload(&req)
+	if err != nil {
+		return nil, err
+	}
+	data, err := json.Marshal(body)
+	if err != nil {
+		return nil, err
+	}
+	return bytes.NewReader(data), nil
+}
+
+// DoRequest delegates to common helper.
+// An "action" value placed in the gin context overrides the action for routing.
+func (a *TaskAdaptor) DoRequest(c *gin.Context, info *relaycommon.TaskRelayInfo, requestBody io.Reader) (*http.Response, error) {
+	if action := c.GetString("action"); action != "" {
+		info.Action = action
+	}
+	return channel.DoTaskApiRequest(a, c, info, requestBody)
+}
+
+// DoResponse handles upstream response, returns taskID etc.
+// It tries the native Kling envelope first, then falls back to the generic
+// task response, and relays the resolved task id to the client.
+func (a *TaskAdaptor) DoResponse(c *gin.Context, resp *http.Response, info *relaycommon.TaskRelayInfo) (taskID string, taskData []byte, taskErr *dto.TaskError) {
+	// Close the body so the transport can reuse the connection (it was never
+	// closed before); matches the jimeng adaptor and the commit-wide pattern.
+	defer common.CloseResponseBodyGracefully(resp)
+
+	responseBody, err := io.ReadAll(resp.Body)
+	if err != nil {
+		taskErr = service.TaskErrorWrapper(err, "read_response_body_failed", http.StatusInternalServerError)
+		return
+	}
+
+	// Attempt Kling response parse first.
+	var kResp responsePayload
+	if err := json.Unmarshal(responseBody, &kResp); err == nil && kResp.Code == 0 {
+		c.JSON(http.StatusOK, gin.H{"task_id": kResp.Data.TaskId})
+		return kResp.Data.TaskId, responseBody, nil
+	}
+
+	// Fallback generic task response.
+	var generic dto.TaskResponse[string]
+	if err := json.Unmarshal(responseBody, &generic); err != nil {
+		taskErr = service.TaskErrorWrapper(errors.Wrapf(err, "body: %s", responseBody), "unmarshal_response_body_failed", http.StatusInternalServerError)
+		return
+	}
+
+	if !generic.IsSuccess() {
+		// errors.New instead of fmt.Errorf(generic.Message): the message is not
+		// a format string (go vet non-constant format string; '%' would misrender).
+		taskErr = service.TaskErrorWrapper(errors.New(generic.Message), generic.Code, http.StatusInternalServerError)
+		return
+	}
+
+	c.JSON(http.StatusOK, gin.H{"task_id": generic.Data})
+	return generic.Data, responseBody, nil
+}
+
+// FetchTask fetch task status.
+// It issues a GET against the endpoint matching the recorded action; the JWT
+// is rebuilt from the raw channel key because polling runs outside the
+// per-request adaptor lifecycle.
+func (a *TaskAdaptor) FetchTask(baseUrl, key string, body map[string]any) (*http.Response, error) {
+	taskID, ok := body["task_id"].(string)
+	if !ok {
+		return nil, fmt.Errorf("invalid task_id")
+	}
+	action, ok := body["action"].(string)
+	if !ok {
+		return nil, fmt.Errorf("invalid action")
+	}
+	path := lo.Ternary(action == constant.TaskActionGenerate, "/v1/videos/image2video", "/v1/videos/text2video")
+	url := fmt.Sprintf("%s%s/%s", baseUrl, path, taskID)
+
+	req, err := http.NewRequest(http.MethodGet, url, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	// NOTE(review): on JWT-creation failure the raw key is sent as the bearer
+	// token (best effort) — confirm this fallback is intended rather than an error.
+	token, err := a.createJWTTokenWithKey(key)
+	if err != nil {
+		token = key
+	}
+
+	req.Header.Set("Accept", "application/json")
+	req.Header.Set("Authorization", "Bearer "+token)
+	req.Header.Set("User-Agent", "kling-sdk/1.0")
+
+	return service.GetHttpClient().Do(req)
+}
+
+// GetModelList lists the models this adaptor advertises.
+func (a *TaskAdaptor) GetModelList() []string {
+	return []string{"kling-v1", "kling-v1-6", "kling-v2-master"}
+}
+
+// GetChannelName identifies this adaptor in logs and routing.
+func (a *TaskAdaptor) GetChannelName() string {
+	return "kling"
+}
+
+// ============================
+// helpers
+// ============================
+
+// convertToRequestPayload maps the public submit request onto the Kling API
+// payload with defaults (std mode, 5s, cfg_scale 0.5, kling-v1), then lets
+// req.Metadata override any field via a JSON round-trip.
+func (a *TaskAdaptor) convertToRequestPayload(req *SubmitReq) (*requestPayload, error) {
+	r := requestPayload{
+		Prompt:      req.Prompt,
+		Image:       req.Image,
+		Mode:        defaultString(req.Mode, "std"),
+		Duration:    fmt.Sprintf("%d", defaultInt(req.Duration, 5)),
+		AspectRatio: a.getAspectRatio(req.Size),
+		ModelName:   req.Model,
+		CfgScale:    0.5,
+	}
+	if r.ModelName == "" {
+		r.ModelName = "kling-v1"
+	}
+	metadata := req.Metadata
+	medaBytes, err := json.Marshal(metadata)
+	if err != nil {
+		// fixed duplicated word in the wrap message ("metadata marshal metadata")
+		return nil, errors.Wrap(err, "marshal metadata failed")
+	}
+	err = json.Unmarshal(medaBytes, &r)
+	if err != nil {
+		return nil, errors.Wrap(err, "unmarshal metadata failed")
+	}
+	return &r, nil
+}
+
+// getAspectRatio maps a WxH size string onto one of Kling's supported aspect
+// ratios; unknown sizes fall back to square.
+func (a *TaskAdaptor) getAspectRatio(size string) string {
+	switch size {
+	case "1024x1024", "512x512":
+		return "1:1"
+	case "1280x720", "1920x1080":
+		return "16:9"
+	case "720x1280", "1080x1920":
+		return "9:16"
+	default:
+		return "1:1"
+	}
+}
+
+// defaultString returns def when s is empty or whitespace-only.
+func defaultString(s, def string) string {
+	if strings.TrimSpace(s) == "" {
+		return def
+	}
+	return s
+}
+
+// defaultInt returns def when v is zero (the unset JSON value).
+func defaultInt(v int, def int) int {
+	if v == 0 {
+		return def
+	}
+	return v
+}
+
+// ============================
+// JWT helpers
+// ============================
+
+// createJWTToken signs a token using the keys parsed at Init time.
+func (a *TaskAdaptor) createJWTToken() (string, error) {
+	return a.createJWTTokenWithKeys(a.accessKey, a.secretKey)
+}
+
+// createJWTTokenWithKey splits a raw channel key of the form
+// "access_key|secret_key" and signs a token with it.
+func (a *TaskAdaptor) createJWTTokenWithKey(apiKey string) (string, error) {
+	parts := strings.Split(apiKey, "|")
+	if len(parts) != 2 {
+		// message fixed: the delimiter is '|', not ',' as previously stated
+		return "", fmt.Errorf("invalid API key format, expected 'access_key|secret_key'")
+	}
+	return a.createJWTTokenWithKeys(strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]))
+}
+
+// createJWTTokenWithKeys builds the HS256 token the Kling API expects:
+// issuer = access key, 30-minute lifetime, 5-second clock-skew tolerance.
+func (a *TaskAdaptor) createJWTTokenWithKeys(accessKey, secretKey string) (string, error) {
+	if accessKey == "" || secretKey == "" {
+		return "", fmt.Errorf("access key and secret key are required")
+	}
+	now := time.Now().Unix()
+	claims := jwt.MapClaims{
+		"iss": accessKey,
+		"exp": now + 1800, // 30 minutes
+		"nbf": now - 5,
+	}
+	token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)
+	token.Header["typ"] = "JWT"
+	return token.SignedString([]byte(secretKey))
+}
+
+// ParseTaskResult converts a Kling poll response into the internal TaskInfo,
+// mapping the upstream status enum and picking the first video URL if present.
+func (a *TaskAdaptor) ParseTaskResult(respBody []byte) (*relaycommon.TaskInfo, error) {
+	resPayload := responsePayload{}
+	err := json.Unmarshal(respBody, &resPayload)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to unmarshal response body")
+	}
+	taskInfo := &relaycommon.TaskInfo{}
+	taskInfo.Code = resPayload.Code
+	taskInfo.TaskID = resPayload.Data.TaskId
+	taskInfo.Reason = resPayload.Message
+	// Task status enum: submitted, processing, succeed, failed.
+	status := resPayload.Data.TaskStatus
+	switch status {
+	case "submitted":
+		taskInfo.Status = model.TaskStatusSubmitted
+	case "processing":
+		taskInfo.Status = model.TaskStatusInProgress
+	case "succeed":
+		taskInfo.Status = model.TaskStatusSuccess
+	case "failed":
+		taskInfo.Status = model.TaskStatusFailure
+	default:
+		return nil, fmt.Errorf("unknown task status: %s", status)
+	}
+	if videos := resPayload.Data.TaskResult.Videos; len(videos) > 0 {
+		video := videos[0]
+		taskInfo.Url = video.Url
+	}
+	return taskInfo, nil
+}

+ 4 - 0
relay/channel/task/suno/adaptor.go

@@ -22,6 +22,10 @@ type TaskAdaptor struct {
 	ChannelType int
 }
 
+// ParseTaskResult is not supported for Suno; polling is handled elsewhere.
+func (a *TaskAdaptor) ParseTaskResult([]byte) (*relaycommon.TaskInfo, error) {
+	return nil, fmt.Errorf("not implemented") // todo implement this method if needed
+}
+
 func (a *TaskAdaptor) Init(info *relaycommon.TaskRelayInfo) {
 	a.ChannelType = info.ChannelType
 }

Algúns arquivos non se mostraron porque demasiados arquivos cambiaron neste cambio