channel-test.go

package controller

import (
	"bytes"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"math"
	"net/http"
	"net/http/httptest"
	"net/url"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/QuantumNous/new-api/common"
	"github.com/QuantumNous/new-api/constant"
	"github.com/QuantumNous/new-api/dto"
	"github.com/QuantumNous/new-api/middleware"
	"github.com/QuantumNous/new-api/model"
	"github.com/QuantumNous/new-api/relay"
	relaycommon "github.com/QuantumNous/new-api/relay/common"
	relayconstant "github.com/QuantumNous/new-api/relay/constant"
	"github.com/QuantumNous/new-api/relay/helper"
	"github.com/QuantumNous/new-api/service"
	"github.com/QuantumNous/new-api/setting/operation_setting"
	"github.com/QuantumNous/new-api/setting/ratio_setting"
	"github.com/QuantumNous/new-api/types"
	"github.com/bytedance/gopkg/util/gopool"
	"github.com/gin-gonic/gin"
	"github.com/samber/lo"
	"github.com/tidwall/gjson"
)
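
// testResult bundles the outcome of a single channel test: the gin test
// context used for the relay call, a local error for early failures, and the
// structured NewAPIError reported by the relay layer, if any.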
type testResult struct {
	context     *gin.Context
	localErr    error
	newAPIError *types.NewAPIError
}
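
// normalizeChannelTestEndpoint returns the endpoint type to use for a test.
// An explicitly supplied endpointType wins; otherwise models carrying the
// compaction suffix map to the responses-compact endpoint and Codex channels
// map to the responses endpoint. An empty result means "auto-detect later".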
func normalizeChannelTestEndpoint(channel *model.Channel, modelName, endpointType string) string {
	normalized := strings.TrimSpace(endpointType)
	if normalized != "" {
		return normalized
	}
	if strings.HasSuffix(modelName, ratio_setting.CompactModelSuffix) {
		return string(constant.EndpointTypeOpenAIResponseCompact)
	}
	if channel != nil && channel.Type == constant.ChannelTypeCodex {
		return string(constant.EndpointTypeOpenAIResponse)
	}
	return normalized
}
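
// testChannel runs a single end-to-end test request against the given channel
// using an in-memory httptest recorder: it picks a test model and request
// path, builds and converts the request through the channel's adaptor, sends
// it upstream, validates the response body, and records a consume log entry.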
func testChannel(channel *model.Channel, testModel string, endpointType string, isStream bool) testResult {
	tik := time.Now()
	var unsupportedTestChannelTypes = []int{
		constant.ChannelTypeMidjourney,
		constant.ChannelTypeMidjourneyPlus,
		constant.ChannelTypeSunoAPI,
		constant.ChannelTypeKling,
		constant.ChannelTypeJimeng,
		constant.ChannelTypeDoubaoVideo,
		constant.ChannelTypeVidu,
	}
	if lo.Contains(unsupportedTestChannelTypes, channel.Type) {
		channelTypeName := constant.GetChannelTypeName(channel.Type)
		return testResult{
			localErr: fmt.Errorf("%s channel test is not supported", channelTypeName),
		}
	}
	w := httptest.NewRecorder()
	c, _ := gin.CreateTestContext(w)
	testModel = strings.TrimSpace(testModel)
	if testModel == "" {
		if channel.TestModel != nil && *channel.TestModel != "" {
			testModel = strings.TrimSpace(*channel.TestModel)
		} else {
			models := channel.GetModels()
			if len(models) > 0 {
				testModel = strings.TrimSpace(models[0])
			}
			if testModel == "" {
				testModel = "gpt-4o-mini"
			}
		}
	}
	endpointType = normalizeChannelTestEndpoint(channel, testModel, endpointType)
	requestPath := "/v1/chat/completions"
	// If an endpoint type is specified, use it directly
	if endpointType != "" {
		if endpointInfo, ok := common.GetDefaultEndpointInfo(constant.EndpointType(endpointType)); ok {
			requestPath = endpointInfo.Path
		}
	} else {
		// Otherwise fall back to the original auto-detection logic
		if strings.Contains(strings.ToLower(testModel), "rerank") {
			requestPath = "/v1/rerank"
		}
		// Check for embedding models first
		if strings.Contains(strings.ToLower(testModel), "embedding") ||
			strings.HasPrefix(testModel, "m3e") || // m3e series models
			strings.Contains(testModel, "bge-") || // bge series models
			strings.Contains(testModel, "embed") ||
			channel.Type == constant.ChannelTypeMokaAI { // other embedding models
			requestPath = "/v1/embeddings" // switch the request path
		}
		// VolcEngine image generation models
		if channel.Type == constant.ChannelTypeVolcEngine && strings.Contains(testModel, "seedream") {
			requestPath = "/v1/images/generations"
		}
		// responses-only models
		if strings.Contains(strings.ToLower(testModel), "codex") {
			requestPath = "/v1/responses"
		}
		// responses compaction models (must use /v1/responses/compact)
		if strings.HasSuffix(testModel, ratio_setting.CompactModelSuffix) {
			requestPath = "/v1/responses/compact"
		}
	}
	if strings.HasPrefix(requestPath, "/v1/responses/compact") {
		testModel = ratio_setting.WithCompactModelSuffix(testModel)
	}
	c.Request = &http.Request{
		Method: "POST",
		URL:    &url.URL{Path: requestPath}, // use the dynamically selected path
		Body:   nil,
		Header: make(http.Header),
	}
	cache, err := model.GetUserCache(1)
	if err != nil {
		return testResult{
			localErr:    err,
			newAPIError: nil,
		}
	}
	cache.WriteContext(c)
	//c.Request.Header.Set("Authorization", "Bearer "+channel.Key)
	c.Request.Header.Set("Content-Type", "application/json")
	c.Set("channel", channel.Type)
	c.Set("base_url", channel.GetBaseURL())
	group, _ := model.GetUserGroup(1, false)
	c.Set("group", group)
	newAPIError := middleware.SetupContextForSelectedChannel(c, channel, testModel)
	if newAPIError != nil {
		return testResult{
			context:     c,
			localErr:    newAPIError,
			newAPIError: newAPIError,
		}
	}
	// Determine relay format based on endpoint type or request path
	var relayFormat types.RelayFormat
	if endpointType != "" {
		// Map the explicitly specified endpoint type to a relay format
		switch constant.EndpointType(endpointType) {
		case constant.EndpointTypeOpenAI:
			relayFormat = types.RelayFormatOpenAI
		case constant.EndpointTypeOpenAIResponse:
			relayFormat = types.RelayFormatOpenAIResponses
		case constant.EndpointTypeOpenAIResponseCompact:
			relayFormat = types.RelayFormatOpenAIResponsesCompaction
		case constant.EndpointTypeAnthropic:
			relayFormat = types.RelayFormatClaude
		case constant.EndpointTypeGemini:
			relayFormat = types.RelayFormatGemini
		case constant.EndpointTypeJinaRerank:
			relayFormat = types.RelayFormatRerank
		case constant.EndpointTypeImageGeneration:
			relayFormat = types.RelayFormatOpenAIImage
		case constant.EndpointTypeEmbeddings:
			relayFormat = types.RelayFormatEmbedding
		default:
			relayFormat = types.RelayFormatOpenAI
		}
	} else {
		// Auto-detect from the request path
		relayFormat = types.RelayFormatOpenAI
		if c.Request.URL.Path == "/v1/embeddings" {
			relayFormat = types.RelayFormatEmbedding
		}
		if c.Request.URL.Path == "/v1/images/generations" {
			relayFormat = types.RelayFormatOpenAIImage
		}
		if c.Request.URL.Path == "/v1/messages" {
			relayFormat = types.RelayFormatClaude
		}
		if strings.Contains(c.Request.URL.Path, "/v1beta/models") {
			relayFormat = types.RelayFormatGemini
		}
		if c.Request.URL.Path == "/v1/rerank" || c.Request.URL.Path == "/rerank" {
			relayFormat = types.RelayFormatRerank
		}
		if c.Request.URL.Path == "/v1/responses" {
			relayFormat = types.RelayFormatOpenAIResponses
		}
		if strings.HasPrefix(c.Request.URL.Path, "/v1/responses/compact") {
			relayFormat = types.RelayFormatOpenAIResponsesCompaction
		}
	}
	request := buildTestRequest(testModel, endpointType, channel, isStream)
	info, err := relaycommon.GenRelayInfo(c, relayFormat, request, nil)
	if err != nil {
		return testResult{
			context:     c,
			localErr:    err,
			newAPIError: types.NewError(err, types.ErrorCodeGenRelayInfoFailed),
		}
	}
	info.IsChannelTest = true
	info.InitChannelMeta(c)
	err = helper.ModelMappedHelper(c, info, request)
	if err != nil {
		return testResult{
			context:     c,
			localErr:    err,
			newAPIError: types.NewError(err, types.ErrorCodeChannelModelMappedError),
		}
	}
	testModel = info.UpstreamModelName
	// Update the model name in the request
	request.SetModelName(testModel)
	apiType, _ := common.ChannelType2APIType(channel.Type)
	if info.RelayMode == relayconstant.RelayModeResponsesCompact &&
		apiType != constant.APITypeOpenAI &&
		apiType != constant.APITypeCodex {
		return testResult{
			context:     c,
			localErr:    fmt.Errorf("responses compaction test only supports openai/codex channels, got api type %d", apiType),
			newAPIError: types.NewError(fmt.Errorf("unsupported api type: %d", apiType), types.ErrorCodeInvalidApiType),
		}
	}
	adaptor := relay.GetAdaptor(apiType)
	if adaptor == nil {
		return testResult{
			context:     c,
			localErr:    fmt.Errorf("invalid api type: %d, adaptor is nil", apiType),
			newAPIError: types.NewError(fmt.Errorf("invalid api type: %d, adaptor is nil", apiType), types.ErrorCodeInvalidApiType),
		}
	}
	//// Create a copy of info for logging and strip the ApiKey
	//logInfo := info
	//logInfo.ApiKey = ""
	common.SysLog(fmt.Sprintf("testing channel %d with model %s, info %s", channel.Id, testModel, info.ToString()))
	priceData, err := helper.ModelPriceHelper(c, info, 0, request.GetTokenCountMeta())
	if err != nil {
		return testResult{
			context:     c,
			localErr:    err,
			newAPIError: types.NewError(err, types.ErrorCodeModelPriceError),
		}
	}
	adaptor.Init(info)
	var convertedRequest any
	// Pick the correct conversion function based on RelayMode
	switch info.RelayMode {
	case relayconstant.RelayModeEmbeddings:
		// Embedding request - request is already the correct type
		if embeddingReq, ok := request.(*dto.EmbeddingRequest); ok {
			convertedRequest, err = adaptor.ConvertEmbeddingRequest(c, info, *embeddingReq)
		} else {
			return testResult{
				context:     c,
				localErr:    errors.New("invalid embedding request type"),
				newAPIError: types.NewError(errors.New("invalid embedding request type"), types.ErrorCodeConvertRequestFailed),
			}
		}
	case relayconstant.RelayModeImagesGenerations:
		// Image generation request - request is already the correct type
		if imageReq, ok := request.(*dto.ImageRequest); ok {
			convertedRequest, err = adaptor.ConvertImageRequest(c, info, *imageReq)
		} else {
			return testResult{
				context:     c,
				localErr:    errors.New("invalid image request type"),
				newAPIError: types.NewError(errors.New("invalid image request type"), types.ErrorCodeConvertRequestFailed),
			}
		}
	case relayconstant.RelayModeRerank:
		// Rerank request - request is already the correct type
		if rerankReq, ok := request.(*dto.RerankRequest); ok {
			convertedRequest, err = adaptor.ConvertRerankRequest(c, info.RelayMode, *rerankReq)
		} else {
			return testResult{
				context:     c,
				localErr:    errors.New("invalid rerank request type"),
				newAPIError: types.NewError(errors.New("invalid rerank request type"), types.ErrorCodeConvertRequestFailed),
			}
		}
	case relayconstant.RelayModeResponses:
		// Responses request - request is already the correct type
		if responseReq, ok := request.(*dto.OpenAIResponsesRequest); ok {
			convertedRequest, err = adaptor.ConvertOpenAIResponsesRequest(c, info, *responseReq)
		} else {
			return testResult{
				context:     c,
				localErr:    errors.New("invalid response request type"),
				newAPIError: types.NewError(errors.New("invalid response request type"), types.ErrorCodeConvertRequestFailed),
			}
		}
	case relayconstant.RelayModeResponsesCompact:
		// Response compaction request - convert to OpenAIResponsesRequest before adapting
		switch req := request.(type) {
		case *dto.OpenAIResponsesCompactionRequest:
			convertedRequest, err = adaptor.ConvertOpenAIResponsesRequest(c, info, dto.OpenAIResponsesRequest{
				Model:              req.Model,
				Input:              req.Input,
				Instructions:       req.Instructions,
				PreviousResponseID: req.PreviousResponseID,
			})
		case *dto.OpenAIResponsesRequest:
			convertedRequest, err = adaptor.ConvertOpenAIResponsesRequest(c, info, *req)
		default:
			return testResult{
				context:     c,
				localErr:    errors.New("invalid response compaction request type"),
				newAPIError: types.NewError(errors.New("invalid response compaction request type"), types.ErrorCodeConvertRequestFailed),
			}
		}
	default:
		// Chat/completion and other request types
		if generalReq, ok := request.(*dto.GeneralOpenAIRequest); ok {
			convertedRequest, err = adaptor.ConvertOpenAIRequest(c, info, generalReq)
		} else {
			return testResult{
				context:     c,
				localErr:    errors.New("invalid general request type"),
				newAPIError: types.NewError(errors.New("invalid general request type"), types.ErrorCodeConvertRequestFailed),
			}
		}
	}
	if err != nil {
		return testResult{
			context:     c,
			localErr:    err,
			newAPIError: types.NewError(err, types.ErrorCodeConvertRequestFailed),
		}
	}
	jsonData, err := json.Marshal(convertedRequest)
	if err != nil {
		return testResult{
			context:     c,
			localErr:    err,
			newAPIError: types.NewError(err, types.ErrorCodeJsonMarshalFailed),
		}
	}
	//jsonData, err = relaycommon.RemoveDisabledFields(jsonData, info.ChannelOtherSettings)
	//if err != nil {
	//	return testResult{
	//		context:     c,
	//		localErr:    err,
	//		newAPIError: types.NewError(err, types.ErrorCodeConvertRequestFailed),
	//	}
	//}
	if len(info.ParamOverride) > 0 {
		jsonData, err = relaycommon.ApplyParamOverride(jsonData, info.ParamOverride, relaycommon.BuildParamOverrideContext(info))
		if err != nil {
			return testResult{
				context:     c,
				localErr:    err,
				newAPIError: types.NewError(err, types.ErrorCodeChannelParamOverrideInvalid),
			}
		}
	}
	requestBody := bytes.NewBuffer(jsonData)
	c.Request.Body = io.NopCloser(bytes.NewBuffer(jsonData))
	resp, err := adaptor.DoRequest(c, info, requestBody)
	if err != nil {
		return testResult{
			context:     c,
			localErr:    err,
			newAPIError: types.NewOpenAIError(err, types.ErrorCodeDoRequestFailed, http.StatusInternalServerError),
		}
	}
	var httpResp *http.Response
	if resp != nil {
		httpResp = resp.(*http.Response)
		if httpResp.StatusCode != http.StatusOK {
			err := service.RelayErrorHandler(c.Request.Context(), httpResp, true)
			common.SysError(fmt.Sprintf(
				"channel test bad response: channel_id=%d name=%s type=%d model=%s endpoint_type=%s status=%d err=%v",
				channel.Id,
				channel.Name,
				channel.Type,
				testModel,
				endpointType,
				httpResp.StatusCode,
				err,
			))
			return testResult{
				context:     c,
				localErr:    err,
				newAPIError: types.NewOpenAIError(err, types.ErrorCodeBadResponse, http.StatusInternalServerError),
			}
		}
	}
	usageA, respErr := adaptor.DoResponse(c, httpResp, info)
	if respErr != nil {
		return testResult{
			context:     c,
			localErr:    respErr,
			newAPIError: respErr,
		}
	}
	usage, usageErr := coerceTestUsage(usageA, isStream, info.GetEstimatePromptTokens())
	if usageErr != nil {
		return testResult{
			context:     c,
			localErr:    usageErr,
			newAPIError: types.NewOpenAIError(usageErr, types.ErrorCodeBadResponseBody, http.StatusInternalServerError),
		}
	}
	result := w.Result()
	respBody, err := readTestResponseBody(result.Body, isStream)
	if err != nil {
		return testResult{
			context:     c,
			localErr:    err,
			newAPIError: types.NewOpenAIError(err, types.ErrorCodeReadResponseBodyFailed, http.StatusInternalServerError),
		}
	}
	if bodyErr := detectErrorFromTestResponseBody(respBody); bodyErr != nil {
		return testResult{
			context:     c,
			localErr:    bodyErr,
			newAPIError: types.NewOpenAIError(bodyErr, types.ErrorCodeBadResponseBody, http.StatusInternalServerError),
		}
	}
	info.SetEstimatePromptTokens(usage.PromptTokens)
	quota := 0
	if !priceData.UsePrice {
		quota = usage.PromptTokens + int(math.Round(float64(usage.CompletionTokens)*priceData.CompletionRatio))
		quota = int(math.Round(float64(quota) * priceData.ModelRatio))
		if priceData.ModelRatio != 0 && quota <= 0 {
			quota = 1
		}
	} else {
		quota = int(priceData.ModelPrice * common.QuotaPerUnit)
	}
	tok := time.Now()
	milliseconds := tok.Sub(tik).Milliseconds()
	consumedTime := float64(milliseconds) / 1000.0
	other := service.GenerateTextOtherInfo(c, info, priceData.ModelRatio, priceData.GroupRatioInfo.GroupRatio, priceData.CompletionRatio,
		usage.PromptTokensDetails.CachedTokens, priceData.CacheRatio, priceData.ModelPrice, priceData.GroupRatioInfo.GroupSpecialRatio)
	model.RecordConsumeLog(c, 1, model.RecordConsumeLogParams{
		ChannelId:        channel.Id,
		PromptTokens:     usage.PromptTokens,
		CompletionTokens: usage.CompletionTokens,
		ModelName:        info.OriginModelName,
		TokenName:        "模型测试",
		Quota:            quota,
		Content:          "模型测试",
		UseTimeSeconds:   int(consumedTime),
		IsStream:         info.IsStream,
		Group:            info.UsingGroup,
		Other:            other,
	})
	common.SysLog(fmt.Sprintf("testing channel #%d, response: \n%s", channel.Id, string(respBody)))
	return testResult{
		context:     c,
		localErr:    nil,
		newAPIError: nil,
	}
}
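
// coerceTestUsage normalizes the usage value returned by DoResponse. Stream
// tests may legitimately finish without usage, in which case the estimated
// prompt tokens are used; non-stream tests treat missing usage as an error.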
func coerceTestUsage(usageAny any, isStream bool, estimatePromptTokens int) (*dto.Usage, error) {
	switch u := usageAny.(type) {
	case *dto.Usage:
		return u, nil
	case dto.Usage:
		return &u, nil
	case nil:
		if !isStream {
			return nil, errors.New("usage is nil")
		}
		usage := &dto.Usage{
			PromptTokens: estimatePromptTokens,
		}
		usage.TotalTokens = usage.PromptTokens
		return usage, nil
	default:
		if !isStream {
			return nil, fmt.Errorf("invalid usage type: %T", usageAny)
		}
		usage := &dto.Usage{
			PromptTokens: estimatePromptTokens,
		}
		usage.TotalTokens = usage.PromptTokens
		return usage, nil
	}
}
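
// readTestResponseBody reads the recorded response body, capping stream
// responses at 8 KiB so the logged payload stays bounded.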
func readTestResponseBody(body io.ReadCloser, isStream bool) ([]byte, error) {
	defer func() { _ = body.Close() }()
	const maxStreamLogBytes = 8 << 10
	if isStream {
		return io.ReadAll(io.LimitReader(body, maxStreamLogBytes))
	}
	return io.ReadAll(body)
}
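
// detectErrorFromTestResponseBody scans a response body (plain JSON or SSE
// "data:" lines) for an upstream error payload and converts it to an error.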
func detectErrorFromTestResponseBody(respBody []byte) error {
	b := bytes.TrimSpace(respBody)
	if len(b) == 0 {
		return nil
	}
	if message := detectErrorMessageFromJSONBytes(b); message != "" {
		return fmt.Errorf("upstream error: %s", message)
	}
	for _, line := range bytes.Split(b, []byte{'\n'}) {
		line = bytes.TrimSpace(line)
		if len(line) == 0 {
			continue
		}
		if !bytes.HasPrefix(line, []byte("data:")) {
			continue
		}
		payload := bytes.TrimSpace(bytes.TrimPrefix(line, []byte("data:")))
		if len(payload) == 0 || bytes.Equal(payload, []byte("[DONE]")) {
			continue
		}
		if message := detectErrorMessageFromJSONBytes(payload); message != "" {
			return fmt.Errorf("upstream error: %s", message)
		}
	}
	return nil
}
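
// detectErrorMessageFromJSONBytes extracts a human-readable message from an
// "error" field in a JSON payload, falling back to the raw value when no
// message field is present. It returns "" when the payload carries no error.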
func detectErrorMessageFromJSONBytes(jsonBytes []byte) string {
	if len(jsonBytes) == 0 {
		return ""
	}
	if jsonBytes[0] != '{' && jsonBytes[0] != '[' {
		return ""
	}
	errVal := gjson.GetBytes(jsonBytes, "error")
	if !errVal.Exists() || errVal.Type == gjson.Null {
		return ""
	}
	message := gjson.GetBytes(jsonBytes, "error.message").String()
	if message == "" {
		message = gjson.GetBytes(jsonBytes, "error.error.message").String()
	}
	if message == "" && errVal.Type == gjson.String {
		message = errVal.String()
	}
	if message == "" {
		message = errVal.Raw
	}
	message = strings.TrimSpace(message)
	if message == "" {
		return "upstream returned error payload"
	}
	return message
}
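
// buildTestRequest constructs a minimal test request for the given endpoint
// type, or, when no endpoint type is specified, for the request type inferred
// from the model name (rerank, embedding, responses compaction, responses,
// or a plain chat completion).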
func buildTestRequest(model string, endpointType string, channel *model.Channel, isStream bool) dto.Request {
	testResponsesInput := json.RawMessage(`[{"role":"user","content":"hi"}]`)
	// Build a different test request depending on the endpoint type
	if endpointType != "" {
		switch constant.EndpointType(endpointType) {
		case constant.EndpointTypeEmbeddings:
			// Return an EmbeddingRequest
			return &dto.EmbeddingRequest{
				Model: model,
				Input: []any{"hello world"},
			}
		case constant.EndpointTypeImageGeneration:
			// Return an ImageRequest
			return &dto.ImageRequest{
				Model:  model,
				Prompt: "a cute cat",
				N:      1,
				Size:   "1024x1024",
			}
		case constant.EndpointTypeJinaRerank:
			// Return a RerankRequest
			return &dto.RerankRequest{
				Model:     model,
				Query:     "What is Deep Learning?",
				Documents: []any{"Deep Learning is a subset of machine learning.", "Machine learning is a field of artificial intelligence."},
				TopN:      2,
			}
		case constant.EndpointTypeOpenAIResponse:
			// Return an OpenAIResponsesRequest
			return &dto.OpenAIResponsesRequest{
				Model:  model,
				Input:  json.RawMessage(`[{"role":"user","content":"hi"}]`),
				Stream: isStream,
			}
		case constant.EndpointTypeOpenAIResponseCompact:
			// Return an OpenAIResponsesCompactionRequest
			return &dto.OpenAIResponsesCompactionRequest{
				Model: model,
				Input: testResponsesInput,
			}
		case constant.EndpointTypeAnthropic, constant.EndpointTypeGemini, constant.EndpointTypeOpenAI:
			// Return a GeneralOpenAIRequest
			maxTokens := uint(16)
			if constant.EndpointType(endpointType) == constant.EndpointTypeGemini {
				maxTokens = 3000
			}
			req := &dto.GeneralOpenAIRequest{
				Model:  model,
				Stream: isStream,
				Messages: []dto.Message{
					{
						Role:    "user",
						Content: "hi",
					},
				},
				MaxTokens: maxTokens,
			}
			if isStream {
				req.StreamOptions = &dto.StreamOptions{IncludeUsage: true}
			}
			return req
		}
	}
	// Auto-detection logic (preserves the original behavior)
	if strings.Contains(strings.ToLower(model), "rerank") {
		return &dto.RerankRequest{
			Model:     model,
			Query:     "What is Deep Learning?",
			Documents: []any{"Deep Learning is a subset of machine learning.", "Machine learning is a field of artificial intelligence."},
			TopN:      2,
		}
	}
	// Check for embedding models first
	if strings.Contains(strings.ToLower(model), "embedding") ||
		strings.HasPrefix(model, "m3e") ||
		strings.Contains(model, "bge-") {
		// Return an EmbeddingRequest
		return &dto.EmbeddingRequest{
			Model: model,
			Input: []any{"hello world"},
		}
	}
	// Responses compaction models (must use /v1/responses/compact)
	if strings.HasSuffix(model, ratio_setting.CompactModelSuffix) {
		return &dto.OpenAIResponsesCompactionRequest{
			Model: model,
			Input: testResponsesInput,
		}
	}
	// Responses-only models (e.g. codex series)
	if strings.Contains(strings.ToLower(model), "codex") {
		return &dto.OpenAIResponsesRequest{
			Model:  model,
			Input:  json.RawMessage(`[{"role":"user","content":"hi"}]`),
			Stream: isStream,
		}
	}
	// Chat/completion request - return a GeneralOpenAIRequest
	testRequest := &dto.GeneralOpenAIRequest{
		Model:  model,
		Stream: isStream,
		Messages: []dto.Message{
			{
				Role:    "user",
				Content: "hi",
			},
		},
	}
	if isStream {
		testRequest.StreamOptions = &dto.StreamOptions{IncludeUsage: true}
	}
	if strings.HasPrefix(model, "o") {
		testRequest.MaxCompletionTokens = 16
	} else if strings.Contains(model, "thinking") {
		if !strings.Contains(model, "claude") {
			testRequest.MaxTokens = 50
		}
	} else if strings.Contains(model, "gemini") {
		testRequest.MaxTokens = 3000
	} else {
		testRequest.MaxTokens = 16
	}
	return testRequest
}
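
// TestChannel handles the single-channel test endpoint. It reads the channel
// id from the :id path parameter and the optional model, endpoint_type and
// stream query parameters, runs testChannel, and reports success plus the
// elapsed time in seconds. A typical call (the route prefix depends on how
// the router registers this handler, assumed here) looks like:
//
//	GET /api/channel/test/123?model=gpt-4o-mini&stream=true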
func TestChannel(c *gin.Context) {
	channelId, err := strconv.Atoi(c.Param("id"))
	if err != nil {
		common.ApiError(c, err)
		return
	}
	channel, err := model.CacheGetChannel(channelId)
	if err != nil {
		channel, err = model.GetChannelById(channelId, true)
		if err != nil {
			common.ApiError(c, err)
			return
		}
	}
	//defer func() {
	//	if channel.ChannelInfo.IsMultiKey {
	//		go func() { _ = channel.SaveChannelInfo() }()
	//	}
	//}()
	testModel := c.Query("model")
	endpointType := c.Query("endpoint_type")
	isStream, _ := strconv.ParseBool(c.Query("stream"))
	tik := time.Now()
	result := testChannel(channel, testModel, endpointType, isStream)
	if result.localErr != nil {
		c.JSON(http.StatusOK, gin.H{
			"success": false,
			"message": result.localErr.Error(),
			"time":    0.0,
		})
		return
	}
	tok := time.Now()
	milliseconds := tok.Sub(tik).Milliseconds()
	go channel.UpdateResponseTime(milliseconds)
	consumedTime := float64(milliseconds) / 1000.0
	if result.newAPIError != nil {
		c.JSON(http.StatusOK, gin.H{
			"success": false,
			"message": result.newAPIError.Error(),
			"time":    consumedTime,
		})
		return
	}
	c.JSON(http.StatusOK, gin.H{
		"success": true,
		"message": "",
		"time":    consumedTime,
	})
}

var testAllChannelsLock sync.Mutex
var testAllChannelsRunning bool = false
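
// testAllChannels tests every channel in the background, disabling or
// re-enabling channels based on the result and the measured response time.
// Only one run may be active at a time; testAllChannelsRunning guards re-entry.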
func testAllChannels(notify bool) error {
	testAllChannelsLock.Lock()
	if testAllChannelsRunning {
		testAllChannelsLock.Unlock()
		return errors.New("测试已在运行中")
	}
	testAllChannelsRunning = true
	testAllChannelsLock.Unlock()
	channels, getChannelErr := model.GetAllChannels(0, 0, true, false)
	if getChannelErr != nil {
		// Reset the running flag so a failed channel fetch does not block future runs
		testAllChannelsLock.Lock()
		testAllChannelsRunning = false
		testAllChannelsLock.Unlock()
		return getChannelErr
	}
	var disableThreshold = int64(common.ChannelDisableThreshold * 1000)
	if disableThreshold == 0 {
		disableThreshold = 10000000 // an impossible value
	}
	gopool.Go(func() {
		// Use defer so the running flag is always reset, preventing a deadlock
		defer func() {
			testAllChannelsLock.Lock()
			testAllChannelsRunning = false
			testAllChannelsLock.Unlock()
		}()
		for _, channel := range channels {
			isChannelEnabled := channel.Status == common.ChannelStatusEnabled
			tik := time.Now()
			result := testChannel(channel, "", "", false)
			tok := time.Now()
			milliseconds := tok.Sub(tik).Milliseconds()
			shouldBanChannel := false
			newAPIError := result.newAPIError
			// request error disables the channel
			if newAPIError != nil {
				shouldBanChannel = service.ShouldDisableChannel(channel.Type, result.newAPIError)
			}
			// Only check the response time when the error check passes
			if common.AutomaticDisableChannelEnabled && !shouldBanChannel {
				if milliseconds > disableThreshold {
					err := fmt.Errorf("响应时间 %.2fs 超过阈值 %.2fs", float64(milliseconds)/1000.0, float64(disableThreshold)/1000.0)
					newAPIError = types.NewOpenAIError(err, types.ErrorCodeChannelResponseTimeExceeded, http.StatusRequestTimeout)
					shouldBanChannel = true
				}
			}
			// disable channel
			if isChannelEnabled && shouldBanChannel && channel.GetAutoBan() {
				processChannelError(result.context, *types.NewChannelError(channel.Id, channel.Type, channel.Name, channel.ChannelInfo.IsMultiKey, common.GetContextKeyString(result.context, constant.ContextKeyChannelKey), channel.GetAutoBan()), newAPIError)
			}
			// enable channel
			if !isChannelEnabled && service.ShouldEnableChannel(newAPIError, channel.Status) {
				service.EnableChannel(channel.Id, common.GetContextKeyString(result.context, constant.ContextKeyChannelKey), channel.Name)
			}
			channel.UpdateResponseTime(milliseconds)
			time.Sleep(common.RequestInterval)
		}
		if notify {
			service.NotifyRootUser(dto.NotifyTypeChannelTest, "通道测试完成", "所有通道测试已完成")
		}
	})
	return nil
}
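
// TestAllChannels triggers a background test of all channels and notifies the
// root user when the run finishes.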
func TestAllChannels(c *gin.Context) {
	err := testAllChannels(true)
	if err != nil {
		common.ApiError(c, err)
		return
	}
	c.JSON(http.StatusOK, gin.H{
		"success": true,
		"message": "",
	})
}

var autoTestChannelsOnce sync.Once
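
// AutomaticallyTestChannels periodically runs testAllChannels on the master
// node, using the interval configured in the monitor settings.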
func AutomaticallyTestChannels() {
	// Only run scheduled channel tests on the master node
	if !common.IsMasterNode {
		return
	}
	autoTestChannelsOnce.Do(func() {
		for {
			if !operation_setting.GetMonitorSetting().AutoTestChannelEnabled {
				time.Sleep(1 * time.Minute)
				continue
			}
			for {
				frequency := operation_setting.GetMonitorSetting().AutoTestChannelMinutes
				time.Sleep(time.Duration(int(math.Round(frequency))) * time.Minute)
				common.SysLog(fmt.Sprintf("automatically test channels with interval %f minutes", frequency))
				common.SysLog("automatically testing all channels")
				_ = testAllChannels(false)
				common.SysLog("automatic channel test finished")
				if !operation_setting.GetMonitorSetting().AutoTestChannelEnabled {
					break
				}
			}
		}
	})
}