cache.go

package model

import (
	"context"
	"encoding"
	"errors"
	"fmt"
	"math/rand/v2"
	"slices"
	"sort"
	"sync"
	"sync/atomic"
	"time"

	"github.com/bytedance/sonic"
	"github.com/labring/aiproxy/common"
	"github.com/labring/aiproxy/common/config"
	"github.com/labring/aiproxy/common/conv"
	"github.com/labring/aiproxy/common/notify"
	"github.com/maruel/natural"
	"github.com/redis/go-redis/v9"
	log "github.com/sirupsen/logrus"
)

const (
	SyncFrequency    = time.Minute * 3
	TokenCacheKey    = "token:%s"
	GroupCacheKey    = "group:%s"
	GroupModelTPMKey = "group:%s:model_tpm"
)
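
// SyncFrequency doubles as the base TTL for token and group hashes; CacheSetToken and
// CacheSetGroup add roughly ±30s of random jitter on top of it so entries do not all
// expire at once. The key constants are fmt.Sprintf formats, e.g. TokenCacheKey with
// key "abc" yields "token:abc".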

var (
	_ encoding.BinaryMarshaler = (*redisStringSlice)(nil)
	_ redis.Scanner            = (*redisStringSlice)(nil)
)

type redisStringSlice []string

func (r *redisStringSlice) ScanRedis(value string) error {
	return sonic.Unmarshal(conv.StringToBytes(value), r)
}

func (r redisStringSlice) MarshalBinary() ([]byte, error) {
	return sonic.Marshal(r)
}

type redisTime time.Time

var (
	_ redis.Scanner            = (*redisTime)(nil)
	_ encoding.BinaryMarshaler = (*redisTime)(nil)
)

func (t *redisTime) ScanRedis(value string) error {
	return (*time.Time)(t).UnmarshalBinary(conv.StringToBytes(value))
}

func (t redisTime) MarshalBinary() ([]byte, error) {
	return time.Time(t).MarshalBinary()
}
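
// TokenCache is the Redis-hash projection of a Token. The short redis tags keep the
// hash fields compact; Key is deliberately not stored (redis:"-") and is filled back
// in from the lookup key on a cache hit.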
type TokenCache struct {
	ExpiredAt  redisTime        `json:"expired_at" redis:"e"`
	Group      string           `json:"group" redis:"g"`
	Key        string           `json:"-" redis:"-"`
	Name       string           `json:"name" redis:"n"`
	Subnets    redisStringSlice `json:"subnets" redis:"s"`
	Models     redisStringSlice `json:"models" redis:"m"`
	ID         int              `json:"id" redis:"i"`
	Status     int              `json:"status" redis:"st"`
	Quota      float64          `json:"quota" redis:"q"`
	UsedAmount float64          `json:"used_amount" redis:"u"`
}

func (t *Token) ToTokenCache() *TokenCache {
	return &TokenCache{
		ID:         t.ID,
		Group:      t.GroupID,
		Key:        t.Key,
		Name:       t.Name.String(),
		Models:     t.Models,
		Subnets:    t.Subnets,
		Status:     t.Status,
		ExpiredAt:  redisTime(t.ExpiredAt),
		Quota:      t.Quota,
		UsedAmount: t.UsedAmount,
	}
}

func CacheDeleteToken(key string) error {
	if !common.RedisEnabled {
		return nil
	}
	return common.RedisDel(fmt.Sprintf(TokenCacheKey, key))
}

//nolint:gosec
func CacheSetToken(token *TokenCache) error {
	if !common.RedisEnabled {
		return nil
	}
	key := fmt.Sprintf(TokenCacheKey, token.Key)
	pipe := common.RDB.Pipeline()
	pipe.HSet(context.Background(), key, token)
	expireTime := SyncFrequency + time.Duration(rand.Int64N(60)-30)*time.Second
	pipe.Expire(context.Background(), key, expireTime)
	_, err := pipe.Exec(context.Background())
	return err
}
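
// CacheGetTokenByKey is a read-through lookup: it tries the Redis hash first, falls
// back to the database on a miss (or on a scan error), and then best-effort
// repopulates the cache. A sketch of the calling pattern, with a purely hypothetical
// key value:
//
//	tc, err := CacheGetTokenByKey("sk-example")
//	if err != nil {
//		// token not found or database error
//	}
//	_ = tc.Group // ID of the group the token belongs to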
func CacheGetTokenByKey(key string) (*TokenCache, error) {
	if !common.RedisEnabled {
		token, err := GetTokenByKey(key)
		if err != nil {
			return nil, err
		}
		return token.ToTokenCache(), nil
	}
	cacheKey := fmt.Sprintf(TokenCacheKey, key)
	tokenCache := &TokenCache{}
	err := common.RDB.HGetAll(context.Background(), cacheKey).Scan(tokenCache)
	if err == nil && tokenCache.ID != 0 {
		tokenCache.Key = key
		return tokenCache, nil
	} else if err != nil && !errors.Is(err, redis.Nil) {
		log.Errorf("get token (%s) from redis error: %s", key, err.Error())
	}
	token, err := GetTokenByKey(key)
	if err != nil {
		return nil, err
	}
	tc := token.ToTokenCache()
	if err := CacheSetToken(tc); err != nil {
		log.Error("redis set token error: " + err.Error())
	}
	return tc, nil
}
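
// The Lua scripts below update a single hash field, and only when the entry already
// exists in Redis, so a partially populated cache hash is never created; the
// used-amount variant additionally refuses to move the value backwards.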

var updateTokenUsedAmountOnlyIncreaseScript = redis.NewScript(`
local used_amount = redis.call("HGet", KEYS[1], "u")
if used_amount == false then
	return redis.status_reply("ok")
end
if tonumber(ARGV[1]) < tonumber(used_amount) then
	return redis.status_reply("ok")
end
redis.call("HSet", KEYS[1], "u", ARGV[1])
return redis.status_reply("ok")
`)

func CacheUpdateTokenUsedAmountOnlyIncrease(key string, amount float64) error {
	if !common.RedisEnabled {
		return nil
	}
	return updateTokenUsedAmountOnlyIncreaseScript.Run(context.Background(), common.RDB, []string{fmt.Sprintf(TokenCacheKey, key)}, amount).Err()
}

var updateTokenNameScript = redis.NewScript(`
if redis.call("HExists", KEYS[1], "n") == 1 then
	redis.call("HSet", KEYS[1], "n", ARGV[1])
end
return redis.status_reply("ok")
`)

func CacheUpdateTokenName(key string, name string) error {
	if !common.RedisEnabled {
		return nil
	}
	return updateTokenNameScript.Run(context.Background(), common.RDB, []string{fmt.Sprintf(TokenCacheKey, key)}, name).Err()
}

var updateTokenStatusScript = redis.NewScript(`
if redis.call("HExists", KEYS[1], "st") == 1 then
	redis.call("HSet", KEYS[1], "st", ARGV[1])
end
return redis.status_reply("ok")
`)

func CacheUpdateTokenStatus(key string, status int) error {
	if !common.RedisEnabled {
		return nil
	}
	return updateTokenStatusScript.Run(context.Background(), common.RDB, []string{fmt.Sprintf(TokenCacheKey, key)}, status).Err()
}
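
// redisMapStringInt64 stores a map[string]int64 (per-model RPM/TPM limits) as a single
// JSON-encoded hash field.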
type redisMapStringInt64 map[string]int64

var (
	_ redis.Scanner            = (*redisMapStringInt64)(nil)
	_ encoding.BinaryMarshaler = (*redisMapStringInt64)(nil)
)

func (r *redisMapStringInt64) ScanRedis(value string) error {
	return sonic.Unmarshal(conv.StringToBytes(value), r)
}

func (r redisMapStringInt64) MarshalBinary() ([]byte, error) {
	return sonic.Marshal(r)
}

type GroupCache struct {
	ID         string              `json:"-" redis:"-"`
	Status     int                 `json:"status" redis:"st"`
	UsedAmount float64             `json:"used_amount" redis:"ua"`
	RPMRatio   float64             `json:"rpm_ratio" redis:"rpm_r"`
	RPM        redisMapStringInt64 `json:"rpm" redis:"rpm"`
	TPMRatio   float64             `json:"tpm_ratio" redis:"tpm_r"`
	TPM        redisMapStringInt64 `json:"tpm" redis:"tpm"`
}

func (g *Group) ToGroupCache() *GroupCache {
	return &GroupCache{
		ID:         g.ID,
		Status:     g.Status,
		UsedAmount: g.UsedAmount,
		RPMRatio:   g.RPMRatio,
		RPM:        g.RPM,
		TPMRatio:   g.TPMRatio,
		TPM:        g.TPM,
	}
}

func CacheDeleteGroup(id string) error {
	if !common.RedisEnabled {
		return nil
	}
	return common.RedisDel(fmt.Sprintf(GroupCacheKey, id))
}

var updateGroupRPMRatioScript = redis.NewScript(`
if redis.call("HExists", KEYS[1], "rpm_r") == 1 then
	redis.call("HSet", KEYS[1], "rpm_r", ARGV[1])
end
return redis.status_reply("ok")
`)

func CacheUpdateGroupRPMRatio(id string, rpmRatio float64) error {
	if !common.RedisEnabled {
		return nil
	}
	return updateGroupRPMRatioScript.Run(context.Background(), common.RDB, []string{fmt.Sprintf(GroupCacheKey, id)}, rpmRatio).Err()
}

var updateGroupRPMScript = redis.NewScript(`
if redis.call("HExists", KEYS[1], "rpm") == 1 then
	redis.call("HSet", KEYS[1], "rpm", ARGV[1])
end
return redis.status_reply("ok")
`)

func CacheUpdateGroupRPM(id string, rpm map[string]int64) error {
	if !common.RedisEnabled {
		return nil
	}
	jsonRPM, err := sonic.Marshal(rpm)
	if err != nil {
		return err
	}
	return updateGroupRPMScript.Run(context.Background(), common.RDB, []string{fmt.Sprintf(GroupCacheKey, id)}, conv.BytesToString(jsonRPM)).Err()
}

var updateGroupTPMRatioScript = redis.NewScript(`
if redis.call("HExists", KEYS[1], "tpm_r") == 1 then
	redis.call("HSet", KEYS[1], "tpm_r", ARGV[1])
end
return redis.status_reply("ok")
`)

func CacheUpdateGroupTPMRatio(id string, tpmRatio float64) error {
	if !common.RedisEnabled {
		return nil
	}
	return updateGroupTPMRatioScript.Run(context.Background(), common.RDB, []string{fmt.Sprintf(GroupCacheKey, id)}, tpmRatio).Err()
}

var updateGroupTPMScript = redis.NewScript(`
if redis.call("HExists", KEYS[1], "tpm") == 1 then
	redis.call("HSet", KEYS[1], "tpm", ARGV[1])
end
return redis.status_reply("ok")
`)

func CacheUpdateGroupTPM(id string, tpm map[string]int64) error {
	if !common.RedisEnabled {
		return nil
	}
	jsonTPM, err := sonic.Marshal(tpm)
	if err != nil {
		return err
	}
	return updateGroupTPMScript.Run(context.Background(), common.RDB, []string{fmt.Sprintf(GroupCacheKey, id)}, conv.BytesToString(jsonTPM)).Err()
}

var updateGroupStatusScript = redis.NewScript(`
if redis.call("HExists", KEYS[1], "st") == 1 then
	redis.call("HSet", KEYS[1], "st", ARGV[1])
end
return redis.status_reply("ok")
`)

func CacheUpdateGroupStatus(id string, status int) error {
	if !common.RedisEnabled {
		return nil
	}
	return updateGroupStatusScript.Run(context.Background(), common.RDB, []string{fmt.Sprintf(GroupCacheKey, id)}, status).Err()
}

//nolint:gosec
func CacheSetGroup(group *GroupCache) error {
	if !common.RedisEnabled {
		return nil
	}
	key := fmt.Sprintf(GroupCacheKey, group.ID)
	pipe := common.RDB.Pipeline()
	pipe.HSet(context.Background(), key, group)
	expireTime := SyncFrequency + time.Duration(rand.Int64N(60)-30)*time.Second
	pipe.Expire(context.Background(), key, expireTime)
	_, err := pipe.Exec(context.Background())
	return err
}
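
// CacheGetGroup mirrors CacheGetTokenByKey: Redis first, database fallback, then a
// best-effort cache fill. A scanned Status of zero is treated as a miss, so an empty
// hash never shadows a real group.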
func CacheGetGroup(id string) (*GroupCache, error) {
	if !common.RedisEnabled {
		group, err := GetGroupByID(id)
		if err != nil {
			return nil, err
		}
		return group.ToGroupCache(), nil
	}
	cacheKey := fmt.Sprintf(GroupCacheKey, id)
	groupCache := &GroupCache{}
	err := common.RDB.HGetAll(context.Background(), cacheKey).Scan(groupCache)
	if err == nil && groupCache.Status != 0 {
		groupCache.ID = id
		return groupCache, nil
	} else if err != nil && !errors.Is(err, redis.Nil) {
		log.Errorf("get group (%s) from redis error: %s", id, err.Error())
	}
	group, err := GetGroupByID(id)
	if err != nil {
		return nil, err
	}
	gc := group.ToGroupCache()
	if err := CacheSetGroup(gc); err != nil {
		log.Error("redis set group error: " + err.Error())
	}
	return gc, nil
}

var updateGroupUsedAmountOnlyIncreaseScript = redis.NewScript(`
local used_amount = redis.call("HGet", KEYS[1], "ua")
if used_amount == false then
	return redis.status_reply("ok")
end
if tonumber(ARGV[1]) < tonumber(used_amount) then
	return redis.status_reply("ok")
end
redis.call("HSet", KEYS[1], "ua", ARGV[1])
return redis.status_reply("ok")
`)

func CacheUpdateGroupUsedAmountOnlyIncrease(id string, amount float64) error {
	if !common.RedisEnabled {
		return nil
	}
	return updateGroupUsedAmountOnlyIncreaseScript.Run(context.Background(), common.RDB, []string{fmt.Sprintf(GroupCacheKey, id)}, amount).Err()
}
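
// CacheGetGroupModelTPM caches per-model TPM readings for a group in a very
// short-lived hash (a few seconds), trading a little staleness for far fewer
// database reads.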

//nolint:gosec
func CacheGetGroupModelTPM(id string, model string) (int64, error) {
	if !common.RedisEnabled {
		return GetGroupModelTPM(id, model)
	}
	cacheKey := fmt.Sprintf(GroupModelTPMKey, id)
	tpm, err := common.RDB.HGet(context.Background(), cacheKey, model).Int64()
	if err == nil {
		return tpm, nil
	} else if !errors.Is(err, redis.Nil) {
		log.Errorf("get group model tpm (%s:%s) from redis error: %s", id, model, err.Error())
	}
	tpm, err = GetGroupModelTPM(id, model)
	if err != nil {
		return 0, err
	}
	pipe := common.RDB.Pipeline()
	pipe.HSet(context.Background(), cacheKey, model, tpm)
	// expire after 2-4 seconds (jittered)
	pipe.Expire(context.Background(), cacheKey, 2*time.Second+time.Duration(rand.Int64N(3))*time.Second)
	_, err = pipe.Exec(context.Background())
	if err != nil {
		log.Errorf("set group model tpm (%s:%s) to redis error: %s", id, model, err.Error())
	}
	return tpm, nil
}

//nolint:revive
type ModelConfigCache interface {
	GetModelConfig(model string) (*ModelConfig, bool)
}

// read-only cache
//
//nolint:revive
type ModelCaches struct {
	ModelConfig                     ModelConfigCache
	EnabledModel2channels           map[string][]*Channel
	EnabledModels                   []string
	EnabledModelsMap                map[string]struct{}
	EnabledModelConfigs             []*ModelConfig
	EnabledModelConfigsMap          map[string]*ModelConfig
	EnabledChannelType2ModelConfigs map[int][]*ModelConfig
	EnabledChannelID2channel        map[int]*Channel
}

var modelCaches atomic.Pointer[ModelCaches]

func init() {
	modelCaches.Store(new(ModelCaches))
}

func LoadModelCaches() *ModelCaches {
	return modelCaches.Load()
}
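
// Illustrative read path (the model name below is a placeholder, not a value from
// this package):
//
//	caches := LoadModelCaches()
//	if _, ok := caches.EnabledModelsMap["some-model"]; ok {
//		// the model is currently served by at least one enabled channel
//	}
//	_ = caches.EnabledModel2channels["some-model"] // channels able to serve it, highest priority first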

// InitModelConfigAndChannelCache initializes the model config and channel caches from the database
func InitModelConfigAndChannelCache() error {
	modelConfig, err := initializeModelConfigCache()
	if err != nil {
		return err
	}

	// Load enabled channels from the database
	newEnabledChannels, err := LoadEnabledChannels()
	if err != nil {
		return err
	}

	// Build channel ID to channel map
	newEnabledChannelID2channel := buildChannelIDMap(newEnabledChannels)

	// Build model to channels map
	newEnabledModel2channels := buildModelToChannelsMap(newEnabledChannels)

	// Sort channels by priority
	sortChannelsByPriority(newEnabledModel2channels)

	// Build channel type to model configs map
	newEnabledChannelType2ModelConfigs := buildChannelTypeToModelConfigsMap(newEnabledChannels, modelConfig)

	// Build enabled models and configs lists
	newEnabledModels, newEnabledModelsMap, newEnabledModelConfigs, newEnabledModelConfigsMap := buildEnabledModelsAndConfigs(newEnabledChannelType2ModelConfigs)

	// Update the global cache atomically
	modelCaches.Store(&ModelCaches{
		ModelConfig:                     modelConfig,
		EnabledModel2channels:           newEnabledModel2channels,
		EnabledModels:                   newEnabledModels,
		EnabledModelsMap:                newEnabledModelsMap,
		EnabledModelConfigs:             newEnabledModelConfigs,
		EnabledModelConfigsMap:          newEnabledModelConfigsMap,
		EnabledChannelType2ModelConfigs: newEnabledChannelType2ModelConfigs,
		EnabledChannelID2channel:        newEnabledChannelID2channel,
	})

	return nil
}

func LoadEnabledChannels() ([]*Channel, error) {
	var channels []*Channel
	err := DB.Where("status = ?", ChannelStatusEnabled).Find(&channels).Error
	if err != nil {
		return nil, err
	}
	for _, channel := range channels {
		initializeChannelModels(channel)
		initializeChannelModelMapping(channel)
	}
	return channels, nil
}

func LoadChannels() ([]*Channel, error) {
	var channels []*Channel
	err := DB.Find(&channels).Error
	if err != nil {
		return nil, err
	}
	for _, channel := range channels {
		initializeChannelModels(channel)
		initializeChannelModelMapping(channel)
	}
	return channels, nil
}

func LoadChannelByID(id int) (*Channel, error) {
	var channel Channel
	err := DB.First(&channel, id).Error
	if err != nil {
		return nil, err
	}
	initializeChannelModels(&channel)
	initializeChannelModelMapping(&channel)
	return &channel, nil
}
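
// Two ModelConfigCache implementations follow: modelConfigMapCache serves configs
// loaded from the database, and disabledModelConfigCache wraps it to hand back a
// default config for unknown models when model-config enforcement is disabled.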
var _ ModelConfigCache = (*modelConfigMapCache)(nil)

type modelConfigMapCache struct {
	modelConfigMap map[string]*ModelConfig
}

func (m *modelConfigMapCache) GetModelConfig(model string) (*ModelConfig, bool) {
	config, ok := m.modelConfigMap[model]
	return config, ok
}

var _ ModelConfigCache = (*disabledModelConfigCache)(nil)

type disabledModelConfigCache struct {
	modelConfigs ModelConfigCache
}

func (d *disabledModelConfigCache) GetModelConfig(model string) (*ModelConfig, bool) {
	if config, ok := d.modelConfigs.GetModelConfig(model); ok {
		return config, true
	}
	return NewDefaultModelConfig(model), true
}

func initializeModelConfigCache() (ModelConfigCache, error) {
	modelConfigs, err := GetAllModelConfigs()
	if err != nil {
		return nil, err
	}
	newModelConfigMap := make(map[string]*ModelConfig)
	for _, modelConfig := range modelConfigs {
		newModelConfigMap[modelConfig.Model] = modelConfig
	}
	configs := &modelConfigMapCache{modelConfigMap: newModelConfigMap}
	if config.GetDisableModelConfig() {
		return &disabledModelConfigCache{modelConfigs: configs}, nil
	}
	return configs, nil
}

func initializeChannelModels(channel *Channel) {
	if len(channel.Models) == 0 {
		channel.Models = config.GetDefaultChannelModels()[channel.Type]
		return
	}
	foundModels, missingModels, err := GetModelConfigWithModels(channel.Models)
	if err != nil {
		return
	}
	if len(missingModels) > 0 {
		slices.Sort(missingModels)
		log.Errorf("model config not found: %v", missingModels)
	}
	slices.Sort(foundModels)
	channel.Models = foundModels
}

func initializeChannelModelMapping(channel *Channel) {
	if len(channel.ModelMapping) == 0 {
		channel.ModelMapping = config.GetDefaultChannelModelMapping()[channel.Type]
	}
}

func buildChannelIDMap(channels []*Channel) map[int]*Channel {
	channelMap := make(map[int]*Channel)
	for _, channel := range channels {
		channelMap[channel.ID] = channel
	}
	return channelMap
}

func buildModelToChannelsMap(channels []*Channel) map[string][]*Channel {
	modelMap := make(map[string][]*Channel)
	for _, channel := range channels {
		for _, model := range channel.Models {
			modelMap[model] = append(modelMap[model], channel)
		}
	}
	return modelMap
}

func sortChannelsByPriority(modelMap map[string][]*Channel) {
	for _, channels := range modelMap {
		sort.Slice(channels, func(i, j int) bool {
			return channels[i].GetPriority() > channels[j].GetPriority()
		})
	}
}
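
// buildChannelTypeToModelConfigsMap collects the model configs reachable through each
// channel type, then sorts them and compacts adjacent entries with the same Model so
// every type lists a model at most once.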
func buildChannelTypeToModelConfigsMap(channels []*Channel, modelConfigMap ModelConfigCache) map[int][]*ModelConfig {
	typeMap := make(map[int][]*ModelConfig)
	for _, channel := range channels {
		if _, ok := typeMap[channel.Type]; !ok {
			typeMap[channel.Type] = make([]*ModelConfig, 0, len(channel.Models))
		}
		configs := typeMap[channel.Type]
		for _, model := range channel.Models {
			if config, ok := modelConfigMap.GetModelConfig(model); ok {
				configs = append(configs, config)
			}
		}
		typeMap[channel.Type] = configs
	}
	for key, configs := range typeMap {
		slices.SortStableFunc(configs, SortModelConfigsFunc)
		typeMap[key] = slices.CompactFunc(configs, func(e1, e2 *ModelConfig) bool {
			return e1.Model == e2.Model
		})
	}
	return typeMap
}

func buildEnabledModelsAndConfigs(typeMap map[int][]*ModelConfig) ([]string, map[string]struct{}, []*ModelConfig, map[string]*ModelConfig) {
	models := make([]string, 0)
	configs := make([]*ModelConfig, 0)
	appended := make(map[string]struct{})
	modelConfigsMap := make(map[string]*ModelConfig)
	for _, modelConfigs := range typeMap {
		for _, config := range modelConfigs {
			if _, ok := appended[config.Model]; ok {
				continue
			}
			models = append(models, config.Model)
			configs = append(configs, config)
			appended[config.Model] = struct{}{}
			modelConfigsMap[config.Model] = config
		}
	}
	slices.Sort(models)
	slices.SortStableFunc(configs, SortModelConfigsFunc)
	return models, appended, configs, modelConfigsMap
}

func SortModelConfigsFunc(i, j *ModelConfig) int {
	if i.Owner != j.Owner {
		if natural.Less(string(i.Owner), string(j.Owner)) {
			return -1
		}
		return 1
	}
	if i.Type != j.Type {
		if i.Type < j.Type {
			return -1
		}
		return 1
	}
	if i.Model == j.Model {
		return 0
	}
	if natural.Less(i.Model, j.Model) {
		return -1
	}
	return 1
}
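
// SyncModelConfigAndChannelCache periodically rebuilds the model/channel cache until
// the context is cancelled. A sketch of how a caller might start it (the caller shown
// here is illustrative, not taken from this file):
//
//	ctx, cancel := context.WithCancel(context.Background())
//	var wg sync.WaitGroup
//	wg.Add(1)
//	go SyncModelConfigAndChannelCache(ctx, &wg, SyncFrequency)
//	// ... on shutdown:
//	cancel()
//	wg.Wait()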
func SyncModelConfigAndChannelCache(ctx context.Context, wg *sync.WaitGroup, frequency time.Duration) {
	defer wg.Done()
	ticker := time.NewTicker(frequency)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			err := InitModelConfigAndChannelCache()
			if err != nil {
				notify.ErrorThrottle("syncModelChannel", time.Minute, "failed to sync channels", err.Error())
			}
		}
	}
}