cache.go

package model

import (
	"context"
	"encoding"
	"errors"
	"fmt"
	"math/rand/v2"
	"slices"
	"sort"
	"sync"
	"sync/atomic"
	"time"

	"github.com/bytedance/sonic"
	"github.com/labring/sealos/service/aiproxy/common"
	"github.com/labring/sealos/service/aiproxy/common/config"
	"github.com/labring/sealos/service/aiproxy/common/conv"
	"github.com/labring/sealos/service/aiproxy/common/notify"
	"github.com/maruel/natural"
	"github.com/redis/go-redis/v9"
	log "github.com/sirupsen/logrus"
)

const (
	SyncFrequency    = time.Minute * 3
	TokenCacheKey    = "token:%s"
	GroupCacheKey    = "group:%s"
	GroupModelTPMKey = "group:%s:model_tpm"
)

var (
	_ encoding.BinaryMarshaler = (*redisStringSlice)(nil)
	_ redis.Scanner            = (*redisStringSlice)(nil)
)
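
// redisStringSlice stores a []string as a single JSON-encoded hash field,
// implementing redis.Scanner for reads and encoding.BinaryMarshaler for writes.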
type redisStringSlice []string

func (r *redisStringSlice) ScanRedis(value string) error {
	return sonic.Unmarshal(conv.StringToBytes(value), r)
}

func (r redisStringSlice) MarshalBinary() ([]byte, error) {
	return sonic.Marshal(r)
}
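
// redisTime wraps time.Time so it round-trips through a redis hash field
// using time.Time's binary encoding.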
type redisTime time.Time

var (
	_ redis.Scanner            = (*redisTime)(nil)
	_ encoding.BinaryMarshaler = (*redisTime)(nil)
)

func (t *redisTime) ScanRedis(value string) error {
	return (*time.Time)(t).UnmarshalBinary(conv.StringToBytes(value))
}

func (t redisTime) MarshalBinary() ([]byte, error) {
	return time.Time(t).MarshalBinary()
}
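
// TokenCache is the subset of Token cached in redis as a hash. Key is never
// stored in the hash itself; it is filled in from the cache key on reads.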
type TokenCache struct {
	ExpiredAt  redisTime        `json:"expired_at" redis:"e"`
	Group      string           `json:"group" redis:"g"`
	Key        string           `json:"-" redis:"-"`
	Name       string           `json:"name" redis:"n"`
	Subnets    redisStringSlice `json:"subnets" redis:"s"`
	Models     redisStringSlice `json:"models" redis:"m"`
	ID         int              `json:"id" redis:"i"`
	Status     int              `json:"status" redis:"st"`
	Quota      float64          `json:"quota" redis:"q"`
	UsedAmount float64          `json:"used_amount" redis:"u"`
}

func (t *Token) ToTokenCache() *TokenCache {
	return &TokenCache{
		ID:         t.ID,
		Group:      t.GroupID,
		Key:        t.Key,
		Name:       t.Name.String(),
		Models:     t.Models,
		Subnets:    t.Subnets,
		Status:     t.Status,
		ExpiredAt:  redisTime(t.ExpiredAt),
		Quota:      t.Quota,
		UsedAmount: t.UsedAmount,
	}
}

func CacheDeleteToken(key string) error {
	if !common.RedisEnabled {
		return nil
	}
	return common.RedisDel(fmt.Sprintf(TokenCacheKey, key))
}
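
// CacheSetToken writes the token hash and gives it a jittered TTL around
// SyncFrequency so cached tokens do not all expire at the same moment.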
//nolint:gosec
func CacheSetToken(token *TokenCache) error {
	if !common.RedisEnabled {
		return nil
	}
	key := fmt.Sprintf(TokenCacheKey, token.Key)
	pipe := common.RDB.Pipeline()
	pipe.HSet(context.Background(), key, token)
	expireTime := SyncFrequency + time.Duration(rand.Int64N(60)-30)*time.Second
	pipe.Expire(context.Background(), key, expireTime)
	_, err := pipe.Exec(context.Background())
	return err
}
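
// CacheGetTokenByKey is a cache-aside read: it returns the cached hash when
// present, otherwise it falls back to the database and repopulates the cache.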
func CacheGetTokenByKey(key string) (*TokenCache, error) {
	if !common.RedisEnabled {
		token, err := GetTokenByKey(key)
		if err != nil {
			return nil, err
		}
		return token.ToTokenCache(), nil
	}
	cacheKey := fmt.Sprintf(TokenCacheKey, key)
	tokenCache := &TokenCache{}
	err := common.RDB.HGetAll(context.Background(), cacheKey).Scan(tokenCache)
	if err == nil && tokenCache.ID != 0 {
		tokenCache.Key = key
		return tokenCache, nil
	} else if err != nil && !errors.Is(err, redis.Nil) {
		log.Errorf("get token (%s) from redis error: %s", key, err.Error())
	}
	token, err := GetTokenByKey(key)
	if err != nil {
		return nil, err
	}
	tc := token.ToTokenCache()
	if err := CacheSetToken(tc); err != nil {
		log.Error("redis set token error: " + err.Error())
	}
	return tc, nil
}
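
// updateTokenUsedAmountOnlyIncreaseScript only ever moves the cached used
// amount upwards (compared numerically), so a stale, smaller report can never
// overwrite a newer value. "u" is the redis tag of TokenCache.UsedAmount.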
var updateTokenUsedAmountOnlyIncreaseScript = redis.NewScript(`
local used_amount = redis.call("HGet", KEYS[1], "u")
if used_amount == false then
	return redis.status_reply("ok")
end
if tonumber(ARGV[1]) < tonumber(used_amount) then
	return redis.status_reply("ok")
end
redis.call("HSet", KEYS[1], "u", ARGV[1])
return redis.status_reply("ok")
`)

func CacheUpdateTokenUsedAmountOnlyIncrease(key string, amount float64) error {
	if !common.RedisEnabled {
		return nil
	}
	return updateTokenUsedAmountOnlyIncreaseScript.Run(context.Background(), common.RDB, []string{fmt.Sprintf(TokenCacheKey, key)}, amount).Err()
}
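
// The field-update scripts below only modify fields that are already cached,
// so they never create partial cache entries without a TTL.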
var updateTokenNameScript = redis.NewScript(`
if redis.call("HExists", KEYS[1], "n") == 1 then
	redis.call("HSet", KEYS[1], "n", ARGV[1])
end
return redis.status_reply("ok")
`)

func CacheUpdateTokenName(key string, name string) error {
	if !common.RedisEnabled {
		return nil
	}
	return updateTokenNameScript.Run(context.Background(), common.RDB, []string{fmt.Sprintf(TokenCacheKey, key)}, name).Err()
}

var updateTokenStatusScript = redis.NewScript(`
if redis.call("HExists", KEYS[1], "st") == 1 then
	redis.call("HSet", KEYS[1], "st", ARGV[1])
end
return redis.status_reply("ok")
`)

func CacheUpdateTokenStatus(key string, status int) error {
	if !common.RedisEnabled {
		return nil
	}
	return updateTokenStatusScript.Run(context.Background(), common.RDB, []string{fmt.Sprintf(TokenCacheKey, key)}, status).Err()
}
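
// redisMapStringInt64 stores a map[string]int64 as a single JSON-encoded
// hash field.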
type redisMapStringInt64 map[string]int64

var (
	_ redis.Scanner            = (*redisMapStringInt64)(nil)
	_ encoding.BinaryMarshaler = (*redisMapStringInt64)(nil)
)

func (r *redisMapStringInt64) ScanRedis(value string) error {
	return sonic.Unmarshal(conv.StringToBytes(value), r)
}

func (r redisMapStringInt64) MarshalBinary() ([]byte, error) {
	return sonic.Marshal(r)
}
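
// GroupCache is the subset of Group cached in redis as a hash. ID is not
// stored in the hash; it is recovered from the cache key on reads.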
type GroupCache struct {
	ID         string              `json:"-" redis:"-"`
	Status     int                 `json:"status" redis:"st"`
	UsedAmount float64             `json:"used_amount" redis:"ua"`
	RPMRatio   float64             `json:"rpm_ratio" redis:"rpm_r"`
	RPM        redisMapStringInt64 `json:"rpm" redis:"rpm"`
	TPMRatio   float64             `json:"tpm_ratio" redis:"tpm_r"`
	TPM        redisMapStringInt64 `json:"tpm" redis:"tpm"`
}

func (g *Group) ToGroupCache() *GroupCache {
	return &GroupCache{
		ID:         g.ID,
		Status:     g.Status,
		UsedAmount: g.UsedAmount,
		RPMRatio:   g.RPMRatio,
		RPM:        g.RPM,
		TPMRatio:   g.TPMRatio,
		TPM:        g.TPM,
	}
}

func CacheDeleteGroup(id string) error {
	if !common.RedisEnabled {
		return nil
	}
	return common.RedisDel(fmt.Sprintf(GroupCacheKey, id))
}
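
// The group scripts below mirror the token field-update scripts: each one
// only rewrites a field that is already present in the cached hash.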
var updateGroupRPMRatioScript = redis.NewScript(`
if redis.call("HExists", KEYS[1], "rpm_r") == 1 then
	redis.call("HSet", KEYS[1], "rpm_r", ARGV[1])
end
return redis.status_reply("ok")
`)

func CacheUpdateGroupRPMRatio(id string, rpmRatio float64) error {
	if !common.RedisEnabled {
		return nil
	}
	return updateGroupRPMRatioScript.Run(context.Background(), common.RDB, []string{fmt.Sprintf(GroupCacheKey, id)}, rpmRatio).Err()
}

var updateGroupRPMScript = redis.NewScript(`
if redis.call("HExists", KEYS[1], "rpm") == 1 then
	redis.call("HSet", KEYS[1], "rpm", ARGV[1])
end
return redis.status_reply("ok")
`)

func CacheUpdateGroupRPM(id string, rpm map[string]int64) error {
	if !common.RedisEnabled {
		return nil
	}
	jsonRPM, err := sonic.Marshal(rpm)
	if err != nil {
		return err
	}
	return updateGroupRPMScript.Run(context.Background(), common.RDB, []string{fmt.Sprintf(GroupCacheKey, id)}, conv.BytesToString(jsonRPM)).Err()
}

var updateGroupTPMRatioScript = redis.NewScript(`
if redis.call("HExists", KEYS[1], "tpm_r") == 1 then
	redis.call("HSet", KEYS[1], "tpm_r", ARGV[1])
end
return redis.status_reply("ok")
`)

func CacheUpdateGroupTPMRatio(id string, tpmRatio float64) error {
	if !common.RedisEnabled {
		return nil
	}
	return updateGroupTPMRatioScript.Run(context.Background(), common.RDB, []string{fmt.Sprintf(GroupCacheKey, id)}, tpmRatio).Err()
}

var updateGroupTPMScript = redis.NewScript(`
if redis.call("HExists", KEYS[1], "tpm") == 1 then
	redis.call("HSet", KEYS[1], "tpm", ARGV[1])
end
return redis.status_reply("ok")
`)

func CacheUpdateGroupTPM(id string, tpm map[string]int64) error {
	if !common.RedisEnabled {
		return nil
	}
	jsonTPM, err := sonic.Marshal(tpm)
	if err != nil {
		return err
	}
	return updateGroupTPMScript.Run(context.Background(), common.RDB, []string{fmt.Sprintf(GroupCacheKey, id)}, conv.BytesToString(jsonTPM)).Err()
}

var updateGroupStatusScript = redis.NewScript(`
if redis.call("HExists", KEYS[1], "st") == 1 then
	redis.call("HSet", KEYS[1], "st", ARGV[1])
end
return redis.status_reply("ok")
`)

func CacheUpdateGroupStatus(id string, status int) error {
	if !common.RedisEnabled {
		return nil
	}
	return updateGroupStatusScript.Run(context.Background(), common.RDB, []string{fmt.Sprintf(GroupCacheKey, id)}, status).Err()
}

//nolint:gosec
func CacheSetGroup(group *GroupCache) error {
	if !common.RedisEnabled {
		return nil
	}
	key := fmt.Sprintf(GroupCacheKey, group.ID)
	pipe := common.RDB.Pipeline()
	pipe.HSet(context.Background(), key, group)
	expireTime := SyncFrequency + time.Duration(rand.Int64N(60)-30)*time.Second
	pipe.Expire(context.Background(), key, expireTime)
	_, err := pipe.Exec(context.Background())
	return err
}
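
// CacheGetGroup mirrors CacheGetTokenByKey: read from redis when possible,
// otherwise load the group from the database and repopulate the cache.
// A zero Status is treated as a cache miss.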
func CacheGetGroup(id string) (*GroupCache, error) {
	if !common.RedisEnabled {
		group, err := GetGroupByID(id)
		if err != nil {
			return nil, err
		}
		return group.ToGroupCache(), nil
	}
	cacheKey := fmt.Sprintf(GroupCacheKey, id)
	groupCache := &GroupCache{}
	err := common.RDB.HGetAll(context.Background(), cacheKey).Scan(groupCache)
	if err == nil && groupCache.Status != 0 {
		groupCache.ID = id
		return groupCache, nil
	} else if err != nil && !errors.Is(err, redis.Nil) {
		log.Errorf("get group (%s) from redis error: %s", id, err.Error())
	}
	group, err := GetGroupByID(id)
	if err != nil {
		return nil, err
	}
	gc := group.ToGroupCache()
	if err := CacheSetGroup(gc); err != nil {
		log.Error("redis set group error: " + err.Error())
	}
	return gc, nil
}

var updateGroupUsedAmountOnlyIncreaseScript = redis.NewScript(`
local used_amount = redis.call("HGet", KEYS[1], "ua")
if used_amount == false then
	return redis.status_reply("ok")
end
if tonumber(ARGV[1]) < tonumber(used_amount) then
	return redis.status_reply("ok")
end
redis.call("HSet", KEYS[1], "ua", ARGV[1])
return redis.status_reply("ok")
`)

func CacheUpdateGroupUsedAmountOnlyIncrease(id string, amount float64) error {
	if !common.RedisEnabled {
		return nil
	}
	return updateGroupUsedAmountOnlyIncreaseScript.Run(context.Background(), common.RDB, []string{fmt.Sprintf(GroupCacheKey, id)}, amount).Err()
}
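
// CacheGetGroupModelTPM caches a group's per-model TPM in a separate
// short-lived hash (GroupModelTPMKey) with a jittered TTL of a few seconds,
// falling back to the database on a miss.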
//nolint:gosec
func CacheGetGroupModelTPM(id string, model string) (int64, error) {
	if !common.RedisEnabled {
		return GetGroupModelTPM(id, model)
	}
	cacheKey := fmt.Sprintf(GroupModelTPMKey, id)
	tpm, err := common.RDB.HGet(context.Background(), cacheKey, model).Int64()
	if err == nil {
		return tpm, nil
	} else if !errors.Is(err, redis.Nil) {
		log.Errorf("get group model tpm (%s:%s) from redis error: %s", id, model, err.Error())
	}
	tpm, err = GetGroupModelTPM(id, model)
	if err != nil {
		return 0, err
	}
	pipe := common.RDB.Pipeline()
	pipe.HSet(context.Background(), cacheKey, model, tpm)
	// expire in 2-4 seconds
	pipe.Expire(context.Background(), cacheKey, 2*time.Second+time.Duration(rand.Int64N(3))*time.Second)
	_, err = pipe.Exec(context.Background())
	if err != nil {
		log.Errorf("set group model tpm (%s:%s) to redis error: %s", id, model, err.Error())
	}
	return tpm, nil
}
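
// ModelConfigCache resolves a model name to its configuration.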
type ModelConfigCache interface {
	GetModelConfig(model string) (*ModelConfig, bool)
}

// ModelCaches is a read-only snapshot of model and channel configuration.
// It is never mutated in place; InitModelConfigAndChannelCache builds a new
// value and swaps it in atomically.
type ModelCaches struct {
	ModelConfig                     ModelConfigCache
	EnabledModel2channels           map[string][]*Channel
	EnabledModels                   []string
	EnabledModelsMap                map[string]struct{}
	EnabledModelConfigs             []*ModelConfig
	EnabledModelConfigsMap          map[string]*ModelConfig
	EnabledChannelType2ModelConfigs map[int][]*ModelConfig
	EnabledChannelID2channel        map[int]*Channel
}

var modelCaches atomic.Pointer[ModelCaches]

func init() {
	modelCaches.Store(new(ModelCaches))
}

func LoadModelCaches() *ModelCaches {
	return modelCaches.Load()
}

// InitModelConfigAndChannelCache rebuilds the model and channel cache from the
// database and atomically replaces the current snapshot.
func InitModelConfigAndChannelCache() error {
	modelConfig, err := initializeModelConfigCache()
	if err != nil {
		return err
	}

	// Load enabled channels from the database
	newEnabledChannels, err := LoadEnabledChannels()
	if err != nil {
		return err
	}

	// Build channel ID to channel map
	newEnabledChannelID2channel := buildChannelIDMap(newEnabledChannels)

	// Build model to channels map
	newEnabledModel2channels := buildModelToChannelsMap(newEnabledChannels)

	// Sort channels by priority
	sortChannelsByPriority(newEnabledModel2channels)

	// Build channel type to model configs map
	newEnabledChannelType2ModelConfigs := buildChannelTypeToModelConfigsMap(newEnabledChannels, modelConfig)

	// Build enabled models and configs lists
	newEnabledModels, newEnabledModelsMap, newEnabledModelConfigs, newEnabledModelConfigsMap := buildEnabledModelsAndConfigs(newEnabledChannelType2ModelConfigs)

	// Update global cache atomically
	modelCaches.Store(&ModelCaches{
		ModelConfig:                     modelConfig,
		EnabledModel2channels:           newEnabledModel2channels,
		EnabledModels:                   newEnabledModels,
		EnabledModelsMap:                newEnabledModelsMap,
		EnabledModelConfigs:             newEnabledModelConfigs,
		EnabledModelConfigsMap:          newEnabledModelConfigsMap,
		EnabledChannelType2ModelConfigs: newEnabledChannelType2ModelConfigs,
		EnabledChannelID2channel:        newEnabledChannelID2channel,
	})

	return nil
}
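
// LoadEnabledChannels returns all channels with ChannelStatusEnabled and
// normalizes their model lists and model mappings.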
func LoadEnabledChannels() ([]*Channel, error) {
	var channels []*Channel
	err := DB.Where("status = ?", ChannelStatusEnabled).Find(&channels).Error
	if err != nil {
		return nil, err
	}
	for _, channel := range channels {
		initializeChannelModels(channel)
		initializeChannelModelMapping(channel)
	}
	return channels, nil
}

func LoadChannels() ([]*Channel, error) {
	var channels []*Channel
	err := DB.Find(&channels).Error
	if err != nil {
		return nil, err
	}
	for _, channel := range channels {
		initializeChannelModels(channel)
		initializeChannelModelMapping(channel)
	}
	return channels, nil
}

func LoadChannelByID(id int) (*Channel, error) {
	var channel Channel
	err := DB.First(&channel, id).Error
	if err != nil {
		return nil, err
	}
	initializeChannelModels(&channel)
	initializeChannelModelMapping(&channel)
	return &channel, nil
}
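
// modelConfigMapCache serves model configs from an in-memory map.
// disabledModelConfigCache wraps it and falls back to a default config for
// unknown models when config.GetDisableModelConfig() is set.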
var _ ModelConfigCache = (*modelConfigMapCache)(nil)

type modelConfigMapCache struct {
	modelConfigMap map[string]*ModelConfig
}

func (m *modelConfigMapCache) GetModelConfig(model string) (*ModelConfig, bool) {
	config, ok := m.modelConfigMap[model]
	return config, ok
}

var _ ModelConfigCache = (*disabledModelConfigCache)(nil)

type disabledModelConfigCache struct {
	modelConfigs ModelConfigCache
}

func (d *disabledModelConfigCache) GetModelConfig(model string) (*ModelConfig, bool) {
	if config, ok := d.modelConfigs.GetModelConfig(model); ok {
		return config, true
	}
	return NewDefaultModelConfig(model), true
}

func initializeModelConfigCache() (ModelConfigCache, error) {
	modelConfigs, err := GetAllModelConfigs()
	if err != nil {
		return nil, err
	}
	newModelConfigMap := make(map[string]*ModelConfig)
	for _, modelConfig := range modelConfigs {
		newModelConfigMap[modelConfig.Model] = modelConfig
	}

	configs := &modelConfigMapCache{modelConfigMap: newModelConfigMap}
	if config.GetDisableModelConfig() {
		return &disabledModelConfigCache{modelConfigs: configs}, nil
	}
	return configs, nil
}
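
// initializeChannelModels normalizes a channel's model list: an empty list
// falls back to the default models for the channel type, and models without a
// known model config are dropped and logged.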
func initializeChannelModels(channel *Channel) {
	if len(channel.Models) == 0 {
		channel.Models = config.GetDefaultChannelModels()[channel.Type]
		return
	}

	foundModels, missingModels, err := GetModelConfigWithModels(channel.Models)
	if err != nil {
		return
	}

	if len(missingModels) > 0 {
		slices.Sort(missingModels)
		log.Errorf("model config not found: %v", missingModels)
	}
	slices.Sort(foundModels)
	channel.Models = foundModels
}

func initializeChannelModelMapping(channel *Channel) {
	if len(channel.ModelMapping) == 0 {
		channel.ModelMapping = config.GetDefaultChannelModelMapping()[channel.Type]
	}
}

func buildChannelIDMap(channels []*Channel) map[int]*Channel {
	channelMap := make(map[int]*Channel)
	for _, channel := range channels {
		channelMap[channel.ID] = channel
	}
	return channelMap
}

func buildModelToChannelsMap(channels []*Channel) map[string][]*Channel {
	modelMap := make(map[string][]*Channel)
	for _, channel := range channels {
		for _, model := range channel.Models {
			modelMap[model] = append(modelMap[model], channel)
		}
	}
	return modelMap
}

func sortChannelsByPriority(modelMap map[string][]*Channel) {
	for _, channels := range modelMap {
		sort.Slice(channels, func(i, j int) bool {
			return channels[i].GetPriority() > channels[j].GetPriority()
		})
	}
}
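
// buildChannelTypeToModelConfigsMap collects the model configs reachable
// through each channel type, then sorts and de-duplicates them per type.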
func buildChannelTypeToModelConfigsMap(channels []*Channel, modelConfigMap ModelConfigCache) map[int][]*ModelConfig {
	typeMap := make(map[int][]*ModelConfig)

	for _, channel := range channels {
		if _, ok := typeMap[channel.Type]; !ok {
			typeMap[channel.Type] = make([]*ModelConfig, 0, len(channel.Models))
		}
		configs := typeMap[channel.Type]

		for _, model := range channel.Models {
			if config, ok := modelConfigMap.GetModelConfig(model); ok {
				configs = append(configs, config)
			}
		}
		typeMap[channel.Type] = configs
	}

	for key, configs := range typeMap {
		slices.SortStableFunc(configs, SortModelConfigsFunc)
		typeMap[key] = slices.CompactFunc(configs, func(e1, e2 *ModelConfig) bool {
			return e1.Model == e2.Model
		})
	}

	return typeMap
}
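
// buildEnabledModelsAndConfigs flattens the per-type map into a de-duplicated,
// sorted list of enabled model names and configs, plus lookup maps for both.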
func buildEnabledModelsAndConfigs(typeMap map[int][]*ModelConfig) ([]string, map[string]struct{}, []*ModelConfig, map[string]*ModelConfig) {
	models := make([]string, 0)
	configs := make([]*ModelConfig, 0)
	appended := make(map[string]struct{})
	modelConfigsMap := make(map[string]*ModelConfig)

	for _, modelConfigs := range typeMap {
		for _, config := range modelConfigs {
			if _, ok := appended[config.Model]; ok {
				continue
			}
			models = append(models, config.Model)
			configs = append(configs, config)
			appended[config.Model] = struct{}{}
			modelConfigsMap[config.Model] = config
		}
	}

	slices.Sort(models)
	slices.SortStableFunc(configs, SortModelConfigsFunc)

	return models, appended, configs, modelConfigsMap
}
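
// SortModelConfigsFunc orders model configs by owner, then type, then model
// name, using natural (human-friendly) ordering for owner and model strings.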
func SortModelConfigsFunc(i, j *ModelConfig) int {
	if i.Owner != j.Owner {
		if natural.Less(string(i.Owner), string(j.Owner)) {
			return -1
		}
		return 1
	}
	if i.Type != j.Type {
		if i.Type < j.Type {
			return -1
		}
		return 1
	}
	if i.Model == j.Model {
		return 0
	}
	if natural.Less(i.Model, j.Model) {
		return -1
	}
	return 1
}
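
// SyncModelConfigAndChannelCache periodically rebuilds the cache until ctx is
// cancelled, sending a throttled notification when a rebuild fails.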
func SyncModelConfigAndChannelCache(ctx context.Context, wg *sync.WaitGroup, frequency time.Duration) {
	defer wg.Done()
	ticker := time.NewTicker(frequency)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			err := InitModelConfigAndChannelCache()
			if err != nil {
				notify.ErrorThrottle("syncModelChannel", time.Minute, "failed to sync channels", err.Error())
			}
		}
	}
}