chat.go 4.8 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173
  1. package model
  2. import (
  3. "github.com/labring/aiproxy/core/model"
  4. "github.com/labring/aiproxy/core/relay/adaptor"
  5. )
// ChatUsage is the OpenAI chat-completions style usage block: prompt,
// completion, and total token counters plus optional per-category
// detail breakdowns.
type ChatUsage struct {
	PromptTokens     int64 `json:"prompt_tokens,omitempty"`
	CompletionTokens int64 `json:"completion_tokens,omitempty"`
	// TotalTokens has no omitempty, so a zero total is still serialized.
	TotalTokens int64 `json:"total_tokens"`
	// WebSearchCount tracks billable web-search invocations.
	WebSearchCount int64 `json:"web_search_count,omitempty"`
	// Detail breakdowns are nil when the upstream response omits them.
	PromptTokensDetails     *PromptTokensDetails     `json:"prompt_tokens_details,omitempty"`
	CompletionTokensDetails *CompletionTokensDetails `json:"completion_tokens_details,omitempty"`
}
  14. func (u ChatUsage) ToModelUsage() model.Usage {
  15. usage := model.Usage{
  16. InputTokens: model.ZeroNullInt64(u.PromptTokens),
  17. OutputTokens: model.ZeroNullInt64(u.CompletionTokens),
  18. TotalTokens: model.ZeroNullInt64(u.TotalTokens),
  19. WebSearchCount: model.ZeroNullInt64(u.WebSearchCount),
  20. }
  21. if u.PromptTokensDetails != nil {
  22. usage.CachedTokens = model.ZeroNullInt64(u.PromptTokensDetails.CachedTokens)
  23. usage.CacheCreationTokens = model.ZeroNullInt64(u.PromptTokensDetails.CacheCreationTokens)
  24. }
  25. if u.CompletionTokensDetails != nil {
  26. usage.ReasoningTokens = model.ZeroNullInt64(u.CompletionTokensDetails.ReasoningTokens)
  27. }
  28. return usage
  29. }
  30. func (u *ChatUsage) Add(other *ChatUsage) {
  31. if other == nil {
  32. return
  33. }
  34. u.PromptTokens += other.PromptTokens
  35. u.CompletionTokens += other.CompletionTokens
  36. u.TotalTokens += other.TotalTokens
  37. if other.PromptTokensDetails != nil {
  38. if u.PromptTokensDetails == nil {
  39. u.PromptTokensDetails = &PromptTokensDetails{}
  40. }
  41. u.PromptTokensDetails.Add(other.PromptTokensDetails)
  42. }
  43. }
  44. func (u ChatUsage) ToClaudeUsage() ClaudeUsage {
  45. cu := ClaudeUsage{
  46. InputTokens: u.PromptTokens,
  47. OutputTokens: u.CompletionTokens,
  48. }
  49. if u.PromptTokensDetails != nil {
  50. cu.CacheCreationInputTokens = u.PromptTokensDetails.CacheCreationTokens
  51. cu.CacheReadInputTokens = u.PromptTokensDetails.CachedTokens
  52. }
  53. return cu
  54. }
  55. // ToResponseUsage converts ChatUsage to ResponseUsage (OpenAI Responses API format)
  56. func (u ChatUsage) ToResponseUsage() ResponseUsage {
  57. usage := ResponseUsage{
  58. InputTokens: u.PromptTokens,
  59. OutputTokens: u.CompletionTokens,
  60. TotalTokens: u.TotalTokens,
  61. }
  62. if u.PromptTokensDetails != nil &&
  63. (u.PromptTokensDetails.CachedTokens > 0 || u.PromptTokensDetails.CacheCreationTokens > 0) {
  64. usage.InputTokensDetails = &ResponseUsageDetails{
  65. CachedTokens: u.PromptTokensDetails.CachedTokens,
  66. }
  67. }
  68. if u.CompletionTokensDetails != nil && u.CompletionTokensDetails.ReasoningTokens > 0 {
  69. usage.OutputTokensDetails = &ResponseUsageDetails{
  70. ReasoningTokens: u.CompletionTokensDetails.ReasoningTokens,
  71. }
  72. }
  73. return usage
  74. }
  75. // ToGeminiUsage converts ChatUsage to GeminiUsageMetadata (Google Gemini format)
  76. func (u ChatUsage) ToGeminiUsage() GeminiUsageMetadata {
  77. usage := GeminiUsageMetadata{
  78. PromptTokenCount: u.PromptTokens,
  79. CandidatesTokenCount: u.CompletionTokens,
  80. TotalTokenCount: u.TotalTokens,
  81. }
  82. if u.PromptTokensDetails != nil && u.PromptTokensDetails.CachedTokens > 0 {
  83. usage.CachedContentTokenCount = u.PromptTokensDetails.CachedTokens
  84. }
  85. if u.CompletionTokensDetails != nil && u.CompletionTokensDetails.ReasoningTokens > 0 {
  86. usage.ThoughtsTokenCount = u.CompletionTokensDetails.ReasoningTokens
  87. }
  88. return usage
  89. }
  90. type PromptTokensDetails struct {
  91. CachedTokens int64 `json:"cached_tokens"`
  92. AudioTokens int64 `json:"audio_tokens"`
  93. CacheCreationTokens int64 `json:"cache_creation_tokens,omitempty"`
  94. }
  95. func (d *PromptTokensDetails) Add(other *PromptTokensDetails) {
  96. if other == nil {
  97. return
  98. }
  99. d.CachedTokens += other.CachedTokens
  100. d.AudioTokens += other.AudioTokens
  101. d.CacheCreationTokens += other.CacheCreationTokens
  102. }
// CompletionTokensDetails is the completion-side token breakdown:
// reasoning, audio, and accepted/rejected predicted-output token counts.
// All fields serialize even when zero (no omitempty).
type CompletionTokensDetails struct {
	ReasoningTokens          int64 `json:"reasoning_tokens"`
	AudioTokens              int64 `json:"audio_tokens"`
	AcceptedPredictionTokens int64 `json:"accepted_prediction_tokens"`
	RejectedPredictionTokens int64 `json:"rejected_prediction_tokens"`
}
// OpenAIErrorResponse is the OpenAI-style error envelope: a single
// "error" object wrapping the error details.
type OpenAIErrorResponse struct {
	Error OpenAIError `json:"error"`
}

// OpenAIError mirrors the OpenAI API error object.
type OpenAIError struct {
	// Code is `any` because upstreams return both string and numeric codes.
	Code    any    `json:"code,omitempty"`
	Message string `json:"message,omitempty"`
	Type    string `json:"type,omitempty"`
	Param   string `json:"param,omitempty"`
}
  118. func NewOpenAIError(statusCode int, err OpenAIError) adaptor.Error {
  119. return adaptor.NewError(statusCode, OpenAIErrorResponse{
  120. Error: err,
  121. })
  122. }
  123. func WrapperOpenAIError(err error, code any, statusCode int, _type ...string) adaptor.Error {
  124. return WrapperOpenAIErrorWithMessage(err.Error(), code, statusCode, _type...)
  125. }
  126. func WrapperOpenAIErrorWithMessage(
  127. message string,
  128. code any,
  129. statusCode int,
  130. _type ...string,
  131. ) adaptor.Error {
  132. errType := ErrorTypeAIPROXY
  133. if len(_type) > 0 {
  134. errType = _type[0]
  135. }
  136. return NewOpenAIError(statusCode, OpenAIError{
  137. Message: message,
  138. Type: errType,
  139. Code: code,
  140. })
  141. }