| 123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204 |
- package controller
- import (
- "fmt"
- "net/http"
- "one-api/common"
- "strconv"
- "strings"
- "github.com/gin-gonic/gin"
- )
// Message is a single message in an OpenAI-style chat conversation.
type Message struct {
	Role    string  `json:"role"`    // e.g. "system", "user", "assistant" — not validated here
	Content string  `json:"content"` // message text
	Name    *string `json:"name,omitempty"` // optional author name; pointer so an absent name is omitted from JSON
}
// Relay modes classify which OpenAI-compatible endpoint an incoming
// request targets; Relay selects the helper to dispatch to based on this.
const (
	RelayModeUnknown = iota
	RelayModeChatCompletions
	RelayModeCompletions
	RelayModeEmbeddings
	RelayModeModerations
	RelayModeImagesGenerations
	RelayModeEdits
)
// GeneralOpenAIRequest is a superset request body covering the fields of
// several OpenAI endpoints (chat, completions, embeddings, edits, images),
// so one struct can be used to decode any of them.
// https://platform.openai.com/docs/api-reference/chat
type GeneralOpenAIRequest struct {
	Model       string    `json:"model,omitempty"`
	Messages    []Message `json:"messages,omitempty"` // chat completions
	Prompt      any       `json:"prompt,omitempty"`   // completions; `any` because it may be a string or a list
	Stream      bool      `json:"stream,omitempty"`
	MaxTokens   int       `json:"max_tokens,omitempty"`
	Temperature float64   `json:"temperature,omitempty"`
	TopP        float64   `json:"top_p,omitempty"`
	N           int       `json:"n,omitempty"`
	Input       any       `json:"input,omitempty"`       // embeddings / moderations; string or list
	Instruction string    `json:"instruction,omitempty"` // edits
	Size        string    `json:"size,omitempty"`        // image generations
}
// ChatRequest is a minimal chat-completions request body.
type ChatRequest struct {
	Model     string    `json:"model"`
	Messages  []Message `json:"messages"`
	MaxTokens int       `json:"max_tokens"`
}
// TextRequest is a minimal text-completion request body; it also carries
// Messages so it can decode chat-style payloads.
type TextRequest struct {
	Model     string    `json:"model"`
	Messages  []Message `json:"messages"`
	Prompt    string    `json:"prompt"`
	MaxTokens int       `json:"max_tokens"`
	//Stream   bool      `json:"stream"`
}
// ImageRequest is an image-generation request body.
type ImageRequest struct {
	Prompt string `json:"prompt"`
	N      int    `json:"n"`    // number of images to generate
	Size   string `json:"size"` // e.g. "1024x1024" — not validated here
}
// Usage reports token consumption as returned by OpenAI-compatible APIs.
type Usage struct {
	PromptTokens     int `json:"prompt_tokens"`
	CompletionTokens int `json:"completion_tokens"`
	TotalTokens      int `json:"total_tokens"`
}
// OpenAIError is the error object embedded in OpenAI-style error responses.
type OpenAIError struct {
	Message string `json:"message"`
	Type    string `json:"type"`
	Param   string `json:"param"`
	Code    any    `json:"code"` // `any` because upstreams send either a string or a number here
}
// OpenAIErrorWithStatusCode pairs an OpenAIError with the HTTP status code
// that should be (or was) sent alongside it.
type OpenAIErrorWithStatusCode struct {
	OpenAIError
	StatusCode int `json:"status_code"`
}
// TextResponse captures the usage and error portions of an upstream text
// response (other response fields are intentionally ignored).
type TextResponse struct {
	Usage `json:"usage"`
	Error OpenAIError `json:"error"`
}
// OpenAITextResponseChoice is one choice entry in a non-streaming
// chat/text response.
type OpenAITextResponseChoice struct {
	Index        int `json:"index"`
	Message      `json:"message"` // embedded: choice exposes Role/Content/Name directly
	FinishReason string `json:"finish_reason"`
}
// OpenAITextResponse is a full non-streaming OpenAI-style response body.
type OpenAITextResponse struct {
	Id      string                     `json:"id"`
	Object  string                     `json:"object"`
	Created int64                      `json:"created"` // unix timestamp
	Choices []OpenAITextResponseChoice `json:"choices"`
	Usage   `json:"usage"`
}
- type ImageResponse struct {
- Created int `json:"created"`
- Data []struct {
- Url string `json:"url"`
- }
- }
// ChatCompletionsStreamResponseChoice is one choice entry in a streaming
// (SSE) chat-completions chunk.
type ChatCompletionsStreamResponseChoice struct {
	Delta struct {
		Content string `json:"content"` // incremental content fragment
	} `json:"delta"`
	FinishReason string `json:"finish_reason,omitempty"` // set only on the final chunk
}
// ChatCompletionsStreamResponse is a single streaming chat-completions chunk.
type ChatCompletionsStreamResponse struct {
	Id      string                                `json:"id"`
	Object  string                                `json:"object"`
	Created int64                                 `json:"created"` // unix timestamp
	Model   string                                `json:"model"`
	Choices []ChatCompletionsStreamResponseChoice `json:"choices"`
}
// CompletionsStreamResponse is a single streaming text-completions chunk.
type CompletionsStreamResponse struct {
	Choices []struct {
		Text         string `json:"text"` // incremental text fragment
		FinishReason string `json:"finish_reason"`
	} `json:"choices"`
}
- func Relay(c *gin.Context) {
- relayMode := RelayModeUnknown
- if strings.HasPrefix(c.Request.URL.Path, "/v1/chat/completions") {
- relayMode = RelayModeChatCompletions
- } else if strings.HasPrefix(c.Request.URL.Path, "/v1/completions") {
- relayMode = RelayModeCompletions
- } else if strings.HasPrefix(c.Request.URL.Path, "/v1/embeddings") {
- relayMode = RelayModeEmbeddings
- } else if strings.HasSuffix(c.Request.URL.Path, "embeddings") {
- relayMode = RelayModeEmbeddings
- } else if strings.HasPrefix(c.Request.URL.Path, "/v1/moderations") {
- relayMode = RelayModeModerations
- } else if strings.HasPrefix(c.Request.URL.Path, "/v1/images/generations") {
- relayMode = RelayModeImagesGenerations
- } else if strings.HasPrefix(c.Request.URL.Path, "/v1/edits") {
- relayMode = RelayModeEdits
- }
- var err *OpenAIErrorWithStatusCode
- switch relayMode {
- case RelayModeImagesGenerations:
- err = relayImageHelper(c, relayMode)
- default:
- err = relayTextHelper(c, relayMode)
- }
- if err != nil {
- retryTimesStr := c.Query("retry")
- retryTimes, _ := strconv.Atoi(retryTimesStr)
- if retryTimesStr == "" {
- retryTimes = common.RetryTimes
- }
- if retryTimes > 0 {
- c.Redirect(http.StatusTemporaryRedirect, fmt.Sprintf("%s?retry=%d", c.Request.URL.Path, retryTimes-1))
- } else {
- if err.StatusCode == http.StatusTooManyRequests {
- err.OpenAIError.Message = "当前分组负载已饱和,请稍后再试,或升级账户以提升服务质量。"
- }
- c.JSON(err.StatusCode, gin.H{
- "error": err.OpenAIError,
- })
- }
- channelId := c.GetInt("channel_id")
- common.SysError(fmt.Sprintf("relay error (channel #%d): %s", channelId, err.Message))
- // https://platform.openai.com/docs/guides/error-codes/api-errors
- if shouldDisableChannel(&err.OpenAIError) {
- channelId := c.GetInt("channel_id")
- channelName := c.GetString("channel_name")
- disableChannel(channelId, channelName, err.Message)
- }
- }
- }
- func RelayNotImplemented(c *gin.Context) {
- err := OpenAIError{
- Message: "API not implemented",
- Type: "one_api_error",
- Param: "",
- Code: "api_not_implemented",
- }
- c.JSON(http.StatusNotImplemented, gin.H{
- "error": err,
- })
- }
- func RelayNotFound(c *gin.Context) {
- err := OpenAIError{
- Message: fmt.Sprintf("API not found: %s:%s", c.Request.Method, c.Request.URL.Path),
- Type: "one_api_error",
- Param: "",
- Code: "api_not_found",
- }
- c.JSON(http.StatusNotFound, gin.H{
- "error": err,
- })
- }
|