package gemini

import (
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"net/http"
	"strings"

	"github.com/gin-gonic/gin"

	"one-api/common"
	"one-api/dto"
	"one-api/relay/channel"
	relaycommon "one-api/relay/common"
	"one-api/relay/constant"
	"one-api/setting/model_setting"
	"one-api/types"
)

type Adaptor struct {
}

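// ConvertClaudeRequest converts a Claude-format request for the Gemini channel; not yet implemented.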
func (a *Adaptor) ConvertClaudeRequest(*gin.Context, *relaycommon.RelayInfo, *dto.ClaudeRequest) (any, error) {
	// TODO: implement Claude request conversion
	return nil, errors.New("not implemented")
}

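// ConvertAudioRequest converts an OpenAI audio request for the Gemini channel; not yet implemented.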
func (a *Adaptor) ConvertAudioRequest(c *gin.Context, info *relaycommon.RelayInfo, request dto.AudioRequest) (io.Reader, error) {
	// TODO: implement audio request conversion
	return nil, errors.New("not implemented")
}

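// ConvertImageRequest maps an OpenAI image request to an Imagen predict request,
// translating the requested size into the closest aspect ratio.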
func (a *Adaptor) ConvertImageRequest(c *gin.Context, info *relaycommon.RelayInfo, request dto.ImageRequest) (any, error) {
	if !strings.HasPrefix(info.UpstreamModelName, "imagen") {
		return nil, errors.New("model does not support image generation")
	}
	// convert OpenAI size to Imagen aspect ratio
	aspectRatio := "1:1" // default aspect ratio
	switch request.Size {
	case "1024x1024":
		aspectRatio = "1:1"
	case "1024x1792":
		aspectRatio = "9:16"
	case "1792x1024":
		aspectRatio = "16:9"
	}
	// build the Gemini Imagen request
	geminiRequest := GeminiImageRequest{
		Instances: []GeminiImageInstance{
			{
				Prompt: request.Prompt,
			},
		},
		Parameters: GeminiImageParameters{
			SampleCount:      request.N,
			AspectRatio:      aspectRatio,
			PersonGeneration: "allow_adult", // allow adult person generation by default
		},
	}
	return geminiRequest, nil
}

func (a *Adaptor) Init(info *relaycommon.RelayInfo) {
}

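// GetRequestURL builds the upstream Gemini API URL. When the thinking adapter is
// enabled it strips the -thinking/-thinking-<budget>/-nothinking suffixes from the
// model name, then selects the endpoint (predict, embedContent, generateContent or
// streamGenerateContent) based on the model and streaming flag.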
func (a *Adaptor) GetRequestURL(info *relaycommon.RelayInfo) (string, error) {
	if model_setting.GetGeminiSettings().ThinkingAdapterEnabled {
		// handle the -thinking-<budget> suffix format
		if strings.Contains(info.UpstreamModelName, "-thinking-") {
			parts := strings.Split(info.UpstreamModelName, "-thinking-")
			info.UpstreamModelName = parts[0]
		} else if strings.HasSuffix(info.UpstreamModelName, "-thinking") { // legacy -thinking suffix
			info.UpstreamModelName = strings.TrimSuffix(info.UpstreamModelName, "-thinking")
		} else if strings.HasSuffix(info.UpstreamModelName, "-nothinking") {
			info.UpstreamModelName = strings.TrimSuffix(info.UpstreamModelName, "-nothinking")
		}
	}
	version := model_setting.GetGeminiVersionSetting(info.UpstreamModelName)
	if strings.HasPrefix(info.UpstreamModelName, "imagen") {
		return fmt.Sprintf("%s/%s/models/%s:predict", info.BaseUrl, version, info.UpstreamModelName), nil
	}
	if strings.HasPrefix(info.UpstreamModelName, "text-embedding") ||
		strings.HasPrefix(info.UpstreamModelName, "embedding") ||
		strings.HasPrefix(info.UpstreamModelName, "gemini-embedding") {
		return fmt.Sprintf("%s/%s/models/%s:embedContent", info.BaseUrl, version, info.UpstreamModelName), nil
	}
	action := "generateContent"
	if info.IsStream {
		action = "streamGenerateContent?alt=sse"
	}
	return fmt.Sprintf("%s/%s/models/%s:%s", info.BaseUrl, version, info.UpstreamModelName, action), nil
}

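// SetupRequestHeader sets the shared relay headers plus the x-goog-api-key header expected by the Gemini API.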
func (a *Adaptor) SetupRequestHeader(c *gin.Context, req *http.Header, info *relaycommon.RelayInfo) error {
	channel.SetupApiRequestHeader(info, c, req)
	req.Set("x-goog-api-key", info.ApiKey)
	return nil
}

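// ConvertOpenAIRequest converts an OpenAI chat completion request into a Gemini request body.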
func (a *Adaptor) ConvertOpenAIRequest(c *gin.Context, info *relaycommon.RelayInfo, request *dto.GeneralOpenAIRequest) (any, error) {
	if request == nil {
		return nil, errors.New("request is nil")
	}
	geminiRequest, err := CovertGemini2OpenAI(*request, info)
	if err != nil {
		return nil, err
	}
	return geminiRequest, nil
}

func (a *Adaptor) ConvertRerankRequest(c *gin.Context, relayMode int, request dto.RerankRequest) (any, error) {
	// rerank conversion is not implemented for this channel; no request body is produced
	return nil, nil
}

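// ConvertEmbeddingRequest converts an OpenAI embedding request into a Gemini embedContent request.
// Only the first input is embedded.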
func (a *Adaptor) ConvertEmbeddingRequest(c *gin.Context, info *relaycommon.RelayInfo, request dto.EmbeddingRequest) (any, error) {
	if request.Input == nil {
		return nil, errors.New("input is required")
	}
	inputs := request.ParseInput()
	if len(inputs) == 0 {
		return nil, errors.New("input is empty")
	}
	// only the first input is embedded; additional inputs are ignored
	geminiRequest := GeminiEmbeddingRequest{
		Content: GeminiChatContent{
			Parts: []GeminiPart{
				{
					Text: inputs[0],
				},
			},
		},
	}
	// set model-specific parameters
	// https://ai.google.dev/api/embeddings?hl=zh-cn#method:-models.embedcontent
	switch info.UpstreamModelName {
	case "text-embedding-004":
		// embedding models other than embedding-001 support OutputDimensionality
		if request.Dimensions > 0 {
			geminiRequest.OutputDimensionality = request.Dimensions
		}
	}
	return geminiRequest, nil
}

func (a *Adaptor) ConvertOpenAIResponsesRequest(c *gin.Context, info *relaycommon.RelayInfo, request dto.OpenAIResponsesRequest) (any, error) {
	// TODO: implement Responses API conversion
	return nil, errors.New("not implemented")
}

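// DoRequest forwards the converted request body to the upstream Gemini API.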
func (a *Adaptor) DoRequest(c *gin.Context, info *relaycommon.RelayInfo, requestBody io.Reader) (any, error) {
	return channel.DoApiRequest(a, c, info, requestBody)
}

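// DoResponse dispatches the upstream response to the matching handler based on
// relay mode, model name prefix, and the streaming flag.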
func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, info *relaycommon.RelayInfo) (usage any, err *types.NewAPIError) {
	if info.RelayMode == constant.RelayModeGemini {
		if info.IsStream {
			return GeminiTextGenerationStreamHandler(c, info, resp)
		}
		return GeminiTextGenerationHandler(c, info, resp)
	}
	if strings.HasPrefix(info.UpstreamModelName, "imagen") {
		return GeminiImageHandler(c, info, resp)
	}
	// check if the model is an embedding model
	if strings.HasPrefix(info.UpstreamModelName, "text-embedding") ||
		strings.HasPrefix(info.UpstreamModelName, "embedding") ||
		strings.HasPrefix(info.UpstreamModelName, "gemini-embedding") {
		return GeminiEmbeddingHandler(c, info, resp)
	}
	// default: chat completion
	if info.IsStream {
		return GeminiChatStreamHandler(c, info, resp)
	}
	return GeminiChatHandler(c, info, resp)

	//if usage.(*dto.Usage).CompletionTokenDetails.ReasoningTokens > 100 {
	//	// if thinking tokens are produced without an explicit -thinking request, bill as the thinking model
	//	if !strings.HasSuffix(info.OriginModelName, "-thinking") &&
	//		!strings.HasSuffix(info.OriginModelName, "-nothinking") {
	//		thinkingModelName := info.OriginModelName + "-thinking"
	//		if operation_setting.SelfUseModeEnabled || helper.ContainPriceOrRatio(thinkingModelName) {
	//			info.OriginModelName = thinkingModelName
	//		}
	//	}
	//}
}

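// GeminiImageHandler converts an Imagen predict response into an OpenAI-format image
// response, skipping RAI-filtered predictions, and reports usage at a fixed 258 tokens
// per generated image.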
func GeminiImageHandler(c *gin.Context, info *relaycommon.RelayInfo, resp *http.Response) (*dto.Usage, *types.NewAPIError) {
	responseBody, readErr := io.ReadAll(resp.Body)
	if readErr != nil {
		return nil, types.NewError(readErr, types.ErrorCodeBadResponseBody)
	}
	_ = resp.Body.Close()
	var geminiResponse GeminiImageResponse
	if jsonErr := json.Unmarshal(responseBody, &geminiResponse); jsonErr != nil {
		return nil, types.NewError(jsonErr, types.ErrorCodeBadResponseBody)
	}
	if len(geminiResponse.Predictions) == 0 {
		return nil, types.NewError(errors.New("no images generated"), types.ErrorCodeBadResponseBody)
	}
	// convert to an OpenAI-format image response
	openAIResponse := dto.ImageResponse{
		Created: common.GetTimestamp(),
		Data:    make([]dto.ImageData, 0, len(geminiResponse.Predictions)),
	}
	for _, prediction := range geminiResponse.Predictions {
		if prediction.RaiFilteredReason != "" {
			continue // skip images removed by the responsible-AI filter
		}
		openAIResponse.Data = append(openAIResponse.Data, dto.ImageData{
			B64Json: prediction.BytesBase64Encoded,
		})
	}
	jsonResponse, jsonErr := json.Marshal(openAIResponse)
	if jsonErr != nil {
		return nil, types.NewError(jsonErr, types.ErrorCodeBadResponseBody)
	}
	c.Writer.Header().Set("Content-Type", "application/json")
	c.Writer.WriteHeader(resp.StatusCode)
	_, _ = c.Writer.Write(jsonResponse)
	// https://github.com/google-gemini/cookbook/blob/719a27d752aac33f39de18a8d3cb42a70874917e/quickstarts/Counting_Tokens.ipynb
	// each generated image is billed at a fixed 258 tokens
	const imageTokens = 258
	generatedImages := len(openAIResponse.Data)
	usage := &dto.Usage{
		PromptTokens:     imageTokens * generatedImages, // image tokens are reported as prompt tokens
		CompletionTokens: 0,                             // image generation produces no completion tokens
		TotalTokens:      imageTokens * generatedImages,
	}
	return usage, nil
}

func (a *Adaptor) GetModelList() []string {
	return ModelList
}

func (a *Adaptor) GetChannelName() string {
	return ChannelName
}
|