| 123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238 |
- package model
- import "github.com/labring/aiproxy/core/relay/adaptor"
- // Gemini API request and response types
- // https://ai.google.dev/api/generate-content
// GeminiChatRequest is the top-level request body for the Gemini
// generateContent endpoint.
// See https://ai.google.dev/api/generate-content
type GeminiChatRequest struct {
	// Contents is the conversation history, ordered oldest to newest.
	Contents []*GeminiChatContent `json:"contents"`
	// SystemInstruction is an optional system prompt, sent separately
	// from the conversation contents.
	SystemInstruction *GeminiChatContent `json:"systemInstruction,omitempty"`
	// SafetySettings overrides per-category blocking thresholds
	// (see the GeminiSafetyCategory*/GeminiSafetyThreshold* constants).
	SafetySettings []GeminiChatSafetySettings `json:"safetySettings,omitempty"`
	// GenerationConfig carries sampling and output-shaping options.
	GenerationConfig *GeminiChatGenerationConfig `json:"generationConfig,omitempty"`
	// Tools lists function declarations the model may call.
	Tools []GeminiChatTools `json:"tools,omitempty"`
	// ToolConfig controls how the model is allowed to use Tools.
	ToolConfig *GeminiToolConfig `json:"toolConfig,omitempty"`
}
// GeminiChatContent is a single turn in the conversation: a role plus
// one or more content parts.
type GeminiChatContent struct {
	// Role is GeminiRoleUser or GeminiRoleModel; empty is permitted
	// (e.g. for system instructions).
	Role  string        `json:"role,omitempty"`
	Parts []*GeminiPart `json:"parts"`
}

// GeminiPart is one piece of content inside a turn. Exactly one of the
// payload fields is normally set (text, inline media, a function call,
// or a function response).
type GeminiPart struct {
	// InlineData carries base64-encoded media such as images.
	InlineData *GeminiInlineData `json:"inlineData,omitempty"`
	// FunctionCall is a tool invocation emitted by the model.
	FunctionCall *GeminiFunctionCall `json:"functionCall,omitempty"`
	// FunctionResponse is the caller-supplied result of a prior call.
	FunctionResponse *GeminiFunctionResponse `json:"functionResponse,omitempty"`
	Text             string                  `json:"text,omitempty"`
	// Thought marks this part as model reasoning rather than answer text.
	Thought bool `json:"thought,omitempty"`
	// ThoughtSignature is an opaque token returned alongside thoughts;
	// echo it back to preserve reasoning context across turns.
	ThoughtSignature string `json:"thoughtSignature,omitempty"`
}
// GeminiInlineData is inline media content: a MIME type plus
// base64-encoded bytes.
type GeminiInlineData struct {
	MimeType string `json:"mimeType"`
	Data     string `json:"data"`
}

// GeminiFunctionCall is a tool call requested by the model.
type GeminiFunctionCall struct {
	// Args is the JSON-object argument payload for the function.
	Args map[string]any `json:"args"`
	Name string         `json:"name"`
}

// GeminiFunctionResponse returns a tool result to the model.
type GeminiFunctionResponse struct {
	Name     string         `json:"name"`
	Response map[string]any `json:"response"`
	// ID correlates the response with a specific call.
	// Vertex AI Gemini does not support the `id` field, hence omitempty.
	ID string `json:"id,omitempty"`
}

// GeminiChatSafetySettings sets the blocking threshold for one harm
// category (see the GeminiSafetyCategory*/GeminiSafetyThreshold* constants).
type GeminiChatSafetySettings struct {
	Category  string `json:"category"`
	Threshold string `json:"threshold"`
}

// GeminiChatTools wraps the function declarations offered to the model.
// FunctionDeclarations is left as `any` so upstream tool schemas can be
// passed through without re-marshaling.
type GeminiChatTools struct {
	FunctionDeclarations any `json:"functionDeclarations,omitempty"`
}
// GeminiChatGenerationConfig carries sampling parameters and
// output-shaping options for a generateContent request.
type GeminiChatGenerationConfig struct {
	// ResponseSchema constrains structured output (used with a JSON
	// ResponseMimeType).
	ResponseSchema map[string]any `json:"responseSchema,omitempty"`
	// Temperature and TopP are pointers so that an explicit zero can be
	// distinguished from "not set".
	Temperature      *float64 `json:"temperature,omitempty"`
	TopP             *float64 `json:"topP,omitempty"`
	ResponseMimeType string   `json:"responseMimeType,omitempty"`
	StopSequences    []string `json:"stopSequences,omitempty"`
	// NOTE(review): declared float64 although the upstream API documents
	// topK as an integer — presumably to pass through client values
	// verbatim; confirm before changing.
	TopK            float64 `json:"topK,omitempty"`
	MaxOutputTokens *int    `json:"maxOutputTokens,omitempty"`
	CandidateCount  int     `json:"candidateCount,omitempty"`
	// ResponseModalities requests output types (e.g. text, image).
	ResponseModalities []string              `json:"responseModalities,omitempty"`
	ThinkingConfig     *GeminiThinkingConfig `json:"thinkingConfig,omitempty"`
}

// GeminiThinkingConfig controls the model's reasoning ("thinking") phase.
type GeminiThinkingConfig struct {
	// ThinkingBudget caps thought tokens; omitted when zero.
	ThinkingBudget int `json:"thinkingBudget,omitempty"`
	// IncludeThoughts asks the API to return thought parts in the response.
	IncludeThoughts bool `json:"includeThoughts,omitempty"`
}

// GeminiFunctionCallingConfig restricts how the model may call functions.
type GeminiFunctionCallingConfig struct {
	// Mode is one of the GeminiFunctionCallingMode* constants.
	Mode                 string   `json:"mode,omitempty"`
	AllowedFunctionNames []string `json:"allowedFunctionNames,omitempty"`
}

// GeminiToolConfig wraps the function-calling configuration.
type GeminiToolConfig struct {
	FunctionCallingConfig GeminiFunctionCallingConfig `json:"functionCallingConfig"`
}
// GeminiChatResponse is the top-level response body from generateContent.
type GeminiChatResponse struct {
	Candidates     []*GeminiChatCandidate    `json:"candidates"`
	PromptFeedback *GeminiChatPromptFeedback `json:"promptFeedback,omitempty"`
	UsageMetadata  *GeminiUsageMetadata      `json:"usageMetadata,omitempty"`
	ModelVersion   string                    `json:"modelVersion,omitempty"`
}

// GeminiUsageMetadata is Gemini's token accounting for a request.
// Per the API, TotalTokenCount covers prompt, candidate, and thought
// tokens; CandidatesTokenCount excludes thoughts.
type GeminiUsageMetadata struct {
	PromptTokenCount     int64 `json:"promptTokenCount"`
	CandidatesTokenCount int64 `json:"candidatesTokenCount"`
	TotalTokenCount      int64 `json:"totalTokenCount"`
	// ThoughtsTokenCount counts reasoning tokens when thinking is enabled.
	ThoughtsTokenCount int64 `json:"thoughtsTokenCount,omitempty"`
	// PromptTokensDetails breaks the prompt count down by modality.
	PromptTokensDetails []GeminiPromptTokensDetail `json:"promptTokensDetails"`
	// CachedContentTokenCount is the portion of the prompt served from
	// context caching.
	CachedContentTokenCount int64                     `json:"cachedContentTokenCount,omitempty"`
	CacheTokensDetails      []GeminiCacheTokensDetail `json:"cacheTokensDetails,omitempty"`
}

// GeminiPromptTokensDetail is a per-modality token count for the prompt.
type GeminiPromptTokensDetail struct {
	Modality   string `json:"modality"`
	TokenCount int64  `json:"tokenCount"`
}

// GeminiCacheTokensDetail is a per-modality token count for cached content.
type GeminiCacheTokensDetail struct {
	Modality   string `json:"modality"`
	TokenCount int64  `json:"tokenCount"`
}
// GeminiChatCandidate is one generated answer within a response.
type GeminiChatCandidate struct {
	// FinishReason is one of the GeminiFinishReason* constants.
	FinishReason string            `json:"finishReason,omitempty"`
	Content      GeminiChatContent `json:"content"`
	// SafetyRatings reports per-category harm probabilities for this
	// candidate.
	SafetyRatings []struct {
		Category    string `json:"category"`
		Probability string `json:"probability"`
	} `json:"safetyRatings,omitempty"`
	Index int64 `json:"index"`
}

// GeminiChatPromptFeedback reports safety ratings for the prompt itself
// (e.g. when the prompt was blocked before generation).
type GeminiChatPromptFeedback struct {
	SafetyRatings []struct {
		Category    string `json:"category"`
		Probability string `json:"probability"`
	} `json:"safetyRatings,omitempty"`
}
- // ToUsage converts GeminiUsageMetadata to ChatUsage format
- func (u *GeminiUsageMetadata) ToUsage() ChatUsage {
- chatUsage := ChatUsage{
- PromptTokens: u.PromptTokenCount,
- CompletionTokens: u.CandidatesTokenCount +
- u.ThoughtsTokenCount,
- TotalTokens: u.TotalTokenCount,
- PromptTokensDetails: &PromptTokensDetails{
- CachedTokens: u.CachedContentTokenCount,
- },
- CompletionTokensDetails: &CompletionTokensDetails{
- ReasoningTokens: u.ThoughtsTokenCount,
- },
- }
- return chatUsage
- }
- // ToResponseUsage converts GeminiUsageMetadata to ResponseUsage (OpenAI Responses API format)
- func (u *GeminiUsageMetadata) ToResponseUsage() ResponseUsage {
- usage := ResponseUsage{
- InputTokens: u.PromptTokenCount,
- OutputTokens: u.CandidatesTokenCount,
- TotalTokens: u.TotalTokenCount,
- }
- if u.CachedContentTokenCount > 0 {
- usage.InputTokensDetails = &ResponseUsageDetails{
- CachedTokens: u.CachedContentTokenCount,
- }
- }
- if u.ThoughtsTokenCount > 0 {
- usage.OutputTokensDetails = &ResponseUsageDetails{
- ReasoningTokens: u.ThoughtsTokenCount,
- }
- }
- return usage
- }
- // ToClaudeUsage converts GeminiUsageMetadata to ClaudeUsage (Anthropic Claude format)
- func (u *GeminiUsageMetadata) ToClaudeUsage() ClaudeUsage {
- usage := ClaudeUsage{
- InputTokens: u.PromptTokenCount,
- OutputTokens: u.CandidatesTokenCount,
- }
- if u.CachedContentTokenCount > 0 {
- usage.CacheReadInputTokens = u.CachedContentTokenCount
- }
- return usage
- }
// GeminiError is the error object embedded in Gemini error responses.
type GeminiError struct {
	Message string `json:"message,omitempty"`
	// Status is the gRPC-style status string (e.g. "INVALID_ARGUMENT").
	Status string `json:"status,omitempty"`
	Code   int    `json:"code,omitempty"`
}

// GeminiErrorResponse is the top-level error envelope returned by the API.
// NOTE: `omitempty` has no effect on a non-pointer struct field; the
// "error" key is always emitted.
type GeminiErrorResponse struct {
	Error GeminiError `json:"error,omitempty"`
}
- func NewGeminiError(statusCode int, err GeminiError) adaptor.Error {
- return adaptor.NewError(statusCode, GeminiErrorResponse{
- Error: err,
- })
- }
// Gemini Role constants: the two roles accepted in Contents.
const (
	GeminiRoleModel = "model"
	GeminiRoleUser  = "user"
)

// Gemini Finish Reason constants, matched against
// GeminiChatCandidate.FinishReason.
//
// NOTE(review): TOOL_CALLS and FUNCTION_CALL are not part of Google's
// published FinishReason enum — presumably internal sentinel values used
// by this codebase; confirm before relying on them from API responses.
const (
	GeminiFinishReasonStop         = "STOP"
	GeminiFinishReasonMaxTokens    = "MAX_TOKENS"
	GeminiFinishReasonSafety       = "SAFETY"
	GeminiFinishReasonRecitation   = "RECITATION"
	GeminiFinishReasonOther        = "OTHER"
	GeminiFinishReasonToolCalls    = "TOOL_CALLS"
	GeminiFinishReasonFunctionCall = "FUNCTION_CALL"
)

// Gemini FunctionCallingConfig Mode constants
// (GeminiFunctionCallingConfig.Mode).
const (
	GeminiFunctionCallingModeAuto = "AUTO"
	GeminiFunctionCallingModeAny  = "ANY"
	GeminiFunctionCallingModeNone = "NONE"
)

// Gemini Safety Setting Category constants
// (GeminiChatSafetySettings.Category).
const (
	GeminiSafetyCategoryHarassment       = "HARM_CATEGORY_HARASSMENT"
	GeminiSafetyCategoryHateSpeech       = "HARM_CATEGORY_HATE_SPEECH"
	GeminiSafetyCategorySexuallyExplicit = "HARM_CATEGORY_SEXUALLY_EXPLICIT"
	GeminiSafetyCategoryDangerousContent = "HARM_CATEGORY_DANGEROUS_CONTENT"
	GeminiSafetyCategoryCivicIntegrity   = "HARM_CATEGORY_CIVIC_INTEGRITY"
)

// Gemini Safety Setting Threshold constants
// (GeminiChatSafetySettings.Threshold).
const (
	GeminiSafetyThresholdBlockNone           = "BLOCK_NONE"
	GeminiSafetyThresholdBlockLowAndAbove    = "BLOCK_LOW_AND_ABOVE"
	GeminiSafetyThresholdBlockMediumAndAbove = "BLOCK_MEDIUM_AND_ABOVE"
	GeminiSafetyThresholdBlockOnlyHigh       = "BLOCK_ONLY_HIGH"
)
|