@@ -345,23 +345,42 @@ type _AssertExperiments = AssertEqual<Equals<ExperimentId, Keys<Experiments>>>
  * ProviderSettings
  */

-export const providerSettingsSchema = z.object({
-	apiProvider: providerNamesSchema.optional(),
-	// Anthropic
+// Generic settings that apply to all providers
+const genericProviderSettingsSchema = z.object({
+	includeMaxTokens: z.boolean().optional(),
+	reasoningEffort: reasoningEffortsSchema.optional(),
+	promptCachingDisabled: z.boolean().optional(),
+	diffEnabled: z.boolean().optional(),
+	fuzzyMatchThreshold: z.number().optional(),
+	modelTemperature: z.number().nullish(),
+	rateLimitSeconds: z.number().optional(),
+	// Claude 3.7 Sonnet Thinking
+	modelMaxTokens: z.number().optional(),
+	modelMaxThinkingTokens: z.number().optional(),
+})
+
+// Provider-specific schemas
+const anthropicSchema = z.object({
 	apiModelId: z.string().optional(),
 	apiKey: z.string().optional(),
 	anthropicBaseUrl: z.string().optional(),
 	anthropicUseAuthToken: z.boolean().optional(),
-	// Glama
+})
+
+const glamaSchema = z.object({
 	glamaModelId: z.string().optional(),
 	glamaApiKey: z.string().optional(),
-	// OpenRouter
+})
+
+const openRouterSchema = z.object({
 	openRouterApiKey: z.string().optional(),
 	openRouterModelId: z.string().optional(),
 	openRouterBaseUrl: z.string().optional(),
 	openRouterSpecificProvider: z.string().optional(),
 	openRouterUseMiddleOutTransform: z.boolean().optional(),
-	// Amazon Bedrock
+})
+
+const bedrockSchema = z.object({
 	awsAccessKey: z.string().optional(),
 	awsSecretKey: z.string().optional(),
 	awsSessionToken: z.string().optional(),
@@ -371,12 +390,16 @@ export const providerSettingsSchema = z.object({
 	awsProfile: z.string().optional(),
 	awsUseProfile: z.boolean().optional(),
 	awsCustomArn: z.string().optional(),
-	// Google Vertex
+})
+
+const vertexSchema = z.object({
 	vertexKeyFile: z.string().optional(),
 	vertexJsonCredentials: z.string().optional(),
 	vertexProjectId: z.string().optional(),
 	vertexRegion: z.string().optional(),
-	// OpenAI
+})
+
+const openAiSchema = z.object({
 	openAiBaseUrl: z.string().optional(),
 	openAiApiKey: z.string().optional(),
 	openAiLegacyFormat: z.boolean().optional(),
@@ -389,10 +412,14 @@ export const providerSettingsSchema = z.object({
 	enableReasoningEffort: z.boolean().optional(),
 	openAiHostHeader: z.string().optional(), // Keep temporarily for backward compatibility during migration
 	openAiHeaders: z.record(z.string(), z.string()).optional(),
-	// Ollama
+})
+
+const ollamaSchema = z.object({
 	ollamaModelId: z.string().optional(),
 	ollamaBaseUrl: z.string().optional(),
-	// VS Code LM
+})
+
+const vsCodeLmSchema = z.object({
 	vsCodeLmModelSelector: z
 		.object({
 			vendor: z.string().optional(),
@@ -401,54 +428,213 @@ export const providerSettingsSchema = z.object({
 			id: z.string().optional(),
 		})
 		.optional(),
-	// LM Studio
+})
+
+const lmStudioSchema = z.object({
 	lmStudioModelId: z.string().optional(),
 	lmStudioBaseUrl: z.string().optional(),
 	lmStudioDraftModelId: z.string().optional(),
 	lmStudioSpeculativeDecodingEnabled: z.boolean().optional(),
-	// Gemini
+})
+
+const geminiSchema = z.object({
 	geminiApiKey: z.string().optional(),
 	googleGeminiBaseUrl: z.string().optional(),
-	// OpenAI Native
+})
+
+const openAiNativeSchema = z.object({
 	openAiNativeApiKey: z.string().optional(),
 	openAiNativeBaseUrl: z.string().optional(),
-	// Mistral
+})
+
+const mistralSchema = z.object({
 	mistralApiKey: z.string().optional(),
 	mistralCodestralUrl: z.string().optional(),
-	// DeepSeek
+})
+
+const deepSeekSchema = z.object({
 	deepSeekBaseUrl: z.string().optional(),
 	deepSeekApiKey: z.string().optional(),
-	// Unbound
+})
+
+const unboundSchema = z.object({
 	unboundApiKey: z.string().optional(),
 	unboundModelId: z.string().optional(),
-	// Requesty
+})
+
+const requestySchema = z.object({
 	requestyApiKey: z.string().optional(),
 	requestyModelId: z.string().optional(),
-	// X.AI (Grok)
+})
+
+const humanRelaySchema = z.object({})
+
+const fakeAiSchema = z.object({
+	fakeAi: z.unknown().optional(),
+})
+
+const xaiSchema = z.object({
 	xaiApiKey: z.string().optional(),
-	// Groq
+})
+
+const groqSchema = z.object({
 	groqApiKey: z.string().optional(),
-	// Chutes AI
+})
+
+const chutesSchema = z.object({
 	chutesApiKey: z.string().optional(),
-	// LiteLLM
+})
+
+const litellmSchema = z.object({
 	litellmBaseUrl: z.string().optional(),
 	litellmApiKey: z.string().optional(),
 	litellmModelId: z.string().optional(),
-	// Claude 3.7 Sonnet Thinking
-	modelMaxTokens: z.number().optional(),
-	modelMaxThinkingTokens: z.number().optional(),
-	// Generic
-	includeMaxTokens: z.boolean().optional(),
-	reasoningEffort: reasoningEffortsSchema.optional(),
-	promptCachingDisabled: z.boolean().optional(),
-	diffEnabled: z.boolean().optional(),
-	fuzzyMatchThreshold: z.number().optional(),
-	modelTemperature: z.number().nullish(),
-	rateLimitSeconds: z.number().optional(),
-	// Fake AI
-	fakeAi: z.unknown().optional(),
 })

+// Default schema for when apiProvider is not specified
+const defaultSchema = z.object({
+	apiProvider: z.undefined(),
+})
+
+// Create the discriminated union
+export const providerSettingsSchemaDiscriminated = z
+	.discriminatedUnion("apiProvider", [
+		anthropicSchema.merge(
+			z.object({
+				apiProvider: z.literal("anthropic"),
+			}),
+		),
+		glamaSchema.merge(
+			z.object({
+				apiProvider: z.literal("glama"),
+			}),
+		),
+		openRouterSchema.merge(
+			z.object({
+				apiProvider: z.literal("openrouter"),
+			}),
+		),
+		bedrockSchema.merge(
+			z.object({
+				apiProvider: z.literal("bedrock"),
+			}),
+		),
+		vertexSchema.merge(
+			z.object({
+				apiProvider: z.literal("vertex"),
+			}),
+		),
+		openAiSchema.merge(
+			z.object({
+				apiProvider: z.literal("openai"),
+			}),
+		),
+		ollamaSchema.merge(
+			z.object({
+				apiProvider: z.literal("ollama"),
+			}),
+		),
+		vsCodeLmSchema.merge(
+			z.object({
+				apiProvider: z.literal("vscode-lm"),
+			}),
+		),
+		lmStudioSchema.merge(
+			z.object({
+				apiProvider: z.literal("lmstudio"),
+			}),
+		),
+		geminiSchema.merge(
+			z.object({
+				apiProvider: z.literal("gemini"),
+			}),
+		),
+		openAiNativeSchema.merge(
+			z.object({
+				apiProvider: z.literal("openai-native"),
+			}),
+		),
+		mistralSchema.merge(
+			z.object({
+				apiProvider: z.literal("mistral"),
+			}),
+		),
+		deepSeekSchema.merge(
+			z.object({
+				apiProvider: z.literal("deepseek"),
+			}),
+		),
+		unboundSchema.merge(
+			z.object({
+				apiProvider: z.literal("unbound"),
+			}),
+		),
+		requestySchema.merge(
+			z.object({
+				apiProvider: z.literal("requesty"),
+			}),
+		),
+		humanRelaySchema.merge(
+			z.object({
+				apiProvider: z.literal("human-relay"),
+			}),
+		),
+		fakeAiSchema.merge(
+			z.object({
+				apiProvider: z.literal("fake-ai"),
+			}),
+		),
+		xaiSchema.merge(
+			z.object({
+				apiProvider: z.literal("xai"),
+			}),
+		),
+		groqSchema.merge(
+			z.object({
+				apiProvider: z.literal("groq"),
+			}),
+		),
+		chutesSchema.merge(
+			z.object({
+				apiProvider: z.literal("chutes"),
+			}),
+		),
+		litellmSchema.merge(
+			z.object({
+				apiProvider: z.literal("litellm"),
+			}),
+		),
+		defaultSchema,
+	])
+	.and(genericProviderSettingsSchema)
+
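(Not part of the patch: a minimal usage sketch of the new discriminated schema at a hypothetical call site, assuming standard Zod v3 behavior. The discriminator is apiProvider, and the trailing .and(genericProviderSettingsSchema) keeps the generic settings valid for every variant, including the defaultSchema case where apiProvider is omitted.)

import { providerSettingsSchemaDiscriminated } from "./provider-settings" // import path assumed

// Accepted: a known discriminator plus fields from its variant and the generic schema.
providerSettingsSchemaDiscriminated.parse({
	apiProvider: "anthropic",
	apiKey: "example-api-key",
	rateLimitSeconds: 5, // generic field, contributed by genericProviderSettingsSchema
})

// Accepted: apiProvider omitted, so the input matches defaultSchema (apiProvider: z.undefined()).
providerSettingsSchemaDiscriminated.parse({ diffEnabled: true })

// Rejected: the discriminator value is not one of the declared literals.
const check = providerSettingsSchemaDiscriminated.safeParse({ apiProvider: "not-a-provider" })
console.log(check.success) // false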
+export const providerSettingsSchema = z
+	.object({
+		apiProvider: providerNamesSchema.optional(),
+	})
+	.merge(anthropicSchema)
+	.merge(glamaSchema)
+	.merge(openRouterSchema)
+	.merge(bedrockSchema)
+	.merge(vertexSchema)
+	.merge(openAiSchema)
+	.merge(ollamaSchema)
+	.merge(vsCodeLmSchema)
+	.merge(lmStudioSchema)
+	.merge(geminiSchema)
+	.merge(openAiNativeSchema)
+	.merge(mistralSchema)
+	.merge(deepSeekSchema)
+	.merge(unboundSchema)
+	.merge(requestySchema)
+	.merge(humanRelaySchema)
+	.merge(fakeAiSchema)
+	.merge(xaiSchema)
+	.merge(groqSchema)
+	.merge(chutesSchema)
+	.merge(litellmSchema)
+	.merge(genericProviderSettingsSchema)
+
 export type ProviderSettings = z.infer<typeof providerSettingsSchema>

 type ProviderSettingsRecord = Record<Keys<ProviderSettings>, undefined>
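(Also not part of the patch: a sketch of why the flat providerSettingsSchema stays backward compatible after the split. Every per-provider schema is merged back into a single object whose fields are all optional, so stored settings that mix keys from different providers still validate and ProviderSettings keeps its flat shape. The settings object below is invented for illustration, and the import path is assumed.)

import { providerSettingsSchema, type ProviderSettings } from "./provider-settings"

// Hypothetical legacy settings mixing keys from several providers.
const legacy: ProviderSettings = {
	apiProvider: "openrouter",
	openRouterApiKey: "example-api-key",
	anthropicBaseUrl: "https://example.invalid", // tolerated: every merged field is optional
	modelTemperature: 0.2,
}

providerSettingsSchema.parse(legacy) // passes; the flat schema applies no per-provider cross-checks

Callers that want the parsed type tied to a specific apiProvider value can use providerSettingsSchemaDiscriminated instead.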