- import { z } from "zod"
- /**
- * ReasoningEffort
- */
- export const reasoningEfforts = ["low", "medium", "high"] as const
- export const reasoningEffortsSchema = z.enum(reasoningEfforts)
- export type ReasoningEffort = z.infer<typeof reasoningEffortsSchema>
- /**
- * ReasoningEffortWithMinimal
- */
- export const reasoningEffortWithMinimalSchema = z.union([reasoningEffortsSchema, z.literal("minimal")])
- export type ReasoningEffortWithMinimal = z.infer<typeof reasoningEffortWithMinimalSchema>
- /**
- * Verbosity
- */
- export const verbosityLevels = ["low", "medium", "high"] as const
- export const verbosityLevelsSchema = z.enum(verbosityLevels)
- export type VerbosityLevel = z.infer<typeof verbosityLevelsSchema>
- /**
- * Service tiers (OpenAI Responses API)
- */
- export const serviceTiers = ["default", "flex", "priority"] as const
- export const serviceTierSchema = z.enum(serviceTiers)
- export type ServiceTier = z.infer<typeof serviceTierSchema>
- /**
- * ModelParameter
- */
- export const modelParameters = ["max_tokens", "temperature", "reasoning", "include_reasoning"] as const
- export const modelParametersSchema = z.enum(modelParameters)
- export type ModelParameter = z.infer<typeof modelParametersSchema>
- export const isModelParameter = (value: string): value is ModelParameter =>
- modelParameters.includes(value as ModelParameter)
- /**
- * ModelInfo
- */
- export const modelInfoSchema = z.object({
- maxTokens: z.number().nullish(),
- maxThinkingTokens: z.number().nullish(),
- contextWindow: z.number(),
- supportsImages: z.boolean().optional(),
- supportsComputerUse: z.boolean().optional(),
- supportsPromptCache: z.boolean(),
- // Capability flag to indicate whether the model supports an output verbosity parameter
- supportsVerbosity: z.boolean().optional(),
- supportsReasoningBudget: z.boolean().optional(),
- // Capability flag to indicate whether the model supports temperature parameter
- supportsTemperature: z.boolean().optional(),
- requiredReasoningBudget: z.boolean().optional(),
- supportsReasoningEffort: z.boolean().optional(),
- supportedParameters: z.array(modelParametersSchema).optional(),
- inputPrice: z.number().optional(),
- outputPrice: z.number().optional(),
- cacheWritesPrice: z.number().optional(),
- cacheReadsPrice: z.number().optional(),
- description: z.string().optional(),
- reasoningEffort: reasoningEffortsSchema.optional(),
- minTokensPerCachePoint: z.number().optional(),
- maxCachePoints: z.number().optional(),
- cachableFields: z.array(z.string()).optional(),
- // Flag to indicate if the model is deprecated and should not be used
- deprecated: z.boolean().optional(),
- /**
- * Service tiers with pricing information.
- * Each tier can have a name (for OpenAI service tiers) and pricing overrides.
- * The top-level input/output/cache* fields represent the default/standard tier.
- */
- tiers: z
- .array(
- z.object({
- name: serviceTierSchema.optional(), // Service tier name (flex, priority, etc.)
- contextWindow: z.number(),
- inputPrice: z.number().optional(),
- outputPrice: z.number().optional(),
- cacheWritesPrice: z.number().optional(),
- cacheReadsPrice: z.number().optional(),
- }),
- )
- .optional(),
- })
- export type ModelInfo = z.infer<typeof modelInfoSchema>
|