model.ts

import { z } from "zod"

/**
 * ReasoningEffort
 */

export const reasoningEfforts = ["low", "medium", "high"] as const

export const reasoningEffortsSchema = z.enum(reasoningEfforts)

export type ReasoningEffort = z.infer<typeof reasoningEffortsSchema>

/**
 * ReasoningEffortWithMinimal
 */

export const reasoningEffortWithMinimalSchema = z.union([reasoningEffortsSchema, z.literal("minimal")])

export type ReasoningEffortWithMinimal = z.infer<typeof reasoningEffortWithMinimalSchema>

/**
 * Verbosity
 */

export const verbosityLevels = ["low", "medium", "high"] as const

export const verbosityLevelsSchema = z.enum(verbosityLevels)

export type VerbosityLevel = z.infer<typeof verbosityLevelsSchema>

/**
 * Service tiers (OpenAI Responses API)
 */

export const serviceTiers = ["default", "flex", "priority"] as const

export const serviceTierSchema = z.enum(serviceTiers)

export type ServiceTier = z.infer<typeof serviceTierSchema>

/**
 * ModelParameter
 */

export const modelParameters = ["max_tokens", "temperature", "reasoning", "include_reasoning"] as const

export const modelParametersSchema = z.enum(modelParameters)

export type ModelParameter = z.infer<typeof modelParametersSchema>

export const isModelParameter = (value: string): value is ModelParameter =>
	modelParameters.includes(value as ModelParameter)

/**
 * ModelInfo
 */

export const modelInfoSchema = z.object({
	maxTokens: z.number().nullish(),
	maxThinkingTokens: z.number().nullish(),
	contextWindow: z.number(),
	supportsImages: z.boolean().optional(),
	supportsComputerUse: z.boolean().optional(),
	supportsPromptCache: z.boolean(),
	// Capability flag to indicate whether the model supports an output verbosity parameter
	supportsVerbosity: z.boolean().optional(),
	supportsReasoningBudget: z.boolean().optional(),
	// Capability flag to indicate whether the model supports a temperature parameter
	supportsTemperature: z.boolean().optional(),
	requiredReasoningBudget: z.boolean().optional(),
	supportsReasoningEffort: z.boolean().optional(),
	supportedParameters: z.array(modelParametersSchema).optional(),
	inputPrice: z.number().optional(),
	outputPrice: z.number().optional(),
	cacheWritesPrice: z.number().optional(),
	cacheReadsPrice: z.number().optional(),
	description: z.string().optional(),
	reasoningEffort: reasoningEffortsSchema.optional(),
	minTokensPerCachePoint: z.number().optional(),
	maxCachePoints: z.number().optional(),
	cachableFields: z.array(z.string()).optional(),
	// Flag to indicate if the model is deprecated and should not be used
	deprecated: z.boolean().optional(),
	/**
	 * Service tiers with pricing information.
	 * Each tier can have a name (for OpenAI service tiers) and pricing overrides.
	 * The top-level input/output/cache* fields represent the default/standard tier.
	 */
	tiers: z
		.array(
			z.object({
				name: serviceTierSchema.optional(), // Service tier name (flex, priority, etc.)
				contextWindow: z.number(),
				inputPrice: z.number().optional(),
				outputPrice: z.number().optional(),
				cacheWritesPrice: z.number().optional(),
				cacheReadsPrice: z.number().optional(),
			}),
		)
		.optional(),
})

export type ModelInfo = z.infer<typeof modelInfoSchema>
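As a rough usage sketch (not part of the file above), `modelInfoSchema` can validate untrusted model metadata at runtime; the import path `./model` and the numeric values below are illustrative assumptions, and zod's `safeParse` is used so invalid records surface as reported issues rather than thrown errors.

import { modelInfoSchema, type ModelInfo } from "./model"

// Hypothetical model metadata, e.g. loaded from a JSON config or an API response.
const candidate: unknown = {
	contextWindow: 128_000,
	supportsPromptCache: true,
	supportsReasoningEffort: true,
	reasoningEffort: "medium",
	inputPrice: 1.25,
	outputPrice: 10,
	tiers: [{ name: "flex", contextWindow: 128_000, inputPrice: 0.625 }],
}

// safeParse returns a discriminated union instead of throwing on invalid input.
const result = modelInfoSchema.safeParse(candidate)

if (result.success) {
	const info: ModelInfo = result.data
	console.log(`context window: ${info.contextWindow}`)
} else {
	console.error(result.error.issues)
}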