// Union of every LLM backend this extension can route requests through.
// Each member selects which set of option fields in ApiHandlerOptions is used.
export type ApiProvider =
	| "anthropic"
	| "openrouter"
	| "bedrock"
	| "vertex"
	| "openai"
	| "ollama"
	| "lmstudio"
	| "gemini"
	| "openai-native"
// Flat bag of per-provider settings. All fields are optional so a single
// object can carry credentials for every provider at once; presumably only
// the fields for the selected ApiProvider are read — confirm against the
// handler implementations.
export interface ApiHandlerOptions {
	apiModelId?: string
	apiKey?: string // anthropic
	anthropicBaseUrl?: string
	// OpenRouter
	openRouterApiKey?: string
	openRouterModelId?: string
	openRouterModelInfo?: ModelInfo
	// AWS Bedrock; sessionToken suggests support for temporary STS credentials
	awsAccessKey?: string
	awsSecretKey?: string
	awsSessionToken?: string
	awsRegion?: string
	awsUseCrossRegionInference?: boolean
	// GCP Vertex AI
	vertexProjectId?: string
	vertexRegion?: string
	// OpenAI-compatible endpoints (custom base URL, e.g. proxies/Azure)
	openAiBaseUrl?: string
	openAiApiKey?: string
	openAiModelId?: string
	// Local model servers
	ollamaModelId?: string
	ollamaBaseUrl?: string
	lmStudioModelId?: string
	lmStudioBaseUrl?: string
	geminiApiKey?: string
	// OpenAI first-party API ("openai-native" provider)
	openAiNativeApiKey?: string
	azureApiVersion?: string
	openRouterUseMiddleOutTransform?: boolean
}
- export type ApiConfiguration = ApiHandlerOptions & {
- apiProvider?: ApiProvider
- }
// Models

// Static metadata describing one model's limits and pricing.
// Prices are USD per million tokens (see the annotated Anthropic entries).
export interface ModelInfo {
	maxTokens?: number // presumably the max output tokens per response (values are far below contextWindow) — confirm
	contextWindow?: number // total token budget for a request
	supportsImages?: boolean // accepts image input
	supportsComputerUse?: boolean // supports the computer-use tool
	supportsPromptCache: boolean // this value is hardcoded for now
	inputPrice?: number // USD per million input tokens
	outputPrice?: number // USD per million output tokens
	cacheWritesPrice?: number // USD per million tokens written to the prompt cache
	cacheReadsPrice?: number // USD per million tokens read from the prompt cache
	description?: string // human-readable blurb shown in the UI
}
// Anthropic
// https://docs.anthropic.com/en/docs/about-claude/models
export type AnthropicModelId = keyof typeof anthropicModels
export const anthropicDefaultModelId: AnthropicModelId = "claude-3-5-sonnet-20241022"
// All prices are USD per million tokens. Only 3.5 Sonnet is marked as
// supporting the computer-use tool here.
export const anthropicModels = {
	"claude-3-5-sonnet-20241022": {
		maxTokens: 8192,
		contextWindow: 200_000,
		supportsImages: true,
		supportsComputerUse: true,
		supportsPromptCache: true,
		inputPrice: 3.0, // $3 per million input tokens
		outputPrice: 15.0, // $15 per million output tokens
		cacheWritesPrice: 3.75, // $3.75 per million tokens
		cacheReadsPrice: 0.3, // $0.30 per million tokens
	},
	"claude-3-5-haiku-20241022": {
		maxTokens: 8192,
		contextWindow: 200_000,
		supportsImages: false, // NOTE(review): listed as text-only here — confirm against Anthropic docs
		supportsPromptCache: true,
		inputPrice: 1.0,
		outputPrice: 5.0,
		cacheWritesPrice: 1.25,
		cacheReadsPrice: 0.1,
	},
	"claude-3-opus-20240229": {
		maxTokens: 4096,
		contextWindow: 200_000,
		supportsImages: true,
		supportsPromptCache: true,
		inputPrice: 15.0,
		outputPrice: 75.0,
		cacheWritesPrice: 18.75,
		cacheReadsPrice: 1.5,
	},
	"claude-3-haiku-20240307": {
		maxTokens: 4096,
		contextWindow: 200_000,
		supportsImages: true,
		supportsPromptCache: true,
		inputPrice: 0.25,
		outputPrice: 1.25,
		cacheWritesPrice: 0.3,
		cacheReadsPrice: 0.03,
	},
} as const satisfies Record<string, ModelInfo> // as const assertion makes the object deeply readonly
// AWS Bedrock
// https://docs.aws.amazon.com/bedrock/latest/userguide/conversation-inference.html
export type BedrockModelId = keyof typeof bedrockModels
export const bedrockDefaultModelId: BedrockModelId = "anthropic.claude-3-5-sonnet-20241022-v2:0"
// Same Anthropic family exposed through Bedrock model IDs; prompt caching is
// disabled for every entry here. Prices presumably USD per million tokens,
// matching the Anthropic table — confirm.
export const bedrockModels = {
	"anthropic.claude-3-5-sonnet-20241022-v2:0": {
		maxTokens: 8192,
		contextWindow: 200_000,
		supportsImages: true,
		supportsComputerUse: true,
		supportsPromptCache: false,
		inputPrice: 3.0,
		outputPrice: 15.0,
	},
	"anthropic.claude-3-5-haiku-20241022-v1:0": {
		maxTokens: 8192,
		contextWindow: 200_000,
		supportsImages: false,
		supportsPromptCache: false,
		inputPrice: 1.0,
		outputPrice: 5.0,
	},
	"anthropic.claude-3-5-sonnet-20240620-v1:0": {
		maxTokens: 8192,
		contextWindow: 200_000,
		supportsImages: true,
		supportsPromptCache: false,
		inputPrice: 3.0,
		outputPrice: 15.0,
	},
	"anthropic.claude-3-opus-20240229-v1:0": {
		maxTokens: 4096,
		contextWindow: 200_000,
		supportsImages: true,
		supportsPromptCache: false,
		inputPrice: 15.0,
		outputPrice: 75.0,
	},
	"anthropic.claude-3-sonnet-20240229-v1:0": {
		maxTokens: 4096,
		contextWindow: 200_000,
		supportsImages: true,
		supportsPromptCache: false,
		inputPrice: 3.0,
		outputPrice: 15.0,
	},
	"anthropic.claude-3-haiku-20240307-v1:0": {
		maxTokens: 4096,
		contextWindow: 200_000,
		supportsImages: true,
		supportsPromptCache: false,
		inputPrice: 0.25,
		outputPrice: 1.25,
	},
} as const satisfies Record<string, ModelInfo>
// OpenRouter
// https://openrouter.ai/models?order=newest&supported_parameters=tools
export const openRouterDefaultModelId = "anthropic/claude-3.5-sonnet:beta" // will always exist in openRouterModels
// Fallback metadata for the default model, used until/unless live model info
// is fetched from OpenRouter (see openRouterModelInfo in ApiHandlerOptions).
export const openRouterDefaultModelInfo: ModelInfo = {
	maxTokens: 8192,
	contextWindow: 200_000,
	supportsImages: true,
	supportsComputerUse: true,
	supportsPromptCache: true,
	inputPrice: 3.0,
	outputPrice: 15.0,
	cacheWritesPrice: 3.75,
	cacheReadsPrice: 0.3,
	description:
		"The new Claude 3.5 Sonnet delivers better-than-Opus capabilities, faster-than-Sonnet speeds, at the same Sonnet prices. Sonnet is particularly good at:\n\n- Coding: New Sonnet scores ~49% on SWE-Bench Verified, higher than the last best score, and without any fancy prompt scaffolding\n- Data science: Augments human data science expertise; navigates unstructured data while using multiple tools for insights\n- Visual processing: excelling at interpreting charts, graphs, and images, accurately transcribing text to derive insights beyond just the text alone\n- Agentic tasks: exceptional tool use, making it great at agentic tasks (i.e. complex, multi-step problem solving tasks that require engaging with other systems)\n\n#multimodal\n\n_This is a faster endpoint, made available in collaboration with Anthropic, that is self-moderated: response moderation happens on the provider's side instead of OpenRouter's. For requests that pass moderation, it's identical to the [Standard](/anthropic/claude-3.5-sonnet) variant._",
}
// Vertex AI
// https://cloud.google.com/vertex-ai/generative-ai/docs/partner-models/use-claude
export type VertexModelId = keyof typeof vertexModels
export const vertexDefaultModelId: VertexModelId = "claude-3-5-sonnet-v2@20241022"
// Anthropic models addressed by Vertex "model@version" IDs; prompt caching is
// disabled for every entry here. Prices presumably USD per million tokens,
// matching the Anthropic table — confirm.
export const vertexModels = {
	"claude-3-5-sonnet-v2@20241022": {
		maxTokens: 8192,
		contextWindow: 200_000,
		supportsImages: true,
		supportsComputerUse: true,
		supportsPromptCache: false,
		inputPrice: 3.0,
		outputPrice: 15.0,
	},
	"claude-3-5-sonnet@20240620": {
		maxTokens: 8192,
		contextWindow: 200_000,
		supportsImages: true,
		supportsPromptCache: false,
		inputPrice: 3.0,
		outputPrice: 15.0,
	},
	"claude-3-5-haiku@20241022": {
		maxTokens: 8192,
		contextWindow: 200_000,
		supportsImages: false,
		supportsPromptCache: false,
		inputPrice: 1.0,
		outputPrice: 5.0,
	},
	"claude-3-opus@20240229": {
		maxTokens: 4096,
		contextWindow: 200_000,
		supportsImages: true,
		supportsPromptCache: false,
		inputPrice: 15.0,
		outputPrice: 75.0,
	},
	"claude-3-haiku@20240307": {
		maxTokens: 4096,
		contextWindow: 200_000,
		supportsImages: true,
		supportsPromptCache: false,
		inputPrice: 0.25,
		outputPrice: 1.25,
	},
} as const satisfies Record<string, ModelInfo>
// Defaults used for arbitrary OpenAI-compatible endpoints where real model
// metadata is unknown. Prices of 0 mean cost tracking is effectively off.
export const openAiModelInfoSaneDefaults: ModelInfo = {
	maxTokens: -1, // presumably a sentinel for "no explicit output limit" — confirm against the handler that consumes this
	contextWindow: 128_000,
	supportsImages: true,
	supportsPromptCache: false,
	inputPrice: 0,
	outputPrice: 0,
}
// Gemini
// https://ai.google.dev/gemini-api/docs/models/gemini
export type GeminiModelId = keyof typeof geminiModels
export const geminiDefaultModelId: GeminiModelId = "gemini-2.0-flash-exp"
// All entries are priced at 0 — presumably these experimental/preview models
// were free-tier at the time this table was written; verify before relying on
// cost calculations.
export const geminiModels = {
	"gemini-2.0-flash-exp": {
		maxTokens: 8192,
		contextWindow: 1_048_576,
		supportsImages: true,
		supportsPromptCache: false,
		inputPrice: 0,
		outputPrice: 0,
	},
	"gemini-1.5-flash-002": {
		maxTokens: 8192,
		contextWindow: 1_048_576,
		supportsImages: true,
		supportsPromptCache: false,
		inputPrice: 0,
		outputPrice: 0,
	},
	"gemini-1.5-flash-exp-0827": {
		maxTokens: 8192,
		contextWindow: 1_048_576,
		supportsImages: true,
		supportsPromptCache: false,
		inputPrice: 0,
		outputPrice: 0,
	},
	"gemini-1.5-flash-8b-exp-0827": {
		maxTokens: 8192,
		contextWindow: 1_048_576,
		supportsImages: true,
		supportsPromptCache: false,
		inputPrice: 0,
		outputPrice: 0,
	},
	"gemini-1.5-pro-002": {
		maxTokens: 8192,
		contextWindow: 2_097_152,
		supportsImages: true,
		supportsPromptCache: false,
		inputPrice: 0,
		outputPrice: 0,
	},
	"gemini-1.5-pro-exp-0827": {
		maxTokens: 8192,
		contextWindow: 2_097_152,
		supportsImages: true,
		supportsPromptCache: false,
		inputPrice: 0,
		outputPrice: 0,
	},
	"gemini-exp-1206": {
		maxTokens: 8192,
		contextWindow: 2_097_152,
		supportsImages: true,
		supportsPromptCache: false,
		inputPrice: 0,
		outputPrice: 0,
	},
} as const satisfies Record<string, ModelInfo>
// OpenAI Native
// https://openai.com/api/pricing/
export type OpenAiNativeModelId = keyof typeof openAiNativeModels
export const openAiNativeDefaultModelId: OpenAiNativeModelId = "gpt-4o"
// Prices presumably USD per million tokens, matching the other tables — confirm.
export const openAiNativeModels = {
	// don't support tool use yet
	"o1-preview": {
		maxTokens: 32_768,
		contextWindow: 128_000,
		supportsImages: true, // NOTE(review): the o1 API initially rejected image input — verify this flag
		supportsPromptCache: false,
		inputPrice: 15,
		outputPrice: 60,
	},
	"o1-mini": {
		maxTokens: 65_536,
		contextWindow: 128_000,
		supportsImages: true, // NOTE(review): verify, as with o1-preview
		supportsPromptCache: false,
		inputPrice: 3,
		outputPrice: 12,
	},
	"gpt-4o": {
		maxTokens: 4_096,
		contextWindow: 128_000,
		supportsImages: true,
		supportsPromptCache: false,
		inputPrice: 5,
		outputPrice: 15,
	},
	"gpt-4o-mini": {
		maxTokens: 16_384,
		contextWindow: 128_000,
		supportsImages: true,
		supportsPromptCache: false,
		inputPrice: 0.15,
		outputPrice: 0.6,
	},
} as const satisfies Record<string, ModelInfo>
// Azure OpenAI
// https://learn.microsoft.com/en-us/azure/ai-services/openai/api-version-deprecation
// https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs
// Default api-version query parameter when the user doesn't set azureApiVersion.
export const azureOpenAiDefaultApiVersion = "2024-08-01-preview"
|