- import { ModelInfo, ProviderName, ProviderSettings } from "../schemas"
- export type { ModelInfo, ProviderName }
- export type ApiHandlerOptions = Omit<ProviderSettings, "apiProvider" | "id">
- export type ApiConfiguration = ProviderSettings
// Anthropic
// https://docs.anthropic.com/en/docs/about-claude/models

// Union of the model IDs declared in `anthropicModels` below.
export type AnthropicModelId = keyof typeof anthropicModels

export const anthropicDefaultModelId: AnthropicModelId = "claude-3-7-sonnet-20250219"

// All prices are USD per million tokens. The ":thinking" pseudo-ID maps to
// the same underlying model with extended reasoning enabled (and a larger
// output budget).
export const anthropicModels = {
	"claude-3-7-sonnet-20250219:thinking": {
		maxTokens: 128_000,
		contextWindow: 200_000,
		supportsImages: true,
		supportsComputerUse: true,
		supportsPromptCache: true,
		inputPrice: 3.0, // $3 per million input tokens
		outputPrice: 15.0, // $15 per million output tokens
		cacheWritesPrice: 3.75, // $3.75 per million tokens
		cacheReadsPrice: 0.3, // $0.30 per million tokens
		thinking: true,
	},
	"claude-3-7-sonnet-20250219": {
		maxTokens: 8192,
		contextWindow: 200_000,
		supportsImages: true,
		supportsComputerUse: true,
		supportsPromptCache: true,
		inputPrice: 3.0, // $3 per million input tokens
		outputPrice: 15.0, // $15 per million output tokens
		cacheWritesPrice: 3.75, // $3.75 per million tokens
		cacheReadsPrice: 0.3, // $0.30 per million tokens
		thinking: false,
	},
	"claude-3-5-sonnet-20241022": {
		maxTokens: 8192,
		contextWindow: 200_000,
		supportsImages: true,
		supportsComputerUse: true,
		supportsPromptCache: true,
		inputPrice: 3.0, // $3 per million input tokens
		outputPrice: 15.0, // $15 per million output tokens
		cacheWritesPrice: 3.75, // $3.75 per million tokens
		cacheReadsPrice: 0.3, // $0.30 per million tokens
	},
	"claude-3-5-haiku-20241022": {
		maxTokens: 8192,
		contextWindow: 200_000,
		supportsImages: false,
		supportsPromptCache: true,
		inputPrice: 1.0,
		outputPrice: 5.0,
		cacheWritesPrice: 1.25,
		cacheReadsPrice: 0.1,
	},
	"claude-3-opus-20240229": {
		maxTokens: 4096,
		contextWindow: 200_000,
		supportsImages: true,
		supportsPromptCache: true,
		inputPrice: 15.0,
		outputPrice: 75.0,
		cacheWritesPrice: 18.75,
		cacheReadsPrice: 1.5,
	},
	"claude-3-haiku-20240307": {
		maxTokens: 4096,
		contextWindow: 200_000,
		supportsImages: true,
		supportsPromptCache: true,
		inputPrice: 0.25,
		outputPrice: 1.25,
		cacheWritesPrice: 0.3,
		cacheReadsPrice: 0.03,
	},
} as const satisfies Record<string, ModelInfo> // as const assertion makes the object deeply readonly
// Amazon Bedrock
// https://docs.aws.amazon.com/bedrock/latest/userguide/conversation-inference.html

/**
 * One content item within a conversation message, shared between the
 * Anthropic and Bedrock shapes. `type` discriminates which of the optional
 * fields below are meaningful for a given item.
 */
export interface MessageContent {
	type: "text" | "image" | "video" | "tool_use" | "tool_result"
	// Present for "text" items.
	text?: string
	// Present for inline "image" items.
	source?: {
		type: "base64"
		data: string | Uint8Array // string for Anthropic, Uint8Array for Bedrock
		media_type: "image/jpeg" | "image/png" | "image/gif" | "image/webp"
	}
	// Video specific fields
	format?: string
	s3Location?: {
		uri: string
		bucketOwner?: string
	}
	// Tool use and result fields
	toolUseId?: string
	name?: string
	// NOTE(review): shape depends on the tool's schema — `unknown` would force
	// callers to narrow, but switching is a breaking change; confirm before tightening.
	input?: any
	output?: any // Used for tool_result type
}
// Union of the model IDs declared in `bedrockModels` below.
export type BedrockModelId = keyof typeof bedrockModels

export const bedrockDefaultModelId: BedrockModelId = "anthropic.claude-3-7-sonnet-20250219-v1:0"

// Fallback model assumed when a prompt router responds without identifying
// the concrete model it invoked.
export const bedrockDefaultPromptRouterModelId: BedrockModelId = "anthropic.claude-3-sonnet-20240229-v1:0"

// March, 12 2025 - updated prices to match US-West-2 list price shown at https://aws.amazon.com/bedrock/pricing/
// including older models that are part of the default prompt routers AWS enabled for GA of the prompt router feature
export const bedrockModels = {
	"amazon.nova-pro-v1:0": {
		maxTokens: 5000,
		contextWindow: 300_000,
		supportsImages: true,
		supportsComputerUse: false,
		supportsPromptCache: true,
		inputPrice: 0.8,
		outputPrice: 3.2,
		cacheWritesPrice: 0.8, // per million tokens
		cacheReadsPrice: 0.2, // per million tokens
		minTokensPerCachePoint: 1,
		maxCachePoints: 1,
		cachableFields: ["system"],
	},
	"amazon.nova-pro-latency-optimized-v1:0": {
		maxTokens: 5000,
		contextWindow: 300_000,
		supportsImages: true,
		supportsComputerUse: false,
		supportsPromptCache: false,
		inputPrice: 1.0,
		outputPrice: 4.0,
		cacheWritesPrice: 1.0, // per million tokens
		cacheReadsPrice: 0.25, // per million tokens
		description: "Amazon Nova Pro with latency optimized inference",
	},
	"amazon.nova-lite-v1:0": {
		maxTokens: 5000,
		contextWindow: 300_000,
		supportsImages: true,
		supportsComputerUse: false,
		supportsPromptCache: true,
		inputPrice: 0.06,
		outputPrice: 0.24,
		cacheWritesPrice: 0.06, // per million tokens
		cacheReadsPrice: 0.015, // per million tokens
		minTokensPerCachePoint: 1,
		maxCachePoints: 1,
		cachableFields: ["system"],
	},
	"amazon.nova-micro-v1:0": {
		maxTokens: 5000,
		contextWindow: 128_000,
		supportsImages: false,
		supportsComputerUse: false,
		supportsPromptCache: true,
		inputPrice: 0.035,
		outputPrice: 0.14,
		cacheWritesPrice: 0.035, // per million tokens
		cacheReadsPrice: 0.00875, // per million tokens
		minTokensPerCachePoint: 1,
		maxCachePoints: 1,
		cachableFields: ["system"],
	},
	"anthropic.claude-3-7-sonnet-20250219-v1:0": {
		maxTokens: 8192,
		contextWindow: 200_000,
		supportsImages: true,
		supportsComputerUse: true,
		supportsPromptCache: true,
		inputPrice: 3.0,
		outputPrice: 15.0,
		cacheWritesPrice: 3.75,
		cacheReadsPrice: 0.3,
		minTokensPerCachePoint: 1024,
		maxCachePoints: 4,
		cachableFields: ["system", "messages", "tools"],
	},
	"anthropic.claude-3-5-sonnet-20241022-v2:0": {
		maxTokens: 8192,
		contextWindow: 200_000,
		supportsImages: true,
		supportsComputerUse: true,
		supportsPromptCache: true,
		inputPrice: 3.0,
		outputPrice: 15.0,
		cacheWritesPrice: 3.75,
		cacheReadsPrice: 0.3,
		minTokensPerCachePoint: 1024,
		maxCachePoints: 4,
		cachableFields: ["system", "messages", "tools"],
	},
	"anthropic.claude-3-5-haiku-20241022-v1:0": {
		maxTokens: 8192,
		contextWindow: 200_000,
		supportsImages: false,
		supportsPromptCache: true,
		inputPrice: 0.8,
		outputPrice: 4.0,
		cacheWritesPrice: 1.0,
		cacheReadsPrice: 0.08,
		minTokensPerCachePoint: 2048,
		maxCachePoints: 4,
		cachableFields: ["system", "messages", "tools"],
	},
	"anthropic.claude-3-5-sonnet-20240620-v1:0": {
		maxTokens: 8192,
		contextWindow: 200_000,
		supportsImages: true,
		supportsPromptCache: false,
		inputPrice: 3.0,
		outputPrice: 15.0,
	},
	"anthropic.claude-3-opus-20240229-v1:0": {
		maxTokens: 4096,
		contextWindow: 200_000,
		supportsImages: true,
		supportsPromptCache: false,
		inputPrice: 15.0,
		outputPrice: 75.0,
	},
	"anthropic.claude-3-sonnet-20240229-v1:0": {
		maxTokens: 4096,
		contextWindow: 200_000,
		supportsImages: true,
		supportsPromptCache: false,
		inputPrice: 3.0,
		outputPrice: 15.0,
	},
	"anthropic.claude-3-haiku-20240307-v1:0": {
		maxTokens: 4096,
		contextWindow: 200_000,
		supportsImages: true,
		supportsPromptCache: false,
		inputPrice: 0.25,
		outputPrice: 1.25,
	},
	"anthropic.claude-2-1-v1:0": {
		maxTokens: 4096,
		contextWindow: 100_000,
		supportsImages: false,
		supportsPromptCache: false,
		inputPrice: 8.0,
		outputPrice: 24.0,
		description: "Claude 2.1",
	},
	"anthropic.claude-2-0-v1:0": {
		maxTokens: 4096,
		contextWindow: 100_000,
		supportsImages: false,
		supportsPromptCache: false,
		inputPrice: 8.0,
		outputPrice: 24.0,
		description: "Claude 2.0",
	},
	"anthropic.claude-instant-v1:0": {
		maxTokens: 4096,
		contextWindow: 100_000,
		supportsImages: false,
		supportsPromptCache: false,
		inputPrice: 0.8,
		outputPrice: 2.4,
		description: "Claude Instant",
	},
	"deepseek.r1-v1:0": {
		maxTokens: 32_768,
		contextWindow: 128_000,
		supportsImages: false,
		supportsPromptCache: false,
		inputPrice: 1.35,
		outputPrice: 5.4,
	},
	"meta.llama3-3-70b-instruct-v1:0": {
		maxTokens: 8192,
		contextWindow: 128_000,
		supportsImages: false,
		supportsComputerUse: false,
		supportsPromptCache: false,
		inputPrice: 0.72,
		outputPrice: 0.72,
		description: "Llama 3.3 Instruct (70B)",
	},
	"meta.llama3-2-90b-instruct-v1:0": {
		maxTokens: 8192,
		contextWindow: 128_000,
		supportsImages: true,
		supportsComputerUse: false,
		supportsPromptCache: false,
		inputPrice: 0.72,
		outputPrice: 0.72,
		description: "Llama 3.2 Instruct (90B)",
	},
	"meta.llama3-2-11b-instruct-v1:0": {
		maxTokens: 8192,
		contextWindow: 128_000,
		supportsImages: true,
		supportsComputerUse: false,
		supportsPromptCache: false,
		inputPrice: 0.16,
		outputPrice: 0.16,
		description: "Llama 3.2 Instruct (11B)",
	},
	"meta.llama3-2-3b-instruct-v1:0": {
		maxTokens: 8192,
		contextWindow: 128_000,
		supportsImages: false,
		supportsComputerUse: false,
		supportsPromptCache: false,
		inputPrice: 0.15,
		outputPrice: 0.15,
		description: "Llama 3.2 Instruct (3B)",
	},
	"meta.llama3-2-1b-instruct-v1:0": {
		maxTokens: 8192,
		contextWindow: 128_000,
		supportsImages: false,
		supportsComputerUse: false,
		supportsPromptCache: false,
		inputPrice: 0.1,
		outputPrice: 0.1,
		description: "Llama 3.2 Instruct (1B)",
	},
	"meta.llama3-1-405b-instruct-v1:0": {
		maxTokens: 8192,
		contextWindow: 128_000,
		supportsImages: false,
		supportsComputerUse: false,
		supportsPromptCache: false,
		inputPrice: 2.4,
		outputPrice: 2.4,
		description: "Llama 3.1 Instruct (405B)",
	},
	"meta.llama3-1-70b-instruct-v1:0": {
		maxTokens: 8192,
		contextWindow: 128_000,
		supportsImages: false,
		supportsComputerUse: false,
		supportsPromptCache: false,
		inputPrice: 0.72,
		outputPrice: 0.72,
		description: "Llama 3.1 Instruct (70B)",
	},
	"meta.llama3-1-70b-instruct-latency-optimized-v1:0": {
		maxTokens: 8192,
		contextWindow: 128_000,
		supportsImages: false,
		supportsComputerUse: false,
		supportsPromptCache: false,
		inputPrice: 0.9,
		outputPrice: 0.9,
		description: "Llama 3.1 Instruct (70B) (w/ latency optimized inference)",
	},
	"meta.llama3-1-8b-instruct-v1:0": {
		maxTokens: 8192,
		contextWindow: 8_000,
		supportsImages: false,
		supportsComputerUse: false,
		supportsPromptCache: false,
		inputPrice: 0.22,
		outputPrice: 0.22,
		description: "Llama 3.1 Instruct (8B)",
	},
	"meta.llama3-70b-instruct-v1:0": {
		maxTokens: 2048,
		contextWindow: 8_000,
		supportsImages: false,
		supportsComputerUse: false,
		supportsPromptCache: false,
		inputPrice: 2.65,
		outputPrice: 3.5,
	},
	"meta.llama3-8b-instruct-v1:0": {
		maxTokens: 2048,
		contextWindow: 4_000,
		supportsImages: false,
		supportsComputerUse: false,
		supportsPromptCache: false,
		inputPrice: 0.3,
		outputPrice: 0.6,
	},
	"amazon.titan-text-lite-v1:0": {
		maxTokens: 4096,
		contextWindow: 8_000,
		supportsImages: false,
		supportsComputerUse: false,
		supportsPromptCache: false,
		inputPrice: 0.15,
		outputPrice: 0.2,
		description: "Amazon Titan Text Lite",
	},
	"amazon.titan-text-express-v1:0": {
		maxTokens: 4096,
		contextWindow: 8_000,
		supportsImages: false,
		supportsComputerUse: false,
		supportsPromptCache: false,
		inputPrice: 0.2,
		outputPrice: 0.6,
		description: "Amazon Titan Text Express",
	},
	// Embedding models have no output pricing (no generated tokens).
	"amazon.titan-text-embeddings-v1:0": {
		maxTokens: 8192,
		contextWindow: 8_000,
		supportsImages: false,
		supportsComputerUse: false,
		supportsPromptCache: false,
		inputPrice: 0.1,
		description: "Amazon Titan Text Embeddings",
	},
	"amazon.titan-text-embeddings-v2:0": {
		maxTokens: 8192,
		contextWindow: 8_000,
		supportsImages: false,
		supportsComputerUse: false,
		supportsPromptCache: false,
		inputPrice: 0.02,
		description: "Amazon Titan Text Embeddings V2",
	},
} as const satisfies Record<string, ModelInfo>
// Glama
// https://glama.ai/models

export const glamaDefaultModelId = "anthropic/claude-3-7-sonnet"

// Glama model metadata is fetched dynamically; this entry is the fallback
// used before/without a fetched list.
export const glamaDefaultModelInfo: ModelInfo = {
	maxTokens: 8192,
	contextWindow: 200_000,
	supportsImages: true,
	supportsComputerUse: true,
	supportsPromptCache: true,
	inputPrice: 3.0,
	outputPrice: 15.0,
	cacheWritesPrice: 3.75,
	cacheReadsPrice: 0.3,
	description:
		"Claude 3.7 Sonnet is an advanced large language model with improved reasoning, coding, and problem-solving capabilities. It introduces a hybrid reasoning approach, allowing users to choose between rapid responses and extended, step-by-step processing for complex tasks. The model demonstrates notable improvements in coding, particularly in front-end development and full-stack updates, and excels in agentic workflows, where it can autonomously navigate multi-step processes. Claude 3.7 Sonnet maintains performance parity with its predecessor in standard mode while offering an extended reasoning mode for enhanced accuracy in math, coding, and instruction-following tasks. Read more at the [blog post here](https://www.anthropic.com/news/claude-3-7-sonnet)",
}
// Requesty
// https://requesty.ai/router-2

export const requestyDefaultModelId = "coding/claude-3-7-sonnet"

// Fallback metadata for the Requesty default route (router-selected
// Claude 3.7 Sonnet).
export const requestyDefaultModelInfo: ModelInfo = {
	maxTokens: 8192,
	contextWindow: 200_000,
	supportsImages: true,
	supportsComputerUse: true,
	supportsPromptCache: true,
	inputPrice: 3.0,
	outputPrice: 15.0,
	cacheWritesPrice: 3.75,
	cacheReadsPrice: 0.3,
	description:
		"The best coding model, optimized by Requesty, and automatically routed to the fastest provider. Claude 3.7 Sonnet is an advanced large language model with improved reasoning, coding, and problem-solving capabilities. It introduces a hybrid reasoning approach, allowing users to choose between rapid responses and extended, step-by-step processing for complex tasks. The model demonstrates notable improvements in coding, particularly in front-end development and full-stack updates, and excels in agentic workflows, where it can autonomously navigate multi-step processes. Claude 3.7 Sonnet maintains performance parity with its predecessor in standard mode while offering an extended reasoning mode for enhanced accuracy in math, coding, and instruction-following tasks. Read more at the [blog post here](https://www.anthropic.com/news/claude-3-7-sonnet)",
}
// OpenRouter
// https://openrouter.ai/models?order=newest&supported_parameters=tools

export const openRouterDefaultModelId = "anthropic/claude-3.7-sonnet"

// Fallback metadata for the OpenRouter default model.
export const openRouterDefaultModelInfo: ModelInfo = {
	maxTokens: 8192,
	contextWindow: 200_000,
	supportsImages: true,
	supportsComputerUse: true,
	supportsPromptCache: true,
	inputPrice: 3.0,
	outputPrice: 15.0,
	cacheWritesPrice: 3.75,
	cacheReadsPrice: 0.3,
	description:
		"Claude 3.7 Sonnet is an advanced large language model with improved reasoning, coding, and problem-solving capabilities. It introduces a hybrid reasoning approach, allowing users to choose between rapid responses and extended, step-by-step processing for complex tasks. The model demonstrates notable improvements in coding, particularly in front-end development and full-stack updates, and excels in agentic workflows, where it can autonomously navigate multi-step processes. Claude 3.7 Sonnet maintains performance parity with its predecessor in standard mode while offering an extended reasoning mode for enhanced accuracy in math, coding, and instruction-following tasks. Read more at the [blog post here](https://www.anthropic.com/news/claude-3-7-sonnet)",
}
// Vertex AI
// https://cloud.google.com/vertex-ai/generative-ai/docs/partner-models/use-claude

// Union of the model IDs declared in `vertexModels` below.
export type VertexModelId = keyof typeof vertexModels

export const vertexDefaultModelId: VertexModelId = "claude-3-7-sonnet@20250219"

// Mix of Google Gemini and partner (Anthropic Claude) models served via
// Vertex AI. Prices are USD per million tokens; ":thinking" variants enable
// extended reasoning.
export const vertexModels = {
	"gemini-2.5-flash-preview-04-17:thinking": {
		maxTokens: 65_535,
		contextWindow: 1_048_576,
		supportsImages: true,
		supportsPromptCache: false,
		inputPrice: 0.15,
		outputPrice: 3.5,
		thinking: true,
		maxThinkingTokens: 24_576,
	},
	"gemini-2.5-flash-preview-04-17": {
		maxTokens: 65_535,
		contextWindow: 1_048_576,
		supportsImages: true,
		supportsPromptCache: false,
		inputPrice: 0.15,
		outputPrice: 0.6,
		thinking: false,
	},
	"gemini-2.5-pro-preview-03-25": {
		maxTokens: 65_535,
		contextWindow: 1_048_576,
		supportsImages: true,
		supportsPromptCache: true,
		isPromptCacheOptional: true,
		inputPrice: 2.5,
		outputPrice: 15,
	},
	"gemini-2.5-pro-preview-05-06": {
		maxTokens: 65_535,
		contextWindow: 1_048_576,
		supportsImages: true,
		supportsPromptCache: true,
		isPromptCacheOptional: true,
		inputPrice: 2.5,
		outputPrice: 15,
	},
	// Experimental ("exp") models are free of charge.
	"gemini-2.5-pro-exp-03-25": {
		maxTokens: 65_535,
		contextWindow: 1_048_576,
		supportsImages: true,
		supportsPromptCache: false,
		inputPrice: 0,
		outputPrice: 0,
	},
	"gemini-2.0-pro-exp-02-05": {
		maxTokens: 8192,
		contextWindow: 2_097_152,
		supportsImages: true,
		supportsPromptCache: false,
		inputPrice: 0,
		outputPrice: 0,
	},
	"gemini-2.0-flash-001": {
		maxTokens: 8192,
		contextWindow: 1_048_576,
		supportsImages: true,
		supportsPromptCache: true,
		isPromptCacheOptional: true,
		inputPrice: 0.15,
		outputPrice: 0.6,
	},
	"gemini-2.0-flash-lite-001": {
		maxTokens: 8192,
		contextWindow: 1_048_576,
		supportsImages: true,
		supportsPromptCache: false,
		inputPrice: 0.075,
		outputPrice: 0.3,
	},
	"gemini-2.0-flash-thinking-exp-01-21": {
		maxTokens: 8192,
		contextWindow: 32_768,
		supportsImages: true,
		supportsPromptCache: false,
		inputPrice: 0,
		outputPrice: 0,
	},
	"gemini-1.5-flash-002": {
		maxTokens: 8192,
		contextWindow: 1_048_576,
		supportsImages: true,
		supportsPromptCache: true,
		isPromptCacheOptional: true,
		inputPrice: 0.075,
		outputPrice: 0.3,
	},
	"gemini-1.5-pro-002": {
		maxTokens: 8192,
		contextWindow: 2_097_152,
		supportsImages: true,
		supportsPromptCache: false,
		inputPrice: 1.25,
		outputPrice: 5,
	},
	"claude-3-7-sonnet@20250219:thinking": {
		maxTokens: 64_000,
		contextWindow: 200_000,
		supportsImages: true,
		supportsComputerUse: true,
		supportsPromptCache: true,
		inputPrice: 3.0,
		outputPrice: 15.0,
		cacheWritesPrice: 3.75,
		cacheReadsPrice: 0.3,
		thinking: true,
	},
	"claude-3-7-sonnet@20250219": {
		maxTokens: 8192,
		contextWindow: 200_000,
		supportsImages: true,
		supportsComputerUse: true,
		supportsPromptCache: true,
		inputPrice: 3.0,
		outputPrice: 15.0,
		cacheWritesPrice: 3.75,
		cacheReadsPrice: 0.3,
		thinking: false,
	},
	"claude-3-5-sonnet-v2@20241022": {
		maxTokens: 8192,
		contextWindow: 200_000,
		supportsImages: true,
		supportsComputerUse: true,
		supportsPromptCache: true,
		inputPrice: 3.0,
		outputPrice: 15.0,
		cacheWritesPrice: 3.75,
		cacheReadsPrice: 0.3,
	},
	"claude-3-5-sonnet@20240620": {
		maxTokens: 8192,
		contextWindow: 200_000,
		supportsImages: true,
		supportsPromptCache: true,
		inputPrice: 3.0,
		outputPrice: 15.0,
		cacheWritesPrice: 3.75,
		cacheReadsPrice: 0.3,
	},
	"claude-3-5-haiku@20241022": {
		maxTokens: 8192,
		contextWindow: 200_000,
		supportsImages: false,
		supportsPromptCache: true,
		inputPrice: 1.0,
		outputPrice: 5.0,
		cacheWritesPrice: 1.25,
		cacheReadsPrice: 0.1,
	},
	"claude-3-opus@20240229": {
		maxTokens: 4096,
		contextWindow: 200_000,
		supportsImages: true,
		supportsPromptCache: true,
		inputPrice: 15.0,
		outputPrice: 75.0,
		cacheWritesPrice: 18.75,
		cacheReadsPrice: 1.5,
	},
	"claude-3-haiku@20240307": {
		maxTokens: 4096,
		contextWindow: 200_000,
		supportsImages: true,
		supportsPromptCache: true,
		inputPrice: 0.25,
		outputPrice: 1.25,
		cacheWritesPrice: 0.3,
		cacheReadsPrice: 0.03,
	},
} as const satisfies Record<string, ModelInfo>
// Defaults applied to OpenAI-compatible custom models when no metadata is
// available. maxTokens of -1 means "no explicit limit configured".
export const openAiModelInfoSaneDefaults: ModelInfo = {
	maxTokens: -1,
	contextWindow: 128_000,
	supportsImages: true,
	supportsPromptCache: false,
	inputPrice: 0,
	outputPrice: 0,
}
// Gemini
// https://ai.google.dev/gemini-api/docs/models/gemini

// Union of the model IDs declared in `geminiModels` below.
export type GeminiModelId = keyof typeof geminiModels

export const geminiDefaultModelId: GeminiModelId = "gemini-2.0-flash-001"

// Prices are USD per million tokens. Entries with a `tiers` array use
// context-length-dependent pricing; the top-level prices are the highest
// tier's values. Experimental ("exp") models are free of charge.
export const geminiModels = {
	"gemini-2.5-flash-preview-04-17:thinking": {
		maxTokens: 65_535,
		contextWindow: 1_048_576,
		supportsImages: true,
		supportsPromptCache: false,
		inputPrice: 0.15,
		outputPrice: 3.5,
		thinking: true,
		maxThinkingTokens: 24_576,
	},
	"gemini-2.5-flash-preview-04-17": {
		maxTokens: 65_535,
		contextWindow: 1_048_576,
		supportsImages: true,
		supportsPromptCache: false,
		inputPrice: 0.15,
		outputPrice: 0.6,
		thinking: false,
	},
	"gemini-2.5-pro-exp-03-25": {
		maxTokens: 65_535,
		contextWindow: 1_048_576,
		supportsImages: true,
		supportsPromptCache: false,
		inputPrice: 0,
		outputPrice: 0,
	},
	"gemini-2.5-pro-preview-03-25": {
		maxTokens: 65_535,
		contextWindow: 1_048_576,
		supportsImages: true,
		supportsPromptCache: true,
		isPromptCacheOptional: true,
		inputPrice: 2.5, // This is the pricing for prompts above 200k tokens.
		outputPrice: 15,
		cacheReadsPrice: 0.625,
		cacheWritesPrice: 4.5,
		tiers: [
			{
				contextWindow: 200_000,
				inputPrice: 1.25,
				outputPrice: 10,
				cacheReadsPrice: 0.31,
			},
			{
				contextWindow: Infinity,
				inputPrice: 2.5,
				outputPrice: 15,
				cacheReadsPrice: 0.625,
			},
		],
	},
	"gemini-2.5-pro-preview-05-06": {
		maxTokens: 65_535,
		contextWindow: 1_048_576,
		supportsImages: true,
		supportsPromptCache: true,
		isPromptCacheOptional: true,
		inputPrice: 2.5, // This is the pricing for prompts above 200k tokens.
		outputPrice: 15,
		cacheReadsPrice: 0.625,
		cacheWritesPrice: 4.5,
		tiers: [
			{
				contextWindow: 200_000,
				inputPrice: 1.25,
				outputPrice: 10,
				cacheReadsPrice: 0.31,
			},
			{
				contextWindow: Infinity,
				inputPrice: 2.5,
				outputPrice: 15,
				cacheReadsPrice: 0.625,
			},
		],
	},
	"gemini-2.0-flash-001": {
		maxTokens: 8192,
		contextWindow: 1_048_576,
		supportsImages: true,
		supportsPromptCache: true,
		isPromptCacheOptional: true,
		inputPrice: 0.1,
		outputPrice: 0.4,
		cacheReadsPrice: 0.025,
		cacheWritesPrice: 1.0,
	},
	"gemini-2.0-flash-lite-preview-02-05": {
		maxTokens: 8192,
		contextWindow: 1_048_576,
		supportsImages: true,
		supportsPromptCache: false,
		inputPrice: 0,
		outputPrice: 0,
	},
	"gemini-2.0-pro-exp-02-05": {
		maxTokens: 8192,
		contextWindow: 2_097_152,
		supportsImages: true,
		supportsPromptCache: false,
		inputPrice: 0,
		outputPrice: 0,
	},
	"gemini-2.0-flash-thinking-exp-01-21": {
		maxTokens: 65_536,
		contextWindow: 1_048_576,
		supportsImages: true,
		supportsPromptCache: false,
		inputPrice: 0,
		outputPrice: 0,
	},
	"gemini-2.0-flash-thinking-exp-1219": {
		maxTokens: 8192,
		contextWindow: 32_767,
		supportsImages: true,
		supportsPromptCache: false,
		inputPrice: 0,
		outputPrice: 0,
	},
	"gemini-2.0-flash-exp": {
		maxTokens: 8192,
		contextWindow: 1_048_576,
		supportsImages: true,
		supportsPromptCache: false,
		inputPrice: 0,
		outputPrice: 0,
	},
	"gemini-1.5-flash-002": {
		maxTokens: 8192,
		contextWindow: 1_048_576,
		supportsImages: true,
		supportsPromptCache: true,
		isPromptCacheOptional: true,
		inputPrice: 0.15, // This is the pricing for prompts above 128k tokens.
		outputPrice: 0.6,
		cacheReadsPrice: 0.0375,
		cacheWritesPrice: 1.0,
		tiers: [
			{
				contextWindow: 128_000,
				inputPrice: 0.075,
				outputPrice: 0.3,
				cacheReadsPrice: 0.01875,
			},
			{
				contextWindow: Infinity,
				inputPrice: 0.15,
				outputPrice: 0.6,
				cacheReadsPrice: 0.0375,
			},
		],
	},
	"gemini-1.5-flash-exp-0827": {
		maxTokens: 8192,
		contextWindow: 1_048_576,
		supportsImages: true,
		supportsPromptCache: false,
		inputPrice: 0,
		outputPrice: 0,
	},
	"gemini-1.5-flash-8b-exp-0827": {
		maxTokens: 8192,
		contextWindow: 1_048_576,
		supportsImages: true,
		supportsPromptCache: false,
		inputPrice: 0,
		outputPrice: 0,
	},
	"gemini-1.5-pro-002": {
		maxTokens: 8192,
		contextWindow: 2_097_152,
		supportsImages: true,
		supportsPromptCache: false,
		inputPrice: 0,
		outputPrice: 0,
	},
	"gemini-1.5-pro-exp-0827": {
		maxTokens: 8192,
		contextWindow: 2_097_152,
		supportsImages: true,
		supportsPromptCache: false,
		inputPrice: 0,
		outputPrice: 0,
	},
	"gemini-exp-1206": {
		maxTokens: 8192,
		contextWindow: 2_097_152,
		supportsImages: true,
		supportsPromptCache: false,
		inputPrice: 0,
		outputPrice: 0,
	},
} as const satisfies Record<string, ModelInfo>
// OpenAI Native
// https://openai.com/api/pricing/

// Union of the model IDs declared in `openAiNativeModels` below.
export type OpenAiNativeModelId = keyof typeof openAiNativeModels

export const openAiNativeDefaultModelId: OpenAiNativeModelId = "gpt-4.1"

// Prices are USD per million tokens. Reasoning models ("o" series) appear
// once per reasoning-effort level; the "-high"/"-low" suffixed IDs are
// pseudo-IDs that select the effort via `reasoningEffort`.
export const openAiNativeModels = {
	"gpt-4.1": {
		maxTokens: 32_768,
		contextWindow: 1_047_576,
		supportsImages: true,
		supportsPromptCache: true,
		inputPrice: 2,
		outputPrice: 8,
		cacheReadsPrice: 0.5,
	},
	"gpt-4.1-mini": {
		maxTokens: 32_768,
		contextWindow: 1_047_576,
		supportsImages: true,
		supportsPromptCache: true,
		inputPrice: 0.4,
		outputPrice: 1.6,
		cacheReadsPrice: 0.1,
	},
	"gpt-4.1-nano": {
		maxTokens: 32_768,
		contextWindow: 1_047_576,
		supportsImages: true,
		supportsPromptCache: true,
		inputPrice: 0.1,
		outputPrice: 0.4,
		cacheReadsPrice: 0.025,
	},
	o3: {
		maxTokens: 100_000,
		contextWindow: 200_000,
		supportsImages: true,
		supportsPromptCache: true,
		inputPrice: 10.0,
		outputPrice: 40.0,
		cacheReadsPrice: 2.5,
		reasoningEffort: "medium",
	},
	"o3-high": {
		maxTokens: 100_000,
		contextWindow: 200_000,
		supportsImages: true,
		supportsPromptCache: true,
		inputPrice: 10.0,
		outputPrice: 40.0,
		cacheReadsPrice: 2.5,
		reasoningEffort: "high",
	},
	"o3-low": {
		maxTokens: 100_000,
		contextWindow: 200_000,
		supportsImages: true,
		supportsPromptCache: true,
		inputPrice: 10.0,
		outputPrice: 40.0,
		cacheReadsPrice: 2.5,
		reasoningEffort: "low",
	},
	"o4-mini": {
		maxTokens: 100_000,
		contextWindow: 200_000,
		supportsImages: true,
		supportsPromptCache: true,
		inputPrice: 1.1,
		outputPrice: 4.4,
		cacheReadsPrice: 0.275,
		reasoningEffort: "medium",
	},
	"o4-mini-high": {
		maxTokens: 100_000,
		contextWindow: 200_000,
		supportsImages: true,
		supportsPromptCache: true,
		inputPrice: 1.1,
		outputPrice: 4.4,
		cacheReadsPrice: 0.275,
		reasoningEffort: "high",
	},
	"o4-mini-low": {
		maxTokens: 100_000,
		contextWindow: 200_000,
		supportsImages: true,
		supportsPromptCache: true,
		inputPrice: 1.1,
		outputPrice: 4.4,
		cacheReadsPrice: 0.275,
		reasoningEffort: "low",
	},
	"o3-mini": {
		maxTokens: 100_000,
		contextWindow: 200_000,
		supportsImages: false,
		supportsPromptCache: true,
		inputPrice: 1.1,
		outputPrice: 4.4,
		cacheReadsPrice: 0.55,
		reasoningEffort: "medium",
	},
	"o3-mini-high": {
		maxTokens: 100_000,
		contextWindow: 200_000,
		supportsImages: false,
		supportsPromptCache: true,
		inputPrice: 1.1,
		outputPrice: 4.4,
		cacheReadsPrice: 0.55,
		reasoningEffort: "high",
	},
	"o3-mini-low": {
		maxTokens: 100_000,
		contextWindow: 200_000,
		supportsImages: false,
		supportsPromptCache: true,
		inputPrice: 1.1,
		outputPrice: 4.4,
		cacheReadsPrice: 0.55,
		reasoningEffort: "low",
	},
	o1: {
		maxTokens: 100_000,
		contextWindow: 200_000,
		supportsImages: true,
		supportsPromptCache: true,
		inputPrice: 15,
		outputPrice: 60,
		cacheReadsPrice: 7.5,
	},
	"o1-preview": {
		maxTokens: 32_768,
		contextWindow: 128_000,
		supportsImages: true,
		supportsPromptCache: true,
		inputPrice: 15,
		outputPrice: 60,
		cacheReadsPrice: 7.5,
	},
	"o1-mini": {
		maxTokens: 65_536,
		contextWindow: 128_000,
		supportsImages: true,
		supportsPromptCache: true,
		inputPrice: 1.1,
		outputPrice: 4.4,
		cacheReadsPrice: 0.55,
	},
	"gpt-4.5-preview": {
		maxTokens: 16_384,
		contextWindow: 128_000,
		supportsImages: true,
		supportsPromptCache: true,
		inputPrice: 75,
		outputPrice: 150,
		cacheReadsPrice: 37.5,
	},
	"gpt-4o": {
		maxTokens: 16_384,
		contextWindow: 128_000,
		supportsImages: true,
		supportsPromptCache: true,
		inputPrice: 2.5,
		outputPrice: 10,
		cacheReadsPrice: 1.25,
	},
	"gpt-4o-mini": {
		maxTokens: 16_384,
		contextWindow: 128_000,
		supportsImages: true,
		supportsPromptCache: true,
		inputPrice: 0.15,
		outputPrice: 0.6,
		cacheReadsPrice: 0.075,
	},
} as const satisfies Record<string, ModelInfo>
// DeepSeek
// https://platform.deepseek.com/docs/api

// Union of the model IDs defined in `deepSeekModels` below.
export type DeepSeekModelId = keyof typeof deepSeekModels

// Default DeepSeek model.
export const deepSeekDefaultModelId: DeepSeekModelId = "deepseek-chat"

// Capability and pricing table for DeepSeek's API. For both models the
// input price equals the cache-write (cache-miss) price, while cache hits
// are billed at the cheaper cacheReadsPrice.
export const deepSeekModels = {
	"deepseek-chat": {
		maxTokens: 8192,
		contextWindow: 64_000,
		supportsImages: false,
		supportsPromptCache: true,
		inputPrice: 0.27, // $0.27 per million tokens (cache miss)
		outputPrice: 1.1, // $1.10 per million tokens
		cacheWritesPrice: 0.27, // $0.27 per million tokens (cache miss)
		cacheReadsPrice: 0.07, // $0.07 per million tokens (cache hit).
		description: `DeepSeek-V3 achieves a significant breakthrough in inference speed over previous models. It tops the leaderboard among open-source models and rivals the most advanced closed-source models globally.`,
	},
	"deepseek-reasoner": {
		maxTokens: 8192,
		contextWindow: 64_000,
		supportsImages: false,
		supportsPromptCache: true,
		inputPrice: 0.55, // $0.55 per million tokens (cache miss)
		outputPrice: 2.19, // $2.19 per million tokens
		cacheWritesPrice: 0.55, // $0.55 per million tokens (cache miss)
		cacheReadsPrice: 0.14, // $0.14 per million tokens (cache hit)
		description: `DeepSeek-R1 achieves performance comparable to OpenAI-o1 across math, code, and reasoning tasks. Supports Chain of Thought reasoning with up to 32K tokens.`,
	},
} as const satisfies Record<string, ModelInfo>
// Azure OpenAI
// https://learn.microsoft.com/en-us/azure/ai-services/openai/api-version-deprecation
// https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs
// API version used for Azure OpenAI requests when the user does not override
// it; check the deprecation schedule linked above before bumping this value.
export const azureOpenAiDefaultApiVersion = "2024-08-01-preview"
// Mistral
// https://docs.mistral.ai/getting-started/models/models_overview/

// Union of the model IDs defined in `mistralModels` below.
export type MistralModelId = keyof typeof mistralModels

// Default Mistral model.
export const mistralDefaultModelId: MistralModelId = "codestral-latest"

// Capability and pricing table for Mistral's API. None of these models
// advertise prompt caching; only pixtral accepts images.
// NOTE(review): every entry sets maxTokens equal to its full contextWindow —
// confirm that is intentional and not a placeholder.
export const mistralModels = {
	"codestral-latest": {
		maxTokens: 256_000,
		contextWindow: 256_000,
		supportsImages: false,
		supportsPromptCache: false,
		inputPrice: 0.3,
		outputPrice: 0.9,
	},
	"mistral-large-latest": {
		maxTokens: 131_000,
		contextWindow: 131_000,
		supportsImages: false,
		supportsPromptCache: false,
		inputPrice: 2.0,
		outputPrice: 6.0,
	},
	"ministral-8b-latest": {
		maxTokens: 131_000,
		contextWindow: 131_000,
		supportsImages: false,
		supportsPromptCache: false,
		inputPrice: 0.1,
		outputPrice: 0.1,
	},
	"ministral-3b-latest": {
		maxTokens: 131_000,
		contextWindow: 131_000,
		supportsImages: false,
		supportsPromptCache: false,
		inputPrice: 0.04,
		outputPrice: 0.04,
	},
	"mistral-small-latest": {
		maxTokens: 32_000,
		contextWindow: 32_000,
		supportsImages: false,
		supportsPromptCache: false,
		inputPrice: 0.2,
		outputPrice: 0.6,
	},
	"pixtral-large-latest": {
		maxTokens: 131_000,
		contextWindow: 131_000,
		supportsImages: true,
		supportsPromptCache: false,
		inputPrice: 2.0,
		outputPrice: 6.0,
	},
} as const satisfies Record<string, ModelInfo>
// Unbound Security
// https://www.unboundsecurity.ai/ai-gateway
// Only a default model ID and its metadata are hard-coded for this provider —
// no static model table like the ones above.
// NOTE(review): presumably the gateway's model list is fetched dynamically;
// confirm against the provider integration code.
export const unboundDefaultModelId = "anthropic/claude-3-7-sonnet-20250219"
export const unboundDefaultModelInfo: ModelInfo = {
	maxTokens: 8192,
	contextWindow: 200_000,
	supportsImages: true,
	supportsPromptCache: true,
	inputPrice: 3.0,
	outputPrice: 15.0,
	cacheWritesPrice: 3.75,
	cacheReadsPrice: 0.3,
}
// LiteLLM
// https://docs.litellm.ai/
// Default model ID and metadata for the LiteLLM proxy. The fields match the
// Unbound default above except that this one also sets supportsComputerUse.
export const litellmDefaultModelId = "anthropic/claude-3-7-sonnet-20250219"
export const litellmDefaultModelInfo: ModelInfo = {
	maxTokens: 8192,
	contextWindow: 200_000,
	supportsImages: true,
	supportsComputerUse: true,
	supportsPromptCache: true,
	inputPrice: 3.0,
	outputPrice: 15.0,
	cacheWritesPrice: 3.75,
	cacheReadsPrice: 0.3,
}
// xAI
// https://docs.x.ai/docs/api-reference

// Union of the model IDs defined in `xaiModels` below.
export type XAIModelId = keyof typeof xaiModels

// Default xAI model.
export const xaiDefaultModelId: XAIModelId = "grok-3-beta"

// Capability and pricing table for xAI's Grok models. No entry enables prompt
// caching; only the vision variants accept images.
export const xaiModels = {
	"grok-3-beta": {
		maxTokens: 8192,
		contextWindow: 131072,
		supportsImages: false,
		supportsPromptCache: false,
		inputPrice: 3.0,
		outputPrice: 15.0,
		description: "xAI's Grok-3 beta model with 131K context window",
	},
	"grok-3-fast-beta": {
		maxTokens: 8192,
		contextWindow: 131072,
		supportsImages: false,
		supportsPromptCache: false,
		inputPrice: 5.0,
		outputPrice: 25.0,
		description: "xAI's Grok-3 fast beta model with 131K context window",
	},
	"grok-3-mini-beta": {
		maxTokens: 8192,
		contextWindow: 131072,
		supportsImages: false,
		supportsPromptCache: false,
		inputPrice: 0.3,
		outputPrice: 0.5,
		description: "xAI's Grok-3 mini beta model with 131K context window",
	},
	"grok-3-mini-fast-beta": {
		maxTokens: 8192,
		contextWindow: 131072,
		supportsImages: false,
		supportsPromptCache: false,
		inputPrice: 0.6,
		outputPrice: 4.0,
		description: "xAI's Grok-3 mini fast beta model with 131K context window",
	},
	"grok-2-latest": {
		maxTokens: 8192,
		contextWindow: 131072,
		supportsImages: false,
		supportsPromptCache: false,
		inputPrice: 2.0,
		outputPrice: 10.0,
		description: "xAI's Grok-2 model - latest version with 131K context window",
	},
	"grok-2": {
		maxTokens: 8192,
		contextWindow: 131072,
		supportsImages: false,
		supportsPromptCache: false,
		inputPrice: 2.0,
		outputPrice: 10.0,
		description: "xAI's Grok-2 model with 131K context window",
	},
	"grok-2-1212": {
		maxTokens: 8192,
		contextWindow: 131072,
		supportsImages: false,
		supportsPromptCache: false,
		inputPrice: 2.0,
		outputPrice: 10.0,
		description: "xAI's Grok-2 model (version 1212) with 131K context window",
	},
	// Vision variants: image input enabled, but a smaller 32K context window.
	"grok-2-vision-latest": {
		maxTokens: 8192,
		contextWindow: 32768,
		supportsImages: true,
		supportsPromptCache: false,
		inputPrice: 2.0,
		outputPrice: 10.0,
		description: "xAI's Grok-2 Vision model - latest version with image support and 32K context window",
	},
	"grok-2-vision": {
		maxTokens: 8192,
		contextWindow: 32768,
		supportsImages: true,
		supportsPromptCache: false,
		inputPrice: 2.0,
		outputPrice: 10.0,
		description: "xAI's Grok-2 Vision model with image support and 32K context window",
	},
	"grok-2-vision-1212": {
		maxTokens: 8192,
		contextWindow: 32768,
		supportsImages: true,
		supportsPromptCache: false,
		inputPrice: 2.0,
		outputPrice: 10.0,
		description: "xAI's Grok-2 Vision model (version 1212) with image support and 32K context window",
	},
	"grok-vision-beta": {
		maxTokens: 8192,
		contextWindow: 8192,
		supportsImages: true,
		supportsPromptCache: false,
		inputPrice: 5.0,
		outputPrice: 15.0,
		description: "xAI's Grok Vision Beta model with image support and 8K context window",
	},
	"grok-beta": {
		maxTokens: 8192,
		contextWindow: 131072,
		supportsImages: false,
		supportsPromptCache: false,
		inputPrice: 5.0,
		outputPrice: 15.0,
		description: "xAI's Grok Beta model (legacy) with 131K context window",
	},
} as const satisfies Record<string, ModelInfo>
// Models exposed through the VS Code Language Model API. Each entry extends
// the base ModelInfo with the family/version/name/supportsToolCalling/
// maxInputTokens metadata that the LM API reports; the `satisfies` clause at
// the bottom enforces that shape. In every entry maxInputTokens equals
// contextWindow.
// NOTE(review): inputPrice/outputPrice are 0 throughout — presumably billing
// is handled by the VS Code LM host rather than this app; confirm.
export type VscodeLlmModelId = keyof typeof vscodeLlmModels
export const vscodeLlmDefaultModelId: VscodeLlmModelId = "claude-3.5-sonnet"
export const vscodeLlmModels = {
	"gpt-3.5-turbo": {
		contextWindow: 12114,
		supportsImages: false,
		supportsPromptCache: false,
		inputPrice: 0,
		outputPrice: 0,
		family: "gpt-3.5-turbo",
		version: "gpt-3.5-turbo-0613",
		name: "GPT 3.5 Turbo",
		supportsToolCalling: true,
		maxInputTokens: 12114,
	},
	"gpt-4o-mini": {
		contextWindow: 12115,
		supportsImages: false,
		supportsPromptCache: false,
		inputPrice: 0,
		outputPrice: 0,
		family: "gpt-4o-mini",
		version: "gpt-4o-mini-2024-07-18",
		name: "GPT-4o mini",
		supportsToolCalling: true,
		maxInputTokens: 12115,
	},
	"gpt-4": {
		contextWindow: 28501,
		supportsImages: false,
		supportsPromptCache: false,
		inputPrice: 0,
		outputPrice: 0,
		family: "gpt-4",
		version: "gpt-4-0613",
		name: "GPT 4",
		supportsToolCalling: true,
		maxInputTokens: 28501,
	},
	"gpt-4-0125-preview": {
		contextWindow: 63826,
		supportsImages: false,
		supportsPromptCache: false,
		inputPrice: 0,
		outputPrice: 0,
		family: "gpt-4-turbo",
		version: "gpt-4-0125-preview",
		name: "GPT 4 Turbo",
		supportsToolCalling: true,
		maxInputTokens: 63826,
	},
	"gpt-4o": {
		contextWindow: 63827,
		supportsImages: true,
		supportsPromptCache: false,
		inputPrice: 0,
		outputPrice: 0,
		family: "gpt-4o",
		version: "gpt-4o-2024-11-20",
		name: "GPT-4o",
		supportsToolCalling: true,
		maxInputTokens: 63827,
	},
	o1: {
		contextWindow: 19827,
		supportsImages: false,
		supportsPromptCache: false,
		inputPrice: 0,
		outputPrice: 0,
		family: "o1-ga",
		version: "o1-2024-12-17",
		name: "o1 (Preview)",
		supportsToolCalling: true,
		maxInputTokens: 19827,
	},
	"o3-mini": {
		contextWindow: 63827,
		supportsImages: false,
		supportsPromptCache: false,
		inputPrice: 0,
		outputPrice: 0,
		family: "o3-mini",
		version: "o3-mini-2025-01-31",
		name: "o3-mini",
		supportsToolCalling: true,
		maxInputTokens: 63827,
	},
	"claude-3.5-sonnet": {
		contextWindow: 81638,
		supportsImages: true,
		supportsPromptCache: false,
		inputPrice: 0,
		outputPrice: 0,
		family: "claude-3.5-sonnet",
		version: "claude-3.5-sonnet",
		name: "Claude 3.5 Sonnet",
		supportsToolCalling: true,
		maxInputTokens: 81638,
	},
	"claude-3.7-sonnet": {
		contextWindow: 89827,
		supportsImages: true,
		supportsPromptCache: false,
		inputPrice: 0,
		outputPrice: 0,
		family: "claude-3.7-sonnet",
		version: "claude-3.7-sonnet",
		name: "Claude 3.7 Sonnet",
		supportsToolCalling: true,
		maxInputTokens: 89827,
	},
	// The "thought" variant is the only entry that sets `thinking`, and tool
	// calling is disabled for it.
	"claude-3.7-sonnet-thought": {
		contextWindow: 89827,
		supportsImages: true,
		supportsPromptCache: false,
		inputPrice: 0,
		outputPrice: 0,
		family: "claude-3.7-sonnet-thought",
		version: "claude-3.7-sonnet-thought",
		name: "Claude 3.7 Sonnet Thinking",
		supportsToolCalling: false,
		maxInputTokens: 89827,
		thinking: true,
	},
	"gemini-2.0-flash-001": {
		contextWindow: 127827,
		supportsImages: true,
		supportsPromptCache: false,
		inputPrice: 0,
		outputPrice: 0,
		family: "gemini-2.0-flash",
		version: "gemini-2.0-flash-001",
		name: "Gemini 2.0 Flash",
		supportsToolCalling: false,
		maxInputTokens: 127827,
	},
	"gemini-2.5-pro": {
		contextWindow: 63830,
		supportsImages: true,
		supportsPromptCache: false,
		inputPrice: 0,
		outputPrice: 0,
		family: "gemini-2.5-pro",
		version: "gemini-2.5-pro-preview-03-25",
		name: "Gemini 2.5 Pro (Preview)",
		supportsToolCalling: true,
		maxInputTokens: 63830,
	},
	"o4-mini": {
		contextWindow: 111446,
		supportsImages: false,
		supportsPromptCache: false,
		inputPrice: 0,
		outputPrice: 0,
		family: "o4-mini",
		version: "o4-mini-2025-04-16",
		name: "o4-mini (Preview)",
		supportsToolCalling: true,
		maxInputTokens: 111446,
	},
	"gpt-4.1": {
		contextWindow: 111446,
		supportsImages: true,
		supportsPromptCache: false,
		inputPrice: 0,
		outputPrice: 0,
		family: "gpt-4.1",
		version: "gpt-4.1-2025-04-14",
		name: "GPT-4.1 (Preview)",
		supportsToolCalling: true,
		maxInputTokens: 111446,
	},
} as const satisfies Record<
	string,
	ModelInfo & {
		family: string
		version: string
		name: string
		supportsToolCalling: boolean
		maxInputTokens: number
	}
>
// Groq
// https://console.groq.com/docs/models

// Explicit union (rather than `keyof typeof`) of the supported Groq model IDs;
// every member must have an entry in `groqModels` below.
export type GroqModelId =
	| "llama-3.1-8b-instant"
	| "llama-3.3-70b-versatile"
	| "meta-llama/llama-4-scout-17b-16e-instruct"
	| "meta-llama/llama-4-maverick-17b-128e-instruct"
	| "mistral-saba-24b"
	| "qwen-qwq-32b"
	| "deepseek-r1-distill-llama-70b"
export const groqDefaultModelId: GroqModelId = "llama-3.3-70b-versatile" // Defaulting to Llama3 70B Versatile

// Capability table for Groq-hosted models.
// NOTE(review): all prices are 0 — confirm whether Groq pricing is
// intentionally unset here or simply not yet filled in.
export const groqModels = {
	// Models based on API response: https://api.groq.com/openai/v1/models
	"llama-3.1-8b-instant": {
		maxTokens: 131072,
		contextWindow: 131072,
		supportsImages: false,
		supportsPromptCache: false,
		inputPrice: 0,
		outputPrice: 0,
		description: "Meta Llama 3.1 8B Instant model, 128K context.",
	},
	"llama-3.3-70b-versatile": {
		maxTokens: 32768,
		contextWindow: 131072,
		supportsImages: false,
		supportsPromptCache: false,
		inputPrice: 0,
		outputPrice: 0,
		description: "Meta Llama 3.3 70B Versatile model, 128K context.",
	},
	"meta-llama/llama-4-scout-17b-16e-instruct": {
		maxTokens: 8192,
		contextWindow: 131072,
		supportsImages: false,
		supportsPromptCache: false,
		inputPrice: 0,
		outputPrice: 0,
		description: "Meta Llama 4 Scout 17B Instruct model, 128K context.",
	},
	"meta-llama/llama-4-maverick-17b-128e-instruct": {
		maxTokens: 8192,
		contextWindow: 131072,
		supportsImages: false,
		supportsPromptCache: false,
		inputPrice: 0,
		outputPrice: 0,
		description: "Meta Llama 4 Maverick 17B Instruct model, 128K context.",
	},
	"mistral-saba-24b": {
		maxTokens: 32768,
		contextWindow: 32768,
		supportsImages: false,
		supportsPromptCache: false,
		inputPrice: 0,
		outputPrice: 0,
		description: "Mistral Saba 24B model, 32K context.",
	},
	"qwen-qwq-32b": {
		maxTokens: 131072,
		contextWindow: 131072,
		supportsImages: false,
		supportsPromptCache: false,
		inputPrice: 0,
		outputPrice: 0,
		description: "Alibaba Qwen QwQ 32B model, 128K context.",
	},
	"deepseek-r1-distill-llama-70b": {
		maxTokens: 131072,
		contextWindow: 131072,
		supportsImages: false,
		supportsPromptCache: false,
		inputPrice: 0,
		outputPrice: 0,
		description: "DeepSeek R1 Distill Llama 70B model, 128K context.",
	},
} as const satisfies Record<string, ModelInfo>
// Chutes AI
// https://llm.chutes.ai/v1 (OpenAI compatible)

// Explicit union of the supported Chutes model IDs; every member must have an
// entry in `chutesModels` below.
export type ChutesModelId =
	| "deepseek-ai/DeepSeek-R1"
	| "deepseek-ai/DeepSeek-V3"
	| "unsloth/Llama-3.3-70B-Instruct"
	| "chutesai/Llama-4-Scout-17B-16E-Instruct"
	| "unsloth/Mistral-Nemo-Instruct-2407"
	| "unsloth/gemma-3-12b-it"
	| "NousResearch/DeepHermes-3-Llama-3-8B-Preview"
	| "unsloth/gemma-3-4b-it"
	| "nvidia/Llama-3_3-Nemotron-Super-49B-v1"
	| "nvidia/Llama-3_1-Nemotron-Ultra-253B-v1"
	| "chutesai/Llama-4-Maverick-17B-128E-Instruct-FP8"
	| "deepseek-ai/DeepSeek-V3-Base"
	| "deepseek-ai/DeepSeek-R1-Zero"
	| "deepseek-ai/DeepSeek-V3-0324"
	| "microsoft/MAI-DS-R1-FP8"
	| "tngtech/DeepSeek-R1T-Chimera"
export const chutesDefaultModelId: ChutesModelId = "deepseek-ai/DeepSeek-R1"

// Capability table for Chutes-hosted models. Every entry shares
// maxTokens 32768, no image support, no prompt caching, and zero prices.
// NOTE(review): confirm whether the zero prices are intentional for this
// provider or placeholders.
export const chutesModels = {
	"deepseek-ai/DeepSeek-R1": {
		maxTokens: 32768,
		contextWindow: 163840,
		supportsImages: false,
		supportsPromptCache: false,
		inputPrice: 0,
		outputPrice: 0,
		description: "DeepSeek R1 model.",
	},
	"deepseek-ai/DeepSeek-V3": {
		maxTokens: 32768,
		contextWindow: 163840,
		supportsImages: false,
		supportsPromptCache: false,
		inputPrice: 0,
		outputPrice: 0,
		description: "DeepSeek V3 model.",
	},
	"unsloth/Llama-3.3-70B-Instruct": {
		maxTokens: 32768, // From Groq
		contextWindow: 131072, // From Groq
		supportsImages: false,
		supportsPromptCache: false,
		inputPrice: 0,
		outputPrice: 0,
		description: "Unsloth Llama 3.3 70B Instruct model.",
	},
	"chutesai/Llama-4-Scout-17B-16E-Instruct": {
		maxTokens: 32768,
		contextWindow: 512000,
		supportsImages: false,
		supportsPromptCache: false,
		inputPrice: 0,
		outputPrice: 0,
		description: "ChutesAI Llama 4 Scout 17B Instruct model, 512K context.",
	},
	"unsloth/Mistral-Nemo-Instruct-2407": {
		maxTokens: 32768,
		contextWindow: 128000,
		supportsImages: false,
		supportsPromptCache: false,
		inputPrice: 0,
		outputPrice: 0,
		description: "Unsloth Mistral Nemo Instruct model.",
	},
	"unsloth/gemma-3-12b-it": {
		maxTokens: 32768,
		contextWindow: 131072,
		supportsImages: false,
		supportsPromptCache: false,
		inputPrice: 0,
		outputPrice: 0,
		description: "Unsloth Gemma 3 12B IT model.",
	},
	"NousResearch/DeepHermes-3-Llama-3-8B-Preview": {
		maxTokens: 32768,
		contextWindow: 131072,
		supportsImages: false,
		supportsPromptCache: false,
		inputPrice: 0,
		outputPrice: 0,
		description: "Nous DeepHermes 3 Llama 3 8B Preview model.",
	},
	"unsloth/gemma-3-4b-it": {
		maxTokens: 32768,
		contextWindow: 131072,
		supportsImages: false,
		supportsPromptCache: false,
		inputPrice: 0,
		outputPrice: 0,
		description: "Unsloth Gemma 3 4B IT model.",
	},
	"nvidia/Llama-3_3-Nemotron-Super-49B-v1": {
		maxTokens: 32768,
		contextWindow: 131072,
		supportsImages: false,
		supportsPromptCache: false,
		inputPrice: 0,
		outputPrice: 0,
		description: "Nvidia Llama 3.3 Nemotron Super 49B model.",
	},
	"nvidia/Llama-3_1-Nemotron-Ultra-253B-v1": {
		maxTokens: 32768,
		contextWindow: 131072,
		supportsImages: false,
		supportsPromptCache: false,
		inputPrice: 0,
		outputPrice: 0,
		description: "Nvidia Llama 3.1 Nemotron Ultra 253B model.",
	},
	"chutesai/Llama-4-Maverick-17B-128E-Instruct-FP8": {
		maxTokens: 32768,
		contextWindow: 256000,
		supportsImages: false,
		supportsPromptCache: false,
		inputPrice: 0,
		outputPrice: 0,
		description: "ChutesAI Llama 4 Maverick 17B Instruct FP8 model.",
	},
	"deepseek-ai/DeepSeek-V3-Base": {
		maxTokens: 32768,
		contextWindow: 163840,
		supportsImages: false,
		supportsPromptCache: false,
		inputPrice: 0,
		outputPrice: 0,
		description: "DeepSeek V3 Base model.",
	},
	"deepseek-ai/DeepSeek-R1-Zero": {
		maxTokens: 32768,
		contextWindow: 163840,
		supportsImages: false,
		supportsPromptCache: false,
		inputPrice: 0,
		outputPrice: 0,
		description: "DeepSeek R1 Zero model.",
	},
	"deepseek-ai/DeepSeek-V3-0324": {
		maxTokens: 32768,
		contextWindow: 163840,
		supportsImages: false,
		supportsPromptCache: false,
		inputPrice: 0,
		outputPrice: 0,
		description: "DeepSeek V3 (0324) model.",
	},
	"microsoft/MAI-DS-R1-FP8": {
		maxTokens: 32768,
		contextWindow: 163840,
		supportsImages: false,
		supportsPromptCache: false,
		inputPrice: 0,
		outputPrice: 0,
		description: "Microsoft MAI-DS-R1 FP8 model.",
	},
	"tngtech/DeepSeek-R1T-Chimera": {
		maxTokens: 32768,
		contextWindow: 163840,
		supportsImages: false,
		supportsPromptCache: false,
		inputPrice: 0,
		outputPrice: 0,
		description: "TNGTech DeepSeek R1T Chimera model.",
	},
} as const satisfies Record<string, ModelInfo>
- /**
- * Constants
- */
- // These models support reasoning efforts.
- export const REASONING_MODELS = new Set(["x-ai/grok-3-mini-beta", "grok-3-mini-beta", "grok-3-mini-fast-beta"])
- // These models support prompt caching.
- export const PROMPT_CACHING_MODELS = new Set([
- "anthropic/claude-3-haiku",
- "anthropic/claude-3-haiku:beta",
- "anthropic/claude-3-opus",
- "anthropic/claude-3-opus:beta",
- "anthropic/claude-3-sonnet",
- "anthropic/claude-3-sonnet:beta",
- "anthropic/claude-3.5-haiku",
- "anthropic/claude-3.5-haiku-20241022",
- "anthropic/claude-3.5-haiku-20241022:beta",
- "anthropic/claude-3.5-haiku:beta",
- "anthropic/claude-3.5-sonnet",
- "anthropic/claude-3.5-sonnet-20240620",
- "anthropic/claude-3.5-sonnet-20240620:beta",
- "anthropic/claude-3.5-sonnet:beta",
- "anthropic/claude-3.7-sonnet",
- "anthropic/claude-3.7-sonnet:beta",
- "anthropic/claude-3.7-sonnet:thinking",
- "google/gemini-2.5-pro-preview-03-25",
- "google/gemini-2.5-pro-preview-05-06",
- "google/gemini-2.0-flash-001",
- "google/gemini-flash-1.5",
- "google/gemini-flash-1.5-8b",
- ])
- // These models don't have prompt caching enabled by default (you can turn it on
- // in settings).
- export const OPTIONAL_PROMPT_CACHING_MODELS = new Set([
- "google/gemini-2.5-pro-preview-03-25",
- "google/gemini-2.5-pro-preview-05-06",
- "google/gemini-2.0-flash-001",
- "google/gemini-flash-1.5",
- "google/gemini-flash-1.5-8b",
- ])
- // https://www.anthropic.com/news/3-5-models-and-computer-use
- export const COMPUTER_USE_MODELS = new Set([
- "anthropic/claude-3.5-sonnet",
- "anthropic/claude-3.5-sonnet:beta",
- "anthropic/claude-3.7-sonnet",
- "anthropic/claude-3.7-sonnet:beta",
- "anthropic/claude-3.7-sonnet:thinking",
- ])
- const routerNames = ["openrouter", "requesty", "glama", "unbound", "litellm"] as const
- export type RouterName = (typeof routerNames)[number]
- export const isRouterName = (value: string): value is RouterName => routerNames.includes(value as RouterName)
- export function toRouterName(value?: string): RouterName {
- if (value && isRouterName(value)) {
- return value
- }
- throw new Error(`Invalid router name: ${value}`)
- }
// Map of model ID -> metadata, e.g. one router's model listing.
export type ModelRecord = Record<string, ModelInfo>

// Model listings keyed by router provider.
export type RouterModels = Record<RouterName, ModelRecord>
|