// api.ts — shared API provider/model configuration types and model metadata tables.
// Identifier for each supported LLM API backend the extension can talk to.
export type ApiProvider =
	| "anthropic"
	| "openrouter"
	| "bedrock"
	| "vertex"
	| "openai"
	| "ollama"
	| "lmstudio"
	| "gemini"
	| "openai-native"
// Union of every per-provider credential/endpoint/model setting. All fields are
// optional; only the subset matching the active ApiProvider is expected to be set.
export interface ApiHandlerOptions {
	apiModelId?: string // model id for providers that use a plain model-id string
	// Anthropic (direct API)
	apiKey?: string // anthropic
	anthropicBaseUrl?: string
	// OpenRouter
	openRouterApiKey?: string
	openRouterModelId?: string
	openRouterModelInfo?: ModelInfo
	// AWS Bedrock
	awsAccessKey?: string
	awsSecretKey?: string
	awsSessionToken?: string
	awsRegion?: string
	awsUseCrossRegionInference?: boolean
	// GCP Vertex AI
	vertexProjectId?: string
	vertexRegion?: string
	// OpenAI-compatible endpoints (custom base URL)
	openAiBaseUrl?: string
	openAiApiKey?: string
	openAiModelId?: string
	// Ollama (local)
	ollamaModelId?: string
	ollamaBaseUrl?: string
	// LM Studio (local)
	lmStudioModelId?: string
	lmStudioBaseUrl?: string
	// Google Gemini
	geminiApiKey?: string
	// OpenAI native API
	openAiNativeApiKey?: string
	// Azure OpenAI
	azureApiVersion?: string
}
// Full API configuration: every provider-specific option plus the selector
// that says which provider is active.
export type ApiConfiguration = ApiHandlerOptions & {
	apiProvider?: ApiProvider
}
// Models

// Capability and pricing metadata for a single model. Prices are in USD per
// million tokens (see the inline price comments on anthropicModels).
export interface ModelInfo {
	maxTokens?: number // max output tokens
	contextWindow?: number // total context size in tokens
	supportsImages?: boolean
	supportsComputerUse?: boolean
	supportsPromptCache: boolean // this value is hardcoded for now
	inputPrice?: number // USD per million input tokens
	outputPrice?: number // USD per million output tokens
	cacheWritesPrice?: number // USD per million cache-write tokens
	cacheReadsPrice?: number // USD per million cache-read tokens
	description?: string
}
// Anthropic
// https://docs.anthropic.com/en/docs/about-claude/models
export type AnthropicModelId = keyof typeof anthropicModels
export const anthropicDefaultModelId: AnthropicModelId = "claude-3-5-sonnet-20241022"
// Metadata table for models available on the direct Anthropic API.
// `satisfies` keeps literal key inference while validating each entry as ModelInfo.
export const anthropicModels = {
	"claude-3-5-sonnet-20241022": {
		maxTokens: 8192,
		contextWindow: 200_000,
		supportsImages: true,
		supportsComputerUse: true,
		supportsPromptCache: true,
		inputPrice: 3.0, // $3 per million input tokens
		outputPrice: 15.0, // $15 per million output tokens
		cacheWritesPrice: 3.75, // $3.75 per million tokens
		cacheReadsPrice: 0.3, // $0.30 per million tokens
	},
	"claude-3-5-haiku-20241022": {
		maxTokens: 8192,
		contextWindow: 200_000,
		supportsImages: false,
		supportsPromptCache: true,
		inputPrice: 1.0,
		outputPrice: 5.0,
		cacheWritesPrice: 1.25,
		cacheReadsPrice: 0.1,
	},
	"claude-3-opus-20240229": {
		maxTokens: 4096,
		contextWindow: 200_000,
		supportsImages: true,
		supportsPromptCache: true,
		inputPrice: 15.0,
		outputPrice: 75.0,
		cacheWritesPrice: 18.75,
		cacheReadsPrice: 1.5,
	},
	"claude-3-haiku-20240307": {
		maxTokens: 4096,
		contextWindow: 200_000,
		supportsImages: true,
		supportsPromptCache: true,
		inputPrice: 0.25,
		outputPrice: 1.25,
		cacheWritesPrice: 0.3,
		cacheReadsPrice: 0.03,
	},
} as const satisfies Record<string, ModelInfo> // as const assertion makes the object deeply readonly
// AWS Bedrock
// https://docs.aws.amazon.com/bedrock/latest/userguide/conversation-inference.html
export type BedrockModelId = keyof typeof bedrockModels
export const bedrockDefaultModelId: BedrockModelId = "anthropic.claude-3-5-sonnet-20241022-v2:0"
// Claude models as exposed through Bedrock. Note supportsPromptCache is false
// for every entry here, unlike the direct Anthropic API table above.
export const bedrockModels = {
	"anthropic.claude-3-5-sonnet-20241022-v2:0": {
		maxTokens: 8192,
		contextWindow: 200_000,
		supportsImages: true,
		supportsComputerUse: true,
		supportsPromptCache: false,
		inputPrice: 3.0,
		outputPrice: 15.0,
	},
	"anthropic.claude-3-5-haiku-20241022-v1:0": {
		maxTokens: 8192,
		contextWindow: 200_000,
		supportsImages: false,
		supportsPromptCache: false,
		inputPrice: 1.0,
		outputPrice: 5.0,
	},
	"anthropic.claude-3-5-sonnet-20240620-v1:0": {
		maxTokens: 8192,
		contextWindow: 200_000,
		supportsImages: true,
		supportsPromptCache: false,
		inputPrice: 3.0,
		outputPrice: 15.0,
	},
	"anthropic.claude-3-opus-20240229-v1:0": {
		maxTokens: 4096,
		contextWindow: 200_000,
		supportsImages: true,
		supportsPromptCache: false,
		inputPrice: 15.0,
		outputPrice: 75.0,
	},
	"anthropic.claude-3-sonnet-20240229-v1:0": {
		maxTokens: 4096,
		contextWindow: 200_000,
		supportsImages: true,
		supportsPromptCache: false,
		inputPrice: 3.0,
		outputPrice: 15.0,
	},
	"anthropic.claude-3-haiku-20240307-v1:0": {
		maxTokens: 4096,
		contextWindow: 200_000,
		supportsImages: true,
		supportsPromptCache: false,
		inputPrice: 0.25,
		outputPrice: 1.25,
	},
} as const satisfies Record<string, ModelInfo>
// OpenRouter
// https://openrouter.ai/models?order=newest&supported_parameters=tools
// OpenRouter's model list is fetched dynamically, so only a default id and its
// fallback metadata are declared statically here.
export const openRouterDefaultModelId = "anthropic/claude-3.5-sonnet:beta" // will always exist in openRouterModels
export const openRouterDefaultModelInfo: ModelInfo = {
	maxTokens: 8192,
	contextWindow: 200_000,
	supportsImages: true,
	supportsComputerUse: true,
	supportsPromptCache: true,
	inputPrice: 3.0,
	outputPrice: 15.0,
	cacheWritesPrice: 3.75,
	cacheReadsPrice: 0.3,
	description:
		"The new Claude 3.5 Sonnet delivers better-than-Opus capabilities, faster-than-Sonnet speeds, at the same Sonnet prices. Sonnet is particularly good at:\n\n- Coding: New Sonnet scores ~49% on SWE-Bench Verified, higher than the last best score, and without any fancy prompt scaffolding\n- Data science: Augments human data science expertise; navigates unstructured data while using multiple tools for insights\n- Visual processing: excelling at interpreting charts, graphs, and images, accurately transcribing text to derive insights beyond just the text alone\n- Agentic tasks: exceptional tool use, making it great at agentic tasks (i.e. complex, multi-step problem solving tasks that require engaging with other systems)\n\n#multimodal\n\n_This is a faster endpoint, made available in collaboration with Anthropic, that is self-moderated: response moderation happens on the provider's side instead of OpenRouter's. For requests that pass moderation, it's identical to the [Standard](/anthropic/claude-3.5-sonnet) variant._",
}
// Vertex AI
// https://cloud.google.com/vertex-ai/generative-ai/docs/partner-models/use-claude
export type VertexModelId = keyof typeof vertexModels
export const vertexDefaultModelId: VertexModelId = "claude-3-5-sonnet-v2@20241022"
// Claude models as exposed through Vertex AI (note the "@" date suffix id format).
// supportsPromptCache is false for every entry here.
export const vertexModels = {
	"claude-3-5-sonnet-v2@20241022": {
		maxTokens: 8192,
		contextWindow: 200_000,
		supportsImages: true,
		supportsComputerUse: true,
		supportsPromptCache: false,
		inputPrice: 3.0,
		outputPrice: 15.0,
	},
	"claude-3-5-sonnet@20240620": {
		maxTokens: 8192,
		contextWindow: 200_000,
		supportsImages: true,
		supportsPromptCache: false,
		inputPrice: 3.0,
		outputPrice: 15.0,
	},
	"claude-3-5-haiku@20241022": {
		maxTokens: 8192,
		contextWindow: 200_000,
		supportsImages: false,
		supportsPromptCache: false,
		inputPrice: 1.0,
		outputPrice: 5.0,
	},
	"claude-3-opus@20240229": {
		maxTokens: 4096,
		contextWindow: 200_000,
		supportsImages: true,
		supportsPromptCache: false,
		inputPrice: 15.0,
		outputPrice: 75.0,
	},
	"claude-3-haiku@20240307": {
		maxTokens: 4096,
		contextWindow: 200_000,
		supportsImages: true,
		supportsPromptCache: false,
		inputPrice: 0.25,
		outputPrice: 1.25,
	},
} as const satisfies Record<string, ModelInfo>
// Fallback metadata for arbitrary OpenAI-compatible endpoints where the real
// model capabilities are unknown.
export const openAiModelInfoSaneDefaults: ModelInfo = {
	maxTokens: -1, // NOTE(review): -1 presumably means "no explicit output cap" — confirm with consumers
	contextWindow: 128_000,
	supportsImages: true,
	supportsPromptCache: false,
	inputPrice: 0, // pricing unknown for custom endpoints, so treated as free
	outputPrice: 0,
}
// Gemini
// https://ai.google.dev/gemini-api/docs/models/gemini
export type GeminiModelId = keyof typeof geminiModels
export const geminiDefaultModelId: GeminiModelId = "gemini-2.0-flash-exp"
// All prices are hardcoded to 0 here — presumably because these are
// free experimental/preview tiers; verify before relying on cost tracking.
export const geminiModels = {
	"gemini-2.0-flash-exp": {
		maxTokens: 8192,
		contextWindow: 1_048_576,
		supportsImages: true,
		supportsPromptCache: false,
		inputPrice: 0,
		outputPrice: 0,
	},
	"gemini-1.5-flash-002": {
		maxTokens: 8192,
		contextWindow: 1_048_576,
		supportsImages: true,
		supportsPromptCache: false,
		inputPrice: 0,
		outputPrice: 0,
	},
	"gemini-1.5-flash-exp-0827": {
		maxTokens: 8192,
		contextWindow: 1_048_576,
		supportsImages: true,
		supportsPromptCache: false,
		inputPrice: 0,
		outputPrice: 0,
	},
	"gemini-1.5-flash-8b-exp-0827": {
		maxTokens: 8192,
		contextWindow: 1_048_576,
		supportsImages: true,
		supportsPromptCache: false,
		inputPrice: 0,
		outputPrice: 0,
	},
	"gemini-1.5-pro-002": {
		maxTokens: 8192,
		contextWindow: 2_097_152, // pro models have a 2M-token context window (vs 1M for flash)
		supportsImages: true,
		supportsPromptCache: false,
		inputPrice: 0,
		outputPrice: 0,
	},
	"gemini-1.5-pro-exp-0827": {
		maxTokens: 8192,
		contextWindow: 2_097_152,
		supportsImages: true,
		supportsPromptCache: false,
		inputPrice: 0,
		outputPrice: 0,
	},
} as const satisfies Record<string, ModelInfo>
// OpenAI Native
// https://openai.com/api/pricing/
export type OpenAiNativeModelId = keyof typeof openAiNativeModels
export const openAiNativeDefaultModelId: OpenAiNativeModelId = "gpt-4o"
export const openAiNativeModels = {
	// don't support tool use yet
	"o1-preview": {
		maxTokens: 32_768,
		contextWindow: 128_000,
		supportsImages: true,
		supportsPromptCache: false,
		inputPrice: 15,
		outputPrice: 60,
	},
	"o1-mini": {
		maxTokens: 65_536,
		contextWindow: 128_000,
		supportsImages: true,
		supportsPromptCache: false,
		inputPrice: 3,
		outputPrice: 12,
	},
	"gpt-4o": {
		maxTokens: 4_096,
		contextWindow: 128_000,
		supportsImages: true,
		supportsPromptCache: false,
		inputPrice: 5,
		outputPrice: 15,
	},
	"gpt-4o-mini": {
		maxTokens: 16_384,
		contextWindow: 128_000,
		supportsImages: true,
		supportsPromptCache: false,
		inputPrice: 0.15,
		outputPrice: 0.6,
	},
} as const satisfies Record<string, ModelInfo>
// Azure OpenAI
// https://learn.microsoft.com/en-us/azure/ai-services/openai/api-version-deprecation
// https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs
// Default `api-version` query parameter used when azureApiVersion is not set.
export const azureOpenAiDefaultApiVersion = "2024-08-01-preview"