  1. package config
  2. import (
  3. "github.com/charmbracelet/crush/internal/fur/provider"
  4. )
  5. // MockProviders returns a mock list of providers for testing.
  6. // This avoids making API calls during tests and provides consistent test data.
  7. // Simplified version with only default models from each provider.
  8. func MockProviders() []provider.Provider {
  9. return []provider.Provider{
  10. {
  11. Name: "Anthropic",
  12. ID: provider.InferenceProviderAnthropic,
  13. APIKey: "$ANTHROPIC_API_KEY",
  14. APIEndpoint: "$ANTHROPIC_API_ENDPOINT",
  15. Type: provider.TypeAnthropic,
  16. DefaultLargeModelID: "claude-sonnet-4-20250514",
  17. DefaultSmallModelID: "claude-3-5-haiku-20241022",
  18. Models: []provider.Model{
  19. {
  20. ID: "claude-sonnet-4-20250514",
  21. Name: "Claude Sonnet 4",
  22. CostPer1MIn: 3.0,
  23. CostPer1MOut: 15.0,
  24. CostPer1MInCached: 3.75,
  25. CostPer1MOutCached: 0.3,
  26. ContextWindow: 200000,
  27. DefaultMaxTokens: 50000,
  28. CanReason: true,
  29. SupportsImages: true,
  30. },
  31. {
  32. ID: "claude-3-5-haiku-20241022",
  33. Name: "Claude 3.5 Haiku",
  34. CostPer1MIn: 0.8,
  35. CostPer1MOut: 4.0,
  36. CostPer1MInCached: 1.0,
  37. CostPer1MOutCached: 0.08,
  38. ContextWindow: 200000,
  39. DefaultMaxTokens: 5000,
  40. CanReason: false,
  41. SupportsImages: true,
  42. },
  43. },
  44. },
  45. {
  46. Name: "OpenAI",
  47. ID: provider.InferenceProviderOpenAI,
  48. APIKey: "$OPENAI_API_KEY",
  49. APIEndpoint: "$OPENAI_API_ENDPOINT",
  50. Type: provider.TypeOpenAI,
  51. DefaultLargeModelID: "codex-mini-latest",
  52. DefaultSmallModelID: "gpt-4o",
  53. Models: []provider.Model{
  54. {
  55. ID: "codex-mini-latest",
  56. Name: "Codex Mini",
  57. CostPer1MIn: 1.5,
  58. CostPer1MOut: 6.0,
  59. CostPer1MInCached: 0.0,
  60. CostPer1MOutCached: 0.375,
  61. ContextWindow: 200000,
  62. DefaultMaxTokens: 50000,
  63. CanReason: true,
  64. HasReasoningEffort: true,
  65. DefaultReasoningEffort: "medium",
  66. SupportsImages: true,
  67. },
  68. {
  69. ID: "gpt-4o",
  70. Name: "GPT-4o",
  71. CostPer1MIn: 2.5,
  72. CostPer1MOut: 10.0,
  73. CostPer1MInCached: 0.0,
  74. CostPer1MOutCached: 1.25,
  75. ContextWindow: 128000,
  76. DefaultMaxTokens: 20000,
  77. CanReason: false,
  78. SupportsImages: true,
  79. },
  80. },
  81. },
  82. {
  83. Name: "Google Gemini",
  84. ID: provider.InferenceProviderGemini,
  85. APIKey: "$GEMINI_API_KEY",
  86. APIEndpoint: "$GEMINI_API_ENDPOINT",
  87. Type: provider.TypeGemini,
  88. DefaultLargeModelID: "gemini-2.5-pro",
  89. DefaultSmallModelID: "gemini-2.5-flash",
  90. Models: []provider.Model{
  91. {
  92. ID: "gemini-2.5-pro",
  93. Name: "Gemini 2.5 Pro",
  94. CostPer1MIn: 1.25,
  95. CostPer1MOut: 10.0,
  96. CostPer1MInCached: 1.625,
  97. CostPer1MOutCached: 0.31,
  98. ContextWindow: 1048576,
  99. DefaultMaxTokens: 50000,
  100. CanReason: true,
  101. SupportsImages: true,
  102. },
  103. {
  104. ID: "gemini-2.5-flash",
  105. Name: "Gemini 2.5 Flash",
  106. CostPer1MIn: 0.3,
  107. CostPer1MOut: 2.5,
  108. CostPer1MInCached: 0.3833,
  109. CostPer1MOutCached: 0.075,
  110. ContextWindow: 1048576,
  111. DefaultMaxTokens: 50000,
  112. CanReason: true,
  113. SupportsImages: true,
  114. },
  115. },
  116. },
  117. {
  118. Name: "xAI",
  119. ID: provider.InferenceProviderXAI,
  120. APIKey: "$XAI_API_KEY",
  121. APIEndpoint: "https://api.x.ai/v1",
  122. Type: provider.TypeXAI,
  123. DefaultLargeModelID: "grok-3",
  124. DefaultSmallModelID: "grok-3-mini",
  125. Models: []provider.Model{
  126. {
  127. ID: "grok-3",
  128. Name: "Grok 3",
  129. CostPer1MIn: 3.0,
  130. CostPer1MOut: 15.0,
  131. CostPer1MInCached: 0.0,
  132. CostPer1MOutCached: 0.75,
  133. ContextWindow: 131072,
  134. DefaultMaxTokens: 20000,
  135. CanReason: false,
  136. SupportsImages: false,
  137. },
  138. {
  139. ID: "grok-3-mini",
  140. Name: "Grok 3 Mini",
  141. CostPer1MIn: 0.3,
  142. CostPer1MOut: 0.5,
  143. CostPer1MInCached: 0.0,
  144. CostPer1MOutCached: 0.075,
  145. ContextWindow: 131072,
  146. DefaultMaxTokens: 20000,
  147. CanReason: true,
  148. SupportsImages: false,
  149. },
  150. },
  151. },
  152. {
  153. Name: "Azure OpenAI",
  154. ID: provider.InferenceProviderAzure,
  155. APIKey: "$AZURE_OPENAI_API_KEY",
  156. APIEndpoint: "$AZURE_OPENAI_API_ENDPOINT",
  157. Type: provider.TypeAzure,
  158. DefaultLargeModelID: "o4-mini",
  159. DefaultSmallModelID: "gpt-4o",
  160. Models: []provider.Model{
  161. {
  162. ID: "o4-mini",
  163. Name: "o4 Mini",
  164. CostPer1MIn: 1.1,
  165. CostPer1MOut: 4.4,
  166. CostPer1MInCached: 0.0,
  167. CostPer1MOutCached: 0.275,
  168. ContextWindow: 200000,
  169. DefaultMaxTokens: 50000,
  170. CanReason: true,
  171. HasReasoningEffort: false,
  172. DefaultReasoningEffort: "medium",
  173. SupportsImages: true,
  174. },
  175. {
  176. ID: "gpt-4o",
  177. Name: "GPT-4o",
  178. CostPer1MIn: 2.5,
  179. CostPer1MOut: 10.0,
  180. CostPer1MInCached: 0.0,
  181. CostPer1MOutCached: 1.25,
  182. ContextWindow: 128000,
  183. DefaultMaxTokens: 20000,
  184. CanReason: false,
  185. SupportsImages: true,
  186. },
  187. },
  188. },
  189. {
  190. Name: "AWS Bedrock",
  191. ID: provider.InferenceProviderBedrock,
  192. Type: provider.TypeBedrock,
  193. DefaultLargeModelID: "anthropic.claude-sonnet-4-20250514-v1:0",
  194. DefaultSmallModelID: "anthropic.claude-3-5-haiku-20241022-v1:0",
  195. Models: []provider.Model{
  196. {
  197. ID: "anthropic.claude-sonnet-4-20250514-v1:0",
  198. Name: "AWS Claude Sonnet 4",
  199. CostPer1MIn: 3.0,
  200. CostPer1MOut: 15.0,
  201. CostPer1MInCached: 3.75,
  202. CostPer1MOutCached: 0.3,
  203. ContextWindow: 200000,
  204. DefaultMaxTokens: 50000,
  205. CanReason: true,
  206. SupportsImages: true,
  207. },
  208. {
  209. ID: "anthropic.claude-3-5-haiku-20241022-v1:0",
  210. Name: "AWS Claude 3.5 Haiku",
  211. CostPer1MIn: 0.8,
  212. CostPer1MOut: 4.0,
  213. CostPer1MInCached: 1.0,
  214. CostPer1MOutCached: 0.08,
  215. ContextWindow: 200000,
  216. DefaultMaxTokens: 50000,
  217. CanReason: false,
  218. SupportsImages: true,
  219. },
  220. },
  221. },
  222. {
  223. Name: "Google Vertex AI",
  224. ID: provider.InferenceProviderVertexAI,
  225. Type: provider.TypeVertexAI,
  226. DefaultLargeModelID: "gemini-2.5-pro",
  227. DefaultSmallModelID: "gemini-2.5-flash",
  228. Models: []provider.Model{
  229. {
  230. ID: "gemini-2.5-pro",
  231. Name: "Gemini 2.5 Pro",
  232. CostPer1MIn: 1.25,
  233. CostPer1MOut: 10.0,
  234. CostPer1MInCached: 1.625,
  235. CostPer1MOutCached: 0.31,
  236. ContextWindow: 1048576,
  237. DefaultMaxTokens: 50000,
  238. CanReason: true,
  239. SupportsImages: true,
  240. },
  241. {
  242. ID: "gemini-2.5-flash",
  243. Name: "Gemini 2.5 Flash",
  244. CostPer1MIn: 0.3,
  245. CostPer1MOut: 2.5,
  246. CostPer1MInCached: 0.3833,
  247. CostPer1MOutCached: 0.075,
  248. ContextWindow: 1048576,
  249. DefaultMaxTokens: 50000,
  250. CanReason: true,
  251. SupportsImages: true,
  252. },
  253. },
  254. },
  255. {
  256. Name: "OpenRouter",
  257. ID: provider.InferenceProviderOpenRouter,
  258. APIKey: "$OPENROUTER_API_KEY",
  259. APIEndpoint: "https://openrouter.ai/api/v1",
  260. Type: provider.TypeOpenAI,
  261. DefaultLargeModelID: "anthropic/claude-sonnet-4",
  262. DefaultSmallModelID: "anthropic/claude-haiku-3.5",
  263. Models: []provider.Model{
  264. {
  265. ID: "anthropic/claude-sonnet-4",
  266. Name: "Anthropic: Claude Sonnet 4",
  267. CostPer1MIn: 3.0,
  268. CostPer1MOut: 15.0,
  269. CostPer1MInCached: 3.75,
  270. CostPer1MOutCached: 0.3,
  271. ContextWindow: 200000,
  272. DefaultMaxTokens: 32000,
  273. CanReason: true,
  274. SupportsImages: true,
  275. },
  276. {
  277. ID: "anthropic/claude-haiku-3.5",
  278. Name: "Anthropic: Claude 3.5 Haiku",
  279. CostPer1MIn: 0.8,
  280. CostPer1MOut: 4.0,
  281. CostPer1MInCached: 1.0,
  282. CostPer1MOutCached: 0.08,
  283. ContextWindow: 200000,
  284. DefaultMaxTokens: 4096,
  285. CanReason: false,
  286. SupportsImages: true,
  287. },
  288. },
  289. },
  290. }
  291. }