// openrouter.ts

import { Anthropic } from "@anthropic-ai/sdk"
import axios from "axios"
import OpenAI from "openai"
import { ApiHandler, SingleCompletionHandler } from "../"
import { ApiHandlerOptions, ModelInfo, openRouterDefaultModelId, openRouterDefaultModelInfo } from "../../shared/api"
import { convertToOpenAiMessages } from "../transform/openai-format"
import { ApiStreamChunk, ApiStreamUsageChunk } from "../transform/stream"
import delay from "delay"

// OpenRouter extends the standard OpenAI chat completion params with a
// non-standard `transforms` field (https://openrouter.ai/docs/transforms).
type OpenRouterChatCompletionParams = OpenAI.Chat.ChatCompletionCreateParams & {
	transforms?: string[]
}

// OpenRouter usage chunk that also carries the full response text, so callers
// can inspect the complete generation alongside the token counts.
interface OpenRouterApiStreamUsageChunk extends ApiStreamUsageChunk {
	fullResponseText: string
}
export class OpenRouterHandler implements ApiHandler, SingleCompletionHandler {
	private options: ApiHandlerOptions
	private client: OpenAI

	constructor(options: ApiHandlerOptions) {
		this.options = options
		this.client = new OpenAI({
			baseURL: "https://openrouter.ai/api/v1",
			apiKey: this.options.openRouterApiKey,
			defaultHeaders: {
				"HTTP-Referer": "https://github.com/RooVetGit/Roo-Cline", // Optional, for including your app on openrouter.ai rankings.
				"X-Title": "Roo-Cline", // Optional. Shows in rankings on openrouter.ai.
			},
		})
	}
	async *createMessage(
		systemPrompt: string,
		messages: Anthropic.Messages.MessageParam[],
	): AsyncGenerator<ApiStreamChunk> {
		// Convert Anthropic messages to OpenAI format.
		const openAiMessages: OpenAI.Chat.ChatCompletionMessageParam[] = [
			{ role: "system", content: systemPrompt },
			...convertToOpenAiMessages(messages),
		]

		// Prompt caching: https://openrouter.ai/docs/prompt-caching
		// This is needed specifically for Claude models; some other models may support prompt caching
		// automatically without it.
		switch (this.getModel().id) {
			case "anthropic/claude-3.5-sonnet":
			case "anthropic/claude-3.5-sonnet:beta":
			case "anthropic/claude-3.5-sonnet-20240620":
			case "anthropic/claude-3.5-sonnet-20240620:beta":
			case "anthropic/claude-3-5-haiku":
			case "anthropic/claude-3-5-haiku:beta":
			case "anthropic/claude-3-5-haiku-20241022":
			case "anthropic/claude-3-5-haiku-20241022:beta":
			case "anthropic/claude-3-haiku":
			case "anthropic/claude-3-haiku:beta":
			case "anthropic/claude-3-opus":
			case "anthropic/claude-3-opus:beta":
				openAiMessages[0] = {
					role: "system",
					content: [
						{
							type: "text",
							text: systemPrompt,
							// @ts-ignore-next-line
							cache_control: { type: "ephemeral" },
						},
					],
				}
				// Add cache_control to the last two user messages.
				// (Note: this works because we only ever add one user message at a time; if we added
				// multiple at once, we'd need to mark the user message before the last assistant message.)
				const lastTwoUserMessages = openAiMessages.filter((msg) => msg.role === "user").slice(-2)
				lastTwoUserMessages.forEach((msg) => {
					if (typeof msg.content === "string") {
						msg.content = [{ type: "text", text: msg.content }]
					}
					if (Array.isArray(msg.content)) {
						// NOTE: this is fine since environment details are always appended at the end. If they
						// weren't, and the user added an image_url part, this would pop a text part from before
						// it and move it to the end.
						let lastTextPart = msg.content.filter((part) => part.type === "text").pop()
						if (!lastTextPart) {
							lastTextPart = { type: "text", text: "..." }
							msg.content.push(lastTextPart)
						}
						// @ts-ignore-next-line
						lastTextPart["cache_control"] = { type: "ephemeral" }
					}
				})
				break
			default:
				break
		}
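		// For reference (illustrative, not executed): after the Claude branch above, the system message
		// has the shape { role: "system", content: [{ type: "text", text, cache_control: { type: "ephemeral" } }] },
		// and the last two user messages each carry cache_control on their final text part.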
		// It's unclear how OpenRouter defaults max tokens when no value is provided, but the Anthropic
		// API requires this value, and since these models are offered in both 4096- and 8192-token
		// variants, we should ensure 8192. (Models usually default to the maximum tokens allowed.)
		let maxTokens: number | undefined
		switch (this.getModel().id) {
			case "anthropic/claude-3.5-sonnet":
			case "anthropic/claude-3.5-sonnet:beta":
			case "anthropic/claude-3.5-sonnet-20240620":
			case "anthropic/claude-3.5-sonnet-20240620:beta":
			case "anthropic/claude-3-5-haiku":
			case "anthropic/claude-3-5-haiku:beta":
			case "anthropic/claude-3-5-haiku-20241022":
			case "anthropic/claude-3-5-haiku-20241022:beta":
				maxTokens = 8_192
				break
		}
		// https://openrouter.ai/docs/transforms
		let fullResponseText = ""
		const stream = await this.client.chat.completions.create({
			model: this.getModel().id,
			max_tokens: maxTokens,
			temperature: 0,
			messages: openAiMessages,
			stream: true,
			// Spreading this way means the transforms field is only included in the parameters when
			// openRouterUseMiddleOutTransform is true.
			...(this.options.openRouterUseMiddleOutTransform && { transforms: ["middle-out"] }),
		} as OpenRouterChatCompletionParams)
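		// (For reference: the "middle-out" transform compresses the middle of prompts that would
		// otherwise exceed the model's context window; see the transforms doc linked above.)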
		let genId: string | undefined

		for await (const chunk of stream as unknown as AsyncIterable<OpenAI.Chat.Completions.ChatCompletionChunk>) {
			// OpenRouter returns an error object in the stream instead of the OpenAI SDK throwing an error.
			if ("error" in chunk) {
				const error = chunk.error as { message?: string; code?: number }
				console.error(`OpenRouter API Error: ${error?.code} - ${error?.message}`)
				throw new Error(`OpenRouter API Error ${error?.code}: ${error?.message}`)
			}

			if (!genId && chunk.id) {
				genId = chunk.id
			}

			const delta = chunk.choices[0]?.delta
			if (delta?.content) {
				fullResponseText += delta.content
				yield {
					type: "text",
					text: delta.content,
				} as ApiStreamChunk
			}

			// if (chunk.usage) {
			// 	yield {
			// 		type: "usage",
			// 		inputTokens: chunk.usage.prompt_tokens || 0,
			// 		outputTokens: chunk.usage.completion_tokens || 0,
			// 	}
			// }
		}
		await delay(500) // FIXME: necessary delay to ensure generation endpoint is ready

		try {
			// Query the generation endpoint for native token counts and cost; the response is expected
			// to have the shape { data: { native_tokens_prompt, native_tokens_completion, total_cost } }.
			const response = await axios.get(`https://openrouter.ai/api/v1/generation?id=${genId}`, {
				headers: {
					Authorization: `Bearer ${this.options.openRouterApiKey}`,
				},
				timeout: 5_000, // this request hangs sometimes
			})

			const generation = response.data?.data
			console.log("OpenRouter generation details:", response.data)
			yield {
				type: "usage",
				// cacheWriteTokens: 0,
				// cacheReadTokens: 0,
				// The OpenRouter generation endpoint fails often, hence the || 0 fallbacks.
				inputTokens: generation?.native_tokens_prompt || 0,
				outputTokens: generation?.native_tokens_completion || 0,
				totalCost: generation?.total_cost || 0,
				fullResponseText,
			} as OpenRouterApiStreamUsageChunk
		} catch (error) {
			// Ignore failures here; usage reporting is best-effort.
			console.error("Error fetching OpenRouter generation details:", error)
		}
	}
	getModel(): { id: string; info: ModelInfo } {
		const modelId = this.options.openRouterModelId
		const modelInfo = this.options.openRouterModelInfo
		if (modelId && modelInfo) {
			return { id: modelId, info: modelInfo }
		}
		return { id: openRouterDefaultModelId, info: openRouterDefaultModelInfo }
	}
	async completePrompt(prompt: string): Promise<string> {
		try {
			const response = await this.client.chat.completions.create({
				model: this.getModel().id,
				messages: [{ role: "user", content: prompt }],
				temperature: 0,
				stream: false,
			})

			// As in createMessage, OpenRouter reports errors in the response body rather than throwing.
			if ("error" in response) {
				const error = response.error as { message?: string; code?: number }
				throw new Error(`OpenRouter API Error ${error?.code}: ${error?.message}`)
			}

			const completion = response as OpenAI.Chat.ChatCompletion
			return completion.choices[0]?.message?.content || ""
		} catch (error) {
			if (error instanceof Error) {
				throw new Error(`OpenRouter completion error: ${error.message}`)
			}
			throw error
		}
	}
}
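
// Usage sketch (illustrative; not part of this module). It assumes ApiHandlerOptions carries the
// openRouterApiKey / openRouterModelId / openRouterModelInfo / openRouterUseMiddleOutTransform
// fields referenced above:
//
//   const handler = new OpenRouterHandler({ openRouterApiKey: process.env.OPENROUTER_API_KEY })
//   for await (const chunk of handler.createMessage("You are a concise assistant.", [
//     { role: "user", content: "Hello" },
//   ])) {
//     if (chunk.type === "text") process.stdout.write(chunk.text)
//   }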