// openrouter.ts
  1. import type { ApiStream, ModelInfo, Message, TextBlock } from "../../types.d.ts";
  2. interface OpenRouterOptions {
  3. model: string;
  4. apiKey: string;
  5. }
  6. export class OpenRouterHandler {
  7. private apiKey: string;
  8. private model: string;
  9. constructor(options: OpenRouterOptions) {
  10. this.apiKey = options.apiKey;
  11. this.model = options.model;
  12. }
  13. async *createMessage(systemPrompt: string, messages: Message[]): ApiStream {
  14. try {
  15. // Convert our messages to OpenRouter format
  16. const openRouterMessages = [
  17. { role: "system", content: systemPrompt },
  18. ...messages.map(msg => ({
  19. role: msg.role,
  20. content: Array.isArray(msg.content)
  21. ? msg.content.map(c => c.text).join("\n")
  22. : msg.content
  23. }))
  24. ];
  25. const response = await fetch("https://openrouter.ai/api/v1/chat/completions", {
  26. method: "POST",
  27. headers: {
  28. "Authorization": `Bearer ${this.apiKey}`,
  29. "Content-Type": "application/json",
  30. "HTTP-Referer": "https://github.com/mattvr/roo-cline",
  31. "X-Title": "Cline CLI"
  32. },
  33. body: JSON.stringify({
  34. model: this.model,
  35. messages: openRouterMessages,
  36. stream: true,
  37. temperature: 0.7,
  38. max_tokens: 4096
  39. })
  40. });
  41. if (!response.ok) {
  42. const errorData = await response.json().catch(() => null);
  43. throw new Error(`OpenRouter API error: ${response.statusText}${errorData ? ` - ${JSON.stringify(errorData)}` : ""}`);
  44. }
  45. if (!response.body) {
  46. throw new Error("No response body received");
  47. }
  48. const reader = response.body.getReader();
  49. const decoder = new TextDecoder();
  50. let buffer = "";
  51. let content = "";
  52. while (true) {
  53. const { done, value } = await reader.read();
  54. if (done) break;
  55. // Add new chunk to buffer and split into lines
  56. buffer += decoder.decode(value, { stream: true });
  57. const lines = buffer.split("\n");
  58. // Process all complete lines
  59. buffer = lines.pop() || ""; // Keep the last incomplete line in buffer
  60. for (const line of lines) {
  61. if (line.trim() === "") continue;
  62. if (line === "data: [DONE]") continue;
  63. if (line.startsWith("data: ")) {
  64. try {
  65. const data = JSON.parse(line.slice(6));
  66. if (data.choices?.[0]?.delta?.content) {
  67. const text = data.choices[0].delta.content;
  68. content += text;
  69. yield { type: "text", text };
  70. }
  71. } catch (e) {
  72. // Ignore parse errors for incomplete chunks
  73. continue;
  74. }
  75. }
  76. }
  77. }
  78. // Process any remaining content in buffer
  79. if (buffer.trim() && buffer.startsWith("data: ")) {
  80. try {
  81. const data = JSON.parse(buffer.slice(6));
  82. if (data.choices?.[0]?.delta?.content) {
  83. const text = data.choices[0].delta.content;
  84. content += text;
  85. yield { type: "text", text };
  86. }
  87. } catch (e) {
  88. // Ignore parse errors for final incomplete chunk
  89. }
  90. }
  91. // Estimate token usage (4 chars per token is a rough estimate)
  92. const inputText = systemPrompt + messages.reduce((acc, msg) =>
  93. acc + (typeof msg.content === "string" ?
  94. msg.content :
  95. msg.content.reduce((a, b) => a + b.text, "")), "");
  96. const inputTokens = Math.ceil(inputText.length / 4);
  97. const outputTokens = Math.ceil(content.length / 4);
  98. yield {
  99. type: "usage",
  100. inputTokens,
  101. outputTokens,
  102. totalCost: this.calculateCost(inputTokens, outputTokens)
  103. };
  104. } catch (error) {
  105. console.error("Error in OpenRouter API call:", error);
  106. throw error;
  107. }
  108. }
  109. getModel(): { id: string; info: ModelInfo } {
  110. return {
  111. id: this.model,
  112. info: {
  113. contextWindow: 128000, // This varies by model
  114. supportsComputerUse: true,
  115. inputPricePerToken: 0.000002, // Approximate, varies by model
  116. outputPricePerToken: 0.000002
  117. }
  118. };
  119. }
  120. private calculateCost(inputTokens: number, outputTokens: number): number {
  121. const { inputPricePerToken, outputPricePerToken } = this.getModel().info;
  122. return (
  123. (inputTokens * (inputPricePerToken || 0)) +
  124. (outputTokens * (outputPricePerToken || 0))
  125. );
  126. }
  127. }