issue-ai-response.js 12 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339
  1. /**
  2. * Multi-turn AI Response Script for GitHub Issues
  3. *
  4. * This script handles AI-powered responses to GitHub issues using a multi-turn
  5. * conversation approach. It provides project context, allows the AI to request
  6. * specific files, and generates classification labels and responses.
  7. */
module.exports = async ({ github, context, core, fs, path }) => {
  // Endpoint + key for an OpenAI-compatible chat-completions API,
  // injected by the workflow from repository secrets.
  const apiUrl = process.env.OPENAI_URL;
  const apiKey = process.env.OPENAI_KEY;
  if (!apiUrl || !apiKey) {
    // Fail the job early rather than erroring mid-conversation.
    core.setFailed('OPENAI_URL and OPENAI_KEY must be set');
    return;
  }
  // Read system prompt and repository index
  // (the index is produced by an earlier workflow step into /tmp).
  let systemPrompt, repoIndex;
  try {
    systemPrompt = fs.readFileSync('.github/prompts/issue-assistant.md', 'utf8');
    repoIndex = fs.readFileSync('/tmp/repo_index.md', 'utf8');
  } catch (error) {
    core.setFailed('Failed to read required prompt or index file: ' + error.message);
    return;
  }
  // Issue details — title/body are truncated to keep the prompt within
  // token limits; missing fields get explicit placeholders.
  const issueTitle = context.payload.issue.title ? context.payload.issue.title.substring(0, 500) : '(No title)';
  const issueBody = context.payload.issue.body ? context.payload.issue.body.substring(0, 10000) : '(No description provided)';
  const issueAuthor = context.payload.issue.user.login;
  // Constants
  const MAX_FILES_PER_TURN = 10;
  // Maximum file size (in bytes) for context sent to the AI. 50KB was chosen to balance
  // providing enough context for the AI to understand the code, while staying well within
  // OpenAI API token and processing limits. Larger files may exceed token limits or slow
  // down processing, while smaller limits may omit important context.
  const MAX_FILE_SIZE = 50000; // 50KB
  const MAX_TURNS = 3;
  // Delay in milliseconds between API calls to respect rate limits (50K tokens per minute).
  // Configurable via RATE_LIMIT_DELAY_MS environment variable. Default is 31 seconds.
  const RATE_LIMIT_DELAY_MS = parseInt(process.env.RATE_LIMIT_DELAY_MS || '31000', 10);
  /**
   * Heuristic binary-file detection: decoded text files never contain a
   * NUL character, so its presence marks the content as binary.
   * @param {string} content - Decoded file content to inspect
   * @returns {boolean} - True if content appears to be binary
   */
  function isBinaryContent(content) {
    return content.indexOf('\0') !== -1;
  }
  /**
   * Send the conversation to the configured OpenAI-compatible endpoint and
   * return the first completion choice.
   * @param {Array} messages - Array of { role, content } message objects
   * @param {boolean} expectJson - Whether to request a JSON-object response
   * @returns {Promise<string>} - Trimmed AI response content
   * @throws {Error} - If the HTTP call fails or the response shape is invalid
   */
  async function callOpenAI(messages, expectJson = true) {
    // max_completion_tokens is configurable via environment variable.
    // Default is 3000 to allow for more detailed responses. Adjust as needed for your model/cost.
    const maxCompletionTokens = parseInt(process.env.MAX_COMPLETION_TOKENS || '3000', 10);
    const payload = {
      messages: messages,
      temperature: 1,
      max_completion_tokens: maxCompletionTokens,
      // `undefined` is dropped by JSON.stringify, so plain-text mode simply omits the key.
      response_format: expectJson ? { type: "json_object" } : undefined
    };
    const response = await fetch(apiUrl, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        'api-key': apiKey
      },
      body: JSON.stringify(payload)
    });
    if (!response.ok) {
      const errorText = await response.text();
      throw new Error(`API error: ${response.status} ${errorText}`);
    }
    const data = await response.json();
    // Guard against malformed bodies before dereferencing the content.
    if (!data.choices?.[0]?.message?.content) {
      throw new Error(`Invalid API response structure. Received: ${JSON.stringify(data)}`);
    }
    return data.choices[0].message.content.trim();
  }
  /**
   * Read file content safely with path traversal protection.
   *
   * Containment check: the resolved path must stay inside the repo root.
   * A bare `startsWith('..')` test is insufficient — it wrongly denies
   * in-repo files whose names merely begin with '..' (e.g. '..config'),
   * and on Windows it fails to block cross-drive paths, where
   * path.relative() yields an absolute path that does NOT start with '..'.
   * We therefore reject absolute relative-paths and match '..' only as a
   * whole leading segment.
   *
   * @param {string} filePath - Path to file relative to repo root
   * @returns {string} - File content (possibly truncated) or a bracketed status message
   */
  function readFileContent(filePath) {
    try {
      const repoRoot = process.cwd();
      // Resolve the full path and ensure it's within the repo
      const fullPath = path.resolve(repoRoot, filePath);
      const normalizedFull = path.normalize(fullPath);
      const normalizedRoot = path.normalize(repoRoot);
      const relativePath = path.relative(normalizedRoot, normalizedFull);
      if (
        path.isAbsolute(relativePath) ||           // e.g. Windows cross-drive escape
        relativePath === '..' ||
        relativePath.startsWith('..' + path.sep)   // climbs out via a '..' segment
      ) {
        return `[Access denied: ${filePath} is outside the repository]`;
      }
      // Check if file exists
      if (!fs.existsSync(fullPath)) {
        return `[File not found: ${filePath}]`;
      }
      const stats = fs.statSync(fullPath);
      if (stats.isDirectory()) {
        return `[${filePath} is a directory, not a file]`;
      }
      // Read file with size limit
      if (stats.size > MAX_FILE_SIZE) {
        // Read only first MAX_FILE_SIZE bytes for large files
        const fd = fs.openSync(fullPath, 'r');
        const buffer = Buffer.alloc(MAX_FILE_SIZE);
        let bytesRead;
        try {
          bytesRead = fs.readSync(fd, buffer, 0, MAX_FILE_SIZE, 0);
        } finally {
          fs.closeSync(fd); // always release the descriptor, even if readSync throws
        }
        // Use StringDecoder to handle incomplete multi-byte UTF-8 characters at the truncation point
        const { StringDecoder } = require('string_decoder');
        const decoder = new StringDecoder('utf8');
        const content = decoder.write(buffer.slice(0, bytesRead)) + decoder.end();
        if (isBinaryContent(content)) {
          return `[${filePath} appears to be a binary file]`;
        }
        return content + '\n\n[File truncated - exceeded 50KB limit]';
      }
      const content = fs.readFileSync(fullPath, 'utf8');
      if (isBinaryContent(content)) {
        return `[${filePath} appears to be a binary file]`;
      }
      return content;
    } catch (error) {
      // Never throw from this helper: errors come back as bracketed messages
      // so the multi-turn conversation can continue.
      return `[Error reading file ${filePath}: ${error?.message || "File could not be accessed"}]`;
    }
  }
  /**
   * Parse a JSON response, unwrapping a surrounding markdown code fence.
   * @param {string} content - Raw response content
   * @returns {Object} - Parsed JSON object
   * @throws {SyntaxError} - If the (unwrapped) content is not valid JSON
   */
  function parseJsonResponse(content) {
    // Only unwrap when the ENTIRE payload is a single fenced block;
    // fences embedded inside a larger response are left alone.
    const fenced = content.match(/^```(?:json)?\s*\n?([\s\S]*?)\n?```\s*$/);
    const jsonContent = fenced ? fenced[1].trim() : content;
    return JSON.parse(jsonContent);
  }
  try {
    // Conversation history
    const messages = [];
    // System prompt for multi-turn conversation.
    // NOTE: template-literal content below is runtime prompt text and is
    // deliberately flush-left so no extra whitespace reaches the model.
    const multiTurnSystemPrompt = `${systemPrompt}
## Multi-turn Conversation Mode
You are in a multi-turn conversation mode. In each turn, you can either:
1. Request more files to better understand the issue
2. Provide your final response with classification
### Response Format
**If you need more files**, respond with:
\`\`\`json
{
"needs_files": true,
"requested_files": ["path/to/file1.py", "path/to/file2.md"],
"reason": "Brief explanation of why these files are needed"
}
\`\`\`
**If you have enough information**, respond with:
\`\`\`json
{
"needs_files": false,
"classification": "bug|feature|question",
"response": "Your detailed response to the issue..."
}
\`\`\`
### Guidelines
- Request only files that are directly relevant to the issue
- Request at most 10 files per turn
- After receiving requested files, analyze them and either request more or provide final response
- You have at most 3 turns to gather information before providing a final response`;
    messages.push({ role: 'system', content: multiTurnSystemPrompt });
    // First turn: Provide project context and issue
    const firstTurnPrompt = `## Repository Context
${repoIndex}
## Issue Details
**Title:** ${issueTitle}
**Author:** @${issueAuthor}
**Body:**
${issueBody}
---
Please analyze this issue. If you need to see specific files to provide an accurate response, request them. Otherwise, provide your classification and response.`;
    messages.push({ role: 'user', content: firstTurnPrompt });
    // Filled in by whichever branch below ends the conversation.
    let finalClassification = null;
    let finalResponse = null;
    for (let turn = 1; turn <= MAX_TURNS; turn++) {
      // Add delay before subsequent turns to ensure token usage stays under rate limits
      if (turn > 1) {
        console.log(`Waiting ${RATE_LIMIT_DELAY_MS / 1000} seconds before turn ${turn} to respect rate limits...`);
        await new Promise(resolve => setTimeout(resolve, RATE_LIMIT_DELAY_MS));
      }
      console.log(`Turn ${turn}/${MAX_TURNS}: Calling OpenAI API...`);
      const aiContent = await callOpenAI(messages);
      console.log(`Turn ${turn} response received`);
      let parsed;
      try {
        parsed = parseJsonResponse(aiContent);
      } catch (parseError) {
        console.log(`Failed to parse JSON in turn ${turn}: ${parseError.message}`);
        // If we can't parse JSON, treat as final response
        // Attempt to extract classification using legacy format (e.g., regex)
        let legacyClassification = null;
        // Example: look for "Classification: <label>" in the response (case-insensitive)
        const match = aiContent.match(/classification\s*:\s*([a-zA-Z0-9_-]+)/i);
        if (match) {
          legacyClassification = match[1].toLowerCase();
          console.log(`Extracted legacy classification: ${legacyClassification}`);
        } else {
          console.log('Could not extract classification from non-JSON response');
        }
        // The raw (non-JSON) text is posted verbatim as the comment body.
        finalResponse = aiContent;
        finalClassification = legacyClassification;
        break;
      }
      if (turn === MAX_TURNS) {
        // Force final response on last turn: never let the model request
        // more files when no further turn exists to serve them.
        finalClassification = parsed.classification?.toLowerCase() || null;
        if (parsed.needs_files) {
          finalResponse = "Unable to provide a complete analysis within the turn limit. Please provide more specific details about your issue or the relevant files.";
        } else {
          finalResponse = parsed.response || aiContent;
        }
        console.log(`Forced final response on last turn ${turn}`);
        break;
      }
      if (!parsed.needs_files) {
        // Final response provided before last turn
        finalClassification = parsed.classification?.toLowerCase() || null;
        finalResponse = parsed.response || aiContent;
        console.log(`Final response received in turn ${turn}`);
        break;
      }
      // Need more files
      const requestedFiles = parsed.requested_files || [];
      console.log(`Turn ${turn}: Requested files: ${requestedFiles.join(', ')}`);
      // Add assistant's response to history
      messages.push({ role: 'assistant', content: aiContent });
      // Read requested files and add to conversation
      // (capped at MAX_FILES_PER_TURN; each file is fenced for the model).
      let fileContents = '## Requested File Contents\n\n';
      for (const filePath of requestedFiles.slice(0, MAX_FILES_PER_TURN)) {
        const content = readFileContent(filePath);
        fileContents += `### \`${filePath}\`\n\n\`\`\`\n${content}\n\`\`\`\n\n`;
      }
      // Append remaining turns message (this code is only reached when turn < MAX_TURNS)
      fileContents += `\nYou have ${MAX_TURNS - turn} turn(s) remaining. Please analyze these files and either request more files or provide your final classification and response.`;
      messages.push({ role: 'user', content: fileContents });
    }
    if (!finalResponse) {
      // Should be unreachable: every loop path either breaks with a
      // response or throws. Dump the transcript for debugging if it happens.
      core.setFailed('Failed to get a valid response after all turns. This indicates an unexpected error in the conversation flow.');
      console.error('Debug info - messages:', JSON.stringify(messages, null, 2));
      return;
    }
    // Post the response as a comment
    await github.rest.issues.createComment({
      owner: context.repo.owner,
      repo: context.repo.repo,
      issue_number: context.payload.issue.number,
      body: finalResponse
    });
    console.log('Comment posted successfully');
    // Add label based on classification — only the three known labels are
    // ever applied, so the model cannot attach arbitrary labels.
    if (finalClassification && ['bug', 'feature', 'question'].includes(finalClassification)) {
      try {
        await github.rest.issues.addLabels({
          owner: context.repo.owner,
          repo: context.repo.repo,
          issue_number: context.payload.issue.number,
          labels: [finalClassification]
        });
        console.log(`Label '${finalClassification}' added successfully`);
      } catch (labelError) {
        // Labeling is best-effort: a missing label must not fail the job
        // after the comment has already been posted.
        console.log(`Failed to add label: ${labelError.message}`);
      }
    }
    console.log('AI response workflow completed successfully');
  } catch (error) {
    const errorDetails = error && error.stack ? error.stack : error.message;
    core.setFailed(`Failed to generate AI response: ${error.message}\n\nDetails: ${errorDetails}`);
    console.error('Full error:', error);
  }
};