# release-notes-prompt.py

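"""Build an OpenAI prompt for PR release notes.

Collects the PR title and body sections, the recent git log, and a trimmed diff
between BASE_REF and HEAD_SHA, truncates each piece to a token budget for
MODEL_NAME, and writes the assembled prompt to GITHUB_OUTPUT for the rest of
the workflow.
"""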
import os
import subprocess
import json
import re
import tiktoken  # type: ignore
from datetime import datetime
from pytz import timezone

# Inputs provided by the workflow environment
GITHUB_OUTPUT = os.getenv("GITHUB_OUTPUT")
BASE_REF = os.getenv("BASE_REF", "main")
HEAD_SHA = os.environ["HEAD_SHA"]
PR_TITLE = os.environ["PR_TITLE"]
PR_BODY = os.environ["PR_BODY"]
EXISTING_NOTES = os.environ.get("EXISTING_NOTES", "null")
MODEL_NAME = os.environ.get("MODEL_NAME", "gpt-3.5-turbo-16k")
CUSTOM_PROMPT = os.environ.get("CUSTOM_PROMPT", "")

def extract_description_section(pr_body):
    # Find content between ## Description and the next ## or end of text
    description_match = re.search(r'## Description\s*\n(.*?)(?=\n##|$)', pr_body, re.DOTALL)
    if description_match:
        content = description_match.group(1).strip()
        # Remove the comment line if it exists
        comment_pattern = r'\[comment\]:.+?\n'
        content = re.sub(comment_pattern, '', content)
        return content.strip()
    return ""

def extract_ellipsis_important(pr_body):
    # Find content between the two <!-- ELLIPSIS_HIDDEN --> markers that contains [!IMPORTANT]
    ellipsis_match = re.search(r'<!--\s*ELLIPSIS_HIDDEN\s*-->(.*?)<!--\s*ELLIPSIS_HIDDEN\s*-->', pr_body, re.DOTALL)
    if ellipsis_match:
        content = ellipsis_match.group(1).strip()
        important_match = re.search(r'\[!IMPORTANT\](.*?)(?=\[!|$)', content, re.DOTALL)
        if important_match:
            important_text = important_match.group(1).strip()
            important_text = re.sub(r'^-+\s*', '', important_text)
            return important_text.strip()
    return ""

def extract_coderabbit_summary(pr_body):
    # Find content between ## Summary by CodeRabbit and the next ## or end of text
    summary_match = re.search(r'## Summary by CodeRabbit\s*\n(.*?)(?=\n##|$)', pr_body, re.DOTALL)
    return summary_match.group(1).strip() if summary_match else ""

def num_tokens_from_string(string: str, model_name: str) -> int:
    """
    Calculate the number of tokens in a text string for a specific model.

    Args:
        string: The input text to count tokens for
        model_name: Name of the OpenAI model to use for token counting

    Returns:
        int: Number of tokens in the input string
    """
    encoding = tiktoken.encoding_for_model(model_name)
    num_tokens = len(encoding.encode(string))
    return num_tokens

def truncate_to_token_limit(text, max_tokens, model_name):
    """
    Truncate text to fit within a maximum token limit for a specific model.

    Args:
        text: The input text to truncate
        max_tokens: Maximum number of tokens allowed
        model_name: Name of the OpenAI model to use for tokenization

    Returns:
        str: Truncated text that fits within the token limit
    """
    encoding = tiktoken.encoding_for_model(model_name)
    encoded = encoding.encode(text)
    truncated = encoded[:max_tokens]
    return encoding.decode(truncated)
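
# Note: tiktoken.encoding_for_model raises KeyError for model names it does not
# recognize. If newer model names are expected, a fallback such as this
# hypothetical helper (not used by the script below) is one way to keep token
# counting working; it assumes cl100k_base is an acceptable default encoding.
def _encoding_with_fallback(model_name: str):
    try:
        return tiktoken.encoding_for_model(model_name)
    except KeyError:
        return tiktoken.get_encoding("cl100k_base")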

# Extract sections and combine into PR_OVERVIEW
description = extract_description_section(PR_BODY)
important = extract_ellipsis_important(PR_BODY)
summary = extract_coderabbit_summary(PR_BODY)
PR_OVERVIEW = "\n\n".join(filter(None, [description, important, summary]))

# Get git information
base_sha = subprocess.getoutput(f"git rev-parse origin/{BASE_REF}") if BASE_REF == 'main' else BASE_REF
diff_overview = subprocess.getoutput(f"git diff {base_sha}..{HEAD_SHA} --name-status | awk '{{print $2}}' | sort | uniq -c | awk '{{print $2 \": \" $1 \" files changed\"}}'")
git_log = subprocess.getoutput(f"git log {base_sha}..{HEAD_SHA} --pretty=format:'%h - %s (%an)' --reverse | head -n 50")
git_diff = subprocess.getoutput(f"git diff {base_sha}..{HEAD_SHA} --minimal --abbrev --ignore-cr-at-eol --ignore-space-at-eol --ignore-space-change --ignore-all-space --ignore-blank-lines --unified=0 --diff-filter=ACDMRT")

# Truncate each piece so the combined prompt stays within the token budget
max_tokens = 14000  # Reserve some tokens for the response
changes_summary = truncate_to_token_limit(diff_overview, 1000, MODEL_NAME)
git_logs = truncate_to_token_limit(git_log, 2000, MODEL_NAME)
changes_diff = truncate_to_token_limit(
    git_diff,
    max_tokens - num_tokens_from_string(changes_summary, MODEL_NAME) - num_tokens_from_string(git_logs, MODEL_NAME) - 1000,
    MODEL_NAME,
)

# Get today's existing changelog, if any
existing_changelog = EXISTING_NOTES if EXISTING_NOTES != "null" else None
existing_changelog_text = f"\nAdditional context:\n{existing_changelog}" if existing_changelog else ""

TODAY = datetime.now(timezone('US/Eastern')).isoformat(sep=' ', timespec='seconds')

BASE_PROMPT = CUSTOM_PROMPT if CUSTOM_PROMPT else f"""Based on the following 'PR Information', please generate concise and informative release notes to be read by developers.
Format the release notes with markdown, and always use this structure: a descriptive and very short title (no more than 8 words) with heading level 2, a paragraph summarizing the changes (no header), and, if applicable, sections for '🚀 New Features & Improvements', '🐛 Bugs Fixed' and '🔧 Other Updates' with heading level 3, skipping any section that does not apply.
Finally, include the following markdown comment with the PR merged date: <!-- PR_DATE: {TODAY} -->.
Avoid being repetitive, and focus on the most important changes and their impact; discard any mention of version bumps/updates, changeset files, environment variables, or syntax updates.
PR Information:"""

OPENAI_PROMPT = f"""{BASE_PROMPT}
Git log summary:
{changes_summary}
Commit Messages:
{git_logs}
PR Title:
{PR_TITLE}
PR Overview:
{PR_OVERVIEW}{existing_changelog_text}
Code Diff:
{json.dumps(changes_diff)}"""

print("OpenAI Prompt")
print("----------------------------------------------------------------")
print(OPENAI_PROMPT)

# Write the prompt to GITHUB_OUTPUT as a multiline output (heredoc syntax)
with open(GITHUB_OUTPUT, "a") as outputs_file:
    outputs_file.write(f"OPENAI_PROMPT<<EOF\n{OPENAI_PROMPT}\nEOF\n")
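
# Assumed wiring (not part of this file): when the workflow step running this
# script has an `id`, the heredoc output above is exposed to later steps as
# steps.<step_id>.outputs.OPENAI_PROMPT, which a subsequent step can pass to
# the OpenAI call that generates the release notes.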