]> git.xonotic.org Git - xonotic/xonotic.git/blob - misc/infrastructure/prepare_releasenotes.py
Release Note Generator: `Enormous`->`Huge`
[xonotic/xonotic.git] / misc / infrastructure / prepare_releasenotes.py
1 from enum import Enum
2 import logging
3 import requests
4 from typing import NamedTuple, TextIO
5 from datetime import datetime
6
7 # TODO: remove after testing
8 import os
9 import json
10 # end remove after testing
11
12
# MR type labels (the part after the "MR Content: " label prefix), mapped to
# an ordering weight: lower value = listed earlier in the release notes.
# Built with the functional Enum API because "Feature(s)" / "Fix(es)" are not
# valid Python identifiers.
MR_TYPE = Enum("MR_TYPE", {"Feature(s)": 1,
                           "Fix(es)": 2,
                           "Refactoring": 3,
                           "NO_TYPE_GIVEN": 9999})

# MR size labels (the part after the "MR Size::" label prefix); lower value =
# bigger MR.  Used for ordering entries within a section, biggest first.
MR_SIZE = Enum("MR_SIZE", {"Huge": 1,
                           "Large": 2,
                           "Medium": 3,
                           "Small": 4,
                           "Tiny": 5,
                           "UNKNOWN": 6})

# GitLab label prefixes used to classify merge requests.
TOPIC_PREFIX = "Topic: "  # NOTE: currently not referenced in this file
CHANGELOG_PREFIX = "RN::"  # release-notes section name follows this prefix
MR_TYPE_PREFIX = "MR Content: "
MR_SIZE_PREFIX = "MR Size::"

MAIN_PROJECT_ID = 73434
# MRs from these projects are not relevant for the release notes:
# 73444: mediasource
# 144002: xonotic.org
EXCLUDED_PROJECT_IDS: list[int] = [73444, 144002]
# Only MRs merged into one of these branches are considered.
TARGET_BRANCHES = ["master", "pending-release"]

GROUP_NAME = "xonotic"
BASEURL = "https://gitlab.com/api/v4"
MAIN_PROJECT_BASEURL = BASEURL + f"/projects/{MAIN_PROJECT_ID}/repository"
GROUP_BASEURL = BASEURL + f"/groups/{GROUP_NAME}"
41
42
class MergeRequestInfo(NamedTuple):
    """Immutable summary of one merged MR, ready for release-note formatting."""
    iid: int                # project-local MR number (the "!123" number)
    size: MR_SIZE           # orders entries within a section (biggest first)
    author: str             # display name of the MR author
    reviewers: list[str]    # display names of the reviewers; may be empty
    short_desc: str         # one-line summary (from description, or MR title)
    web_url: str            # link to the MR on GitLab
50
51
def get_time_of_latest_release() -> str:
    """Return the commit timestamp (ISO 8601 string) of the most recent tag.

    GitLab lists repository tags newest-first by default, so the first entry
    of the response is taken as the latest release.

    Raises:
        requests.HTTPError: if the tag listing request fails.
        IndexError: if the project has no tags at all.
    """
    # timeout: never hang the release-notes job on a stuck connection
    response = requests.get(MAIN_PROJECT_BASEURL + "/tags", timeout=30)
    # Fail loudly on HTTP errors instead of trying to parse an error body.
    response.raise_for_status()
    latest = response.json()[0]
    return latest["commit"]["created_at"]
56
57
def get_merge_requests(timestamp: str) -> list[dict]:
    """Fetch all merged MRs in the group that were updated after *timestamp*.

    Follows GitLab's pagination until a short (or empty) page signals the end,
    capped at MAX_PAGES as a safety net against runaway loops.

    Raises:
        requests.HTTPError: if any page request fails.
    """
    # TODO: remove after testing — serves canned data if a fixture is present
    if os.path.isfile("testdata.json"):
        with open("testdata.json") as f:
            return json.load(f)
    page_len = 10
    MAX_PAGES = 100  # hard upper bound; 1000 MRs is far above a release cycle
    url = GROUP_BASEURL + "/merge_requests?state=merged&updated_after=" +\
        f"{timestamp}&per_page={page_len}&page="
    data: list[dict] = []
    for current_page in range(1, MAX_PAGES + 1):
        response = requests.get(url + str(current_page), timeout=30)
        response.raise_for_status()  # fail loudly instead of parsing an error
        new_data = response.json()
        data.extend(new_data)
        if len(new_data) < page_len:
            break  # short (or empty) page: this was the last one
    return data
80
81
def process_description(description: str) -> str:
    """Extract the release-note summary paragraph from an MR description.

    Expected format::

        Summary for release notes:
        <summary text, possibly wrapped over several lines>
        <blank line terminates the summary>

    Markdown horizontal rules ("---") are ignored, and blank lines *before*
    the summary text begins are tolerated.  Wrapped lines are joined with
    single spaces.

    Raises:
        ValueError: if the description is empty, the header line is missing,
            or no summary text is found — callers fall back to the MR title.
    """
    if not description:
        raise ValueError("Empty description")
    lines = description.splitlines()
    if lines[0].strip() != "Summary for release notes:":
        raise ValueError("Unexpected description format: Summary missing")
    parts: list[str] = []
    for line in lines[1:]:
        if line.startswith("---"):
            continue  # markdown horizontal rule, not summary content
        if not line:
            if parts:
                break  # blank line ends the summary once it has begun
            continue  # skip blank line(s) between the header and the text
        parts.append(line)
    summary = " ".join(parts).strip()
    if not summary:
        # Raising here lets the caller substitute the MR title instead of
        # emitting an empty bullet point in the release notes.
        raise ValueError("Summary section is empty")
    return summary
96
97
98
def process(timestamp: datetime, data: list[dict]) -> dict[MR_TYPE, dict[str, list[MergeRequestInfo]]]:
    """Filter raw GitLab MR dicts and group them as {type: {section: [info]}}.

    An MR is skipped when it belongs to an excluded project, does not target a
    release-relevant branch, has no usable "merged_at" field, or was merged
    before *timestamp*.  Type, size and section are derived from the GitLab
    labels; the short description is extracted from the MR description with a
    fallback to the MR title.
    """
    # extract type, size and topic from labels for easier filtering/ordering
    # extract short description from description
    # extract author->name
    processed_data: dict = {mr_type: {} for mr_type in MR_TYPE}
    for item in data:
        if item["project_id"] in EXCLUDED_PROJECT_IDS:
            continue
        if item["target_branch"] not in TARGET_BRANCHES:
            continue
        # Workaround for missing merge information
        if "merged_at" not in item or not isinstance(item["merged_at"], str):
            logging.warning(f"Invalid merge information for {item['iid']} "
                            f"(project: {item['project_id']})")
            continue
        # GitLab's rest API doesn't offer a way to filter by "merged_after", so
        # check the "merged_at" field ourselves
        if datetime.fromisoformat(item["merged_at"]) < timestamp:
            continue
        # Defaults sort last; labels below can only "upgrade" them, because a
        # lower enum value wins when multiple type/size labels are present.
        mr_type = MR_TYPE.NO_TYPE_GIVEN
        size = MR_SIZE.UNKNOWN
        section = "UNKNOWN SECTION"
        for label in item["labels"]:
            if label.startswith(MR_TYPE_PREFIX):
                try:
                    new_mr_type = MR_TYPE[label.removeprefix(MR_TYPE_PREFIX)]
                except KeyError:
                    # label uses the prefix but an unknown suffix
                    logging.warning(f"Unexpected label: {label}, skipping")
                    continue
                if new_mr_type.value < mr_type.value:
                    mr_type = new_mr_type
                continue
            if label.startswith(MR_SIZE_PREFIX):
                try:
                    new_size = MR_SIZE[label.removeprefix(MR_SIZE_PREFIX)]
                except KeyError:
                    logging.warning(f"Unexpected label: {label}, skipping")
                    continue
                if new_size.value < size.value:
                    size = new_size
                continue
            if label.startswith(CHANGELOG_PREFIX):
                # if several RN:: labels are set, the last one wins
                section = label.removeprefix(CHANGELOG_PREFIX)
                continue
        try:
            short_desc = process_description(item["description"])
        except ValueError as e:
            # description doesn't follow the template: fall back to the title
            logging.warning(f"Error processing the description for "
                            f"{item['iid']}: {e}")
            short_desc = item["title"]
        author = item["author"]["name"]
        reviewers = []
        for reviewer in item["reviewers"]:
            reviewers.append(reviewer["name"])
        if section not in processed_data[mr_type]:
            processed_data[mr_type][section] = []
        processed_data[mr_type][section].append(MergeRequestInfo(
            iid=item["iid"], size=size, author=author, reviewers=reviewers,
            short_desc=short_desc, web_url=item["web_url"]))
    return processed_data
159
160
def draft_releasenotes(fp: TextIO, data: dict[MR_TYPE, dict[str, list[MergeRequestInfo]]]) -> None:
    """Write a markdown release-notes draft for *data* to *fp*.

    Layout: one "---"-underlined heading per MR type, one "###" heading per
    changelog section, one bullet per MR ordered biggest-first by size.
    Types/sections without any entries are omitted entirely.
    """
    fp.writelines(["Release Notes\n", "===\n", "\n"])
    for mr_type, sectioned_mr_data in data.items():
        type_written = False  # emit the type heading lazily, only if needed
        for section, merge_requests in sectioned_mr_data.items():
            formatted_items = []
            # sorted() instead of list.sort(): don't mutate the caller's data
            for item in sorted(merge_requests, key=lambda mr: mr.size.value):
                reviewer_str = ""
                if item.reviewers:
                    reviewer_str = ", Reviewer(s): " + ", ".join(item.reviewers)
                formatted_items.append(
                    f"- {item.short_desc} (Author: {item.author}{reviewer_str})"
                    f" [{item.iid}]({item.web_url})\n")
            if formatted_items:
                if not type_written:
                    fp.writelines([f"{mr_type.name}\n", "---\n"])
                    type_written = True
                fp.writelines([f"### {section}\n", *formatted_items])
                fp.write("\n")
181
182
def main() -> None:
    """Entry point: draft release notes for everything merged since the
    latest tag and write them to a timestamped markdown file."""
    release_timestamp_str = get_time_of_latest_release()
    release_timestamp = datetime.fromisoformat(release_timestamp_str)
    merge_requests = get_merge_requests(release_timestamp_str)
    processed_data = process(release_timestamp, merge_requests)
    # Explicit encoding: MR titles/authors are not ASCII-safe and the output
    # must not depend on the host locale.
    with open(f"RN_draft_since_{release_timestamp_str}.md", "w",
              encoding="utf-8") as f:
        draft_releasenotes(f, processed_data)


if __name__ == "__main__":
    main()