Release note generator: check `merged_at` field
[xonotic/xonotic.git] / misc / infrastructure / prepare_releasenotes.py
from enum import Enum
import logging
import requests
from typing import NamedTuple, TextIO
from datetime import datetime

# TODO: remove after testing
import os
import json
# end remove after testing


MR_TYPE = Enum("MR_TYPE", {"Feature(s)": 1,
                           "Fix(es)": 2,
                           "Refactoring": 3,
                           "NO_TYPE_GIVEN": 9999})

# for ordering
MR_SIZE = Enum("MR_SIZE", {"Enormous": 1,
                           "Large": 2,
                           "Medium": 3,
                           "Small": 4,
                           "Tiny": 5,
                           "UNKNOWN": 6})

TOPIC_PREFIX = "Topic: "
CHANGELOG_PREFIX = "RN::"
MR_TYPE_PREFIX = "MR Content: "
MR_SIZE_PREFIX = "MR Size::"
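
# Illustrative examples of labels and how they map onto the constants above
# (the section name "Gameplay" is hypothetical; only the prefixes are fixed):
#   "MR Content: Fix(es)" -> MR_TYPE["Fix(es)"]
#   "MR Size::Small"      -> MR_SIZE["Small"]
#   "RN::Gameplay"        -> release notes section "Gameplay"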

MAIN_PROJECT_ID = 73434
EXCLUDED_PROJECT_IDS = []
TARGET_BRANCHES = ["master", "develop", "pending-release"]

GROUP_NAME = "xonotic"
BASEURL = "https://gitlab.com/api/v4"
MAIN_PROJECT_BASEURL = BASEURL + f"/projects/{MAIN_PROJECT_ID}/repository"
GROUP_BASEURL = BASEURL + f"/groups/{GROUP_NAME}"
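
# With the defaults above, these expand to:
#   MAIN_PROJECT_BASEURL -> https://gitlab.com/api/v4/projects/73434/repository
#   GROUP_BASEURL        -> https://gitlab.com/api/v4/groups/xonotic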


class MergeRequestInfo(NamedTuple):
    """Data needed to render a single merge request in the release notes."""
    iid: int
    size: MR_SIZE
    author: str
    short_desc: str
    web_url: str


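# The /repository/tags endpoint returns a JSON list of tag objects, each with
# a "commit" object whose "created_at" field is used as the release timestamp;
# the first element is assumed to be the most recent tag.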
def get_time_of_latest_release() -> str:
    """Return the creation timestamp of the commit behind the latest tag."""
    response = requests.get(MAIN_PROJECT_BASEURL + "/tags")
    latest = response.json()[0]
    return latest["commit"]["created_at"]


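# Example of a URL requested below (the timestamp value is illustrative):
#   https://gitlab.com/api/v4/groups/xonotic/merge_requests?state=merged&updated_after=2024-01-01T00:00:00Z&per_page=10&page=1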
def get_merge_requests(timestamp: str) -> list[dict]:
    """Fetch all merged MRs in the group updated after the given timestamp."""
    if os.path.isfile("testdata.json"):
        with open("testdata.json") as f:
            return json.load(f)
    page_len = 10
    MAX_PAGES = 100
    url = GROUP_BASEURL + "/merge_requests?state=merged&updated_after=" +\
        f"{timestamp}&per_page={page_len}&page="
    current_page = 1
    data = []
    while True:
        response = requests.get(url + str(current_page))
        new_data = response.json()
        if not new_data:
            break
        data.extend(new_data)
        if len(new_data) < page_len:
            break
        if current_page == MAX_PAGES:
            break
        current_page += 1
    return data


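# Expected merge request description format (illustrative; only the header
# line is fixed, see process_description below):
#
#   Summary for release notes:
#   ---
#   One or two sentences that will show up in the release notes.
#   ---
#
#   Anything after the first blank line is ignored.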
def process_description(description: str) -> str:
    """Extract the release notes summary from a merge request description."""
    if not description:
        raise ValueError("Empty description")
    lines = description.splitlines()
    if lines[0].strip() != "Summary for release notes:":
        raise ValueError("Unexpected description format: Summary missing")
    summary = ""
    for line in lines[1:]:
        if line.startswith("---"):
            continue
        if not line:
            break
        summary += line + " "  # add space
    return summary.strip()


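# process() returns a nested mapping; section names and values are
# illustrative:
#   {MR_TYPE["Feature(s)"]: {},
#    MR_TYPE["Fix(es)"]: {"Gameplay": [MergeRequestInfo(iid=123, ...)]},
#    ...}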
def process(timestamp: datetime,
            data: list[dict]) -> dict[MR_TYPE, dict[str, list[MergeRequestInfo]]]:
    """Group MRs merged after `timestamp` by type and release notes section."""
    # extract type, size and section from labels for easier filtering/ordering
    # extract short description from description
    # extract author->name
    processed_data = {mr_type: {} for mr_type in MR_TYPE}
    for item in data:
        if item["project_id"] in EXCLUDED_PROJECT_IDS:
            continue
        if item["target_branch"] not in TARGET_BRANCHES:
            continue
        # Workaround for missing merge information
        if "merged_at" not in item or not isinstance(item["merged_at"], str):
            logging.warning(f"Invalid merge information for {item['iid']} "
                            f"(project: {item['project_id']})")
            continue
        # GitLab's REST API doesn't offer a way to filter by "merged_after",
        # so check the "merged_at" field instead
        if datetime.fromisoformat(item["merged_at"]) < timestamp:
            continue
        mr_type = MR_TYPE.NO_TYPE_GIVEN
        size = MR_SIZE.UNKNOWN
        section = "UNKNOWN SECTION"
        for label in item["labels"]:
            if label.startswith(MR_TYPE_PREFIX):
                try:
                    new_mr_type = MR_TYPE[label.removeprefix(MR_TYPE_PREFIX)]
                except KeyError:
                    logging.warning(f"Unexpected label: {label}, skipping")
                    continue
                if new_mr_type.value < mr_type.value:
                    mr_type = new_mr_type
                continue
            if label.startswith(MR_SIZE_PREFIX):
                try:
                    new_size = MR_SIZE[label.removeprefix(MR_SIZE_PREFIX)]
                except KeyError:
                    logging.warning(f"Unexpected label: {label}, skipping")
                    continue
                if new_size.value < size.value:
                    size = new_size
                continue
            if label.startswith(CHANGELOG_PREFIX):
                section = label.removeprefix(CHANGELOG_PREFIX)
                continue
        try:
            short_desc = process_description(item["description"])
        except ValueError as e:
            logging.warning(f"Error processing the description for "
                            f"{item['iid']}: {e}")
            short_desc = item["title"]
        author = item["author"]["name"]
        if section not in processed_data[mr_type]:
            processed_data[mr_type][section] = []
        processed_data[mr_type][section].append(MergeRequestInfo(
            iid=item["iid"], size=size, author=author,
            short_desc=short_desc, web_url=item["web_url"]))
    return processed_data


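# Shape of the Markdown produced below (section, author and MR values are
# illustrative):
#
#   Release Notes
#   ===
#
#   Feature(s)
#   ---
#   ### Gameplay
#   - Short summary by Some Author ([123](https://gitlab.com/...))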
def draft_releasenotes(fp: TextIO,
                       data: dict[MR_TYPE, dict[str, list[MergeRequestInfo]]]) -> None:
    """Write a Markdown draft of the release notes to `fp`."""
    fp.writelines(["Release Notes\n", "===\n", "\n"])
    for mr_type, sectioned_mr_data in data.items():
        type_written = False
        for section, merge_requests in sectioned_mr_data.items():
            formatted_items = []
            # list the largest merge requests first
            merge_requests.sort(key=lambda x: x.size.value)
            for item in merge_requests:
                formatted_items.append(f"- {item.short_desc} by {item.author} "
                                       f"([{item.iid}]({item.web_url}))\n")
            if formatted_items:
                if not type_written:
                    fp.writelines([f"{mr_type.name}\n", "---\n"])
                    type_written = True
                fp.writelines([f"### {section}\n", *formatted_items])
                fp.write("\n")


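# Running the script writes a draft named "RN_draft_since_<timestamp>.md" to
# the current directory, where <timestamp> is the creation time of the latest
# release tag's commit.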
def main() -> None:
    release_timestamp_str = get_time_of_latest_release()
    release_timestamp = datetime.fromisoformat(release_timestamp_str)
    merge_requests = get_merge_requests(release_timestamp_str)
    processed_data = process(release_timestamp, merge_requests)
    with open(f"RN_draft_since_{release_timestamp_str}.md", "w") as f:
        draft_releasenotes(f, processed_data)


if __name__ == "__main__":
    main()