4 from typing import NamedTuple, TextIO
5 from datetime import datetime
7 # TODO: remove after testing
10 # end remove after testing
# MR categories, ordered by priority: when an MR carries several type labels,
# the member with the LOWER value wins (see the `.value <` comparison in
# process()).  "NO_TYPE_GIVEN" is the lowest-priority fallback.
# NOTE(review): additional members sit on lines elided from this excerpt.
MR_TYPE = Enum("MR_TYPE", {"Feature(s)": 1,
                           "NO_TYPE_GIVEN": 9999})
19 MR_SIZE = Enum("MR_SIZE", {"Huge": 1,
# Label prefixes used to classify merge requests from their GitLab labels.
TOPIC_PREFIX = "Topic: "          # topic labels (usage not visible in this excerpt)
CHANGELOG_PREFIX = "RN::"         # "RN::<section>" -> release-notes section name
MR_TYPE_PREFIX = "MR Content: "   # "MR Content: <name>" -> MR_TYPE member
MR_SIZE_PREFIX = "MR Size::"      # "MR Size::<name>" -> MR_SIZE member

# GitLab numeric id of the main project (used for the tags/release lookup).
MAIN_PROJECT_ID = 73434
# MRs belonging to these project ids are skipped during processing.
EXCLUDED_PROJECT_IDS: list[int] = [73444, 144002]
# Only MRs merged into one of these branches are considered.
TARGET_BRANCHES = ["master", "pending-release"]

# GitLab REST API v4 endpoints.
GROUP_NAME = "xonotic"
BASEURL = "https://gitlab.com/api/v4"
MAIN_PROJECT_BASEURL = BASEURL + f"/projects/{MAIN_PROJECT_ID}/repository"
GROUP_BASEURL = BASEURL + f"/groups/{GROUP_NAME}"
43 class MergeRequestInfo(NamedTuple):
def get_time_of_latest_release() -> str:
    """Return the creation timestamp (ISO-8601 string) of the latest release.

    Fetches the main project's repository tags from the GitLab REST API and
    reads the commit timestamp of the first entry (GitLab returns tags
    newest-first by default).

    Returns:
        The ISO-8601 "created_at" string of the latest tag's commit.

    Raises:
        requests.HTTPError: if the API responds with an error status.
        requests.Timeout: if the API does not answer within the timeout.
        IndexError: if the project has no tags at all.
    """
    # Without a timeout, requests can block forever on a stalled connection
    # and hang the whole script.
    response = requests.get(MAIN_PROJECT_BASEURL + "/tags", timeout=30)
    # Fail loudly on HTTP errors instead of trying to index into an
    # error-message JSON body.
    response.raise_for_status()
    latest = response.json()[0]
    return latest["commit"]["created_at"]
def get_merge_requests(timestamp: str) -> list[dict]:
    """Fetch all group MRs merged and updated after *timestamp*, paginated.

    If a local "testdata.json" exists it is used instead of the live API
    (see the "remove after testing" markers near the top of the file).
    NOTE(review): several lines of this function (file loading, loop header,
    pagination bookkeeping, return) are elided from this excerpt.
    """
    # Test shortcut: serve cached data instead of hitting the API.
    if os.path.isfile("testdata.json"):
        with open("testdata.json") as f:
    # Query template; the page number is appended per request below.
    url = GROUP_BASEURL + "/merge_requests?state=merged&updated_after=" +\
        f"{timestamp}&per_page={page_len}&page="
        # NOTE(review): no timeout on this request — worth confirming upstream.
        response = requests.get(url + str(current_page))
        new_data = response.json()
        # A short page means we reached the last page of results.
        if len(new_data) < page_len:
        # Safety valve so a bad response can't loop forever.
        if current_page == MAX_PAGES:
def process_description(description: str) -> str:
    """Extract the short release-notes summary from an MR description.

    Expects the first line to be exactly "Summary for release notes:" and
    joins the following lines into one space-separated string; a line
    starting with "---" presumably terminates the summary (its branch body
    is elided from this excerpt).

    Raises:
        ValueError: for an empty description or a missing summary header.
    """
    # NOTE(review): the emptiness guard's `if` line is elided in this excerpt.
        raise ValueError("Empty description")
    lines = description.splitlines()
    if not lines[0].strip() == "Summary for release notes:":
        raise ValueError("Unexpected description format: Summary missing")
    for line in lines[1:]:
        # "---" divider: end of the summary block (handler elided).
        if line.startswith("---"):
        summary += line + " " # add space
    return summary.strip()
def process(timestamp: datetime, data: list[dict]) -> dict[MR_TYPE, dict[str, list[MergeRequestInfo]]]:
    """Filter raw MR dicts and group them as {type: {section: [MergeRequestInfo]}}.

    Drops MRs from excluded projects, MRs not targeting a release branch,
    and MRs merged before *timestamp*; derives type/size/section from the
    labels and a short description from the MR body.
    NOTE(review): the `for item in data:` header, the filter `continue`s and
    the `try:` lines are elided from this excerpt.
    """
    # extract type, size and topic from labels for easier filtering/ordering
    # extract short description from description
    # extract author->name
    processed_data: dict = {mr_type: {} for mr_type in MR_TYPE}
        # Skip projects explicitly excluded from the release notes.
        if item["project_id"] in EXCLUDED_PROJECT_IDS:
        # Only MRs merged into a release-relevant branch count.
        if item["target_branch"] not in TARGET_BRANCHES:
        # Workaround for missing merge information
        if "merged_at" not in item or not isinstance(item["merged_at"], str):
            logging.warning(f"Invalid merge information for {item['iid']} "
                            f"(project: {item['project_id']})")
        # GitLab's rest API doesn't offer a way to filter by "merged_after", so
        # check the "merge_at" field
        if datetime.fromisoformat(item["merged_at"]) < timestamp:
        # Defaults used when no matching label is present.
        mr_type = MR_TYPE.NO_TYPE_GIVEN
        size = MR_SIZE.UNKNOWN
        section = "UNKNOWN SECTION"
        for label in item["labels"]:
            if label.startswith(MR_TYPE_PREFIX):
                # KeyError from an unknown label name is handled below
                # (the `try:` line is elided in this excerpt).
                new_mr_type = MR_TYPE[label.removeprefix(MR_TYPE_PREFIX)]
                logging.warning(f"Unexpected label: {label}, skipping")
                # Lower enum value = higher priority: keep the strongest type.
                if new_mr_type.value < mr_type.value:
                    mr_type = new_mr_type
            if label.startswith(MR_SIZE_PREFIX):
                new_size = MR_SIZE[label.removeprefix(MR_SIZE_PREFIX)]
                logging.warning(f"Unexpected label: {label}, skipping")
                # Lower enum value = bigger size: keep the largest seen.
                if new_size.value < size.value:
            if label.startswith(CHANGELOG_PREFIX):
                section = label.removeprefix(CHANGELOG_PREFIX)
        # Fall back to the MR title if the description doesn't parse.
        short_desc = process_description(item["description"])
        except ValueError as e:
            logging.warning(f"Error processing the description for "
                            f"{item['iid']}: {e}")
            short_desc = item["title"]
        author = item["author"]["name"]
        for reviewer in item["reviewers"]:
            reviewers.append(reviewer["name"])
        # Create the section bucket lazily on first use.
        if section not in processed_data[mr_type]:
            processed_data[mr_type][section] = []
        processed_data[mr_type][section].append(MergeRequestInfo(
            iid=item["iid"], size=size, author=author, reviewers=reviewers,
            short_desc=short_desc, web_url=item["web_url"]))
    return processed_data
def draft_releasenotes(fp: TextIO, data: dict[MR_TYPE, dict[str, list[MergeRequestInfo]]]) -> None:
    """Write a Markdown release-notes draft for *data* to the open file *fp*.

    Emits a top-level title, then per MR type a "---"-underlined heading and
    per section a "### <section>" heading followed by one bullet per MR.
    NOTE(review): the `formatted_items` initialisation, the `author` binding
    and any empty-section guards are elided from this excerpt.
    """
    fp.writelines(["Release Notes\n", "===\n", "\n"])
    for mr_type, sectioned_mr_data in data.items():
        for section, merge_requests in sectioned_mr_data.items():
            # Biggest MRs first: lower MR_SIZE value means larger.
            merge_requests.sort(key=lambda x: x.size.value)
            for item in merge_requests:
                reviewer_str = ", Reviewer(s): " + ", ".join(item.reviewers)
                formatted_items.append(f"- {item.short_desc} (Author: {author}{reviewer_str})"
                                       f" [{item.iid}]({item.web_url})\n")
        fp.writelines([f"{mr_type.name}\n", "---\n"])
            fp.writelines([f"### {section}\n", *formatted_items])
    # Anchor everything to the latest release tag's commit timestamp.
    release_timestamp_str = get_time_of_latest_release()
    release_timestamp = datetime.fromisoformat(release_timestamp_str)
    # The API filter is "updated_after"; process() re-filters on "merged_at".
    merge_requests = get_merge_requests(release_timestamp_str)
    processed_data = process(release_timestamp, merge_requests)
    # NOTE(review): the timestamp string may contain characters (e.g. ":")
    # that are awkward in filenames on some platforms — confirm acceptable.
    with open(f"RN_draft_since_{release_timestamp_str}.md", "w") as f:
        draft_releasenotes(f, processed_data)
192 if __name__ == "__main__":