4 from typing import NamedTuple, TextIO
6 # TODO: remove after testing
9 # end remove after testing
12 MR_TYPE = Enum("MR_TYPE", {"Feature(s)": 1,
15 "NO_TYPE_GIVEN": 9999})
18 MR_SIZE = Enum("MR_SIZE", {"Enormous": 1,
# Label prefixes used on GitLab merge requests to encode metadata;
# process() strips these prefixes to recover the encoded value.
TOPIC_PREFIX = "Topic: "
CHANGELOG_PREFIX = "RN::"        # carries the release-notes section name
MR_TYPE_PREFIX = "MR Content: "  # carries an MR_TYPE member name
MR_SIZE_PREFIX = "MR Size::"     # carries an MR_SIZE member name

# GitLab identifiers and API endpoints.
MAIN_PROJECT_ID = 73434   # project whose tags define the "latest release"
EXCLUDED_PROJECT_IDS = []  # project ids whose MRs process() skips entirely
# Only MRs merged into one of these branches are included in the notes.
TARGET_BRANCHES = ["master", "develop", "pending-release"]

GROUP_NAME = "xonotic"
BASEURL = "https://gitlab.com/api/v4"
MAIN_PROJECT_BASEURL = BASEURL + f"/projects/{MAIN_PROJECT_ID}/repository"
GROUP_BASEURL = BASEURL + f"/groups/{GROUP_NAME}"
40 class MergeRequestInfo(NamedTuple):
def get_time_of_latest_release() -> str:
    """Return the creation timestamp (ISO 8601 string) of the latest release tag.

    Queries the main project's repository tags via the GitLab API; GitLab
    returns tags newest-first by default, so the first entry is the latest.

    Raises:
        requests.HTTPError: if the API responds with an error status.
        requests.Timeout: if the API does not answer within the timeout.
        IndexError: if the project has no tags at all.
    """
    # Always pass a timeout: requests.get() without one can block forever
    # on a stalled connection.
    response = requests.get(MAIN_PROJECT_BASEURL + "/tags", timeout=30)
    # Fail loudly on an error response instead of raising a confusing
    # IndexError/KeyError while parsing the error body below.
    response.raise_for_status()
    latest_tag = response.json()[0]
    return latest_tag["commit"]["created_at"]
def get_merge_requests(timestamp: str) -> list[dict]:
    """Fetch merged MRs in the group that were updated after *timestamp*.

    When a local "testdata.json" file exists it is used as a fixture instead
    of the network (see the "remove after testing" TODO at the top of the
    file); otherwise the GitLab group merge-requests API is paged through.
    """
    # Test shortcut: serve canned data instead of hitting the API.
    if os.path.isfile("testdata.json"):
        with open("testdata.json") as f:
    # Base URL; the page number is appended per request in the loop below.
    url = GROUP_BASEURL + "/merge_requests?state=merged&updated_after=" +\
        f"{timestamp}&per_page={page_len}&page="
        response = requests.get(url + str(current_page))
        new_data = response.json()
        # A short page means this was the last page of results.
        if len(new_data) < page_len:
        # Safety valve so a server bug cannot make us page forever.
        if current_page == MAX_PAGES:
def process_description(description: str) -> str:
    """Extract the one-line release-notes summary from an MR description.

    The description is expected to start with the header line
    "Summary for release notes:", followed by the summary text, which is
    terminated by a "---" separator line.

    Raises:
        ValueError: if the description is empty or the header is missing.
    """
        raise ValueError("Empty description")
    lines = description.splitlines()
    # The very first line must be the exact summary header.
    if not lines[0].strip() == "Summary for release notes:":
        raise ValueError("Unexpected description format: Summary missing")
    # Accumulate summary lines until the "---" separator is reached.
    for line in lines[1:]:
        if line.startswith("---"):
        summary += line + " " # add space so joined lines don't run together
    return summary.strip()
def process(data: list[dict]) -> dict[MR_TYPE, dict[str, MergeRequestInfo]]:
    """Group raw GitLab MR dicts into MR_TYPE -> section -> MergeRequestInfo.

    Skips MRs from excluded projects and MRs not merged into a release
    branch; derives type/size/section from labels and a short description
    from the MR description (falling back to the MR title).
    """
    # extract type, size and topic from labels for easier filtering/ordering
    # extract short description from description
    # extract author->name
    processed_data = {mr_type: {} for mr_type in MR_TYPE}
        # Filter: excluded projects and non-release target branches.
        if item["project_id"] in EXCLUDED_PROJECT_IDS:
        if item["target_branch"] not in TARGET_BRANCHES:
        # Defaults used when no matching label is found on the MR.
        mr_type = MR_TYPE.NO_TYPE_GIVEN
        size = MR_SIZE.UNKNOWN
        section = "UNKNOWN SECTION"
        for label in item["labels"]:
            if label.startswith(MR_TYPE_PREFIX):
                # Label suffix must name an MR_TYPE member, else KeyError.
                new_mr_type = MR_TYPE[label.removeprefix(MR_TYPE_PREFIX)]
                logging.warning(f"Unexpected label: {label}, skipping")
                # Lower enum value = more significant type; keep the most
                # significant one seen across all labels.
                if new_mr_type.value < mr_type.value:
                    mr_type = new_mr_type
            if label.startswith(MR_SIZE_PREFIX):
                new_size = MR_SIZE[label.removeprefix(MR_SIZE_PREFIX)]
                logging.warning(f"Unexpected label: {label}, skipping")
                # Lower MR_SIZE value = larger MR ("Enormous" is 1);
                # keep the largest size seen.
                if new_size.value < size.value:
            if label.startswith(CHANGELOG_PREFIX):
                section = label.removeprefix(CHANGELOG_PREFIX)
            short_desc = process_description(item["description"])
        except ValueError as e:
            # Malformed description: log it and fall back to the MR title.
            logging.warning(f"Error processing the description for "
                            f"{item['iid']}: {e}")
            short_desc = item["title"]
        author = item["author"]["name"]
        # Group by type first, then by release-notes section.
        if section not in processed_data[mr_type]:
            processed_data[mr_type][section] = []
        processed_data[mr_type][section].append(MergeRequestInfo(
            iid=item["iid"], size=size, author=author,
            short_desc=short_desc, web_url=item["web_url"]))
    return processed_data
def draft_releasenotes(fp: TextIO, data: dict[MR_TYPE, dict[str, MergeRequestInfo]]) -> None:
    """Write a markdown release-notes draft for *data* to the open file *fp*."""
    fp.writelines(["Release Notes\n", "===\n", "\n"])
    for mr_type, sectioned_mr_data in data.items():
        for section, merge_requests in sectioned_mr_data.items():
            # Sort biggest first: lower MR_SIZE value means a larger MR.
            merge_requests.sort(key=lambda x: x.size.value)
            for item in merge_requests:
                authors = item.author
                # One markdown bullet per MR, linking the iid to its web URL.
                formatted_items.append(f"- {item.short_desc} by {authors} "
                                       f"([{item.iid}]({item.web_url}))\n")
        # Type heading (e.g. "Feature(s)") as a markdown H2 underline.
        fp.writelines([f"{mr_type.name}\n", "---\n"])
            fp.writelines([f"### {section}\n", *formatted_items])
    # Entry point logic: collect everything merged since the latest release
    # tag and write a markdown draft named after that timestamp.
    # NOTE(review): presumably the body of a main() whose def line is nearby
    # — confirm the enclosing function.
    release_timestamp = get_time_of_latest_release()
    merge_requests = get_merge_requests(release_timestamp)
    processed_data = process(merge_requests)
    with open(f"RN_draft_since_{release_timestamp}.md", "w") as f:
        draft_releasenotes(f, processed_data)
172 if __name__ == "__main__":