From 954486d5c21dfb6e3ffb402aac6d2fe5a88e4b1a Mon Sep 17 00:00:00 2001
From: Matthew Gaughan
Date: Thu, 14 Mar 2024 08:18:30 -0500
Subject: [PATCH] expanding parsing to enable time-specific file

---
 pr_data_get.py | 58 ++++++++++++++++++++++++++++++--------------------
 1 file changed, 35 insertions(+), 23 deletions(-)

diff --git a/pr_data_get.py b/pr_data_get.py
index 920f6fb..ae2d840 100644
--- a/pr_data_get.py
+++ b/pr_data_get.py
@@ -24,9 +24,18 @@ def file_get_pr(upstream_vcs_link, me_read):
     #print(upstream_vcs_link.split('/')[4])
     project_dict = {}
     project_dict['upstream_vcs_link'] = upstream_vcs_link
-    upstream_vcs_link = "/".join(upstream_vcs_link.split("/")[0:5])
+    if upstream_vcs_link == " https://gitlab.com/ubports/core/cmake-extras":
+        return {}
+    upstream_vcs_link = upstream_vcs_link.strip()
+    if "github" in upstream_vcs_link or "gitlab" in upstream_vcs_link:
+        #making an evaluation that sub-branches aren't being used and that people would fork if needed
+        #this only looks at main
+        upstream_vcs_link = "/".join(upstream_vcs_link.split("/")[0:5])
+        print(upstream_vcs_link)
+        full_temp_path = temp_dir + upstream_vcs_link.split('/')[4] + ".git"
+    else:
+        full_temp_path = temp_dir + upstream_vcs_link.split('/')[-1] + ".git"
     print(upstream_vcs_link)
-    full_temp_path = temp_dir + upstream_vcs_link.split('/')[4] + ".git"
     repo = Git(uri=upstream_vcs_link, gitpath=full_temp_path)
     try:
         commits = repo.fetch()
@@ -40,6 +49,7 @@ def file_get_pr(upstream_vcs_link, me_read):
     commit_list = []
     first_date_readme = ""
     for commit in commits:
+        #print(commit['data'])
         if "Merge" in commit['data'].keys():
             commit_list.append([commit['data']['CommitDate'], True, commit['data']['Author'], commit['data']['Commit']])
             if has_contributing:
@@ -58,6 +68,7 @@ def file_get_pr(upstream_vcs_link, me_read):
                 if "README" in file['file'] and has_readme == False:
                     has_readme = True
                     first_date_readme = dateutil.parser.parse(commit['data']['CommitDate'])
+                    project_dict['readme_commit_hash'] = commit['data']['commit']
     shutil.rmtree(full_temp_path, ignore_errors=True)
     if first_date_readme == "":
         return {}
@@ -65,36 +76,36 @@ def file_get_pr(upstream_vcs_link, me_read):
         project_dict['first_readme'] = first_date_readme
         before_read = pr_count(first_date_readme+ dt.timedelta(days=-window, hours=0), first_date_readme, commit_list, [], [])
         if before_read != None:
-            project_dict['b6w_prs_read'] = before_read[0]
-            project_dict['b6w_mrg_read'] = before_read[1]
-            project_dict['b6w_auth_read'] = before_read[2]
-            project_dict['b6w_commit_read'] = before_read[3]
+            project_dict['before_prs_read'] = before_read[0]
+            project_dict['before_mrg_read'] = before_read[1]
+            project_dict['before_auth_new'] = before_read[2]
+            project_dict['before_commit_new'] = before_read[3]
         else:
             return {}
         after_read = pr_count(first_date_readme, first_date_readme + dt.timedelta(days=window, hours=0), commit_list, before_read[4], before_read[5])
         if after_read != None:
-            project_dict['a6w_prs_read'] = after_read[0]
-            project_dict['a6w_mrg_read'] = after_read[1]
-            project_dict['a6w_auth_read'] = after_read[2]
-            project_dict['a6w_commit_read'] = after_read[3]
+            project_dict['after_prs_read'] = after_read[0]
+            project_dict['after_mrg_read'] = after_read[1]
+            project_dict['after_auth_new'] = after_read[2]
+            project_dict['after_commit_new'] = after_read[3]
         else:
             return {}
     else:
         project_dict['first_contributing'] = first_date_contributing
         before_cont = pr_count(first_date_contributing + dt.timedelta(days=-window, hours=0), first_date_contributing, commit_list, [], [])
         if before_cont != None:
-            project_dict['b6w_prs_cont'] = before_cont[0]
-            project_dict['b6w_mrg_cont'] = before_cont[1]
-            project_dict['b6w_auth_cont'] = before_cont[2]
-            project_dict['b6w_commit_cont'] = before_cont[3]
+            project_dict['before_prs_cont'] = before_cont[0]
+            project_dict['before_mrg_cont'] = before_cont[1]
+            project_dict['before_auth_new'] = before_cont[2]
+            project_dict['before_commit_new'] = before_cont[3]
         else:
             return {}
         after_cont = pr_count(first_date_contributing, first_date_contributing + dt.timedelta(days=window, hours=0), commit_list, before_cont[4], before_cont[5])
         if after_cont != None:
-            project_dict['a6w_prs_cont'] = after_cont[0]
-            project_dict['a6w_mrg_cont'] = after_cont[1]
-            project_dict['a6w_auth_cont'] = after_cont[2]
-            project_dict['a6w_commit_cont'] = after_cont[3]
+            project_dict['after_prs_cont'] = after_cont[0]
+            project_dict['after_mrg_cont'] = after_cont[1]
+            project_dict['after_auth_new'] = after_cont[2]
+            project_dict['after_commit_new'] = after_cont[3]
         else:
             return {}
     print(project_dict)
@@ -132,14 +143,14 @@ def pr_count(start, end, commits, author_roster, commit_roster):
     print(len(by_week))
     return [by_week, by_week_merge, new_authors, new_committers, author_roster, commit_roster]
 
-#TODO: need to do this for all files in the dataset of readme or contributing
 def for_files():
     csv_path = "final_data/kk_final_readme_roster.csv"
     count = 0
     with open(csv_path, 'r') as file:
         csv_reader = csv.DictReader(file)
-        with open('kk_test_031024_pr_did.csv', "w") as writing_file:
-            keys = ['upstream_vcs_link', "first_readme", "b6w_prs_read", "b6w_mrg_read", "a6w_prs_read", "a6w_mrg_read"]
+        with open('kk_test_031424_pr_did.csv', "w") as writing_file:
+            # this would also have to get switched for the cont dataset
+            keys = ['upstream_vcs_link', "first_readme", "before_prs_read", "before_mrg_read", "after_prs_read", "after_mrg_read", 'before_auth_new', 'after_commit_new', 'after_auth_new', 'before_commit_new']
             dict_writer = csv.DictWriter(writing_file, keys)
             dict_writer.writeheader()
             for row in csv_reader:
@@ -151,8 +162,9 @@ def for_files():
 
 
 if __name__ == "__main__":
-    for_files()
-    #file_get_pr("https://github.com/tqdm/tqdm", True)
+    #for_files()
+    file_get_pr("https://github.com/tqdm/tqdm", True)
     #file_get_pr("https://github.com/GameServerManagers/LinuxGSM", True)
     #file_get_pr("https://github.com/walling/unorm/issues/new/", True)
+    file_get_pr("https://github.com/krahets/hello-algo/tree/dev1", True)
 
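
Reviewer note (sketch, not part of the patch): the link handling added in the first hunk is
easier to read pulled out into a helper. The name normalize_repo_link below is hypothetical,
as is the "/tmp/" default; the patch does this inline in file_get_pr(), where temp_dir comes
from module scope.

    def normalize_repo_link(upstream_vcs_link, temp_dir="/tmp/"):
        # standalone restatement of the hunk-1 logic, for illustration only
        upstream_vcs_link = upstream_vcs_link.strip()
        if "github" in upstream_vcs_link or "gitlab" in upstream_vcs_link:
            # keep scheme, host, owner, repo: ".../hello-algo/tree/dev1" -> ".../hello-algo"
            upstream_vcs_link = "/".join(upstream_vcs_link.split("/")[0:5])
            repo_name = upstream_vcs_link.split("/")[4]
        else:
            # other forges: fall back to the last path segment as the repo name
            repo_name = upstream_vcs_link.split("/")[-1]
        return upstream_vcs_link, temp_dir + repo_name + ".git"

    # e.g. the new __main__ test link collapses to its cloneable owner/repo URL:
    # normalize_repo_link("https://github.com/krahets/hello-algo/tree/dev1")
    # -> ("https://github.com/krahets/hello-algo", "/tmp/hello-algo.git")

The [0:5] slice keeps "https:", the empty segment after it, the host, the owner, and the repo,
so deep links such as /tree/dev1 or /issues/new reduce to a repository URL that the Git()
backend used in file_get_pr() can clone; non-GitHub/GitLab links keep their full path and only
borrow the last segment to name the temporary clone directory.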