expanding parsing to enable time-specific file

Matthew Gaughan 2024-03-14 08:18:30 -05:00
parent 99fb547920
commit 954486d5c2


@@ -24,9 +24,18 @@ def file_get_pr(upstream_vcs_link, me_read):
     #print(upstream_vcs_link.split('/')[4])
     project_dict = {}
     project_dict['upstream_vcs_link'] = upstream_vcs_link
-    upstream_vcs_link = "/".join(upstream_vcs_link.split("/")[0:5])
+    if upstream_vcs_link == " https://gitlab.com/ubports/core/cmake-extras":
+        return {}
+    upstream_vcs_link = upstream_vcs_link.strip()
+    if "github" in upstream_vcs_link or "gitlab" in upstream_vcs_link:
+        #making an evaluation that sub branches aren't being used and that people would fork if needed
+        #this only looks at main
+        upstream_vcs_link = "/".join(upstream_vcs_link.split("/")[0:5])
+        print(upstream_vcs_link)
+        full_temp_path = temp_dir + upstream_vcs_link.split('/')[4] + ".git"
+    else:
+        full_temp_path = temp_dir + upstream_vcs_link.split('/')[- 1] + ".git"
     print(upstream_vcs_link)
-    full_temp_path = temp_dir + upstream_vcs_link.split('/')[4] + ".git"
     repo = Git(uri=upstream_vcs_link, gitpath=full_temp_path)
     try:
         commits = repo.fetch()
@@ -40,6 +49,7 @@ def file_get_pr(upstream_vcs_link, me_read):
     commit_list = []
     first_date_readme = ""
     for commit in commits:
+        #print(commit['data'])
         if "Merge" in commit['data'].keys():
             commit_list.append([commit['data']['CommitDate'], True, commit['data']['Author'], commit['data']['Commit']])
         if has_contributing:
@@ -58,6 +68,7 @@ def file_get_pr(upstream_vcs_link, me_read):
                 if "README" in file['file'] and has_readme == False:
                     has_readme = True
                     first_date_readme = dateutil.parser.parse(commit['data']['CommitDate'])
+                    project_dict['readme_commit_hash'] = commit['data']['commit']
     shutil.rmtree(full_temp_path, ignore_errors=True)
     if first_date_readme == "":
         return {}
@@ -65,36 +76,36 @@ def file_get_pr(upstream_vcs_link, me_read):
         project_dict['first_readme'] = first_date_readme
         before_read = pr_count(first_date_readme+ dt.timedelta(days=-window, hours=0), first_date_readme, commit_list, [], [])
         if before_read != None:
-            project_dict['b6w_prs_read'] = before_read[0]
-            project_dict['b6w_mrg_read'] = before_read[1]
-            project_dict['b6w_auth_read'] = before_read[2]
-            project_dict['b6w_commit_read'] = before_read[3]
+            project_dict['before_prs_read'] = before_read[0]
+            project_dict['before_mrg_read'] = before_read[1]
+            project_dict['before_auth_new'] = before_read[2]
+            project_dict['before_commit_new'] = before_read[3]
         else:
             return {}
         after_read = pr_count(first_date_readme, first_date_readme + dt.timedelta(days=window, hours=0), commit_list, before_read[4], before_read[5])
         if after_read != None:
-            project_dict['a6w_prs_read'] = after_read[0]
-            project_dict['a6w_mrg_read'] = after_read[1]
-            project_dict['a6w_auth_read'] = after_read[2]
-            project_dict['a6w_commit_read'] = after_read[3]
+            project_dict['after_prs_read'] = after_read[0]
+            project_dict['after_mrg_read'] = after_read[1]
+            project_dict['after_auth_new'] = after_read[2]
+            project_dict['after_commit_new'] = after_read[3]
         else:
             return {}
     else:
         project_dict['first_contributing'] = first_date_contributing
         before_cont = pr_count(first_date_contributing + dt.timedelta(days=-window, hours=0), first_date_contributing, commit_list, [], [])
         if before_cont != None:
-            project_dict['b6w_prs_cont'] = before_cont[0]
-            project_dict['b6w_mrg_cont'] = before_cont[1]
-            project_dict['b6w_auth_cont'] = before_cont[2]
-            project_dict['b6w_commit_cont'] = before_cont[3]
+            project_dict['before_prs_cont'] = before_cont[0]
+            project_dict['before_mrg_cont'] = before_cont[1]
+            project_dict['before_auth_new'] = before_cont[2]
+            project_dict['before_commit_new'] = before_cont[3]
         else:
             return {}
         after_cont = pr_count(first_date_contributing, first_date_contributing + dt.timedelta(days=window, hours=0), commit_list, before_cont[4], before_cont[5])
         if after_cont != None:
-            project_dict['a6w_prs_cont'] = after_cont[0]
-            project_dict['a6w_mrg_cont'] = after_cont[1]
-            project_dict['a6w_auth_cont'] = after_cont[2]
-            project_dict['a6w_commit_cont'] = after_cont[3]
+            project_dict['after_prs_cont'] = after_cont[0]
+            project_dict['after_mrg_cont'] = after_cont[1]
+            project_dict['after_auth_new'] = after_cont[2]
+            project_dict['after_commit_new'] = after_cont[3]
         else:
             return {}
     print(project_dict)
@@ -132,14 +143,14 @@ def pr_count(start, end, commits, author_roster, commit_roster):
     print(len(by_week))
     return [by_week, by_week_merge, new_authors, new_committers, author_roster, commit_roster]
 
+#TODO: need to do this for all files in the dataset of readme or contributing
 def for_files():
     csv_path = "final_data/kk_final_readme_roster.csv"
     count = 0
     with open(csv_path, 'r') as file:
         csv_reader = csv.DictReader(file)
-        with open('kk_test_031024_pr_did.csv', "w") as writing_file:
-            keys = ['upstream_vcs_link', "first_readme", "b6w_prs_read", "b6w_mrg_read", "a6w_prs_read", "a6w_mrg_read"]
+        with open('kk_test_031424_pr_did.csv', "w") as writing_file:
+            # this would also have to get switched fro the cont dataset
+            keys = ['upstream_vcs_link', "first_readme", "before_prs_read", "before_mrg_read", "after_prs_read", "after_mrg_read", 'before_auth_new', 'after_commit_new', 'after_auth_new', 'before_commit_new']
             dict_writer = csv.DictWriter(writing_file, keys)
             dict_writer.writeheader()
             for row in csv_reader:
@@ -151,8 +162,9 @@ def for_files():
 
 if __name__ == "__main__":
-    for_files()
-    #file_get_pr("https://github.com/tqdm/tqdm", True)
+    #for_files()
+    file_get_pr("https://github.com/tqdm/tqdm", True)
     #file_get_pr("https://github.com/GameServerManagers/LinuxGSM", True)
     #file_get_pr("https://github.com/walling/unorm/issues/new/", True)
+    file_get_pr("https://github.com/krahets/hello-algo/tree/dev1", True)