updating pr_grabber again
This commit is contained in:
parent
6bdc4fa6ef
commit
99fb547920
@ -24,6 +24,8 @@ def file_get_pr(upstream_vcs_link, me_read):
|
||||
#print(upstream_vcs_link.split('/')[4])
|
||||
project_dict = {}
|
||||
project_dict['upstream_vcs_link'] = upstream_vcs_link
|
||||
upstream_vcs_link = "/".join(upstream_vcs_link.split("/")[0:5])
|
||||
print(upstream_vcs_link)
|
||||
full_temp_path = temp_dir + upstream_vcs_link.split('/')[4] + ".git"
|
||||
repo = Git(uri=upstream_vcs_link, gitpath=full_temp_path)
|
||||
try:
|
||||
@ -39,13 +41,13 @@ def file_get_pr(upstream_vcs_link, me_read):
|
||||
first_date_readme = ""
|
||||
for commit in commits:
|
||||
if "Merge" in commit['data'].keys():
|
||||
commit_list.append([commit['data']['CommitDate'], True])
|
||||
commit_list.append([commit['data']['CommitDate'], True, commit['data']['Author'], commit['data']['Commit']])
|
||||
if has_contributing:
|
||||
merge_post_cont += 1
|
||||
else:
|
||||
merge_pre_cont += 1
|
||||
else:
|
||||
commit_list.append([commit['data']['CommitDate'], False])
|
||||
commit_list.append([commit['data']['CommitDate'], False, commit['data']['Author'], commit['data']['Commit']])
|
||||
files = commit['data']['files']
|
||||
#print(commit['data']['CommitDate'])
|
||||
#print(type(dateutil.parser.parse(commit['data']['CommitDate'])))
|
||||
@ -61,40 +63,74 @@ def file_get_pr(upstream_vcs_link, me_read):
|
||||
return {}
|
||||
if me_read:
|
||||
project_dict['first_readme'] = first_date_readme
|
||||
before_read = pr_count(first_date_readme+ dt.timedelta(days=-window, hours=0), first_date_readme, commit_list)
|
||||
project_dict['b6w_prs_read'] = before_read[0]
|
||||
project_dict['b6w_mrg_read'] = before_read[1]
|
||||
after_read = pr_count(first_date_readme, first_date_readme + dt.timedelta(days=window, hours=0), commit_list)
|
||||
project_dict['a6w_prs_read'] = after_read[0]
|
||||
project_dict['a6w_mrg_read'] = after_read[1]
|
||||
before_read = pr_count(first_date_readme+ dt.timedelta(days=-window, hours=0), first_date_readme, commit_list, [], [])
|
||||
if before_read != None:
|
||||
project_dict['b6w_prs_read'] = before_read[0]
|
||||
project_dict['b6w_mrg_read'] = before_read[1]
|
||||
project_dict['b6w_auth_read'] = before_read[2]
|
||||
project_dict['b6w_commit_read'] = before_read[3]
|
||||
else:
|
||||
return {}
|
||||
after_read = pr_count(first_date_readme, first_date_readme + dt.timedelta(days=window, hours=0), commit_list, before_read[4], before_read[5])
|
||||
if after_read != None:
|
||||
project_dict['a6w_prs_read'] = after_read[0]
|
||||
project_dict['a6w_mrg_read'] = after_read[1]
|
||||
project_dict['a6w_auth_read'] = after_read[2]
|
||||
project_dict['a6w_commit_read'] = after_read[3]
|
||||
else:
|
||||
return {}
|
||||
else:
|
||||
project_dict['first_contributing'] = first_date_contributing
|
||||
before_cont = pr_count(first_date_contributing + dt.timedelta(days=-window, hours=0), first_date_contributing, commit_list)
|
||||
project_dict['b6w_prs_cont'] = before_cont[0]
|
||||
project_dict['b6w_mrg_cont'] = before_cont[1]
|
||||
after_cont = pr_count(first_date_contributing, first_date_contributing + dt.timedelta(days=window, hours=0), commit_list)
|
||||
project_dict['a6w_prs_cont'] = after_cont[0]
|
||||
project_dict['a6w_mrg_cont'] = after_cont[1]
|
||||
before_cont = pr_count(first_date_contributing + dt.timedelta(days=-window, hours=0), first_date_contributing, commit_list, [], [])
|
||||
if before_cont != None:
|
||||
project_dict['b6w_prs_cont'] = before_cont[0]
|
||||
project_dict['b6w_mrg_cont'] = before_cont[1]
|
||||
project_dict['b6w_auth_cont'] = before_cont[2]
|
||||
project_dict['b6w_commit_cont'] = before_cont[3]
|
||||
else:
|
||||
return {}
|
||||
after_cont = pr_count(first_date_contributing, first_date_contributing + dt.timedelta(days=window, hours=0), commit_list, before_cont[4], before_cont[5])
|
||||
if after_cont != None:
|
||||
project_dict['a6w_prs_cont'] = after_cont[0]
|
||||
project_dict['a6w_mrg_cont'] = after_cont[1]
|
||||
project_dict['a6w_auth_cont'] = after_cont[2]
|
||||
project_dict['a6w_commit_cont'] = after_cont[3]
|
||||
else:
|
||||
return {}
|
||||
print(project_dict)
|
||||
return project_dict
|
||||
|
||||
|
||||
#TODO: pr_count should return an array of values for weekly/6mo
def pr_count(start, end, commits, author_roster, commit_roster):
    """Bucket commit / merge (PR) activity by week inside a time window.

    Parameters:
        start, end: window boundaries (datetimes). Commits dated <= start
            only seed the rosters; commits after ``start`` are bucketed into
            26 weekly bins; the first commit dated after ``end`` ends the scan.
        commits: rows of ``[commit_date_str, is_merge, author, committer]``
            (as built by the caller from perceval commit data); assumed to be
            in chronological order — TODO confirm against the caller.
        author_roster, commit_roster: previously seen authors / merge
            committers. Mutated in place and returned so the caller can
            thread them into the next window's call.

    Returns:
        ``[by_week, by_week_merge, new_authors, new_committers,
        author_roster, commit_roster]``, or ``None`` when no commit is dated
        after ``end`` (callers treat ``None`` as "window not covered" and
        bail out with an empty dict).
    """
    by_week = [0] * 27        # weekly commit counts (weeks 0..26 from start)
    by_week_merge = [0] * 27  # weekly merge-commit (PR) counts
    new_authors = 0
    new_committers = 0
    for commit in tqdm(commits):
        # Parse once per row instead of re-parsing for every comparison.
        commit_date = dateutil.parser.parse(commit[0])
        if commit_date <= start:
            # Before the window: only record who has already been seen.
            if commit[2] not in author_roster:
                author_roster.append(commit[2])
            if commit[1] and commit[3] not in commit_roster:
                commit_roster.append(commit[3])
        if commit_date > start:
            week = math.floor((commit_date - start).days / 7)
            if week <= 26:
                by_week[week] += 1
                if commit[1]:
                    by_week_merge[week] += 1
                    if commit[3] not in commit_roster:
                        new_committers += 1
                        # remaining question of whether to make this the author
                        # of the merge commit[2] or the committer of the merge
                        # commit[3]
                        commit_roster.append(commit[3])
                # NOTE(review): nesting reconstructed from a mangled diff —
                # new-author tracking is assumed to apply to every in-window
                # commit, not only merges (mirroring the pre-window roster
                # seeding above); confirm against the original history.
                if commit[2] not in author_roster:
                    new_authors += 1
                    author_roster.append(commit[2])
        if commit_date > end:
            print(len(by_week))
            return [by_week, by_week_merge, new_authors, new_committers,
                    author_roster, commit_roster]
|
||||
|
||||
#TODO: need to do this for all files in the dataset of readme or contributing
|
||||
def for_files():
|
||||
@ -106,16 +142,17 @@ def for_files():
|
||||
keys = ['upstream_vcs_link', "first_readme", "b6w_prs_read", "b6w_mrg_read", "a6w_prs_read", "a6w_mrg_read"]
|
||||
dict_writer = csv.DictWriter(writing_file, keys)
|
||||
dict_writer.writeheader()
|
||||
#training wheels on right now
|
||||
for row in csv_reader:
|
||||
count += 1
|
||||
print(row['upstream_vcs_link'])
|
||||
# this would have to get switched to false for the cont dataset
|
||||
dict_row = file_get_pr(row['upstream_vcs_link'].strip(), True)
|
||||
dict_writer.writerow(dict_row)
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Batch mode: walk every project listed in the input CSV.
    for_files()
    # Single-repo invocations kept around for manual spot checks:
    #file_get_pr("https://github.com/tqdm/tqdm", False)
    #file_get_pr("https://github.com/GameServerManagers/LinuxGSM")
    #file_get_pr("https://github.com/tqdm/tqdm", True)
    #file_get_pr("https://github.com/GameServerManagers/LinuxGSM", True)
    #file_get_pr("https://github.com/walling/unorm/issues/new/", True)
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user