# 24_deb_pkg_gov/pr_data_get.py

import csv
from perceval.backends.core.git import Git
import os
import datetime as dt
import time
import shutil
import pandas as pd
import dateutil.parser
from tqdm import tqdm
import math
key = os.environ.get('KKEXKEY')
early_cutoff = dt.datetime(2008,2, 8)
temp_dir = "/data/users/mgaughan/tmp/"
'''
- rate of change, rate of all/day
'''
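# This script walks the upstream VCS links in a Debian package roster, finds the first
# commit that touched a README (or CONTRIBUTING) file, and tallies weekly commit and
# merge activity in the 26-week windows before and after that event.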
def file_get_pr(upstream_vcs_link, me_read):
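    """
    Clone the repository at upstream_vcs_link with perceval and find the first commit
    that touched a README (me_read=True) or CONTRIBUTING (me_read=False) file.

    Returns a dict with the upstream link, the date of that first commit, the weekly
    commit and merge counts for the 182-day windows before and after it, and the counts
    of new authors and committers in each window; returns an empty dict when the
    repository can't be processed or the relevant file is never found.
    """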
    # me_read is True when we're looking at README files and False when we're looking at CONTRIBUTING files
    # this is the window of days on either side of the event that we're looking at
    window = 182
    #print(upstream_vcs_link.split('/')[4])
    project_dict = {}
    project_dict['upstream_vcs_link'] = upstream_vcs_link
    upstream_vcs_link = upstream_vcs_link.strip()
    if "github" in upstream_vcs_link or "gitlab" in upstream_vcs_link:
        # assuming that sub-branches aren't being used and that people would fork if needed;
        # this only looks at the main branch
        upstream_vcs_link = "/".join(upstream_vcs_link.split("/")[0:5])
        print(upstream_vcs_link)
        full_temp_path = temp_dir + upstream_vcs_link.split('/')[4] + ".git"
    else:
        full_temp_path = temp_dir + upstream_vcs_link.split('/')[-1] + ".git"
    print(upstream_vcs_link)
    # these two upstream links are skipped entirely
    if upstream_vcs_link == "https://gitlab.com/ubports/core" or upstream_vcs_link == "https://gitlab.freedesktop.org/xorg/lib":
        shutil.rmtree(full_temp_path, ignore_errors=True)
        return {}
    repo = Git(uri=upstream_vcs_link, gitpath=full_temp_path)
    try:
        commits = repo.fetch()
    except Exception:
        print("perceval issue")
        return {}
    has_readme = False
    has_contributing = False
    merge_pre_rm, merge_post_rm, merge_pre_cont, merge_post_cont = 0, 0, 0, 0
    # list of [commit date, is_merge, author, committer] entries, one per commit
    commit_list = []
    first_date_readme = ""
    first_date_contributing = ""
    for commit in commits:
        #print(commit['data'])
        if "Merge" in commit['data'].keys():
            commit_list.append([commit['data']['CommitDate'], True, commit['data']['Author'], commit['data']['Commit']])
            if has_contributing:
                merge_post_cont += 1
            else:
                merge_pre_cont += 1
        else:
            commit_list.append([commit['data']['CommitDate'], False, commit['data']['Author'], commit['data']['Commit']])
        files = commit['data']['files']
        #print(commit['data']['CommitDate'])
        #print(type(dateutil.parser.parse(commit['data']['CommitDate'])))
        for file in files:
            if "CONTRIBUTING" in file['file'] and not has_contributing:
                has_contributing = True
                first_date_contributing = dateutil.parser.parse(commit['data']['CommitDate'])
            if "README" in file['file'] and not has_readme:
                has_readme = True
                first_date_readme = dateutil.parser.parse(commit['data']['CommitDate'])
                project_dict['readme_commit_hash'] = commit['data']['commit']
    shutil.rmtree(full_temp_path, ignore_errors=True)
    if first_date_readme == "":
        return {}
    if not me_read and first_date_contributing == "":
        return {}
    if me_read:
        project_dict['first_readme'] = first_date_readme
        before_read = pr_count(first_date_readme + dt.timedelta(days=-window), first_date_readme, commit_list, [], [])
        if before_read is not None:
            project_dict['before_allcom_read'] = before_read[0]
            project_dict['before_mrg_read'] = before_read[1]
            project_dict['before_auth_new'] = before_read[2]
            project_dict['before_commit_new'] = before_read[3]
        else:
            return {}
        after_read = pr_count(first_date_readme, first_date_readme + dt.timedelta(days=window), commit_list, before_read[4], before_read[5])
        if after_read is not None:
            project_dict['after_allcom_read'] = after_read[0]
            project_dict['after_mrg_read'] = after_read[1]
            project_dict['after_auth_new'] = after_read[2]
            project_dict['after_commit_new'] = after_read[3]
        else:
            return {}
    else:
        project_dict['first_contributing'] = first_date_contributing
        before_cont = pr_count(first_date_contributing + dt.timedelta(days=-window), first_date_contributing, commit_list, [], [])
        if before_cont is not None:
            project_dict['before_allcom_cont'] = before_cont[0]
            project_dict['before_mrg_cont'] = before_cont[1]
            project_dict['before_auth_new'] = before_cont[2]
            project_dict['before_commit_new'] = before_cont[3]
        else:
            return {}
        after_cont = pr_count(first_date_contributing, first_date_contributing + dt.timedelta(days=window), commit_list, before_cont[4], before_cont[5])
        if after_cont is not None:
            project_dict['after_allcom_cont'] = after_cont[0]
            project_dict['after_mrg_cont'] = after_cont[1]
            project_dict['after_auth_new'] = after_cont[2]
            project_dict['after_commit_new'] = after_cont[3]
        else:
            return {}
    print(project_dict)
    return project_dict

#TODO: pr_count should return an array of values for weekly/6mo
def pr_count(start, end, commits, author_roster, commit_roster):
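    """
    Tally weekly activity between start and end over the commit list built in
    file_get_pr, where each entry is [commit date, is_merge, author, committer].

    Commits dated on or before start only update the author/committer rosters.
    Returns [weekly commit counts, weekly merge counts, new authors, new committers,
    updated author_roster, updated commit_roster] once a commit past end is reached;
    if no commit is dated after end, the loop falls through and the function
    implicitly returns None, which callers check for.
    """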
    count = 0
    merge_count = 0
    by_week = [0] * 27
    by_week_merge = [0] * 27
    current_week = 0
    new_authors = 0
    new_committers = 0
    for commit in tqdm(commits):
        commit_date = dateutil.parser.parse(commit[0])
        if commit_date <= start:
            if commit[2] not in author_roster:
                author_roster.append(commit[2])
            if commit[1] and commit[3] not in commit_roster:
                commit_roster.append(commit[3])
        if commit_date > start:
            week = math.floor((commit_date - start).days / 7)
            if week <= 26:
                by_week[week] += 1
                if commit[1]:
                    by_week_merge[week] += 1
                    if commit[3] not in commit_roster:
                        new_committers += 1
                        # remaining question of whether to make this the author of the merge commit[2] or the committer of the merge commit[3]
                        commit_roster.append(commit[3])
                if commit[2] not in author_roster:
                    new_authors += 1
                    author_roster.append(commit[2])
        if commit_date > end:
            print(len(by_week))
            return [by_week, by_week_merge, new_authors, new_committers, author_roster, commit_roster]

def for_files():
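    """
    Read the readme roster CSV and, for each upstream_vcs_link, write the dict
    returned by file_get_pr(..., True) as a row of kk_031624_pr_did.csv; repositories
    that raise an exception get an empty row.
    """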
    #csv_path = "final_data/deb_contribfile_roster.csv"
    csv_path = "final_data/deb_readme_roster.csv"
    count = 0
    with open(csv_path, 'r') as file:
        csv_reader = csv.DictReader(file)
        with open('kk_031624_pr_did.csv', "w") as writing_file:
            # this would also have to get switched for the cont dataset
            keys = ['upstream_vcs_link', "first_readme", "readme_commit_hash", "before_allcom_read", "before_mrg_read", "after_allcom_read", "after_mrg_read", 'before_auth_new', 'after_commit_new', 'after_auth_new', 'before_commit_new']
            dict_writer = csv.DictWriter(writing_file, keys)
            dict_writer.writeheader()
            for row in csv_reader:
                count += 1
                print(row['upstream_vcs_link'])
                # this would have to get switched to False for the cont dataset
                try:
                    dict_row = file_get_pr(row['upstream_vcs_link'].strip(), True)
                except Exception:
                    dict_row = {}
                dict_writer.writerow(dict_row)
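
# A minimal sketch of the CONTRIBUTING-file variant of for_files(), following the
# "switch for the cont dataset" comments above: the roster path, the key list, and the
# me_read flag change. The function name and output filename here are placeholders, not
# taken from this file; the keys mirror what file_get_pr() sets when me_read is False.
def for_files_contributing():
    csv_path = "final_data/deb_contribfile_roster.csv"
    with open(csv_path, 'r') as file:
        csv_reader = csv.DictReader(file)
        # placeholder output name for the contributing-file run
        with open('kk_pr_did_contrib.csv', "w") as writing_file:
            keys = ['upstream_vcs_link', 'readme_commit_hash', 'first_contributing',
                    'before_allcom_cont', 'before_mrg_cont', 'after_allcom_cont', 'after_mrg_cont',
                    'before_auth_new', 'after_commit_new', 'after_auth_new', 'before_commit_new']
            dict_writer = csv.DictWriter(writing_file, keys)
            dict_writer.writeheader()
            for row in csv_reader:
                print(row['upstream_vcs_link'])
                try:
                    # me_read=False selects the CONTRIBUTING branch of file_get_pr
                    dict_row = file_get_pr(row['upstream_vcs_link'].strip(), False)
                except Exception:
                    dict_row = {}
                dict_writer.writerow(dict_row)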
if __name__ == "__main__":
    for_files()
    #file_get_pr("https://github.com/tqdm/tqdm", True)
    #file_get_pr("https://github.com/GameServerManagers/LinuxGSM", True)
    #file_get_pr("https://github.com/walling/unorm/issues/new/", True)
    #file_get_pr("https://github.com/krahets/hello-algo/tree/dev1", True)