"""Collect the first-commit date for a list of upstream repositories.

For each upstream VCS link in the input CSV, clone the repository with
perceval's Git backend, record the date of its first (oldest) commit, and
write one row per repository to an output CSV.
"""
import csv
import datetime as dt
import math
import os
import shutil
import time
from datetime import timezone

import dateutil
from perceval.backends.core.git import Git
from tqdm import tqdm

# API key read from the environment (not used in the visible code; kept for
# compatibility with other parts of the project).
key = os.environ.get('KKEXKEY')

# Scratch locations for temporary git clones.
temp_dir = "/data/users/mgaughan/tmp/"
temp_dir1 = "tmp/"

'''
- rate of change, rate of all/day
'''


def file_get_contribs(upstream_vcs_link, event_date):
    """Clone *upstream_vcs_link* and return a dict with its first commit date.

    Parameters
    ----------
    upstream_vcs_link : str
        URL of the upstream repository (GitHub/GitLab or another git host).
    event_date : str
        Date of the event of interest. Currently unused; kept so the
        signature stays compatible with existing callers.

    Returns
    -------
    dict
        ``{'upstream_vcs_link': ..., 'first_commit': ...}``. The
        ``'first_commit'`` key is absent when the repository has no
        commits; an empty dict is returned for known-problematic repos.
    """
    # Window of days on either side of the event (currently unused here).
    window = 56
    project_dict = {'upstream_vcs_link': upstream_vcs_link}
    upstream_vcs_link = upstream_vcs_link.strip()
    if "github" in upstream_vcs_link or "gitlab" in upstream_vcs_link:
        # Making an evaluation that sub-branches aren't being used and that
        # people would fork if needed — this only looks at main, so keep just
        # the first five URL components (scheme//host/org/repo).
        upstream_vcs_link = "/".join(upstream_vcs_link.split("/")[0:5])
        print(upstream_vcs_link)
        full_temp_path = temp_dir + upstream_vcs_link.split('/')[4] + ".git"
    else:
        full_temp_path = temp_dir + upstream_vcs_link.split('/')[-1] + ".git"
        print(upstream_vcs_link)
    # Known-problematic repositories: clean up any stale clone and skip.
    if upstream_vcs_link in ("https://gitlab.com/ubports/core",
                             "https://gitlab.freedesktop.org/xorg/lib"):
        shutil.rmtree(full_temp_path, ignore_errors=True)
        return {}
    repo = Git(uri=upstream_vcs_link, gitpath=full_temp_path)
    try:
        # perceval yields commits oldest-first, so the first item fetched is
        # the repository's first commit; stop immediately after reading it.
        for commit in repo.fetch():
            project_dict['first_commit'] = commit['data']['CommitDate']
            break
    finally:
        # Always remove the temporary clone, even if fetch() raises
        # (the original leaked the clone directory on failure).
        shutil.rmtree(full_temp_path, ignore_errors=True)
    print(project_dict)
    return project_dict


def for_files():
    """Read the contributor CSV and write each repo's first-commit date.

    Reads ``final_data/deb_contrib_did.csv`` and writes one row per
    repository to ``062424_did_first_commit_contrib.csv``.
    """
    csv_path = "final_data/deb_contrib_did.csv"
    count = 0
    # newline='' is required by the csv module to avoid blank interleaved
    # rows on platforms with \r\n line endings.
    with open(csv_path, 'r') as in_file, \
         open('062424_did_first_commit_contrib.csv', "w", newline='') as out_file:
        csv_reader = csv.DictReader(in_file)
        # This would also have to get switched for the cont dataset.
        keys = ['upstream_vcs_link', "first_commit"]
        dict_writer = csv.DictWriter(out_file, keys)
        dict_writer.writeheader()
        for row in csv_reader:
            count += 1
            print(row['upstream_vcs_link'])
            dict_row = file_get_contribs(row['upstream_vcs_link'].strip(),
                                         row['event_date'])
            dict_writer.writerow(dict_row)


if __name__ == "__main__":
    for_files()