#library(tidyverse)
library(dplyr)
library(lubridate)
library(tidyr)
library(purrr)
library(stringr)
library(ggplot2) # ggplot(), geom_col(), and ggsave() are used below

https_commit_fp <- "/mmfs1/gscratch/comdata/users/mjilg/mw-repo-lifecycles/case2/mediawiki_core_commits.csv"

# Flag commit messages that mention HTTP, login, SSL/TLS, or certificates
# outside of a literal URL.
contains_http_but_not_url <- function(text) {
  if (is.na(text)) {
    return(FALSE)
  }
  # Split text by whitespace and check each word
  words <- str_split(text, "\\s+")[[1]]
  for (word in words) {
    # skip words that are URLs (anything containing "://")
    if (!str_detect(word, "://")) {
      if (str_detect(word, "http")) {
        return(TRUE)
      }
      if (str_detect(word, "login")) {
        return(TRUE)
      }
      if (str_detect(word, "ssl")) {
        return(TRUE)
      }
      if (str_detect(word, "tls")) {
        return(TRUE)
      }
      # "cert", "certificate", etc., but not words like "certain"
      if (startsWith(word, "cert") && !startsWith(word, "certain")) {
        return(TRUE)
      }
    }
  }
  return(FALSE)
}
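
# A quick sanity check of the keyword filter (illustrative strings, not drawn
# from the commit data):
stopifnot(
  contains_http_but_not_url("force https redirect on login"),  # keyword hit
  !contains_http_but_not_url("see https://example.org"),       # URL-only mention
  !contains_http_but_not_url("certainly unrelated cleanup")    # "certain" is excluded
)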

transform_relevant_commit_data <- function(filepath) {
  # load the commit-level CSV
  df <- read.csv(filepath, header = TRUE)
  dir_path <- dirname(filepath)
  file_name <- basename(filepath)

  # TODO: this is project/event specific
  event_date <- as.Date("2013-08-28")
  #event_date <- as.Date("2013-07-01")
  #event_date <- as.Date("2013-04-25")
  #event_date <- as.Date("2012-12-11")

  # isolate the project id from the file name
  project_id <- sub("_.*$", "", file_name)

  # make sure the dates are parsed correctly and record the project_id
  df <- df |>
    mutate(commit_date = ymd_hms(commit_date)) |>
    mutate(project_id = project_id)

  # get the project age at the "present" (2025-02-10),
  # measured from the first commit
  oldest_commit_date <- min(as.Date(df$commit_date))
  project_age <- as.numeric(as.Date("2025-02-10") - oldest_commit_date)

  # add that to the data
  df <- df |>
    mutate(age = project_age)

  # drop data from before the release process
  df <- df |>
    filter(commit_date >= as.Date("2011-09-03"))

  # we are looking at weekly data over a fixed study window
  #start_date <- event_date %m-% months(6)
  #calculated_start_date <- event_date %m-% months(12)
  start_date <- as.Date("2011-09-03")
  end_date <- as.Date("2015-10-03")

  # index each date by its week relative to the event date
  relative_week <- function(date, ref_date) {
    as.integer(as.numeric(difftime(date, ref_date, units = "days")) %/% 7)
  }
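  # e.g. with event_date = 2013-08-28, relative_week(as.Date("2013-09-04"), event_date)
  # is 1; floor division gives pre-event dates negative week numbers, and
  # week 0 begins on the event date itself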

  df <- df |>
    mutate(relative_week = relative_week(commit_date, event_date)) |>
    mutate(mentions_http = map_lgl(message, contains_http_but_not_url))
    # arrange(relative_week) |>
    # group_by(author_email) |>
    # mutate(new_author = ifelse(row_number() <= 5, 1, 0),
    #        new_author_wmf = if_else(grepl("@wikimedia", author_email), new_author, 0),
    #        new_author_unaff = if_else(!grepl("@wikimedia", author_email), new_author, 0)) |>
    # ungroup()

  # cut the df to the pre-event window (2011-09-03 to 2013-08-01),
  # group by author_email, and list all author_emails with >5 commits;
  # any author not in that list is treated as 'new'
  old_author_list <- df |>
    filter(commit_date > as.Date("2011-09-03") & commit_date < as.Date("2013-08-01")) |>
    group_by(author_email) |>
    summarise(commit_count = n()) |>
    filter(commit_count > 5) |>
    pull(author_email)

  # label authors as 'new' if they are not in old_author_list
  df <- df |>
    mutate(new_author = ifelse(author_email %in% old_author_list, 0, 1),
           new_author_wmf = if_else(grepl("@wikimedia", author_email),
                                    new_author, 0),
           new_author_unaff = if_else(!grepl("@wikimedia", author_email) &
                                        !grepl("l10n-bot@translatewiki.net|tools.libraryupgrader@tools.wmflabs.org", author_email),
                                      new_author, 0))
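  # note: new_author_unaff excludes both @wikimedia addresses and the two
  # bot accounts matched above, so bots never count as new unaffiliated authors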

  # filler for when there are weeks without commits
  all_weeks <- seq(relative_week(start_date, event_date), relative_week(end_date, event_date))
  complete_weeks_df <- expand.grid(relative_week = all_weeks,
                                   project_id = project_id,
                                   age = project_age)
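  # complete_weeks_df has one row per week in the study window; right_join()ing
  # onto it below keeps zero-commit weeks in the panel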

  # for each week, get the list of unique authors that committed
  #cumulative_authors <- df %>%
  #  arrange(relative_week) %>%
  #  group_by(relative_week) %>%
  #  summarize(cumulative_author_emails = list(unique(author_email)), .groups = 'drop')
  # same for each committer
  #cumulative_committers <- df %>%
  #  arrange(relative_week) %>%
  #  group_by(relative_week) %>%
  #  summarize(cumulative_committer_emails = list(unique(committer_email)), .groups = 'drop')

  # now cut out commits from bot accounts, which we don't care about
  df <- df |>
    filter(author_email != "jenkins-bot@gerrit.wikimedia.org") |>
    filter(author_email != "l10n-bot@translatewiki.net") |>
    filter(author_email != "tools.libraryupgrader@tools.wmflabs.org")

  # in order:
  # - group by project, week, and age
  # - summarize commit and authorship details
  # - fill in zeros for weeks with no commits
  # - add a before/after indicator relative to the event
  weekly_commits <- df |>
    group_by(project_id, relative_week, age) |>
    summarise(commit_count = n(),
              author_emails = list(unique(author_email)),
              committer_emails = list(unique(committer_email)),
              mediawiki_dev_commit_count = sum(grepl("@users.mediawiki.org", author_email)),
              wikimedia_commit_count = sum(grepl("@wikimedia", author_email)),
              wikia_commit_count = sum(grepl("@wikia-inc.com", author_email)),
              # the two bots matched here were already filtered out above, so
              # this count stays 0 unless that filter is relaxed
              bot_commit_count = sum(grepl("l10n-bot@translatewiki.net|tools.libraryupgrader@tools.wmflabs.org", author_email)),
              wmf_new_commit_count = sum(new_author_wmf),
              unaff_new_commit_count = sum(new_author_unaff),
              relevant_commits = sum(mentions_http),
              .groups = 'drop') |>
    right_join(complete_weeks_df, by = c("relative_week", "project_id", "age")) |>
    replace_na(list(commit_count = 0,
                    mediawiki_dev_commit_count = 0,
                    wikimedia_commit_count = 0,
                    wikia_commit_count = 0,
                    bot_commit_count = 0,
                    wmf_new_commit_count = 0,
                    unaff_new_commit_count = 0,
                    relevant_commits = 0)) |>
    mutate(before_after = if_else(relative_week < 0, 0, 1)) |>
    select(-author_emails, -committer_emails)

  # trim to the study window: roughly two years on either side of the event
  weekly_commits <- weekly_commits |>
    filter(relative_week >= -103 & relative_week <= 109)

  return(weekly_commits)
}

transformed <- transform_relevant_commit_data(https_commit_fp)
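# transformed has one row per relative_week in [-103, 109], with the per-week
# commit counts and the relevant/new-author breakdowns computed above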

transformed$irrelevant_commit <- transformed$commit_count - transformed$relevant_commits

long_df <- transformed |>
  tidyr::pivot_longer(cols = c(irrelevant_commit, relevant_commits),
                      names_to = "commit_relevance",
                      values_to = "lengthened_commit_count")
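# long_df has one row per (relative_week, relevance class), the shape that
# geom_col(position = "dodge") expects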

relevant_https_commits <- long_df |>
  ggplot(aes(x = relative_week,
             y = lengthened_commit_count,
             fill = commit_relevance)) +
  geom_col(position = "dodge", width = 0.7) +
  labs(x = "Relative Week", y = "Commits", fill = "Commit relevance") +
  scale_fill_manual(values = c("irrelevant_commit" = "#E1BE6A",
                               "relevant_commits" = "#40B0A6"),
                    labels = c("irrelevant_commit" = "Irrelevant Commits",
                               "relevant_commits" = "Relevant Commits")) +
  ggtitle("Commits to MW-Core, 2011-09-03 to 2015-10-03, by relevance to HTTP/S feature deployments") +
  theme_bw() +
  theme(legend.position = "top")

relevant_https_commits

ggsave(filename = "ww-c2c3-relevance-viz.png", plot = relevant_https_commits, width = 12, height = 9, dpi = 800)