# 24_deb_pkg_gov/R/readme_docChar_outcomes.R

# libraries
library(plyr)      # ddply(), join(); loaded before dplyr so dplyr verbs win
library(tidyverse) # readr, dplyr, tidyr, purrr, stringr
readme_df <- read_csv("../final_data/deb_readme_did.csv")
readme_pop_df <- read_csv("../final_data/deb_readme_pop_change.csv")
readme_readability_df <- read_csv("../text_analysis/dwo_readability_readme.csv")
# get the contribution counts
# some preprocessing and expansion
col_order <- c("upstream_vcs_link", "age_in_days", "first_commit", "first_commit_dt", "event_gap", "event_date", "event_hash", "before_all_ct", "after_all_ct", "before_mrg_ct", "after_mrg_ct", "before_auth_new", "after_auth_new", "before_commit_new", "after_commit_new")
readme_df <- readme_df[,col_order]
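# the *_ct columns hold stringified lists of weekly counts (e.g. "[0, 1, 2]");
# strip the brackets and split each into a per-week character vector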
readme_df$ct_before_all <- str_split(gsub("[][]","", readme_df$before_all_ct), ", ")
readme_df$ct_after_all <- str_split(gsub("[][]","", readme_df$after_all_ct), ", ")
readme_df$ct_before_mrg <- str_split(gsub("[][]","", readme_df$before_mrg_ct), ", ")
readme_df$ct_after_mrg <- str_split(gsub("[][]","", readme_df$after_mrg_ct), ", ")
drop <- c("before_all_ct", "before_mrg_ct", "after_all_ct", "after_mrg_ct")
readme_df <- readme_df[, !(names(readme_df) %in% drop)]
# some expansion needs to happen for each project
expand_timeseries <- function(project_row) {
  longer <- project_row |>
    pivot_longer(cols = starts_with("ct"),
                 names_to = "window",
                 values_to = "count") |>
    unnest(count)
  # observation_type is the suffix of the window name ("all" or "mrg")
  longer$observation_type <- gsub("^.*_", "", longer$window)
  # number the weeks 0, 1, 2, ... within each observation type
  longer <- ddply(longer, "observation_type", transform,
                  week = seq(from = 0, by = 1, length.out = length(observation_type)))
  longer$count <- as.numeric(longer$count)
  #longer <- longer[which(longer$observation_type == "all"),]
  return(longer)
}
# expand every project row and stack the results into one long data frame
expanded_data <- do.call(rbind, lapply(seq_len(nrow(readme_df)),
                                       function(i) expand_timeseries(readme_df[i, ])))
# keep only the weeks inside the analysis window (the series is centered on
# week 27, with window_num weeks on either side)
window_num <- 8
windowed_data <- expanded_data |>
  filter(week >= (27 - window_num) & week <= (27 + window_num)) |>
  mutate(D = ifelse(week > 27, 1, 0)) # D = 1 marks post-event weeks
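# sanity-check sketch, left commented out so it does not alter the pipeline
# (assumption: no missing weeks in the source data); any rows returned here
# would be projects with an incomplete window:
# windowed_data |>
#   count(upstream_vcs_link, observation_type) |>
#   filter(n != 2 * window_num + 1)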
# total post-event activity per project
summed_data <- windowed_data |>
  filter(D == 1) |>
  group_by(upstream_vcs_link) |>
  summarise(summed_count = sum(count))
# derive project_name keys so the dataframes can be joined into one central dataset
# project_name = final path component of the upstream VCS link
readme_pop_df <- readme_pop_df |>
  mutate(project_name = map_chr(upstream_vcs_link, ~ {
    parts <- str_split(.x, pattern = "/")[[1]]
    if (length(parts) >= 1) {
      parts[length(parts)]
    } else {
      NA_character_
    }
  }))
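# manual fixes: these upstream links end in /issues/new, so the final path
# component is not the project name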
readme_pop_df[readme_pop_df['upstream_vcs_link'] == "https://github.com/agateau/yokadi/issues/new", "project_name"] = "yokadi"
readme_pop_df[readme_pop_df['upstream_vcs_link'] == "http://github.com/voloko/twitter-stream/issues/new", "project_name"] = "twitter-stream"
readme_pop_df[readme_pop_df['upstream_vcs_link'] == "https://github.com/ai/autoprefixer-rails/issues/new", "project_name"] = "autoprefixer-rails"
readme_pop_df[readme_pop_df['upstream_vcs_link'] == "https://github.com/aquasync/ruby-ole/issues/new", "project_name"] = "ruby-ole"
readme_pop_df[readme_pop_df['upstream_vcs_link'] == "https://github.com/bluemonk/ipaddress/issues/new", "project_name"] = "ipaddress"
readme_pop_df[readme_pop_df['upstream_vcs_link'] == "https://github.com/ccocchi/rabl-rails/issues/new", "project_name"] = "rabl-rails"
readme_pop_df[readme_pop_df['upstream_vcs_link'] == "https://github.com/dejan/espeak-ruby/issues/new", "project_name"] = "espeak-ruby"
readme_pop_df[readme_pop_df['upstream_vcs_link'] == "https://github.com/hadley/plyr/issues/new", "project_name"] = "plyr"
readme_pop_df[readme_pop_df['upstream_vcs_link'] == "https://github.com/jfelchner/ruby-progressbar/issues/new", "project_name"] = "ruby-progressbar"
readme_pop_df[readme_pop_df['upstream_vcs_link'] == "https://github.com/sargon/trayer-srg/issues/new", "project_name"] = "trayer-srg"
readme_pop_df[readme_pop_df['upstream_vcs_link'] == "https://github.com/SciRuby/rb-gsl/issues/new", "project_name"] = "rb-gsl"
readme_pop_df[readme_pop_df['upstream_vcs_link'] == "https://github.com/solvespace/solvespace/issues/new", "project_name"] = "solvespace"
readme_pop_df[readme_pop_df['upstream_vcs_link'] == "https://github.com/walling/unorm/issues/new", "project_name"] = "unorm"
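# project_name = the filename minus its final underscore-delimited chunk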
readme_readability_df <- readme_readability_df |>
  mutate(project_name = map_chr(filename, ~ {
    parts <- str_split(.x, pattern = "_")[[1]]
    if (length(parts) >= 1) {
      paste(head(parts, -1), collapse = "_")
    } else {
      NA_character_
    }
  }))
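# manual fixes for filenames whose remaining portion itself contains underscores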
readme_readability_df[readme_readability_df['filename'] == "yder_README_8md.html", "project_name"] = "yder"
readme_readability_df[readme_readability_df['filename'] == "pg_filedump.git_README.pg_filedump", "project_name"] = "pg_filedump.git"
readme_readability_df[readme_readability_df['filename'] == "openvas_UPGRADE_README", "project_name"] = "openvas"
readme_readability_df[readme_readability_df['filename'] == "hyphen.git_README_hyph_en_US.txt", "project_name"] = "hyphen.git"
readme_readability_df[readme_readability_df['filename'] == "cycle.git_README_ru.html", "project_name"] = "cycle.git"
readme_readability_df[readme_readability_df['filename'] == "diffuse.git_README_ru", "project_name"] = "diffuse.git"
readme_readability_df[readme_readability_df['filename'] == "CheMPS2_README_8md_source.html", "project_name"] = "CheMPS2"
readme_readability_df[readme_readability_df['filename'] == "sleuthkit_README_win32.txt", "project_name"] = "sleuthkit"
readme_readability_df[readme_readability_df['filename'] == "Lmod_README_lua_modulefiles.txt", "project_name"] = "Lmod"
readme_readability_df[readme_readability_df['filename'] == "engauge_debian_README_for_osx", "project_name"] = "engauge_debian"
# join the popularity, readability, and activity dataframes
readme_total_df <- readme_pop_df |>
  join(readme_readability_df, by = "project_name") |>
  join(summed_data, by = "upstream_vcs_link")
# outcome variable: post-event commit count scaled by the number of new contributors
readme_total_df$commit_by_contrib <- readme_total_df$summed_count * (readme_total_df$after_contrib_new + 1)
readme_total_df$logged_outcome <- log1p(readme_total_df$commit_by_contrib)
# test regressions
library(MASS) # for glm.nb(); note that MASS::select masks dplyr::select
lm1 <- glm.nb(logged_outcome ~ reading_time + linsear_write_formula +
                flesch_reading_ease + mcalpine_eflaw + word_count,
              data = readme_total_df)
qqnorm(residuals(lm1)) # visual check of residual normality
summary(lm1)
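# a minimal robustness sketch, left commented out (assumption: an OLS fit on
# the logged outcome with the same readability predictors should point the
# same way as the negative binomial model):
# lm2 <- lm(logged_outcome ~ reading_time + linsear_write_formula +
#             flesch_reading_ease + mcalpine_eflaw + word_count,
#           data = readme_total_df)
# summary(lm2)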