clean model and draft duplicate for contrib

mjgaughan 2024-04-24 20:55:56 -05:00
parent 73d5c5b979
commit 153e7b7c16
3 changed files with 173 additions and 116 deletions

View File

@@ -1,90 +1,3 @@
expanded_sample_data <- expand_timeseries(sampled_data[1,])
for (i in 2:nrow(sampled_data)){
expanded_sample_data <- rbind(expanded_sample_data, expand_timeseries(sampled_data[i,]))
}
windowed_sample_data <- expanded_sample_data |>
filter(week >= (26 - window_num) & week <= (26 + window_num)) |>
mutate(D = ifelse(week > 26, 1, 0))
windowed_sample_data$scaled_project_age <- scale(windowed_sample_data$age_of_project)
windowed_sample_data$week_offset <- windowed_sample_data$week - 26
all_actions_sample_data <- windowed_sample_data[which(windowed_sample_data$observation_type == "all"),]
#test model
test_model <- lmer(count ~ D * I(week_offset) + scaled_project_age + (week_offset|upstream_vcs_link), data=all_actions_sample_data, REML=FALSE)
summary(test_model)
#plot results
p <- ggplot(all_actions_sample_data, aes(x=week_offset, y=count, color=upstream_vcs_link), show.legend = FALSE) +
geom_point(size=3, show.legend = FALSE) +
geom_line(aes(y=predict(test_model), group=upstream_vcs_link), show.legend = FALSE) +
theme_bw()
p
#test model
test_model <- lmer(count ~ D * I(week_offset) + scaled_project_age + (D * I(week_offset)|upstream_vcs_link), data=all_actions_sample_data, REML=FALSE)
summary(test_model)
#plot results
p <- ggplot(all_actions_sample_data, aes(x=week_offset, y=count, color=upstream_vcs_link), show.legend = FALSE) +
geom_point(size=3, show.legend = FALSE) +
geom_line(aes(y=predict(test_model), group=upstream_vcs_link), show.legend = FALSE) +
theme_bw()
p
#test model
test_model <- lmer(count ~ D * I(week_offset) + scaled_project_age + (week_offset|upstream_vcs_link), data=all_actions_sample_data, REML=FALSE)
summary(test_model)
#plot results
p <- ggplot(all_actions_sample_data, aes(x=week_offset, y=count, color=upstream_vcs_link), show.legend = FALSE) +
geom_point(size=3, show.legend = FALSE) +
geom_line(aes(y=predict(test_model), group=upstream_vcs_link), show.legend = FALSE) +
theme_bw()
p
##
all_model <- lmer(count ~ D * I(week_offset)+ scaled_project_age + (D * I(week_offset)| upstream_vcs_link), data=all_actions_data, REML=FALSE)
summary(all_model)
##
all_model <- lmer(count ~ D * I(week_offset)+ scaled_project_age + (week_offset| upstream_vcs_link), data=all_actions_data, REML=FALSE)
summary(all_model)
##
all_model <- lmer(count ~ D * I(week_offset)+ scaled_project_age + (D * I(week_offset)| upstream_vcs_link), data=all_actions_data, REML=FALSE)
summary(all_model)
#test model
test_model <- lmer(count ~ D * I(week_offset) + scaled_project_age + (D * I(week_offset)|upstream_vcs_link), data=all_actions_sample_data, REML=FALSE)
summary(test_model)
#plot results
p <- ggplot(all_actions_sample_data, aes(x=week_offset, y=count, color=upstream_vcs_link), show.legend = FALSE) +
geom_point(size=3, show.legend = FALSE) +
geom_line(aes(y=predict(test_model), group=upstream_vcs_link), show.legend = FALSE) +
theme_bw()
p
# this is the file with the lmer multi-level rddAnalysis
library(tidyverse)
library(plyr)
# 0 loading the readme data in
try(setwd(dirname(rstudioapi::getActiveDocumentContext()$path)))
readme_df <- read_csv("../final_data/deb_readme_did.csv")
# 1 preprocessing
#colnames(readme_df) <- c("upstream_vcs_link", "event_date", "event_hash", "before_all_ct", "before_mrg_ct", "after_all_ct", "after_mrg_ct", "before_auth_new", "after_commit_new", "after_auth_new", "before_commit_new")
col_order <- c("upstream_vcs_link", "age_of_project", "event_date", "event_hash", "before_all_ct", "after_all_ct", "before_mrg_ct", "after_mrg_ct", "before_auth_new", "after_auth_new", "before_commit_new", "after_commit_new")
readme_df <- readme_df[,col_order]
readme_df$ct_before_all <- str_split(gsub("[][]","", readme_df$before_all_ct), ", ")
readme_df$ct_after_all <- str_split(gsub("[][]","", readme_df$after_all_ct), ", ")
readme_df$ct_before_mrg <- str_split(gsub("[][]","", readme_df$before_mrg_ct), ", ")
readme_df$ct_after_mrg <- str_split(gsub("[][]","", readme_df$after_mrg_ct), ", ")
drop <- c("before_all_ct", "before_mrg_ct", "after_all_ct", "after_mrg_ct")
readme_df = readme_df[,!(names(readme_df) %in% drop)]
# 2 some expansion needs to happen for each project
expand_timeseries <- function(project_row) {
longer <- project_row |>
pivot_longer(cols = starts_with("ct"),
names_to = "window",
values_to = "count") |>
unnest(count)
longer$observation_type <- gsub("^.*_", "", longer$window)
longer <- ddply(longer, "observation_type", transform, week=seq(from=0, by=1, length.out=length(observation_type)))
longer$count <- as.numeric(longer$count)
#longer <- longer[which(longer$observation_type == "all"),]
return(longer)
}
expanded_data <- expand_timeseries(readme_df[1,])
for (i in 2:nrow(readme_df)){
expanded_data <- rbind(expanded_data, expand_timeseries(readme_df[i,]))
}
#filter out the windows of time that we're looking at
window_num <- 8
@@ -510,3 +423,90 @@ library(merTools)
ICC(outcome="count", group="week", data=all_actions_data)
ICC(outcome="count", group="upstream_vcs_link", data=all_actions_data)
ICC(outcome="count", group="week", data=all_actions_data)
# this is the file with the lmer multi-level rddAnalysis
library(tidyverse)
library(plyr)
# 0 loading the readme data in
try(setwd(dirname(rstudioapi::getActiveDocumentContext()$path)))
readme_df <- read_csv("../final_data/deb_readme_did.csv")
# 1 preprocessing
#colnames(readme_df) <- c("upstream_vcs_link", "event_date", "event_hash", "before_all_ct", "before_mrg_ct", "after_all_ct", "after_mrg_ct", "before_auth_new", "after_commit_new", "after_auth_new", "before_commit_new")
col_order <- c("upstream_vcs_link", "age_of_project", "event_date", "event_hash", "before_all_ct", "after_all_ct", "before_mrg_ct", "after_mrg_ct", "before_auth_new", "after_auth_new", "before_commit_new", "after_commit_new")
readme_df <- readme_df[,col_order]
readme_df$ct_before_all <- str_split(gsub("[][]","", readme_df$before_all_ct), ", ")
readme_df$ct_after_all <- str_split(gsub("[][]","", readme_df$after_all_ct), ", ")
readme_df$ct_before_mrg <- str_split(gsub("[][]","", readme_df$before_mrg_ct), ", ")
readme_df$ct_after_mrg <- str_split(gsub("[][]","", readme_df$after_mrg_ct), ", ")
drop <- c("before_all_ct", "before_mrg_ct", "after_all_ct", "after_mrg_ct")
readme_df = readme_df[,!(names(readme_df) %in% drop)]
# 2 some expansion needs to happen for each project
expand_timeseries <- function(project_row) {
longer <- project_row |>
pivot_longer(cols = starts_with("ct"),
names_to = "window",
values_to = "count") |>
unnest(count)
longer$observation_type <- gsub("^.*_", "", longer$window)
longer <- ddply(longer, "observation_type", transform, week=seq(from=0, by=1, length.out=length(observation_type)))
longer$count <- as.numeric(longer$count)
#longer <- longer[which(longer$observation_type == "all"),]
return(longer)
}
expanded_data <- expand_timeseries(readme_df[1,])
for (i in 2:nrow(readme_df)){
expanded_data <- rbind(expanded_data, expand_timeseries(readme_df[i,]))
}
#filter out the windows of time that we're looking at
window_num <- 8
windowed_data <- expanded_data |>
filter(week >= (27 - window_num) & week <= (27 + window_num)) |>
mutate(D = ifelse(week > 27, 1, 0))
#scale the age numbers
windowed_data$scaled_project_age <- scale(windowed_data$age_of_project)
windowed_data$week_offset <- windowed_data$week - 27
#separate the data by observation type (all vs. mrg)
all_actions_data <- windowed_data[which(windowed_data$observation_type == "all"),]
mrg_actions_data <- windowed_data[which(windowed_data$observation_type == "mrg"),]
all_actions_data$logged_count <- log(all_actions_data$count)
all_actions_data$log1p_count <- log1p(all_actions_data$count)
# 3 rdd in lmer analysis
# rdd: https://rpubs.com/phle/r_tutorial_regression_discontinuity_design
# lmer: https://www.youtube.com/watch?v=LzAwEKrn2Mc
library(lme4)
##end of the model testing and plotting section
all_model <- lmer(log1p_count ~ D * I(week_offset)+ scaled_project_age + (D * I(week_offset)| upstream_vcs_link), data=all_actions_data, REML=FALSE)
summary(all_model)
##end of the model testing and plotting section
all_model <- lmer(log1p_count ~ D * I(week_offset)+ scaled_project_age + (D * I(week_offset)| upstream_vcs_link), data=all_actions_data, REML=FALSE, control = lmerControl(optimizer ="Nelder_Mead"))
##end of the model testing and plotting section
all_model <- lmer(log1p_count ~ D * I(week_offset)+ scaled_project_age + (D * I(week_offset)| upstream_vcs_link), data=all_actions_data, REML=FALSE, control = lmerControl(optimizer ="Nelder_Mead"))
all_model <- lmer(log1p_count ~ D * I(week_offset)+ scaled_project_age + (D * I(week_offset)| upstream_vcs_link), data=all_actions_data, REML=FALSE, control = lmerControl(optimizer="optimx",
optCtrl=list(method="nlminb")))
all_model <- lmer(log1p_count ~ D * I(week_offset)+ scaled_project_age + (D * I(week_offset)| upstream_vcs_link), data=all_actions_data, REML=FALSE, control = lmerControl(
optimizer ='optimx', optCtrl=list(method='L-BFGS-B')))
summary(all_model)
all0_model <- lmer(log1p_count ~ D * I(week_offset)+ scaled_project_age + (D * I(week_offset)| upstream_vcs_link), data=all_actions_data, REML=FALSE)
summary(all0_model)
all_model <- lmer(log1p_count ~ D * I(week_offset)+ scaled_project_age + (D * I(week_offset)| upstream_vcs_link), data=all_actions_data, REML=FALSE, control = lmerControl(
optimizer ='optimx', optCtrl=list(method='L-BFGS-B')))
all_model.ranef <- ranef(all_model)
str(all_model.ranef)
head(all_model.ranef)
all_actions_data$D_effect_quart <- ntile(all_model.ranef$D, 4)
head(all_model.ranef)
all_model.ranef <- random.effects(all_model)
head(as.data.frame(all_model.ranef))
head(all_model_ranef)
all_model_ranef <- as.data.frame(ranef(all_model))
head(all_model_ranef)
d_effect_ranef_all <- subset(all_model_ranef, term="D")
d_effect_ranef_all <- all_model_ranef[all_model_ranef$term="D",]
d_effect_ranef_all <- all_model_ranef[all_model_ranef$term=="D",]
View(d_effect_ranef_all)
d_effect_ranef_all$quartile <- ntile(d_effect_ranef_all$condval, 4)
View(d_effect_ranef_all)
# mrg behavior for this
mrg_model <- lmer(log1p_count ~ D * I(week_offset)+ scaled_project_age + (D * I(week_offset)| upstream_vcs_link), data=all_actions_data, REML=FALSE, control = lmerControl(
optimizer ='optimx', optCtrl=list(method='L-BFGS-B')))
summary(mrg_model)
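## Editor's sketch (not part of the commit): the history above cycles through optimizers
## and between two random-effects structures, (week_offset | upstream_vcs_link) and
## (D * I(week_offset) | upstream_vcs_link). Assuming the all_actions_data frame built
## above, the two nested specifications could be compared directly with a
## likelihood-ratio test once both fits converge.
library(lme4)
m_slope <- lmer(log1p_count ~ D * I(week_offset) + scaled_project_age +
                  (week_offset | upstream_vcs_link),
                data = all_actions_data, REML = FALSE)
m_full <- lmer(log1p_count ~ D * I(week_offset) + scaled_project_age +
                 (D * I(week_offset) | upstream_vcs_link),
               data = all_actions_data, REML = FALSE)
anova(m_slope, m_full)  # chi-square test on the additional variance components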

R/contribRDDAnalysis.R (new file, +72 lines)
View File

@@ -0,0 +1,72 @@
library(tidyverse)
library(plyr)
#get the contrib data instead
try(setwd(dirname(rstudioapi::getActiveDocumentContext()$path)))
contrib_df <- read_csv("../final_data/deb_contrib_did.csv")
#some preprocessing and expansion
col_order <- c("upstream_vcs_link", "age_of_project", "event_date", "event_hash", "before_all_ct", "after_all_ct", "before_mrg_ct", "after_mrg_ct", "before_auth_new", "after_auth_new", "before_commit_new", "after_commit_new")
contrib_df <- contrib_df[,col_order]
contrib_df$ct_before_all <- str_split(gsub("[][]","", contrib_df$before_all_ct), ", ")
contrib_df$ct_after_all <- str_split(gsub("[][]","", contrib_df$after_all_ct), ", ")
contrib_df$ct_before_mrg <- str_split(gsub("[][]","", contrib_df$before_mrg_ct), ", ")
contrib_df$ct_after_mrg <- str_split(gsub("[][]","", contrib_df$after_mrg_ct), ", ")
drop <- c("before_all_ct", "before_mrg_ct", "after_all_ct", "after_mrg_ct")
contrib_df = contrib_df[,!(names(contrib_df) %in% drop)]
# 2 some expansion needs to happen for each project
expand_timeseries <- function(project_row) {
longer <- project_row |>
pivot_longer(cols = starts_with("ct"),
names_to = "window",
values_to = "count") |>
unnest(count)
longer$observation_type <- gsub("^.*_", "", longer$window)
longer <- ddply(longer, "observation_type", transform, week=seq(from=0, by=1, length.out=length(observation_type)))
longer$count <- as.numeric(longer$count)
#longer <- longer[which(longer$observation_type == "all"),]
return(longer)
}
expanded_data <- expand_timeseries(contrib_df[1,])
for (i in 2:nrow(contrib_df)){
expanded_data <- rbind(expanded_data, expand_timeseries(contrib_df[i,]))
}
#filter out the windows of time that we're looking at
window_num <- 8
windowed_data <- expanded_data |>
filter(week >= (27 - window_num) & week <= (27 + window_num)) |>
mutate(D = ifelse(week > 27, 1, 0))
#scale the age numbers
windowed_data$scaled_project_age <- scale(windowed_data$age_of_project)
windowed_data$week_offset <- windowed_data$week - 27
#separate the data by observation type (all vs. mrg)
all_actions_data <- windowed_data[which(windowed_data$observation_type == "all"),]
mrg_actions_data <- windowed_data[which(windowed_data$observation_type == "mrg"),]
#EDA?
#TKTK ---------------------
#imports for models
library(lme4)
library(optimx)
#models -- TKTK need to be fixed
all_model <- lmer(log1p_count ~ D * I(week_offset)+ scaled_project_age + (D * I(week_offset)| upstream_vcs_link), data=all_actions_data, REML=FALSE, control = lmerControl(
optimizer ='optimx', optCtrl=list(method='L-BFGS-B')))
summary(all_model)
#identifying the quartiles of effect for D
all_model_ranef <- as.data.frame(ranef(all_model))
d_effect_ranef_all <- all_model_ranef[all_model_ranef$term=="D",]
d_effect_ranef_all$quartile <- ntile(d_effect_ranef_all$condval, 4)
#model residuals
all_residuals <- residuals(all_model)
qqnorm(all_residuals)
# mrg behavior for this
mrg_model <- lmer(log1p_count ~ D * I(week_offset)+ scaled_project_age + (D * I(week_offset)| upstream_vcs_link), data=all_actions_data, REML=FALSE, control = lmerControl(
optimizer ='optimx', optCtrl=list(method='L-BFGS-B')))
summary(mrg_model)
#identifying the quartiles of effect for D
mrg_model_ranef <- as.data.frame(ranef(mrg_model))
d_effect_ranef_mrg <- mrg_model_ranef[mrg_model_ranef$term=="D",]
d_effect_ranef_mrg$quartile <- ntile(d_effect_ranef_mrg$condval, 4)
#merge model residuals
mrg_residuals <- residuals(mrg_model)
qqnorm(mrg_residuals)
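## Editor's sketch (not part of the commit): the #EDA? / #TKTK placeholder above is still
## empty; one plausible starting point, mirroring the plotting code removed from the
## readme history, is a per-project trajectory plot around the event week. Assumes the
## all_actions_data frame built in this script.
library(ggplot2)
p <- ggplot(all_actions_data, aes(x = week_offset, y = count, group = upstream_vcs_link)) +
  geom_line(alpha = 0.3, show.legend = FALSE) +
  geom_vline(xintercept = 0, linetype = "dashed") +  # event week; D switches to 1 afterwards
  theme_bw()
p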

View File

@@ -57,43 +57,28 @@ all_actions_data$log1p_count <- log1p(all_actions_data$count)
# lmer: https://www.youtube.com/watch?v=LzAwEKrn2Mc
library(lme4)
# https://www.bristol.ac.uk/cmm/learning/videos/random-intercepts.html#exvar
#making some random data
sampled_data <- readme_df[sample(nrow(readme_df), 220), ]
expanded_sample_data <- expand_timeseries(sampled_data[1,])
for (i in 2:nrow(sampled_data)){
expanded_sample_data <- rbind(expanded_sample_data, expand_timeseries(sampled_data[i,]))
}
windowed_sample_data <- expanded_sample_data |>
filter(week >= (27 - window_num) & week <= (27 + window_num)) |>
mutate(D = ifelse(week > 27, 1, 0))
windowed_sample_data$scaled_project_age <- scale(windowed_sample_data$age_of_project)
windowed_sample_data$week_offset <- windowed_sample_data$week - 27
all_actions_sample_data <- windowed_sample_data[which(windowed_sample_data$observation_type == "all"),]
all_actions_sample_data$log1p_count <- log1p(all_actions_sample_data$count)
#test model
test_model <- lmer(log1p_count ~ D * I(week_offset) + scaled_project_age + (D * I(week_offset)|upstream_vcs_link), data=all_actions_sample_data, REML=FALSE)
summary(test_model)
#plot results
p <- ggplot(all_actions_sample_data, aes(x=week_offset, y=count, color=upstream_vcs_link), show.legend = FALSE) +
geom_point(size=3, show.legend = FALSE) +
geom_line(aes(y=predict(test_model)), show.legend = FALSE) +
theme_bw()
p
##end of the model testing and plotting section
library(optimx)
all_model <- lmer(log1p_count ~ D * I(week_offset)+ scaled_project_age + (D * I(week_offset)| upstream_vcs_link), data=all_actions_data, REML=FALSE, control = lmerControl(
    optimizer ='optimx', optCtrl=list(method='L-BFGS-B')))
summary(all_model)
random_effects <- ranef(all_model)
#identifying the quartiles of effect for D
all_model_ranef <- as.data.frame(ranef(all_model))
d_effect_ranef_all <- all_model_ranef[all_model_ranef$term=="D",]
d_effect_ranef_all$quartile <- ntile(d_effect_ranef_all$condval, 4)
#model residuals
all_residuals <- residuals(all_model)
qqnorm(all_residuals)
# for visualization, may have to run model for each project and then identify top 5 projects for RDD graphs
# mrg behavior for this
mrg_model <- lmer(count ~ D * I(week_offset)+ scaled_project_age + (D * I(week_offset)| upstream_vcs_link), data=mrg_actions_data, REML=FALSE)
mrg_model <- lmer(log1p_count ~ D * I(week_offset)+ scaled_project_age + (D * I(week_offset)| upstream_vcs_link), data=all_actions_data, REML=FALSE, control = lmerControl(
    optimizer ='optimx', optCtrl=list(method='L-BFGS-B')))
summary(mrg_model)
#identifying the quartiles of effect for D
mrg_model_ranef <- as.data.frame(ranef(mrg_model))
d_effect_ranef_mrg <- mrg_model_ranef[mrg_model_ranef$term=="D",]
d_effect_ranef_mrg$quartile <- ntile(d_effect_ranef_mrg$condval, 4)
#merge model residuals
mrg_residuals <- residuals(mrg_model)
qqnorm(mrg_residuals)
# Performance:
library(merTools)
ICC(outcome="count", group="week", data=all_actions_data)
#testing for different types of models
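## Editor's sketch (not part of the commit): the removed ICC(outcome=..., group=...) calls
## above report intraclass correlations; the per-project version corresponds to the share
## of total variance at the upstream_vcs_link level in an intercept-only model. Assumes
## the all_actions_data frame built earlier in this script.
library(lme4)
null_model <- lmer(count ~ 1 + (1 | upstream_vcs_link), data = all_actions_data)
vc <- as.data.frame(VarCorr(null_model))  # columns: grp, var1, var2, vcov, sdcor
icc_project <- vc$vcov[vc$grp == "upstream_vcs_link"] / sum(vc$vcov)
icc_project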