hist(contrib_df$event_gap)
median(contrib_df$event_gap)
1786.431 / 365 # median event gap converted to years (assuming event_gap is in days)
sd(contrib_df$event_gap)
max(readme_df$event_gap)
#all_gmodel <- glmer.nb(log1p_count ~ D * week_offset + scaled_project_age + scaled_event_gap + (D * week_offset | upstream_vcs_link),
#                       control=glmerControl(optimizer="bobyqa", optCtrl=list(maxfun=2e5)),
#                       nAGQ=0, data=all_actions_data)
all_gmodel <- readRDS("0710_contrib_all.rda")
summary(all_gmodel)
library(tidyverse)
library(texreg)
readme_rdd <- readRDS("final_models/0624_readme_all_rdd.rda")
contrib_rdd <- readRDS("final_models/0710_contrib_all_rdd.rda")
texreg(list(readme_rdd, contrib_rdd), stars=NULL, digits=3, use.packages=FALSE,
       custom.model.names=c('README', 'CONTRIBUTING'),
       custom.coef.names=c('(Intercept)', 'Introduction', 'Week (Time)',
                           'Project Age', 'Introduction:Week', 'Event Gap'),
       table=FALSE, ci.force=TRUE)
source("~/Desktop/git/24_deb_gov/R/contribCrescAnalysis.R")
summary(all_gmodel)
saveRDS(all_gmodel, "0710_contrib_cresc.rda")
range(all_actions_data$log1p_count)
source("~/Desktop/git/24_deb_gov/R/contribRDDAnalysis.R")
all_gmodel <- readRDS("0711_contrib_all.rda")
summary(all_gmodel)
readme_rdd <- readRDS("final_models/0624_readme_all_rdd.rda")
contrib_rdd <- readRDS("final_models/0711_contrib_all_rdd.rda")
summary(readme_rdd)
texreg(list(readme_rdd, contrib_rdd), stars=NULL, digits=3, use.packages=FALSE,
       custom.model.names=c('README', 'CONTRIBUTING'),
       custom.coef.names=c('(Intercept)', 'Introduction', 'Week (Time)',
                           'Project Age', 'Introduction:Week', 'Event Gap'),
       table=FALSE, ci.force=TRUE)
texreg(list(readme_rdd, contrib_rdd), stars=NULL, digits=3, use.packages=FALSE,
       custom.model.names=c('README', 'CONTRIBUTING'),
       custom.coef.names=c('(Intercept)', 'Introduction', 'Week (Time)',
                           'Project Age', 'Introduction:Week'),
       table=FALSE, ci.force=TRUE)
# plot the per-project random-effect groupings for both document types
readme_groupings <- read.csv('../final_data/deb_readme_interaction_groupings.csv')
contrib_groupings <- read.csv('../final_data/0711_contrib_inter_groupings.csv')
subdirColors <- setNames(c('firebrick1', 'forestgreen', 'cornflowerblue'), c(0, 1, 2))
readme_g <- readme_groupings |>
  ggplot(aes(x=rank, y=estimate, col=as.factor(ranef_grouping))) +
  geom_linerange(aes(ymin=conf.low, ymax=conf.high)) +
  scale_color_manual(values=subdirColors) +
  guides(fill="none", color="none") +
  theme_bw()
readme_g
contrib_g <- contrib_groupings |>
  ggplot(aes(x=rank, y=estimate, col=as.factor(ranef_grouping))) +
  geom_linerange(aes(ymin=conf.low, ymax=conf.high)) +
  scale_color_manual(values=subdirColors) +
  theme_bw() +
  theme(legend.position="top")
contrib_g
library(gridExtra)
grid.arrange(contrib_g, readme_g, nrow=1)
source("~/Desktop/git/24_deb_gov/R/contribRDDAnalysis.R")
source("~/Desktop/git/24_deb_gov/R/documentReadabilityAnalysis.R")
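# Sketch (not in the original session): persist the combined grouping figure
# built above with grid.arrange() instead of only rendering it interactively.
# The output filename and dimensions are placeholders.
combined_groupings <- arrangeGrob(contrib_g, readme_g, nrow = 1)
ggsave("groupings_combined.png", combined_groupings, width = 10, height = 4)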
read_csv("../final_data/deb_contrib_did.csv") View(contrib_pop_df) contrib_readability_df <- read_csv('../text_analysis/dwo_readability_contributing.csv') View(contrib_readability_df) View(contrib_pop_df) View(contrib_readability_df) View(contrib_pop_df) View(contrib_readability_df) View(contrib_pop_df) View(contrib_pop_df) View(contrib_df) View(contrib_pop_df) View(contrib_readability_df) View(contrib_pop_df) #concat dataframes into central data contrib_df_total <- contrib_pop_df |> mutate(project_name = str_split(upstream_vcs_link, pattern="/")[-1]) View(contrib_pop_df) View(contrib_readability_df) View(contrib_readability_df) contrib_df_total <- contrib_readability_df |> mutate(project_name = str_split(filename, pattern="_")[-2]) View(contrib_readability_df) contrib_df_total <- contrib_readability_df |> mutate(project_name = str_split(filename, pattern="_")) View(contrib_df_total) contrib_df_total <- contrib_readability_df |> mutate(project_name = str_split(filename, pattern="_")[0]) contrib_df_total <- contrib_readability_df |> mutate(project_name = str_split(filename, pattern="_")[1]) View(contrib_df_total) contrib_df_total <- contrib_readability_df |> mutate(project_name = str_split(filename, pattern="_")[1] |> sapply("[[", 1)) View(contrib_df_total) contrib_df_total <- contrib_readability_df |> mutate(project_name = str_split(filename, pattern="_")) View(contrib_df_total) contrib_df_total <- contrib_readability_df |> mutate(project_name_array = str_split(filename, pattern="_")) |> mutate(projes_name = project_name_array[1]) View(contrib_df_total) View(contrib_readability_df) View(contrib_pop_df) #concat dataframes into central data contrib_pop_df <- contrib_pop_df %>% mutate(first_element = map_chr(upstream_vcs_link, ~ { parts <- str_split(.x, pattern = "/")[[1]] if (length(parts) >= 1) { parts[1] # Extract the first element after splitting } else { NA_character_ } })) View(contrib_pop_df) contrib_df_total <- contrib_readability_df |> mutate(project_name = map_chr(filename, ~ { parts <- str_split(.x, pattern = "_")[[1]] if (length(parts) >= 1) { parts[1] } else { NA_character_ } })) contrib_df <- read_csv("../final_data/deb_contrib_did.csv") contrib_pop_df <- read_csv("../final_data/deb_contrib_pop_change.csv") contrib_readability_df <- read_csv('../text_analysis/dwo_readability_contributing.csv') contrib_df_total <- contrib_readability_df |> mutate(project_name = map_chr(filename, ~ { parts <- str_split(.x, pattern = "_")[[1]] if (length(parts) >= 1) { parts[1] } else { NA_character_ } })) View(contrib_df_total) contrib_pop_df <- contrib_pop_df |> mutate(project_name = map_chr(upstream_vcs_link, ~ { parts <- str_split(.x, pattern = "/")[[1]] if (length(parts) >= 1) { parts[-1] } else { NA_character_ } })) parts[length(parts)] contrib_pop_df <- contrib_pop_df |> mutate(project_name = map_chr(upstream_vcs_link, ~ { parts <- str_split(.x, pattern = "/")[[1]] if (length(parts) >= 1) { parts[length(parts)] } else { NA_character_ } })) View(contrib_pop_df) source("~/Desktop/git/24_deb_gov/R/docChar_outcomes.R") source("~/Desktop/git/24_deb_gov/R/docChar_outcomes.R") contrib_total_df <- contrib_pop_df |> left_join(contrib_readability_df, by="project_name") View(contrib_total_df) # test regressions lm1 <- glm.nb(after_contrib_new ~ word_count, data = contrib_total_df) # test regressions library(MASS) lm1 <- glm.nb(after_contrib_new ~ word_count, data = contrib_total_df) summary(lm1) View(contrib_total_df) contrib_total_df <- contrib_pop_df |> join(contrib_readability_df, by="project_name") 
qqnorm(residuals(lm1))
source("~/Desktop/git/24_deb_gov/R/docChar_outcomes.R")
lm1 <- glm.nb(after_contrib_new ~ linsear_write_formula, data = contrib_total_df)
qqnorm(residuals(lm1))
summary(lm1)
lm1 <- glm.nb(after_contrib_new ~ reading_time, data = contrib_total_df)
qqnorm(residuals(lm1))
summary(lm1)
lm1 <- glm.nb(after_contrib_new ~ flesch_reading_ease, data = contrib_total_df)
qqnorm(residuals(lm1))
summary(lm1)
#libraries
library(stringr)
# reload and rebuild the join keys; for the readability files keep everything
# before the last "_" in the filename as the project name
contrib_df <- read_csv("../final_data/deb_contrib_did.csv")
contrib_pop_df <- read_csv("../final_data/deb_contrib_pop_change.csv")
contrib_readability_df <- read_csv('../text_analysis/dwo_readability_contributing.csv')
contrib_pop_df <- contrib_pop_df |>
  mutate(project_name = map_chr(upstream_vcs_link, ~ {
    parts <- str_split(.x, pattern = "/")[[1]]
    if (length(parts) >= 1) {
      parts[length(parts)]
    } else {
      NA_character_
    }
  }))
contrib_readability_df <- contrib_readability_df |>
  mutate(project_name = map_chr(filename, ~ {
    parts <- str_split(.x, pattern = "_")[[1]]
    if (length(parts) >= 1) {
      paste(head(parts, -1), collapse = "_")
    } else {
      NA_character_
    }
  }))
View(contrib_readability_df)
contrib_total_df <- contrib_pop_df |>
  left_join(contrib_readability_df, by = "project_name")
View(contrib_total_df)
# test regressions
library(MASS)
lm1 <- glm.nb(after_contrib_new ~ flesch_reading_ease, data = contrib_total_df)
qqnorm(residuals(lm1))
summary(lm1)
lm1 <- glm.nb(after_contrib_new ~ flesch_reading_ease + age_in_days, data = contrib_total_df)
qqnorm(residuals(lm1))
summary(lm1)
View(contrib_df)
source("~/Desktop/git/24_deb_gov/R/contrib_docChar_outcomes.R")
View(windowed_data)
# aggregate per-project activity counts from the windowed data
summed_data <- windowed_data |>
  filter(window == "ct_after_all") |>
  group_by(upstream_vcs_link) |>
  summarize(total_ct_after_all = sum(count))
View(summed_data)
summed_data <- windowed_data |>
  filter(D == 1) |>
  group_by(upstream_vcs_link) |>
  summarise_at(vars(count), list(summed_count = sum))
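# Sketch (not in the original session): sanity checks on the aggregation
# before joining it back in -- one row per project, a look at the totals, and
# a count of projects with no D == 1 rows at all. anti_join() here is
# illustrative, not a step from the original analysis.
n_distinct(summed_data$upstream_vcs_link) == nrow(summed_data)
summary(summed_data$summed_count)
contrib_pop_df |>
  anti_join(summed_data, by = "upstream_vcs_link") |>
  nrow()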
View(summed_data)
source("~/Desktop/git/24_deb_gov/R/contrib_docChar_outcomes.R")
contrib_total_df <- contrib_pop_df |>
  left_join(contrib_readability_df, by = "project_name")
contrib_total_df <- contrib_total_df |>
  left_join(summed_data, by = "upstream_vcs_link")
View(contrib_total_df)
View(contrib_df)
#outcome variable that is number of commits by number of new contributors
contrib_total_df$commit_by_contrib = contrib_total_df$summed_count * contrib_total_df$after_contrib_new
# test regressions
library(MASS)
lm1 <- glm.nb(after_contrib_new ~ flesch_reading_ease + age_in_days, data = contrib_total_df)
qqnorm(residuals(lm1))
summary(lm1)
lm1 <- glm.nb(commit_by_contrib ~ flesch_reading_ease + age_in_days, data = contrib_total_df)
qqnorm(residuals(lm1))
summary(lm1)
lm1 <- glm.nb(commit_by_contrib ~ word_count, data = contrib_total_df)
qqnorm(residuals(lm1))
summary(lm1)
# rescaled and logged versions of the outcome
contrib_total_df$scaled_outcome = scale(contrib_total_df$commit_by_contrib)
contrib_total_df$logged_outcome = log1p(contrib_total_df$commit_by_contrib)
lm1 <- lm(scaled_outcome ~ word_count + flesch_kincaid_grade + linsear_write_formula, data = contrib_total_df)
qqnorm(residuals(lm1))
summary(lm1)
lm1 <- glm.nb(logged_outcome ~ word_count + flesch_kincaid_grade, data = contrib_total_df)
qqnorm(residuals(lm1))
summary(lm1)
lm1 <- glm.nb(logged_outcome ~ word_count + flesch_kincaid_grade + linsear_write_formula, data = contrib_total_df)
qqnorm(residuals(lm1))
summary(lm1)
lm1 <- glm.nb(logged_outcome ~ word_count + flesch_kincaid_grade + linsear_write_formula + mcalpine_eflaw, data = contrib_total_df)
qqnorm(residuals(lm1))
summary(lm1)
lm1 <- glm.nb(logged_outcome ~ word_count + flesch_kincaid_grade + linsear_write_formula + mcalpine_eflaw + dale_chall_readability_score, data = contrib_total_df)
qqnorm(residuals(lm1))
summary(lm1)
lm1 <- glm.nb(logged_outcome ~ word_count + dale_chall_readability_score, data = contrib_total_df)
qqnorm(residuals(lm1))
summary(lm1)
lm1 <- glm.nb(logged_outcome ~ word_count + reading_time, data = contrib_total_df)
qqnorm(residuals(lm1))
summary(lm1)
lm1 <- glm.nb(commit_by_contrib ~ word_count + reading_time, data = contrib_total_df)
qqnorm(residuals(lm1))
summary(lm1)
lm1 <- glm.nb(commit_by_contrib ~ word_count + flesch_kincaid_grade, data = contrib_total_df)
qqnorm(residuals(lm1))
summary(lm1)
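# Sketch (not in the original session): compare the candidate readability
# specifications side by side by AIC instead of re-fitting and eyeballing
# summaries one at a time. candidate_formulas / candidate_fits are
# illustrative names; the columns are the ones already used above.
candidate_formulas <- list(
  commit_by_contrib ~ word_count,
  commit_by_contrib ~ word_count + flesch_kincaid_grade,
  commit_by_contrib ~ word_count + reading_time,
  commit_by_contrib ~ word_count + dale_chall_readability_score
)
candidate_fits <- lapply(candidate_formulas, glm.nb, data = contrib_total_df)
data.frame(
  formula = sapply(candidate_formulas, function(f) paste(deparse(f), collapse = " ")),
  AIC = sapply(candidate_fits, AIC)
)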
# now the same document-characteristics analysis for README files
#libraries
library(stringr)
readme_df <- read_csv("../final_data/deb_readme_did.csv")
readme_pop_df <- read_csv("../final_data/deb_readme_pop_change.csv")
readme_readability_df <- read_csv('../text_analysis/dwo_readability_readmeuting.csv')
source("~/Desktop/git/24_deb_gov/R/readme_docChar_outcomes.R")
lm1 <- glm.nb(commit_by_readme ~ word_count + flesch_kincaid_grade, data = readme_total_df)
View(readme_readability_df)
# derive the README project_name key: everything before the last "_" in the filename
readme_readability_df <- readme_readability_df |>
  mutate(project_name = map_chr(filename, ~ {
    parts <- str_split(.x, pattern = "_")[[1]]
    if (length(parts) >= 1) {
      paste(head(parts, -1), collapse = "_")
    } else {
      NA_character_
    }
  }))
# manual fixes for upstream links and filenames the automatic split gets wrong
readme_pop_df[readme_pop_df['upstream_vcs_link'] == "https://github.com/agateau/yokadi/issues/new", "project_name"] = "yokadi"
readme_pop_df[readme_pop_df['upstream_vcs_link'] == "https://github.com/SciRuby/rb-gsl/issues/new", "project_name"] = "rb-gsl"
View(readme_pop_df)
readme_readability_df[readme_readability_df['filename'] == "yder_README_8md.html", "project_name"] = "yder"
readme_readability_df[readme_readability_df['filename'] == "pg_filedump.git_README.pg_filedump", "project_name"] = "pg_filedump.git"
readme_readability_df[readme_readability_df['filename'] == "openvas_UPGRADE_README", "project_name"] = "openvas"
readme_readability_df[readme_readability_df['filename'] == "hyphen.git_README_hyph_en_US.txt", "project_name"] = "hyphen.git"
readme_readability_df[readme_readability_df['filename'] == "cycle.git_README_ru.html", "project_name"] = "cycle.git"
readme_readability_df[readme_readability_df['filename'] == "diffuse.git_README_ru", "project_name"] = "diffuse.git"
readme_readability_df[readme_readability_df['filename'] == "CheMPS2_README_8md_source.html", "project_name"] = "CheMPS2"
readme_readability_df[readme_readability_df['filename'] == "sleuthkit_README_win32.txt", "project_name"] = "sleuthkit"
readme_readability_df[readme_readability_df['filename'] == "Lmod_README_lua_modulefiles.txt", "project_name"] = "Lmod"
readme_readability_df[readme_readability_df['filename'] == "engauge_debian_README_for_osx", "project_name"] = "engauge_debian"
# join population change, readability, and summed activity for READMEs
readme_total_df <- readme_pop_df |>
  left_join(readme_readability_df, by = "project_name")
readme_total_df <- readme_total_df |>
  left_join(summed_data, by = "upstream_vcs_link")
View(readme_total_df)
#outcome variable that is number of commits by number of new contributors
readme_total_df$commit_by_readme = readme_total_df$summed_count * readme_total_df$after_readme_new
readme_total_df$logged_outcome = log(readme_total_df$commit_by_readme)
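# Alternative sketch for the manual corrections above (not in the original
# session): keep the special cases in a small lookup table and patch them in
# one pass, which is easier to audit than repeated row assignments.
# readme_name_fixes and fixed_name are illustrative names.
readme_name_fixes <- tribble(
  ~filename,                    ~fixed_name,
  "yder_README_8md.html",       "yder",
  "openvas_UPGRADE_README",     "openvas",
  "sleuthkit_README_win32.txt", "sleuthkit"
  # ... remaining special cases as listed above
)
readme_readability_df <- readme_readability_df |>
  left_join(readme_name_fixes, by = "filename") |>
  mutate(project_name = coalesce(fixed_name, project_name)) |>
  select(-fixed_name)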
readme_total_df$commit_by_contrib = readme_total_df$summed_count * readme_total_df$after_readme_new
View(readme_total_df)
readme_total_df$commit_by_contrib = readme_total_df$summed_count * readme_total_df$after_contrib_new
readme_total_df$logged_outcome = log(readme_total_df$commit_by_contrib)
lm1 <- glm.nb(commit_by_contrib ~ word_count + flesch_kincaid_grade, data = readme_total_df)
qqnorm(residuals(lm1))
summary(lm1)
lm1 <- glm.nb(summed_count ~ word_count + flesch_kincaid_grade, data = readme_total_df)
qqnorm(residuals(lm1))
summary(lm1)
lm1 <- glm.nb(after_contrib_new ~ word_count + flesch_kincaid_grade, data = readme_total_df)
qqnorm(residuals(lm1))
summary(lm1)
lm1 <- glm.nb(after_contrib_new ~ word_count + reading_time, data = readme_total_df)
qqnorm(residuals(lm1))
summary(lm1)
lm1 <- glm.nb(commit_by_contrib ~ word_count + reading_time, data = readme_total_df)
qqnorm(residuals(lm1))
summary(lm1)
lm1 <- glm.nb(reading_time ~ word_count, data = readme_total_df)
qqnorm(residuals(lm1))
summary(lm1)
lm1 <- glm.nb(reading_time ~ flesch_reading_ease, data = readme_total_df)
qqnorm(residuals(lm1))
summary(lm1)
lm1 <- glm.nb(commit_by_contrib ~ reading_time, data = readme_total_df)
qqnorm(residuals(lm1))
summary(lm1)
lm1 <- glm.nb(commit_by_contrib ~ reading_time + linsear_write_formula, data = readme_total_df)
qqnorm(residuals(lm1))
summary(lm1)
# shift the count by one so logged outcomes are defined, then use log1p
readme_total_df$commit_by_contrib = readme_total_df$summed_count * (readme_total_df$after_contrib_new + 1)
readme_total_df$logged_outcome = log1p(readme_total_df$commit_by_contrib)
lm1 <- glm.nb(commit_by_contrib ~ reading_time + linsear_write_formula, data = readme_total_df)
qqnorm(residuals(lm1))
summary(lm1)
lm1 <- glm.nb(logged_outcome ~ reading_time + linsear_write_formula, data = readme_total_df)
qqnorm(residuals(lm1))
summary(lm1)
lm1 <- glm.nb(logged_outcome ~ reading_time + linsear_write_formula + flesch_reading_ease, data = readme_total_df)
qqnorm(residuals(lm1))
summary(lm1)
lm1 <- glm.nb(logged_outcome ~ reading_time + linsear_write_formula + flesch_reading_ease + mcalpine_eflaw, data = readme_total_df)
qqnorm(residuals(lm1))
summary(lm1)
lm1 <- glm.nb(summed_count ~ reading_time + linsear_write_formula + flesch_reading_ease + mcalpine_eflaw, data = readme_total_df)
qqnorm(residuals(lm1))
summary(lm1)
lm1 <- glm.nb(summed_count ~ reading_time + linsear_write_formula + flesch_reading_ease + mcalpine_eflaw + word_count, data = readme_total_df)
qqnorm(residuals(lm1))
summary(lm1)
lm1 <- glm.nb(logged_outcome ~ reading_time + linsear_write_formula + flesch_reading_ease + mcalpine_eflaw + word_count, data = readme_total_df)
qqnorm(residuals(lm1))
summary(lm1)
# back to the CONTRIBUTING data
source("~/Desktop/git/24_deb_gov/R/contrib_docChar_outcomes.R")
lm1 <- glm.nb(logged_outcome ~ reading_time + linsear_write_formula + flesch_reading_ease + mcalpine_eflaw + word_count, data = contrib_total_df)
contrib_total_df$logged_outcome = log1p(contrib_total_df$commit_by_contrib)
lm1 <- glm.nb(logged_outcome ~ reading_time + linsear_write_formula + flesch_reading_ease + mcalpine_eflaw + word_count, data = contrib_total_df)
qqnorm(residuals(lm1))
summary(lm1)
lm1 <- glm.nb(summed_count ~ reading_time + linsear_write_formula + flesch_reading_ease + mcalpine_eflaw + word_count, data = contrib_total_df)
qqnorm(residuals(lm1))
summary(lm1)
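# Sketch (not in the original session): the readability measures are likely
# correlated with each other and with word_count, so check collinearity before
# interpreting coefficients from the full specification. readability_cols is
# an illustrative name; car::vif() is optional and only run if car is installed.
readability_cols <- c("reading_time", "linsear_write_formula",
                      "flesch_reading_ease", "mcalpine_eflaw", "word_count")
round(cor(contrib_total_df[readability_cols], use = "pairwise.complete.obs"), 2)
if (requireNamespace("car", quietly = TRUE)) car::vif(lm1)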