adding characteristic tests with outcomes

mjgaughan 2024-07-15 18:20:46 -04:00
parent 586e46641d
commit 8b887a6af5
5 changed files with 617 additions and 441 deletions

View File

@@ -1,442 +1,3 @@
time_plot <- all_actions_data |>
ggplot(aes(x=week_offset, y=log1p_count, color=factor(document_type))) +
geom_smooth() +
geom_vline(x=0)+
theme_bw() +
theme(legend.position = "top")
time_plot
time_plot <- all_actions_data |>
ggplot(aes(x=week_offset, y=log1p_count, color=factor(document_type))) +
geom_smooth() +
geom_vline(x=0)+
theme_bw() +
theme(legend.position = "top")
time_plot <- all_actions_data |>
ggplot(aes(x=week_offset, y=log1p_count, color=factor(document_type))) +
geom_smooth() +
geom_vline(0)+
theme_bw() +
theme(legend.position = "top")
time_plot
time_plot <- all_actions_data |>
ggplot(aes(x=week_offset, y=log1p_count, color=factor(document_type))) +
geom_smooth() +
geom_vline(xintercept = 0)+
theme_bw() +
theme(legend.position = "top")
time_plot
#looking at event gap
document_event_gap <- ggplot(all_actions_data, aes(x=event_gap, group=as.factor(document_type))) +
geom_density(aes(color = as.factor(document_type), fill=as.factor(document_type)), alpha=0.2, position="identity") +
theme_bw()
document_event_gap
#looking at event gap
document_event_gap <- ggplot(all_actions_data, aes(x=scale(event_gap), group=as.factor(document_type))) +
geom_density(aes(color = as.factor(document_type), fill=as.factor(document_type)), alpha=0.2, position="identity") +
theme_bw()
document_event_gap
#looking at event gap
mean(all_actions_readme_data$event_gap)
sd(all_actions_readme_data$event_gap)
mean(all_actions_contrib_data$event_gap)
sd(all_actions_contrib_data$event_gap)
mode(all_actions_contrib_data$event_gap)
mean(all_actions_contrib_data$event_gap)
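# Note: base R's mode() above returns the storage mode ("numeric"), not the modal
# value; a sketch of the most frequent event gap instead:
as.numeric(names(which.max(table(all_actions_contrib_data$event_gap))))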
library(tidyverse)
contrib_df <- read_csv("../final_data/deb_contrib_did.csv")
readme_df <- read_csv("../final_data/deb_readme_did.csv")
hist(readme_df$event_gap)
hist(readme_df$event_gap)
mean(readme_df$event_gap)
sd(readme_df$event_gap)
min(readme_df$event_gap)
count(readme_df$event_gap < 0)
length(readme_df$event_gap < 0)
table(readme_df$event_gap)
table(contrib_df$event_gap)
sum(readme_df$event_gap < 0)
table(readme_df$event_gap)
delta <- as.POSIXct(readme_df$event_date) - as.POSIXct(readme_df$first_commit_dt) -
delta <- as.POSIXct(readme_df$event_date) - as.POSIXct(readme_df$first_commit_dt)
delta <- as.POSIXct(readme_df$event_date) - as.POSIXct(readme_df$first_commit_dt)
readme_df$asposixctED <- as.POSIXct(readme_df$event_date)
View(readme_df)
readme_df$asposixctFC <- as.POSIXct(readme_df$first_commit_dt)
readme_df$new_delta <- readme_df$asposixctED - readme_df$asposixctFC
View(readme_df)
readme_df$new_delta <- as.numeric(readme_df$asposixctED - readme_df$asposixctFC, units="days")
View(readme_df)
readme_df$new_delta <- readme_df$asposixctED - readme_df$asposixctFC, units="days"
readme_df$new_delta <- readme_df$asposixctED - readme_df$asposixctFC
contrib_df <- read_csv("../final_data/deb_contrib_did.csv")
readme_df <- read_csv("../final_data/deb_readme_did.csv")
contrib_df <- contrib_df |>
filter(event_gap >= 0)
readme_df <- readme_df |>
filter(event_gap >= 0)
readme_df <- read_csv("../final_data/deb_readme_did.csv")
sum(readme_df$event_gap < 0)
sum(contrib_df$event_gap < 0)
contrib_df <- read_csv("../final_data/deb_contrib_did.csv")
sum(contrib_df$event_gap < 0)
as.POSIXct(2016-02-20 02:31:00)-as.POSIXct(2009-02-06 16:31:05)
as.POSIXct("2016-02-20 02:31:00")-as.POSIXct("2009-02-06 16:31:05")
as.POSIXct("2016-11-29 13:34:52")-as.POSIXct("2014-07-11 08:36:39")
as.POSIXct("2017-02-04 21:15:52")-as.POSIXct("2009-04-23 17:11:15")
as.POSIXct("2019-01-17 23:15:08")-as.POSIXct("2007-11-28 09:50:01")
contrib_df <- read_csv("../final_data/deb_contrib_did.csv")
sum(contrib_df$event_gap < 0)
as.POSIXct("2019-07-31 17:38:52")-as.POSIXct("2019-07-31 13:38:52")
as.numeric(as.POSIXct("2019-07-31 17:38:52")-as.POSIXct("2019-07-31 13:38:52"), unit=days)
as.numeric(as.POSIXct("2019-07-31 17:38:52")-as.POSIXct("2019-07-31 13:38:52"), unit="days")
as.numeric(as.POSIXct("1998-08-13 15:43:39")-as.POSIXct("1998-08-13 11:43:39"), unit="days")
as.numeric(as.POSIXct("2009-05-08 15:26:27")-as.POSIXct("2009-02-05 19:06:44"), unit="days")
readme_df <- read_csv("../final_data/deb_readme_did.csv")
results <- as.numeric(as.POSIXct(readme_df$event_date) - as.POSIXct(readme_df$first_commit_dt), unit="days")
min(results)
L1 <- as.POSIXct(c(
"2019-07-31 17:38:52", "1998-08-13 15:43:39", "2009-05-08 15:26:27",
"2011-03-22 14:02:38", "2009-10-09 09:54:44", "2009-04-29 23:16:59",
"2009-08-15 08:12:16", "2014-07-11 19:14:56", "2008-10-21 14:00:00",
"2016-08-15 21:55:21", "2000-06-18 14:00:37", "2000-11-05 00:12:44",
"2009-04-23 21:23:12", "2010-09-19 12:36:20", "2007-11-28 14:50:01",
"2013-08-09 18:25:49", "2005-05-07 21:52:07", "2004-01-27 19:53:06",
"2015-11-16 04:44:13", "2014-06-11 23:19:07", "2008-04-15 05:23:34",
"2015-02-08 10:08:15", "2008-06-20 10:52:21"
))
# L2 (provided list of ordered datetime values)
L2 <- as.POSIXct(c(
"2019-07-31 13:38:52", "1998-08-13 11:43:39", "2009-02-05 19:06:44",
"2011-01-27 10:48:48", "2009-10-09 05:54:44", "2009-04-29 19:16:59",
"2009-08-15 04:12:16", "2014-07-11 08:36:39", "2008-10-21 10:00:00",
"2016-04-14 14:41:36", "2000-06-18 10:00:37", "2000-11-04 19:12:44",
"2009-04-23 17:11:15", "2010-09-15 20:20:35", "2007-11-28 09:50:01",
"2013-08-09 14:25:49", "2005-05-01 18:31:26", "2004-01-27 14:53:06",
"2015-10-09 02:31:15", "2014-06-13 22:35:34", "2008-04-15 01:23:34",
"2010-03-07 00:58:08", "2008-06-20 06:30:10"
))
# Calculate differences in days
differences <- as.numeric(L2 - L1, units = "days")
# Print the resulting differences
print(differences)
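# A sketch of the same pairwise gaps with difftime(), which makes the direction
# of subtraction and the unit explicit (assumes L1 holds the event datetimes and
# L2 the matching first-commit datetimes, as above):
gap_days <- as.numeric(difftime(L1, L2, units = "days"))
summary(gap_days)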
as.numeric(as.POSIXct("2011-03-22 14:02:38
")-as.POSIXct("2011-01-27 10:48:48"), unit="days")
as.numeric(as.POSIXct("2009-10-09 09:54:44
")-as.POSIXct("2009-10-09 05:54:44
"), unit="days")
as.numeric(as.POSIXct("2009-04-29 23:16:59
")-as.POSIXct("2009-04-29 19:16:59
"), unit="days")
as.numeric(as.POSIXct("2009-08-15 08:12:16
")-as.POSIXct("2009-08-15 04:12:16
"), unit="days")
as.numeric(as.POSIXct("2014-07-11 19:14:56
")-as.POSIXct("2014-07-11 08:36:39
"), unit="days")
as.numeric(as.POSIXct("2008-10-21 14:00:00
")-as.POSIXct("2008-10-21 10:00:00
"), unit="days")
as.numeric(as.POSIXct("2016-08-15 21:55:21
")-as.POSIXct("2016-04-14 14:41:36
"), unit="days")
as.numeric(as.POSIXct("2000-06-18 14:00:37
")-as.POSIXct("2000-06-18 10:00:37
"), unit="days")
as.numeric(as.POSIXct("2000-11-05 00:12:44
")-as.POSIXct("2000-11-04 19:12:44
"), unit="days")
as.numeric(as.POSIXct("2009-04-23 21:23:12
")-as.POSIXct("2009-04-23 17:11:15
"), unit="days")
as.numeric(as.POSIXct("2010-09-19 12:36:20
")-as.POSIXct("2010-09-15 20:20:35
"), unit="days")
as.numeric(as.POSIXct("2007-11-28 14:50:01
")-as.POSIXct("2007-11-28 09:50:01
"), unit="days")
as.numeric(as.POSIXct("2013-08-09 18:25:49
")-as.POSIXct("2013-08-09 14:25:49
"), unit="days")
as.numeric(as.POSIXct("2005-05-07 21:52:07
")-as.POSIXct("2005-05-01 18:31:26
"), unit="days")
as.numeric(as.POSIXct("2004-01-27 19:53:06
")-as.POSIXct("2004-01-27 14:53:06
"), unit="days")
as.numeric(as.POSIXct("2015-11-16 04:44:13
")-as.POSIXct("2015-10-09 02:31:15
"), unit="days")
as.numeric(as.POSIXct("2014-06-11 23:19:07
")-as.POSIXct("2014-06-13 22:35:34
"), unit="days")
as.numeric(as.POSIXct("2014-06-11 23:19:07
")-as.POSIXct("2014-06-13 22:35:34
"), unit="days")
as.numeric(as.POSIXct("2008-04-15 05:23:34
")-as.POSIXct("2008-04-15 01:23:34
"), unit="days")
as.numeric(as.POSIXct("2015-02-08 10:08:15
")-as.POSIXct("2010-03-07 00:58:08
"), unit="days")
as.numeric(as.POSIXct("2008-06-20 10:52:21
")-as.POSIXct("2008-06-20 06:30:10
"), unit="days")
as.numeric(as.POSIXct("2001-06-22 17:39:29
")-as.POSIXct("2001-06-22 13:39:29
"), unit="days")
as.numeric(as.POSIXct("2013-05-15 12:13:50
")-as.POSIXct("2013-05-15 08:13:50
"), unit="days")
as.numeric(as.POSIXct("2015-12-10 12:31:14
")-as.POSIXct("2015-12-10 07:31:14
"), unit="days")
as.numeric(as.POSIXct("2013-02-07 15:58:18
")-as.POSIXct("2013-02-07 10:58:18
"), unit="days")
as.numeric(as.POSIXct("2013-06-05 15:19:59
")-as.POSIXct("2013-06-05 11:19:59
"), unit="days")
as.numeric(as.POSIXct("2016-02-24 21:54:34
")-as.POSIXct("2016-02-24 16:54:34
"), unit="days")
as.numeric(as.POSIXct("2013-09-09 16:52:04
")-as.POSIXct("2013-08-08 20:07:23
"), unit="days")
#all_gmodel <- glmer.nb(log1p_count ~ D * week_offset + scaled_project_age + scaled_event_gap + (D * week_offset | upstream_vcs_link),
# control=glmerControl(optimizer="bobyqa",
# optCtrl=list(maxfun=2e5)), nAGQ=0, data=all_actions_data)
all_gmodel <- readRDS("0710_contrib_all.rda")
"), unit="days")
#all_gmodel <- glmer.nb(log1p_count ~ D * week_offset + scaled_project_age + scaled_event_gap + (D * week_offset | upstream_vcs_link),
# control=glmerControl(optimizer="bobyqa",
# optCtrl=list(maxfun=2e5)), nAGQ=0, data=all_actions_data)
all_gmodel <- readRDS("0710_contrib_all.rda")
summary(all_gmodel)
as.numeric(as.POSIXct("2014-09-28 09:39:20
")-as.POSIXct("2014-09-28 03:41:27
"), unit="days")
as.numeric(as.POSIXct("2011-08-27 03:10:24
")-as.POSIXct("2011-08-26 23:10:24
"), unit="days")
as.numeric(as.POSIXct("2011-05-31 21:58:54
")-as.POSIXct("2011-05-31 18:00:13
"), unit="days")
as.numeric(as.POSIXct("2015-11-10 20:33:50
")-as.POSIXct("2015-11-10 15:33:50
"), unit="days")
as.numeric(as.POSIXct("2019-12-02 10:59:23
")-as.POSIXct("2019-12-02 05:59:23
"), unit="days")
as.numeric(as.POSIXct("2019-12-02 11:00:24
")-as.POSIXct("2019-12-02 06:00:24
"), unit="days")
as.numeric(as.POSIXct("2014-10-15 07:41:16
")-as.POSIXct("2014-09-20 06:22:40
"), unit="days")
as.numeric(as.POSIXct("2015-05-13 13:28:36
")-as.POSIXct("2015-05-13 09:28:36
"), unit="days")
as.numeric(as.POSIXct("2017-06-23 09:04:49
")-as.POSIXct("2017-06-23 05:04:49
"), unit="days")
as.numeric(as.POSIXct("2015-09-22 16:31:10
")-as.POSIXct("2015-09-01 14:47:44
"), unit="days")
as.numeric(as.POSIXct("2011-08-11 19:19:12
")-as.POSIXct("2011-07-05 16:09:48
"), unit="days")
as.numeric(as.POSIXct("2017-02-02 11:34:37
")-as.POSIXct("2017-02-01 05:48:49
"), unit="days")
as.numeric(as.POSIXct("1988-08-07 21:49:56
")-as.POSIXct("1988-06-05 13:51:08
"), unit="days")
as.numeric(as.POSIXct("2013-01-26 21:18:26
")-as.POSIXct("2013-01-26 16:18:26
"), unit="days")
as.numeric(as.POSIXct("2010-07-24 20:27:20
")-as.POSIXct("2010-07-24 16:27:20
"), unit="days")
as.numeric(as.POSIXct("2008-04-20 04:45:51
")-as.POSIXct("2008-02-23 06:53:28
"), unit="days")
as.numeric(as.POSIXct("2014-07-15 11:41:30
")-as.POSIXct("2014-01-28 15:47:41
"), unit="days")
as.numeric(as.POSIXct("2019-05-28 14:40:24
")-as.POSIXct("2019-05-28 10:29:07
"), unit="days")
as.numeric(as.POSIXct("2009-02-03 09:41:14
")-as.POSIXct("2009-02-01 17:37:33
"), unit="days")
as.numeric(as.POSIXct("2011-08-24 09:46:11
")-as.POSIXct("2010-07-24 17:09:50
"), unit="days")
as.numeric(as.POSIXct("2017-03-29 07:30:12
")-as.POSIXct("2017-03-28 20:24:14
"), unit="days")
as.numeric(as.POSIXct("2013-04-04 21:58:36
")-as.POSIXct("2013-04-04 17:58:36
"), unit="days")
as.numeric(as.POSIXct("2018-02-06 08:23:27
")-as.POSIXct("2018-02-04 20:54:04
"), unit="days")
as.numeric(as.POSIXct("2017-07-08 09:55:34
")-as.POSIXct("2017-07-08 05:30:53
"), unit="days")
as.numeric(as.POSIXct("2014-02-08 21:43:05
")-as.POSIXct("2014-02-08 16:43:05
"), unit="days")
as.numeric(as.POSIXct("2012-12-06 20:08:36
")-as.POSIXct("2012-12-06 15:08:36
"), unit="days")
as.numeric(as.POSIXct("2006-01-20 16:13:23
")-as.POSIXct("2006-01-20 11:13:23
"), unit="days")
as.numeric(as.POSIXct("2009-04-25 13:02:20
")-as.POSIXct("2009-04-25 09:02:20
"), unit="days")
as.numeric(as.POSIXct("2015-11-06 19:02:05
")-as.POSIXct("2015-11-06 14:02:05
"), unit="days")
as.numeric(as.POSIXct("2015-09-07 03:35:11
")-as.POSIXct("2015-09-06 23:30:43
"), unit="days")
as.numeric(as.POSIXct("2010-07-15 09:55:52
")-as.POSIXct("2010-07-15 05:55:52
"), unit="days")
as.numeric(as.POSIXct("2007-09-21 09:19:24
")-as.POSIXct("2007-09-21 05:02:27
"), unit="days")
as.numeric(as.POSIXct("2013-05-28 18:52:41
")-as.POSIXct("2007-04-01 16:01:20
"), unit="days")
as.numeric(as.POSIXct("2013-05-02 23:54:17
")-as.POSIXct("2013-05-02 19:54:17
"), unit="days")
as.numeric(as.POSIXct("2013-04-02 17:43:49
")-as.POSIXct("2013-04-02 13:43:49
"), unit="days")
as.numeric(as.POSIXct("2011-04-03 11:15:21
")-as.POSIXct("2011-04-03 07:15:21
"), unit="days")
as.numeric(as.POSIXct("2018-09-03 14:19:31
")-as.POSIXct("2018-09-03 10:19:31
"), unit="days")
as.numeric(as.POSIXct("2008-10-31 18:50:55
")-as.POSIXct("2008-10-21 10:34:54
"), unit="days")
as.numeric(as.POSIXct("2012-03-31 21:02:17
")-as.POSIXct("2012-03-31 17:02:17
"), unit="days")
as.numeric(as.POSIXct("2014-04-15 08:31:16
")-as.POSIXct("2014-04-15 04:31:16
"), unit="days")
as.numeric(as.POSIXct("2013-08-30 08:25:52
")-as.POSIXct("2013-08-30 04:25:52
"), unit="days")
as.numeric(as.POSIXct("2012-06-28 12:02:58
")-as.POSIXct("2012-06-28 04:41:05
"), unit="days")
as.numeric(as.POSIXct("2012-03-12 11:32:41
")-as.POSIXct("2012-03-08 07:32:06
"), unit="days")
as.numeric(as.POSIXct("2012-07-14 19:06:01
")-as.POSIXct("2012-07-14 15:06:01
"), unit="days")
as.numeric(as.POSIXct("2012-09-23 03:41:35
")-as.POSIXct("2012-09-22 23:41:35
"), unit="days")
as.numeric(as.POSIXct("2012-11-04 22:57:59
")-as.POSIXct("2012-11-04 09:36:27
"), unit="days")
as.numeric(as.POSIXct("2015-04-02 12:37:04
")-as.POSIXct("2014-02-17 09:19:10
"), unit="days")
as.numeric(as.POSIXct("2011-09-23 05:37:51
")-as.POSIXct("2007-03-09 11:17:14
"), unit="days")
as.numeric(as.POSIXct("2014-05-10 14:17:37
")-as.POSIXct("2014-03-15 09:47:58
"), unit="days")
as.numeric(as.POSIXct("2013-09-09 01:53:53
")-as.POSIXct("2013-09-08 21:53:53
"), unit="days")
as.numeric(as.POSIXct("2015-06-30 20:49:36
")-as.POSIXct("2015-06-30 16:49:36
"), unit="days")
as.numeric(as.POSIXct("2011-02-02 11:56:48
")-as.POSIXct("2011-02-02 05:57:37
"), unit="days")
as.numeric(as.POSIXct("2011-02-02 11:56:48
")-as.POSIXct("2011-11-04 09:43:27
"), unit="days")
as.numeric(as.POSIXct("2011-11-04 13:43:27
")-as.POSIXct("2011-11-04 09:43:27
"), unit="days")
as.numeric(as.POSIXct("2009-01-22 01:08:05
")-as.POSIXct("2007-03-23 16:50:26
"), unit="days")
contrib_df <- read_csv("../final_data/deb_contrib_did.csv")
readme_df <- read_csv("../final_data/deb_readme_did.csv")
contrib_df <- contrib_df |>
filter(event_gap >= 0)
readme_df <- readme_df |>
filter(event_gap >= 0)
hist(readme_df$event_gap)
mean(readme_df$event_gap)
sd(readme_df$event_gap)
median(readme_df$event_gap)
max(readme_df$event_gap)
13871.64 / 365
true_gap <- c()
for (i in len(readme_df$event_date)){
delta <- as.numeric(as.POSIXct(readme_df$event_date[i,])-as.POSIXct(readme_df$first_commit_dt[i,]), unit="days")
true_gap <- c(true_gap, delta)
}
for (i in length(readme_df$event_date)){
delta <- as.numeric(as.POSIXct(readme_df$event_date[i,])-as.POSIXct(readme_df$first_commit_dt[i,]), unit="days")
true_gap <- c(true_gap, delta)
}
delta <- as.numeric(as.POSIXct(readme_df$event_date[i])-as.POSIXct(readme_df$first_commit_dt[i]), unit="days")
for (i in length(readme_df$event_date)){
delta <- as.numeric(as.POSIXct(readme_df$event_date[i])-as.POSIXct(readme_df$first_commit_dt[i]), unit="days")
true_gap <- c(true_gap, delta)
}
true_gap
for (i in length(readme_df$event_date)){
delta <- as.numeric(as.POSIXct(readme_df$event_date[i])-as.POSIXct(readme_df$first_commit_dt[i]), unit="days")
true_gap <- c(true_gap, delta)
}
true_gap
length(readme_df$event_date)
true_gap <- c()
for (i in length(readme_df$event_date)){
delta <- as.numeric(as.POSIXct(readme_df$event_date[i])-as.POSIXct(readme_df$first_commit_dt[i]), unit="days")
true_gap <- c(true_gap, delta)
}
true_gap
length(readme_df$event_date)
true_gap <- c()
for (i in 1:length(readme_df$event_date)){
delta <- as.numeric(as.POSIXct(readme_df$event_date[i])-as.POSIXct(readme_df$first_commit_dt[i]), unit="days")
true_gap <- c(true_gap, delta)
}
true_gap
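# The loop above can be replaced by a vectorized version of the same per-row
# gap (a sketch, assuming both columns parse cleanly with as.POSIXct()):
true_gap <- as.numeric(difftime(as.POSIXct(readme_df$event_date),
                                as.POSIXct(readme_df$first_commit_dt),
                                units = "days"))
summary(true_gap)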
library(tidyverse)
readme_df <- read_csv("../final_data/deb_readme_did.csv")
readme_df <- readme_df |>
filter(event_gap >= 0)
hist(readme_df$event_gap)
median(readme_df$event_gap)
sd(readme_df$event_gap)
max(readme_df$event_gap)
table(readme_df$event_gap)
contrib_df <- read_csv("../final_data/deb_contrib_did.csv")
contrib_df <- contrib_df |>
filter(event_gap >= 0)
median(readme_df$event_gap)
sd(readme_df$event_gap)
hist(contrib_df$event_gap)
median(contrib_df$event_gap)
1786.431 / 265
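# The division above looks like a days-to-years conversion; assuming 365 was
# intended rather than 265, the gap printed just above works out to roughly
1786.431 / 365  # ~4.9 years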
@@ -510,3 +71,442 @@ contrib_g
library(gridExtra)
grid.arrange(contrib_g, readme_g, nrow = 1)
source("~/Desktop/git/24_deb_gov/R/contribRDDAnalysis.R")
source("~/Desktop/git/24_deb_gov/R/documentReadabilityAnalysis.R")
contrib_pop_df <- read_csv("../final_data/deb_contrib_pop_change.csv")
contrib_df <- read_csv("../final_data/deb_contrib_did.csv")
View(contrib_pop_df)
contrib_readability_df <- read_csv('../text_analysis/dwo_readability_contributing.csv')
View(contrib_readability_df)
View(contrib_pop_df)
View(contrib_readability_df)
View(contrib_pop_df)
View(contrib_readability_df)
View(contrib_pop_df)
View(contrib_pop_df)
View(contrib_df)
View(contrib_pop_df)
View(contrib_readability_df)
View(contrib_pop_df)
#concat dataframes into central data
contrib_df_total <- contrib_pop_df |>
mutate(project_name = str_split(upstream_vcs_link, pattern="/")[-1])
View(contrib_pop_df)
View(contrib_readability_df)
View(contrib_readability_df)
contrib_df_total <- contrib_readability_df |>
mutate(project_name = str_split(filename, pattern="_")[-2])
View(contrib_readability_df)
contrib_df_total <- contrib_readability_df |>
mutate(project_name = str_split(filename, pattern="_"))
View(contrib_df_total)
contrib_df_total <- contrib_readability_df |>
mutate(project_name = str_split(filename, pattern="_")[0])
contrib_df_total <- contrib_readability_df |>
mutate(project_name = str_split(filename, pattern="_")[1])
View(contrib_df_total)
contrib_df_total <- contrib_readability_df |>
mutate(project_name = str_split(filename, pattern="_")[1] |>
sapply("[[", 1))
View(contrib_df_total)
contrib_df_total <- contrib_readability_df |>
mutate(project_name = str_split(filename, pattern="_"))
View(contrib_df_total)
contrib_df_total <- contrib_readability_df |>
mutate(project_name_array = str_split(filename, pattern="_")) |>
mutate(projes_name = project_name_array[1])
View(contrib_df_total)
View(contrib_readability_df)
View(contrib_pop_df)
#concat dataframes into central data
contrib_pop_df <- contrib_pop_df %>%
mutate(first_element = map_chr(upstream_vcs_link, ~ {
parts <- str_split(.x, pattern = "/")[[1]]
if (length(parts) >= 1) {
parts[1] # Extract the first element after splitting
} else {
NA_character_
}
}))
View(contrib_pop_df)
contrib_df_total <- contrib_readability_df |>
mutate(project_name = map_chr(filename, ~ {
parts <- str_split(.x, pattern = "_")[[1]]
if (length(parts) >= 1) {
parts[1]
} else {
NA_character_
}
}))
contrib_df <- read_csv("../final_data/deb_contrib_did.csv")
contrib_pop_df <- read_csv("../final_data/deb_contrib_pop_change.csv")
contrib_readability_df <- read_csv('../text_analysis/dwo_readability_contributing.csv')
contrib_df_total <- contrib_readability_df |>
mutate(project_name = map_chr(filename, ~ {
parts <- str_split(.x, pattern = "_")[[1]]
if (length(parts) >= 1) {
parts[1]
} else {
NA_character_
}
}))
View(contrib_df_total)
contrib_pop_df <- contrib_pop_df |>
mutate(project_name = map_chr(upstream_vcs_link, ~ {
parts <- str_split(.x, pattern = "/")[[1]]
if (length(parts) >= 1) {
parts[-1]
} else {
NA_character_
}
}))
parts[length(parts)]
contrib_pop_df <- contrib_pop_df |>
mutate(project_name = map_chr(upstream_vcs_link, ~ {
parts <- str_split(.x, pattern = "/")[[1]]
if (length(parts) >= 1) {
parts[length(parts)]
} else {
NA_character_
}
}))
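# basename() gives the same last path component in one call (a sketch, assuming
# upstream_vcs_link has no trailing slash):
contrib_pop_df$project_name <- basename(contrib_pop_df$upstream_vcs_link)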
View(contrib_pop_df)
source("~/Desktop/git/24_deb_gov/R/docChar_outcomes.R")
source("~/Desktop/git/24_deb_gov/R/docChar_outcomes.R")
contrib_total_df <- contrib_pop_df |>
left_join(contrib_readability_df, by="project_name")
View(contrib_total_df)
# test regressions
lm1 <- glm.nb(after_contrib_new ~ word_count, data = contrib_total_df)
# test regressions
library(MASS)
lm1 <- glm.nb(after_contrib_new ~ word_count, data = contrib_total_df)
summary(lm1)
View(contrib_total_df)
contrib_total_df <- contrib_pop_df |>
join(contrib_readability_df, by="project_name")
View(contrib_total_df)
View(contrib_readability_df)
qqnorm(residuals(lm1))
source("~/Desktop/git/24_deb_gov/R/docChar_outcomes.R")
lm1 <- glm.nb(after_contrib_new ~ linsear_write, data = contrib_total_df)
lm1 <- glm.nb(after_contrib_new ~ linsear, data = contrib_total_df)
View(contrib_total_df)
lm1 <- glm.nb(after_contrib_new ~ linsear_write_formula, data = contrib_total_df)
qqnorm(residuals(lm1))
summary(lm1)
lm1 <- glm.nb(after_contrib_new ~ reading_time, data = contrib_total_df)
qqnorm(residuals(lm1))
summary(lm1)
lm1 <- glm.nb(after_contrib_new ~ flesch_reading_ease, data = contrib_total_df)
qqnorm(residuals(lm1))
summary(lm1)
contrib_readability_df <- contrib_readability_df |>
mutate(project_name = map_chr(filename, ~ {
parts <- str_split(.x, pattern = "_")[[1]]
if (length(parts) >= 1) {
head(parts, -1)
} else {
NA_character_
}
}))
parts[1] + parts[2]
contrib_readability_df <- contrib_readability_df |>
mutate(project_name = map_chr(filename, ~ {
parts <- str_split(.x, pattern = "_")[[1]]
if (length(parts) >= 1) {
parts[1] + parts[2]
} else {
NA_character_
}
}))
contrib_readability_df <- contrib_readability_df |>
mutate(project_name = map_chr(filename, ~ {
parts <- str_split(.x, pattern = "_")[[1]]
if (length(parts) >= 1) {
paste(head(parts, -1), collapse="")
} else {
NA_character_
}
}))
View(contrib_readability_df)
#libraries
library(stringr)
contrib_df <- read_csv("../final_data/deb_contrib_did.csv")
contrib_pop_df <- read_csv("../final_data/deb_contrib_pop_change.csv")
contrib_readability_df <- read_csv('../text_analysis/dwo_readability_contributing.csv')
contrib_pop_df <- contrib_pop_df |>
mutate(project_name = map_chr(upstream_vcs_link, ~ {
parts <- str_split(.x, pattern = "/")[[1]]
if (length(parts) >= 1) {
parts[length(parts)]
} else {
NA_character_
}
}))
contrib_readability_df <- contrib_readability_df |>
mutate(project_name = map_chr(filename, ~ {
parts <- str_split(.x, pattern = "_")[[1]]
if (length(parts) >= 1) {
paste(head(parts, -1), collapse="_")
} else {
NA_character_
}
}))
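# The split-and-paste above is equivalent to dropping everything from the last
# underscore onward; a single-regex sketch, assuming every filename contains at
# least one underscore:
contrib_readability_df$project_name <- sub("_[^_]*$", "", contrib_readability_df$filename)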
contrib_total_df <- contrib_pop_df |>
join(contrib_readability_df, by="project_name")
View(contrib_total_df)
# test regressions
library(MASS)
lm1 <- glm.nb(after_contrib_new ~ flesch_reading_ease, data = contrib_total_df)
qqnorm(residuals(lm1))
summary(lm1)
lm1 <- glm.nb(after_contrib_new ~ flesch_reading_ease + age_in_days, data = contrib_total_df)
qqnorm(residuals(lm1))
summary(lm1)
View(contrib_df)
source("~/Desktop/git/24_deb_gov/R/contrib_docChar_outcomes.R")
View(windowed_data)
View(windowed_data)
summed_data <- windowed_data |>
group_by(upstream_vcs_link) |>
summarize(total_ct_after_all = sum(ct_after_all))
summed_data <- windowed_data |>
filter(window="ct_after_all") |>
group_by(upstream_vcs_link) |>
summarize(total_ct_after_all = sum(count))
summed_data <- windowed_data |>
filter(window=="ct_after_all") |>
group_by(upstream_vcs_link) |>
summarize(total_ct_after_all = sum(count))
View(summed_data)
summed_data <- windowed_data |>
filter(window=="ct_after_all") |>
group_by(upstream_vcs_link) |>
mutate(total_ct_after_all = sum(count))
View(summed_data)
summed_data <- windowed_data |>
filter(window=="ct_after_all") |>
group_by(upstream_vcs_link) |>
summarize(total_ct_after_all = sum(count)) |> ungroup()
View(summed_data)
View(windowed_data)
summed_data <- windowed_data |>
filter(window=="ct_after_all") |>
group_by(upstream_vcs_link) |>
summarise_at(vars(count), list(name=sum))
View(summed_data)
summed_data <- windowed_data |>
filter(D==1) |>
group_by(upstream_vcs_link) |>
summarise_at(vars(count), list(summed_count=sum))
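# The same aggregate reads a bit more directly with summarise(); a sketch,
# assuming dplyr >= 1.0 for the .groups argument:
summed_data <- windowed_data |>
  filter(D==1) |>
  group_by(upstream_vcs_link) |>
  summarise(summed_count = sum(count), .groups = "drop")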
View(summed_data)
source("~/Desktop/git/24_deb_gov/R/contrib_docChar_outcomes.R")
contrib_total_df <- contrib_total_df|>
join(summed_data, by=upstream_vcs_link)
contrib_total_df <- contrib_pop_df |>
join(contrib_readability_df, by="project_name")
View(contrib_total_df)
contrib_total_df <- contrib_total_df|>
join(summed_data, by=upstream_vcs_link)
View(summed_data)
contrib_total_df <- contrib_total_df|>
join(summed_data, by="upstream_vcs_link")
View(contrib_total_df)
View(contrib_df)
source("~/Desktop/git/24_deb_gov/R/contrib_docChar_outcomes.R")
#outcome variable that is number of commits by number of new contributors
contrib_total_df$commit_by_contrib = contrib_total_df$summed_count * contrib_total_df$after_contrib_new
# test regressions
library(MASS)
lm1 <- glm.nb(after_contrib_new ~ flesch_reading_ease + age_in_days, data = contrib_total_df)
qqnorm(residuals(lm1))
summary(lm1)
lm1 <- glm.nb(commit_by_contrib ~ flesch_reading_ease + age_in_days, data = contrib_total_df)
qqnorm(residuals(lm1))
summary(lm1)
View(contrib_total_df)
lm1 <- glm.nb(commit_by_contrib ~ word_count, data = contrib_total_df)
qqnorm(residuals(lm1))
summary(lm1)
contrib_total_df$scaled_outcome = scale(contrib_total_df$commit_by_contrib)
lm1 <- glm.nb(scaled_outcome ~ word_count + flesch_kincaid, data = contrib_total_df)
lm1 <- glm.nb(scaled_outcome ~ word_count + flesch_kincaid_grade, data = contrib_total_df)
contrib_total_df$logged_outcome = log1p(contrib_total_df$commit_by_contrib)
# test regressions
library(MASS)
lm1 <- glm.nb(scaled_outcome ~ word_count + flesch_kincaid_grade, data = contrib_total_df)
lm1 <- glm.nb(logged_outcome ~ word_count + flesch_kincaid_grade, data = contrib_total_df)
qqnorm(residuals(lm1))
summary(lm1)
lm1 <- glm.nb(logged_outcome ~ word_count + flesch_kincaid_grade + linsear_write_formula, data = contrib_total_df)
qqnorm(residuals(lm1))
summary(lm1)
contrib_total_df$scaled_outcome = scale(contrib_total_df$commit_by_contrib)
# test regressions
library(MASS)
lm1 <- lm(scaled_outcome ~ word_count + flesch_kincaid_grade + linsear_write_formula, data = contrib_total_df)
qqnorm(residuals(lm1))
summary(lm1)
lm1 <- glm.nb(logged_outcome ~ word_count + flesch_kincaid_grade + linsear_write_formula, data = contrib_total_df)
qqnorm(residuals(lm1))
summary(lm1)
lm1 <- glm.nb(logged_outcome ~ word_count + flesch_kincaid_grade + linsear_write_formula + mcalpine_eflaw, data = contrib_total_df)
qqnorm(residuals(lm1))
summary(lm1)
lm1 <- glm.nb(logged_outcome ~ word_count + flesch_kincaid_grade + linsear_write_formula + mcalpine_eflaw + dale_chall_readability_score, data = contrib_total_df)
qqnorm(residuals(lm1))
summary(lm1)
lm1 <- glm.nb(logged_outcome ~ word_count + dale_chall_readability_score, data = contrib_total_df)
qqnorm(residuals(lm1))
summary(lm1)
lm1 <- glm.nb(logged_outcome ~ word_count + reading_time, data = contrib_total_df)
qqnorm(residuals(lm1))
summary(lm1)
lm1 <- glm.nb(commit_by_contrib ~ word_count + reading_time, data = contrib_total_df)
qqnorm(residuals(lm1))
summary(lm1)
lm1 <- glm.nb(commit_by_contrib ~ word_count + flesch_kincaid_grade, data = contrib_total_df)
qqnorm(residuals(lm1))
summary(lm1)
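# Because glm.nb() uses a log link, exponentiated coefficients can be read as
# incidence rate ratios; a quick sketch for whichever fit is currently in lm1:
exp(cbind(IRR = coef(lm1), confint(lm1)))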
#libraries
library(stringr)
readme_df <- read_csv("../final_data/deb_readme_did.csv")
readme_pop_df <- read_csv("../final_data/deb_readme_pop_change.csv")
readme_readability_df <- read_csv('../text_analysis/dwo_readability_readmeuting.csv')
source("~/Desktop/git/24_deb_gov/R/readme_docChar_outcomes.R")
source("~/Desktop/git/24_deb_gov/R/readme_docChar_outcomes.R")
lm1 <- glm.nb(commit_by_readme ~ word_count + flesch_kincaid_grade, data = readme_total_df)
View(readme_readability_df)
readme_readability_df <- readme_readability_df |>
mutate(project_name = map_chr(filename, ~ {
parts <- str_split(.x, pattern = "_")[[1]]
if (length(parts) >= 1) {
paste(head(parts, -1), collapse="_")
} else {
NA_character_
}
}))
readme_total_df <- readme_pop_df |>
join(readme_readability_df, by="project_name")
readme_total_df <- readme_total_df|>
join(summed_data, by="upstream_vcs_link")
#outcome variable that is number of commits by number of new contributors
readme_total_df$commit_by_readme = readme_total_df$summed_count * readme_total_df$after_readme_new
readme_total_df$logged_outcome = log(readme_total_df$commit_by_readme)
View(readme_total_df)
View(readme_total_df)
#outcome variable that is number of commits by number of new contributors
readme_total_df$commit_by_readme = readme_total_df$summed_count * readme_total_df$after_readme_new
View(readme_total_df)
View(readme_readability_df)
readme_pop_df[readme_pop_df['upstream_vcs_link'] == "https://github.com/agateau/yokadi/issues/new", "project_name"] = "yokadi"
View(readme_pop_df)
readme_pop_df[readme_pop_df['upstream_vcs_link'] == "https://github.com/SciRuby/rb-gsl/issues/new", "project_name"] = "rb-gsl"
source("~/Desktop/git/24_deb_gov/R/readme_docChar_outcomes.R")
readme_readability_df <- readme_readability_df |>
mutate(project_name = map_chr(filename, ~ {
parts <- str_split(.x, pattern = "_")[[1]]
if (length(parts) >= 1) {
paste(head(parts, -1), collapse="_")
} else {
NA_character_
}
}))
readme_readability_df[readme_readability_df['filename'] == "yder_README_8md.html", "project_name"] = "yder"
readme_readability_df[readme_readability_df['filename'] == "pg_filedump.git_README.pg_filedump", "project_name"] = "pg_filedump.git"
readme_readability_df[readme_readability_df['filename'] == "openvas_UPGRADE_README", "project_name"] = "openvas"
readme_readability_df[readme_readability_df['filename'] == "hyphen.git_README_hyph_en_US.txt", "project_name"] = "hyphen.git"
readme_readability_df[readme_readability_df['filename'] == "cycle.git_README_ru.html", "project_name"] = "cycle.git"
readme_readability_df[readme_readability_df['filename'] == "diffuse.git_README_ru", "project_name"] = "diffuse.git"
readme_readability_df[readme_readability_df['filename'] == "CheMPS2_README_8md_source.html", "project_name"] = "CheMPS2"
readme_readability_df[readme_readability_df['filename'] == "sleuthkit_README_win32.txt", "project_name"] = "sleuthkit"
readme_readability_df[readme_readability_df['filename'] == "Lmod_README_lua_modulefiles.txt", "project_name"] = "Lmod"
readme_readability_df[readme_readability_df['filename'] == "engauge_debian_README_for_osx", "project_name"] = "engauge_debian"
readme_total_df <- readme_pop_df |>
join(readme_readability_df, by="project_name")
readme_total_df <- readme_total_df|>
join(summed_data, by="upstream_vcs_link")
#outcome variable that is number of commits by number of new contributors
readme_total_df$commit_by_readme = readme_total_df$summed_count * readme_total_df$after_readme_new
View(readme_total_df)
readme_total_df$logged_outcome = log(readme_total_df$commit_by_readme)
#outcome variable that is number of commits by number of new contributors
readme_total_df$commit_by_readme = readme_total_df$summed_count * readme_total_df$after_readme_new
#outcome variable that is number of commits by number of new contributors
readme_total_df$commit_by_contrib = readme_total_df$summed_count * readme_total_df$after_readme_new
#outcome variable that is number of commits by number of new contributors
readme_total_df$commit_by_contrib = NA
readme_total_df$commit_by_contrib = readme_total_df$summed_count * readme_total_df$after_readme_new
View(readme_total_df)
View(readme_total_df)
readme_total_df$commit_by_contrib = readme_total_df$summed_count * readme_total_df$after_contrib_new
lm1 <- glm.nb(commit_by_contrib ~ word_count + flesch_kincaid_grade, data = readme_total_df)
qqnorm(residuals(lm1))
summary(lm1)
readme_total_df$logged_outcome = log(readme_total_df$commit_by_readme)
readme_total_df$logged_outcome = log(readme_total_df$commit_by_contrib)
lm1 <- glm.nb(commit_by_contrib ~ word_count + flesch_kincaid_grade, data = readme_total_df)
qqnorm(residuals(lm1))
summary(lm1)
lm1 <- glm.nb(summed_count ~ word_count + flesch_kincaid_grade, data = readme_total_df)
qqnorm(residuals(lm1))
summary(lm1)
lm1 <- glm.nb(commit_by_contrib ~ word_count + flesch_kincaid_grade, data = readme_total_df)
qqnorm(residuals(lm1))
summary(lm1)
lm1 <- glm.nb(after_contrib_new ~ word_count + flesch_kincaid_grade, data = readme_total_df)
qqnorm(residuals(lm1))
summary(lm1)
lm1 <- glm.nb(after_contrib_new ~ word_count + reading_time, data = readme_total_df)
qqnorm(residuals(lm1))
summary(lm1)
lm1 <- glm.nb(commit_by_contrib ~ word_count + reading_time, data = readme_total_df)
qqnorm(residuals(lm1))
summary(lm1)
lm1 <- glm.nb(reading_time ~ word_count , data = readme_total_df)
qqnorm(residuals(lm1))
summary(lm1)
View(readme_total_df)
lm1 <- glm.nb(reading_time ~ flesch_reading_ease , data = readme_total_df)
qqnorm(residuals(lm1))
summary(lm1)
lm1 <- glm.nb(flesch_reading_ease ~ reading_time , data = readme_total_df)
lm1 <- glm.nb(commit_by_contrib ~ reading_time , data = readme_total_df)
qqnorm(residuals(lm1))
summary(lm1)
lm1 <- glm.nb(commit_by_contrib ~ reading_time + linsear_write_formula , data = readme_total_df)
qqnorm(residuals(lm1))
summary(lm1)
readme_total_df$commit_by_contrib = readme_total_df$summed_count * (readme_total_df$after_contrib_new + 1)
readme_total_df$logged_outcome = log(readme_total_df$commit_by_contrib)
lm1 <- glm.nb(commit_by_contrib ~ reading_time + linsear_write_formula , data = readme_total_df)
qqnorm(residuals(lm1))
summary(lm1)
readme_total_df$logged_outcome = log1p(readme_total_df$commit_by_contrib)
lm1 <- glm.nb(logged_outcome ~ reading_time + linsear_write_formula , data = readme_total_df)
qqnorm(residuals(lm1))
summary(lm1)
lm1 <- glm.nb(logged_outcome ~ reading_time + linsear_write_formula + flesch_reading_ease, data = readme_total_df)
qqnorm(residuals(lm1))
summary(lm1)
lm1 <- glm.nb(logged_outcome ~ reading_time + linsear_write_formula + flesch_reading_ease + mcalpine_eflaw, data = readme_total_df)
qqnorm(residuals(lm1))
summary(lm1)
lm1 <- glm.nb(summed_count~ reading_time + linsear_write_formula + flesch_reading_ease + mcalpine_eflaw, data = readme_total_df)
qqnorm(residuals(lm1))
summary(lm1)
lm1 <- glm.nb(summed_count~ reading_time + linsear_write_formula + flesch_reading_ease + mcalpine_eflaw + word_count, data = readme_total_df)
qqnorm(residuals(lm1))
summary(lm1)
lm1 <- glm.nb(logged_outcome~ reading_time + linsear_write_formula + flesch_reading_ease + mcalpine_eflaw + word_count, data = readme_total_df)
qqnorm(residuals(lm1))
summary(lm1)
source("~/Desktop/git/24_deb_gov/R/contrib_docChar_outcomes.R")
lm1 <- glm.nb(logged_outcome~ reading_time + linsear_write_formula + flesch_reading_ease + mcalpine_eflaw + word_count, data = contrib_total_df)
contrib_total_df$logged_outcome = log1p(contrib_total_df$commit_by_contrib)
lm1 <- glm.nb(logged_outcome ~ reading_time + linsear_write_formula + flesch_reading_ease + mcalpine_eflaw + word_count, data = contrib_total_df)
qqnorm(residuals(lm1))
summary(lm1)
lm1 <- glm.nb(summed_count ~ reading_time + linsear_write_formula + flesch_reading_ease + mcalpine_eflaw + word_count, data = contrib_total_df)
qqnorm(residuals(lm1))
summary(lm1)

Binary file not shown.


View File

@@ -3,8 +3,6 @@ library(plyr)
#get the contrib data instead
try(setwd(dirname(rstudioapi::getActiveDocumentContext()$path)))
contrib_df <- read_csv("../final_data/deb_contrib_did.csv")
contrib_df <- contrib_df |>
filter(event_gap >= 0)
#some preprocessing and expansion
col_order <- c("upstream_vcs_link", "age_in_days", "first_commit", "first_commit_dt", "event_gap", "event_date", "event_hash", "before_all_ct", "after_all_ct", "before_mrg_ct", "after_mrg_ct", "before_auth_new", "after_auth_new", "before_commit_new", "after_commit_new")
contrib_df <- contrib_df[,col_order]

R/contrib_docChar_outcomes.R Normal file
View File

@@ -0,0 +1,77 @@
#libraries
library(plyr)       # ddply(), join(); attached before tidyverse so dplyr verbs take precedence
library(tidyverse)  # read_csv(), map_chr(), pivot_longer(), dplyr verbs
library(stringr)
contrib_df <- read_csv("../final_data/deb_contrib_did.csv")
contrib_pop_df <- read_csv("../final_data/deb_contrib_pop_change.csv")
contrib_readability_df <- read_csv('../text_analysis/dwo_readability_contributing.csv')
#get the contribution count
#some preprocessing and expansion
col_order <- c("upstream_vcs_link", "age_in_days", "first_commit", "first_commit_dt", "event_gap", "event_date", "event_hash", "before_all_ct", "after_all_ct", "before_mrg_ct", "after_mrg_ct", "before_auth_new", "after_auth_new", "before_commit_new", "after_commit_new")
contrib_df <- contrib_df[,col_order]
contrib_df$ct_before_all <- str_split(gsub("[][]","", contrib_df$before_all_ct), ", ")
contrib_df$ct_after_all <- str_split(gsub("[][]","", contrib_df$after_all_ct), ", ")
contrib_df$ct_before_mrg <- str_split(gsub("[][]","", contrib_df$before_mrg_ct), ", ")
contrib_df$ct_after_mrg <- str_split(gsub("[][]","", contrib_df$after_mrg_ct), ", ")
drop <- c("before_all_ct", "before_mrg_ct", "after_all_ct", "after_mrg_ct")
contrib_df = contrib_df[,!(names(contrib_df) %in% drop)]
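# For reference, what the gsub()/str_split() pair above does to one serialized
# count list (a toy string, not a row from the data):
str_split(gsub("[][]", "", "[3, 0, 5]"), ", ")[[1]]  # "3" "0" "5"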
# 2. some expansion needs to happen for each project
expand_timeseries <- function(project_row) {
longer <- project_row |>
pivot_longer(cols = starts_with("ct"),
names_to = "window",
values_to = "count") |>
unnest(count)
longer$observation_type <- gsub("^.*_", "", longer$window)
longer <- ddply(longer, "observation_type", transform, week=seq(from=0, by=1, length.out=length(observation_type)))
longer$count <- as.numeric(longer$count)
#longer <- longer[which(longer$observation_type == "all"),]
return(longer)
}
expanded_data <- expand_timeseries(contrib_df[1,])
for (i in 2:nrow(contrib_df)){
expanded_data <- rbind(expanded_data, expand_timeseries(contrib_df[i,]))
}
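# Equivalent to the rbind loop above as a purrr one-liner (a sketch, assuming
# purrr is attached via tidyverse):
expanded_data <- purrr::map_dfr(seq_len(nrow(contrib_df)),
                                ~ expand_timeseries(contrib_df[.x, ]))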
#filter down to the window of weeks around the event that we're looking at
window_num <- 8
windowed_data <- expanded_data |>
filter(week >= (27 - window_num) & week <= (27 + window_num)) |>
mutate(D = ifelse(week > 27, 1, 0))
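# Quick sanity check (a sketch) that the D indicator splits the window as
# intended, with week > 27 treated as post-event:
table(windowed_data$D, windowed_data$week > 27)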
summed_data <- windowed_data |>
filter(D==1) |>
group_by(upstream_vcs_link) |>
summarise_at(vars(count), list(summed_count=sum))
#concat dataframes into central data
contrib_pop_df <- contrib_pop_df |>
mutate(project_name = map_chr(upstream_vcs_link, ~ {
parts <- str_split(.x, pattern = "/")[[1]]
if (length(parts) >= 1) {
parts[length(parts)]
} else {
NA_character_
}
}))
contrib_readability_df <- contrib_readability_df |>
mutate(project_name = map_chr(filename, ~ {
parts <- str_split(.x, pattern = "_")[[1]]
if (length(parts) >= 1) {
paste(head(parts, -1), collapse="_")
} else {
NA_character_
}
}))
contrib_total_df <- contrib_pop_df |>
join(contrib_readability_df, by="project_name")
contrib_total_df <- contrib_total_df|>
join(summed_data, by="upstream_vcs_link")
#outcome variable that is number of commits by number of new contributors
contrib_total_df$commit_by_contrib = contrib_total_df$summed_count * contrib_total_df$after_contrib_new
contrib_total_df$logged_outcome = log1p(contrib_total_df$commit_by_contrib)
# test regressions
library(MASS)
lm1 <- glm.nb(summed_count ~ reading_time + linsear_write_formula + flesch_reading_ease + mcalpine_eflaw + word_count, data = contrib_total_df)
qqnorm(residuals(lm1))
summary(lm1)

R/readme_docChar_outcomes.R Normal file
View File

@@ -0,0 +1,101 @@
#libraries
library(plyr)       # ddply(), join(); attached before tidyverse so dplyr verbs take precedence
library(tidyverse)  # read_csv(), map_chr(), pivot_longer(), dplyr verbs
library(stringr)
readme_df <- read_csv("../final_data/deb_readme_did.csv")
readme_pop_df <- read_csv("../final_data/deb_readme_pop_change.csv")
readme_readability_df <- read_csv('../text_analysis/dwo_readability_readme.csv')
#get the contribution count
#some preprocessing and expansion
col_order <- c("upstream_vcs_link", "age_in_days", "first_commit", "first_commit_dt", "event_gap", "event_date", "event_hash", "before_all_ct", "after_all_ct", "before_mrg_ct", "after_mrg_ct", "before_auth_new", "after_auth_new", "before_commit_new", "after_commit_new")
readme_df <- readme_df[,col_order]
readme_df$ct_before_all <- str_split(gsub("[][]","", readme_df$before_all_ct), ", ")
readme_df$ct_after_all <- str_split(gsub("[][]","", readme_df$after_all_ct), ", ")
readme_df$ct_before_mrg <- str_split(gsub("[][]","", readme_df$before_mrg_ct), ", ")
readme_df$ct_after_mrg <- str_split(gsub("[][]","", readme_df$after_mrg_ct), ", ")
drop <- c("before_all_ct", "before_mrg_ct", "after_all_ct", "after_mrg_ct")
readme_df = readme_df[,!(names(readme_df) %in% drop)]
# 2. some expansion needs to happen for each project
expand_timeseries <- function(project_row) {
longer <- project_row |>
pivot_longer(cols = starts_with("ct"),
names_to = "window",
values_to = "count") |>
unnest(count)
longer$observation_type <- gsub("^.*_", "", longer$window)
longer <- ddply(longer, "observation_type", transform, week=seq(from=0, by=1, length.out=length(observation_type)))
longer$count <- as.numeric(longer$count)
#longer <- longer[which(longer$observation_type == "all"),]
return(longer)
}
expanded_data <- expand_timeseries(readme_df[1,])
for (i in 2:nrow(readme_df)){
expanded_data <- rbind(expanded_data, expand_timeseries(readme_df[i,]))
}
#filter down to the window of weeks around the event that we're looking at
window_num <- 8
windowed_data <- expanded_data |>
filter(week >= (27 - window_num) & week <= (27 + window_num)) |>
mutate(D = ifelse(week > 27, 1, 0))
summed_data <- windowed_data |>
filter(D==1) |>
group_by(upstream_vcs_link) |>
summarise_at(vars(count), list(summed_count=sum))
#concat dataframes into central data
readme_pop_df <- readme_pop_df |>
mutate(project_name = map_chr(upstream_vcs_link, ~ {
parts <- str_split(.x, pattern = "/")[[1]]
if (length(parts) >= 1) {
parts[length(parts)]
} else {
NA_character_
}
}))
readme_pop_df[readme_pop_df['upstream_vcs_link'] == "https://github.com/agateau/yokadi/issues/new", "project_name"] = "yokadi"
readme_pop_df[readme_pop_df['upstream_vcs_link'] == "http://github.com/voloko/twitter-stream/issues/new", "project_name"] = "twitter-stream"
readme_pop_df[readme_pop_df['upstream_vcs_link'] == "https://github.com/ai/autoprefixer-rails/issues/new", "project_name"] = "autoprefixer-rails"
readme_pop_df[readme_pop_df['upstream_vcs_link'] == "https://github.com/aquasync/ruby-ole/issues/new", "project_name"] = "ruby-ole"
readme_pop_df[readme_pop_df['upstream_vcs_link'] == "https://github.com/bluemonk/ipaddress/issues/new", "project_name"] = "ipaddress"
readme_pop_df[readme_pop_df['upstream_vcs_link'] == "https://github.com/ccocchi/rabl-rails/issues/new", "project_name"] = "rabl-rails"
readme_pop_df[readme_pop_df['upstream_vcs_link'] == "https://github.com/dejan/espeak-ruby/issues/new", "project_name"] = "espeak-ruby"
readme_pop_df[readme_pop_df['upstream_vcs_link'] == "https://github.com/hadley/plyr/issues/new", "project_name"] = "plyr"
readme_pop_df[readme_pop_df['upstream_vcs_link'] == "https://github.com/jfelchner/ruby-progressbar/issues/new", "project_name"] = "ruby-progressbar"
readme_pop_df[readme_pop_df['upstream_vcs_link'] == "https://github.com/sargon/trayer-srg/issues/new", "project_name"] = "trayer-srg"
readme_pop_df[readme_pop_df['upstream_vcs_link'] == "https://github.com/SciRuby/rb-gsl/issues/new", "project_name"] = "rb-gsl"
readme_pop_df[readme_pop_df['upstream_vcs_link'] == "https://github.com/solvespace/solvespace/issues/new", "project_name"] = "solvespace"
readme_pop_df[readme_pop_df['upstream_vcs_link'] == "https://github.com/walling/unorm/issues/new", "project_name"] = "unorm"
readme_readability_df <- readme_readability_df |>
mutate(project_name = map_chr(filename, ~ {
parts <- str_split(.x, pattern = "_")[[1]]
if (length(parts) >= 1) {
paste(head(parts, -1), collapse="_")
} else {
NA_character_
}
}))
readme_readability_df[readme_readability_df['filename'] == "yder_README_8md.html", "project_name"] = "yder"
readme_readability_df[readme_readability_df['filename'] == "pg_filedump.git_README.pg_filedump", "project_name"] = "pg_filedump.git"
readme_readability_df[readme_readability_df['filename'] == "openvas_UPGRADE_README", "project_name"] = "openvas"
readme_readability_df[readme_readability_df['filename'] == "hyphen.git_README_hyph_en_US.txt", "project_name"] = "hyphen.git"
readme_readability_df[readme_readability_df['filename'] == "cycle.git_README_ru.html", "project_name"] = "cycle.git"
readme_readability_df[readme_readability_df['filename'] == "diffuse.git_README_ru", "project_name"] = "diffuse.git"
readme_readability_df[readme_readability_df['filename'] == "CheMPS2_README_8md_source.html", "project_name"] = "CheMPS2"
readme_readability_df[readme_readability_df['filename'] == "sleuthkit_README_win32.txt", "project_name"] = "sleuthkit"
readme_readability_df[readme_readability_df['filename'] == "Lmod_README_lua_modulefiles.txt", "project_name"] = "Lmod"
readme_readability_df[readme_readability_df['filename'] == "engauge_debian_README_for_osx", "project_name"] = "engauge_debian"
readme_total_df <- readme_pop_df |>
join(readme_readability_df, by="project_name")
readme_total_df <- readme_total_df|>
join(summed_data, by="upstream_vcs_link")
#outcome variable that is number of commits by number of new contributors
readme_total_df$commit_by_contrib = NA
readme_total_df$commit_by_contrib = readme_total_df$summed_count * (readme_total_df$after_contrib_new + 1)
readme_total_df$logged_outcome = log1p(readme_total_df$commit_by_contrib)
# test regressions
library(MASS)
lm1 <- glm.nb(logged_outcome ~ reading_time + linsear_write_formula + flesch_reading_ease + mcalpine_eflaw + word_count, data = readme_total_df)
qqnorm(residuals(lm1))
summary(lm1)