update simulation and mle code

parent 47e9367ed5
commit 979dc14b68
@@ -125,7 +125,7 @@ simulate_data <- function(N, m, B0, Bxy, Bzx, Bzy, seed, y_explained_variance=0.
 
 parser <- arg_parser("Simulate data and fit corrected models")
 parser <- add_argument(parser, "--N", default=1000, help="number of observations of w")
-aparser <- add_argument(parser, "--m", default=500, help="m the number of ground truth observations")
+parser <- add_argument(parser, "--m", default=500, help="m the number of ground truth observations")
 parser <- add_argument(parser, "--seed", default=51, help='seed for the rng')
 parser <- add_argument(parser, "--outfile", help='output file', default='example_2.feather')
 parser <- add_argument(parser, "--y_explained_variance", help='what proportion of the variance of y can be explained?', default=0.1)

@@ -70,7 +70,7 @@ parser <- add_argument(parser, "--N", default=1000, help="number of observations
 parser <- add_argument(parser, "--m", default=500, help="m the number of ground truth observations")
 parser <- add_argument(parser, "--seed", default=17, help='seed for the rng')
 parser <- add_argument(parser, "--outfile", help='output file', default='example_2.feather')
-parser <- add_argument(parser, "--y_explained_variance", help='what proportion of the variance of y can be explained?', default=0.005)
+parser <- add_argument(parser, "--y_explained_variance", help='what proportion of the variance of y can be explained?', default=0.1)
 parser <- add_argument(parser, "--prediction_accuracy", help='how accurate is the predictive model?', default=0.72)
 ## parser <- add_argument(parser, "--x_bias_y1", help='how is the classifier biased when y = 1?', default=-0.75)
 ## parser <- add_argument(parser, "--x_bias_y0", help='how is the classifier biased when y = 0 ?', default=0.75)
							
								
								
									
simulations/05_irr_indep.R  (new file, 113 lines)
@@ -0,0 +1,113 @@
|  | ### EXAMPLE 2_b: demonstrates how measurement error can lead to a sign | ||||||
|  | ### error in a covariate. This is the same as example 2, only | ||||||
|  | ### instead of x->k we have k->x.  Even when you have a good | ||||||
|  | ### predictor, if it is biased against a covariate you can get the | ||||||
|  | ### wrong sign, even when you include the proxy variable in the | ||||||
|  | ### regression.  But with some ground truth and multiple imputation, | ||||||
|  | ### you can fix it. | ||||||
|  | 
 | ||||||
|  | library(argparser) | ||||||
|  | library(mecor) | ||||||
|  | library(ggplot2) | ||||||
|  | library(data.table) | ||||||
|  | library(filelock) | ||||||
|  | library(arrow) | ||||||
|  | library(Amelia) | ||||||
|  | library(Zelig) | ||||||
|  | 
 | ||||||
|  | library(predictionError) | ||||||
|  | options(amelia.parallel="no", amelia.ncpus=1) | ||||||
|  | 
 | ||||||
|  | source("irr_simulation_base.R") | ||||||
|  | 
 | ||||||
|  | ## SETUP: | ||||||
|  | ### we want to estimate x -> y; x is MAR | ||||||
|  | ### we have x -> k; k -> w; the induced x -> w relationship is used to predict x via the model w. | ||||||
|  | ### A realistic scenario is that we have an NLP model predicting something like "racial harassment" in social media comments | ||||||
|  | ### The labels x are binary, but the model provides a continuous predictor | ||||||
|  | 
 | ||||||
|  | ### simulation: | ||||||
|  | #### how much power do we get from the model in the first place? (sweeping N and m) | ||||||
|  | ####  | ||||||
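|  | #### A rough summary of the data generating process implemented by simulate_data() below: | ||||||
|  | ####   z ~ Bernoulli(0.5);  x ~ Bernoulli(plogis(Bzx * z)) | ||||||
|  | ####   y = Bxy * x + Bzy * z + noise, with the noise variance set so y_explained_variance is the share of var(y) explained | ||||||
|  | ####   two coders label x on the m ground-truth rows, each correct with probability coder_accuracy | ||||||
|  | ####   w is a continuous proxy of x calibrated so that w_pred = (w > 0.5) agrees with x with probability prediction_accuracy | ||||||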
|  | 
 | ||||||
|  | simulate_data <- function(N, m, B0=0, Bxy=0.2, Bzy=-0.2, Bzx=0.2, y_explained_variance=0.025, prediction_accuracy=0.73, coder_accuracy=0.9, seed=1){ | ||||||
|  |     set.seed(seed) | ||||||
|  |     z <- rbinom(N, 1, 0.5) | ||||||
|  |                                         #    x.var.epsilon <- var(Bzx *z) * ((1-zx_explained_variance)/zx_explained_variance) | ||||||
|  |     xprime <- Bzx * z #+ x.var.epsilon | ||||||
|  |     x <- rbinom(N,1,plogis(xprime)) | ||||||
|  | 
 | ||||||
|  |     y.var.epsilon <- (var(Bzy * z) + var(Bxy *x) + 2*cov(Bxy*x,Bzy*z)) * ((1-y_explained_variance)/y_explained_variance) | ||||||
|  |     y.epsilon <- rnorm(N, sd = sqrt(y.var.epsilon)) | ||||||
|  |     y <- Bzy * z + Bxy * x + y.epsilon | ||||||
|  | 
 | ||||||
|  |     df <- data.table(x=x,y=y,z=z) | ||||||
|  | 
 | ||||||
|  |     if(m < N){ | ||||||
|  |         df <- df[sample(nrow(df), m), x.obs := x] | ||||||
|  |     } else { | ||||||
|  |         df <- df[, x.obs := x] | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     df[ (!is.na(x.obs)) ,x.obs.0 := abs(x.obs - rbinom(.N, 1, 1-coder_accuracy))] | ||||||
|  |     df[ (!is.na(x.obs)) ,x.obs.1 := abs(x.obs - rbinom(.N, 1, 1-coder_accuracy))] | ||||||
|  |      | ||||||
|  | 
 | ||||||
|  |     ## how can you make a model with a specific accuracy? | ||||||
|  |     w0 =(1-x)**2 + (-1)**(1-x) * prediction_accuracy | ||||||
|  | 
 | ||||||
|  |     ## how can you make a model with a specific accuracy, but with a continuous latent score? | ||||||
|  |     ## add mean-zero logistic noise to the odds, so each point is misclassified with the same probability. | ||||||
|  |      | ||||||
|  |     w.noisey.odds = rlogis(N,qlogis(w0)) | ||||||
|  |     df[,w := plogis(w.noisey.odds)] | ||||||
|  |     df[,w_pred:=as.integer(w > 0.5)] | ||||||
|  |     (mean(df$x==df$w_pred)) | ||||||
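|  |     ## sanity check: w0 is prediction_accuracy when x == 1 and 1 - prediction_accuracy when x == 0, | ||||||
|  |     ## and w > 0.5 exactly when rlogis(N, qlogis(w0)) > 0, which happens with probability w0, | ||||||
|  |     ## so the agreement printed above should be close to prediction_accuracy. | ||||||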
|  |     return(df) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | parser <- arg_parser("Simulate data and fit corrected models") | ||||||
|  | parser <- add_argument(parser, "--N", default=1000, help="number of observations of w") | ||||||
|  | parser <- add_argument(parser, "--m", default=500, help="m the number of ground truth observations") | ||||||
|  | parser <- add_argument(parser, "--seed", default=57, help='seed for the rng') | ||||||
|  | parser <- add_argument(parser, "--outfile", help='output file', default='example_1.feather') | ||||||
|  | parser <- add_argument(parser, "--y_explained_variance", help='what proportion of the variance of y can be explained?', default=0.05) | ||||||
|  | # parser <- add_argument(parser, "--zx_explained_variance", help='what proportion of the variance of x can be explained by z?', default=0.3) | ||||||
|  | parser <- add_argument(parser, "--prediction_accuracy", help='how accurate is the predictive model?', default=0.73) | ||||||
|  | parser <- add_argument(parser, "--coder_accuracy", help='how accurate are the coders?', default=0.8) | ||||||
|  | parser <- add_argument(parser, "--outcome_formula", help='formula for the outcome variable', default="y~x+z") | ||||||
|  | parser <- add_argument(parser, "--proxy_formula", help='formula for the proxy variable', default="w_pred~x") | ||||||
|  | 
 | ||||||
|  | # parser <- add_argument(parser, "--rater_formula", help='formula for the true variable', default="x.obs~x") | ||||||
|  | parser <- add_argument(parser, "--truth_formula", help='formula for the true variable', default="x~z") | ||||||
|  | parser <- add_argument(parser, "--Bzx", help='Effect of z on x', default=-0.3) | ||||||
|  | parser <- add_argument(parser, "--Bzy", help='Effect of z on y', default=-0.3) | ||||||
|  | parser <- add_argument(parser, "--Bxy", help='Effect of x on y', default=0.3) | ||||||
|  | 
 | ||||||
|  | args <- parse_args(parser) | ||||||
|  | B0 <- 0 | ||||||
|  | Bxy <- args$Bxy | ||||||
|  | Bzy <- args$Bzy | ||||||
|  | Bzx <- args$Bzx | ||||||
|  | 
 | ||||||
|  | if (args$m < args$N){ | ||||||
|  | 
 | ||||||
|  |     df <- simulate_data(args$N, args$m, B0, Bxy, Bzy, Bzx, seed=args$seed + 500, y_explained_variance = args$y_explained_variance,  prediction_accuracy=args$prediction_accuracy, coder_accuracy=args$coder_accuracy) | ||||||
|  | 
 | ||||||
|  |     result <- list('N'=args$N,'m'=args$m,'B0'=B0,'Bxy'=Bxy, 'Bzx'=Bzx, 'Bzy'=Bzy, 'seed'=args$seed, 'y_explained_variance'=args$y_explained_variance, 'prediction_accuracy'=args$prediction_accuracy, 'outcome_formula'=args$outcome_formula, 'proxy_formula'=args$proxy_formula, 'truth_formula'=args$truth_formula, 'coder_accuracy'=args$coder_accuracy, error='') | ||||||
|  | 
 | ||||||
|  |     outline <- run_simulation(df, result, outcome_formula=as.formula(args$outcome_formula), proxy_formula=as.formula(args$proxy_formula), truth_formula=as.formula(args$truth_formula)) | ||||||
|  |      | ||||||
|  |     outfile_lock <- lock(paste0(args$outfile, '_lock'),exclusive=TRUE) | ||||||
|  |     if(file.exists(args$outfile)){ | ||||||
|  |         logdata <- read_feather(args$outfile) | ||||||
|  |         logdata <- rbind(logdata,as.data.table(outline),fill=TRUE) | ||||||
|  |     } else { | ||||||
|  |         logdata <- as.data.table(outline) | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     print(outline) | ||||||
|  |     write_feather(logdata, args$outfile) | ||||||
|  |     unlock(outfile_lock) | ||||||
|  | } | ||||||
							
								
								
									
simulations/06_irr_dv.R  (new file, 99 lines)
@@ -0,0 +1,99 @@
|  | 
 | ||||||
|  | library(argparser) | ||||||
|  | library(mecor) | ||||||
|  | library(ggplot2) | ||||||
|  | library(data.table) | ||||||
|  | library(filelock) | ||||||
|  | library(arrow) | ||||||
|  | library(Amelia) | ||||||
|  | library(Zelig) | ||||||
|  | library(predictionError) | ||||||
|  | options(amelia.parallel="no", | ||||||
|  |         amelia.ncpus=1) | ||||||
|  | setDTthreads(40) | ||||||
|  | 
 | ||||||
|  | source("irr_dv_simulation_base.R") | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | ## one way to do it is by adding correlation to x.obs and y that isn't in w. | ||||||
|  | ## in other words, the model is missing an important feature of x.obs that's related to y. | ||||||
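|  | ## Roughly, the data generating process below: y ~ Bernoulli(plogis(B0 + Bxy*x + Bzy*z)); | ||||||
|  | ## two coders label y on the m ground-truth rows, each correct with probability coder_accuracy; | ||||||
|  | ## w is a noisy proxy of y whose thresholded prediction w_pred agrees with y with probability prediction_accuracy. | ||||||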
|  | simulate_data <- function(N, m, B0, Bxy, Bzy, seed, prediction_accuracy=0.73, coder_accuracy=0.8){ | ||||||
|  |     set.seed(seed) | ||||||
|  | 
 | ||||||
|  |     # make w and y dependent | ||||||
|  |     z <- rbinom(N, 1, 0.5) | ||||||
|  |     x <- rbinom(N, 1, 0.5) | ||||||
|  | 
 | ||||||
|  |     ystar <- Bzy * z + Bxy * x + B0 | ||||||
|  |     y <- rbinom(N,1,plogis(ystar)) | ||||||
|  | 
 | ||||||
|  |     # glm(y ~ x + z, family="binomial") | ||||||
|  | 
 | ||||||
|  |     df <- data.table(x=x,y=y,ystar=ystar,z=z) | ||||||
|  | 
 | ||||||
|  |     if(m < N){ | ||||||
|  |         df <- df[sample(nrow(df), m), y.obs := y] | ||||||
|  |     } else { | ||||||
|  |         df <- df[, y.obs := y] | ||||||
|  |     } | ||||||
|  |      | ||||||
|  |     df[ (!is.na(y.obs)) ,y.obs.0 := abs(y.obs - rbinom(.N, 1, 1-coder_accuracy))] | ||||||
|  |     df[ (!is.na(y.obs)) ,y.obs.1 := abs(y.obs - rbinom(.N, 1, 1-coder_accuracy))] | ||||||
|  | 
 | ||||||
|  |     odds.y1 <- qlogis(prediction_accuracy) | ||||||
|  |     odds.y0 <- qlogis(prediction_accuracy,lower.tail=F) | ||||||
|  | 
 | ||||||
|  |     df[y==0,w:=plogis(rlogis(.N,odds.y0))] | ||||||
|  |     df[y==1,w:=plogis(rlogis(.N,odds.y1))] | ||||||
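|  |     ## by construction plogis(odds.y1) = prediction_accuracy and plogis(odds.y0) = 1 - prediction_accuracy, | ||||||
|  |     ## so P(w > 0.5 | y = 1) = prediction_accuracy and P(w > 0.5 | y = 0) = 1 - prediction_accuracy. | ||||||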
|  | 
 | ||||||
|  |     df[,w_pred := as.integer(w > 0.5)] | ||||||
|  | 
 | ||||||
|  |     print(mean(df[x==0]$y == df[x==0]$w_pred)) | ||||||
|  |     print(mean(df[x==1]$y == df[x==1]$w_pred)) | ||||||
|  |     print(mean(df$w_pred == df$y)) | ||||||
|  |     return(df) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | parser <- arg_parser("Simulate data and fit corrected models") | ||||||
|  | parser <- add_argument(parser, "--N", default=1000, help="number of observations of w") | ||||||
|  | parser <- add_argument(parser, "--m", default=500, help="m the number of ground truth observations") | ||||||
|  | parser <- add_argument(parser, "--seed", default=17, help='seed for the rng') | ||||||
|  | parser <- add_argument(parser, "--outfile", help='output file', default='example_2.feather') | ||||||
|  | parser <- add_argument(parser, "--y_explained_variance", help='what proportion of the variance of y can be explained?', default=0.005) | ||||||
|  | parser <- add_argument(parser, "--prediction_accuracy", help='how accurate is the predictive model?', default=0.72) | ||||||
|  | ## parser <- add_argument(parser, "--x_bias_y1", help='how is the classifier biased when y = 1?', default=-0.75) | ||||||
|  | ## parser <- add_argument(parser, "--x_bias_y0", help='how is the classifier biased when y = 0 ?', default=0.75) | ||||||
|  | parser <- add_argument(parser, "--Bxy", help='coefficient of x on y', default=0.3) | ||||||
|  | parser <- add_argument(parser, "--Bzy", help='coefficient of z on y', default=-0.3) | ||||||
|  | parser <- add_argument(parser, "--outcome_formula", help='formula for the outcome variable', default="y~x+z") | ||||||
|  | parser <- add_argument(parser, "--proxy_formula", help='formula for the proxy variable', default="w_pred~y") | ||||||
|  | parser <- add_argument(parser, "--coder_accuracy", help='How accurate are the coders?', default=0.8) | ||||||
|  | 
 | ||||||
|  | args <- parse_args(parser) | ||||||
|  | 
 | ||||||
|  | B0 <- 0 | ||||||
|  | Bxy <- args$Bxy | ||||||
|  | Bzy <- args$Bzy | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | if(args$m < args$N){ | ||||||
|  |     df <- simulate_data(args$N, args$m, B0, Bxy, Bzy, args$seed, args$prediction_accuracy, args$coder_accuracy) | ||||||
|  | 
 | ||||||
|  | #    result <- list('N'=args$N,'m'=args$m,'B0'=B0,'Bxy'=Bxy,'Bzy'=Bzy, 'seed'=args$seed, 'y_explained_variance'=args$y_explained_variance, 'prediction_accuracy'=args$prediction_accuracy, 'x_bias_y0'=args$x_bias_y0,'x_bias_y1'=args$x_bias_y1,'outcome_formula' = args$outcome_formula, 'proxy_formula' = args$proxy_formula) | ||||||
|  |     result <- list('N'=args$N,'m'=args$m,'B0'=B0,'Bxy'=Bxy,'Bzy'=Bzy, 'seed'=args$seed, 'y_explained_variance'=args$y_explained_variance, 'prediction_accuracy'=args$prediction_accuracy, 'outcome_formula' = args$outcome_formula, 'proxy_formula' = args$proxy_formula) | ||||||
|  | 
 | ||||||
|  |     outline <- run_simulation_depvar(df, result, outcome_formula = as.formula(args$outcome_formula), proxy_formula = as.formula(args$proxy_formula)) | ||||||
|  | 
 | ||||||
|  |     outfile_lock <- lock(paste0(args$outfile, '_lock'),exclusive=TRUE) | ||||||
|  | 
 | ||||||
|  |     if(file.exists(args$outfile)){ | ||||||
|  |         logdata <- read_feather(args$outfile) | ||||||
|  |         logdata <- rbind(logdata,as.data.table(outline),fill=TRUE) | ||||||
|  |     } else { | ||||||
|  |         logdata <- as.data.table(outline) | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     print(outline) | ||||||
|  |     write_feather(logdata, args$outfile) | ||||||
|  |     unlock(outfile_lock) | ||||||
|  | } | ||||||
@@ -1,12 +1,12 @@
 
 SHELL=bash
 
-Ns=[1000, 2000, 4000, 8000]
-ms=[100, 200, 400, 800]
-seeds=[$(shell seq -s, 1 100)]
+Ns=[1000, 2000, 4000]
+ms=[200, 400, 800]
+seeds=[$(shell seq -s, 1 250)]
 explained_variances=[0.1]
 
-all:remembr.RDS
+all:remembr.RDS remember_irr.RDS
 
 srun=srun -A comdata -p compute-bigmem --time=6:00:00 --mem 4G -c 1
 
@@ -31,7 +31,7 @@ example_1.feather: example_1_jobs
 #	sbatch --wait --verbose --array=3001-6001 run_simulation.sbatch 0 example_1_jobs
 
 example_2_jobs: 02_indep_differential.R simulation_base.R
-	grid_sweep.py --command "Rscript 02_indep_differential.R" --arg_dict '{"N":${Ns},"m":${ms}, "seed":${seeds}, "outfile":["example_2.feather"],"y_explained_variance":${explained_variances},  "Bzy":[-0.3],"Bxy":[0.3],"Bzx":[0.3], "outcome_formula":["y~x+z"], "proxy_formula":["w_pred~y*z*x"], "truth_formula":["x~z"]}' --outfile example_2_jobs
+	grid_sweep.py --command "Rscript 02_indep_differential.R" --arg_dict '{"N":${Ns},"m":${ms}, "seed":${seeds}, "outfile":["example_2.feather"],"y_explained_variance":${explained_variances},  "Bzy":[-0.3],"Bxy":[0.3],"Bzx":[0.3], "outcome_formula":["y~x+z"], "proxy_formula":["w_pred~y*z*x"]}' --outfile example_2_jobs
 
 example_2.feather: example_2_jobs 
 	rm -f example_2.feather
@@ -59,6 +59,7 @@ example_4.feather: example_4_jobs
 	rm -f example_4.feather	
 	sbatch --wait --verbose --array=1-$(shell cat example_4_jobs | wc -l)  run_simulation.sbatch 0 example_4_jobs
 
+
 remembr.RDS:example_1.feather example_2.feather example_3.feather example_4.feather plot_example.R plot_dv_example.R
 	rm -f remembr.RDS
 	${srun} Rscript plot_example.R --infile example_1.feather --name "plot.df.example.1"
@@ -66,6 +67,32 @@ remembr.RDS:example_1.feather example_2.feather example_3.feather example_4.feat
 	${srun} Rscript plot_dv_example.R --infile example_3.feather --name "plot.df.example.3"
 	${srun} Rscript plot_dv_example.R --infile example_4.feather --name "plot.df.example.4"
 
+
+irr_Ns = ${Ns}
+irr_ms = ${ms}
+irr_seeds=${seeds}
+irr_explained_variances=${explained_variances}
+
+example_5_jobs: 05_irr_indep.R irr_simulation_base.R
+	grid_sweep.py --command "Rscript 05_irr_indep.R" --arg_dict '{"N":${irr_Ns},"m":${irr_ms}, "seed":${irr_seeds}, "outfile":["example_5.feather"], "y_explained_variance":${irr_explained_variances}}' --outfile example_5_jobs
+
+example_5.feather:example_5_jobs
+	rm -f example_5.feather
+	sbatch --wait --verbose --array=1-$(shell cat example_5_jobs | wc -l)  run_simulation.sbatch 0 example_5_jobs
+
+
+example_6_jobs: 06_irr_dv.R irr_dv_simulation_base.R
+	grid_sweep.py --command "Rscript 06_irr_dv.R" --arg_dict '{"N":${irr_Ns},"m":${irr_ms}, "seed":${irr_seeds}, "outfile":["example_6.feather"], "y_explained_variance":${irr_explained_variances}}' --outfile example_6_jobs
+
+example_6.feather:example_6_jobs
+	rm -f example_6.feather
+	sbatch --wait --verbose --array=1-$(shell cat example_6_jobs | wc -l)  run_simulation.sbatch 0 example_6_jobs
+
+remember_irr.RDS: example_5.feather example_6.feather plot_irr_example.R plot_irr_dv_example.R
+	rm -f remember_irr.RDS
+	${srun} Rscript plot_irr_example.R --infile example_5.feather --name "plot.df.example.5"
+	${srun} Rscript plot_irr_dv_example.R --infile example_6.feather --name "plot.df.example.6"
+
 clean:
 	rm *.feather
 	rm -f remembr.RDS
							
								
								
									
simulations/irr_dv_simulation_base.R  (new file, 107 lines)
@@ -0,0 +1,107 @@
|  | library(matrixStats) # for numerically stable logsumexps | ||||||
|  | 
 | ||||||
|  | options(amelia.parallel="no", | ||||||
|  |         amelia.ncpus=1) | ||||||
|  | library(Amelia) | ||||||
|  | 
 | ||||||
|  | source("measerr_methods.R") ## for my more generic function. | ||||||
|  | 
 | ||||||
|  | run_simulation_depvar <- function(df, result, outcome_formula = y ~ x + z, rater_formula = y.obs ~ x, proxy_formula = w_pred ~ y){ | ||||||
|  | 
 | ||||||
|  |     accuracy <- df[,mean(w_pred==y)] | ||||||
|  |     result <- append(result, list(accuracy=accuracy)) | ||||||
|  | 
 | ||||||
|  |     (model.true <- glm(y ~ x + z, data=df, family=binomial(link='logit'))) | ||||||
|  |     true.ci.Bxy <- confint(model.true)['x',] | ||||||
|  |     true.ci.Bzy <- confint(model.true)['z',] | ||||||
|  | 
 | ||||||
|  |     result <- append(result, list(Bxy.est.true=coef(model.true)['x'], | ||||||
|  |                                   Bzy.est.true=coef(model.true)['z'], | ||||||
|  |                                   Bxy.ci.upper.true = true.ci.Bxy[2], | ||||||
|  |                                   Bxy.ci.lower.true = true.ci.Bxy[1], | ||||||
|  |                                   Bzy.ci.upper.true = true.ci.Bzy[2], | ||||||
|  |                                   Bzy.ci.lower.true = true.ci.Bzy[1])) | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
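|  |     ## loa0.* estimates below use only coder 0's labels (y.obs.0) on the labeled rows; | ||||||
|  |     ## loco.* estimates use only the labeled rows where the two coders agree (y.obs.0 == y.obs.1). | ||||||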
|  |     loa0.feasible <- glm(y.obs.0 ~ x + z, data = df[!(is.na(y.obs.0))], family=binomial(link='logit')) | ||||||
|  | 
 | ||||||
|  |     loa0.ci.Bxy <- confint(loa0.feasible)['x',] | ||||||
|  |     loa0.ci.Bzy <- confint(loa0.feasible)['z',] | ||||||
|  | 
 | ||||||
|  |     result <- append(result, list(Bxy.est.loa0.feasible=coef(loa0.feasible)['x'], | ||||||
|  |                                   Bzy.est.loa0.feasible=coef(loa0.feasible)['z'], | ||||||
|  |                                   Bxy.ci.upper.loa0.feasible = loa0.ci.Bxy[2], | ||||||
|  |                                   Bxy.ci.lower.loa0.feasible = loa0.ci.Bxy[1], | ||||||
|  |                                   Bzy.ci.upper.loa0.feasible = loa0.ci.Bzy[2], | ||||||
|  |                                   Bzy.ci.lower.loa0.feasible = loa0.ci.Bzy[1])) | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  |     df.loa0.mle <- copy(df) | ||||||
|  |     df.loa0.mle[,y:=y.obs.0] | ||||||
|  |     loa0.mle <- measerr_mle_dv(df.loa0.mle, outcome_formula=outcome_formula, proxy_formula=proxy_formula) | ||||||
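|  |     ## Wald-style intervals: the inverse Hessian approximates the covariance of the MLE, | ||||||
|  |     ## so estimate +/- 1.96 * sqrt(diag(fisher.info)) gives roughly 95% confidence limits. | ||||||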
|  |     fisher.info <- solve(loa0.mle$hessian) | ||||||
|  |     coef <- loa0.mle$par | ||||||
|  |     ci.upper <- coef + sqrt(diag(fisher.info)) * 1.96 | ||||||
|  |     ci.lower <- coef - sqrt(diag(fisher.info)) * 1.96 | ||||||
|  | 
 | ||||||
|  |     result <- append(result, list(Bxy.est.loa0.mle=coef['x'], | ||||||
|  |                                   Bzy.est.loa0.mle=coef['z'], | ||||||
|  |                                   Bxy.ci.upper.loa0.mle = ci.upper['x'], | ||||||
|  |                                   Bxy.ci.lower.loa0.mle = ci.lower['x'], | ||||||
|  |                                   Bzy.ci.upper.loa0.mle = ci.upper['z'], | ||||||
|  |                                   Bzy.ci.lower.loa0.mle = ci.lower['z'])) | ||||||
|  | 
 | ||||||
|  |     loco.feasible <- glm(y.obs.0 ~ x + z, data = df[(!is.na(y.obs.0)) & (y.obs.1 == y.obs.0)], family=binomial(link='logit')) | ||||||
|  | 
 | ||||||
|  |     loco.feasible.ci.Bxy <- confint(loco.feasible)['x',] | ||||||
|  |     loco.feasible.ci.Bzy <- confint(loco.feasible)['z',] | ||||||
|  | 
 | ||||||
|  |     result <- append(result, list(Bxy.est.loco.feasible=coef(loco.feasible)['x'], | ||||||
|  |                                   Bzy.est.loco.feasible=coef(loco.feasible)['z'], | ||||||
|  |                                   Bxy.ci.upper.loco.feasible = loco.feasible.ci.Bxy[2], | ||||||
|  |                                   Bxy.ci.lower.loco.feasible = loco.feasible.ci.Bxy[1], | ||||||
|  |                                   Bzy.ci.upper.loco.feasible = loco.feasible.ci.Bzy[2], | ||||||
|  |                                   Bzy.ci.lower.loco.feasible = loco.feasible.ci.Bzy[1])) | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  |     df.loco.mle <- copy(df) | ||||||
|  |     df.loco.mle[,y.obs:=NA] | ||||||
|  |     df.loco.mle[(y.obs.0)==(y.obs.1),y.obs:=y.obs.0] | ||||||
|  |     df.loco.mle[,y.true:=y] | ||||||
|  |     df.loco.mle[,y:=y.obs] | ||||||
|  |     print(df.loco.mle[!is.na(y.obs.1),mean(y.true==y,na.rm=TRUE)]) | ||||||
|  |     loco.mle <- measerr_mle_dv(df.loco.mle, outcome_formula=outcome_formula, proxy_formula=proxy_formula) | ||||||
|  |     fisher.info <- solve(loco.mle$hessian) | ||||||
|  |     coef <- loco.mle$par | ||||||
|  |     ci.upper <- coef + sqrt(diag(fisher.info)) * 1.96 | ||||||
|  |     ci.lower <- coef - sqrt(diag(fisher.info)) * 1.96 | ||||||
|  | 
 | ||||||
|  |     result <- append(result, list(Bxy.est.loco.mle=coef['x'], | ||||||
|  |                                   Bzy.est.loco.mle=coef['z'], | ||||||
|  |                                   Bxy.ci.upper.loco.mle = ci.upper['x'], | ||||||
|  |                                   Bxy.ci.lower.loco.mle = ci.lower['x'], | ||||||
|  |                                   Bzy.ci.upper.loco.mle = ci.upper['z'], | ||||||
|  |                                   Bzy.ci.lower.loco.mle = ci.lower['z'])) | ||||||
|  | 
 | ||||||
|  |     print(rater_formula) | ||||||
|  |     print(proxy_formula) | ||||||
|  | 
 | ||||||
|  |     ## mle.irr <- measerr_irr_mle( df, outcome_formula = outcome_formula, rater_formula = rater_formula, proxy_formula=proxy_formula, truth_formula=truth_formula) | ||||||
|  | 
 | ||||||
|  |     ## fisher.info <- solve(mle.irr$hessian) | ||||||
|  |     ## coef <- mle.irr$par | ||||||
|  |     ## ci.upper <- coef + sqrt(diag(fisher.info)) * 1.96 | ||||||
|  |     ## ci.lower <- coef - sqrt(diag(fisher.info)) * 1.96 | ||||||
|  |      | ||||||
|  |     ## result <- append(result, | ||||||
|  |     ##                  list(Bxy.est.mle = coef['x'], | ||||||
|  |     ##                       Bxy.ci.upper.mle = ci.upper['x'], | ||||||
|  |     ##                       Bxy.ci.lower.mle = ci.lower['x'], | ||||||
|  |     ##                       Bzy.est.mle = coef['z'], | ||||||
|  |     ##                       Bzy.ci.upper.mle = ci.upper['z'], | ||||||
|  |     ##                       Bzy.ci.lower.mle = ci.lower['z'])) | ||||||
|  | 
 | ||||||
|  |     return(result) | ||||||
|  | 
 | ||||||
|  | } | ||||||
							
								
								
									
simulations/irr_simulation_base.R  (new file, 106 lines)
@@ -0,0 +1,106 @@
|  | library(matrixStats) # for numerically stable logsumexps | ||||||
|  | 
 | ||||||
|  | options(amelia.parallel="no", | ||||||
|  |         amelia.ncpus=1) | ||||||
|  | library(Amelia) | ||||||
|  | 
 | ||||||
|  | source("measerr_methods.R") ## for my more generic function. | ||||||
|  | 
 | ||||||
|  | run_simulation <- function(df, result, outcome_formula = y ~ x + z, proxy_formula = w_pred ~ x, truth_formula = x ~ z){ | ||||||
|  | 
 | ||||||
|  |     accuracy <- df[,mean(w_pred==x)] | ||||||
|  |     result <- append(result, list(accuracy=accuracy)) | ||||||
|  | 
 | ||||||
|  |     (model.true <- lm(y ~ x + z, data=df)) | ||||||
|  |     true.ci.Bxy <- confint(model.true)['x',] | ||||||
|  |     true.ci.Bzy <- confint(model.true)['z',] | ||||||
|  | 
 | ||||||
|  |     result <- append(result, list(Bxy.est.true=coef(model.true)['x'], | ||||||
|  |                                   Bzy.est.true=coef(model.true)['z'], | ||||||
|  |                                   Bxy.ci.upper.true = true.ci.Bxy[2], | ||||||
|  |                                   Bxy.ci.lower.true = true.ci.Bxy[1], | ||||||
|  |                                   Bzy.ci.upper.true = true.ci.Bzy[2], | ||||||
|  |                                   Bzy.ci.lower.true = true.ci.Bzy[1])) | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
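|  |     ## loa0.* estimates below use only coder 0's labels (x.obs.0) on the labeled rows; | ||||||
|  |     ## loco.* estimates use only the labeled rows where the two coders agree (x.obs.0 == x.obs.1). | ||||||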
|  |     loa0.feasible <- lm(y ~ x.obs.0 + z, data = df[!(is.na(x.obs.1))]) | ||||||
|  | 
 | ||||||
|  |     loa0.ci.Bxy <- confint(loa0.feasible)['x.obs.0',] | ||||||
|  |     loa0.ci.Bzy <- confint(loa0.feasible)['z',] | ||||||
|  | 
 | ||||||
|  |     result <- append(result, list(Bxy.est.loa0.feasible=coef(loa0.feasible)['x.obs.0'], | ||||||
|  |                                   Bzy.est.loa0.feasible=coef(loa0.feasible)['z'], | ||||||
|  |                                   Bxy.ci.upper.loa0.feasible = loa0.ci.Bxy[2], | ||||||
|  |                                   Bxy.ci.lower.loa0.feasible = loa0.ci.Bxy[1], | ||||||
|  |                                   Bzy.ci.upper.loa0.feasible = loa0.ci.Bzy[2], | ||||||
|  |                                   Bzy.ci.lower.loa0.feasible = loa0.ci.Bzy[1])) | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  |     df.loa0.mle <- copy(df) | ||||||
|  |     df.loa0.mle[,x:=x.obs.0] | ||||||
|  |     loa0.mle <- measerr_mle(df.loa0.mle, outcome_formula=outcome_formula, proxy_formula=proxy_formula, truth_formula=truth_formula) | ||||||
|  |     fisher.info <- solve(loa0.mle$hessian) | ||||||
|  |     coef <- loa0.mle$par | ||||||
|  |     ci.upper <- coef + sqrt(diag(fisher.info)) * 1.96 | ||||||
|  |     ci.lower <- coef - sqrt(diag(fisher.info)) * 1.96 | ||||||
|  | 
 | ||||||
|  |     result <- append(result, list(Bxy.est.loa0.mle=coef['x'], | ||||||
|  |                                   Bzy.est.loa0.mle=coef['z'], | ||||||
|  |                                   Bxy.ci.upper.loa0.mle = ci.upper['x'], | ||||||
|  |                                   Bxy.ci.lower.loa0.mle = ci.lower['x'], | ||||||
|  |                                   Bzy.ci.upper.loa0.mle = ci.upper['z'], | ||||||
|  |                                   Bzy.ci.lower.loa0.mle = ci.lower['z'])) | ||||||
|  | 
 | ||||||
|  |     loco.feasible <- lm(y ~ x.obs.1 + z, data = df[(!is.na(x.obs.1)) & (x.obs.1 == x.obs.0)]) | ||||||
|  | 
 | ||||||
|  |     loco.feasible.ci.Bxy <- confint(loco.feasible)['x.obs.1',] | ||||||
|  |     loco.feasible.ci.Bzy <- confint(loco.feasible)['z',] | ||||||
|  | 
 | ||||||
|  |     result <- append(result, list(Bxy.est.loco.feasible=coef(loco.feasible)['x.obs.1'], | ||||||
|  |                                   Bzy.est.loco.feasible=coef(loco.feasible)['z'], | ||||||
|  |                                   Bxy.ci.upper.loco.feasible = loco.feasible.ci.Bxy[2], | ||||||
|  |                                   Bxy.ci.lower.loco.feasible = loco.feasible.ci.Bxy[1], | ||||||
|  |                                   Bzy.ci.upper.loco.feasible = loco.feasible.ci.Bzy[2], | ||||||
|  |                                   Bzy.ci.lower.loco.feasible = loco.feasible.ci.Bzy[1])) | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  |     df.loco.mle <- copy(df) | ||||||
|  |     df.loco.mle[,x.obs:=NA] | ||||||
|  |     df.loco.mle[(x.obs.0)==(x.obs.1),x.obs:=x.obs.0] | ||||||
|  |     df.loco.mle[,x.true:=x] | ||||||
|  |     df.loco.mle[,x:=x.obs] | ||||||
|  |     print(df.loco.mle[!is.na(x.obs.1),mean(x.true==x,na.rm=TRUE)]) | ||||||
|  |     loco.mle <- measerr_mle(df.loco.mle, outcome_formula=outcome_formula, proxy_formula=proxy_formula, truth_formula=truth_formula) | ||||||
|  |     fisher.info <- solve(loco.mle$hessian) | ||||||
|  |     coef <- loco.mle$par | ||||||
|  |     ci.upper <- coef + sqrt(diag(fisher.info)) * 1.96 | ||||||
|  |     ci.lower <- coef - sqrt(diag(fisher.info)) * 1.96 | ||||||
|  | 
 | ||||||
|  |     result <- append(result, list(Bxy.est.loco.mle=coef['x'], | ||||||
|  |                                   Bzy.est.loco.mle=coef['z'], | ||||||
|  |                                   Bxy.ci.upper.loco.mle = ci.upper['x'], | ||||||
|  |                                   Bxy.ci.lower.loco.mle = ci.lower['x'], | ||||||
|  |                                   Bzy.ci.upper.loco.mle = ci.upper['z'], | ||||||
|  |                                   Bzy.ci.lower.loco.mle = ci.lower['z'])) | ||||||
|  | 
 | ||||||
|  |     ## print(rater_formula) | ||||||
|  |     ## print(proxy_formula) | ||||||
|  |     ## mle.irr <- measerr_irr_mle( df, outcome_formula = outcome_formula, rater_formula = rater_formula, proxy_formula=proxy_formula, truth_formula=truth_formula) | ||||||
|  | 
 | ||||||
|  |     ## fisher.info <- solve(mle.irr$hessian) | ||||||
|  |     ## coef <- mle.irr$par | ||||||
|  |     ## ci.upper <- coef + sqrt(diag(fisher.info)) * 1.96 | ||||||
|  |     ## ci.lower <- coef - sqrt(diag(fisher.info)) * 1.96 | ||||||
|  |      | ||||||
|  |     ## result <- append(result, | ||||||
|  |     ##                  list(Bxy.est.mle = coef['x'], | ||||||
|  |     ##                       Bxy.ci.upper.mle = ci.upper['x'], | ||||||
|  |     ##                       Bxy.ci.lower.mle = ci.lower['x'], | ||||||
|  |     ##                       Bzy.est.mle = coef['z'], | ||||||
|  |     ##                       Bzy.ci.upper.mle = ci.upper['z'], | ||||||
|  |     ##                       Bzy.ci.lower.mle = ci.lower['z'])) | ||||||
|  | 
 | ||||||
|  |     return(result) | ||||||
|  | 
 | ||||||
|  | } | ||||||
@@ -102,14 +102,208 @@ measerr_mle_dv <- function(df, outcome_formula, outcome_family=binomial(link='lo
     return(fit)
 }
 
|  | ## Experimental, and not necessary if errors are independent. | ||||||
|  | measerr_irr_mle <- function(df, outcome_formula, outcome_family=gaussian(), rater_formula, proxy_formula, proxy_family=binomial(link='logit'), truth_formula, truth_family=binomial(link='logit')){ | ||||||
|  | 
 | ||||||
|  |     ### in this scenario, the ground truth also has measurement error, but we have repeated measures for it.  | ||||||
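|  |     ## The negative log likelihood below marginalizes over the unknown true x in {0,1}: | ||||||
|  |     ## for rows with coder labels:    sum_x P(y|x,z) P(x.obs.0|x) P(x.obs.1|x) P(w|x) P(x|z) | ||||||
|  |     ## for rows without coder labels: sum_x P(y|x,z) P(w|x) P(x|z) | ||||||
|  |     ## with each factor parameterized by outcome_formula, rater_formula, proxy_formula, and truth_formula respectively. | ||||||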
|  | 
 | ||||||
|  |     ## probability of y given observed data. | ||||||
|  |     df.obs <- df[!is.na(x.obs.1)] | ||||||
|  |     proxy.variable <- all.vars(proxy_formula)[1] | ||||||
|  |     df.x.obs.1 <- copy(df.obs)[,x:=1] | ||||||
|  |     df.x.obs.0 <- copy(df.obs)[,x:=0] | ||||||
|  |     y.obs <- df.obs[,y] | ||||||
|  | 
 | ||||||
|  |     nll <- function(params){ | ||||||
|  |         outcome.model.matrix.x.obs.0 <- model.matrix(outcome_formula, df.x.obs.0) | ||||||
|  |         outcome.model.matrix.x.obs.1 <- model.matrix(outcome_formula, df.x.obs.1) | ||||||
|  | 
 | ||||||
|  |         param.idx <- 1 | ||||||
|  |         n.outcome.model.covars <- dim(outcome.model.matrix.x.obs.0)[2] | ||||||
|  |         outcome.params <- params[param.idx:n.outcome.model.covars] | ||||||
|  |         param.idx <- param.idx + n.outcome.model.covars | ||||||
|  | 
 | ||||||
|  |         sigma.y <- params[param.idx] | ||||||
|  |         param.idx <- param.idx + 1 | ||||||
|  | 
 | ||||||
|  |         ll.y.x.obs.0 <- dnorm(y.obs, outcome.params %*% t(outcome.model.matrix.x.obs.0),sd=sigma.y, log=TRUE) | ||||||
|  |         ll.y.x.obs.1 <- dnorm(y.obs, outcome.params %*% t(outcome.model.matrix.x.obs.1),sd=sigma.y, log=TRUE) | ||||||
|  | 
 | ||||||
|  |         ## assume that the two coders are statistically independent conditional on x | ||||||
|  |         ll.x.obs.0.x0 <- vector(mode='numeric', length=nrow(df.obs)) | ||||||
|  |         ll.x.obs.1.x0 <- vector(mode='numeric', length=nrow(df.obs)) | ||||||
|  |         ll.x.obs.0.x1 <- vector(mode='numeric', length=nrow(df.obs)) | ||||||
|  |         ll.x.obs.1.x1 <- vector(mode='numeric', length=nrow(df.obs)) | ||||||
|  | 
 | ||||||
|  |         rater.model.matrix.x.obs.0 <- model.matrix(rater_formula, df.x.obs.0) | ||||||
|  |         rater.model.matrix.x.obs.1 <- model.matrix(rater_formula, df.x.obs.1) | ||||||
|  | 
 | ||||||
|  |         n.rater.model.covars <- dim(rater.model.matrix.x.obs.0)[2] | ||||||
|  |         rater.0.params <- params[param.idx:(n.rater.model.covars + param.idx - 1)] | ||||||
|  |         param.idx <- param.idx + n.rater.model.covars | ||||||
|  | 
 | ||||||
|  |         rater.1.params <- params[param.idx:(n.rater.model.covars + param.idx - 1)] | ||||||
|  |         param.idx <- param.idx + n.rater.model.covars | ||||||
|  |          | ||||||
|  |         # probability of rater 0 if x is 0 or 1 | ||||||
|  |         ll.x.obs.0.x0[df.obs$x.obs.0==1] <- plogis(rater.0.params %*% t(rater.model.matrix.x.obs.0[df.obs$x.obs.0==1,]), log=TRUE) | ||||||
|  |         ll.x.obs.0.x0[df.obs$x.obs.0==0] <- plogis(rater.0.params %*% t(rater.model.matrix.x.obs.0[df.obs$x.obs.0==0,]), log=TRUE, lower.tail=FALSE) | ||||||
|  |         ll.x.obs.0.x1[df.obs$x.obs.0==1] <- plogis(rater.0.params %*% t(rater.model.matrix.x.obs.1[df.obs$x.obs.0==1,]), log=TRUE) | ||||||
|  |         ll.x.obs.0.x1[df.obs$x.obs.0==0] <- plogis(rater.0.params %*% t(rater.model.matrix.x.obs.1[df.obs$x.obs.0==0,]), log=TRUE, lower.tail=FALSE) | ||||||
|  | 
 | ||||||
|  |         # probability of rater 1 if x is 0 or 1 | ||||||
|  |         ll.x.obs.1.x0[df.obs$x.obs.1==1] <- plogis(rater.1.params %*% t(rater.model.matrix.x.obs.0[df.obs$x.obs.1==1,]), log=TRUE) | ||||||
|  |         ll.x.obs.1.x0[df.obs$x.obs.1==0] <- plogis(rater.1.params %*% t(rater.model.matrix.x.obs.0[df.obs$x.obs.1==0,]), log=TRUE, lower.tail=FALSE) | ||||||
|  |         ll.x.obs.1.x1[df.obs$x.obs.1==1] <- plogis(rater.1.params %*% t(rater.model.matrix.x.obs.1[df.obs$x.obs.1==1,]), log=TRUE) | ||||||
|  |         ll.x.obs.1.x1[df.obs$x.obs.1==0] <- plogis(rater.1.params %*% t(rater.model.matrix.x.obs.1[df.obs$x.obs.1==0,]), log=TRUE, lower.tail=FALSE) | ||||||
|  | 
 | ||||||
|  |         proxy.model.matrix.x0 <- model.matrix(proxy_formula, df.x.obs.0) | ||||||
|  |         proxy.model.matrix.x1 <- model.matrix(proxy_formula, df.x.obs.1) | ||||||
|  | 
 | ||||||
|  |         n.proxy.model.covars <- dim(proxy.model.matrix.x0)[2] | ||||||
|  |         proxy.params <- params[param.idx:(n.proxy.model.covars+param.idx-1)] | ||||||
|  |         param.idx <- param.idx + n.proxy.model.covars | ||||||
|  | 
 | ||||||
|  |         proxy.obs <- with(df.obs, eval(parse(text=proxy.variable))) | ||||||
|  | 
 | ||||||
|  |         if( (proxy_family$family=="binomial") & (proxy_family$link=='logit')){ | ||||||
|  |             ll.w.obs.x0 <- vector(mode='numeric',length=dim(proxy.model.matrix.x0)[1]) | ||||||
|  |             ll.w.obs.x1 <- vector(mode='numeric',length=dim(proxy.model.matrix.x1)[1]) | ||||||
|  | 
 | ||||||
|  |                                         # proxy_formula likelihood using logistic regression | ||||||
|  |             ll.w.obs.x0[proxy.obs==1] <- plogis(proxy.params %*% t(proxy.model.matrix.x0[proxy.obs==1,]),log=TRUE) | ||||||
|  |             ll.w.obs.x0[proxy.obs==0] <- plogis(proxy.params %*% t(proxy.model.matrix.x0[proxy.obs==0,]),log=TRUE, lower.tail=FALSE) | ||||||
|  | 
 | ||||||
|  |             ll.w.obs.x1[proxy.obs==1] <- plogis(proxy.params %*% t(proxy.model.matrix.x1[proxy.obs==1,]),log=TRUE) | ||||||
|  |             ll.w.obs.x1[proxy.obs==0] <- plogis(proxy.params %*% t(proxy.model.matrix.x1[proxy.obs==0,]),log=TRUE, lower.tail=FALSE) | ||||||
|  |         } | ||||||
|  | 
 | ||||||
|  |         ## assume that the probability of x is a logistic regression depending on z | ||||||
|  |         truth.model.matrix.obs <- model.matrix(truth_formula, df.obs) | ||||||
|  |         n.truth.params <- dim(truth.model.matrix.obs)[2] | ||||||
|  |         truth.params <- params[param.idx:(n.truth.params + param.idx - 1)] | ||||||
|  | 
 | ||||||
|  |         ## plogis of the truth-model linear predictor is P(x=1|z), so the x=0 branch takes lower.tail=FALSE | ||||||
|  |         ll.obs.x1 <- plogis(truth.params %*% t(truth.model.matrix.obs), log=TRUE) | ||||||
|  |         ll.obs.x0 <- plogis(truth.params %*% t(truth.model.matrix.obs), log=TRUE, lower.tail=FALSE) | ||||||
|  | 
 | ||||||
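|  |         ## marginalize over x in {0,1} on the log scale; colLogSumExps (from matrixStats) keeps this numerically stable | ||||||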
|  |         ll.obs <- colLogSumExps(rbind(ll.y.x.obs.0 + ll.x.obs.0.x0 + ll.x.obs.1.x0 + ll.obs.x0 + ll.w.obs.x0, | ||||||
|  |                                       ll.y.x.obs.1 + ll.x.obs.0.x1 + ll.x.obs.1.x1 + ll.obs.x1 + ll.w.obs.x1)) | ||||||
|  | 
 | ||||||
|  |         ### NOW FOR THE FUN PART. Likelihood of the unobserved data. | ||||||
|  |         ### we have to integrate out x.obs.0, x.obs.1, and x. | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  |         ## THE OUTCOME | ||||||
|  |         df.unobs <- df[is.na(x.obs)] | ||||||
|  |         df.x.unobs.0 <- copy(df.unobs)[,x:=0] | ||||||
|  |         df.x.unobs.1 <- copy(df.unobs)[,x:=1] | ||||||
|  |         y.unobs <- df.unobs$y | ||||||
|  | 
 | ||||||
|  |         outcome.model.matrix.x.unobs.0 <- model.matrix(outcome_formula, df.x.unobs.0) | ||||||
|  |         outcome.model.matrix.x.unobs.1 <- model.matrix(outcome_formula, df.x.unobs.1) | ||||||
|  | 
 | ||||||
|  |         ll.y.unobs.x0 <- dnorm(y.unobs, outcome.params %*% t(outcome.model.matrix.x.unobs.0), sd=sigma.y, log=TRUE) | ||||||
|  |         ll.y.unobs.x1 <- dnorm(y.unobs, outcome.params %*% t(outcome.model.matrix.x.unobs.1), sd=sigma.y, log=TRUE) | ||||||
|  | 
 | ||||||
|  |          | ||||||
|  |         ## THE UNLABELED DATA | ||||||
|  | 
 | ||||||
|  |          | ||||||
|  |         ## assume that the two coders are statistically independent conditional on x | ||||||
|  |         ll.x.unobs.0.x0 <- vector(mode='numeric', length=nrow(df.unobs)) | ||||||
|  |         ll.x.unobs.1.x0 <- vector(mode='numeric', length=nrow(df.unobs)) | ||||||
|  |         ll.x.unobs.0.x1 <- vector(mode='numeric', length=nrow(df.unobs)) | ||||||
|  |         ll.x.unobs.1.x1 <- vector(mode='numeric', length=nrow(df.unobs)) | ||||||
|  |          | ||||||
|  |         df.x.unobs.0[,x.obs := 1] | ||||||
|  |         df.x.unobs.1[,x.obs := 1] | ||||||
|  | 
 | ||||||
|  |         rater.model.matrix.x.unobs.0 <- model.matrix(rater_formula, df.x.unobs.0) | ||||||
|  |         rater.model.matrix.x.unobs.1 <- model.matrix(rater_formula, df.x.unobs.1) | ||||||
|  | 
 | ||||||
|  |           | ||||||
|  |         ## # probability of rater 0 if x is 0 or 1 | ||||||
|  |         ## ll.x.unobs.0.x0 <- colLogSumExps(rbind(plogis(rater.0.params %*% t(rater.model.matrix.x.unobs.0), log=TRUE), | ||||||
|  |         ##                                      plogis(rater.0.params %*% t(rater.model.matrix.x.unobs.0), log=TRUE, lower.tail=TRUE))) | ||||||
|  | 
 | ||||||
|  |         ## ll.x.unobs.0.x1 <- colLogSumExps(rbind(plogis(rater.0.params %*% t(rater.model.matrix.x.unobs.1), log=TRUE), | ||||||
|  |         ##                                        plogis(rater.0.params %*% t(rater.model.matrix.x.unobs.1), log=TRUE, lower.tail=TRUE))) | ||||||
|  | 
 | ||||||
|  |         ## # probability of rater 1 if x is 0 or 1 | ||||||
|  |         ## ll.x.unobs.1.x0 <- colLogSumExps(rbind(plogis(rater.1.params %*% t(rater.model.matrix.x.unobs.0), log=TRUE), | ||||||
|  |         ##                                      plogis(rater.1.params %*% t(rater.model.matrix.x.unobs.0), log=TRUE, lower.tail=TRUE))) | ||||||
|  | 
 | ||||||
|  |         ## ll.x.unobs.1.x1 <- colLogSumExps(rbind(plogis(rater.1.params %*% t(rater.model.matrix.x.unobs.1), log=TRUE), | ||||||
|  |         ##                                      plogis(rater.1.params %*% t(rater.model.matrix.x.unobs.1), log=TRUE, lower.tail=TRUE))) | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  |         proxy.unobs <- with(df.unobs, eval(parse(text=proxy.variable))) | ||||||
|  |         proxy.model.matrix.x0.unobs <- model.matrix(proxy_formula, df.x.unobs.0) | ||||||
|  |         proxy.model.matrix.x1.unobs <- model.matrix(proxy_formula, df.x.unobs.1) | ||||||
|  | 
 | ||||||
|  |         if( (proxy_family$family=="binomial") & (proxy_family$link=='logit')){ | ||||||
|  |             ll.w.unobs.x0 <- vector(mode='numeric',length=dim(proxy.model.matrix.x0.unobs)[1]) | ||||||
|  |             ll.w.unobs.x1 <- vector(mode='numeric',length=dim(proxy.model.matrix.x1.unobs)[1]) | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  |                                         # proxy_formula likelihood using logistic regression | ||||||
|  |             ll.w.unobs.x0[proxy.unobs==1] <- plogis(proxy.params %*% t(proxy.model.matrix.x0.unobs[proxy.unobs==1,]),log=TRUE) | ||||||
|  |             ll.w.unobs.x0[proxy.unobs==0] <- plogis(proxy.params %*% t(proxy.model.matrix.x0.unobs[proxy.unobs==0,]),log=TRUE, lower.tail=FALSE) | ||||||
|  | 
 | ||||||
|  |             ll.w.unobs.x1[proxy.unobs==1] <- plogis(proxy.params %*% t(proxy.model.matrix.x1.unobs[proxy.unobs==1,]),log=TRUE) | ||||||
|  |             ll.w.unobs.x1[proxy.unobs==0] <- plogis(proxy.params %*% t(proxy.model.matrix.x1.unobs[proxy.unobs==0,]),log=TRUE, lower.tail=FALSE) | ||||||
|  |         } | ||||||
|  | 
 | ||||||
|  |         truth.model.matrix.unobs <- model.matrix(truth_formula, df.unobs) | ||||||
|  | 
 | ||||||
|  |         ll.unobs.x1 <- plogis(truth.params %*% t(truth.model.matrix.unobs), log=TRUE) | ||||||
|  |         ll.unobs.x0 <- plogis(truth.params %*% t(truth.model.matrix.unobs), log=TRUE, lower.tail=FALSE) | ||||||
|  | 
 | ||||||
|  |         ll.unobs <- colLogSumExps(rbind(ll.unobs.x0 + ll.w.unobs.x0 + ll.y.unobs.x0, | ||||||
|  |                                         ll.unobs.x1 + ll.w.unobs.x1 + ll.y.unobs.x1)) | ||||||
|  | 
 | ||||||
|  |         return(-1 *( sum(ll.obs) + sum(ll.unobs))) | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     outcome.params <- colnames(model.matrix(outcome_formula,df)) | ||||||
|  |     lower <- rep(-Inf, length(outcome.params)) | ||||||
|  | 
 | ||||||
|  |     if(outcome_family$family=='gaussian'){ | ||||||
|  |         params <- c(outcome.params, 'sigma_y') | ||||||
|  |         lower <- c(lower, 0.00001) | ||||||
|  |     } else { | ||||||
|  |         params <- outcome.params | ||||||
|  |     } | ||||||
|  |      | ||||||
|  |     rater.0.params <- colnames(model.matrix(rater_formula,df)) | ||||||
|  |     params <- c(params, paste0('rater_0',rater.0.params)) | ||||||
|  |     lower <- c(lower, rep(-Inf, length(rater.0.params))) | ||||||
|  | 
 | ||||||
|  |     rater.1.params <- colnames(model.matrix(rater_formula,df)) | ||||||
|  |     params <- c(params, paste0('rater_1',rater.1.params)) | ||||||
|  |     lower <- c(lower, rep(-Inf, length(rater.1.params))) | ||||||
|  | 
 | ||||||
|  |     proxy.params <- colnames(model.matrix(proxy_formula, df)) | ||||||
|  |     params <- c(params, paste0('proxy_',proxy.params)) | ||||||
|  |     lower <- c(lower, rep(-Inf, length(proxy.params))) | ||||||
|  | 
 | ||||||
|  |     truth.params <- colnames(model.matrix(truth_formula, df)) | ||||||
|  |     params <- c(params, paste0('truth_', truth.params)) | ||||||
|  |     lower <- c(lower, rep(-Inf, length(truth.params))) | ||||||
|  |     start <- rep(0.1,length(params)) | ||||||
|  |     names(start) <- params | ||||||
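|  |     ## start/lower follow the same order in which the likelihood indexes params: | ||||||
|  |     ## outcome coefficients, then sigma_y (for a gaussian outcome, bounded below), then rater 0, rater 1, proxy, and truth coefficients. | ||||||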
|  |      | ||||||
|  |     fit <- optim(start, fn = nll, lower=lower, method='L-BFGS-B', hessian=TRUE, control=list(maxit=1e6)) | ||||||
|  |     return(fit) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
 measerr_mle <- function(df, outcome_formula, outcome_family=gaussian(), proxy_formula, proxy_family=binomial(link='logit'), truth_formula, truth_family=binomial(link='logit')){
 
     measrr_mle_nll <- function(params){
         df.obs <- model.frame(outcome_formula, df)
-        
         proxy.variable <- all.vars(proxy_formula)[1]
         proxy.model.matrix <- model.matrix(proxy_formula, df)
-
         response.var <- all.vars(outcome_formula)[1]
         y.obs <- with(df.obs,eval(parse(text=response.var)))
 
@@ -10,8 +10,6 @@ parser <- add_argument(parser, "--infile", default="", help="name of the file to
 parser <- add_argument(parser, "--name", default="", help="The name to safe the data to in the remember file.")
 args <- parse_args(parser)
 
-
-
 summarize.estimator <- function(df, suffix='naive', coefname='x'){
 
     part <- df[,c('N',
							
								
								
									
simulations/plot_irr_dv_example.R  (new file, 63 lines)
@@ -0,0 +1,63 @@
|  | source("RemembR/R/RemembeR.R") | ||||||
|  | library(arrow) | ||||||
|  | library(data.table) | ||||||
|  | library(ggplot2) | ||||||
|  | library(filelock) | ||||||
|  | library(argparser) | ||||||
|  | 
 | ||||||
|  | parser <- arg_parser("Simulate data and fit corrected models.") | ||||||
|  | parser <- add_argument(parser, "--infile", default="", help="name of the file to read.") | ||||||
|  | parser <- add_argument(parser, "--name", default="", help="The name to save the data to in the remember file.") | ||||||
|  | args <- parse_args(parser) | ||||||
|  | source("summarize_estimator.R") | ||||||
|  | 
 | ||||||
|  | build_plot_dataset <- function(df){ | ||||||
|  |      | ||||||
|  |     x.true <-  summarize.estimator(df, 'true','x') | ||||||
|  | 
 | ||||||
|  |     z.true <-  summarize.estimator(df, 'true','z') | ||||||
|  | 
 | ||||||
|  |     x.loa0.feasible <- summarize.estimator(df, 'loa0.feasible','x') | ||||||
|  |      | ||||||
|  |     z.loa0.feasible <- summarize.estimator(df,'loa0.feasible','z') | ||||||
|  | 
 | ||||||
|  |     x.loa0.mle <- summarize.estimator(df, 'loa0.mle', 'x') | ||||||
|  | 
 | ||||||
|  |     z.loa0.mle <- summarize.estimator(df, 'loa0.mle', 'z') | ||||||
|  | 
 | ||||||
|  |     x.loco.feasible <- summarize.estimator(df, 'loco.feasible', 'x') | ||||||
|  | 
 | ||||||
|  |     z.loco.feasible <- summarize.estimator(df, 'loco.feasible', 'z') | ||||||
|  | 
 | ||||||
|  |     x.loco.mle <- summarize.estimator(df, 'loco.mle', 'x') | ||||||
|  | 
 | ||||||
|  |     z.loco.mle <- summarize.estimator(df, 'loco.mle', 'z') | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  |     accuracy <- df[,mean(accuracy)] | ||||||
|  |     plot.df <- rbindlist(list(x.true,z.true,x.loa0.feasible,z.loa0.feasible,x.loa0.mle,z.loa0.mle,x.loco.feasible, z.loco.feasible, z.loco.mle, x.loco.mle),use.names=T) | ||||||
|  |     plot.df[,accuracy := accuracy] | ||||||
|  |     plot.df <- plot.df[,":="(sd.est=sqrt(var.est)/N.sims)] | ||||||
|  |     return(plot.df) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | plot.df <- read_feather(args$infile) | ||||||
|  | print(unique(plot.df$N)) | ||||||
|  | 
 | ||||||
|  | # df <- df[apply(df,1,function(x) !any(is.na(x)))] | ||||||
|  | 
 | ||||||
|  | if(!('Bzx' %in% names(plot.df))) | ||||||
|  |     plot.df[,Bzx:=NA] | ||||||
|  | 
 | ||||||
|  | if(!('accuracy_imbalance_difference' %in% names(plot.df))) | ||||||
|  |     plot.df[,accuracy_imbalance_difference:=NA] | ||||||
|  | 
 | ||||||
|  | unique(plot.df[,'accuracy_imbalance_difference']) | ||||||
|  | 
 | ||||||
|  | #plot.df <- build_plot_dataset(df[accuracy_imbalance_difference==0.1][N==700]) | ||||||
|  | plot.df <- build_plot_dataset(plot.df) | ||||||
|  | 
 | ||||||
|  | change.remember.file("remember_irr.RDS",clear=TRUE) | ||||||
|  | 
 | ||||||
|  | remember(plot.df,args$name) | ||||||
							
								
								
									
simulations/plot_irr_example.R  (new file, 129 lines)
@@ -0,0 +1,129 @@
|  | source("RemembR/R/RemembeR.R") | ||||||
|  | library(arrow) | ||||||
|  | library(data.table) | ||||||
|  | library(ggplot2) | ||||||
|  | library(filelock) | ||||||
|  | library(argparser) | ||||||
|  | 
 | ||||||
|  | parser <- arg_parser("Simulate data and fit corrected models.") | ||||||
|  | parser <- add_argument(parser, "--infile", default="", help="name of the file to read.") | ||||||
|  | parser <- add_argument(parser, "--name", default="", help="The name to save the data to in the remember file.") | ||||||
|  | args <- parse_args(parser) | ||||||
|  | source("summarize_estimator.R") | ||||||
|  | 
 | ||||||
|  | build_plot_dataset <- function(df){ | ||||||
|  |      | ||||||
|  |     x.true <-  summarize.estimator(df, 'true','x') | ||||||
|  | 
 | ||||||
|  |     z.true <-  summarize.estimator(df, 'true','z') | ||||||
|  | 
 | ||||||
|  |     x.loa0.feasible <- summarize.estimator(df, 'loa0.feasible','x') | ||||||
|  |      | ||||||
|  |     z.loa0.feasible <- summarize.estimator(df,'loa0.feasible','z') | ||||||
|  | 
 | ||||||
|  |     x.loa0.mle <- summarize.estimator(df, 'loa0.mle', 'x') | ||||||
|  | 
 | ||||||
|  |     z.loa0.mle <- summarize.estimator(df, 'loa0.mle', 'z') | ||||||
|  | 
 | ||||||
|  |     x.loco.feasible <- summarize.estimator(df, 'loco.feasible', 'x') | ||||||
|  | 
 | ||||||
|  |     z.loco.feasible <- summarize.estimator(df, 'loco.feasible', 'z') | ||||||
|  | 
 | ||||||
|  |     x.loco.mle <- summarize.estimator(df, 'loco.mle', 'x') | ||||||
|  | 
 | ||||||
|  |     z.loco.mle <- summarize.estimator(df, 'loco.mle', 'z') | ||||||
|  | 
 | ||||||
|  |     ## x.mle <- summarize.estimator(df, 'mle', 'x') | ||||||
|  | 
 | ||||||
|  |     ## z.mle <- summarize.estimator(df, 'mle', 'z') | ||||||
|  | 
 | ||||||
|  |     accuracy <- df[,mean(accuracy)] | ||||||
|  |     plot.df <- rbindlist(list(x.true,z.true,x.loa0.feasible,z.loa0.feasible,x.loa0.mle,z.loa0.mle,x.loco.feasible, z.loco.feasible, x.loco.mle, z.loco.mle),use.names=T) | ||||||
|  |     plot.df[,accuracy := accuracy] | ||||||
|  |     ## standard error of the mean estimate across simulation runs | ||||||
|  |     plot.df <- plot.df[,":="(sd.est=sqrt(var.est/N.sims))] | ||||||
|  |     return(plot.df) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | ## read_feather() returns a tibble; convert so the data.table syntax below works | ||||||
|  | plot.df <- data.table(read_feather(args$infile)) | ||||||
|  | print(unique(plot.df$N)) | ||||||
|  | 
 | ||||||
|  | # df <- df[apply(df,1,function(x) !any(is.na(x)))] | ||||||
|  | 
 | ||||||
|  | if(!('Bzx' %in% names(plot.df))) | ||||||
|  |     plot.df[,Bzx:=NA] | ||||||
|  | 
 | ||||||
|  | if(!('accuracy_imbalance_difference' %in% names(plot.df))) | ||||||
|  |     plot.df[,accuracy_imbalance_difference:=NA] | ||||||
|  | 
 | ||||||
|  | unique(plot.df[,'accuracy_imbalance_difference']) | ||||||
|  | 
 | ||||||
|  | #plot.df <- build_plot_dataset(df[accuracy_imbalance_difference==0.1][N==700]) | ||||||
|  | plot.df <- build_plot_dataset(plot.df) | ||||||
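|  | ## store the summarized estimates in remember_irr.RDS under the supplied --name | ||||||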
|  | change.remember.file("remember_irr.RDS",clear=TRUE) | ||||||
|  | remember(plot.df,args$name) | ||||||
|  | 
 | ||||||
|  | #ggplot(df,aes(x=Bxy.est.mle)) + geom_histogram() + facet_grid(accuracy_imbalance_difference ~ Bzy) | ||||||
|  | 
 | ||||||
|  | ## ## ## df[gmm.ER_pval<0.05] | ||||||
|  | 
 | ||||||
|  | ## plot.df.test <- plot.df[,':='(method=factor(method,levels=c("Naive","Multiple imputation", "Multiple imputation (Classifier features unobserved)","Regression Calibration","2SLS+gmm","Bespoke MLE", "Feasible"),ordered=T), | ||||||
|  | ##                                    N=factor(N), | ||||||
|  | ##                                    m=factor(m))] | ||||||
|  | 
 | ||||||
|  | ## plot.df.test <- plot.df.test[(variable=='x') & (method!="Multiple imputation (Classifier features unobserved)")] | ||||||
|  | ## p <- ggplot(plot.df.test, aes(y=mean.est, ymax=mean.est + var.est/2, ymin=mean.est-var.est/2, x=method)) | ||||||
|  | ## p <- p + geom_hline(data=plot.df.test, mapping=aes(yintercept=0.1),linetype=2) | ||||||
|  | 
 | ||||||
|  | ## p <- p + geom_pointrange() + facet_grid(N~m,as.table=F,scales='free') + scale_x_discrete(labels=label_wrap_gen(4)) | ||||||
|  | ## print(p) | ||||||
|  | 
 | ||||||
|  | ## plot.df.test <- plot.df[,':='(method=factor(method,levels=c("Naive","Multiple imputation", "Multiple imputation (Classifier features unobserved)","Regression Calibration","2SLS+gmm","Bespoke MLE", "Feasible"),ordered=T), | ||||||
|  | ##                                    N=factor(N), | ||||||
|  | ##                                    m=factor(m))] | ||||||
|  | 
 | ||||||
|  | ## plot.df.test <- plot.df.test[(variable=='z') & (method!="Multiple imputation (Classifier features unobserved)")] | ||||||
|  | ## p <- ggplot(plot.df.test, aes(y=mean.est, ymax=mean.est + var.est/2, ymin=mean.est-var.est/2, x=method)) | ||||||
|  | ## p <- p + geom_hline(data=plot.df.test, mapping=aes(yintercept=-0.1),linetype=2) | ||||||
|  | 
 | ||||||
|  | ## p <- p + geom_pointrange() + facet_grid(m~N,as.table=F,scales='free') + scale_x_discrete(labels=label_wrap_gen(4)) | ||||||
|  | ## print(p) | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | ## x.mle <- df[,.(N,m,Bxy.est.mle,Bxy.ci.lower.mle, Bxy.ci.upper.mle, y_explained_variance, Bzx, Bzy, accuracy_imbalance_difference)] | ||||||
|  | ## x.mle.plot <- x.mle[,.(mean.est = mean(Bxy.est.mle), | ||||||
|  | ##                        var.est = var(Bxy.est.mle), | ||||||
|  | ##                        N.sims = .N, | ||||||
|  | ##                        variable='z', | ||||||
|  | ##                        method='Bespoke MLE' | ||||||
|  | ##                        ), | ||||||
|  | ##                     by=c("N","m",'y_explained_variance', 'Bzx', 'Bzy','accuracy_imbalance_difference')] | ||||||
|  | 
 | ||||||
|  | ## z.mle <- df[,.(N,m,Bzy.est.mle,Bzy.ci.lower.mle, Bzy.ci.upper.mle, y_explained_variance, Bzx, Bzy, accuracy_imbalance_difference)] | ||||||
|  | 
 | ||||||
|  | ## z.mle.plot <- z.mle[,.(mean.est = mean(Bzy.est.mle), | ||||||
|  | ##                        var.est = var(Bzy.est.mle), | ||||||
|  | ##                        N.sims = .N, | ||||||
|  | ##                        variable='z', | ||||||
|  | ##                        method='Bespoke MLE' | ||||||
|  | ##                        ), | ||||||
|  | ##                     by=c("N","m",'y_explained_variance','Bzx')] | ||||||
|  | 
 | ||||||
|  | ## plot.df <- z.mle.plot | ||||||
|  | ## plot.df.test <- plot.df[,':='(method=factor(method,levels=c("Naive","Multiple imputation", "Multiple imputation (Classifier features unobserved)","Regression Calibration","2SLS+gmm","Bespoke MLE", "Feasible"),ordered=T), | ||||||
|  | ##                                    N=factor(N), | ||||||
|  | ##                                    m=factor(m))] | ||||||
|  | 
 | ||||||
|  | ## plot.df.test <- plot.df.test[(variable=='z') & (m != 1000) & (m!=500) & (method!="Multiple imputation (Classifier features unobserved)")] | ||||||
|  | ## p <- ggplot(plot.df.test, aes(y=mean.est, ymax=mean.est + var.est/2, ymin=mean.est-var.est/2, x=method)) | ||||||
|  | ## p <- p + geom_hline(aes(yintercept=0.2),linetype=2) | ||||||
|  | 
 | ||||||
|  | ## p <- p + geom_pointrange() + facet_grid(m~Bzx, Bzy,as.table=F) + scale_x_discrete(labels=label_wrap_gen(4)) | ||||||
|  | ## print(p) | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | ## ## ggplot(plot.df[variable=='x'], aes(y=mean.est, ymax=mean.est + var.est/2, ymin=mean.est-var.est/2, x=method)) + geom_pointrange() + facet_grid(-m~N) + scale_x_discrete(labels=label_wrap_gen(10)) | ||||||
|  | 
 | ||||||
|  | ## ## ggplot(plot.df,aes(y=N,x=m,color=p.sign.correct)) + geom_point() + facet_grid(variable ~ method) + scale_color_viridis_c(option='D') + theme_minimal() + xlab("Number of gold standard labels") + ylab("Total sample size")  | ||||||
|  | 
 | ||||||
|  | ## ## ggplot(plot.df,aes(y=N,x=m,color=abs(mean.bias))) + geom_point() + facet_grid(variable ~ method) + scale_color_viridis_c(option='D') + theme_minimal() + xlab("Number of gold standard labels") + ylab("Total sample size")  | ||||||
							
								
								
									
42  simulations/summarize_estimator.R  Normal file
							| @ -0,0 +1,42 @@ | |||||||
|  | 
 | ||||||
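|  | ## Summarize the simulated estimates of the coefficient on `coefname` (x or z) for one estimator. | ||||||
|  | ## `suffix` selects the estimator's columns (e.g. 'true', 'loa0.mle', 'loco.feasible'); the result | ||||||
|  | ## has one row per simulation-parameter combination with coverage, bias, and sign statistics. | ||||||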
|  | summarize.estimator <- function(df, suffix='naive', coefname='x'){ | ||||||
|  | 
 | ||||||
|  |     part <- df[,c('N', | ||||||
|  |                   'm', | ||||||
|  |                   'Bxy', | ||||||
|  |                   paste0('B',coefname,'y.est.',suffix), | ||||||
|  |                   paste0('B',coefname,'y.ci.lower.',suffix), | ||||||
|  |                   paste0('B',coefname,'y.ci.upper.',suffix), | ||||||
|  |                   'y_explained_variance', | ||||||
|  |                   'Bzx', | ||||||
|  |                   'Bzy', | ||||||
|  |                   'accuracy_imbalance_difference' | ||||||
|  |                   ), | ||||||
|  |                with=FALSE] | ||||||
|  |      | ||||||
|  |     ## compare estimates against the true coefficient for the requested variable (Bxy or Bzy) | ||||||
|  |     true.param <- part[[paste0('B',coefname,'y')]] | ||||||
|  |     est <- part[[paste0('B',coefname,'y.est.',suffix)]] | ||||||
|  |     ci.lower <- part[[paste0('B',coefname,'y.ci.lower.',suffix)]] | ||||||
|  |     ci.upper <- part[[paste0('B',coefname,'y.ci.upper.',suffix)]] | ||||||
|  |  | ||||||
|  |     true.in.ci <- as.integer((true.param >= ci.lower) & (true.param <= ci.upper)) | ||||||
|  |     zero.in.ci <- as.integer((0 >= ci.lower) & (0 <= ci.upper)) | ||||||
|  |     bias <- true.param - est | ||||||
|  |     sign.correct <- as.integer(sign(true.param) == sign(est)) | ||||||
|  |  | ||||||
|  |     part <- part[,':='(true.in.ci = true.in.ci, | ||||||
|  |                        zero.in.ci = zero.in.ci, | ||||||
|  |                        bias = bias, | ||||||
|  |                        sign.correct = sign.correct)] | ||||||
|  | 
 | ||||||
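|  |     ## aggregate within each simulation-parameter cell: CI coverage, mean bias, mean and variance | ||||||
|  |     ## of the estimates, their central 90% range, and the share of runs with a significant, | ||||||
|  |     ## correctly signed estimate | ||||||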
|  |     part.plot <- part[, .(p.true.in.ci = mean(true.in.ci), | ||||||
|  |                           mean.bias = mean(bias), | ||||||
|  |                           mean.est = mean(.SD[[paste0('B',coefname,'y.est.',suffix)]]), | ||||||
|  |                           var.est = var(.SD[[paste0('B',coefname,'y.est.',suffix)]]), | ||||||
|  |                           est.upper.95 = quantile(.SD[[paste0('B',coefname,'y.est.',suffix)]],0.95,na.rm=T), | ||||||
|  |                           est.lower.95 = quantile(.SD[[paste0('B',coefname,'y.est.',suffix)]],0.05,na.rm=T), | ||||||
|  |                           N.sims = .N, | ||||||
|  |                           p.sign.correct = mean(as.integer(sign.correct & (! zero.in.ci))), | ||||||
|  |                           variable=coefname, | ||||||
|  |                           method=suffix | ||||||
|  |                           ), | ||||||
|  |                       by=c("N","m",'y_explained_variance','Bzx', 'Bzy', 'accuracy_imbalance_difference') | ||||||
|  |                       ] | ||||||
|  |      | ||||||
|  |     return(part.plot) | ||||||
|  | } | ||||||
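|  |  | ||||||
|  | ## Example usage (illustrative; mirrors the calls in plot_irr_example.R): | ||||||
|  | ##   df <- data.table(read_feather(args$infile)) | ||||||
|  | ##   x.loco.mle <- summarize.estimator(df, 'loco.mle', 'x') | ||||||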