git-annex in nathante@n3246:/gscratch/comdata/users/nathante/ml_measurement_error_public
This commit is contained in:
		
parent e17a52e236
commit 5c931a7198
				| @ -63,3 +63,94 @@ list.files() | ||||
| install.packages("filelock") | ||||
| q() | ||||
| n | ||||
| df | ||||
| df | ||||
| outcome_formula <- y ~ x + z | ||||
| outcome_family=gaussian() | ||||
| proxy_formula <- w_pred ~ x | ||||
| truth_formula <- x ~ z | ||||
| params <- start | ||||
| ll.y.obs.x0 | ||||
| ll.y.obs.x1 | ||||
| rater_formula <- x.obs ~ x | ||||
| rater_formula | ||||
| rater.modle.matrix.obs.x0 | ||||
| rater.model.matrix.obs.x0 | ||||
| names(rater.model.matrix.obs.x0) | ||||
| head(rater.model.matrix.obs.x0) | ||||
| df.obs | ||||
| ll.x.obs.0 | ||||
| rater.params | ||||
| rater.params %*% t(rater.model.matrix.x.obs.0[df.obs$xobs.0==1]) | ||||
| df.obs$xobs.0==1 | ||||
| df.obs$x.obs.0==1 | ||||
| ll.x.obs.0[df.obs$x.obs.0==1] | ||||
| rater.model.matrix.x.obs.0[df.obs$x.obs.0==1,] | ||||
| df.obs$x.obs.0==1 | ||||
| n.rater.model.covars <- dim(rater.model.matrix.x.obs.0)[2] | ||||
|         rater.params <- params[param.idx:n.rater.model.covars] | ||||
| rater.params | ||||
|         ll.x.obs.0[df.obs$x.obs.0==1] <- plogis(rater.params %*% t(rater.model.matrix.x.obs.0[df.obs$x.obs.0==1,]), log=TRUE) | ||||
| t(rater.model.matrix.x.obs.0[df.obs$x.obs.0==1,] | ||||
| ) | ||||
| dimt(rater.model.matrix.x.obs.0[df.obs$x.obs.0==1,]) | ||||
| dim(t(rater.model.matrix.x.obs.0[df.obs$x.obs.0==1,])) | ||||
| dim(ll.x.obs.0[df.obs$x.obs.0==1]) | ||||
| rater.params | ||||
| rater.params | ||||
| rater.params | ||||
| rater_formula | ||||
| rater.params | ||||
| ) | ||||
| 1+1 | ||||
| q() | ||||
| n | ||||
| outcome_formula <- y ~ x + z | ||||
| proxy_formula <- w_pred ~ x + z + y | ||||
| truth_formula <- x ~ z | ||||
| proxy_formula | ||||
| eyboardio Model 01 - Kaleidoscope locally built | ||||
| df <- df.triple.proxy.mle | ||||
| outcome_family='gaussian' | ||||
| outcome_family=gaussian() | ||||
| proxy_formulas=list(proxy_formula,x.obs.0~x, x.obs.1~x) | ||||
| proxy_formulas | ||||
| proxy_familites <- rep(binomial(link='logit'),3) | ||||
| proxy_families = rep(binomial(link='logit'),3) | ||||
| proxy_families | ||||
| proxy_families = list(binomial(link='logit'),binomial(link='logit'),binomial(link='logit')) | ||||
| proxy_families | ||||
| proxy_families[[1]] | ||||
| proxy.params | ||||
| i | ||||
| proxy_params | ||||
| proxy.params | ||||
| params | ||||
| params <- start | ||||
| df.triple.proxy.mle | ||||
| df | ||||
| coder.formulas <- c(x.obs.0 ~ x, x.obs.1 ~x) | ||||
| outcome.formula | ||||
| outcome_formula | ||||
| depvar(outcome_formula | ||||
| ) | ||||
| outcome_formula$terms | ||||
| terms(outcome_formula) | ||||
| q() | ||||
| n | ||||
| df.triple.proxy.mle | ||||
| triple.proxy.mle | ||||
| df | ||||
| df <- df.triple.proxy | ||||
| outcome_family <- binomial(link='logit') | ||||
| outcome_formula <- y ~x+z | ||||
| proxy_formula <- w_pred ~ y | ||||
| coder_formulas=list(y.obs.1~y,y.obs.2~y); proxy_formula=w_pred~y; proxy_family=binomial(link='logit')) | ||||
| coder_formulas=list(y.obs.1~y,y.obs.2~y); proxy_formula=w_pred~y; proxy_family=binomial(link='logit') | ||||
| coder_formulas=list(y.obs.0~y,y.obs.1~y) | ||||
| traceback() | ||||
| df | ||||
| df | ||||
| outcome.model.matrix | ||||
| q() | ||||
| n | ||||
|  | ||||
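Editor's note: the console history above is interactively assembling the log-likelihood of a coder/rater model (x.obs.0 ~ x) from a model matrix and plogis. A minimal sketch of that computation, assuming a data.table df.obs of human-labeled rows, a coder formula, and a parameter vector with one entry per model-matrix column (the function name here is illustrative, not part of the repository's API):

    library(data.table)

    coder_loglik <- function(df.obs, rater_formula = x.obs.0 ~ x, rater.params) {
        mm <- model.matrix(rater_formula, df.obs)      # design matrix for the coder model
        eta <- as.vector(mm %*% rater.params)          # linear predictor for each labeled row
        obs <- df.obs[[all.vars(rater_formula)[1]]]    # the coder's label, e.g. x.obs.0
        # log P(label | x): logistic likelihood; log.p = TRUE keeps it numerically stable,
        # which is what the plogis(..., log = TRUE) calls above do via partial matching
        ll <- ifelse(obs == 1,
                     plogis(eta, log.p = TRUE),
                     plogis(eta, lower.tail = FALSE, log.p = TRUE))
        sum(ll)
    }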
| @ -32,7 +32,7 @@ source("simulation_base.R") | ||||
| 
 | ||||
| simulate_data <- function(N, m, B0=0, Bxy=0.2, Bzy=-0.2, Bzx=0.2, y_explained_variance=0.025, prediction_accuracy=0.73, seed=1){ | ||||
|     set.seed(seed) | ||||
|     z <- rbinom(N, 1, 0.5) | ||||
|     z <- rnorm(N,sd=0.5) | ||||
|                                         #    x.var.epsilon <- var(Bzx *z) * ((1-zx_explained_variance)/zx_explained_variance) | ||||
|     xprime <- Bzx * z #+ x.var.epsilon | ||||
|     x <- rbinom(N,1,plogis(xprime)) | ||||
| @ -77,7 +77,7 @@ parser <- add_argument(parser, "--proxy_formula", help='formula for the proxy va | ||||
| parser <- add_argument(parser, "--truth_formula", help='formula for the true variable', default="x~z") | ||||
| parser <- add_argument(parser, "--Bzx", help='Effect of z on x', default=0.3) | ||||
| parser <- add_argument(parser, "--Bzy", help='Effect of z on y', default=-0.3) | ||||
| parser <- add_argument(parser, "--Bxy", help='Effect of z on y', default=0.3) | ||||
| parser <- add_argument(parser, "--Bxy", help='Effect of x on y', default=0.3) | ||||
| 
 | ||||
| args <- parse_args(parser) | ||||
| B0 <- 0 | ||||
| @ -85,23 +85,21 @@ Bxy <- args$Bxy | ||||
| Bzy <- args$Bzy | ||||
| Bzx <- args$Bzx | ||||
| 
 | ||||
| if (args$m < args$N){ | ||||
| df <- simulate_data(args$N, args$m, B0, Bxy, Bzy, Bzx, seed=args$seed + 500, y_explained_variance = args$y_explained_variance,  prediction_accuracy=args$prediction_accuracy) | ||||
| 
 | ||||
|     df <- simulate_data(args$N, args$m, B0, Bxy, Bzy, Bzx, seed=args$seed + 500, y_explained_variance = args$y_explained_variance,  prediction_accuracy=args$prediction_accuracy) | ||||
| result <- list('N'=args$N,'m'=args$m,'B0'=B0,'Bxy'=Bxy, Bzx=Bzx, 'Bzy'=Bzy, 'seed'=args$seed, 'y_explained_variance'=args$y_explained_variance, 'prediction_accuracy'=args$prediction_accuracy, 'accuracy_imbalance_difference'=args$accuracy_imbalance_difference, 'outcome_formula'=args$outcome_formula, 'proxy_formula'=args$proxy_formula,truth_formula=args$truth_formula, error='') | ||||
| 
 | ||||
|     result <- list('N'=args$N,'m'=args$m,'B0'=B0,'Bxy'=Bxy, Bzx=Bzx, 'Bzy'=Bzy, 'seed'=args$seed, 'y_explained_variance'=args$y_explained_variance, 'prediction_accuracy'=args$prediction_accuracy, 'accuracy_imbalance_difference'=args$accuracy_imbalance_difference, 'outcome_formula'=args$outcome_formula, 'proxy_formula'=args$proxy_formula,truth_formula=args$truth_formula, error='') | ||||
| 
 | ||||
|     outline <- run_simulation(df, result, outcome_formula=as.formula(args$outcome_formula), proxy_formula=as.formula(args$proxy_formula), truth_formula=as.formula(args$truth_formula)) | ||||
| outline <- run_simulation(df, result, outcome_formula=as.formula(args$outcome_formula), proxy_formula=as.formula(args$proxy_formula), truth_formula=as.formula(args$truth_formula)) | ||||
|      | ||||
|     outfile_lock <- lock(paste0(args$outfile, '_lock'),exclusive=TRUE) | ||||
|     if(file.exists(args$outfile)){ | ||||
|         logdata <- read_feather(args$outfile) | ||||
|         logdata <- rbind(logdata,as.data.table(outline),fill=TRUE) | ||||
|     } else { | ||||
|         logdata <- as.data.table(outline) | ||||
|     } | ||||
| 
 | ||||
|     print(outline) | ||||
|     write_feather(logdata, args$outfile) | ||||
|     unlock(outfile_lock) | ||||
| outfile_lock <- lock(paste0(args$outfile, '_lock'),exclusive=TRUE) | ||||
| if(file.exists(args$outfile)){ | ||||
|     logdata <- read_feather(args$outfile) | ||||
|     logdata <- rbind(logdata,as.data.table(outline),fill=TRUE) | ||||
| } else { | ||||
|     logdata <- as.data.table(outline) | ||||
| } | ||||
| 
 | ||||
| print(outline) | ||||
| write_feather(logdata, args$outfile) | ||||
| unlock(outfile_lock) | ||||
| 
 | ||||
|  | ||||
| @ -31,11 +31,11 @@ source("simulation_base.R") | ||||
| 
 | ||||
| ## one way to do it is by adding correlation to x.obs and y that isn't in w. | ||||
| ## in other words, the model is missing an important feature of x.obs that's related to y. | ||||
| simulate_data <- function(N, m, B0, Bxy, Bzx, Bzy, seed, y_explained_variance=0.025, prediction_accuracy=0.73, y_bias=-0.8,accuracy_imbalance_difference=0.3){ | ||||
| simulate_data <- function(N, m, B0, Bxy, Bzx, Bzy, seed, y_explained_variance=0.025, prediction_accuracy=0.73, y_bias=-0.8,z_bias=0,accuracy_imbalance_difference=0.3){ | ||||
|     set.seed(seed) | ||||
|     # make w and y dependent | ||||
|     z <- rbinom(N, 1, plogis(qlogis(0.5))) | ||||
|     x <- rbinom(N, 1, plogis(Bzx * z + qlogis(0.5))) | ||||
|     z <- rnorm(N,sd=0.5) | ||||
|     x <- rbinom(N, 1, plogis(Bzx * z)) | ||||
| 
 | ||||
|     y.var.epsilon <- (var(Bzy * z) + var(Bxy *x) + 2*cov(Bzy*z,Bxy*x)) * ((1-y_explained_variance)/y_explained_variance) | ||||
|     y.epsilon <- rnorm(N, sd = sqrt(y.var.epsilon)) | ||||
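Editor's note (not part of the diff): this is how y_explained_variance is targeted. With var(signal) = var(Bzy*z + Bxy*x) and var(epsilon) = var(signal) * (1 - y_explained_variance) / y_explained_variance, the total variance is var(y) = var(signal) / y_explained_variance, so the signal's share of var(y) is exactly y_explained_variance. A quick check on a simulated df, assuming Bzy and Bxy are still in scope:

    with(df, var(Bzy * z + Bxy * x) / var(y))  # should be close to y_explained_variance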
| @ -105,8 +105,8 @@ simulate_data <- function(N, m, B0, Bxy, Bzx, Bzy, seed, y_explained_variance=0. | ||||
|     ## print(mean(df$w_pred == df$x)) | ||||
| 
 | ||||
|     resids <- resid(lm(y~x + z)) | ||||
|     odds.x1 <- qlogis(prediction_accuracy) + y_bias*qlogis(pnorm(resids[x==1])) | ||||
|     odds.x0 <- qlogis(prediction_accuracy,lower.tail=F) + y_bias*qlogis(pnorm(resids[x==0])) | ||||
|     odds.x1 <- qlogis(prediction_accuracy) + y_bias*qlogis(pnorm(resids[x==1])) + z_bias * qlogis(pnorm(z,sd(z))) | ||||
|     odds.x0 <- qlogis(prediction_accuracy,lower.tail=F) + y_bias*qlogis(pnorm(resids[x==0])) + z_bias * qlogis(pnorm(z,sd(z))) | ||||
| 
 | ||||
|     ## acc.x0 <- p.correct[df[,x==0]] | ||||
|     ## acc.x1 <- p.correct[df[,x==1]] | ||||
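Editor's note: a hedged sketch of what happens downstream of these odds, following the pattern used in the other simulation scripts in this diff (e.g. 04_depvar_differential.R) rather than a verbatim excerpt of this file. The per-row odds are pushed through a logistic draw and thresholded, so misclassification depends on y's residual and, with the new z_bias term, on z as well:

    df[x == 1, w := plogis(rlogis(.N, odds.x1))]
    df[x == 0, w := plogis(rlogis(.N, odds.x0))]
    df[, w_pred := as.integer(w > 0.5)]
    # with z_bias > 0, rows with larger z get higher odds of w_pred == 1 regardless of the
    # true x, i.e. the classifier's errors are correlated with z -- the differential error
    # that the proxy_formula w_pred ~ y*z*x is meant to capture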
| @ -129,14 +129,15 @@ parser <- add_argument(parser, "--m", default=500, help="m the number of ground | ||||
| parser <- add_argument(parser, "--seed", default=51, help='seed for the rng') | ||||
| parser <- add_argument(parser, "--outfile", help='output file', default='example_2.feather') | ||||
| parser <- add_argument(parser, "--y_explained_variance", help='what proportion of the variance of y can be explained?', default=0.1) | ||||
| parser <- add_argument(parser, "--prediction_accuracy", help='how accurate is the predictive model?', default=0.8) | ||||
| parser <- add_argument(parser, "--prediction_accuracy", help='how accurate is the predictive model?', default=0.75) | ||||
| parser <- add_argument(parser, "--accuracy_imbalance_difference", help='how much more accurate is the predictive model for one class than the other?', default=0.3) | ||||
| parser <- add_argument(parser, "--Bzx", help='Effect of z on x', default=0.3) | ||||
| parser <- add_argument(parser, "--Bzy", help='Effect of z on y', default=-0.3) | ||||
| parser <- add_argument(parser, "--Bxy", help='Effect of z on y', default=0.3) | ||||
| parser <- add_argument(parser, "--outcome_formula", help='formula for the outcome variable', default="y~x+z") | ||||
| parser <- add_argument(parser, "--proxy_formula", help='formula for the proxy variable', default="w_pred~y*z*x") | ||||
| parser <- add_argument(parser, "--y_bias", help='coefficient of y on the probability a classification is correct', default=-1) | ||||
| parser <- add_argument(parser, "--y_bias", help='coefficient of y on the probability a classification is correct', default=-0.5) | ||||
| parser <- add_argument(parser, "--z_bias", help='coefficient of z on the probability a classification is correct', default=0) | ||||
| parser <- add_argument(parser, "--truth_formula", help='formula for the true variable', default="x~z") | ||||
| 
 | ||||
| args <- parse_args(parser) | ||||
|  | ||||
| @ -31,13 +31,13 @@ source("simulation_base.R") | ||||
| 
 | ||||
| ## one way to do it is by adding correlation to x.obs and y that isn't in w. | ||||
| ## in other words, the model is missing an important feature of x.obs that's related to y. | ||||
| simulate_data <- function(N, m, B0, Bxy, Bzy, seed, prediction_accuracy=0.73, log.likelihood.gain = 0.1){ | ||||
| simulate_data <- function(N, m, B0, Bxy, Bzy, Bzx, seed, prediction_accuracy=0.73, log.likelihood.gain = 0.1){ | ||||
|     set.seed(seed) | ||||
|     set.seed(seed) | ||||
| 
 | ||||
|     # make w and y dependent | ||||
|     z <- rbinom(N, 1, 0.5) | ||||
|     x <- rbinom(N, 1, 0.5) | ||||
|     z <- rnorm(N, sd=0.5) | ||||
|     x <- rbinom(N, 1, plogis(Bzx*z)) | ||||
| 
 | ||||
|     ystar <- Bzy * z + Bxy * x + B0 | ||||
|     y <- rbinom(N,1,plogis(ystar)) | ||||
| @ -75,6 +75,7 @@ parser <- add_argument(parser, "--prediction_accuracy", help='how accurate is th | ||||
| ## parser <- add_argument(parser, "--x_bias_y0", help='how is the classifier biased when y = 0 ?', default=0.75) | ||||
| parser <- add_argument(parser, "--Bxy", help='coefficient of x on y', default=0.01) | ||||
| parser <- add_argument(parser, "--Bzy", help='coeffficient of z on y', default=-0.01) | ||||
| parser <- add_argument(parser, "--Bzx", help='coeffficient of z on x', default=-0.5) | ||||
| parser <- add_argument(parser, "--outcome_formula", help='formula for the outcome variable', default="y~x+z") | ||||
| parser <- add_argument(parser, "--proxy_formula", help='formula for the proxy variable', default="w_pred~y") | ||||
| 
 | ||||
| @ -83,13 +84,13 @@ args <- parse_args(parser) | ||||
| B0 <- 0 | ||||
| Bxy <- args$Bxy | ||||
| Bzy <- args$Bzy | ||||
| 
 | ||||
| Bzx <- args$Bzx | ||||
| 
 | ||||
| if(args$m < args$N){ | ||||
|     df <- simulate_data(args$N, args$m, B0, Bxy, Bzy, args$seed, args$prediction_accuracy) | ||||
|     df <- simulate_data(args$N, args$m, B0, Bxy, Bzy, Bzx, args$seed, args$prediction_accuracy) | ||||
| 
 | ||||
| #    result <- list('N'=args$N,'m'=args$m,'B0'=B0,'Bxy'=Bxy,'Bzy'=Bzy, 'seed'=args$seed, 'y_explained_variance'=args$y_explained_variance, 'prediction_accuracy'=args$prediction_accuracy, 'x_bias_y0'=args$x_bias_y0,'x_bias_y1'=args$x_bias_y1,'outcome_formula' = args$outcome_formula, 'proxy_formula' = args$proxy_formula) | ||||
|     result <- list('N'=args$N,'m'=args$m,'B0'=B0,'Bxy'=Bxy,'Bzy'=Bzy, 'seed'=args$seed, 'y_explained_variance'=args$y_explained_variance, 'prediction_accuracy'=args$prediction_accuracy, 'outcome_formula' = args$outcome_formula, 'proxy_formula' = args$proxy_formula) | ||||
|     result <- list('N'=args$N,'m'=args$m,'B0'=B0,'Bxy'=Bxy,'Bzy'=Bzy, 'Bzx'=Bzx,'seed'=args$seed, 'y_explained_variance'=args$y_explained_variance, 'prediction_accuracy'=args$prediction_accuracy, 'outcome_formula' = args$outcome_formula, 'proxy_formula' = args$proxy_formula) | ||||
| 
 | ||||
|     outline <- run_simulation_depvar(df, result, outcome_formula = as.formula(args$outcome_formula), proxy_formula = as.formula(args$proxy_formula)) | ||||
| 
 | ||||
|  | ||||
| @ -31,12 +31,12 @@ source("simulation_base.R") | ||||
| 
 | ||||
| ## one way to do it is by adding correlation to x.obs and y that isn't in w. | ||||
| ## in other words, the model is missing an important feature of x.obs that's related to y. | ||||
| simulate_data <- function(N, m, B0, Bxy, Bzy, seed, prediction_accuracy=0.73, x_bias=-0.75){ | ||||
| simulate_data <- function(N, m, B0, Bxy, Bzy, seed, prediction_accuracy=0.73, z_bias=-0.75){ | ||||
|     set.seed(seed) | ||||
| 
 | ||||
|     # make w and y dependent | ||||
|     z <- rbinom(N, 1, 0.5) | ||||
|     x <- rbinom(N, 1, 0.5) | ||||
|     z <- rnorm(N,sd=0.5) | ||||
|     x <- rbinom(N,1,0.5) | ||||
| 
 | ||||
|     ystar <- Bzy * z + Bxy * x + B0 | ||||
|     y <- rbinom(N,1,plogis(ystar)) | ||||
| @ -51,8 +51,8 @@ simulate_data <- function(N, m, B0, Bxy, Bzy, seed, prediction_accuracy=0.73, x_ | ||||
|         df <- df[, y.obs := y] | ||||
|     } | ||||
|      | ||||
|     odds.y1 <- qlogis(prediction_accuracy) + x_bias*df[y==1]$x | ||||
|     odds.y0 <- qlogis(prediction_accuracy,lower.tail=F) + x_bias*df[y==0]$x | ||||
|     odds.y1 <- qlogis(prediction_accuracy) + z_bias*df[y==1]$z | ||||
|     odds.y0 <- qlogis(prediction_accuracy,lower.tail=F) + z_bias*df[y==0]$z | ||||
| 
 | ||||
|     df[y==0,w:=plogis(rlogis(.N,odds.y0))] | ||||
|     df[y==1,w:=plogis(rlogis(.N,odds.y1))] | ||||
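Editor's note: a quick way to see why the default proxy_formula below changes from w_pred ~ y + x to w_pred ~ y + z. Because each row's probability of being classified 1 is plogis of its odds, a logistic regression of the proxy on y and z on a simulated df should recover a z coefficient near z_bias. This is a hedged illustration, not code from the repository:

    summary(glm(w_pred ~ y + z, data = df, family = binomial(link = 'logit')))
    # the z coefficient should sit near the chosen z_bias (parser default 1.5 below), and the
    # y coefficient near qlogis(prediction_accuracy) - qlogis(1 - prediction_accuracy)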
| @ -69,16 +69,15 @@ parser <- arg_parser("Simulate data and fit corrected models") | ||||
| parser <- add_argument(parser, "--N", default=1000, help="number of observations of w") | ||||
| parser <- add_argument(parser, "--m", default=500, help="m the number of ground truth observations") | ||||
| parser <- add_argument(parser, "--seed", default=17, help='seed for the rng') | ||||
| parser <- add_argument(parser, "--outfile", help='output file', default='example_2.feather') | ||||
| parser <- add_argument(parser, "--y_explained_variance", help='what proportion of the variance of y can be explained?', default=0.005) | ||||
| parser <- add_argument(parser, "--prediction_accuracy", help='how accurate is the predictive model?', default=0.8) | ||||
| ## parser <- add_argument(parser, "--x_bias_y1", help='how is the classifier biased when y = 1?', default=-0.75) | ||||
| ## parser <- add_argument(parser, "--x_bias_y0", help='how is the classifier biased when y = 0 ?', default=0.75) | ||||
| parser <- add_argument(parser, "--x_bias", help='how is the classifier biased?', default=0.75) | ||||
| parser <- add_argument(parser, "--Bxy", help='coefficient of x on y', default=0.3) | ||||
| parser <- add_argument(parser, "--Bzy", help='coeffficient of z on y', default=-0.3) | ||||
| parser <- add_argument(parser, "--outfile", help='output file', default='example_4.feather') | ||||
| parser <- add_argument(parser, "--prediction_accuracy", help='how accurate is the predictive model?', default=0.79) | ||||
| ## parser <- add_argument(parser, "--z_bias_y1", help='how is the classifier biased when y = 1?', default=-0.75) | ||||
| ## parser <- add_argument(parser, "--z_bias_y0", help='how is the classifier biased when y = 0 ?', default=0.75) | ||||
| parser <- add_argument(parser, "--z_bias", help='how is the classifier biased?', default=1.5) | ||||
| parser <- add_argument(parser, "--Bxy", help='coefficient of x on y', default=0.1) | ||||
| parser <- add_argument(parser, "--Bzy", help='coeffficient of z on y', default=-0.1) | ||||
| parser <- add_argument(parser, "--outcome_formula", help='formula for the outcome variable', default="y~x+z") | ||||
| parser <- add_argument(parser, "--proxy_formula", help='formula for the proxy variable', default="w_pred~y+x") | ||||
| parser <- add_argument(parser, "--proxy_formula", help='formula for the proxy variable', default="w_pred~y+z") | ||||
| 
 | ||||
| args <- parse_args(parser) | ||||
| 
 | ||||
| @ -88,10 +87,10 @@ Bzy <- args$Bzy | ||||
| 
 | ||||
| 
 | ||||
| if(args$m < args$N){ | ||||
|     df <- simulate_data(args$N, args$m, B0, Bxy, Bzy, args$seed, args$prediction_accuracy, args$x_bias) | ||||
|     df <- simulate_data(args$N, args$m, B0, Bxy, Bzy, args$seed, args$prediction_accuracy, args$z_bias) | ||||
| 
 | ||||
| #    result <- list('N'=args$N,'m'=args$m,'B0'=B0,'Bxy'=Bxy,'Bzy'=Bzy, 'seed'=args$seed, 'y_explained_variance'=args$y_explained_variance, 'prediction_accuracy'=args$prediction_accuracy, 'x_bias_y0'=args$x_bias_y0,'x_bias_y1'=args$x_bias_y1,'outcome_formula' = args$outcome_formula, 'proxy_formula' = args$proxy_formula) | ||||
|     result <- list('N'=args$N,'m'=args$m,'B0'=B0,'Bxy'=Bxy,'Bzy'=Bzy, 'seed'=args$seed, 'y_explained_variance'=args$y_explained_variance, 'prediction_accuracy'=args$prediction_accuracy, 'x_bias'=args$x_bias,'outcome_formula' = args$outcome_formula, 'proxy_formula' = args$proxy_formula) | ||||
| #    result <- list('N'=args$N,'m'=args$m,'B0'=B0,'Bxy'=Bxy,'Bzy'=Bzy, 'seed'=args$seed, 'y_explained_variance'=args$y_explained_variance, 'prediction_accuracy'=args$prediction_accuracy, 'z_bias_y0'=args$z_bias_y0,'z_bias_y1'=args$z_bias_y1,'outcome_formula' = args$outcome_formula, 'proxy_formula' = args$proxy_formula) | ||||
|     result <- list('N'=args$N,'m'=args$m,'B0'=B0,'Bxy'=Bxy,'Bzy'=Bzy, 'seed'=args$seed, 'y_explained_variance'=args$y_explained_variance, 'prediction_accuracy'=args$prediction_accuracy, 'z_bias'=args$z_bias,'outcome_formula' = args$outcome_formula, 'proxy_formula' = args$proxy_formula) | ||||
| 
 | ||||
|     outline <- run_simulation_depvar(df, result, outcome_formula = as.formula(args$outcome_formula), proxy_formula = as.formula(args$proxy_formula)) | ||||
| 
 | ||||
|  | ||||
| @ -39,7 +39,7 @@ simulate_data <- function(N, m, B0=0, Bxy=0.2, Bzy=-0.2, Bzx=0.2, y_explained_va | ||||
| 
 | ||||
|     y.var.epsilon <- (var(Bzy * z) + var(Bxy *x) + 2*cov(Bxy*x,Bzy*z)) * ((1-y_explained_variance)/y_explained_variance) | ||||
|     y.epsilon <- rnorm(N, sd = sqrt(y.var.epsilon)) | ||||
|     y <- Bzy * z + Bxy * x + y.epsilon | ||||
|     y <- Bzy * z + Bxy * x + y.epsilon + B0 | ||||
| 
 | ||||
|     df <- data.table(x=x,y=y,z=z) | ||||
| 
 | ||||
| @ -49,9 +49,12 @@ simulate_data <- function(N, m, B0=0, Bxy=0.2, Bzy=-0.2, Bzx=0.2, y_explained_va | ||||
|         df <- df[, x.obs := x] | ||||
|     } | ||||
| 
 | ||||
|     df[ (!is.na(x.obs)) ,x.obs.0 := abs(x.obs - rbinom(.N, 1, 1-coder_accuracy))] | ||||
|     df[ (!is.na(x.obs)) ,x.obs.1 := abs(x.obs - rbinom(.N, 1, 1-coder_accuracy))] | ||||
|      | ||||
|     coder.0.correct <- rbinom(m, 1, coder_accuracy) | ||||
|     coder.1.correct <- rbinom(m, 1, coder_accuracy) | ||||
| 
 | ||||
|     df[!is.na(x.obs),x.obs.0 := as.numeric((x.obs & coder.0.correct) | (!x.obs & !coder.0.correct))] | ||||
|     df[!is.na(x.obs),x.obs.1 := as.numeric((x.obs & coder.1.correct) | (!x.obs & !coder.1.correct))] | ||||
| 
 | ||||
| 
 | ||||
|     ## how can you make a model with a specific accuracy? | ||||
|     w0 =(1-x)**2 + (-1)**(1-x) * prediction_accuracy | ||||
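Editor's note: the new coder construction above flips the true label independently for each simulated coder with probability 1 - coder_accuracy. A hedged sanity check on a simulated df (illustrative only):

    df[!is.na(x.obs), .(coder.0.acc = mean(x.obs.0 == x.obs),
                        coder.1.acc = mean(x.obs.1 == x.obs),
                        agreement   = mean(x.obs.0 == x.obs.1))]
    # both accuracies should land near coder_accuracy, and agreement near
    # coder_accuracy^2 + (1 - coder_accuracy)^2, since the two coders err independently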
| @ -69,21 +72,21 @@ simulate_data <- function(N, m, B0=0, Bxy=0.2, Bzy=-0.2, Bzx=0.2, y_explained_va | ||||
| 
 | ||||
| parser <- arg_parser("Simulate data and fit corrected models") | ||||
| parser <- add_argument(parser, "--N", default=1000, help="number of observations of w") | ||||
| parser <- add_argument(parser, "--m", default=500, help="m the number of ground truth observations") | ||||
| parser <- add_argument(parser, "--seed", default=57, help='seed for the rng') | ||||
| parser <- add_argument(parser, "--m", default=150, help="m the number of ground truth observations") | ||||
| parser <- add_argument(parser, "--seed", default=1, help='seed for the rng') | ||||
| parser <- add_argument(parser, "--outfile", help='output file', default='example_1.feather') | ||||
| parser <- add_argument(parser, "--y_explained_variance", help='what proportion of the variance of y can be explained?', default=0.05) | ||||
| parser <- add_argument(parser, "--y_explained_variance", help='what proportion of the variance of y can be explained?', default=0.1) | ||||
| # parser <- add_argument(parser, "--zx_explained_variance", help='what proportion of the variance of x can be explained by z?', default=0.3) | ||||
| parser <- add_argument(parser, "--prediction_accuracy", help='how accurate is the predictive model?', default=0.73) | ||||
| parser <- add_argument(parser, "--coder_accuracy", help='how accurate is the predictive model?', default=0.8) | ||||
| parser <- add_argument(parser, "--coder_accuracy", help='how accurate are the human coders?', default=0.85) | ||||
| parser <- add_argument(parser, "--outcome_formula", help='formula for the outcome variable', default="y~x+z") | ||||
| parser <- add_argument(parser, "--proxy_formula", help='formula for the proxy variable', default="w_pred~x") | ||||
| 
 | ||||
| # parser <- add_argument(parser, "--rater_formula", help='formula for the true variable', default="x.obs~x") | ||||
| parser <- add_argument(parser, "--truth_formula", help='formula for the true variable', default="x~z") | ||||
| parser <- add_argument(parser, "--Bzx", help='Effect of z on x', default=-0.3) | ||||
| parser <- add_argument(parser, "--Bzy", help='Effect of z on y', default=-0.3) | ||||
| parser <- add_argument(parser, "--Bxy", help='Effect of z on y', default=0.3) | ||||
| parser <- add_argument(parser, "--Bzy", help='Effect of z on y', default=0.27) | ||||
| parser <- add_argument(parser, "--Bxy", help='Effect of x on y', default=-0.33) | ||||
| 
 | ||||
| args <- parse_args(parser) | ||||
| B0 <- 0 | ||||
| @ -93,7 +96,7 @@ Bzx <- args$Bzx | ||||
| 
 | ||||
| if (args$m < args$N){ | ||||
| 
 | ||||
|     df <- simulate_data(args$N, args$m, B0, Bxy, Bzy, Bzx, seed=args$seed + 500, y_explained_variance = args$y_explained_variance,  prediction_accuracy=args$prediction_accuracy, coder_accuracy=args$coder_accuracy) | ||||
|     df <- simulate_data(args$N, args$m, B0, Bxy, Bzy, Bzx, seed=args$seed, y_explained_variance = args$y_explained_variance,  prediction_accuracy=args$prediction_accuracy, coder_accuracy=args$coder_accuracy) | ||||
| 
 | ||||
|     result <- list('N'=args$N,'m'=args$m,'B0'=B0,'Bxy'=Bxy, Bzx=Bzx, 'Bzy'=Bzy, 'seed'=args$seed, 'y_explained_variance'=args$y_explained_variance, 'prediction_accuracy'=args$prediction_accuracy, 'accuracy_imbalance_difference'=args$accuracy_imbalance_difference, 'outcome_formula'=args$outcome_formula, 'truth_formula'=args$truth_formula, 'proxy_formula'=args$proxy_formula,truth_formula=args$truth_formula, 'coder_accuracy'=args$coder_accuracy, error='') | ||||
| 
 | ||||
|  | ||||
| @ -31,14 +31,13 @@ simulate_data <- function(N, m, B0, Bxy, Bzy, seed, prediction_accuracy=0.73, co | ||||
| 
 | ||||
|     df <- data.table(x=x,y=y,ystar=ystar,z=z) | ||||
| 
 | ||||
|     if(m < N){ | ||||
|         df <- df[sample(nrow(df), m), y.obs := y] | ||||
|     } else { | ||||
|         df <- df[, y.obs := y] | ||||
|     } | ||||
|     df <- df[sample(nrow(df), m), y.obs := y] | ||||
|      | ||||
|     df[ (!is.na(y.obs)) ,y.obs.0 := abs(y.obs - rbinom(.N, 1, 1-coder_accuracy))] | ||||
|     df[ (!is.na(y.obs)) ,y.obs.1 := abs(y.obs - rbinom(.N, 1, 1-coder_accuracy))] | ||||
|     coder.0.correct <- rbinom(m, 1, coder_accuracy) | ||||
|     coder.1.correct <- rbinom(m, 1, coder_accuracy) | ||||
|      | ||||
|     df[!is.na(y.obs),y.obs.0 := as.numeric((.SD$y.obs & coder.0.correct) | (!.SD$y.obs & !coder.0.correct))] | ||||
|     df[!is.na(y.obs),y.obs.1 := as.numeric((.SD$y.obs & coder.1.correct) | (!.SD$y.obs & !coder.1.correct))] | ||||
| 
 | ||||
|     odds.y1 <- qlogis(prediction_accuracy) | ||||
|     odds.y0 <- qlogis(prediction_accuracy,lower.tail=F) | ||||
| @ -48,6 +47,9 @@ simulate_data <- function(N, m, B0, Bxy, Bzy, seed, prediction_accuracy=0.73, co | ||||
| 
 | ||||
|     df[,w_pred := as.integer(w > 0.5)] | ||||
| 
 | ||||
|     print(mean(df$y == df$y.obs.0,na.rm=T)) | ||||
|     print(mean(df$y == df$y.obs.1,na.rm=T)) | ||||
| 
 | ||||
|     print(mean(df[x==0]$y == df[x==0]$w_pred)) | ||||
|     print(mean(df[x==1]$y == df[x==1]$w_pred)) | ||||
|     print(mean(df$w_pred == df$y)) | ||||
| @ -55,18 +57,18 @@ simulate_data <- function(N, m, B0, Bxy, Bzy, seed, prediction_accuracy=0.73, co | ||||
| } | ||||
| 
 | ||||
| parser <- arg_parser("Simulate data and fit corrected models") | ||||
| parser <- add_argument(parser, "--N", default=1000, help="number of observations of w") | ||||
| parser <- add_argument(parser, "--N", default=5000, help="number of observations of w") | ||||
| parser <- add_argument(parser, "--m", default=500, help="m the number of ground truth observations") | ||||
| parser <- add_argument(parser, "--seed", default=17, help='seed for the rng') | ||||
| parser <- add_argument(parser, "--seed", default=16, help='seed for the rng') | ||||
| parser <- add_argument(parser, "--outfile", help='output file', default='example_2.feather') | ||||
| parser <- add_argument(parser, "--y_explained_variance", help='what proportion of the variance of y can be explained?', default=0.005) | ||||
| parser <- add_argument(parser, "--prediction_accuracy", help='how accurate is the predictive model?', default=0.72) | ||||
| parser <- add_argument(parser, "--y_explained_variance", help='what proportion of the variance of y can be explained?', default=0.1) | ||||
| parser <- add_argument(parser, "--prediction_accuracy", help='how accurate is the predictive model?', default=0.73) | ||||
| ## parser <- add_argument(parser, "--x_bias_y1", help='how is the classifier biased when y = 1?', default=-0.75) | ||||
| ## parser <- add_argument(parser, "--x_bias_y0", help='how is the classifier biased when y = 0 ?', default=0.75) | ||||
| parser <- add_argument(parser, "--Bxy", help='coefficient of x on y', default=0.3) | ||||
| parser <- add_argument(parser, "--Bzy", help='coeffficient of z on y', default=-0.3) | ||||
| parser <- add_argument(parser, "--outcome_formula", help='formula for the outcome variable', default="y~x+z") | ||||
| parser <- add_argument(parser, "--proxy_formula", help='formula for the proxy variable', default="w_pred~y") | ||||
| parser <- add_argument(parser, "--proxy_formula", help='formula for the proxy variable', default="w_pred~y+y.obs.1+y.obs.0") | ||||
| parser <- add_argument(parser, "--coder_accuracy", help='How accurate are the coders?', default=0.8) | ||||
| 
 | ||||
| args <- parse_args(parser) | ||||
| @ -76,24 +78,24 @@ Bxy <- args$Bxy | ||||
| Bzy <- args$Bzy | ||||
| 
 | ||||
| 
 | ||||
| if(args$m < args$N){ | ||||
|     df <- simulate_data(args$N, args$m, B0, Bxy, Bzy, args$seed, args$prediction_accuracy, args$coder_accuracy) | ||||
| df <- simulate_data(args$N, args$m, B0, Bxy, Bzy, args$seed, args$prediction_accuracy, args$coder_accuracy) | ||||
| 
 | ||||
| #    result <- list('N'=args$N,'m'=args$m,'B0'=B0,'Bxy'=Bxy,'Bzy'=Bzy, 'seed'=args$seed, 'y_explained_variance'=args$y_explained_variance, 'prediction_accuracy'=args$prediction_accuracy, 'x_bias_y0'=args$x_bias_y0,'x_bias_y1'=args$x_bias_y1,'outcome_formula' = args$outcome_formula, 'proxy_formula' = args$proxy_formula) | ||||
|     result <- list('N'=args$N,'m'=args$m,'B0'=B0,'Bxy'=Bxy,'Bzy'=Bzy, 'seed'=args$seed, 'y_explained_variance'=args$y_explained_variance, 'prediction_accuracy'=args$prediction_accuracy, 'outcome_formula' = args$outcome_formula, 'proxy_formula' = args$proxy_formula) | ||||
|                                         #    result <- list('N'=args$N,'m'=args$m,'B0'=B0,'Bxy'=Bxy,'Bzy'=Bzy, 'seed'=args$seed, 'y_explained_variance'=args$y_explained_variance, 'prediction_accuracy'=args$prediction_accuracy, 'x_bias_y0'=args$x_bias_y0,'x_bias_y1'=args$x_bias_y1,'outcome_formula' = args$outcome_formula, 'proxy_formula' = args$proxy_formula) | ||||
| result <- list('N'=args$N,'m'=args$m,'B0'=B0,'Bxy'=Bxy,'Bzy'=Bzy, 'seed'=args$seed, 'y_explained_variance'=args$y_explained_variance, 'prediction_accuracy'=args$prediction_accuracy, 'outcome_formula' = args$outcome_formula, 'proxy_formula' = args$proxy_formula) | ||||
| 
 | ||||
|     outline <- run_simulation_depvar(df, result, outcome_formula = as.formula(args$outcome_formula), proxy_formula = as.formula(args$proxy_formula)) | ||||
| outline <- run_simulation_depvar(df, result, outcome_formula = as.formula(args$outcome_formula), proxy_formula = as.formula(args$proxy_formula)) | ||||
| 
 | ||||
|     outfile_lock <- lock(paste0(args$outfile, '_lock'),exclusive=TRUE) | ||||
| outfile_lock <- lock(paste0(args$outfile, '_lock'),exclusive=TRUE) | ||||
| 
 | ||||
|     if(file.exists(args$outfile)){ | ||||
|         logdata <- read_feather(args$outfile) | ||||
|         logdata <- rbind(logdata,as.data.table(outline),fill=TRUE) | ||||
|     } else { | ||||
|         logdata <- as.data.table(outline) | ||||
|     } | ||||
| 
 | ||||
|     print(outline) | ||||
|     write_feather(logdata, args$outfile) | ||||
|     unlock(outfile_lock) | ||||
| if(file.exists(args$outfile)){ | ||||
|     logdata <- read_feather(args$outfile) | ||||
|     logdata <- rbind(logdata,as.data.table(outline),fill=TRUE) | ||||
| } else { | ||||
|     logdata <- as.data.table(outline) | ||||
| } | ||||
| 
 | ||||
| print(outline) | ||||
| write_feather(logdata, args$outfile) | ||||
| unlock(outfile_lock) | ||||
| 
 | ||||
| warnings() | ||||
|  | ||||
| @ -1,9 +1,9 @@ | ||||
| 
 | ||||
| SHELL=bash | ||||
| 
 | ||||
| Ns=[1000, 2000, 4000] | ||||
| ms=[100, 200, 400, 800] | ||||
| seeds=[$(shell seq -s, 1 250)] | ||||
| Ns=[1000, 5000, 10000] | ||||
| ms=[100, 200, 400] | ||||
| seeds=[$(shell seq -s, 1 500)] | ||||
| explained_variances=[0.1] | ||||
| 
 | ||||
| all:remembr.RDS remember_irr.RDS | ||||
| @ -23,21 +23,28 @@ joblists:example_1_jobs example_2_jobs example_3_jobs | ||||
| # 	sbatch --wait --verbose --array=3001-6001 run_simulation.sbatch 0 test_true_z_jobs
 | ||||
| 
 | ||||
| 
 | ||||
| example_1_jobs: 01_two_covariates.R simulation_base.R grid_sweep.py | ||||
| 	sbatch --wait --verbose run_job.sbatch grid_sweep.py --command "Rscript 01_two_covariates.R" --arg_dict '{"N":${Ns},"m":${ms}, "seed":${seeds}, "outfile":["example_1.feather"], "y_explained_variance":${explained_variances}, "Bzx":[0.3]}' --outfile example_1_jobs | ||||
| example_1_jobs: 01_two_covariates.R simulation_base.R grid_sweep.py pl_methods.R | ||||
| 	sbatch --wait --verbose run_job.sbatch grid_sweep.py --command "Rscript 01_two_covariates.R" --arg_dict '{"N":${Ns},"m":${ms}, "seed":${seeds}, "outfile":["example_1.feather"], "y_explained_variance":${explained_variances}, "Bzx":[1]}' --outfile example_1_jobs | ||||
| 
 | ||||
| example_1.feather: example_1_jobs  | ||||
| 	rm -f example_1.feather | ||||
| 	sbatch --wait --verbose --array=1-$(shell cat example_1_jobs | wc -l) run_simulation.sbatch 0 example_1_jobs | ||||
| #	sbatch --wait --verbose --array=3001-6001 run_simulation.sbatch 0 example_1_jobs
 | ||||
| 	sbatch --wait --verbose --array=1-1000  run_simulation.sbatch 0 example_1_jobs | ||||
| 	sbatch --wait --verbose --array=1001-2000  run_simulation.sbatch 0 example_1_jobs | ||||
| 	sbatch --wait --verbose --array=2001-3000  run_simulation.sbatch 0 example_1_jobs | ||||
| 	sbatch --wait --verbose --array=3001-4000  run_simulation.sbatch 0 example_1_jobs | ||||
| 	sbatch --wait --verbose --array=4001-$(shell cat example_1_jobs | wc -l)  run_simulation.sbatch 0 example_1_jobs | ||||
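Editor's note (not part of the Makefile): the split into fixed 1000-row array blocks lines up with the size of the parameter grid that grid_sweep.py expands, assuming it takes the full Cartesian product of the lists defined at the top of the file:

    # Ns x ms x seeds = 3 * 3 * 500 = 4500 jobs per example,
    # hence array ranges 1-1000, 1001-2000, 2001-3000, 3001-4000, 4001-end
    length(c(1000, 5000, 10000)) * length(c(100, 200, 400)) * length(1:500)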
| 
 | ||||
| example_2_jobs: 02_indep_differential.R simulation_base.R grid_sweep.py | ||||
| 	sbatch --wait --verbose run_job.sbatch grid_sweep.py --command "Rscript 02_indep_differential.R" --arg_dict '{"N":${Ns},"m":${ms}, "seed":${seeds}, "outfile":["example_2.feather"],"y_explained_variance":${explained_variances},  "Bzy":[-0.3],"Bxy":[0.3],"Bzx":[0.3], "outcome_formula":["y~x+z"], "proxy_formula":["w_pred~y*z*x"]}' --outfile example_2_jobs | ||||
| example_2_jobs: 02_indep_differential.R simulation_base.R grid_sweep.py pl_methods.R | ||||
| 	sbatch --wait --verbose run_job.sbatch grid_sweep.py --command "Rscript 02_indep_differential.R" --arg_dict '{"N":${Ns},"m":${ms}, "seed":${seeds}, "outfile":["example_2.feather"],"y_explained_variance":${explained_variances},  "Bzy":[-0.3],"Bxy":[0.3],"Bzx":[1], "outcome_formula":["y~x+z"], "proxy_formula":["w_pred~y*z*x"]}' --outfile example_2_jobs | ||||
| 
 | ||||
| example_2.feather: example_2_jobs  | ||||
| 	rm -f example_2.feather | ||||
| 	sbatch --wait --verbose --array=1-$(shell cat example_1_jobs | wc -l) run_simulation.sbatch 0 example_2_jobs | ||||
| #	sbatch --wait --verbose --array=3001-6001 run_simulation.sbatch 0 example_2_jobs
 | ||||
| 	sbatch --wait --verbose --array=1-1000  run_simulation.sbatch 0 example_2_jobs | ||||
| 	sbatch --wait --verbose --array=1001-2000  run_simulation.sbatch 0 example_2_jobs | ||||
| 	sbatch --wait --verbose --array=2001-3000  run_simulation.sbatch 0 example_2_jobs | ||||
| 	sbatch --wait --verbose --array=3001-4000  run_simulation.sbatch 0 example_2_jobs | ||||
| 	sbatch --wait --verbose --array=4001-$(shell cat example_2_jobs | wc -l)  run_simulation.sbatch 0 example_2_jobs | ||||
| 
 | ||||
| 
 | ||||
| # example_2_B_jobs: example_2_B.R
 | ||||
| # 	sbatch --wait --verbose run_job.sbatch grid_sweep.py --command "Rscript example_2_B.R" --arg_dict '{"N":${Ns},"m":${ms}, "seed":${seeds}, "outfile":["example_2_B.feather"]}' --outfile example_2_B_jobs
 | ||||
| @ -46,19 +53,28 @@ example_2.feather: example_2_jobs | ||||
| # 	rm -f example_2_B.feather
 | ||||
| # 	sbatch --wait --verbose --array=1-3000 run_simulation.sbatch 0 example_2_B_jobs
 | ||||
| 
 | ||||
| example_3_jobs: 03_depvar.R simulation_base.R grid_sweep.py | ||||
| 	sbatch --wait --verbose run_job.sbatch grid_sweep.py --command "Rscript 03_depvar.R" --arg_dict '{"N":${Ns},"m":${ms}, "Bxy":[0.01],"Bzy":[-0.01],"seed":${seeds}, "outfile":["example_3.feather"], "y_explained_variance":${explained_variances}}' --outfile example_3_jobs | ||||
| example_3_jobs: 03_depvar.R simulation_base.R grid_sweep.py pl_methods.R | ||||
| 	sbatch --wait --verbose run_job.sbatch grid_sweep.py --command "Rscript 03_depvar.R" --arg_dict '{"N":${Ns},"m":${ms}, "Bxy":[0.7],"Bzy":[-0.7],"seed":${seeds}, "outfile":["example_3.feather"], "y_explained_variance":${explained_variances}}' --outfile example_3_jobs | ||||
| 
 | ||||
| example_3.feather: example_3_jobs | ||||
| 	rm -f example_3.feather	 | ||||
| 	sbatch --wait --verbose --array=1-$(shell cat example_3_jobs | wc -l)  run_simulation.sbatch 0 example_3_jobs | ||||
| 	sbatch --wait --verbose --array=1-1000  run_simulation.sbatch 0 example_3_jobs | ||||
| 	sbatch --wait --verbose --array=1001-2000  run_simulation.sbatch 0 example_3_jobs | ||||
| 	sbatch --wait --verbose --array=2001-3000  run_simulation.sbatch 0 example_3_jobs | ||||
| 	sbatch --wait --verbose --array=3001-4000  run_simulation.sbatch 0 example_3_jobs | ||||
| 	sbatch --wait --verbose --array=4001-$(shell cat example_3_jobs | wc -l)  run_simulation.sbatch 0 example_3_jobs | ||||
| 
 | ||||
| example_4_jobs: 04_depvar_differential.R simulation_base.R grid_sweep.py | ||||
| 	sbatch --wait --verbose run_job.sbatch grid_sweep.py --command "Rscript 04_depvar_differential.R" --arg_dict '{"N":${Ns},"Bxy":[0.01],"Bzy":[-0.01],"m":${ms}, "seed":${seeds}, "outfile":["example_4.feather"], "y_explained_variance":${explained_variances}}' --outfile example_4_jobs | ||||
| example_4_jobs: 04_depvar_differential.R simulation_base.R grid_sweep.py pl_methods.R | ||||
| 	sbatch --wait --verbose run_job.sbatch grid_sweep.py --command "Rscript 04_depvar_differential.R" --arg_dict '{"N":${Ns},"Bxy":[0.7],"Bzy":[-0.7],"m":${ms}, "seed":${seeds}, "outfile":["example_4.feather"], "z_bias":[0.5]}' --outfile example_4_jobs | ||||
| 
 | ||||
| example_4.feather: example_4_jobs | ||||
| 	rm -f example_4.feather	 | ||||
| 	sbatch --wait --verbose --array=1-$(shell cat example_4_jobs | wc -l)  run_simulation.sbatch 0 example_4_jobs | ||||
| 	sbatch --wait --verbose --array=1-1000  run_simulation.sbatch 0 example_4_jobs | ||||
| 	sbatch --wait --verbose --array=1001-2000  run_simulation.sbatch 0 example_4_jobs | ||||
| 	sbatch --wait --verbose --array=2001-3000  run_simulation.sbatch 0 example_4_jobs | ||||
| 	sbatch --wait --verbose --array=3001-4000  run_simulation.sbatch 0 example_4_jobs | ||||
| 	sbatch --wait --verbose --array=4001-$(shell cat example_4_jobs | wc -l)  run_simulation.sbatch 0 example_4_jobs | ||||
| 
 | ||||
| 
 | ||||
| 
 | ||||
| remembr.RDS:example_1.feather example_2.feather example_3.feather example_4.feather plot_example.R plot_dv_example.R summarize_estimator.R | ||||
| @ -69,30 +85,39 @@ remembr.RDS:example_1.feather example_2.feather example_3.feather example_4.feat | ||||
| 	${srun} Rscript plot_dv_example.R --infile example_4.feather --name "plot.df.example.4" | ||||
| 
 | ||||
| 
 | ||||
| irr_Ns = ${Ns} | ||||
| irr_ms = ${ms} | ||||
| irr_Ns = [1000] | ||||
| irr_ms = [150,300,600] | ||||
| irr_seeds=${seeds} | ||||
| irr_explained_variances=${explained_variances} | ||||
| irr_coder_accuracy=[0.80] | ||||
| 
 | ||||
| example_5_jobs: 05_irr_indep.R irr_simulation_base.R grid_sweep.py | ||||
| 	sbatch --wait --verbose run_job.sbatch grid_sweep.py --command "Rscript 05_irr_indep.R" --arg_dict '{"N":${irr_Ns},"m":${irr_ms}, "seed":${irr_seeds}, "outfile":["example_5.feather"], "y_explained_variance":${irr_explained_variances}}' --outfile example_5_jobs | ||||
| example_5_jobs: 05_irr_indep.R irr_simulation_base.R grid_sweep.py pl_methods.R measerr_methods.R | ||||
| 	sbatch --wait --verbose run_job.sbatch grid_sweep.py --command "Rscript 05_irr_indep.R" --arg_dict '{"N":${irr_Ns},"m":${irr_ms}, "seed":${irr_seeds}, "outfile":["example_5.feather"], "y_explained_variance":${irr_explained_variances}, "coder_accuracy":${irr_coder_accuracy}}' --outfile example_5_jobs | ||||
| 
 | ||||
| example_5.feather:example_5_jobs | ||||
| 	rm -f example_5.feather | ||||
| 	sbatch --wait --verbose --array=1-$(shell cat example_5_jobs | wc -l)  run_simulation.sbatch 0 example_5_jobs | ||||
| 	sbatch --wait --verbose --array=1-1000  run_simulation.sbatch 0 example_5_jobs | ||||
| 	sbatch --wait --verbose --array=1001-$(shell cat example_5_jobs | wc -l)  run_simulation.sbatch 1000 example_5_jobs | ||||
| 	# sbatch --wait --verbose --array=2001-3000  run_simulation.sbatch 2000 example_5_jobs | ||||
| 	# sbatch --wait --verbose --array=3001-4000  run_simulation.sbatch 3000 example_5_jobs | ||||
| 	# sbatch --wait --verbose --array=2001-$(shell cat example_5_jobs | wc -l)  run_simulation.sbatch 4000 example_5_jobs | ||||
| 
 | ||||
| 
 | ||||
| example_6_jobs: 06_irr_dv.R irr_dv_simulation_base.R grid_sweep.py | ||||
| 	sbatch --wait --verbose run_job.sbatch grid_sweep.py --command "Rscript 06_irr_dv.R" --arg_dict '{"N":${irr_Ns},"m":${irr_ms}, "seed":${irr_seeds}, "outfile":["example_6.feather"], "y_explained_variance":${irr_explained_variances}}' --outfile example_6_jobs | ||||
| 
 | ||||
| example_6.feather:example_6_jobs | ||||
| 	rm -f example_6.feather | ||||
| 	sbatch --wait --verbose --array=1-$(shell cat example_6_jobs | wc -l)  run_simulation.sbatch 0 example_6_jobs | ||||
| # example_6_jobs: 06_irr_dv.R irr_dv_simulation_base.R grid_sweep.py pl_methods.R
 | ||||
| # 	sbatch --wait --verbose run_job.sbatch grid_sweep.py --command "Rscript 06_irr_dv.R" --arg_dict '{"N":${irr_Ns},"m":${irr_ms}, "seed":${irr_seeds}, "outfile":["example_6.feather"], "y_explained_variance":${irr_explained_variances},"coder_accuracy":${irr_coder_accuracy}}' --outfile example_6_jobs
 | ||||
| 
 | ||||
| remember_irr.RDS: example_5.feather example_6.feather plot_irr_example.R plot_irr_dv_example.R summarize_estimator.R | ||||
| # example_6.feather:example_6_jobs
 | ||||
| # 	rm -f example_6.feather
 | ||||
| # 	sbatch --wait --verbose --array=1-1000  run_simulation.sbatch 0 example_6_jobs
 | ||||
| # 	sbatch --wait --verbose --array=1001-2000  run_simulation.sbatch 1000 example_6_jobs
 | ||||
| # 	sbatch --wait --verbose --array=2001-$(shell cat example_6_jobs | wc -l)  run_simulation.sbatch 2000 example_6_jobs
 | ||||
| 
 | ||||
| 
 | ||||
| remember_irr.RDS: example_5.feather plot_irr_example.R plot_irr_dv_example.R summarize_estimator.R | ||||
| 	rm -f remember_irr.RDS | ||||
| 	sbatch --wait --verbose run_job.sbatch Rscript plot_irr_example.R --infile example_5.feather --name "plot.df.example.5" | ||||
| 	sbatch --wait --verbose run_job.sbatch Rscript plot_irr_dv_example.R --infile example_6.feather --name "plot.df.example.6" | ||||
| #	sbatch --wait --verbose run_job.sbatch Rscript plot_irr_dv_example.R --infile example_6.feather --name "plot.df.example.6"
 | ||||
| 
 | ||||
| 
 | ||||
| 
 | ||||
|  | ||||
										
Binary file not shown.

Binary file not shown.

| @ -4,23 +4,47 @@ options(amelia.parallel="no", | ||||
|         amelia.ncpus=1) | ||||
| library(Amelia) | ||||
| 
 | ||||
| source("measerr_methods.R") ## for my more generic function. | ||||
| source("pl_methods.R") | ||||
| source("measerr_methods_2.R") ## for my more generic function. | ||||
| 
 | ||||
| run_simulation_depvar <- function(df, result, outcome_formula = y ~ x + z, rater_formula = y.obs ~ x, proxy_formula = w_pred ~ y){ | ||||
| 
 | ||||
|     accuracy <- df[,mean(w_pred==y)] | ||||
| run_simulation_depvar <- function(df, result, outcome_formula = y ~ x + z, coder_formulas = list(y.obs.0 ~ 1, y.obs.1 ~ 1), proxy_formula = w_pred ~ y.obs.1+y.obs.0+y){ | ||||
|     (accuracy <- df[,mean(w_pred==y)]) | ||||
|     result <- append(result, list(accuracy=accuracy)) | ||||
|     (error.cor.z <- cor(df$x, df$w_pred - df$z)) | ||||
|     (error.cor.x <- cor(df$x, df$w_pred - df$y)) | ||||
|     (error.cor.y <- cor(df$y, df$y - df$w_pred)) | ||||
|     result <- append(result, list(error.cor.x = error.cor.x, | ||||
|                                   error.cor.z = error.cor.z, | ||||
|                                   error.cor.y = error.cor.y)) | ||||
| 
 | ||||
|     model.null <- glm(y~1, data=df,family=binomial(link='logit')) | ||||
|     (model.true <- glm(y ~ x + z, data=df,family=binomial(link='logit'))) | ||||
|     (lik.ratio <- exp(logLik(model.true) - logLik(model.null))) | ||||
| 
 | ||||
|     (model.true <- glm(y ~ x + z, data=df, family=binomial(link='logit'))) | ||||
|     true.ci.Bxy <- confint(model.true)['x',] | ||||
|     true.ci.Bzy <- confint(model.true)['z',] | ||||
| 
 | ||||
| 
 | ||||
|     result <- append(result, list(lik.ratio=lik.ratio)) | ||||
| 
 | ||||
|     result <- append(result, list(Bxy.est.true=coef(model.true)['x'], | ||||
|                                   Bzy.est.true=coef(model.true)['z'], | ||||
|                                   Bxy.ci.upper.true = true.ci.Bxy[2], | ||||
|                                   Bxy.ci.lower.true = true.ci.Bxy[1], | ||||
|                                   Bzy.ci.upper.true = true.ci.Bzy[2], | ||||
|                                   Bzy.ci.lower.true = true.ci.Bzy[1])) | ||||
|                                    | ||||
|     (model.naive <- lm(y~w_pred+z, data=df)) | ||||
|      | ||||
|     naive.ci.Bxy <- confint(model.naive)['w_pred',] | ||||
|     naive.ci.Bzy <- confint(model.naive)['z',] | ||||
| 
 | ||||
|     result <- append(result, list(Bxy.est.naive=coef(model.naive)['w_pred'], | ||||
|                                   Bzy.est.naive=coef(model.naive)['z'], | ||||
|                                   Bxy.ci.upper.naive = naive.ci.Bxy[2], | ||||
|                                   Bxy.ci.lower.naive = naive.ci.Bxy[1], | ||||
|                                   Bzy.ci.upper.naive = naive.ci.Bzy[2], | ||||
|                                   Bzy.ci.lower.naive = naive.ci.Bzy[1])) | ||||
| 
 | ||||
| 
 | ||||
| 
 | ||||
| @ -37,20 +61,20 @@ run_simulation_depvar <- function(df, result, outcome_formula = y ~ x + z, rater | ||||
|                                   Bzy.ci.lower.loa0.feasible = loa0.ci.Bzy[1])) | ||||
| 
 | ||||
| 
 | ||||
|     df.loa0.mle <- copy(df) | ||||
|     df.loa0.mle[,y:=y.obs.0] | ||||
|     loa0.mle <- measerr_mle_dv(df.loa0.mle, outcome_formula=outcome_formula, proxy_formula=proxy_formula) | ||||
|     fisher.info <- solve(loa0.mle$hessian) | ||||
|     coef <- loa0.mle$par | ||||
|     ci.upper <- coef + sqrt(diag(fisher.info)) * 1.96 | ||||
|     ci.lower <- coef - sqrt(diag(fisher.info)) * 1.96 | ||||
|     ## df.loa0.mle <- copy(df) | ||||
|     ## df.loa0.mle[,y:=y.obs.0] | ||||
|     ## loa0.mle <- measerr_mle_dv(df.loa0.mle, outcome_formula=outcome_formula, proxy_formula=proxy_formula) | ||||
|     ## fisher.info <- solve(loa0.mle$hessian) | ||||
|     ## coef <- loa0.mle$par | ||||
|     ## ci.upper <- coef + sqrt(diag(fisher.info)) * 1.96 | ||||
|     ## ci.lower <- coef - sqrt(diag(fisher.info)) * 1.96 | ||||
| 
 | ||||
|     result <- append(result, list(Bxy.est.loa0.mle=coef['x'], | ||||
|                                   Bzy.est.loa0.mle=coef['z'], | ||||
|                                   Bxy.ci.upper.loa0.mle = ci.upper['x'], | ||||
|                                   Bxy.ci.lower.loa0.mle = ci.lower['x'], | ||||
|                                   Bzy.ci.upper.loa0.mle = ci.upper['z'], | ||||
|                                   Bzy.ci.lower.loa0.mle = ci.upper['z'])) | ||||
|     ## result <- append(result, list(Bxy.est.loa0.mle=coef['x'], | ||||
|     ##                               Bzy.est.loa0.mle=coef['z'], | ||||
|     ##                               Bxy.ci.upper.loa0.mle = ci.upper['x'], | ||||
|     ##                               Bxy.ci.lower.loa0.mle = ci.lower['x'], | ||||
|     ##                               Bzy.ci.upper.loa0.mle = ci.upper['z'], | ||||
|     ##                               Bzy.ci.lower.loa0.mle = ci.upper['z'])) | ||||
| 
 | ||||
|     loco.feasible <- glm(y.obs.0 ~ x + z, data = df[(!is.na(y.obs.0)) & (y.obs.1 == y.obs.0)], family=binomial(link='logit')) | ||||
| 
 | ||||
| @ -64,29 +88,110 @@ run_simulation_depvar <- function(df, result, outcome_formula = y ~ x + z, rater | ||||
|                                   Bzy.ci.upper.loco.feasible = loco.feasible.ci.Bzy[2], | ||||
|                                   Bzy.ci.lower.loco.feasible = loco.feasible.ci.Bzy[1])) | ||||
| 
 | ||||
|      | ||||
|     ## df.double.proxy <- copy(df) | ||||
|     ## df.double.proxy <- df.double.proxy[,y.obs:=NA] | ||||
|     ## df.double.proxy <- df.double.proxy[,y:=NA] | ||||
|      | ||||
|     ## double.proxy.mle <- measerr_irr_mle_dv(df.double.proxy, outcome_formula=y~x+z, outcome_family=binomial(link='logit'), coder_formulas=list(y.obs.0 ~ y),  proxy_formula=w_pred ~ y.obs.0 + y, proxy_family=binomial(link='logit')) | ||||
|     ## print(double.proxy.mle$hessian) | ||||
|     ## fisher.info <- solve(double.proxy.mle$hessian) | ||||
|     ## coef <- double.proxy.mle$par | ||||
|     ## ci.upper <- coef + sqrt(diag(fisher.info)) * 1.96 | ||||
|     ## ci.lower <- coef - sqrt(diag(fisher.info)) * 1.96 | ||||
| 
 | ||||
|     df.loco.mle <- copy(df) | ||||
|     df.loco.mle[,y.obs:=NA] | ||||
|     df.loco.mle[(y.obs.0)==(y.obs.1),y.obs:=y.obs.0] | ||||
|     df.loco.mle[,y.true:=y] | ||||
|     df.loco.mle[,y:=y.obs] | ||||
|     print(df.loco.mle[!is.na(y.obs.1),mean(y.true==y,na.rm=TRUE)]) | ||||
|     loco.mle <- measerr_mle_dv(df.loco.mle, outcome_formula=outcome_formula, proxy_formula=proxy_formula) | ||||
|     fisher.info <- solve(loco.mle$hessian) | ||||
|     coef <- loco.mle$par | ||||
|     ## result <- append(result, list(Bxy.est.double.proxy=coef['x'], | ||||
|     ##                               Bzy.est.double.proxy=coef['z'], | ||||
|     ##                               Bxy.ci.upper.double.proxy = ci.upper['x'], | ||||
|     ##                               Bxy.ci.lower.double.proxy = ci.lower['x'], | ||||
|     ##                               Bzy.ci.upper.double.proxy = ci.upper['z'], | ||||
|     ##                               Bzy.ci.lower.double.proxy = ci.lower['z'])) | ||||
| 
 | ||||
| 
 | ||||
|     df.triple.proxy <- copy(df) | ||||
|     df.triple.proxy <- df.triple.proxy[,y.obs:=NA] | ||||
|     df.triple.proxy <- df.triple.proxy[,y:=NA] | ||||
|      | ||||
|     triple.proxy.mle <- measerr_irr_mle_dv(df.triple.proxy, outcome_formula=outcome_formula, outcome_family=binomial(link='logit'), coder_formulas=coder_formulas, proxy_formula=proxy_formula, proxy_family=binomial(link='logit')) | ||||
|     print(triple.proxy.mle$hessian) | ||||
|     fisher.info <- solve(triple.proxy.mle$hessian) | ||||
|     print(fisher.info) | ||||
|     coef <- triple.proxy.mle$par | ||||
|     ci.upper <- coef + sqrt(diag(fisher.info)) * 1.96 | ||||
|     ci.lower <- coef - sqrt(diag(fisher.info)) * 1.96 | ||||
| 
 | ||||
|     result <- append(result, list(Bxy.est.loco.mle=coef['x'], | ||||
|                                   Bzy.est.loco.mle=coef['z'], | ||||
|                                   Bxy.ci.upper.loco.mle = ci.upper['x'], | ||||
|                                   Bxy.ci.lower.loco.mle = ci.lower['x'], | ||||
|                                   Bzy.ci.upper.loco.mle = ci.upper['z'], | ||||
|                                   Bzy.ci.lower.loco.mle = ci.lower['z'])) | ||||
|     result <- append(result, list(Bxy.est.triple.proxy=coef['x'], | ||||
|                                   Bzy.est.triple.proxy=coef['z'], | ||||
|                                   Bxy.ci.upper.triple.proxy = ci.upper['x'], | ||||
|                                   Bxy.ci.lower.triple.proxy = ci.lower['x'], | ||||
|                                   Bzy.ci.upper.triple.proxy = ci.upper['z'], | ||||
|                                   Bzy.ci.lower.triple.proxy = ci.lower['z'])) | ||||
| 
 | ||||
|     print(rater_formula) | ||||
|     print(proxy_formula) | ||||
|     ## df.loco.mle <- copy(df) | ||||
|     ## df.loco.mle[,y.obs:=NA] | ||||
|     ## df.loco.mle[(y.obs.0)==(y.obs.1),y.obs:=y.obs.0] | ||||
|     ## df.loco.mle[,y.true:=y] | ||||
|     ## df.loco.mle[,y:=y.obs] | ||||
|     ## print(df.loco.mle[!is.na(y.obs.1),mean(y.true==y,na.rm=TRUE)]) | ||||
|     ## loco.mle <- measerr_mle_dv(df.loco.mle, outcome_formula=outcome_formula, proxy_formula=proxy_formula) | ||||
|     ## fisher.info <- solve(loco.mle$hessian) | ||||
|     ## coef <- loco.mle$par | ||||
|     ## ci.upper <- coef + sqrt(diag(fisher.info)) * 1.96 | ||||
|     ## ci.lower <- coef - sqrt(diag(fisher.info)) * 1.96 | ||||
| 
 | ||||
|     ## result <- append(result, list(Bxy.est.loco.mle=coef['x'], | ||||
|     ##                               Bzy.est.loco.mle=coef['z'], | ||||
|     ##                               Bxy.ci.upper.loco.mle = ci.upper['x'], | ||||
|     ##                               Bxy.ci.lower.loco.mle = ci.lower['x'], | ||||
|     ##                               Bzy.ci.upper.loco.mle = ci.upper['z'], | ||||
|     ##                               Bzy.ci.lower.loco.mle = ci.lower['z'])) | ||||
| 
 | ||||
| 
 | ||||
| 
 | ||||
|     ## my implementation of likelihood based correction | ||||
|     mod.zhang <- zhang.mle.dv(df.loco.mle) | ||||
|     coef <- coef(mod.zhang) | ||||
|     ci <- confint(mod.zhang,method='quad') | ||||
| 
 | ||||
|     result <- append(result, | ||||
|                      list(Bxy.est.zhang = coef['Bxy'], | ||||
|                           Bxy.ci.upper.zhang = ci['Bxy','97.5 %'], | ||||
|                           Bxy.ci.lower.zhang = ci['Bxy','2.5 %'], | ||||
|                           Bzy.est.zhang = coef['Bzy'], | ||||
|                           Bzy.ci.upper.zhang = ci['Bzy','97.5 %'], | ||||
|                           Bzy.ci.lower.zhang = ci['Bzy','2.5 %'])) | ||||
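Editor's note: for readers unfamiliar with this class of estimator, the general idea behind a likelihood-based correction for a misclassified dependent variable is to marginalize over the unobserved true y, treating the classifier's sensitivity and specificity as parameters. The sketch below illustrates that idea only; it is not the implementation in pl_methods.R, and the names are made up:

    nll_misclassified_dv <- function(par, df) {
        B0 <- par[1]; Bxy <- par[2]; Bzy <- par[3]
        sens <- plogis(par[4])                       # P(w_pred = 1 | y = 1)
        spec <- plogis(par[5])                       # P(w_pred = 0 | y = 0)
        p1  <- plogis(B0 + Bxy * df$x + Bzy * df$z)  # P(y = 1 | x, z)
        pw1 <- sens * p1 + (1 - spec) * (1 - p1)     # P(w_pred = 1 | x, z), marginalizing over y
        -sum(dbinom(df$w_pred, 1, pw1, log = TRUE))
    }
    # e.g. optim(rep(0, 5), nll_misclassified_dv, df = df, method = 'L-BFGS-B', hessian = TRUE)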
| 
 | ||||
|      | ||||
| 
 | ||||
|     print(df.loco.mle) | ||||
| 
 | ||||
|     # amelia says use normal distribution for binary variables. | ||||
|     tryCatch({ | ||||
|         amelia.out.k <- amelia(df.loco.mle, m=200, p2s=0, idvars=c('y','ystar','w','y.obs.1','y.obs.0','y.true')) | ||||
|         mod.amelia.k <- zelig(y.obs~x+z, model='ls', data=amelia.out.k$imputations, cite=FALSE) | ||||
|         (coefse <- combine_coef_se(mod.amelia.k, messages=FALSE)) | ||||
|         est.x.mi <- coefse['x','Estimate'] | ||||
|         est.x.se <- coefse['x','Std.Error'] | ||||
|         result <- append(result, | ||||
|                          list(Bxy.est.amelia.full = est.x.mi, | ||||
|                               Bxy.ci.upper.amelia.full = est.x.mi + 1.96 * est.x.se, | ||||
|                               Bxy.ci.lower.amelia.full = est.x.mi - 1.96 * est.x.se | ||||
|                               )) | ||||
| 
 | ||||
|         est.z.mi <- coefse['z','Estimate'] | ||||
|         est.z.se <- coefse['z','Std.Error'] | ||||
| 
 | ||||
|         result <- append(result, | ||||
|                          list(Bzy.est.amelia.full = est.z.mi, | ||||
|                               Bzy.ci.upper.amelia.full = est.z.mi + 1.96 * est.z.se, | ||||
|                               Bzy.ci.lower.amelia.full = est.z.mi - 1.96 * est.z.se | ||||
|                               )) | ||||
| 
 | ||||
|     }, | ||||
|     error = function(e){ | ||||
|         message("An error occurred:\n",e) | ||||
|         result$error <- paste0(result$error,'\n', e) | ||||
|     }) | ||||
|     ## mle.irr <- measerr_irr_mle( df, outcome_formula = outcome_formula, rater_formula = rater_formula, proxy_formula=proxy_formula, truth_formula=truth_formula) | ||||
| 
 | ||||
|     ## fisher.info <- solve(mle.irr$hessian) | ||||
|  | ||||
| @ -3,10 +3,10 @@ library(matrixStats) # for numerically stable logsumexps | ||||
| options(amelia.parallel="no", | ||||
|         amelia.ncpus=1) | ||||
| library(Amelia) | ||||
| source("measerr_methods.R") | ||||
| source("pl_methods.R") | ||||
| 
 | ||||
| source("measerr_methods.R") ## for my more generic function. | ||||
| 
 | ||||
| run_simulation <- function(df, result, outcome_formula = y ~ x + z, proxy_formula = w_pred ~ x, truth_formula = x ~ z){ | ||||
| run_simulation <- function(df, result, outcome_formula = y ~ x + z, proxy_formula = w_pred ~ x, coder_formulas=list(x.obs.1~x, x.obs.0~x), truth_formula = x ~ z){ | ||||
| 
 | ||||
|     accuracy <- df[,mean(w_pred==x)] | ||||
|     result <- append(result, list(accuracy=accuracy)) | ||||
| @ -24,6 +24,8 @@ run_simulation <- function(df, result, outcome_formula = y ~ x + z, proxy_formul | ||||
| 
 | ||||
| 
 | ||||
| 
 | ||||
| 
 | ||||
| 
 | ||||
|     loa0.feasible <- lm(y ~ x.obs.0 + z, data = df[!(is.na(x.obs.1))]) | ||||
| 
 | ||||
|     loa0.ci.Bxy <- confint(loa0.feasible)['x.obs.0',] | ||||
| @ -35,7 +37,7 @@ run_simulation <- function(df, result, outcome_formula = y ~ x + z, proxy_formul | ||||
|                                   Bxy.ci.lower.loa0.feasible = loa0.ci.Bxy[1], | ||||
|                                   Bzy.ci.upper.loa0.feasible = loa0.ci.Bzy[2], | ||||
|                                   Bzy.ci.lower.loa0.feasible = loa0.ci.Bzy[1])) | ||||
| 
 | ||||
|     print("fitting loa0 model") | ||||
| 
 | ||||
|     df.loa0.mle <- copy(df) | ||||
|     df.loa0.mle[,x:=x.obs.0] | ||||
| @ -52,8 +54,11 @@ run_simulation <- function(df, result, outcome_formula = y ~ x + z, proxy_formul | ||||
|                                   Bzy.ci.upper.loa0.mle = ci.upper['z'], | ||||
|                                   Bzy.ci.lower.loa0.mle = ci.lower['z'])) | ||||
| 
 | ||||
| 
 | ||||
| 
 | ||||
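|     ## "loco" feasible baseline (assumed reading: labels where coders agree): fit the outcome model | ||||
|     ## only on doubly-coded rows where the two coders assigned the same label. | ||||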
|     loco.feasible <- lm(y ~ x.obs.1 + z, data = df[(!is.na(x.obs.1)) & (x.obs.1 == x.obs.0)]) | ||||
| 
 | ||||
| 
 | ||||
|     loco.feasible.ci.Bxy <- confint(loco.feasible)['x.obs.1',] | ||||
|     loco.feasible.ci.Bzy <- confint(loco.feasible)['z',] | ||||
| 
 | ||||
| @ -65,41 +70,152 @@ run_simulation <- function(df, result, outcome_formula = y ~ x + z, proxy_formul | ||||
|                                   Bzy.ci.lower.loco.feasible = loco.feasible.ci.Bzy[1])) | ||||
| 
 | ||||
| 
 | ||||
|     (model.naive <- lm(y~w_pred+z, data=df)) | ||||
|      | ||||
|     naive.ci.Bxy <- confint(model.naive)['w_pred',] | ||||
|     naive.ci.Bzy <- confint(model.naive)['z',] | ||||
| 
 | ||||
|     result <- append(result, list(Bxy.est.naive=coef(model.naive)['w_pred'], | ||||
|                                   Bzy.est.naive=coef(model.naive)['z'], | ||||
|                                   Bxy.ci.upper.naive = naive.ci.Bxy[2], | ||||
|                                   Bxy.ci.lower.naive = naive.ci.Bxy[1], | ||||
|                                   Bzy.ci.upper.naive = naive.ci.Bzy[2], | ||||
|                                   Bzy.ci.lower.naive = naive.ci.Bzy[1])) | ||||
|                                    | ||||
|     print("fitting loco model") | ||||
| 
 | ||||
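|     ## Build the loco dataset: x.obs is defined only where the two coders agree, the simulated | ||||
|     ## ground truth is stashed in x.true, and x is overwritten with the consensus label. | ||||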
|     df.loco.mle <- copy(df) | ||||
|     df.loco.mle[,x.obs:=NA] | ||||
|     df.loco.mle[(x.obs.0)==(x.obs.1),x.obs:=x.obs.0] | ||||
|     df.loco.mle[,x.true:=x] | ||||
|     df.loco.mle[,x:=x.obs] | ||||
|     print(df.loco.mle[!is.na(x.obs.1),mean(x.true==x,na.rm=TRUE)]) | ||||
|     loco.accuracy <- df.loco.mle[(!is.na(x.obs.1)) & (x.obs.1 == x.obs.0),mean(x.obs.1 == x.true)]     | ||||
|     loco.mle <- measerr_mle(df.loco.mle, outcome_formula=outcome_formula, proxy_formula=proxy_formula, truth_formula=truth_formula) | ||||
|     fisher.info <- solve(loco.mle$hessian) | ||||
|     coef <- loco.mle$par | ||||
|     ci.upper <- coef + sqrt(diag(fisher.info)) * 1.96 | ||||
|     ci.lower <- coef - sqrt(diag(fisher.info)) * 1.96 | ||||
| 
 | ||||
|     result <- append(result, list(Bxy.est.loco.mle=coef['x'], | ||||
|     result <- append(result, list(loco.accuracy=loco.accuracy, | ||||
|                                   Bxy.est.loco.mle=coef['x'], | ||||
|                                   Bzy.est.loco.mle=coef['z'], | ||||
|                                   Bxy.ci.upper.loco.mle = ci.upper['x'], | ||||
|                                   Bxy.ci.lower.loco.mle = ci.lower['x'], | ||||
|                                   Bzy.ci.upper.loco.mle = ci.upper['z'], | ||||
|                                   Bzy.ci.lower.loco.mle = ci.lower['z'])) | ||||
| 
 | ||||
|     ## print(rater_formula) | ||||
|     ## print(proxy_formula) | ||||
|     ## mle.irr <- measerr_irr_mle( df, outcome_formula = outcome_formula, rater_formula = rater_formula, proxy_formula=proxy_formula, truth_formula=truth_formula) | ||||
|     df.double.proxy.mle <- copy(df) | ||||
|     df.double.proxy.mle[,x.obs:=NA] | ||||
|     print("fitting double proxy model") | ||||
| 
 | ||||
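|     ## Double-proxy model: treat w_pred and the first coder's labels (coder_formulas[1]) as two | ||||
|     ## error-prone proxies of a never-observed x, which measerr_irr_mle integrates out. | ||||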
|     double.proxy.mle <- measerr_irr_mle(df.double.proxy.mle, outcome_formula=outcome_formula, proxy_formula=proxy_formula, coder_formulas=coder_formulas[1], truth_formula=truth_formula) | ||||
|     fisher.info <- solve(double.proxy.mle$hessian) | ||||
|     coef <- double.proxy.mle$par | ||||
|     ci.upper <- coef + sqrt(diag(fisher.info)) * 1.96 | ||||
|     ci.lower <- coef - sqrt(diag(fisher.info)) * 1.96 | ||||
| 
 | ||||
|     result <- append(result, list( | ||||
|                                   Bxy.est.double.proxy=coef['x'], | ||||
|                                   Bzy.est.double.proxy=coef['z'], | ||||
|                                   Bxy.ci.upper.double.proxy = ci.upper['x'], | ||||
|                                   Bxy.ci.lower.double.proxy = ci.lower['x'], | ||||
|                                   Bzy.ci.upper.double.proxy = ci.upper['z'], | ||||
|                                   Bzy.ci.lower.double.proxy = ci.lower['z'])) | ||||
| 
 | ||||
|     df.triple.proxy.mle <- copy(df) | ||||
|     df.triple.proxy.mle[,x.obs:=NA] | ||||
| 
 | ||||
|     print("fitting triple proxy model") | ||||
|     triple.proxy.mle <- measerr_irr_mle(df.triple.proxy.mle, outcome_formula=outcome_formula, proxy_formula=proxy_formula, coder_formulas=coder_formulas, truth_formula=truth_formula) | ||||
|     fisher.info <- solve(triple.proxy.mle$hessian) | ||||
|     coef <- triple.proxy.mle$par | ||||
|     ci.upper <- coef + sqrt(diag(fisher.info)) * 1.96 | ||||
|     ci.lower <- coef - sqrt(diag(fisher.info)) * 1.96 | ||||
| 
 | ||||
|     result <- append(result, list( | ||||
|                                   Bxy.est.triple.proxy=coef['x'], | ||||
|                                   Bzy.est.triple.proxy=coef['z'], | ||||
|                                   Bxy.ci.upper.triple.proxy = ci.upper['x'], | ||||
|                                   Bxy.ci.lower.triple.proxy = ci.lower['x'], | ||||
|                                   Bzy.ci.upper.triple.proxy = ci.upper['z'], | ||||
|                                   Bzy.ci.lower.triple.proxy = ci.lower['z'])) | ||||
|     tryCatch({ | ||||
|     amelia.out.k <- amelia(df.loco.mle, m=200, p2s=0, idvars=c('x.true','w','x.obs.1','x.obs.0','x')) | ||||
|     mod.amelia.k <- zelig(y~x.obs+z, model='ls', data=amelia.out.k$imputations, cite=FALSE) | ||||
|     (coefse <- combine_coef_se(mod.amelia.k, messages=FALSE)) | ||||
| 
 | ||||
|     est.x.mi <- coefse['x.obs','Estimate'] | ||||
|     est.x.se <- coefse['x.obs','Std.Error'] | ||||
|     result <- append(result, | ||||
|                      list(Bxy.est.amelia.full = est.x.mi, | ||||
|                           Bxy.ci.upper.amelia.full = est.x.mi + 1.96 * est.x.se, | ||||
|                           Bxy.ci.lower.amelia.full = est.x.mi - 1.96 * est.x.se | ||||
|                           )) | ||||
| 
 | ||||
|     est.z.mi <- coefse['z','Estimate'] | ||||
|     est.z.se <- coefse['z','Std.Error'] | ||||
| 
 | ||||
|     result <- append(result, | ||||
|                      list(Bzy.est.amelia.full = est.z.mi, | ||||
|                           Bzy.ci.upper.amelia.full = est.z.mi + 1.96 * est.z.se, | ||||
|                           Bzy.ci.lower.amelia.full = est.z.mi - 1.96 * est.z.se | ||||
|                           )) | ||||
| 
 | ||||
|     }, | ||||
|     error = function(e){ | ||||
|         message("An error occurred:\n",e) | ||||
|         result$error <-paste0(result$error,'\n', e) | ||||
|     } | ||||
|     ) | ||||
| 
 | ||||
|     tryCatch({ | ||||
| 
 | ||||
|         mod.zhang.lik <- zhang.mle.iv(df.loco.mle) | ||||
|         coef <- coef(mod.zhang.lik) | ||||
|         ci <- confint(mod.zhang.lik,method='quad') | ||||
|         result <- append(result, | ||||
|                          list(Bxy.est.zhang = coef['Bxy'], | ||||
|                               Bxy.ci.upper.zhang = ci['Bxy','97.5 %'], | ||||
|                               Bxy.ci.lower.zhang = ci['Bxy','2.5 %'], | ||||
|                               Bzy.est.zhang = coef['Bzy'], | ||||
|                               Bzy.ci.upper.zhang = ci['Bzy','97.5 %'], | ||||
|                               Bzy.ci.lower.zhang = ci['Bzy','2.5 %'])) | ||||
|     }, | ||||
| 
 | ||||
|     error = function(e){ | ||||
|         message("An error occurred:\n",e) | ||||
|         result$error <- paste0(result$error,'\n', e) | ||||
|     }) | ||||
| 
 | ||||
|     df <- df.loco.mle | ||||
|     N <- nrow(df) | ||||
|     m <- nrow(df[!is.na(x.obs)]) | ||||
|     p <- v <- train <- rep(0,N) | ||||
|     M <- m | ||||
|     p[(M+1):(N)] <- 1 | ||||
|     v[1:(M)] <- 1 | ||||
|     df <- df[order(x.obs)] | ||||
|     y <- df[,y] | ||||
|     x <- df[,x.obs] | ||||
|     z <- df[,z] | ||||
|     w <- df[,w_pred] | ||||
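|     ## Assumed argument layout for predicted_covariates (defined in pl_methods.R): rows are ordered | ||||
|     ## so the m labeled rows come first, v flags those labeled (validation) rows, p flags the | ||||
|     ## remaining unlabeled (primary) rows, and train is left at zero here. | ||||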
|     # gmm gets pretty close | ||||
|     (gmm.res <- predicted_covariates(y, x, z, w, v, train, p, max_iter=100, verbose=TRUE)) | ||||
| 
 | ||||
|     result <- append(result, | ||||
|                      list(Bxy.est.gmm = gmm.res$beta[1,1], | ||||
|                           Bxy.ci.upper.gmm = gmm.res$confint[1,2], | ||||
|                           Bxy.ci.lower.gmm = gmm.res$confint[1,1], | ||||
|                           gmm.ER_pval = gmm.res$ER_pval | ||||
|                           )) | ||||
| 
 | ||||
|     result <- append(result, | ||||
|                      list(Bzy.est.gmm = gmm.res$beta[2,1], | ||||
|                           Bzy.ci.upper.gmm = gmm.res$confint[2,2], | ||||
|                           Bzy.ci.lower.gmm = gmm.res$confint[2,1])) | ||||
| 
 | ||||
| 
 | ||||
|     ## fisher.info <- solve(mle.irr$hessian) | ||||
|     ## coef <- mle.irr$par | ||||
|     ## ci.upper <- coef + sqrt(diag(fisher.info)) * 1.96 | ||||
|     ## ci.lower <- coef - sqrt(diag(fisher.info)) * 1.96 | ||||
|      | ||||
|     ## result <- append(result, | ||||
|     ##                  list(Bxy.est.mle = coef['x'], | ||||
|     ##                       Bxy.ci.upper.mle = ci.upper['x'], | ||||
|     ##                       Bxy.ci.lower.mle = ci.lower['x'], | ||||
|     ##                       Bzy.est.mle = coef['z'], | ||||
|     ##                       Bzy.ci.upper.mle = ci.upper['z'], | ||||
|     ##                       Bzy.ci.lower.mle = ci.lower['z'])) | ||||
| 
 | ||||
|     return(result) | ||||
| 
 | ||||
|  | ||||
| @ -1,5 +1,6 @@ | ||||
| library(formula.tools) | ||||
| library(matrixStats) | ||||
| library(optimx) | ||||
| library(bbmle) | ||||
| ## df: dataframe to model | ||||
| ## outcome_formula: formula for y | x, z | ||||
| @ -113,227 +114,18 @@ measerr_mle_dv <- function(df, outcome_formula, outcome_family=binomial(link='lo | ||||
|     return(fit) | ||||
| } | ||||
| 
 | ||||
| ## Experimental, and not necessary if errors are independent. | ||||
| measerr_irr_mle <- function(df, outcome_formula, outcome_family=gaussian(), rater_formula, proxy_formula, proxy_family=binomial(link='logit'), truth_formula, truth_family=binomial(link='logit'),method='optim'){ | ||||
| 
 | ||||
|     ### in this scenario, the ground truth also has measurement error, but we have repeated measures for it.  | ||||
| 
 | ||||
|     ## probability of y given observed data. | ||||
|     df.obs <- df[!is.na(x.obs.1)] | ||||
|     proxy.variable <- all.vars(proxy_formula)[1] | ||||
|     df.x.obs.1 <- copy(df.obs)[,x:=1] | ||||
|     df.x.obs.0 <- copy(df.obs)[,x:=0] | ||||
|     y.obs <- df.obs[,y] | ||||
| 
 | ||||
|     nll <- function(params){ | ||||
|         outcome.model.matrix.x.obs.0 <- model.matrix(outcome_formula, df.x.obs.0) | ||||
|         outcome.model.matrix.x.obs.1 <- model.matrix(outcome_formula, df.x.obs.1) | ||||
| 
 | ||||
|         param.idx <- 1 | ||||
|         n.outcome.model.covars <- dim(outcome.model.matrix.x.obs.0)[2] | ||||
|         outcome.params <- params[param.idx:n.outcome.model.covars] | ||||
|         param.idx <- param.idx + n.outcome.model.covars | ||||
| 
 | ||||
|         sigma.y <- params[param.idx] | ||||
|         param.idx <- param.idx + 1 | ||||
| 
 | ||||
|         ll.y.x.obs.0 <- dnorm(y.obs, outcome.params %*% t(outcome.model.matrix.x.obs.0),sd=sigma.y, log=TRUE) | ||||
|         ll.y.x.obs.1 <- dnorm(y.obs, outcome.params %*% t(outcome.model.matrix.x.obs.1),sd=sigma.y, log=TRUE) | ||||
| 
 | ||||
|         ## assume that the two coders are statistically independent conditional on x | ||||
|         ll.x.obs.0.x0 <- vector(mode='numeric', length=nrow(df.obs)) | ||||
|         ll.x.obs.1.x0 <- vector(mode='numeric', length=nrow(df.obs)) | ||||
|         ll.x.obs.0.x1 <- vector(mode='numeric', length=nrow(df.obs)) | ||||
|         ll.x.obs.1.x1 <- vector(mode='numeric', length=nrow(df.obs)) | ||||
| 
 | ||||
|         rater.model.matrix.x.obs.0 <- model.matrix(rater_formula, df.x.obs.0) | ||||
|         rater.model.matrix.x.obs.1 <- model.matrix(rater_formula, df.x.obs.1) | ||||
| 
 | ||||
|         n.rater.model.covars <- dim(rater.model.matrix.x.obs.0)[2] | ||||
|         rater.0.params <- params[param.idx:(n.rater.model.covars + param.idx - 1)] | ||||
|         param.idx <- param.idx + n.rater.model.covars | ||||
| 
 | ||||
|         rater.1.params <- params[param.idx:(n.rater.model.covars + param.idx - 1)] | ||||
|         param.idx <- param.idx + n.rater.model.covars | ||||
|          | ||||
|         # probability of rater 0 if x is 0 or 1 | ||||
|         ll.x.obs.0.x0[df.obs$x.obs.0==1] <- plogis(rater.0.params %*% t(rater.model.matrix.x.obs.0[df.obs$x.obs.0==1,]), log=TRUE) | ||||
|         ll.x.obs.0.x0[df.obs$x.obs.0==0] <- plogis(rater.0.params %*% t(rater.model.matrix.x.obs.0[df.obs$x.obs.0==0,]), log=TRUE, lower.tail=FALSE) | ||||
|         ll.x.obs.0.x1[df.obs$x.obs.0==1] <- plogis(rater.0.params %*% t(rater.model.matrix.x.obs.1[df.obs$x.obs.0==1,]), log=TRUE) | ||||
|         ll.x.obs.0.x1[df.obs$x.obs.0==0] <- plogis(rater.0.params %*% t(rater.model.matrix.x.obs.1[df.obs$x.obs.0==0,]), log=TRUE, lower.tail=FALSE) | ||||
| 
 | ||||
|         # probability of rater 1 if x is 0 or 1 | ||||
|         ll.x.obs.1.x0[df.obs$x.obs.1==1] <- plogis(rater.1.params %*% t(rater.model.matrix.x.obs.0[df.obs$x.obs.1==1,]), log=TRUE) | ||||
|         ll.x.obs.1.x0[df.obs$x.obs.1==0] <- plogis(rater.1.params %*% t(rater.model.matrix.x.obs.0[df.obs$x.obs.1==0,]), log=TRUE, lower.tail=FALSE) | ||||
|         ll.x.obs.1.x1[df.obs$x.obs.1==1] <- plogis(rater.1.params %*% t(rater.model.matrix.x.obs.1[df.obs$x.obs.1==1,]), log=TRUE) | ||||
|         ll.x.obs.1.x1[df.obs$x.obs.1==0] <- plogis(rater.1.params %*% t(rater.model.matrix.x.obs.1[df.obs$x.obs.1==0,]), log=TRUE, lower.tail=FALSE) | ||||
| 
 | ||||
|         proxy.model.matrix.x0 <- model.matrix(proxy_formula, df.x.obs.0) | ||||
|         proxy.model.matrix.x1 <- model.matrix(proxy_formula, df.x.obs.1) | ||||
| 
 | ||||
|         n.proxy.model.covars <- dim(proxy.model.matrix.x0)[2] | ||||
|         proxy.params <- params[param.idx:(n.proxy.model.covars+param.idx-1)] | ||||
|         param.idx <- param.idx + n.proxy.model.covars | ||||
| 
 | ||||
|         proxy.obs <- with(df.obs, eval(parse(text=proxy.variable))) | ||||
| 
 | ||||
|         if( (proxy_family$family=="binomial") & (proxy_family$link=='logit')){ | ||||
|             ll.w.obs.x0 <- vector(mode='numeric',length=dim(proxy.model.matrix.x0)[1]) | ||||
|             ll.w.obs.x1 <- vector(mode='numeric',length=dim(proxy.model.matrix.x1)[1]) | ||||
| 
 | ||||
|                                         # proxy_formula likelihood using logistic regression | ||||
|             ll.w.obs.x0[proxy.obs==1] <- plogis(proxy.params %*% t(proxy.model.matrix.x0[proxy.obs==1,]),log=TRUE) | ||||
|             ll.w.obs.x0[proxy.obs==0] <- plogis(proxy.params %*% t(proxy.model.matrix.x0[proxy.obs==0,]),log=TRUE, lower.tail=FALSE) | ||||
| 
 | ||||
|             ll.w.obs.x1[proxy.obs==1] <- plogis(proxy.params %*% t(proxy.model.matrix.x1[proxy.obs==1,]),log=TRUE) | ||||
|             ll.w.obs.x1[proxy.obs==0] <- plogis(proxy.params %*% t(proxy.model.matrix.x1[proxy.obs==0,]),log=TRUE, lower.tail=FALSE) | ||||
|         } | ||||
| 
 | ||||
|         ## assume that the probability of x is a logistic regression depending on z | ||||
|         truth.model.matrix.obs <- model.matrix(truth_formula, df.obs) | ||||
|         n.truth.params <- dim(truth.model.matrix.obs)[2] | ||||
|         truth.params <- params[param.idx:(n.truth.params + param.idx - 1)] | ||||
| 
 | ||||
|         ll.obs.x0 <- plogis(truth.params %*% t(truth.model.matrix.obs), log=TRUE) | ||||
|         ll.obs.x1 <- plogis(truth.params %*% t(truth.model.matrix.obs), log=TRUE, lower.tail=FALSE) | ||||
| 
 | ||||
|         ll.obs <- colLogSumExps(rbind(ll.y.x.obs.0 + ll.x.obs.0.x0 + ll.x.obs.1.x0 + ll.obs.x0 + ll.w.obs.x0, | ||||
|                                       ll.y.x.obs.1 + ll.x.obs.0.x1 + ll.x.obs.1.x1 + ll.obs.x1 + ll.w.obs.x1)) | ||||
| 
 | ||||
|         ### NOW FOR THE FUN PART. Likelihood of the unobserved data. | ||||
|         ### we have to integrate out x.obs.0, x.obs.1, and x. | ||||
| 
 | ||||
| 
 | ||||
|         ## THE OUTCOME | ||||
|         df.unobs <- df[is.na(x.obs)] | ||||
|         df.x.unobs.0 <- copy(df.unobs)[,x:=0] | ||||
|         df.x.unobs.1 <- copy(df.unobs)[,x:=1] | ||||
|         y.unobs <- df.unobs$y | ||||
| 
 | ||||
|         outcome.model.matrix.x.unobs.0 <- model.matrix(outcome_formula, df.x.unobs.0) | ||||
|         outcome.model.matrix.x.unobs.1 <- model.matrix(outcome_formula, df.x.unobs.1) | ||||
| 
 | ||||
|         ll.y.unobs.x0 <- dnorm(y.unobs, outcome.params %*% t(outcome.model.matrix.x.unobs.0), sd=sigma.y, log=TRUE) | ||||
|         ll.y.unobs.x1 <- dnorm(y.unobs, outcome.params %*% t(outcome.model.matrix.x.unobs.1), sd=sigma.y, log=TRUE) | ||||
| 
 | ||||
|          | ||||
|         ## THE UNLABELED DATA | ||||
| 
 | ||||
|          | ||||
|         ## assume that the two coders are statistically independent conditional on x | ||||
|         ll.x.unobs.0.x0 <- vector(mode='numeric', length=nrow(df.unobs)) | ||||
|         ll.x.unobs.1.x0 <- vector(mode='numeric', length=nrow(df.unobs)) | ||||
|         ll.x.unobs.0.x1 <- vector(mode='numeric', length=nrow(df.unobs)) | ||||
|         ll.x.unobs.1.x1 <- vector(mode='numeric', length=nrow(df.unobs)) | ||||
|          | ||||
|         df.x.unobs.0[,x.obs := 1] | ||||
|         df.x.unobs.1[,x.obs := 1] | ||||
| 
 | ||||
|         rater.model.matrix.x.unobs.0 <- model.matrix(rater_formula, df.x.unobs.0) | ||||
|         rater.model.matrix.x.unobs.1 <- model.matrix(rater_formula, df.x.unobs.1) | ||||
| 
 | ||||
|           | ||||
|         ## # probability of rater 0 if x is 0 or 1 | ||||
|         ## ll.x.unobs.0.x0 <- colLogSumExps(rbind(plogis(rater.0.params %*% t(rater.model.matrix.x.unobs.0), log=TRUE), | ||||
|         ##                                      plogis(rater.0.params %*% t(rater.model.matrix.x.unobs.0), log=TRUE, lower.tail=TRUE))) | ||||
| 
 | ||||
|         ## ll.x.unobs.0.x1 <- colLogSumExps(rbind(plogis(rater.0.params %*% t(rater.model.matrix.x.unobs.1), log=TRUE), | ||||
|         ##                                        plogis(rater.0.params %*% t(rater.model.matrix.x.unobs.1), log=TRUE, lower.tail=TRUE))) | ||||
| 
 | ||||
|         ## # probability of rater 1 if x is 0 or 1 | ||||
|         ## ll.x.unobs.1.x0 <- colLogSumExps(rbind(plogis(rater.1.params %*% t(rater.model.matrix.x.unobs.0), log=TRUE), | ||||
|         ##                                      plogis(rater.1.params %*% t(rater.model.matrix.x.unobs.0), log=TRUE, lower.tail=TRUE))) | ||||
| 
 | ||||
|         ## ll.x.unobs.1.x1 <- colLogSumExps(rbind(plogis(rater.1.params %*% t(rater.model.matrix.x.unobs.1), log=TRUE), | ||||
|         ##                                      plogis(rater.1.params %*% t(rater.model.matrix.x.unobs.1), log=TRUE, lower.tail=TRUE))) | ||||
| 
 | ||||
| 
 | ||||
|         proxy.unobs <- with(df.unobs, eval(parse(text=proxy.variable))) | ||||
|         proxy.model.matrix.x0.unobs <- model.matrix(proxy_formula, df.x.unobs.0) | ||||
|         proxy.model.matrix.x1.unobs <- model.matrix(proxy_formula, df.x.unobs.1) | ||||
| 
 | ||||
|         if( (proxy_family$family=="binomial") & (proxy_family$link=='logit')){ | ||||
|             ll.w.unobs.x0 <- vector(mode='numeric',length=dim(proxy.model.matrix.x0.unobs)[1]) | ||||
|             ll.w.unobs.x1 <- vector(mode='numeric',length=dim(proxy.model.matrix.x1.unobs)[1]) | ||||
| 
 | ||||
| 
 | ||||
|                                         # proxy_formula likelihood using logistic regression | ||||
|             ll.w.unobs.x0[proxy.unobs==1] <- plogis(proxy.params %*% t(proxy.model.matrix.x0.unobs[proxy.unobs==1,]),log=TRUE) | ||||
|             ll.w.unobs.x0[proxy.unobs==0] <- plogis(proxy.params %*% t(proxy.model.matrix.x0.unobs[proxy.unobs==0,]),log=TRUE, lower.tail=FALSE) | ||||
| 
 | ||||
|             ll.w.unobs.x1[proxy.unobs==1] <- plogis(proxy.params %*% t(proxy.model.matrix.x1.unobs[proxy.unobs==1,]),log=TRUE) | ||||
|             ll.w.unobs.x1[proxy.unobs==0] <- plogis(proxy.params %*% t(proxy.model.matrix.x1.unobs[proxy.unobs==0,]),log=TRUE, lower.tail=FALSE) | ||||
|         } | ||||
| 
 | ||||
|         truth.model.matrix.unobs <- model.matrix(truth_formula, df.unobs) | ||||
| 
 | ||||
|         ll.unobs.x0 <- plogis(truth.params %*% t(truth.model.matrix.unobs), log=TRUE) | ||||
|         ll.unobs.x1 <- plogis(truth.params %*% t(truth.model.matrix.unobs), log=TRUE, lower.tail=FALSE) | ||||
| 
 | ||||
|         ll.unobs <- colLogSumExps(rbind(ll.unobs.x0 + ll.w.unobs.x0 + ll.y.unobs.x0, | ||||
|                                         ll.unobs.x1 + ll.w.unobs.x1 + ll.y.unobs.x1)) | ||||
| 
 | ||||
|         return(-1 *( sum(ll.obs) + sum(ll.unobs))) | ||||
|     } | ||||
| 
 | ||||
|     outcome.params <- colnames(model.matrix(outcome_formula,df)) | ||||
|     lower <- rep(-Inf, length(outcome.params)) | ||||
| 
 | ||||
|     if(outcome_family$family=='gaussian'){ | ||||
|         params <- c(outcome.params, 'sigma_y') | ||||
|         lower <- c(lower, 0.00001) | ||||
|     } else { | ||||
|         params <- outcome.params | ||||
|     } | ||||
|      | ||||
|     rater.0.params <- colnames(model.matrix(rater_formula,df)) | ||||
|     params <- c(params, paste0('rater_0',rater.0.params)) | ||||
|     lower <- c(lower, rep(-Inf, length(rater.0.params))) | ||||
| 
 | ||||
|     rater.1.params <- colnames(model.matrix(rater_formula,df)) | ||||
|     params <- c(params, paste0('rater_1',rater.1.params)) | ||||
|     lower <- c(lower, rep(-Inf, length(rater.1.params))) | ||||
| 
 | ||||
|     proxy.params <- colnames(model.matrix(proxy_formula, df)) | ||||
|     params <- c(params, paste0('proxy_',proxy.params)) | ||||
|     lower <- c(lower, rep(-Inf, length(proxy.params))) | ||||
| 
 | ||||
|     truth.params <- colnames(model.matrix(truth_formula, df)) | ||||
|     params <- c(params, paste0('truth_', truth.params)) | ||||
|     lower <- c(lower, rep(-Inf, length(truth.params))) | ||||
|     start <- rep(0.1,length(params)) | ||||
|     names(start) <- params | ||||
|      | ||||
|      | ||||
|     if(method=='optim'){ | ||||
|         fit <- optim(start, fn = nll, lower=lower, method='L-BFGS-B', hessian=TRUE, control=list(maxit=1e6)) | ||||
|     } else { | ||||
|                  | ||||
|         quoted.names <- gsub("[\\(\\)]",'',names(start)) | ||||
|         print(quoted.names) | ||||
|         text <- paste("function(", paste0(quoted.names,'=',start,collapse=','),"){params<-c(",paste0(quoted.names,collapse=','),");return(nll(params))}") | ||||
| 
 | ||||
|         measerr_mle_nll <- eval(parse(text=text)) | ||||
|         names(start) <- quoted.names | ||||
|         names(lower) <- quoted.names | ||||
|         fit <- mle2(minuslogl=measerr_mle_nll, start=start, lower=lower, parnames=params,control=list(maxit=1e6),method='L-BFGS-B') | ||||
|     } | ||||
| 
 | ||||
|     return(fit) | ||||
| } | ||||
| 
 | ||||
| 
 | ||||
| measerr_mle <- function(df, outcome_formula, outcome_family=gaussian(), proxy_formula, proxy_family=binomial(link='logit'), truth_formula, truth_family=binomial(link='logit'),method='optim'){ | ||||
| 
 | ||||
|     df.obs <- model.frame(outcome_formula, df) | ||||
|     response.var <- all.vars(outcome_formula)[1] | ||||
|     proxy.variable <- all.vars(proxy_formula)[1] | ||||
|     truth.variable <- all.vars(truth_formula)[1] | ||||
|     outcome.model.matrix <- model.matrix(outcome_formula, df) | ||||
|     proxy.model.matrix <- model.matrix(proxy_formula, df) | ||||
|     y.obs <- with(df.obs,eval(parse(text=response.var))) | ||||
| 
 | ||||
|     measerr_mle_nll <- function(params){ | ||||
|         df.obs <- model.frame(outcome_formula, df) | ||||
|         proxy.variable <- all.vars(proxy_formula)[1] | ||||
|         proxy.model.matrix <- model.matrix(proxy_formula, df) | ||||
|         response.var <- all.vars(outcome_formula)[1] | ||||
|         y.obs <- with(df.obs,eval(parse(text=response.var))) | ||||
| 
 | ||||
|         outcome.model.matrix <- model.matrix(outcome_formula, df) | ||||
| 
 | ||||
|         param.idx <- 1 | ||||
|         n.outcome.model.covars <- dim(outcome.model.matrix)[2] | ||||
|         outcome.params <- params[param.idx:n.outcome.model.covars] | ||||
| @ -343,7 +135,6 @@ measerr_mle <- function(df, outcome_formula, outcome_family=gaussian(), proxy_fo | ||||
|         if(outcome_family$family == "gaussian"){ | ||||
|             sigma.y <- params[param.idx] | ||||
|             param.idx <- param.idx + 1 | ||||
| 
 | ||||
|                                         #  outcome_formula likelihood using linear regression | ||||
|             ll.y.obs <- dnorm(y.obs, outcome.params %*% t(outcome.model.matrix),sd=sigma.y, log=TRUE) | ||||
|         } | ||||
| @ -363,7 +154,7 @@ measerr_mle <- function(df, outcome_formula, outcome_family=gaussian(), proxy_fo | ||||
|         } | ||||
| 
 | ||||
|         df.obs <- model.frame(truth_formula, df) | ||||
|         truth.variable <- all.vars(truth_formula)[1] | ||||
| 
 | ||||
|         truth.obs <- with(df.obs, eval(parse(text=truth.variable))) | ||||
|         truth.model.matrix <- model.matrix(truth_formula,df) | ||||
|         n.truth.model.covars <- dim(truth.model.matrix)[2] | ||||
| @ -468,3 +259,338 @@ measerr_mle <- function(df, outcome_formula, outcome_family=gaussian(), proxy_fo | ||||
|     return(fit) | ||||
| } | ||||
| 
 | ||||
| ## Experimental, but probably works.  | ||||
| measerr_irr_mle <- function(df, outcome_formula, outcome_family=gaussian(), coder_formulas=list(x.obs.0~x, x.obs.1~x), proxy_formula, proxy_family=binomial(link='logit'), truth_formula, truth_family=binomial(link='logit'),method='optim'){ | ||||
| 
 | ||||
|     ### in this scenario, the ground truth also has measurement error, but we have repeated measures for it.  | ||||
|     # this time we never get to observe the true X | ||||
|     outcome.model.matrix <- model.matrix(outcome_formula, df) | ||||
|     proxy.model.matrix <- model.matrix(proxy_formula, df) | ||||
|     response.var <- all.vars(outcome_formula)[1] | ||||
|     proxy.var <- all.vars(proxy_formula)[1] | ||||
|     param.var <- all.vars(truth_formula)[1] | ||||
|     truth.var <- all.vars(truth_formula)[1] | ||||
|     y <- with(df,eval(parse(text=response.var))) | ||||
| 
 | ||||
|     nll <- function(params){ | ||||
|         param.idx <- 1 | ||||
|         n.outcome.model.covars <- dim(outcome.model.matrix)[2] | ||||
|         outcome.params <- params[param.idx:n.outcome.model.covars] | ||||
|         param.idx <- param.idx + n.outcome.model.covars | ||||
| 
 | ||||
| 
 | ||||
|         if(outcome_family$family == "gaussian"){ | ||||
|             sigma.y <- params[param.idx] | ||||
|             param.idx <- param.idx + 1 | ||||
|         } | ||||
| 
 | ||||
| 
 | ||||
|         n.proxy.model.covars <- dim(proxy.model.matrix)[2] | ||||
|         proxy.params <- params[param.idx:(n.proxy.model.covars+param.idx-1)] | ||||
|         param.idx <- param.idx + n.proxy.model.covars | ||||
|          | ||||
|         df.temp <- copy(df) | ||||
| 
 | ||||
|         if((truth_family$family == "binomial") | ||||
|            & (truth_family$link=='logit')){ | ||||
|             integrate.grid <- expand.grid(replicate(1 + length(coder_formulas), c(0,1), simplify=FALSE)) | ||||
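|             ## integrate.grid enumerates every 0/1 combination of the latent truth variable plus | ||||
|             ## each coder's label: 2^(1 + length(coder_formulas)) rows to sum over below. | ||||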
|             ll.parts <- matrix(nrow=nrow(df),ncol=nrow(integrate.grid)) | ||||
|             for(i in 1:nrow(integrate.grid)){ | ||||
|                 # setup the dataframe for this row | ||||
|                 row <- integrate.grid[i,] | ||||
| 
 | ||||
|                 df.temp[[param.var]] <- row[[1]] | ||||
|                 ci <- 2 | ||||
|                 for(coder_formula in coder_formulas){ | ||||
|                     coder.var <- all.vars(coder_formula)[1] | ||||
|                     df.temp[[coder.var]] <- row[[ci]] | ||||
|                     ci <- ci + 1  | ||||
|                 } | ||||
|                  | ||||
|                 outcome.model.matrix <- model.matrix(outcome_formula, df.temp)                 | ||||
|                 if(outcome_family$family == "gaussian"){ | ||||
|                     ll.y <- dnorm(y, outcome.params %*% t(outcome.model.matrix), sd=sigma.y, log=TRUE) | ||||
|                 } | ||||
| 
 | ||||
|                 if(proxy_family$family=="binomial" & (proxy_family$link=='logit')){ | ||||
|                     proxy.model.matrix <- model.matrix(proxy_formula, df.temp) | ||||
|                     ll.w <- vector(mode='numeric', length=dim(proxy.model.matrix)[1]) | ||||
|                     proxyvar <- with(df.temp,eval(parse(text=proxy.var))) | ||||
|                     ll.w[proxyvar==1] <- plogis(proxy.params %*% t(proxy.model.matrix[proxyvar==1,]),log=TRUE) | ||||
|                     ll.w[proxyvar==0] <- plogis(proxy.params %*% t(proxy.model.matrix[proxyvar==0,]),log=TRUE,lower.tail=FALSE) | ||||
|                 } | ||||
| 
 | ||||
|                 ## probability of the coded variables | ||||
|                 coder.lls <- matrix(nrow=nrow(df.temp),ncol=length(coder_formulas)) | ||||
|                 ci <- 1 | ||||
|                 for(coder_formula in coder_formulas){ | ||||
|                     coder.model.matrix <- model.matrix(coder_formula, df.temp) | ||||
|                     n.coder.model.covars <- dim(coder.model.matrix)[2] | ||||
|                     coder.params <- params[param.idx:(n.coder.model.covars + param.idx - 1)] | ||||
|                     param.idx <- param.idx + n.coder.model.covars | ||||
|                     coder.var <- all.vars(coder_formula)[1] | ||||
|                     x.obs <- with(df.temp, eval(parse(text=coder.var))) | ||||
|                     true.codervar <- df[[all.vars(coder_formula)[1]]] | ||||
| 
 | ||||
|                     ll.coder <- vector(mode='numeric', length=dim(coder.model.matrix)[1]) | ||||
|                     ll.coder[x.obs==1] <- plogis(coder.params %*% t(coder.model.matrix[x.obs==1,]),log=TRUE) | ||||
|                     ll.coder[x.obs==0] <- plogis(coder.params %*% t(coder.model.matrix[x.obs==0,]),log=TRUE,lower.tail=FALSE) | ||||
| 
 | ||||
|                     # if this coder's label is actually observed, set the grid rows that contradict it to NA so they drop out of the logsumexp below | ||||
|                     ll.coder[(!is.na(true.codervar)) & (true.codervar != x.obs)] <- NA | ||||
|                     coder.lls[,ci] <- ll.coder | ||||
|                     ci <- ci + 1 | ||||
|                 } | ||||
|                  | ||||
|                 truth.model.matrix <- model.matrix(truth_formula, df.temp) | ||||
|                 n.truth.model.covars <- dim(truth.model.matrix)[2] | ||||
|                 truth.params <- params[param.idx:(n.truth.model.covars + param.idx - 1)] | ||||
| 
 | ||||
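|                 ## rewind param.idx past the coder blocks so the same coder parameters are | ||||
|                 ## re-read on the next pass through integrate.grid | ||||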
|                 for(coder_formula in coder_formulas){ | ||||
|                     coder.model.matrix <- model.matrix(coder_formula, df.temp) | ||||
|                     n.coder.model.covars <- dim(coder.model.matrix)[2] | ||||
|                     param.idx <- param.idx - n.coder.model.covars | ||||
|                 } | ||||
| 
 | ||||
|                 x <- with(df.temp, eval(parse(text=truth.var))) | ||||
|                 ll.truth <- vector(mode='numeric', length=dim(truth.model.matrix)[1]) | ||||
|                 ll.truth[x==1] <- plogis(truth.params %*% t(truth.model.matrix[x==1,]), log=TRUE) | ||||
|                 ll.truth[x==0] <- plogis(truth.params %*% t(truth.model.matrix[x==0,]), log=TRUE, lower.tail=FALSE) | ||||
| 
 | ||||
|                 true.truthvar <- df[[all.vars(truth_formula)[1]]] | ||||
|                  | ||||
|                 if(!is.null(true.truthvar)){ | ||||
|                                         # ll.truth[(!is.na(true.truthvar)) & (true.truthvar != truthvar)] <- -Inf | ||||
|                     # ll.truth[(!is.na(true.truthvar)) & (true.truthvar == truthvar)] <- 0 | ||||
|                 } | ||||
|                 ll.parts[,i] <- ll.y + ll.w + apply(coder.lls,1,sum) + ll.truth | ||||
|                  | ||||
|             } | ||||
| 
 | ||||
|             lls <- rowLogSumExps(ll.parts,na.rm=TRUE) | ||||
| 
 | ||||
|             ## negative log likelihood of the data, with the latent truth and coder values summed out | ||||
|             target <- -1 * sum(lls) | ||||
|             return(target) | ||||
|         } | ||||
|     } | ||||
|          | ||||
|     outcome.params <- colnames(model.matrix(outcome_formula,df)) | ||||
|     lower <- rep(-Inf, length(outcome.params)) | ||||
| 
 | ||||
|     if(outcome_family$family=='gaussian'){ | ||||
|         params <- c(outcome.params, 'sigma_y') | ||||
|         lower <- c(lower, 0.00001) | ||||
|     } else { | ||||
|         params <- outcome.params | ||||
|     } | ||||
| 
 | ||||
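|     ## Constrain the proxy's (and, below, each coder's) coefficient on the latent truth variable to | ||||
|     ## be positive; as in the comments in measerr_irr_mle_dv below, this is assumed to be needed for | ||||
|     ## identifiability, i.e. proxies and coders are right more often than wrong. | ||||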
|     proxy.params <- colnames(model.matrix(proxy_formula, df)) | ||||
|     params <- c(params, paste0('proxy_',proxy.params)) | ||||
|     positive.params <- paste0('proxy_',truth.var) | ||||
|     lower <- c(lower, rep(-Inf, length(proxy.params))) | ||||
|     names(lower) <- params | ||||
|     lower[positive.params] <- 0.01 | ||||
|     ci <- 0 | ||||
|      | ||||
|     for(coder_formula in coder_formulas){ | ||||
|         coder.params <- colnames(model.matrix(coder_formula,df)) | ||||
|         params <- c(params, paste0('coder_',ci,coder.params)) | ||||
|         positive.params <- paste0('coder_', ci, truth.var) | ||||
|         ci <- ci + 1 | ||||
|         lower <- c(lower, rep(-Inf, length(coder.params))) | ||||
|         names(lower) <- params | ||||
|         lower[positive.params] <- 0.01 | ||||
|     } | ||||
| 
 | ||||
|     truth.params <- colnames(model.matrix(truth_formula, df)) | ||||
|     params <- c(params, paste0('truth_', truth.params)) | ||||
|     lower <- c(lower, rep(-Inf, length(truth.params))) | ||||
|     start <- rep(0.1,length(params)) | ||||
|     names(start) <- params | ||||
|     names(lower) <- params | ||||
|      | ||||
|     if(method=='optim'){ | ||||
|         print(start) | ||||
|         fit <- optim(start, fn = nll, lower=lower, method='L-BFGS-B', hessian=TRUE, control=list(maxit=1e6)) | ||||
|     } else { | ||||
|                  | ||||
|         quoted.names <- gsub("[\\(\\)]",'',names(start)) | ||||
|         print(quoted.names) | ||||
|         text <- paste("function(", paste0(quoted.names,'=',start,collapse=','),"){params<-c(",paste0(quoted.names,collapse=','),");return(nll(params))}") | ||||
| 
 | ||||
|         measerr_mle_nll <- eval(parse(text=text)) | ||||
|         names(start) <- quoted.names | ||||
|         names(lower) <- quoted.names | ||||
|         fit <- mle2(minuslogl=measerr_mle_nll, start=start, lower=lower, method='L-BFGS-B',control=list(maxit=1e6)) | ||||
|     } | ||||
| 
 | ||||
|     return(fit) | ||||
| } | ||||
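| ## A minimal usage sketch (an assumption-laden example, not part of the simulation code): it | ||||
| ## presumes a data.table with columns y, z, w_pred and two coder labels x.obs.0 / x.obs.1, with | ||||
| ## the true x never observed, as in run_simulation. | ||||
| ## fit <- measerr_irr_mle(df, outcome_formula = y ~ x + z, proxy_formula = w_pred ~ x, | ||||
| ##                        coder_formulas = list(x.obs.0 ~ x, x.obs.1 ~ x), truth_formula = x ~ z) | ||||
| ## se <- sqrt(diag(solve(fit$hessian)))  # with the default method='optim'; Wald CIs: fit$par +/- 1.96*se | ||||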
| 
 | ||||
| ## Experimental, and does not work. | ||||
| measerr_irr_mle_dv <- function(df, outcome_formula, outcome_family=binomial(link='logit'), coder_formulas=list(y.obs.0~y+w_pred+y.obs.1,y.obs.1~y+w_pred+y.obs.0), proxy_formula=w_pred~y, proxy_family=binomial(link='logit'),method='optim'){ | ||||
|     integrate.grid <- expand.grid(replicate(1 + length(coder_formulas), c(0,1), simplify=FALSE)) | ||||
|     print(integrate.grid) | ||||
| 
 | ||||
| 
 | ||||
|     outcome.model.matrix <- model.matrix(outcome_formula, df) | ||||
|     n.outcome.model.covars <- dim(outcome.model.matrix)[2] | ||||
| 
 | ||||
| 
 | ||||
|     ### in this scenario, the ground truth also has measurement error, but we have repeated measures for it.  | ||||
|     # this time we never get to observe the true X | ||||
|     nll <- function(params){ | ||||
|         param.idx <- 1 | ||||
|         outcome.params <- params[param.idx:n.outcome.model.covars] | ||||
|         param.idx <- param.idx + n.outcome.model.covars | ||||
|         proxy.model.matrix <- model.matrix(proxy_formula, df) | ||||
|         n.proxy.model.covars <- dim(proxy.model.matrix)[2] | ||||
|         response.var <- all.vars(outcome_formula)[1] | ||||
| 
 | ||||
|         if(outcome_family$family == "gaussian"){ | ||||
|             sigma.y <- params[param.idx] | ||||
|             param.idx <- param.idx + 1 | ||||
|         } | ||||
| 
 | ||||
|         proxy.params <- params[param.idx:(n.proxy.model.covars+param.idx-1)] | ||||
|         param.idx <- param.idx + n.proxy.model.covars | ||||
| 
 | ||||
|         df.temp <- copy(df) | ||||
| 
 | ||||
|         if((outcome_family$family == "binomial") | ||||
|            & (outcome_family$link=='logit')){ | ||||
|             ll.parts <- matrix(nrow=nrow(df),ncol=nrow(integrate.grid)) | ||||
|             for(i in 1:nrow(integrate.grid)){ | ||||
|                 # setup the dataframe for this row | ||||
|                 row <- integrate.grid[i,] | ||||
| 
 | ||||
|                 df.temp[[response.var]] <- row[[1]] | ||||
|                 ci <- 2 | ||||
|                 for(coder_formula in coder_formulas){ | ||||
|                     codervar <- all.vars(coder_formula)[1] | ||||
|                     df.temp[[codervar]] <- row[[ci]] | ||||
|                     ci <- ci + 1  | ||||
|                 } | ||||
|                  | ||||
|                 outcome.model.matrix <- model.matrix(outcome_formula, df.temp)                 | ||||
|                 if(outcome_family$family == "gaussian"){ | ||||
|                     ll.y <- dnorm(df.temp[[response.var]], outcome.params %*% t(outcome.model.matrix), sd=sigma.y, log=T) | ||||
|                 } | ||||
| 
 | ||||
|                 if(outcome_family$family == "binomial" & (outcome_family$link=='logit')){ | ||||
|                     ll.y <- vector(mode='numeric',length=nrow(df.temp)) | ||||
|                     ll.y[df.temp[[response.var]]==1] <- plogis( outcome.params %*% t(outcome.model.matrix), log=TRUE) | ||||
|                     ll.y[df.temp[[response.var]]==0] <- plogis( outcome.params %*% t(outcome.model.matrix), log=TRUE,lower.tail=FALSE) | ||||
|                 } | ||||
| 
 | ||||
|                 if(proxy_family$family=="binomial" & (proxy_family$link=='logit')){ | ||||
|                     proxy.model.matrix <- model.matrix(proxy_formula, df.temp) | ||||
|                     ll.w <- vector(mode='numeric', length=dim(proxy.model.matrix)[1]) | ||||
|                     proxyvar <- with(df.temp,eval(parse(text=all.vars(proxy_formula)[1]))) | ||||
|                     ll.w[proxyvar==1] <- plogis(proxy.params %*% t(proxy.model.matrix[proxyvar==1,]),log=TRUE) | ||||
|                     ll.w[proxyvar==0] <- plogis(proxy.params %*% t(proxy.model.matrix[proxyvar==0,]),log=TRUE,lower.tail=FALSE) | ||||
|                 } | ||||
| 
 | ||||
|                 ## probability of the coded variables | ||||
|                 coder.lls <- matrix(nrow=nrow(df.temp),ncol=length(coder_formulas)) | ||||
|                 ci <- 1 | ||||
|                 for(coder_formula in coder_formulas){ | ||||
|                     coder.model.matrix <- model.matrix(coder_formula, df.temp) | ||||
|                     n.coder.model.covars <- dim(coder.model.matrix)[2] | ||||
|                     coder.params <- params[param.idx:(n.coder.model.covars + param.idx - 1)] | ||||
|                     param.idx <- param.idx + n.coder.model.covars | ||||
|                     codervar <- with(df.temp, eval(parse(text=all.vars(coder_formula)[1]))) | ||||
|                     true.codervar <- df[[all.vars(coder_formula)[1]]] | ||||
| 
 | ||||
|                     ll.coder <- vector(mode='numeric', length=dim(coder.model.matrix)[1]) | ||||
|                     ll.coder[codervar==1] <- plogis(coder.params %*% t(coder.model.matrix[codervar==1,]),log=TRUE) | ||||
|                     ll.coder[codervar==0] <- plogis(coder.params %*% t(coder.model.matrix[codervar==0,]),log=TRUE,lower.tail=FALSE) | ||||
| 
 | ||||
|                     # if this coder's label is actually observed, set the grid rows that contradict it to NA so they drop out of the logsumexp below | ||||
|                     ll.coder[(!is.na(true.codervar)) & (true.codervar != codervar)] <- NA | ||||
|                     coder.lls[,ci] <- ll.coder | ||||
|                     ci <- ci + 1 | ||||
|                 } | ||||
| 
 | ||||
|                 for(coder_formula in coder_formulas){ | ||||
|                     coder.model.matrix <- model.matrix(coder_formula, df.temp) | ||||
|                     n.coder.model.covars <- dim(coder.model.matrix)[2] | ||||
|                     param.idx <- param.idx - n.coder.model.covars | ||||
|                 } | ||||
| 
 | ||||
|                 ll.parts[,i] <- ll.y + ll.w + apply(coder.lls,1,function(x) sum(x))  | ||||
|                  | ||||
|             } | ||||
| 
 | ||||
|             lls <- rowLogSumExps(ll.parts,na.rm=TRUE) | ||||
| 
 | ||||
|             ## likelihood of observed data  | ||||
|             target <- -1 * sum(lls) | ||||
|             print(target) | ||||
|             print(params) | ||||
|             return(target) | ||||
|         } | ||||
|     } | ||||
|          | ||||
|     outcome.params <- colnames(model.matrix(outcome_formula,df)) | ||||
|     response.var <- all.vars(outcome_formula)[1] | ||||
|     lower <- rep(-Inf, length(outcome.params)) | ||||
| 
 | ||||
|     if(outcome_family$family=='gaussian'){ | ||||
|         params <- c(outcome.params, 'sigma_y') | ||||
|         lower <- c(lower, 0.00001) | ||||
|     } else { | ||||
|         params <- outcome.params | ||||
|     } | ||||
| 
 | ||||
|     ## constrain the model of the coder and proxy vars | ||||
|     ## this is to ensure identifiability | ||||
|     ## it is a safe assumption because the coders aren't adversarial (i.e., not wrong more often than right), | ||||
|     ## so we can assume that y ~ B*w with B positive | ||||
|     proxy.params <- colnames(model.matrix(proxy_formula, df)) | ||||
|     positive.params <- paste0('proxy_',response.var) | ||||
|     params <- c(params, paste0('proxy_',proxy.params)) | ||||
|     lower <- c(lower, rep(-Inf, length(proxy.params))) | ||||
|     names(lower) <- params | ||||
|     lower[positive.params] <- 0.001 | ||||
| 
 | ||||
|     ci <- 0 | ||||
|     for(coder_formula in coder_formulas){ | ||||
|         coder.params <- colnames(model.matrix(coder_formula,df)) | ||||
|         latent.coder.params <- coder.params %in% response.var | ||||
|         params <- c(params, paste0('coder_',ci,coder.params)) | ||||
|         positive.params <- paste0('coder_',ci,response.var) | ||||
|         ci <- ci + 1 | ||||
|         lower <- c(lower, rep(-Inf, length(coder.params))) | ||||
|         names(lower) <- params | ||||
|         lower[positive.params] <- 0.001 | ||||
|     } | ||||
| 
 | ||||
|     ## init by using the "loco model" | ||||
|     temp.df <- copy(df) | ||||
|     temp.df <- temp.df[y.obs.1 == y.obs.0, y:=y.obs.1] | ||||
|     loco.model <- glm(outcome_formula, temp.df, family=outcome_family) | ||||
|      | ||||
|     start <- rep(1,length(params)) | ||||
|     names(start) <- params | ||||
|     start[names(coef(loco.model))] <- coef(loco.model) | ||||
|     names(lower) <- params | ||||
|     if(method=='optim'){ | ||||
|         print(lower) | ||||
|         fit <- optim(start, fn = nll, lower=lower, method='L-BFGS-B', hessian=TRUE,control=list(maxit=1e6)) | ||||
|     } else { | ||||
|                  | ||||
|         quoted.names <- gsub("[\\(\\)]",'',names(start)) | ||||
|         print(quoted.names) | ||||
|         text <- paste("function(", paste0(quoted.names,'=',start,collapse=','),"){params<-c(",paste0(quoted.names,collapse=','),");return(nll(params))}") | ||||
| 
 | ||||
|         measerr_mle_nll <- eval(parse(text=text)) | ||||
|         names(start) <- quoted.names | ||||
|         names(lower) <- quoted.names | ||||
|         fit <- mle2(minuslogl=measerr_mle_nll, start=start, lower=lower, parnames=params,control=list(maxit=1e6),method='L-BFGS-B') | ||||
|     } | ||||
| 
 | ||||
|     return(fit) | ||||
| } | ||||
| 
 | ||||
|  | ||||
| @ -6,7 +6,7 @@ library(filelock) | ||||
| library(argparser) | ||||
| 
 | ||||
| parser <- arg_parser("Simulate data and fit corrected models.") | ||||
| parser <- add_argument(parser, "--infile", default="", help="name of the file to read.") | ||||
| parser <- add_argument(parser, "--infile", default="example_4.feather", help="name of the file to read.") | ||||
| parser <- add_argument(parser, "--remember-file", default="remembr.RDS", help="name of the remember file.") | ||||
| parser <- add_argument(parser, "--name", default="", help="The name to save the data to in the remember file.") | ||||
| args <- parse_args(parser) | ||||
| @ -87,6 +87,7 @@ build_plot_dataset <- function(df){ | ||||
| change.remember.file(args$remember_file, clear=TRUE) | ||||
| sims.df <- read_feather(args$infile) | ||||
| sims.df[,Bzx:=NA] | ||||
| sims.df[,y_explained_variance:=NA] | ||||
| sims.df[,accuracy_imbalance_difference:=NA] | ||||
| plot.df <- build_plot_dataset(sims.df) | ||||
| 
 | ||||
| @ -97,6 +98,7 @@ set.remember.prefix(gsub("plot.df.","",args$name)) | ||||
| remember(median(sims.df$cor.xz),'med.cor.xz') | ||||
| remember(median(sims.df$accuracy),'med.accuracy') | ||||
| remember(median(sims.df$error.cor.x),'med.error.cor.x') | ||||
| remember(median(sims.df$error.cor.z),'med.error.cor.z') | ||||
| remember(median(sims.df$lik.ratio),'med.lik.ratio') | ||||
| 
 | ||||
| 
 | ||||
|  | ||||
| @ -9,7 +9,7 @@ source("summarize_estimator.R") | ||||
| 
 | ||||
| 
 | ||||
| parser <- arg_parser("Simulate data and fit corrected models.") | ||||
| parser <- add_argument(parser, "--infile", default="", help="name of the file to read.") | ||||
| parser <- add_argument(parser, "--infile", default="example_2.feather", help="name of the file to read.") | ||||
| parser <- add_argument(parser, "--remember-file", default="remembr.RDS", help="name of the remember file.") | ||||
| parser <- add_argument(parser, "--name", default="", help="The name to save the data to in the remember file.") | ||||
| args <- parse_args(parser) | ||||
| @ -76,13 +76,13 @@ build_plot_dataset <- function(df){ | ||||
| 
 | ||||
|     z.amelia.full <- summarize.estimator(df, 'amelia.full', 'z') | ||||
|      | ||||
|     x.mecor <- summarize.estimator(df, 'mecor', 'x') | ||||
|     ## x.mecor <- summarize.estimator(df, 'mecor', 'x') | ||||
| 
 | ||||
|     z.mecor <- summarize.estimator(df, 'mecor', 'z') | ||||
|     ## z.mecor <- summarize.estimator(df, 'mecor', 'z') | ||||
| 
 | ||||
|     x.mecor <- summarize.estimator(df, 'mecor', 'x') | ||||
|     ## x.mecor <- summarize.estimator(df, 'mecor', 'x') | ||||
| 
 | ||||
|     z.mecor <- summarize.estimator(df, 'mecor', 'z') | ||||
|     ## z.mecor <- summarize.estimator(df, 'mecor', 'z') | ||||
| 
 | ||||
|     x.mle <- summarize.estimator(df, 'mle', 'x') | ||||
| 
 | ||||
| @ -97,7 +97,7 @@ build_plot_dataset <- function(df){ | ||||
|     z.gmm <- summarize.estimator(df, 'gmm', 'z') | ||||
| 
 | ||||
|     accuracy <- df[,mean(accuracy)] | ||||
|     plot.df <- rbindlist(list(x.true,z.true,x.naive,z.naive,x.amelia.full,z.amelia.full,x.mecor, z.mecor, x.gmm, z.gmm, x.feasible, z.feasible,z.mle, x.mle, x.zhang, z.zhang, x.gmm, z.gmm),use.names=T) | ||||
|     plot.df <- rbindlist(list(x.true,z.true,x.naive,z.naive,x.amelia.full,z.amelia.full,x.gmm, z.gmm, x.feasible, z.feasible,z.mle, x.mle, x.zhang, z.zhang),use.names=T) | ||||
|     plot.df[,accuracy := accuracy] | ||||
|     plot.df <- plot.df[,":="(sd.est=sqrt(var.est)/N.sims)] | ||||
|     return(plot.df) | ||||
| @ -105,6 +105,7 @@ build_plot_dataset <- function(df){ | ||||
| 
 | ||||
| 
 | ||||
| sims.df <- read_feather(args$infile) | ||||
| unique(sims.df[,.N,by=.(N,m)]) | ||||
| print(unique(sims.df$N)) | ||||
| 
 | ||||
| # df <- df[apply(df,1,function(x) !any(is.na(x)))] | ||||
|  | ||||
| @ -17,6 +17,10 @@ build_plot_dataset <- function(df){ | ||||
| 
 | ||||
|     z.true <-  summarize.estimator(df, 'true','z') | ||||
| 
 | ||||
|     x.naive <-  summarize.estimator(df, 'naive','x') | ||||
| 
 | ||||
|     z.naive <-  summarize.estimator(df, 'naive','z') | ||||
| 
 | ||||
|     x.loa0.feasible <- summarize.estimator(df, 'loa0.feasible','x') | ||||
|      | ||||
|     z.loa0.feasible <- summarize.estimator(df,'loa0.feasible','z') | ||||
| @ -34,8 +38,14 @@ build_plot_dataset <- function(df){ | ||||
|     z.loco.mle <- summarize.estimator(df, 'loco.mle', 'z') | ||||
| 
 | ||||
| 
 | ||||
|     z.loco.amelia <- summarize.estimator(df, 'amelia.full', 'z') | ||||
|     x.loco.amelia <- summarize.estimator(df, 'amelia.full', 'x') | ||||
| 
 | ||||
|     z.loco.zhang <- summarize.estimator(df, 'zhang', 'z') | ||||
|     x.loco.zhang <- summarize.estimator(df, 'zhang', 'x') | ||||
| 
 | ||||
|     accuracy <- df[,mean(accuracy)] | ||||
|     plot.df <- rbindlist(list(x.true,z.true,x.loa0.feasible,z.loa0.feasible,x.loa0.mle,z.loa0.mle,x.loco.feasible, z.loco.feasible, z.loco.mle, x.loco.mle),use.names=T) | ||||
|     plot.df <- rbindlist(list(x.true,z.true,x.loa0.feasible,z.loa0.feasible,x.naive,z.naive,x.loa0.mle,z.loa0.mle,x.loco.feasible, z.loco.feasible, z.loco.mle, x.loco.mle, x.loco.amelia, z.loco.amelia, z.loco.zhang, x.loco.zhang),use.names=T) | ||||
|     plot.df[,accuracy := accuracy] | ||||
|     plot.df <- plot.df[,":="(sd.est=sqrt(var.est)/N.sims)] | ||||
|     return(plot.df) | ||||
|  | ||||
| @ -17,6 +17,10 @@ build_plot_dataset <- function(df){ | ||||
| 
 | ||||
|     z.true <-  summarize.estimator(df, 'true','z') | ||||
| 
 | ||||
|     x.naive <-  summarize.estimator(df, 'naive','x') | ||||
| 
 | ||||
|     z.naive <-  summarize.estimator(df, 'naive','z') | ||||
| 
 | ||||
|     x.loa0.feasible <- summarize.estimator(df, 'loa0.feasible','x') | ||||
|      | ||||
|     z.loa0.feasible <- summarize.estimator(df,'loa0.feasible','z') | ||||
| @ -33,36 +37,55 @@ build_plot_dataset <- function(df){ | ||||
| 
 | ||||
|     z.loco.mle <- summarize.estimator(df, 'loco.mle', 'z') | ||||
| 
 | ||||
|     x.loco.mle <- summarize.estimator(df, 'loco.mle', 'x') | ||||
| 
 | ||||
|     z.loco.amelia <- summarize.estimator(df, 'amelia.full', 'z') | ||||
|     x.loco.amelia <- summarize.estimator(df, 'amelia.full', 'x') | ||||
| 
 | ||||
|     z.loco.zhang <- summarize.estimator(df, 'zhang', 'z') | ||||
|     x.loco.zhang <- summarize.estimator(df, 'zhang', 'x') | ||||
| 
 | ||||
| 
 | ||||
|     z.loco.gmm <- summarize.estimator(df, 'gmm', 'z') | ||||
|     x.loco.gmm <- summarize.estimator(df, 'gmm', 'x') | ||||
| 
 | ||||
|      | ||||
| 
 | ||||
| 
 | ||||
|     ## x.mle <- summarize.estimator(df, 'mle', 'x') | ||||
| 
 | ||||
|     ## z.mle <- summarize.estimator(df, 'mle', 'z') | ||||
| 
 | ||||
|     accuracy <- df[,mean(accuracy)] | ||||
|     plot.df <- rbindlist(list(x.true,z.true,x.loa0.feasible,z.loa0.feasible,x.loa0.mle,z.loa0.mle,x.loco.feasible, z.loco.feasible, x.loco.mle, z.loco.mle),use.names=T) | ||||
|     plot.df <- rbindlist(list(x.true,z.true,x.loa0.feasible,z.loa0.feasible,x.loa0.mle,z.loa0.mle,x.loco.feasible, z.loco.feasible, x.loco.mle, z.loco.mle, x.loco.amelia, z.loco.amelia,x.loco.zhang, z.loco.zhang,x.loco.gmm, z.loco.gmm,x.naive,z.naive),use.names=T) | ||||
|     plot.df[,accuracy := accuracy] | ||||
|     plot.df <- plot.df[,":="(sd.est=sqrt(var.est)/N.sims)] | ||||
|     return(plot.df) | ||||
| } | ||||
| 
 | ||||
| 
 | ||||
| plot.df <- read_feather(args$infile) | ||||
| print(unique(plot.df$N)) | ||||
| sims.df <- read_feather(args$infile) | ||||
| print(unique(sims.df$N)) | ||||
| 
 | ||||
| # df <- df[apply(df,1,function(x) !any(is.na(x)))] | ||||
| 
 | ||||
| if(!('Bzx' %in% names(plot.df))) | ||||
|     plot.df[,Bzx:=NA] | ||||
| if(!('Bzx' %in% names(sims.df))) | ||||
|     sims.df[,Bzx:=NA] | ||||
| 
 | ||||
| if(!('accuracy_imbalance_difference' %in% names(plot.df))) | ||||
|     plot.df[,accuracy_imbalance_difference:=NA] | ||||
| if(!('accuracy_imbalance_difference' %in% names(sims.df))) | ||||
|     sims.df[,accuracy_imbalance_difference:=NA] | ||||
| 
 | ||||
| unique(plot.df[,'accuracy_imbalance_difference']) | ||||
| unique(sims.df[,'accuracy_imbalance_difference']) | ||||
| 
 | ||||
| #plot.df <- build_plot_dataset(df[accuracy_imbalance_difference==0.1][N==700]) | ||||
| plot.df <- build_plot_dataset(plot.df) | ||||
| plot.df <- build_plot_dataset(sims.df) | ||||
| change.remember.file("remember_irr.RDS",clear=TRUE) | ||||
| remember(plot.df,args$name) | ||||
| 
 | ||||
| 
 | ||||
| set.remember.prefix(gsub("plot.df.","",args$name)) | ||||
| remember(median(sims.df$loco.accuracy),'med.loco.acc') | ||||
| 
 | ||||
| #ggplot(df,aes(x=Bxy.est.mle)) + geom_histogram() + facet_grid(accuracy_imbalance_difference ~ Bzy) | ||||
| 
 | ||||
| ## ## ## df[gmm.ER_pval<0.05] | ||||
|  | ||||
| @ -1,8 +1,8 @@ | ||||
| #!/bin/bash | ||||
| #SBATCH --job-name="simulate measurement error models" | ||||
| ## Allocation Definition | ||||
| #SBATCH --account=comdata | ||||
| #SBATCH --partition=compute-bigmem | ||||
| #SBATCH --account=comdata-ckpt | ||||
| #SBATCH --partition=ckpt | ||||
| ## Resources | ||||
| #SBATCH --nodes=1     | ||||
| ## Walltime (4 hours) | ||||
| @ -18,5 +18,6 @@ source ~/.bashrc | ||||
| 
 | ||||
| TASK_NUM=$(($SLURM_ARRAY_TASK_ID + $1)) | ||||
| TASK_CALL=$(sed -n ${TASK_NUM}p $2) | ||||
| echo ${TASK_CALL} | ||||
| ${TASK_CALL} | ||||
| 
 | ||||
|  | ||||
| @ -7,6 +7,7 @@ library(Zelig) | ||||
| library(bbmle) | ||||
| library(matrixStats) # for numerically stable logsumexps | ||||
| 
 | ||||
| source("pl_methods.R") | ||||
| source("measerr_methods.R") ## for my more generic function. | ||||
| 
 | ||||
| ## This uses the pseudolikelihood approach from Carroll page 349. | ||||
| @ -36,124 +37,6 @@ my.pseudo.mle <- function(df){ | ||||
| 
 | ||||
| } | ||||
| 
 | ||||
| 
 | ||||
| ## model from Zhang's arxiv paper, with predictions for y | ||||
| ## Zhang got this model from Hausman 1998 | ||||
| ### I think this is actually equivalent to the pseudo.mle method | ||||
| zhang.mle.iv <- function(df){ | ||||
|     df.obs <- df[!is.na(x.obs)] | ||||
|     df.unobs <- df[is.na(x.obs)] | ||||
| 
 | ||||
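|     ## On the labeled rows, estimate the classifier's negative and positive predictive values | ||||
|     ## (npv = P(x=0 | w_pred=0), ppv = P(x=1 | w_pred=1)) as simple proportions. | ||||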
|     tn <- df.obs[(w_pred == 0) & (x.obs == w_pred),.N] | ||||
|     pn <- df.obs[(w_pred==0), .N] | ||||
|     npv <- tn / pn | ||||
| 
 | ||||
|     tp <- df.obs[(w_pred==1) & (x.obs == w_pred),.N] | ||||
|     pp <- df.obs[(w_pred==1),.N] | ||||
|     ppv <- tp / pp | ||||
| 
 | ||||
|     nll <- function(B0=0, Bxy=0, Bzy=0, sigma_y=0.1){ | ||||
| 
 | ||||
|     ## fpr = 1 - TNR | ||||
|     ### Problem: accounting for uncertainty in ppv / npv | ||||
| 
 | ||||
|     ## fnr = 1 - TPR | ||||
|     ll.y.obs <- with(df.obs, dnorm(y, B0 + Bxy * x + Bzy * z, sd=sigma_y,log=T)) | ||||
|     ll <- sum(ll.y.obs) | ||||
|      | ||||
|     # unobserved case; integrate out x | ||||
|     ll.x.1 <- with(df.unobs, dnorm(y, B0 + Bxy + Bzy * z, sd = sigma_y, log=T)) | ||||
|     ll.x.0 <- with(df.unobs, dnorm(y, B0 + Bzy * z, sd = sigma_y,log=T)) | ||||
| 
 | ||||
|     ## case x == 1 | ||||
|     lls.x.1 <- colLogSumExps(rbind(log(ppv) + ll.x.1, log(1-ppv) + ll.x.0)) | ||||
|      | ||||
|     ## case x == 0 | ||||
|     lls.x.0 <- colLogSumExps(rbind(log(1-npv) + ll.x.1, log(npv) + ll.x.0)) | ||||
| 
 | ||||
|     lls <- colLogSumExps(rbind(df.unobs$w_pred * lls.x.1, (1-df.unobs$w_pred) * lls.x.0)) | ||||
|     ll <- ll + sum(lls) | ||||
|     return(-ll) | ||||
|     }     | ||||
|     mlefit <- mle2(minuslogl = nll, control=list(maxit=1e6), lower=list(sigma_y=0.0001, B0=-Inf, Bxy=-Inf, Bzy=-Inf), | ||||
|                    upper=list(sigma_y=Inf, B0=Inf, Bxy=Inf, Bzy=Inf),method='L-BFGS-B') | ||||
|     return(mlefit) | ||||
| } | ||||
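| ## Minimal usage sketch (assumes df has columns y, x, z, w_pred and x.obs, with x.obs NA on | ||||
| ## unlabeled rows, as in run_simulation): | ||||
| ## mod <- zhang.mle.iv(df); coef(mod); confint(mod, method='quad')  # returns a bbmle::mle2 fit | ||||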
| 
 | ||||
| ## this is equivalent to the pseudo-likelihood model from Carroll | ||||
| ## zhang.mle.dv <- function(df){ | ||||
| 
 | ||||
| ##     nll <- function(B0=0, Bxy=0, Bzy=0, ppv=0.9, npv=0.9){ | ||||
| ##     df.obs <- df[!is.na(y.obs)] | ||||
| 
 | ||||
| ##     ## fpr = 1 - TNR | ||||
| ##     ll.w0y0 <- with(df.obs[y.obs==0],dbinom(1-w_pred,1,npv,log=TRUE)) | ||||
| ##     ll.w1y1 <- with(df.obs[y.obs==1],dbinom(w_pred,1,ppv,log=TRUE)) | ||||
| 
 | ||||
| ##     # observed case | ||||
| ##     ll.y.obs <- vector(mode='numeric', length=nrow(df.obs)) | ||||
| ##     ll.y.obs[df.obs$y.obs==1] <- with(df.obs[y.obs==1], plogis(B0 + Bxy * x + Bzy * z,log=T)) | ||||
| ##     ll.y.obs[df.obs$y.obs==0] <- with(df.obs[y.obs==0], plogis(B0 + Bxy * x + Bzy * z,log=T,lower.tail=FALSE)) | ||||
| 
 | ||||
| ##     ll <- sum(ll.y.obs) + sum(ll.w0y0) + sum(ll.w1y1) | ||||
| 
 | ||||
| ##     # unobserved case; integrate out y | ||||
| ##     ## case y = 1 | ||||
| ##     ll.y.1 <- vector(mode='numeric', length=nrow(df)) | ||||
| ##     pi.y.1 <- with(df,plogis(B0 + Bxy * x + Bzy*z, log=T)) | ||||
| ##     ## P(w=1| y=1)P(y=1) + P(w=0|y=1)P(y=1) = P(w=1,y=1) + P(w=0,y=1) | ||||
| ##     lls.y.1 <- colLogSumExps(rbind(log(ppv) + pi.y.1, log(1-ppv) + pi.y.1)) | ||||
|      | ||||
| ##     ## case y = 0 | ||||
| ##     ll.y.0 <- vector(mode='numeric', length=nrow(df)) | ||||
| ##     pi.y.0 <- with(df,plogis(B0 + Bxy * x + Bzy*z, log=T,lower.tail=FALSE)) | ||||
| 
 | ||||
| ##     ## P(w=1 | y=0)P(y=0) + P(w=0|y=0)P(y=0) = P(w=1,y=0) + P(w=0,y=0) | ||||
| ##     lls.y.0 <- colLogSumExps(rbind(log(npv) + pi.y.0, log(1-npv) + pi.y.0)) | ||||
| 
 | ||||
| ##     lls <- colLogSumExps(rbind(lls.y.1, lls.y.0)) | ||||
| ##     ll <- ll + sum(lls) | ||||
| ##     return(-ll) | ||||
| ##     }     | ||||
| ##     mlefit <- mle2(minuslogl = nll, control=list(maxit=1e6),method='L-BFGS-B',lower=list(B0=-Inf, Bxy=-Inf, Bzy=-Inf, ppv=0.001,npv=0.001), | ||||
| ##                    upper=list(B0=Inf, Bxy=Inf, Bzy=Inf,ppv=0.999,npv=0.999)) | ||||
| ##     return(mlefit) | ||||
| ## } | ||||
| 
 | ||||
| zhang.mle.dv <- function(df){ | ||||
|     df.obs <- df[!is.na(y.obs)] | ||||
|     df.unobs <- df[is.na(y.obs)] | ||||
| 
 | ||||
|     fp <- df.obs[(w_pred==1) & (y.obs != w_pred),.N] | ||||
|     p <- df.obs[(w_pred==1),.N] | ||||
|     fpr <- fp / p | ||||
|     fn <- df.obs[(w_pred==0) & (y.obs != w_pred), .N] | ||||
|     n <- df.obs[(w_pred==0),.N] | ||||
|     fnr <- fn / n | ||||
| 
 | ||||
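|     ## Here fpr and fnr are estimated from the labeled rows as the share of disagreements | ||||
|     ## among w_pred == 1 and w_pred == 0 cases, respectively. | ||||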
|     nll <- function(B0=0, Bxy=0, Bzy=0){ | ||||
| 
 | ||||
| 
 | ||||
|         ## observed case | ||||
|         ll.y.obs <- vector(mode='numeric', length=nrow(df.obs)) | ||||
|         ll.y.obs[df.obs$y.obs==1] <- with(df.obs[y.obs==1], plogis(B0 + Bxy * x + Bzy * z,log=T)) | ||||
|         ll.y.obs[df.obs$y.obs==0] <- with(df.obs[y.obs==0], plogis(B0 + Bxy * x + Bzy * z,log=T,lower.tail=FALSE)) | ||||
| 
 | ||||
|         ll <- sum(ll.y.obs) | ||||
| 
 | ||||
|         pi.y.1 <- with(df,plogis(B0 + Bxy * x + Bzy*z, log=T)) | ||||
|         pi.y.0 <- with(df,plogis(B0 + Bxy * x + Bzy*z, log=T,lower.tail=FALSE)) | ||||
| 
 | ||||
|         lls <- with(df.unobs, colLogSumExps(rbind(w_pred * colLogSumExps(rbind(log(fpr), log(1 - fnr - fpr)+pi.y.1)), | ||||
|         (1-w_pred) * colLogSumExps(rbind(log(1-fpr), log(1 - fnr - fpr)+pi.y.0))))) | ||||
|      | ||||
|         ll <- ll + sum(lls) | ||||
|         return(-ll) | ||||
|     }     | ||||
|     mlefit <- mle2(minuslogl = nll, control=list(maxit=1e6),method='L-BFGS-B',lower=c(B0=-Inf, Bxy=-Inf, Bzy=-Inf), | ||||
|                    upper=c(B0=Inf, Bxy=Inf, Bzy=Inf)) | ||||
|     return(mlefit) | ||||
| } | ||||
|   | ||||
| ## This uses the likelihood approach from Carroll page 353. | ||||
| ## assumes that we have a good measurement error model | ||||
| @ -208,10 +91,14 @@ my.mle <- function(df){ | ||||
| 
 | ||||
| run_simulation_depvar <- function(df, result, outcome_formula=y~x+z, proxy_formula=w_pred~y){ | ||||
| 
 | ||||
|     accuracy <- df[,mean(w_pred==y)] | ||||
|     (accuracy <- df[,mean(w_pred==y)]) | ||||
|     result <- append(result, list(accuracy=accuracy)) | ||||
|     error.cor.x <- cor(df$x, df$w - df$x) | ||||
|     result <- append(result, list(error.cor.x = error.cor.x)) | ||||
|     (error.cor.z <- cor(df$z, df$y - df$w_pred)) | ||||
|     (error.cor.x <- cor(df$x, df$y - df$w_pred)) | ||||
|     (error.cor.y <- cor(df$y, df$y - df$w_pred)) | ||||
|     result <- append(result, list(error.cor.x = error.cor.x, | ||||
|                                   error.cor.z = error.cor.z, | ||||
|                                   error.cor.y = error.cor.y)) | ||||
| 
 | ||||
|     model.null <- glm(y~1, data=df,family=binomial(link='logit')) | ||||
|     (model.true <- glm(y ~ x + z, data=df,family=binomial(link='logit'))) | ||||
| @ -220,7 +107,7 @@ run_simulation_depvar <- function(df, result, outcome_formula=y~x+z, proxy_formu | ||||
|     true.ci.Bxy <- confint(model.true)['x',] | ||||
|     true.ci.Bzy <- confint(model.true)['z',] | ||||
| 
 | ||||
| 
 | ||||
|     result <- append(result, list(cor.xz=cor(df$x,df$z))) | ||||
|     result <- append(result, list(lik.ratio=lik.ratio)) | ||||
| 
 | ||||
|     result <- append(result, list(Bxy.est.true=coef(model.true)['x'], | ||||
| @ -293,33 +180,26 @@ run_simulation_depvar <- function(df, result, outcome_formula=y~x+z, proxy_formu | ||||
|      | ||||
| 
 | ||||
|     # The Amelia documentation says to use a normal distribution for binary variables. | ||||
|     # The idvars columns are kept in the imputed datasets but excluded from the imputation model. | ||||
|     tryCatch({ | ||||
|         amelia.out.k <- amelia(df, m=200, p2s=0, idvars=c('y','ystar','w')) | ||||
|         mod.amelia.k <- zelig(y.obs~x+z, model='ls', data=amelia.out.k$imputations, cite=FALSE) | ||||
|         (coefse <- combine_coef_se(mod.amelia.k, messages=FALSE)) | ||||
|         est.x.mi <- coefse['x','Estimate'] | ||||
|         est.x.se <- coefse['x','Std.Error'] | ||||
|         result <- append(result, | ||||
|                          list(Bxy.est.amelia.full = est.x.mi, | ||||
|                               Bxy.ci.upper.amelia.full = est.x.mi + 1.96 * est.x.se, | ||||
|                               Bxy.ci.lower.amelia.full = est.x.mi - 1.96 * est.x.se | ||||
|                               )) | ||||
| 
 | ||||
|         est.z.mi <- coefse['z','Estimate'] | ||||
|         est.z.se <- coefse['z','Std.Error'] | ||||
|     amelia.out.k <- amelia(df, m=200, p2s=0, idvars=c('y','ystar','w')) | ||||
|     mod.amelia.k <- zelig(y.obs~x+z, model='ls', data=amelia.out.k$imputations, cite=FALSE) | ||||
|     (coefse <- combine_coef_se(mod.amelia.k, messages=FALSE)) | ||||
|     est.x.mi <- coefse['x','Estimate'] | ||||
|     est.x.se <- coefse['x','Std.Error'] | ||||
|     result <- append(result, | ||||
|                      list(Bxy.est.amelia.full = est.x.mi, | ||||
|                           Bxy.ci.upper.amelia.full = est.x.mi + 1.96 * est.x.se, | ||||
|                           Bxy.ci.lower.amelia.full = est.x.mi - 1.96 * est.x.se | ||||
|                           )) | ||||
| 
 | ||||
|         result <- append(result, | ||||
|                          list(Bzy.est.amelia.full = est.z.mi, | ||||
|                               Bzy.ci.upper.amelia.full = est.z.mi + 1.96 * est.z.se, | ||||
|                               Bzy.ci.lower.amelia.full = est.z.mi - 1.96 * est.z.se | ||||
|                               )) | ||||
| 
 | ||||
|     }, | ||||
|     error = function(e){ | ||||
|         message("An error occurred:\n",e) | ||||
|         result$error <- paste0(result$error,'\n', e) | ||||
|     }) | ||||
|     est.z.mi <- coefse['z','Estimate'] | ||||
|     est.z.se <- coefse['z','Std.Error'] | ||||
| 
 | ||||
|     result <- append(result, | ||||
|                      list(Bzy.est.amelia.full = est.z.mi, | ||||
|                           Bzy.ci.upper.amelia.full = est.z.mi + 1.96 * est.z.se, | ||||
|                           Bzy.ci.lower.amelia.full = est.z.mi - 1.96 * est.z.se | ||||
|                           )) | ||||
| 
 | ||||
|     return(result) | ||||
| 
 | ||||
| @ -393,7 +273,7 @@ run_simulation <-  function(df, result, outcome_formula=y~x+z, proxy_formula=NUL | ||||
|                                   Bzy.ci.lower.naive = naive.ci.Bzy[1])) | ||||
|                                    | ||||
| 
 | ||||
|     tryCatch({ | ||||
| 
 | ||||
|     amelia.out.k <- amelia(df, m=200, p2s=0, idvars=c('x','w')) | ||||
|     mod.amelia.k <- zelig(y~x.obs+z, model='ls', data=amelia.out.k$imputations, cite=FALSE) | ||||
|     (coefse <- combine_coef_se(mod.amelia.k, messages=FALSE)) | ||||
| @ -415,14 +295,7 @@ run_simulation <-  function(df, result, outcome_formula=y~x+z, proxy_formula=NUL | ||||
|                           Bzy.ci.lower.amelia.full = est.z.mi - 1.96 * est.z.se | ||||
|                           )) | ||||
| 
 | ||||
|     }, | ||||
|     error = function(e){ | ||||
|         message("An error occurred:\n",e) | ||||
|         result$error <-paste0(result$error,'\n', e) | ||||
|     } | ||||
|     ) | ||||
| 
 | ||||
|     tryCatch({ | ||||
|         temp.df <- copy(df) | ||||
|         temp.df <- temp.df[,x:=x.obs] | ||||
|         mod.caroll.lik <- measerr_mle(temp.df, outcome_formula=outcome_formula, proxy_formula=proxy_formula, truth_formula=truth_formula) | ||||
| @ -439,14 +312,6 @@ run_simulation <-  function(df, result, outcome_formula=y~x+z, proxy_formula=NUL | ||||
|                               Bzy.est.mle = coef['z'], | ||||
|                               Bzy.ci.upper.mle = ci.upper['z'], | ||||
|                               Bzy.ci.lower.mle = ci.lower['z'])) | ||||
|     }, | ||||
| 
 | ||||
|     error = function(e){ | ||||
|         message("An error occurred:\n",e) | ||||
|         result$error <- paste0(result$error,'\n', e) | ||||
|     }) | ||||
| 
 | ||||
|     tryCatch({ | ||||
| 
 | ||||
|         mod.zhang.lik <- zhang.mle.iv(df) | ||||
|         coef <- coef(mod.zhang.lik) | ||||
| @ -458,12 +323,6 @@ run_simulation <-  function(df, result, outcome_formula=y~x+z, proxy_formula=NUL | ||||
|                               Bzy.est.zhang = coef['Bzy'], | ||||
|                               Bzy.ci.upper.zhang = ci['Bzy','97.5 %'], | ||||
|                               Bzy.ci.lower.zhang = ci['Bzy','2.5 %'])) | ||||
|     }, | ||||
| 
 | ||||
|     error = function(e){ | ||||
|         message("An error occurred:\n",e) | ||||
|         result$error <- paste0(result$error,'\n', e) | ||||
|     }) | ||||
| 
 | ||||
|     ## What if we can't observe k -- most realistic scenario. We can't include all the ML features in a model. | ||||
|     ## amelia.out.nok <- amelia(df, m=200, p2s=0, idvars=c("x","w_pred"), noms=noms) | ||||
| @ -514,29 +373,29 @@ run_simulation <-  function(df, result, outcome_formula=y~x+z, proxy_formula=NUL | ||||
|                           Bzy.ci.lower.gmm = gmm.res$confint[2,1])) | ||||
| 
 | ||||
| 
 | ||||
|     tryCatch({ | ||||
|     mod.calibrated.mle <- mecor(y ~ MeasError(w_pred, reference = x.obs) + z, df, B=400, method='efficient') | ||||
|     (mod.calibrated.mle) | ||||
|     (mecor.ci <- summary(mod.calibrated.mle)$c$ci['x.obs',]) | ||||
|     result <- append(result, list( | ||||
|                                  Bxy.est.mecor = mecor.ci['Estimate'], | ||||
|                                  Bxy.ci.upper.mecor = mecor.ci['UCI'], | ||||
|                                  Bxy.ci.lower.mecor = mecor.ci['LCI']) | ||||
|                      ) | ||||
|     ## tryCatch({ | ||||
|     ## mod.calibrated.mle <- mecor(y ~ MeasError(w_pred, reference = x.obs) + z, df, B=400, method='efficient') | ||||
|     ## (mod.calibrated.mle) | ||||
|     ## (mecor.ci <- summary(mod.calibrated.mle)$c$ci['x.obs',]) | ||||
|     ## result <- append(result, list( | ||||
|     ##                              Bxy.est.mecor = mecor.ci['Estimate'], | ||||
|     ##                              Bxy.ci.upper.mecor = mecor.ci['UCI'], | ||||
|     ##                              Bxy.ci.lower.mecor = mecor.ci['LCI']) | ||||
|     ##                  ) | ||||
| 
 | ||||
|     (mecor.ci <- summary(mod.calibrated.mle)$c$ci['z',]) | ||||
|     ## (mecor.ci <- summary(mod.calibrated.mle)$c$ci['z',]) | ||||
| 
 | ||||
|     result <- append(result, list( | ||||
|                                  Bzy.est.mecor = mecor.ci['Estimate'], | ||||
|                                  Bzy.ci.upper.mecor = mecor.ci['UCI'], | ||||
|                                  Bzy.ci.lower.mecor = mecor.ci['LCI']) | ||||
|                      ) | ||||
|     }, | ||||
|     error = function(e){ | ||||
|         message("An error occurred:\n",e) | ||||
|         result$error <- paste0(result$error, '\n', e) | ||||
|     } | ||||
|     ) | ||||
|     ## result <- append(result, list( | ||||
|     ##                              Bzy.est.mecor = mecor.ci['Estimate'], | ||||
|     ##                              Bzy.ci.upper.mecor = mecor.ci['UCI'], | ||||
|     ##                              Bzy.ci.lower.mecor = mecor.ci['LCI']) | ||||
|     ##                  ) | ||||
|     ## }, | ||||
|     ## error = function(e){ | ||||
|     ##     message("An error occurred:\n",e) | ||||
|     ##     result$error <- paste0(result$error, '\n', e) | ||||
|     ## } | ||||
|     ## ) | ||||
| ##    clean up memory | ||||
| ##    rm(list=c("df","y","x","g","w","v","train","p","amelia.out.k","amelia.out.nok", "mod.calibrated.mle","gmm.res","mod.amelia.k","mod.amelia.nok", "model.true","model.naive","model.feasible")) | ||||
|      | ||||
|  | ||||
| @ -31,8 +31,8 @@ summarize.estimator <- function(df, suffix='naive', coefname='x'){ | ||||
|                           var.est = var(.SD[[paste0('B',coefname,'y.est.',suffix)]]), | ||||
|                           est.upper.95 = quantile(.SD[[paste0('B',coefname,'y.est.',suffix)]],0.975,na.rm=T), | ||||
|                           est.lower.95 = quantile(.SD[[paste0('B',coefname,'y.est.',suffix)]],0.025,na.rm=T), | ||||
|                           mean.ci.upper = mean(.SD[[paste0('B',coefname,'y.ci.upper.',suffix)]]), | ||||
|                           mean.ci.lower = mean(.SD[[paste0('B',coefname,'y.ci.lower.',suffix)]]), | ||||
|                           mean.ci.upper = mean(.SD[[paste0('B',coefname,'y.ci.upper.',suffix)]],na.rm=T), | ||||
|                           mean.ci.lower = mean(.SD[[paste0('B',coefname,'y.ci.lower.',suffix)]],na.rm=T), | ||||
|                           ci.upper.975 = quantile(.SD[[paste0('B',coefname,'y.ci.upper.',suffix)]],0.975,na.rm=T), | ||||
|                           ci.upper.025 = quantile(.SD[[paste0('B',coefname,'y.ci.upper.',suffix)]],0.025,na.rm=T), | ||||
|                           ci.lower.975 = quantile(.SD[[paste0('B',coefname,'y.ci.lower.',suffix)]],0.975,na.rm=T), | ||||
|  | ||||