commit c5e0a0171397981e8f6a0ad1ad1ee3ce8e0fa15f Author: nathante Date: Wed Feb 22 19:00:30 2023 +0000 Update on Overleaf. diff --git a/#Makefile# b/#Makefile# new file mode 100644 index 0000000..93ee860 --- /dev/null +++ b/#Makefile# @@ -0,0 +1,50 @@ +#!/usr/bin/make +all: $(patsubst %.Rtex,%.pdf,$(wildcard *.Rtex)) + +# refs.bib: + +# wget -r -q -O refs.bib "http://127.0.0.1:23119/better-bibtex/export/collection?/2/Nate//Change - Population Ecology.bibtex" + +sync.remember: + scp klone:/gscratch/comdata/users/nathante/ml_measurement_error_public/simulations/remembr.RDS . + scp klone:/gscratch/comdata/users/nathante/ml_measurement_error_public/simulations/remember_irr.RDS . + scp klone:/gscratch/comdata/users/nathante/ml_measurement_error_public/simulations/remember_grid_sweep.RDS . + scp klone:/gscratch/comdata/users/nathante/ml_measurement_error_public/simulations/robustness_1.RDS . + scp klone:/gscratch/comdata/users/nathante/ml_measurement_error_public/simulations/robustness_2.RDS . + scp klone:/gscratch/comdata/users/nathante/ml_measurement_error_public/simulations/robustness_3.RDS . + scp klone:/gscratch/comdata/users/nathante/ml_measurement_error_public/simulations/robustness_3_proflik.RDS . + scp klone:/gscratch/comdata/users/nathante/ml_measurement_error_public/simulations/robustness_4.RDS . + scp klone:/gscratch/comdata/users/nathante/ml_measurement_error_public/simulations/robustness_1_dv.RDS . + scp klone:/gscratch/comdata/users/nathante/ml_measurement_error_public/simulations/robustness_2_dv.RDS . + scp klone:/gscratch/comdata/users/nathante/ml_measurement_error_public/simulations/robustness_3_dv.RDS . + scp klone:/gscratch/comdata/users/nathante/ml_measurement_error_public/simulations/robustness_3_dv_profliko.RDS . + scp klone:/gscratch/comdata/users/nathante/ml_measurement_error_public/simulations/robustness_4_dv.RDS . + scp klone:/gscratch/comdata/users/nathante/ml_measurement_error_public/civil_comments/dv_perspective_example.RDS . 
+ scp klone:/gscratch/comdata/users/nathante/ml_measurement_error_public/civil_comments/iv_perspective_example.RDS . + + +%.tex: %.Rtex remembr.RDS resources/*.R # refs.bib + Rscript -e "library(knitr); knit('$<')" + +autoupdate: + latexmk -f -xelatex -pvc $< + +%.pdf: %.tex + latexmk -f -pdf $< + +clean: + latexmk -C *.tex + rm -f article.tex + rm -f *.bbl + rm -f *.run.xml + +viewpdf: all + evince *.pdf + +spell: + aspell -c -t --tex-check-comments -b text.tex + +pdf: all + +.PHONY: clean all refs.bib autoupdate +.PRECIOUS: %.tex diff --git a/#article.Rtex# b/#article.Rtex# new file mode 100644 index 0000000..bb3676e --- /dev/null +++ b/#article.Rtex# @@ -0,0 +1,1042 @@ +\documentclass[floatsintext, draftfirst, man]{apa7} +<>= +library(knitr) +library(ggplot2) +library(data.table) +knitr::opts_chunk$set(fig.show='hold') +f <- function (x) {formatC(x, format="d", big.mark=',')} +format.percent <- function(x) {paste(f(x*100),"\\%",sep='')} + +theme_set(theme_bw()) +source('resources/functions.R') +source('resources/variables.R') +source('resources/real_data_example.R') +@ + + +\usepackage{epstopdf}% To incorporate .eps illustrations using PDFLaTeX, etc. 
+\usepackage{subcaption}% Support for small, `sub' figures and tables +\usepackage{tikz} +\usetikzlibrary{positioning, shapes, arrows, shadows} + +\def \parrotpdf {\includegraphics[]{parrot.pdf}} +\DeclareUnicodeCharacter{1F99C}{\parrotpdf} +\usepackage{tabularx} +\usepackage[utf8]{inputenc} +\usepackage{wrapfig} +\usepackage[T1]{fontenc} +\usepackage{textcomp} +\usepackage{listings} +\usepackage{xcolor} + +%New colors defined below +\definecolor{codegreen}{rgb}{0,0.6,0} +\definecolor{codegray}{rgb}{0.5,0.5,0.5} +\definecolor{codepurple}{rgb}{0.58,0,0.82} +\definecolor{backcolour}{rgb}{0.95,0.95,0.92} + +%Code listing style named "mystyle" +\lstdefinestyle{mystyle}{ + backgroundcolor=\color{backcolour}, commentstyle=\color{codegreen}, + keywordstyle=\color{magenta}, + numberstyle=\tiny\color{codegray}, + stringstyle=\color{codepurple}, + basicstyle=\ttfamily\footnotesize, + breakatwhitespace=false, + breaklines=true, + captionpos=b, + keepspaces=true, + numbers=left, + numbersep=5pt, + showspaces=false, + showstringspaces=false, + showtabs=false, + tabsize=2 +} + +% \usepackage[garamond]{mathdesign} + +% \usepackage[letterpaper,left=1in,right=1in,top=1in,bottom=1in]{geometry} + +% packages i use in essentially every document +\usepackage{graphicx} +\usepackage{enumerate} + +% packages i use in many documents but leave off by default +\usepackage{amsmath}%}, amsthm, amssymb} +\DeclareMathOperator*{\argmin}{arg\,min} % thin space, limits underneath in displays +\DeclareMathOperator*{\argmax}{arg\,max} % thin space, limits underneath in displays + + +\usepackage{subcaption} +% import and customize urls +% \usepackage[usenames,dvipsnames]{color} +% \usepackage[breaklinks]{hyperref} + +\hypersetup{colorlinks=true, linkcolor=black, citecolor=black, filecolor=blue, + urlcolor=blue, unicode=true} + +% add bibliographic stuff +\usepackage[american]{babel} +\usepackage{csquotes} +\usepackage[natbib=true, style=apa, sortcites=true, backend=biber]{biblatex} 
+\addbibresource{Bibliography.bib} +\DeclareLanguageMapping{american}{american-apa} + +\defbibheading{secbib}[\bibname]{% + \section*{#1}% + \markboth{#1}{#1}% + \baselineskip 14.2pt% + \prebibhook} + +\def\citepos#1{\citeauthor{#1}'s (\citeyear{#1})} +\def\citespos#1{\citeauthor{#1}' (\citeyear{#1})} +\newcommand\TODO[1]{\textsc{\color{red} #1}} + +% I've gotten advice to make this as general as possible to attract the widest possible audience. +\title{Automated Content Misclassification Causes Bias in Regression. Can We Fix It? Yes We Can!} + +\shorttitle{Automated Content Misclassification} + +\authorsnames[1,2,3]{Nathan TeBlunthuis, Valerie Hase, Chung-hong Chan} +\authorsaffiliations{{{Department of Communication Studies, Northwestern University}, {School of Information, University of Michigan}}, {LMU Munich}, {GESIS - Leibniz-Institut für Sozialwissenschaften}} +\leftheader{TeBlunthuis, Hase \& Chan} + +\keywords{ +Content Analysis; Machine Learning; Classification Error; Attenuation Bias; Simulation; Computational Methods; Big Data; AI; +} + +\abstract{ +Automated classifiers have become widely popular measurement devices in communication science. These classifiers, often built via supervised machine learning (SML), can categorize large, statistically powerful samples of data ranging from text to images and video. +Even the most accurate non-trivial automated classifiers make errors that cause biased inferences in downstream statistical findings—unless analyses account for these errors. +As we show in a systematic literature review of SML applications, +communication scholars rarely acknowledge this important problem of ``ignoring misclassification in automated content analysis''. +In principle, existing statistical methods that use ``gold standard'' validation data, such as that created by human annotators, can account for misclassification and produce correct statistical results. 
+We introduce and test such methods, including a new method we design and implement in the R package \texttt{misclassificationmodels}, via Monte-Carlo simulations designed to reveal each method's limitations. Based on these results, we provide recommendations for addressing misclassification errors via statistical correction methods. In sum, automated classifiers, even those below common accuracy standards, can be useful for measurement with careful study design and appropriate correction methods. +} +\begin{document} +\maketitle +%\section{Introduction} + + +\emph{Automated classifiers} (ACs) based on supervised machine learning (SML) have rapidly gained popularity +as part of the \emph{automated content analysis} toolkit in communication science \citep{baden_three_2022}. With these measurement devices, researchers can categorize large samples of text, images, video or other types of data into predefined categories \citep{scharkow_content_2017}. In communication science, studies for instance use ACs to automatically classify topics \citep{vermeer_online_2020} or frames \citep{opperhuizen_framing_2019} in news articles or social media posts. + +% TODO: restore citation to fortuna_toxic_2020 below +However, there is increasing concern about the validity of automated content analysis \citep{baden_three_2022, grimmer_text_2013}. As we demonstrate using the Perspective toxicity classifier, even very accurate ACs make \emph{misclassifications} +which can lead to incorrect statistical findings—unless correctly modeled \citep{scharkow_how_2017, fong_machine_2021}. Research areas where ACs have the greatest potential—e.g., content moderation, social media bots, affective polarization, or radicalization—are haunted by the specter of methodological questions related to misclassification \citep{baden_three_2022, rauchfleisch_false_2020}: How accurate must an AC be to usefully measure a variable? 
When—if ever—should an AC built for one context be used in another \citep{gonzalez-bailon_signals_2015, hede_toxicity_2021}? How do biases that an AC learns from training data affect findings of downstream analyses \citep{millimet_accounting_2022}? Knowing that high classification accuracy limits the risks of misleading inference, careful researchers might use only those ACs having excellent predictive performance. Yet, important social scientific concepts such as news tone \citep{van_atteveldt_validity_2021} %even ones as seemingly straightforward as sentiment \citep{van_atteveldt_validity_2021}, toxicity \citep{fortuna_toxic_2020} +or civility \citep{hede_toxicity_2021} +%and institutional frameworks \citep{rice_machine_2021} + can be challenging to classify with high performance. + +Despite these concerns, a systematic literature review of \emph{N} = 48 studies employing SML-based text classification to study substantial empirical questions shows that the problem of \emph{ignoring misclassification} is widespread. This review demonstrates a troubling lack of attention to the threats ACs introduce—and virtually no mitigation of such threats. In the current state of affairs, ACs are unlikely to be useful for studying nuanced concepts. Researchers will either draw misleading conclusions from inaccurate ACs or avoid ACs in favor of costly methods such as manually coding large samples \citep{van_atteveldt_validity_2021}. + +Our primary contribution is to \emph{introduce and test statistical methods for addressing misclassification} with the goal of rescuing ACs from this dismal state \citep{carroll_measurement_2006, buonaccorsi_measurement_2010, yi_handbook_2021}. We consider some recently some proposed methods including \citet{fong_machine_2021}'s generalized method of moments (GMM) calibration method, \citet{zhang_how_2021}'s pseudo-likelihood models, and \citet{blackwell_multiple_2012}'s application of imputation methods. 
To overcome limitations of the methods above, we develop our own specialized implementation of a general likelihood modeling framework drawn from the statistical literature on measurement error \citep{carroll_measurement_2006}, which we implement via the experimental R package \texttt{misclassificationmodels}. + + We test the error correction methods using Monte Carlo simulations of four prototypical situations representative of those identified by our systematic review: Using ACs to measure either (1) a dependent or (2) an independent variable where the classifier makes misclassifications that are either (a) easy to correct or (b) more difficult (e.g., when an AC is biased and misclassifications and covariates are correlated). +The more difficult cases are important. +As the real-data example we provide in the next section demonstrates, even modest biases in very accurate ACs can cause misleading statistical findings. + +% Such biases can easily result when classifier errors affect human behavior, such as that of social media moderators \maskparencite{teblunthuis_effects_2021}. Studies using classifiers from APIs that are also used in sociotechnical systems therefore be particularly prone to to differential error, which can cause misleading statistics even when classification accuracy is high. + +% Our Supplementary Materials present numerous extensions of these scenarios. We show that none of the existing error correction methods are effective in all scenarios. +%— multiple imputation fails in scenario 2; GMM calibration fails in scenario 1b and is not designed for scenario 2; and the pseudo-likelihood method fails in scenario 1 and in scenario 2b. When correctly applied, our likelihood modeling is the only correction method recovering the true parameters in all scenarios. %We provide our implementation as an R package. + +% , and our approach based on maximum likelihood methods \citep{carroll_measurement_2006} . 
+ + %By doing so, we follow a handful of recent studies in which social scientists have used samples of human-labeled \emph{validation data} to account for misclassification by automated classifiers. + + % This paragraph is likely to get cut, but its useful so that we have a working outline: In what follows, we begin with an overview of automated content analysis to describe how AC-based measures can affect downstream analyses and how these errors thus threaten progress in automated text classification often used in the field of Computational Social Science (CSS). We substantiate our claims via a systematic literature review of \emph{N}=49 empirical studies employing SML for classification (see \nameref{appendix:lit.review} for details). + + +% Although the methods above are all effective in bivariate least squares regression when an AC is used to measure a covariate, validation data are error-free, and measurement error is \emph{nondifferential} (conditionally independent of the outcome given other covariates), +% these methods all have limitations in more general cases. Below, we present simulated scenarios in which each of these methods fail to recover the true parameters. + +% so long as the coders' errors are conditionally independent given observable variables. + +% In our discussion section, we provide detailed recommendations based on our literature review and our simulations. +According to our simulations, even biased classifiers with low predictive performance can be useful in conjunction with appropriate validation data. +As a result, we are optimistic about the potential of ACs for communication science and beyond if researchers statistically correct for misclassification. +Current practices of ``validating'' ACs by publishing misclassification rates are important but provide no safeguard against statistical distortions. 
+ +In sum, this paper makes a methodological contribution by introducing the often-ignored problem of ``ignoring misclassification in automated content analysis'' by testing approaches to address this problem via Monte Carlo simulations and introducing a new method for error correction. +The required assumptions for error correction methods are no more difficult than those already commonly adopted in traditional content analyses—and much more reasonable than the current default approach +This method can succeed where others fail, is easily applied by experienced regression modelers, and is straightforward to extend. +Profoundly, our contributions suggest automated content will progress not through ever more accurate classifiers, but through rigorous human-coding and error modeling. + + +\section{Illustrating the Problem of Bias through Misclassification in the Perspective API} + +There is no perfect AC. All non-trival ACs make errors. +This inevitable misclassification causes bias in statistical inference, and in the estimation of regression models in particular \citep{carroll_measurement_2006, scharkow_how_2017}. +This bias can lead researchers to make both type-1 (false discovery) and type-2 errors (failure to reject the null) in hypotheses tests. Here, we illustrate the problem of bias as a consequence of AC-based misclassification for a common example in communication research: detecting and understanding harmful social media content. In recent years, communication researchers have increasingly employed automated tools, and the Perspective toxicity classifier in particular, \citep{cjadams_jigsaw_2019} to detect toxicity in online content \citep[e.g.,][]{hopp_social_2019, kim_distorting_2021, salminen_topic-driven_2020}. To illustrate biases this AC and others like it may introduce, we compare using toxicity scores for comments created by manual content analysis to automated classifications made by Perspective. 
+ + To do so, we use the Civil Comments dataset, which was released in 2019 by Jigsaw, the Alphabet corporation subsidiary which develops Perspective. This dataset has \Sexpr{f(dv.example[['n.annotated.comments']])} comments in English made on independent news sites that were all manually coded for ``toxicity'' and for whether they disclose each of several aspects of personal identity including race and ethnicity. + +We then also obtained AC-based variables for the toxicity of comments from the Perspective API in November 2022. Perspective's toxicity classifier performs very well in this dataset, with an accuracy of \Sexpr{format.percent(iv.example[['civil_comments_accuracies']][['toxicity_acc']])} and an F1 score of \Sexpr{round(iv.example[['civil_comments_f1s']][['toxicity_f1']],2)}. Nevertheless, if we treat the human annotations as the ground-truth, the classifier is modestly biased. For instance, it disproportionately misclassifies comments that disclose a racial or ethnic identity as toxic (Pearson's $\rho=\Sexpr{round(dv.example[['civil_comments_cortab']]['toxicity_error','race_disclosed'],2)}$). +As a result of these misclassifications, regression analyses of the Civil Comments dataset using Perspective to measure toxicity can produce different results than those using human annotations. + +In our first example, we consider the logistic regression model predicting whether a comment contains \emph{racial or ethnic identity disclosure} using \emph{number of likes}, \emph{toxicity} and the interaction of these two independent variables as covariates. Although this is a toy example constructed to illustrate a statistical problem, it is a realistic investigation of how disclosing aspects of one's identity on social media relates to the normative reception of one's behavior. + +\begin{figure}[htbp!] 
+\centering +\begin{subfigure}{\linewidth} +<>= +p <- plot.civilcomments.iv.example() +print(p) +@ +\subcaption{\emph{Example 1} illustrates bias when automatic classifications are a covariate in logistic regression.\label{fig:real.data.example.iv}} +\end{subfigure} + +\begin{subfigure}{\linewidth} +<>= +p <- plot.civilcomments.dv.example() +print(p) +@ +\subcaption{\emph{Example 2} illustrates bias in regression when automatic classifications are the outcome in logistic regression. \label{fig:real.data.example.dv}} + +\end{subfigure} + +\caption{Misclassification by Perspective causes bias in regression analyses as shown in the annotated civil comments dataset. +Figure \ref{fig:real.data.example.iv} compares a model using automatic toxicity classifications to a model using human toxicity annotations and shows that the 95\% confidence interval of the coefficient for likes contains 0. +In Figure \ref{fig:real.data.example.dv}, a model predicting automatic toxicity classifications for toxicity detects a negative correlation between likes and toxicity that is not found when human annotations are used instead. A \Sexpr{format.percent(iv.sample.prop)} random sample of \Sexpr{f(iv.sample.count)} annotations does not provide sufficient statistical power to distinguish the false discovery from 0. +In both examples, a random \Sexpr{format.percent(iv.sample.prop)} sample of \Sexpr{f(iv.sample.count)} annotations does not provide sufficient statistical power to distinguish the coefficient for likes from 0. Yet the methods we introduce can use this sample to model the misclassifications and obtain results close to those using the full dataset of annotations. +\label{fig:real.data.example} +} +\end{figure} + +As shown in Figure \ref{fig:real.data.example}, a researcher using Perspective's automatic toxicity classifications could draw different conclusions than if she had instead used the human annotations. 
Specifically, evidence using the AC could lead her to reject her hypothesized direct relationship between likes and identity disclosure conclude and to instead conclude that the correlation between likes and disclosure is entirely mediated by toxicity. +This is because the coefficient for likes is statistically indistinguishable from 0 and the coefficient for the interaction between likes and toxicity is positive and well-estimated. However, using the human annotations, she would have instead found a subtle positive direct relationship between likes and identity disclosure. + +Obtaining such a large number of high-quality human annotations is impractical for all but the most well-resourced research teams. The direct relationship between likes and identity disclosure is so subtle that even a random sample of \Sexpr{format.percent(iv.sample.prop)} of annotations lacks sufficient statistical power to detect it. +However, our method can use this sample of annotations to correct the bias introduced by Perspective's misclassifications while preserving enough statistical power to detect the direct relationship between likes and identity disclosure at the 95\% confidence level with estimates similar to those in the model using all \Sexpr{f(dv.example[['n.annotated.comments']])} annotations. + +This first example demonstrates that misclassification errors, even from a very accurate model in a large dataset can mislead a researcher into rejecting a hypothesis of a nonzero effect. +Our second example shows that the problem of misclassification bias can also lead to false discovery by driving detection of a nonzero relationship. + +For simplicity, our second example uses the same variables as the first. Only this time \emph{toxicity} is the outcome predicted by a logistic regression model with covariates a comment's number of \emph{likes}, \emph{racial or ethnic identity disclosure}, and the interaction of these two variables. 
+As shown in Figure \ref{fig:real.data.example.dv}, using Perspective's automatic classifications to measure toxicity results in a small negative coefficient for likes, but there is no detectable relationship in the dataset of annotations. The model using a \Sexpr{format.percent(dv.sample.prop) } sample of \Sexpr{f(dv.sample.count)} annotations cannot rule out such a weak relationship (the estimated effect using the AC is in the 95\% confidence interval), but our error correction method using this sample and Perspective's automatic classifications together can do so. + +These examples show that that misclassification can produce misleading statistical findings, even with a very accurate and modestly biased automatic classifier. If we consider hypothesis tests of non-zero coefficients, automatic classifications instead of human annotations caused both type-I and type-II errors in our examples. Although the effect sizes in these cases are rather subtle and would not be detectable in smaller datasets, such small effects commonly found using large datasets can easily result from subtle biases in observational study designs \citep{kaplan_big_2014}. Such small effect sizes may not appear practically or theoretically important, but note that the consequences of bias from automatic classification for coefficients in these examples (i.e., the interaction term in the first example and \emph{identity disclosure} in the second) are larger. +Of course, with a less accurate or more biased AC, misclassification will be even more prone to cause type-I and type-II errors in large effect sizes. +Importantly, these errors are correctable using human annotations. Although this example required \Sexpr{iv.sample.count} annotations, a large number representing considerable effort, to consistently do so, this is a small fraction of the entire dataset. +Additional details on these examples are available in Appendix \ref{appendix:perspective}. + +We have now illustrated the problem. 
Next we will discuss it in greater depth. + +\subsection{Problem I: Misclassification can cause anti-conservative bias} + +A large dataset does not reduce such inferential bias \citep{carroll_measurement_2006, van_smeden_reflection_2020}. It is often believed—incorrectly—that misclassification causes only conservative bias (i.e., bias towards 0) because this is true in the simplest cases of least squares regression—when measurement error in the only covariate is classical or when measurement error in the outcome is unbiased +\citep{carroll_measurement_2006, loken_measurement_2017, van_smeden_reflection_2020}.\footnote{Measurement error is \emph{classical} when $W = X + \xi$ because the variance of an AC's predictions is greater than the variance of the true value \citep{carroll_measurement_2006}. If nondifferential measurement error is not classical then it is called Berkson, and we would write $X = W + \xi$ instead of $W = X + \xi$. In general, Berkson measurement error is easier to deal with than classical error. It is hard to imagine how a AC would have Berkson errors (the predictions would have to have lower variance than the training data), so, following prior work, we do not consider Berkson errors \citep{fong_machine_2021, zhang_how_2021}.} As a result, researchers interested in a hypothesis of a statistically significant relationship may not consider misclassification an important threat to validity \citep{loken_measurement_2017}. However, there are at least two compelling reasons that misclassification is a serious concern. + +First, the inferential bias that misclassification causes is not necessarily conservative \citep{carroll_measurement_2006, loken_measurement_2017, van_smeden_reflection_2020}. In logistic regression or other nonlinear models, random measurement error can cause bias away from 0. 
Moreover, differential
If certain ACs become standard measurement devices within a research area, such as the LIWC dictionary to measure sentiment \citep{boukes_whats_2020}, +%\citep{dobbrick_enhancing_2021} +Google's Perspective API used to measure toxicity \citep{hosseini_deceiving_2017} or Botometer used to classify social media bots \citep[see, for a critical discussion][]{rauchfleisch_false_2020}, such research areas may become confused by systematic biases. For example, \citet{scharkow_how_2017} argue that media's ``minimal effects'' on political opinions and behavior may be an artifact of how many study designs in this area have common sources of measurement error that created systematic bias towards 0. Conversely, if researchers selectively report statistically significant hypothesis tests, measurement error can introduce an upward bias in the magnitude of reported effect sizes and contribute to a replication crisis \citep{loken_measurement_2017}. + + +% First, we note that when the anticipated effect size is large enough, traditional content analysis of a random sample has the advantage over the considerable complexity of automated content analysis. +% ACs should be used when costs prohibit traditional content analysis of sample size sufficient to detect anticipated effect sizes, but where collective a relatively small sample of validation data is tractable. + +% When the data used to train an AC is not representative of the study population, as is the case with commercial APIs or other black-box classifiers, this increases the risk of differential measurement error, which can introduce extremely misleading forms of statistical bias. Even this form of error can be addressed. + + +% Therefore, we recommend reporting (and preregistering) at least two aforementioned corrective methods in addition to uncorrected estimates. 
When machine learning classification is used for an independent variable, we recommend multiple imputation because it is robust to differential error and it simple to implement. However, our simulations show that multiple imputation does not work well when machine learning classification is used for the dependent variable. Greater care may be required if measurement error may be differential, because specifying the error model may open many degrees of research freedom and plausible error moe +\section{Misclassification in Automated Content Analysis: Reviewing Reporting and Error Correction Practices} + +% In traditional content analysis, humans use their judgement to classify messages, and automated content analysis uses computers as an instrument to + +% % can be defined either as a research approach or as an instrument. + +% In this paper, automated content analysis is defined as a research approach, which is a sub-type of content analysis for +% In contrast to manual content analysis, the difference is that the instrument used to code messages shifts from human judgment to computer algorithms \citep{scharkow2017content}. These computer algorithms, which can also be confusingly defined as ``automated content analysis" in the instrumental sense, are called automated coding techniques (versus manual coding techniques) in this paper. + + +% Social scientists have long recognized that measurement error can be an important methodological concern, but this concern has often been neglected \citep{schwartz_neglected_1985}. + + +% There have been several papers outlining what automated coding techniques are in the "toolbox" of communication researchers (key papers are \citep{scharkow2017content} and \citep{boumans:2015:tst}). +% Unsupervised and supervised machine learning procedures are deployed for coding. +% There has been discussion on the best practices for deploying unsupervised machine learning for communication research \citep{maier:2018:ALT}. 
+% This paper is going to focus only on classification. +% Researchers have raised concerns about validity issues of the approach \citep{scharkow2017content}. And by definition, the coding made by this technique is an imperfect surrogate of manual coding \citep{boumans:2015:tst}. When machine-classified surrogates are used in regression analyses for ``making replicable and valid inferences from texts", measurement errors are introduced \citep{fong_machine_2021}. A formal mathematical definition of these measurement errors is available later. + +% In the next section, all communication research studies with SML are reviewed to show how researchers deals with these measurement errors. + +% Furthermore, human classifiers also make errors and none of the prior methods consider how errors in the validation data can bias statistical results \citep{geis_statistical_2021, song_validations_2020, bachl_correcting_2017, scharkow_how_2017}. + + % Changeme to bring back citations after ICA + Misclassification is a long-standing concern in + the content analysis literature which has extensively studied difficulties in human-labeling through the framework of intercoder reliability \citep{krippendorff_reliability_2004}. + %, hayes_answering_2007, gwet_computing_2008}. + The increasing use of metrics such as Krippendorf's $\alpha$ + %and Gwet's AC \citep{gwet_computing_2008, krippendorff_reliability_2004}, + demonstrates transparency efforts in reporting imperfect manual annotations \citep{lovejoy_assessing_2014}. Moreover, \citet{bachl_correcting_2017} introduced methods for correcting proportion estimates using data from multiple independent human coders. +Despite this awareness of threats posed by manual misclassification, our review below demonstrates that misclassification by ACs is often downplayed. + +Content analysis focuses on ``\emph{making replicable and valid inferences from texts (or other meaningful matter) to the contexts of their use}'' \citep[p. 
24, emphasis in original]{krippendorff_content_2018}. Automated content analysis, where computers are used as measurement devices, has gained traction in communication science \citep{baden_three_2022, junger_unboxing_2022} \maskparencite{hase_computational_2022}. +One common automated content analysis method is supervised machine learning (SML) \citep{scharkow_content_2017}.\footnote{Automated content analysis includes a range of other methods both for assigning content to predefined categories (e.g., dictionaries) and for assigning content to unknown categories (e.g., topic modeling) \citep{grimmer_text_2013}. Here, we focus on SML-based ACs. However, our arguments extend to other deductive approaches introducing misclassifications such as dictionary-based classification.} In essence, the procedure is to train an algorithm—e.g., a naïve Bayes classifier, decision tree, or artificial neural network—on manually coded material as the training set. The trained classifier is then used to predict categories in new, as of yet unseen data. Automatic classifiers enable researchers to inexpensively measure categorical variables in large data sets of digitized media. This promises to be useful for study designs requiring large samples such as to infer effect sizes smaller than would be possible using a sample size that humans could feasibly classify. + +But are scholars aware that misclassification by ACs poses threats to the validity of downstream analyses? Although such issues in the context of manual content analysis have attracted much debate \citep{bachl_correcting_2017}, this is less true for misclassification by newly popular automatic classifiers. 

To understand how social scientists, including communication scholars, use SML-based classifiers to construct variables and engage with the problem of misclassification, we conducted a systematic literature review (see Appendix \ref{appendix:lit.review} in our Supplement for details\footnote{Anonymized link for review: \url{https://osf.io/pyqf8/?view_only=c80e7b76d94645bd9543f04c2a95a87e}}). Our review builds on studies identified by recent reviews on automated content analysis, including SML \citep{baden_three_2022, hase_computational_2022, junger_unboxing_2022, song_validations_2020}. Our goal in our review is not to comprehensively review all SML studies
%\footnote{In fact, our review likely underestimates the use of the method, as we focused on text-based SML methods in the social science domain employed for empirical analyses.}
but to provide a picture of common practices, with an eye toward awareness of misclassification and its statistical implications.

We identified a total of 48 empirical studies published between 2013 and 2021—more than half of which were published in communication journals—which employed SML-based text classification to create 146 variables. Studies used SML-based text classification to perform tasks such as identifying frames \citep{opperhuizen_framing_2019} or topics \citep{vermeer_online_2020}. They often employed SML-based ACs to create dichotomous (50\%) or other categorical (22.9\%) variables\footnote{Metric variables were also created in 35.4\% of studies, mostly via the non-parametric method by Hopkins and King \citeyear{hopkins_method_2010} estimating proportions instead of classifying documents, something we do not focus on.}. 
Although 89.6\% of empirical studies used SML-based ACs to report descriptive statistics,
%— from the prevalence of topics in online news \citep{vermeer_online_2020} to incivility in social media posts \citep{su_uncivil_2018} —,
many also employed automated classification for downstream statistical analyses by using ACs as dependent (43.8\%) and independent (39.6\%) variables in multiple regression models. These regression analyses tend to be reported in higher-status journals compared to papers only reporting proportions.

Given the rising popularity of SML-based text classification, our review indicates a worrying \emph{lack of transparency when reporting SML-based text classification}, similar to that reported in previous studies \citep{reiss_reporting_2022}: A large share of studies do not report important methodological decisions related to the sampling and sizes of training and test sets or to intercoder reliability (see Appendix \ref{appendix:lit.review}). This lack of transparency concerning model validation not only limits the degree to which researchers can evaluate studies, but also makes replicating such analyses to correct for misclassification nearly impossible. Most importantly, our review finds that \emph{studies almost never reflected upon or corrected for misclassification in their automated content analyses}. According to our review, only 18.8\% of studies discussed in any way the possibility that an AC misclassified texts. Only a single article reported using error correction methods.

\subsection{Is Transparency about Misclassification Enough?}
%TODO Uncomment below

Commonly recommended practices in automated content analyses address the threats of misclassification through \emph{transparency} in the form of reporting metrics such as precision, recall, F1 and AUC scores computed using human-classified validation data \citep{grimmer_text_2013}.
%, pilny_using_2019}. 

These metrics are intended to promote confidence in inferences resulting from the use of ACs by demonstrating high predictiveness. However, our literature review indicates that they are not always included in reporting, at least when it comes to SML-based text classifications.
%Moreover, such metrics can limit the potential impact of measurement error if they dissuade researchers from using inaccurate classifiers.

Moreover, high predictiveness according to these metrics may be less protective from measurement error than it seems.
Algorithms and models for building effective automated classifiers were developed in the culture of algorithmic modeling associated with fields like computer science and management \citep{breiman_statistical_2001}.
As a paradigm, SML takes the opposite position on the bias-variance tradeoff from conventional statistics. Its methods achieve high predictiveness by throwing unbiased inference to the wind and pursuing prediction at all costs \citep{breiman_statistical_2001}.
On their own, predictiveness metrics provide no guarantees about the accuracy of downstream statistical inferences.

In fact, steps made in the interest of predictiveness may increase inferential bias.
As a growing body of scholarship critical of the hasty adoption of SML in criminal justice, healthcare, content moderation, and employment has demonstrated, machine learning models boasting high performance often have biases. These result from the use of non-representative training datasets and spurious correlations that neither reflect causal mechanisms nor generalize in different (sub)populations \citep{bender_dangers_2021}.
% \citep{obermeyer_dissecting_2019, kleinberg_algorithmic_2018, bender_dangers_2021, wallach_big_2019, noble_algorithms_2018}.
For example, \citet{hede_toxicity_2021} show that, when applied to news datasets, the Perspective API overestimates incivility in topics such as racial identity, violence and sex. 
These automatic classifications will likely introduce differential measurement error to a regression model of an outcome related to such topics.
If ACs used in communication science also have such biases, these biases may flow downstream, by way of differential or systematic measurement error, into statistical inferences.

The good news is that human-classified validation data can do more than benchmark predictive performance to increase transparency about measurement errors. With an appropriate model, validation data can effectively correct biases in statistical inferences.


%yi_handbook_2021,buonaccorsi_measurement_2010
\section{Correcting for Misclassification}
Statisticians have extensively studied problems that measurement errors can cause for statistical inferences and proposed statistical methods to correct them \citep[see][]{carroll_measurement_2006, fuller_measurement_1987}.
We therefore narrow our focus to methods that are particularly appropriate to dealing with misclassifications by ACs: \citet{fong_machine_2021}'s GMM calibration method, \citet{zhang_how_2021}'s pseudo-likelihood model, and approaches that promise greater generality—multiple imputation \citep{blackwell_multiple_2012} and likelihood modeling \citep{carroll_measurement_2006}.
%Measurement error is a vast and deep subject in statistics. We recommend \citet{carroll_measurement_2006} as a graduate-level textbook on the subject.

In the interest of clarity, we introduce some notation in this section. Say $X$ is the covariate that is automatically classified, and $X^*$ is a sample of validation data. The automatic classifications are $W$, $Z$ is a second covariate, and $Y$ is the outcome.
To illustrate, consider an idealized example study from social media research: whether someone breaks a rule on a social media site and how long it takes for them to be banned. 

This study might analyze the regression model $Y = B_0 + B_1 X + B_2 Z + \varepsilon$ where $Y$ is the (log-scaled) time until an account is banned, $X$ is whether the account broke a rule, and $Z$ is a covariate related to the account's reputation, such as the number of posts. Humans can observe whether an account breaks a rule, but human classifications are expensive and only available in a relatively small sample $X^*$. In contrast, an SML model can make automatic classifications $W$ for the entire dataset. But how do we correct for errors introduced by such ACs?

\emph{Regression calibration} uses observable variables, including the automatic classifications $W$ and other variables measured without error $Z$, to approximate the true value of a covariate $X$ \citep{carroll_measurement_2006}. \citet{fong_machine_2021} propose a regression calibration procedure designed for supervised machine learning that we refer to as \emph{GMM calibration} or abbreviate as GMM.\footnote{\citet{fong_machine_2021} describe their method within an instrumental variable framework, but it is equivalent to regression calibration and regression calibration is the standard term in measurement error literature.} For their calibration model, \citet{fong_machine_2021} use 2-stage least squares (2SLS), regressing observable covariates $Z$ and AC predictions $W$ onto the validation data and then use the resulting model to approximate the covariate $\hat{X}$.
Next, \citet{fong_machine_2021} use the generalized method of moments (GMM) to combine the estimate based on the approximated covariate $\hat{X}$ and the estimate using the validation data $X^*$. This method makes efficient use of validation data and provides an asymptotic theory for deriving confidence intervals. The GMM method's assumptions do not include strong assumptions about the distribution of the outcome $Y$, but are still violated by differential error \citep{fong_machine_2021}. 
GMM, like other regression calibration techniques, is not designed to correct for misclassification in the outcome. + +\emph{Multiple imputation} (MI) treats measurement error as a missing data problem because the true value of $X$ is observed in the validation data $X^*$ and missing otherwise \citep{blackwell_multiple_2012}. For example, the regression calibration step in \citet{fong_machine_2021}'s GMM method uses least squares regression to impute unobserved values of the covariate $X$. Indeed, \citet{carroll_measurement_2006} describe regression calibration when validation data are available as ``simply a poor person's imputation methodology'' (pp. 70). +Like regression calibration, multiple imputation uses a model to infer likely values of possibly misclassified variables. The difference is that multiple imputation samples several (hence \emph{multiple} imputation) entire datasets filling in the missing data from the predictive probability distribution of the covariate $X$ conditional on the other variables $\{X,Y,Z\}$, then runs a statistical analysis on each of these sampled datasets and pools the results of each of these analyses \citep{blackwell_multiple_2012}. Note that $Y$ is included among the imputing variables, giving the MI approach the potential to address differential error. \citet{blackwell_multiple_2012} claim that their MI method works with differential measurement error (so long as the bias in the measurement error can be modeled) and when measurement error is in the outcome or in a covariate. + +\emph{Maximum likelihood methods} (MLE) can effectively deal with measurement error in ACs by maximizing a likelihood that correctly specifies an \emph{error model} of the probability of the automatic classifications conditional on the true value and the outcome \citep{carroll_measurement_2006}. 
+In contrast to the GMM and the MI approach, which predict values of the mismeasured variable, the MLE method accounts for all possible values of the variable by ``integrating them out'' of the likelihood. +``Integrating out'' means adding both possible values of a binary variable to the likelihood, weighted by the likelihood of the error model. +MLE methods have two advantages in the context of ACs. First, they are quite general and can be applied to any model with a convex likelihood including generalized linear models (GLMs) and generalized additive models (GAMs). +Second, assuming the model is correctly specified, MLE estimators are fully consistent whereas regression calibration estimators are only approximately consistent \citep{carroll_measurement_2006}. Practically, this means that MLE methods can have greater statistical efficiency and require less validation data to make precise estimates. + +The MLE approach is conceptually different from the GMM one. The GMM approach first imputes likely values and then runs the main analysis on imputed values. By contrast, MLE approaches estimate—all in one step—the main analysis using the full dataset and the error model estimated using only the validation data \citep{carroll_measurement_2006}. +The MLE approach is applicable both when the automatically classified variable is a covariate and when it is the outcome. + +\emph{``Pseudo-likelihood''} methods (PL)—even if not always explicitly labeled this way—are another approach. \citet{zhang_how_2021} proposes a method that approximates the error model using quantities from the AC's confusion matrix—the positive and negative predictive values in the case of a mismeasured covariate and the AC's false positive and false negative rates in the case of a mismeasured outcome. Because quantities from the confusion matrix are neither data nor model parameters, \citet{zhang_how_2021}'s method is technically a ``pseudo-likelihood'' method. 
A clear benefit of this idea is that it only requires summary quantities derived from validation data. It can thus be applied when validation data are unavailable. We will discuss likelihood methods in greater depth in the presentation of our MLE framework below. + +Statisticians have studied other methods for correcting measurement error that we do not test in our simulations including simulation extrapolation, Bayesian estimation, and score function methods. As we argue in Appendix \ref{appendix:other.methods} of our Supplement, these approaches are not advantageous for correcting misclassification when validation data is available. + + +\subsection{Proposing a Likelihood Modeling Approach to Correct Misclassification} + +% This section basically translates Carroll et al. for a technically advanced 1st year graduate student. +We now elaborate on our likelihood modeling approach +by applying \citet{carroll_measurement_2006}'s presentation of the general statistical theory of likelihood modeling for measurement error correction to the context of binary classification when validation data is available. +The idea is to use an \emph{error model} of the conditional probability of the automatic classifications given the true classifications and other variables on which automatic classifications depend. +In other words, the error model estimates the conditional probability mass function of the automatic classifications. + +% When a variable is measured with error, this error introduces uncertainty. The overall idea of correcting an analysis with a mismeasured variable through likelihood modeling is to use + +Including the error model in the likelihood effectively accounts for uncertainty of the true classifications and, assuming the error model gives consistent estimates of the conditional probability of the automatic classifications given the true values, is sufficient to obtain consistent estimates using MLE \citep{carroll_measurement_2006}. 
The MLE approach is particularly well-suited to misclassification by ACs because it can be quite straightforward to fit the error model when the mismeasured variable is discrete.

\subsubsection{When an Automatic Classifier Predicts a Covariate}

Say we want to fit the linear regression model $Y=B_0 + B_1 X + B_2 Z + \varepsilon$ and an AC makes classifications $W$ that predict the discrete covariate $X$—for instance, whether a message by a social media account broke a rule according to an AC, to then explain the time until the account is banned.
Maximizing $\mathcal{L}(\Theta|Y,W)$, the likelihood of parameters $\Theta$ given data $W$ and $Y$, can jointly fit the regression model of $Y$ having parameters $\Theta_Y= \{B_0, B_1, B_2\}$ and an error model of $W$ because $P(Y,W|\Theta)$,
the joint probability of $Y$ and $W$, can be factored into the product of three terms: $P(Y|\Theta_Y)$, the regression model we want to fit, $P(W|X,Y)$, the error model, and $P(X|Z)$, a model for the probability of $X$.
Therefore, calculating these three conditional probabilities is sufficient to calculate the joint probability of the outcome and automatic classifications and obtain a consistent estimate despite misclassification.

For instance, we can assume that the probability of $W$ follows a logistic regression model of $Y$, $X$ and $Z$ and that the probability of $X$ follows a logistic regression model of $Z$. In this case, the likelihood model below is sufficient to consistently estimate the parameters $\Theta = \{\Theta_Y, \Theta_W, \Theta_X\} = \{\{B_0, B_1, B_2\}, \{\alpha_0, \alpha_1, \alpha_2\}, \{\gamma_0, \gamma_1\}\}$. 


\begin{align}
    \mathcal{L}(\Theta | Y, W) &= \prod_{i=1}^{N}\sum_{x} {P(Y_i| X_i, Z_i, \Theta_Y)P(W_i|X_i, Y_i, Z_i, \Theta_W)P(X_i|Z_i, \Theta_X)} \label{eq:covariate.reg.general}\\
    P(Y_i| X_i, Z_i, \Theta_Y) &= \phi(B_0 + B_1 X_i + B_2 Z_i) \\
    P(W_i| X_i, Y_i, Z_i, \Theta_W) &= \frac{1}{1 + e^{-(\alpha_0 + \alpha_1 Y_i + \alpha_2 X_i)}} \label{eq:covariate.logisticreg.w} \\
    P(X_i | Z_i, \Theta_X) &= \frac{1}{1 + e^{-(\gamma_0 + \gamma_1 Z_i)}}
\end{align}


\noindent $\phi$ is the normal probability distribution function. Note that Equation \ref{eq:covariate.reg.general} models differential error taking the form of a linear relationship between $W$ and $Y$. When error is nondifferential, the dependence between $W$ and $Y$ can be removed from Equations \ref{eq:covariate.reg.general} and \ref{eq:covariate.logisticreg.w}.

Calculating the three conditional probabilities in practice requires specifying models on which validity of the method depends.
This framework is very general and a wide range of probability models, such as generalized additive models (GAMs) or Gaussian process classification, may be used to estimate $P(W|X,Y)$ and $P(X|Z)$ \citep{williams_bayesian_1998}.
For simplicity, we proceed with a focus on linear regression for the probability of $Y$ and logistic regression for the probability of $W$ and the probability of $X$.

\subsubsection{When an Automatic Classifier Predicts the Outcome}

We now turn to the case when an AC makes classifications $W$ that predict the discrete-valued outcome $Y$—for example to use an automatic classifier predicting whether social media users break rules to test hypotheses about why they do so.
This case is simpler than the case above where an automatic classifier is used to measure a covariate $X$ because there is no need to specify a model for the probability of $X$. 

If we assume that the probability of $Y$ follows a logistic regression model of $X$ and $Z$, and allow $W$ to be biased and directly depend on $X$ and $Z$, then maximizing the following likelihood is sufficient to consistently estimate the parameters $\Theta = \{\Theta_Y, \Theta_W\} = \{\{B_0, B_1, B_2\},\{\alpha_0, \alpha_1, \alpha_2, \alpha_3\}\}$.

\begin{align}
    \mathcal{L}(\Theta|Y,W) &= \prod_{i=1}^{N} {\sum_{x}{P(Y_i | X_i, Z_i, \Theta_Y)P(W_i|X_i, Z_i, Y_i, \Theta_W)}} \label{eq:depvar.general}\\
    P(Y_i| X_i, Z_i, \Theta_Y) &= \frac{1}{1 + e^{-(B_0 + B_1 X_i + B_2 Z_i)}} \\
    P(W_i | Y_i, X_i, Z_i, \Theta_W) &= \frac{1}{1 + e^{-(\alpha_0 + \alpha_1 Y_i + \alpha_2 X_i + \alpha_3 Z_i)}} \label{eq:depvar.w}
\end{align}

If the AC's errors are conditionally independent of $X$ and $Z$ given the model for $W$ then the dependence of $W$ on $X$ and $Z$ can be omitted from Equations \ref{eq:depvar.general} and \ref{eq:depvar.w}.
Additional details are available in Appendix \ref{appendix:derivation} of the Supplement.


% TODO: bring back once appendix is ready.
% as we demonstrate in Appendix \ref{appendix:lit.review} .

\section{Simulation Design}

% \TODO{Create a table summarizing the simulations and the parameters.}

In this section, we present four Monte Carlo simulations (\emph{Simulations 1a}, \emph{1b}, \emph{2a}, and \emph{2b}) to evaluate existing methods (GMM, MI, PL) as well as our approach (MLE) for correcting statistical inference when a variable is measured by an error-prone AC. We first describe the set-up of our Monte Carlo simulations before delving into the four prototypical scenarios we identified via our literature review and therefore simulated. 

\subsection{Parameters of the Monte Carlo simulations}
Monte Carlo simulations are a common tool for evaluating statistical methods, including (automated) content analysis \citep[e.g.][]{song_validations_2020,bachl_correcting_2017,geis_statistical_2021, fong_machine_2021,zhang_how_2021}.
A Monte Carlo simulation defines a model of study design in terms of a data generating process from which datasets are repeatedly sampled. Running an analysis on each sampled dataset provides an empirical distribution of the results the analysis would obtain over study replications. The method affords exploration of finite-sample performance, robustness to assumption violations, comparison across several methods, and ease of interpretability \citep{mooney_monte_1997}.

For each prototypical scenario, we ran up to six analyses. Four of these test error correction methods: \emph{GMM calibration} (GMM) \citep{fong_machine_2021}, \emph{multiple imputation} (MI) \citep{blackwell_multiple_2012}, \emph{Zhang's pseudo-likelihood model} (PL) \citep{zhang_how_2021}, and our \emph{likelihood modeling} (MLE) approach. GMM is not designed for the case when an automatically classified variable is the outcome, so we omit this method in \emph{Simulations 2a} and \emph{2b}. We compare error correction methods to two other approaches: the \emph{feasible} estimator in which researchers abstain from using ACs by using only perfectly accurate manually annotated validation data (i.e., cases where manual coders agree on codes)
%and illustrates the motivation for using an AC in these scenarios—validation alone provides insufficient statistical power for a sufficiently precise hypothesis test.
and the \emph{naïve} estimator, representative of common practice, where researchers use AC-based classifications $W$ as stand-ins for $X$. 
+ +We repeat each simulation with different amounts of automatically classified data (ranging from \Sexpr{min(N.sizes)} to \Sexpr{max(N.sizes)} observations) and human labeled data (ranging from \Sexpr{min(m.sizes)} to \Sexpr{max(m.sizes)} +observations). + +\begin{equation} + Y= B_0^* + B_1^*W + B_2^*Z + \varepsilon^* = B_0^* + B_1^*(X + \xi) + B_2^*Z +\label{mod:measerr.ols} +\end{equation} + +We evaluate each analytical approach in terms of \emph{consistency}, whether the estimates of parameters $\hat{B_X}$ and $\hat{B_Z}$ have expected values nearly equal to the true values $B_X$ and $B_Z$; \emph{efficiency}, how precisely the parameters are estimated and how precision improves with additional automatically classified or human labeled data; and \emph{uncertainty quantification}, how well the 95\% confidence intervals provided by each method approximate the confidence interval of parameter estimates across Monte Carlo simulations. + +%These simulations are designed to verify that error correction methods from prior work are effective in ideal scenarios and to create the simplest possible cases where these methods are inconsistent. Showing how prior methods fail is instructive for understanding how our MLE approach does better both in these artificial simulations and in practical projects. +We use the \texttt{predictionError} R package \citep{fong_machine_2021} for the GMM method, the \texttt{Amelia} R package for the MI approach, and the \texttt{optim()} R function for implementing \citet{zhang_how_2021}'s PL approach and our approach. + +\subsection{Four Prototypical Scenarios} + +We simulate regression models with two covariates ($X$ and $Z$). This sufficiently constrains our study's scope but is general enough to be applied in a wide range of research studies. +%Simulating studies with two covariates lets us study how measurement error in one covariate can cause bias in coefficient estimates of other covariates. 

Whether the methods we evaluate below are effective or not depends on the conditional dependence structure among the covariates, the outcome $Y$, and the model predictions $W$.
This structure determines whether covariate measurement error is differential and whether outcome measurement error is systematic \citep{carroll_measurement_2006}.
We illustrate our simulated scenarios using Bayesian networks to represent the conditional dependence structure of the variables in Figure \ref{bayesnets}
\citep{pearl_fusion_1986}.
%In these figures, an edge between two variables indicates that they have a direct relationship. Two nodes that are not neighbors are statistically independent given the variables between them on the graph. For example, in Figure \ref{fig:simulation.1a}, the automatic classifications $W$ are conditionally independent of $Y$ given $X$ because all paths between $W$ and $Y$ contain $X$. This indicates that the model $Y=B_0 +B_1 W+ B_2 Z$ (the \emph{naïve estimator}) has non-differential error because the automatic classifications $W$ are conditionally independent of $Y$ given $X$. However, in Figure \ref{fig:simulation.1b}, there is an edge between $W$ and $Y$ to indicate that $W$ is not conditionally independent of $Y$ given other variables. Therefore, the naïve estimator has differential error.

We first simulate two cases when an AC is used to measure a covariate with and without differential error. Then, we simulate two cases where an AC is used to measure the outcome either making errors that are correlated with predictors or not.

\input{bayesnets.tex}

\subsection{Measurement Error in a Covariate (\emph{Simulations 1a} and \emph{1b})}

We consider studies with a goal of testing hypotheses about the coefficients $B_1$ and $B_2$ in the least squares regression (Model \ref{mod:true.ols}). 

\begin{equation}
Y=B_0 + B_1 X + B_2 Z + \varepsilon
  \label{mod:true.ols}
\end{equation}
In this example, $Y$ is a continuous variable, $X$ is a binary variable measured with an AC, and $Z$ is a normally distributed variable with mean 0 and standard deviation \Sexpr{sim1.z.sd} measured without error.
For example, $Y$ could be the time until an account on an online forum is banned, $X$ if a message breaks one of the forum's rules, and $Z$ the account's reputation score. $X$ and $Z$ are negatively correlated because high-reputation accounts may be less likely to break rules.
%$Z$ can indicate if the message is in German or English, the two possible languages in the hypothetical study.

Say that human content coders can observe $X$ perfectly, but each observation is so expensive that observing $X$ for a large sample is infeasible.
%Instead, the human coders can measure $X$ without error for a subsample of size $m << N$.
To scale up content analysis, an SML-based AC makes predictions $W$ of $X$—for instance predicting if any of the messages from that social media user break the rules.
Both scenarios have a normally distributed outcome $Y$ and two binary-valued covariates $X$ and $Z$, which are balanced ($P(X)=P(Z)=0.5$) and correlated (Pearson's $\rho=\Sexpr{round(sim1a.cor.xz,2)}$). Simulating balanced covariates serves simplicity so that accuracy is adequate to quantify the predictive performance of our simulated classifier. Simulating correlated covariates is helpful to study how misclassification in one variable affects parameter inference in other covariates.
To represent a research study design where automated classification is needed to obtain sufficient statistical power, $Z$ and $X$ can explain only \Sexpr{format.percent(sim1.R2)} of variance in $Y$.
% TODO, bring back when these simulations are in the appendix. 

%Additional simulations in appendix \ref{appendix:sim1.imbalanced} show results for variations of \emph{Simulation 1} with imbalanced covariates explaining a range of variances, different classifier accuracies, heteroskedastic misclassifications and deviance from normality in the outcome $Y$.

In \emph{Simulation 1a}, visualized in Figure \ref{fig:simulation.1a}, we simulate an AC with \Sexpr{format.percent(sim1a.acc)} accuracy to reflect a situation where $X$ may be difficult to predict, but an automated classifier, represented as a logistic regression model having linear predictor $W^*$, provides a useful signal. The \emph{naïve estimator} has classical and nondifferential measurement error because $W=X+\xi$ where $\xi$ is normally distributed with mean $0$ and $\xi$ is conditionally independent of $Y$ given $X$ and $Z$ ($P(\xi| Y,X,Z) = P(\xi|X,Z)$).
%For simplicity, the AC's errors $\xi$ are independent of all other variables. In Appendix F, we demonstrate that the methods we study perform similarly when $\xi$ is heteroskedastic, correlated with $X$ or $Z$. Note that heteroskedasticity does not imply differential error. Suppose, for example, that AC's accuracy predicting rule violations $W$ depends on language $Z$. As a result, $\xi$ and $Z$ are correlated, and since time-till-ban $Y$ and reputation $Z$ are also correlated, $\xi$ is in turn correlated with $Y$. Despite this, the error in Model \ref{mod:measerr.ols} remains nondifferential, because $Y$ is conditionally independent of $\xi$ given $Z$ and $X$.

% Measuring $X$ is expensive, perhaps requiring trained human annotators, but an automated classifier can predict $X$ with We choose this level of accuracy to reflect a situation where $X$ may be difficult to predict

% The classifier, perhaps a proprietary API, has unobservable features $K$. The classifier's predictions $W=X + \xi$ are unbiased—the errors $\xi$ are not correlated with $Y$,$X$ or $Z$. 
Figure \ref{fig:simulation.1} shows a Bayesian network representing \emph{Simulation 1}'s conditional dependencies of $X$, $Y$, $K$, $Z$ and $W$ as a directed acyclic graph (DAG).

% \emph{Simulation 2} extends \emph{Simulation 1} by making the automated classifier classification errors $\xi$ that are correlated with $Y$ even after accounting for $Z$ and $x$.

In \emph{Simulation 1b} visualized in Figure \ref{fig:simulation.1b}, the AC's predictions directly depend on the outcome $Y$, so we can test error correction methods in the presence of differential error.
We create this dependence by simulating an AC with $\Sexpr{format.percent(sim1b.acc)}$ accuracy that makes predictions $W$ that are negatively correlated with the residuals of the linear regression of $X$ and $Z$ on $Y$ (Pearson's $\rho=\Sexpr{round(sim1b.cor.resid.w_pred,2)}$). As a result, this AC makes fewer false-positives and more false-negatives at greater levels of $Y$. Although the false-negative rate of the AC is \Sexpr{format.percent(sim1b.fnr)} overall, when $Y \leq 0$ the false-negative rate is only \Sexpr{format.percent(sim1b.fnr.y0)}, but when $Y \geq 0$ it rises to \Sexpr{format.percent(sim1b.fnr.y1)}.
%Figure \ref{fig:simulation.1b} shows a Bayesian network representing conditional dependencies of $Z$, $Y$, $Z$ and $W$ in \emph{Simulation 1b}.

These simulations are prototypical of an AC that influences behavior in a system under study such as if community moderators use ACs to identify rule-breakers and correct their behavior. False negatives may cause delays in moderation increasing $Y$ (time-until-ban), while false-positives could draw moderator scrutiny and cause them to issue speedy bans.
This mechanism is not mediated by observable variables such as reputation ($Z$) or the true rule-breaking ($X$). Therefore, Model \ref{mod:measerr.ols} has differential error. 
+
+\subsection{Measurement Error in the Outcome (Simulation 2a and 2b)}
+
+We then simulate using an AC to measure the dependent variable $Y$, a binary covariate $X$, and a continuous covariate $Z$. For example, $Y$ describes whether a message is rule-breaking, $X$ whether the user leaving the message has been warned by moderators, and $Z$ a reputation score. The goal is to estimate $B_1$ and $B_2$ in the following logistic regression model:
+
+\begin{equation}
+ P(y) = \frac{1}{1 + e^{-(B_0 + B_1 x + B_2 z)}}
+ \label{mod:measerr.logit}
+\end{equation}
+
+\noindent As was true for $X$ in \emph{Simulation 1}, human coders can observe $Y$, but at considerable expense, and an AC makes predictions $W = Y + \xi$.
+
+\emph{Simulation 2a} (visualized in Figure \ref{fig:simulation.2a}) and \emph{Simulation 2b} (visualized in Figure \ref{fig:simulation.2b}) implement these scenarios. Here, $X$ and $Z$ are balanced ($P(X)=P(Z)=0.5$) and correlated (Pearson's $\rho=\Sexpr{round(sim2a.cor.xz,2)}$).
+As in \emph{Simulation 1} we simulate scenarios where an AC is of practical use to estimate subtle relationships. In \emph{Simulation 1} we chose the variance of the normally distributed outcome given our chosen coefficients $B_X$ and $B_Z$, but this is not appropriate for \emph{Simulation 2}'s logistic regression so we choose, somewhat arbitrarily, $B_X=\Sexpr{sim2.Bx}$ and $B_Z=\Sexpr{sim2.Bz}$.
+
+Again, we simulate ACs with moderate predictive performance.
+The AC in \emph{Simulation 2a} is \Sexpr{format.percent(sim2a.AC.acc)} accurate and the AC in \emph{Simulation 2b} is \Sexpr{format.percent(sim2b.AC.acc)} accurate. In \emph{Simulation 2a}, the predictions $W$ are unbiased because classification errors $\xi$ have mean $0$ and are independent of covariates $X$ and $Z$. However, in \emph{Simulation 2b} the predictions are biased because their errors $\xi$ are correlated with $Z$ (Pearson's $\rho = \Sexpr{round(sim2b.error.cor.z,2)}$). 
+One way such a correlation might obtain in our example of online moderation is if community members are adept at skirting the rules without violating them. Such members are both likely to be warned by moderators and also to leave messages misclassified as rule-breaking. + +\section{Simulation Results} + +We visualize the consistency, efficiency, and the accuracy of uncertainty quantification of each method in each prototypical scenario. +%Our main results are presented as plots visualizing the consistency (i.e., does the method, on average, recover the true parameter?), efficiency (i.e., how precise are estimates and does precision improve as sample size increases?), and the accuracy of uncertainty quantification of each method in each scenario. +For example, Figure \ref{fig:sim1a.x} visualizes results for \emph{Simulation 1a}. Its subplots each show a simulation with a given total sample size (No. observations) and validation sample size (No. validation data). + +To understand how each plot visualizes the consistency of estimators, see for instance the leftmost column in the bottom-left subplot illustrating performance of the naïve estimator using AC classifications $W$ to stand in for the true variable $X$. The center of the black circle locates the expected value of the point estimate over our \Sexpr{n.simulations} simulations. For the naïve estimator in Figure \ref{fig:sim1a.x}, the circle is far below the dashed line which shows the true value of $B_X$, indicating that misclassification causes a dramatic bias toward 0 and that the estimator is inconsistent. + +To assess efficiency, we mark the region in which point estimate falls in 95\% of the simulations with black lines. 
+
+These black lines in the bottom-left subplot of Figure \ref{fig:sim1a.x} for example show that the feasible estimator, which uses only perfectly accurate validation data, is consistent but less precise than the estimates from correction methods that use both automatic classifications and human-labeled data.
+
+The accuracy of the method's uncertainty quantification can be seen by comparing the gray lines, which show for each method the expected value of its approximate 95\% confidence intervals over the \Sexpr{n.simulations} simulations, to the neighboring black lines.
+ The \emph{PL} column in the bottom-left subplot of Figure \ref{fig:sim1a.x} shows that the method's 95\% confidence interval is biased towards 0 when the number of human labels is low. This result is expected because the method does not account for uncertainty in misclassification probabilities estimated using the sample of true classifications.
+Now that we have explained how to interpret our plots, we will unpack them for each simulated scenario.
+
+\subsection{Simulation 1a: When Misclassifications Are Independent of the Outcome}
+
+\begin{figure}
+<>=
+p <- plot.simulation.iv(plot.df.example.1, iv='x')
+grid.draw(p)
+@
+\caption{Estimates of $B_X$ in multivariate regression with $X$ measured using machine learning and model accuracy independent of $X$, $Y$, and $Z$. All methods, except the pseudo-likelihood method, obtain precise and accurate estimates given sufficient validation data. \label{fig:sim1a.x}}
+\end{figure}
+
+As visualized in Figure \ref{fig:sim1a.x}, the naïve estimator is severely biased in its estimation of $B_X$ in \emph{Simulation 1a}.
+Fortunately, error correction methods including our MLE method as well as the GMM and MI approach produce consistent estimates and acceptably accurate confidence intervals. 
+Notably, the PL method is inconsistent and considerable bias remains when the number of human classifications is much less than the total number of observations. The most likely source of this inconsistency is that $P(X=x)$ is missing from the pseudo-likelihood as can be seen by comparing Equation \ref{eq:mle.covariate.chainrule.4} in our Supplement to Equations 24-28 from \citet{zhang_how_2021}. The bottom row of Figure +\ref{fig:sim1a.x} shows that the precision of MLE and GMM estimates increase in larger datasets. +However, this is not true for multiple imputation (MI). +Therefore, GMM calibration and MLE appear to use automatic classifications more efficiently than MI does. + +%It is important to correct misclassification error even when an AC is only used as a statistical control \citep[for example]{weld_adjusting_2022}, because when a covariate $Z$ is correlated with $X$, misclassifications of $X$ cause bias in the \emph{naïve} estimates of $B_Z$, the regression coefficient of $Z$ on $Y$. As Figure \ref{fig:sim1a.z} in Appendix \ref{appendix:main.sim.plots} shows, methods that effectively correct estimates of $X$ in \emph{Simulation 1a} also correct estimates of $B_Z$. + +In brief, when misclassifications cause nondifferential error, our simulations provide evidence that MLE and GMM calibration are both effective, efficient and provide accurate uncertainty quantification. These two methods complement each other since they have different assumptions and advantages. In theory, MLE depends on correctly specifying the likelihood and its robustness to incorrect specifications is difficult to analyze \citep{carroll_measurement_2006}. GMM calibration depends on an exclusion restriction instead of such distributional assumptions \citep{fong_machine_2021}. 
+As discussed above, MLE's advantages over GMM calibration come from the relative ease with which it can be extended to more complex statistical models such as generalized linear models (GLMs) and generalized additive models (GAMs). +Therefore, in cases similar to \emph{Simulation 1a} we recommend using both GMM and an appropriately specified MLE model. + +\subsection{Simulation 1b: When Misclassifications Depend on the Outcome} + +Differential error can give rise to dramatic bias that is more difficult to correct using measurement error methods. +As Figure \ref{fig:sim1b.x} shows, the naïve estimator is opposite in sign to the true parameter in \emph{Simulation 1b}. +Of the four methods we test, only the MLE and the MI approach provide consistent estimates. This is expected because these are the only two methods using the outcome $Y$ to adjust for errors in classifications. The bottom row of Figure \ref{fig:sim1b.x} shows how the precision of the MI and MLE estimates increase with additional unlabeled data. As with \emph{Simulation 1a}, MLE uses this data more efficiently than MI does. However, due to the low accuracy and bias of the AC, additional unlabeled data improves precision less than one might expect. Both methods provide acceptably accurate confidence intervals. Figure \ref{fig:sim1b.z} in the Supplement shows that as in \emph{Simulation 1a}, effective correction for misclassifications of $X$ is required to consistently estimate $B_Z$, the coefficient of $Z$ on $Y$. Looking at results from methods that do not correct differential error is useful for understanding their limitations. When few true values of $X$ are known, GMM is nearly as bad as the naïve estimator, and PL is also visibly biased. Both improve when a greater proportion of the entire dataset is labeled because they combine their AC-based estimates with the feasible estimator. 
+
+\begin{figure}
+<>=
+p <- plot.simulation.iv(plot.df.example.2, iv='x')
+grid.draw(p)
+@
+\caption{Estimates of $B_X$ in multivariate regression with $X$ measured using machine learning, where model accuracy is correlated with $X$ and $Y$. Only multiple imputation and our MLE model with a full specification of the error model obtain consistent estimates of $B_X$. \label{fig:sim1b.x}}
+\end{figure}
+
+In sum, our simulations suggest that the MLE method is the superior choice when misclassifications are not conditionally independent of the outcome given observed covariates. Although MI estimations are consistent, the method's practicality is limited by its inefficiency.
+
+\subsection{Simulation 2a: When Random Misclassifications Affect the Outcome}
+
+\begin{figure}
+<>=
+#plot.df <-
+p <- plot.simulation.dv(plot.df.example.3,'z')
+grid.draw(p)
+@
+\caption{Estimates of $B_Z$ in \emph{Simulation 2a}, multivariate regression with $Y$ measured using an imperfect automatic classifier. Only our MLE model obtains consistent estimates. \label{fig:sim2a.x}}
+\end{figure}
+
+Ignoring misclassification in dependent variables also introduces bias as evidenced by the naïve estimator's inaccuracy illustrated in Figure \ref{fig:sim2a.x}. Both our MLE method and MI attempt to correct this error, but MLE is more efficient.
+It is puzzling that the MI estimator is inconsistent and does not improve with more human-labeled data.
+% NOTE(review): confirm the description of MI here against Figure \ref{fig:sim2a.x}; the caption states that only the MLE model obtains consistent estimates.
+%Note that the GMM estimator is not designed to correct misclassifications in the outcome.
+The PL approach is also inconsistent, especially when the validation dataset is small compared to the entire dataset, but it is closer to recovering the true parameter than the MI or naïve estimators.
+Based on Figure \ref{fig:sim2a.x}, it is clear that the precision of the MLE estimator improves with the addition of unlabeled data to a greater extent than the PL estimator. 
The PL estimator provides only modest improvements in precision compared to the feasible estimator.
+When the amount of human-labeled data is low, inaccuracies in the 95\% confidence intervals of both the MLE and PL become visible. As before, PL's inaccurate confidence intervals are due to its use of finite-sample estimates of the automatic classification probabilities.
+%In both cases, the poor finite-sample properties of the Fisher-information quadratic approximation contribute to this inaccuracy. In Appendix \ref{appendix:sim1.profile}, we show that the MLE method's inaccuracy vanishes when using the profile-likelihood method instead.
+
+ In brief, our simulations suggest that MLE is the best of the methods we tested when misclassifications affect the dependent variable. It is the only consistent option and more efficient than the PL method, which is almost consistent.
+
+\subsection{Simulation 2b: When Misclassifications Affecting the Outcome Are Biased}
+
+\begin{figure}
+<>=
+#plot.df <-
+p <- plot.simulation.dv(plot.df.example.4,'z')
+grid.draw(p)
+@
+\caption{Estimates of $B_Z$ in \emph{Simulation 2b}, multivariate regression with $Y$ measured using an automatic classifier that makes errors correlated with a covariate $X$. Only our MLE model with a full specification of the error model obtains consistent estimates. \label{fig:sim2b.x}}
+\end{figure}
+
+In \emph{Simulation 2b}, misclassifications in the outcome are correlated with a covariate $X$. As shown in Figure \ref{fig:sim2b.x}, this type of misclassification can cause dramatic bias in the naïve estimator.
+Similar to \emph{Simulation 2a}, MI is inconsistent; however, PL is also inconsistent because it does not account for $X$ in its measurement error model.
+As in \emph{Simulation 1b}, our MLE method obtains consistent estimates, but only does much better than the feasible estimator when the dataset is large. 
+
+Figure \ref{appendix:main.sim.plots} in the Supplement shows that the precision of estimates for the coefficient for $X$ improves with additional data to a greater extent and so this imprecision is mainly in estimating the coefficient for $Z$, the variable correlated with misclassification.
+% NOTE(review): \ref{appendix:main.sim.plots} is an appendix label but is referenced as a Figure; this should likely point to a specific figure label.
+Therefore, our simulations suggest that MLE is the best method when misclassifications in the outcome are correlated with a covariate.
+
+\section{Transparency Is Not Enough. We Can Fix It!: Recommendations for Automated Content Analyses}
+
+``Validate, Validate, Validate'' \citep{grimmer_text_2013} is one of the guiding mantras for automated content analysis. It reminds us that ACs can produce misleading results and of the importance of steps to ascertain their validity, for instance by making misclassification rates transparent.
+\citet[p.5]{grimmer_text_2013} write that
+``when categories are known [...], scholars must demonstrate that the supervised methods are able to reliably replicate human coding.''
+This suggests that quantifying an AC's predictive performance by comparing human-labeled validation data to automatic classifications sufficiently establishes an AC's validity and thereby the validity of downstream analyses.
+
+
+Like \citet{grimmer_text_2013}, we are deeply concerned that computational methods may produce invalid evidence. In this sense, their validation mantra animates this paper. But transparency about misclassification rates via metrics such as precision or recall leaves unanswered an important question: Is comparing automated classifications to external ground truth sufficient to claim validity? Or is there something else we can do and should do? We think there is: Using statistical methods to not only quantify but also correct for misclassification. Our study provides several recommendations in this regard. 
+
+%Similar to recent work in communication science \citep{mahl_noise_2022, stoll_supervised_2020}, our goal is not only to \textit{highlight} and \textit{quantify} common pitfalls in automated content analysis applications of ACs but to also \textit{propose} constructive guidelines on the road ahead.
+
+\subsubsection{Construct Validation Data before Building an AC}
+
+Analyzing human-coded data for validation is often done \textit{post facto}, e.g., to calculate predictiveness metrics after an AC is built. We propose instead to collect and use manually annotated validation data \textit{ante facto}.
+Practically speaking, the main reason to use an AC is feasibility, i.e., avoiding labeling large data sets manually.
+For example, a large dataset may be necessary to study a small effect and manually labeling such a dataset may be more expensive than building an AC.
+In this way, ACs can be seen as a cost-saving procedure that trades the expense of manual labeling for the threats to validity posed by misclassification.
+However, building an AC can also be very expensive because of the considerable costs of human annotation, software development, and computational resources needed to train ACs. Due to this often unpredictable effort, we caution researchers against building an AC unless doing so is necessary to obtain useful evidence. Instead, validation data should be used \textit{ante facto}, with researchers beginning with preliminary analysis of human-coded data from which they can discern if an AC is necessary.
+In our simulations, the ``feasible estimate'' is less precise but consistent in all cases. So if fortune shines and this estimate sufficiently answers one's research question, the costs of building the AC are avoided.
+If feasible estimation fails to provide convincing evidence, for example by not rejecting a null hypothesis, the human-labeled data is not wasted. 
It can be reused to validate the AC and account for misclassification in downstream analysis. +%One potential problem of this \textit{ante facto} approach is that conducting two statistical tests of the same hypothesis increases the chances of false discover. A simple solution to this is to adjust the significance threshold $\alpha$ for drawing conclusions from the feasible estimate. %We recommend p < .01. %That said, it might useful use an AC in a preliminary analysis, prior to collecting validation data when an AC such as one available from an API, is available for reuse and confusion matrix quantities necessary for the pseudo-likelihood (PL) method are published. Although (PL) is inconsistent when used for a covariate, this can be corrected if the true rate of $X$ can be estimated. +%Caution is still warranted because ACs can perform quite differently from one dataset to another so we recommend collecting validation representative of your study's dataset and using another appropriate method for published studies. + +\subsubsection{Use Validation Data to Evaluate Differential Error} + +% Let's suppose an AC is used to the feasible estimator is insufficiently informative +%There are many guides on how to train and validate ACs \citep[e.g.][]{grimmer_text_2013,van_atteveldt_validity_2021}. However, they mostly refer to performance metrics such as the F1-score or Area under the Curve (AUC). The problem with this approach is that such criteria make misclassifications transparent but do not provide information on how misclassification will affect downstream analyses and how to correct for such effects. +%One reason for this is that such criterion do not account for differential error or for correlation between misclassifications in the outcome and a regression covariate—both of which can give rise to extremely misleading statistics. +As we argued and demonstrated in our simulations, biases introduced by misclassification may not be trivial to adjust. 
Here, knowing whether an AC makes differential misclassification is particularly important for downstream analyses: It determines which correction method might work best.
+Fortunately, human-coded data can be used to investigate differential misclassification.
+For example, ``algorithmic audits'' \citep[e.g.][]{rauchfleisch_false_2020, kleinberg_algorithmic_2018} evaluate the performance of an AC across different subgroups in the data, for example when using an AC for corpora of different languages or data from different social media platforms. Differential misclassification can be ruled out if the performance is the same across all analytically relevant subgroups and other variables.
+We strongly recommend using such methods to test for differential misclassification and design the measurement error model within our MLE framework. Evidence that the model effectively corrects differential error can be provided by tests of conditional independence between the automatic classifications $W$ and the outcome $Y$ given a chosen model of $P(W|Y,X,Z)$, the conditional probability of the automatic classifications given the outcome and covariates.
+
+\subsubsection{Correct for Misclassification Errors (Twice) Instead of Being Naïve}
+
+Across our simulations, we showed that the naïve estimator is biased. Testing different error correction methods, we found that these generate different levels of consistency, efficiency, and accuracy in uncertainty quantification. That said, our proposed MLE method should be considered as a versatile method because it is the only method capable of producing consistent estimates in prototypical situations studied here. We recommend the MLE method as the first ``go-to'' method. The method requires specifying the error model, but this can be known if one follows our second recommendation. 
We developed the \textbf{misclassificationmodels} R package to facilitate adoption of our MLE method (see Appendix \ref{appendix:misclassificationmodels} in our Supplement).
+
+We recommend comparing our MLE approach to another error correction method. Consistency between two correction methods shows that results are robust independent of the choice of correction method. If the AC is used to predict the dependent variable, PL might be a reasonable choice. For cases of AC-predicted covariates, GMM calibration is a good choice if error is nondifferential. Otherwise, MI can be considered.
+The range of viable choices in error correction motivates our next recommendation.
+
+\subsubsection{Provide a Full Account of Methodological Decisions and Robustness Checks}
+
+Finally, we add our voices to those
+recommending that researchers report methodological decisions so others can understand and replicate their design \citep{pipal_if_2022, reiss_reporting_2022}. These decisions include but are not limited to choices concerning test and training data (e.g., size, sampling, split in cross-validation procedures, balance), manual annotations (size of manually annotated data, number of coders, intercoder values, size of data coded for intercoder testing), and the classifier itself (choice of algorithm or ensemble, different accuracy metrics). They extend to reporting different error correction methods as proposed by our third recommendation.
+In our review, we found that reporting such decisions is not yet common, at least in the context of SML-based text classification.
+When correcting for misclassification, uncorrected results will often provide a lower-bound on effect sizes; corrected analyses will provide more accurate but less conservative results.
+Therefore, both corrected and uncorrected estimates should be presented as part of making potential multiverses of findings transparent. 
+% we +% To report instead of hiding methodological decisions and related uncertainty that may emerge in generated results, +We realize that researchers might need to cut methodological information, especially for empirical studies, to conform to either word limits or reviewers. If word limitations are the problem, this information could be reported in appendices. +% Here, the field might consider adopting ---or adapting--- machine learning reporting standards such as DOME (Computational Biology) and PRIME (Diagnostic medicine). + + +\section{Conclusion and Limitations} + +We introduced the often-ignored problem of misclassification in automated content analysis, a topic often discussed in the context of manual content analysis \citep{scharkow_how_2017}, but that we believe has not attracted enough attention within the computational social science community. In a systematic review of SML applications, we show that scholars rarely acknowledge this problem. We therefore discuss a range of statistical methods that use manually annotated validation data as a ``gold standard'' to account for misclassification and produce correct statistical results, including a new MLE method we design. Using Monte-Carlo simulations, we show that our method provides consistent estimates, especially in less trivial situations involving differential error. Based on these results, we provide four recommendations for the future of automated content analysis: Researchers should (1) construct manually annotated validation data before running ACs to see whether using human-labeled data is sufficient, (2) use validation data to test for differential error and choose error correction methods (3) correct for misclassifications via more than one error correction method, and (4) be transparent about the methodological decisions involved in SML-based classifications and error correction. + +Our study has several limitations. 
First, the simulations and methods we introduce focus on misclassification by automated tools. They provisionally assume that human coders do not make errors. +This assumption can be reasonable if intercoder reliability is very high but this may not always be the case. +%Alternatively, validation data can be treated as a gold standard if the goal is measuring \emph{how a person categorizes content}, as opposed to the more common approach of measuring presumably objective content categories. That said, the prevailing approaches in content analysis use human coders to measure a latent category who are prone to misclassification. +Thus, it may be important to account for measurement error by human classifiers and by automatic classifiers simultaneously. In theory, it is possible to extend our MLE approach in order to do so \citep{carroll_measurement_2006}. +However, because the true values of content categories are never observed, accounting for automatic and human misclassification at once requires latent variable methods that bear considerable additional complexity and assumptions \citep{pepe_insights_2007}. We leave the integration of such methods into our MLE framework for future work. Second, the simulations we present do not consider a number of factors that may influence the performance and robustness of the methods we test including classifier accuracy, heteroskedasticity, and violations of distributional assumptions. We are working to investigate such factors by extending our simulations. We simulated datasets with balanced covariates, but classifiers are often used to measure rare occurrences. Imbalanced covariates will require greater sample sizes of validation data to correct misclassification bias. +In such cases, validation data may be collected more efficiently using approaches that provide balanced, but unrepresentative samples. 
+Such non-representative sampling requires correction methods to account for probability that a datapoint will be sampled, but we have not evaluated if the correction methods can do so. + +\setcounter{biburlnumpenalty}{9001} +\printbibliography[title = {References}] + +\clearpage +\appendix + +\section{Perspective API Example}\label{appendix:perspective} + +The civil comments dataset represented the human-coded variables we analyzed as proportions of annotators who labeled a comment as ``toxic'' or as disclosing each of several aspects of personal identity including race and ethnicity. +For the purposes of this exercise, we convert the annotation proportions into indicators of the majority view. The dataset also includes counts of ``reactions'' (e.g., 'funny', 'like', 'sad') to each comment. + + Our maximum-likelihood based error correction technique in this example requires specifying models for the Perspective's scores and, in the case where these scores are used as a covariate, a model for the human annotations. In our first example, where toxicity was used as a covariate, we used the \emph{human annotations}, \emph{identity disclosure}, and the interaction of these two variables in the model for scores. We omitted \emph{likes} from this model because they are virtually uncorrelated with misclassifications (Pearson's $\rho=\Sexpr{iv.example[['civil_comments_cortab']]['toxicity_error','likes']}$). Our model for the human annotations is an intercept-only model. + + In our second example, where toxicity is the outcome, we use the fully interacted model of the \emph{human annotations}, \emph{identity disclosure}, and \emph{likes} in our model for the human annotations because all three variables are correlated with the Perspective scores. + +\section{Systematic Literature Review} \label{appendix:lit.review} + +To understand scholarly awareness of measurement errors, we conducted a systematic literature review of common practices in SML-based text classification. 
+
+\subsection{Identification of Relevant Studies}
+To identify relevant studies, we relied on four recent reviews on the use of AC with a focus on communication science \citep{baden_three_2022, hase_computational_2022, junger_unboxing_2022, song_validations_2020}. We contacted authors of respective studies who, thankfully, either already published their data in an open-science approach or shared their data with us when asked.
+Based on their reviews, we collected \emph{N} = 110 studies that, according to their analyses, included some type of SML (for an overview, see Figure \ref{fig:FigureA1}).
+
+\begin{figure}
+ \centering
+ \includegraphics{measurement_flow.pdf}
+ \caption{Identifying relevant studies for the literature review}
+ \label{fig:FigureA1}
+\end{figure}
+
+We first removed 8 duplicate studies identified by several reviews. Two coders then coded the remaining \emph{N} = 102 studies of our preliminary sample for relevance. After an intercoder test (\emph{N} = 10, $\alpha$ = .89), coders sorted studies into one of four categories: Similar to previous reviews \citep{hase_computational_2022}, we only included studies either focusing on methodologically advancing SML-based ACs (Code = 1) or applying the method in empirical studies (Code = 2). In contrast, we removed studies that did not include any SML approach (Code = 3) or only used SML-based text classification for data cleaning, not data analysis (Code = 4)—for instance to sort out topically irrelevant articles.
+
+Subsequently, \emph{N} = 69 studies remained in our sample of relevant articles. Out of these, only empirical studies (\emph{N} = 48) were coded in further detail. We explicitly excluded methodological studies from our analysis of common practices within SML-based text classification since these will likely include far more robustness and validity tests than commonly employed in empirical settings. 
+
+\subsection{Manual Coding of Relevant Empirical Studies}
+For the remaining \emph{N} = 48 empirical studies, we created a range of variables (for an overview, see Table \ref{tab:TableA1}). Based on data from the Social Sciences Citation Index (SSCI), we identified whether studies were published in journals classified as belonging to \emph{Communication} and their \emph{Impact} according to their H index. In addition, two coders manually coded...
+\begin{itemize}
+ \item the type of variables created via SML-based ACs using the variables \emph{Dichotomous} (0 = No, 1 = Yes), \emph{Categorical} (0 = No, 1 = Yes), \emph{Ordinal} (0 = No, 1 = Yes), \emph{Metric} (0 = No, 1 = Yes),
+ \item whether variables were used in descriptive or multivariate analyses using the variables \emph{Descriptive} (0 = No, 1 = Yes), \emph{Independent} (0 = No, 1 = Yes), \emph{Dependent} (0 = No, 1 = Yes),
+ \item how classifiers were trained and validated via manually annotated data using the variables \emph{Size Training Data} (Open String), \emph{Size Test Data} (Open String), \emph{Size Data Intercoder Test} (Open String), \emph{Intercoder Reliability} (Open String), \emph{Accuracy of Classifier} (Open String),
+ \item and whether articles mentioned and/or corrected for misclassifications using the variables \emph{Error Mentioned} (0 = No, 1 = Yes) and \emph{Error Corrected} (0 = No, 1 = Yes).
+
+\end{itemize}
+
+\begin{table}
+ \caption{Variables Coded for Relevant Empirical Studies}
+ \label{tab:TableA1}
+ \begin{tabular}{l l l l} \toprule
+ Category & Variable & Krippendorff's $\alpha$ & \% or \emph{M} (\emph{SD}) \\ \midrule
+ Type of Journal & \emph{Communication} & n.a. & 55.1\% \\
+ & \emph{Impact} & n.a. 
& \emph{M = 3.69} \\ + Type of Variable & \emph{Dichotomous} & 0.86 & 50\% \\ + & \emph{Categorical} & 1 & 22.9\% \\ + & \emph{Ordinal} & 0.85 & 10.4\% \\ + & \emph{Metric} & 1 & 35.4\% \\ + Use of Variable & \emph{Descriptive} & 0.89 & 89.6\% \\ + & \emph{Independent} & 1 & 43.8\% \\ + & \emph{Dependent} & 1 & 39.6\% \\ + Information on Classifier & \emph{Size Training Data} & 0.95 & 66.7\% \\ + & \emph{Size Test Data} & 0.79 & 52.1\% \\ + & \emph{Size Data Intercoder Test} & 1 & 43.8\% \\ + & \emph{Intercoder Reliability} & 0.8 & 56.2\% \\ + & \emph{Accuracy of Classifier} & 0.77 & 85.4\% \\ + Measurement Error & \emph{Error Mentioned} & 1 & 18.8\% \\ + & \emph{Error Corrected} & 1 & 2.1\% \\ \bottomrule + \end{tabular} +\end{table} + +\subsection{Results} + +Overall, more than half of all studies were published in communication journals (\emph{Communication}: 55.1\%). Across domains, SML-based ACs were most often used to create dichotomous measurements (\emph{Dichotomous}: 50\%), followed by variables on a metric (\emph{Metric}: 35.4\%), categorical (\emph{Categorical}: 22.9\%), or ordinal scale (\emph{Ordinal}: 10.4\%). Almost all studies used SML-based classifications to report descriptive statistics on created variables (\emph{Descriptive}: 89.6\%). However, many also used these in downstream analyses, either as dependent variables (\emph{Dependent}: 39.6\%) or independent variables (\emph{Independent}: 43.8\%) in multivariate models. 
When regressing the use of multivariate models for each variable on the status of journals in which respective studies were published (\emph{Impact}) via a mixed model where variables are nested in studies and journals, we find that both correlate: The use of multivariate modeling is more widespread in high-impact journals (\emph{B} = 13.525, \emph{p} < .001).
+
+Overall, we found a persistent lack of transparency in reporting important information: Only slightly more than half of all studies included information on, for instance, the size of training or test sets (\emph{Size Training Data}: 66.7\%, \emph{Size Test Data}: 52.1\%). Even fewer included information on the size of manually annotated data for intercoder testing (\emph{Size Data Intercoder Test}: 43.8\%) or respective reliability values (\emph{Intercoder Reliability}: 56.2\%). Lastly, not all studies reported how well their classifier performed by using metrics such as precision, recall, or F1-scores (\emph{Accuracy of Classifier}: 85.4\%).
+
+Finally, we also found that few studies mentioned the issue of misclassification or measurement errors (\emph{Error Mentioned}: 18.8\%), with only a single study correcting for such (\emph{Error Corrected}: 2.1\%).
+
+\section{Other methods not tested}
+\label{appendix:other.methods}
+Simulation extrapolation (SIMEX) uses a simulation of the process generating measurement error to model how measurement error affects an analysis and ultimately to approximate an analysis with no measurement error \citep{carroll_measurement_2006}. SIMEX is a very powerful and general method that can be used without validation data, but may be more complicated than necessary to correct measurement error from ACs when validation data are available. Likelihood methods are easy to apply to classification errors, so SIMEX seems unnecessary \citep{carroll_measurement_2006}.
+
+Score function methods derive estimating equations for models without measurement error and then solve them either exactly or using numerical integration \citep{carroll_measurement_2006, yi_handbook_2021}.
+The main advantage that score function methods may have over likelihood-based methods is that they do not require distributional assumptions about the mismeasured covariates. This advantage has limited use in the context of ACs because binary classifications must follow Bernoulli distributions.
+
+We also do not consider Bayesian methods (aside from the Amelia implementation of multiple imputation) because we expect these to have similar limitations to the maximum likelihood methods we consider. Bayesian methods may have other advantages resulting from posterior inference, and may generalize to a wide range of applications, but specifying prior distributions introduces additional methodological complexity and posterior inference is computationally intensive, making Bayesian methods less convenient for Monte Carlo simulation.
+
+
+\section{Deriving the maximum likelihood approach}
+\label{appendix:derivation}
+\subsection{When an AC measures a covariate}
+To show why $\mathcal{L}(\Theta|Y,W)$ can be factored, we follow \citet{carroll_measurement_2006} and begin by observing the following fact from basic probability theory.
+
+\begin{align}
+  P(Y,W) &= \sum_{x}{P(Y,W,X=x)}
+  \label{eq:mle.covariate.chainrule.1}\\
+  &= \sum_{x}{P(Y|W,X=x)P(W,X=x)}
+  \label{eq:mle.covariate.chainrule.2}\\
+  &= \sum_{x}{P(Y,X=x)P(W|Y,X=x)} \label{eq:mle.covariate.chainrule.3} \\
+  &= \sum_{x}{P(Y|X=x)P(W|Y,X=x)P(X=x)} \label{eq:mle.covariate.chainrule.4}
+\end{align}
+\noindent
+Equation \ref{eq:mle.covariate.chainrule.1} integrates $X$ out of the joint probability of $Y$ and $W$ by summing over its possible values $x$. If $X$ is binary, this means adding the probability given $x=1$ to the probability given $x=0$. When $X$ is observed, say $x=0$, then $P(X=0)=1$ and $P(X=1)=0$.
As a result, only the true value of $X$ contributes to the likelihood. However, when $X$ is unobserved, all of its possible values contribute. In this way, integrating out $X$ allows us to include data where $X$ is not observed in the likelihood.
+
+Equation \ref{eq:mle.covariate.chainrule.2} uses the chain rule of probability to factor the joint probability $P(Y,W)$ of $Y$ and $W$ from $P(Y|W,X)$, the conditional probability of $Y$ given $W$ and $X$, and $P(W,X=x)$, the joint probability of $W$ and $X$. This lets us see how maximizing $\mathcal{L}(\Theta|Y,W)$, the joint likelihood of $\Theta$ given $Y$ and $W$, accounts for the uncertainty of the automatic classifications. For each possible value $x$ of $X$, it weights the model of the outcome $Y$ by the probability that $x$ is the true value and that the AC outputs $W$.
+
+Equation \ref{eq:mle.covariate.chainrule.3} shows a different way to factor the joint probability $P(Y,W)$ so that $W$ is not in the model of $Y$. Since $X$ and $W$ are correlated, if $W$ is in the model for $Y$, estimation of $B_1$ will be biased. By including $Y$ in the model for $W$, Equation \ref{eq:mle.covariate.chainrule.3} can account for differential measurement error.
+
+Equation \ref{eq:mle.covariate.chainrule.4} factors $P(Y,X=x)$, the joint probability of $Y$ and $X$, into $P(Y|X=x)$, the conditional probability of $Y$ given $X$, $P(W|X=x,Y)$, the conditional probability of $W$ given $X$ and $Y$, and $P(X=x)$, the probability of $X$. This shows that fitting a model of $Y$ given $X$ in this framework, such as the regression model $Y = B_0 + B_1 X + B_2 Z$, requires including $X$. Without validation data, $P(X=x)$ is difficult to calculate without strong assumptions \citep{carroll_measurement_2006}, but $P(X=x)$ can easily be estimated using a sample of validation data.
+
+%Our appendix includes supplementary simulations that explore how robust our method is to model misspecification.
+
+Equations \ref{eq:mle.covariate.chainrule.1}--\ref{eq:mle.covariate.chainrule.4} demonstrate the generality of this method because the conditional probabilities may be calculated using a wide range of probability models. For simplicity, we proceed with a focus on linear regression for the probability of $Y$ and logistic regression for the probability of $W$ and the probability of $X$. However, more flexible probability models such as generalized additive models (GAMs) or Gaussian process classification may be useful for modeling nonlinear conditional probability functions \citep{williams_bayesian_1998}.
+
+\subsection{When an AC measures the outcome}
+
+Again, we will maximize $\mathcal{L}(\Theta|Y,W)$, the joint likelihood of the parameters $\Theta$ given the outcome $Y$ and the automatic classifications $W$, which measure the dependent variable $Y$ \citep{carroll_measurement_2006}.
+Therefore, we use the law of total probability to integrate out $Y$ and the chain rule of probability to factor the joint probability into $P(Y)$, the probability of $Y$, and $P(W|Y)$, the conditional probability of $W$ given $Y$.
+
+\begin{align}
+  P(Y,W) &= \sum_{y}{P(Y=y,W)} \\
+  &= \sum_{y}{P(Y=y)P(W|Y=y)}
+\end{align}
+
+As above, the conditional probability of $W$ given $Y$ must be calculated using a model. The range of possible models is vast and analysts must choose a model that accurately describes the conditional dependence of $W$ on $Y$.
+
+We implement these methods in \texttt{R} using the \texttt{optim} function for maximum likelihood estimation. Our implementation supports models specified using \texttt{R}'s formula syntax and can fit linear and logistic regression models when an AC measures a covariate and logistic regression models when an AC measures the outcome. Our implementation provides two methods for approximating confidence intervals: The Fisher information quadratic approximation, and the profile likelihood method provided in the \texttt{R} package \texttt{bbmle}.
The Fisher approximation usually works well in simple models fit to large samples and is fast enough for practical use for the large number of simulations we present. However, the profile likelihood method provides more accurate confidence intervals \citep{carroll_measurement_2006}.
+
+\section{misclassificationmodels: The R package} \label{appendix:misclassificationmodels}
+
+The package provides a function to conduct regression analysis that also corrects for misclassification in a proxy variable using the information in validation data. The function is very similar to \textbf{glm()} but with two changes:
+
+\begin{itemize}
+\item The formula interface has been extended with the double-pipe operator to denote a proxy variable. For example, \textbf{x || w} indicates \textit{w} is the proxy of the ground truth \textit{x}.
+\item The validation data must be provided.
+\end{itemize}
+
+The following code listing shows a typical correction scenario:
+\lstset{style=mystyle}
+\begin{lstlisting}[language=R, caption=A demo of misclassificationmodels]
+library(misclassificationmodels)
+## research_data contains the following columns: y, w, z
+## val_data contains the following columns: y, w, x, z
+# w is a proxy of x
+res <- glm_fixit(formula = y ~ x || w + z,
+                 data = research_data,
+                 data2 = val_data)
+summary(res)
+\end{lstlisting}
+
+% For more information about the package, please refer to our online appendix.
+
+
+\section{Additional plots from Simulations 1 and 2}
+\label{appendix:main.sim.plots}
+
+\begin{figure}
+<>=
+
+p <- plot.simulation.iv(plot.df.example.1,iv='z')
+
+grid.draw(p)
+@
+\caption{Estimates of $B_Z$ in \emph{simulation 1a}, multivariate regression with $X$ measured using machine learning and model accuracy independent of $X$, $Y$, and $Z$.
All methods obtain precise and accurate estimates given sufficient validation data.}
+\end{figure}
+
+\begin{figure}
+<>=
+p <- plot.simulation.iv(plot.df.example.2, iv='z')
+grid.draw(p)
+@
+\caption{Estimates of $B_Z$ in multivariate regression with $X$ measured using machine learning, model accuracy correlated with $X$ and $Y$, and differential error. Only multiple imputation and our MLE model with a full specification of the error model obtain consistent estimates of $B_Z$.\label{fig:sim1b.z}}
+\end{figure}
+
+\begin{figure}
+<>=
+#plot.df <-
+p <- plot.simulation.dv(plot.df.example.3,'z')
+grid.draw(p)
+@
+\caption{Estimates of $B_Z$ in \emph{simulation 2a}, multivariate regression with $Y$ measured using an AC that makes errors. Only our MLE model with a full specification of the error model obtains consistent estimates.}
+\end{figure}
+
+\begin{figure}
+<>=
+#plot.df <-
+p <- plot.simulation.dv(plot.df.example.4,'x')
+grid.draw(p)
+@
+\caption{Estimates of $B_X$ in \emph{simulation 2b}, multivariate regression with $Y$ measured using machine learning, model accuracy correlated with $Z$ and $Y$, and differential error. Only our MLE model with a full specification of the error model obtains consistent estimates.}
+\end{figure}
+
+
+% \section{Additional simulations}
+% \subsection{Heteroskedastic but nondifferential misclassifications}\label{appendix:sim1.hetero}
+
+% \subsection{Imbalanced covariates}
+% \label{appendix:sim1.imbalanced}
+\end{document}
+
+\subsection{Profile likelihood improves uncertainty quantification}
+\label{appendix:sim1.profile}
+
+\section{Four prototypical scenarios}
+
+We must clearly distinguish four types of measurement error that arise in this context.
+The first type occurs when a covariate is measured with error and this error can be made statistically independent of the outcome by conditioning on other covariates. In this case the error is called nondifferential.
+
+The second type, differential error, occurs when a covariate is measured with error that is systematically correlated with the outcome, even after accounting for the other covariates \citep{carroll_measurement_2006}.
+These two types of error apply when an AC is used to measure a covariate.
+When an AC is used to measure an outcome, errors can be random—uncorrelated with the covariates—or they can be systematic—correlated with a covariate.
+
+Nondifferential measurement error and random error in the outcome are relatively straightforward to correct. We will argue below that differential measurement error can be avoided when an AC is carefully designed. Yet the risk of differential measurement error is considerable in such cases as multilingual text classification, because the ease of classification may systematically vary in relation to the outcome and covariates, or when a model trained in one context is applied in another.
+
+Research using ACs based on supervised machine learning may be particularly prone to differential and systematic measurement error. Problems of bias and generalizability have received considerable attention in the field of machine learning more generally.
+
+
+
+%Statistical theory and simulations have shown that all these methods are effective (though some are more efficient) when ``ground-truth'' observations are unproblematic and when classifiers only make random, but not systematic, errors. We contribute by testing these methods in more difficult cases likely to arise in text-as-data studies.
+
+%
+% All prior methods for correcting measurement error using validation data presume that the validation data is error-free. However, the methodological content analysis literature has extensively studied the difficulties in human-labeling theoretically and substantively significant content categories through the lens of inter-coder reliability. We contribute novel methods that account for both inter-coder reliability and machine classification error.
+ +Our monte-carlo simulations show that different error-correction methods fail in different cases and that none is always the best. For example, methods that can correct for differential error will be inefficient when none is present. In addition, Fong and Taylor \citep{fong_machine_2021}'s method-of-moments estimator exchanges distributional assumptions for an exclusion restriction and fails in different cases from methods based on parametric models, such as ours. + + +\subsection{Our Contributions} + +\begin{itemize} + \item Introduce this methodological problem to Communication Research; argue that this is not too far from ignoring disagreement in manual codings + \item Document the prevalence of automated content analysis to show the importance of the problem. + \item Summarize available statistical methods for adjusting for measurement error and bias. + \item Evaluate these methods in realistic scenarios to show when they work and when they do not. + \item Recommend best practices for applied automated content analysis. + \item Chart directions for future research to advance methods for automated content analysis. +\end{itemize} + +\section{Background} + +\subsection{Methods used to correct measurement error in simulation scenarios} + +We'll compare the performance of these methods in terms of: + +\begin{itemize} + \item Consistency: Does the method recover the true parameter on average? + \item Efficiency: How precise are the estimates? Does precision improve with sample size? + \item Robustness: Does the method work when parametric assumptions are violated? +\end{itemize} + +We'll run simulations that vary along these dimensions: + +\begin{itemize} + \item Explained variance (function of $B_XZ$ and $\varepsilon$) + \item Predictor accuracy (we'll always have balanced classes). 
+
+  \item interrater reliability
+  \item Data type of measured variable: binary / Likert
+  \item Distribution of other variable: normal, lognormal, binary
+  \item Unlabeled sample size
+  \item Labeled sample size
+\end{itemize}
+
+
+\subsection{Explanation of Bayesian Networks / Causal DAGs for representing scenarios}
+
+In this section we present the design of our simulation studies. So far I have designed the following three scenarios (though I have some work to do to polish them and fix bugs):
+
+\subsection{Definition of MLE Models}
+
+We model example 1 and 2,
+\section{Discussion}
+
+\citet{fong_machine_2021} argue, and we agree, that a carefully designed AC can avoid forms of measurement error that are more difficult to deal with. However, tailoring an AC from scratch requires considerable effort and expense compared to reusing an AC developed for common purposes, as the wide popularity that classifiers like LIWC and Perspective enjoy demonstrates. Our recommended approaches of GMM calibration, multiple imputation and likelihood modeling can all be conceived as fine-tuning steps that transform general purpose classifiers into tailored classifiers capable of providing reliable inferences.
+
+A natural response to the above extended meditation on measurement error in the context of automatic classifiers is to question the purpose of using ACs at all. It seems strange to think that by using a model's predictions of a variable to build another model predicting that same variable we can solve the problems introduced by the first model. Indeed, the more complex modeling strategies we propose are only necessary to correct the shortcomings of an AC. We envision ACs such as commercial APIs, widely used dictionaries, or ACs that are generalized to new contexts as likely to have such shortcomings because such ACs may provide information about a variable that would be difficult to obtain otherwise.
+ +Even though machine learning algorithms such as random forests might obtain greater performance at automatic classification, this comes at the expense of bias that may be difficult to model using validation data \citep{breiman_statistical_2001}. +Instead of tayloring an AC for a research study, using predictive features directly to infer missing validation data using multiple imputation or to model the probability of a variable in the likelihood modeling framework may be simpler and more likely to result in valid inferences. + +% A common strategy is to use a machine learning classifier $g(\mathbf{K})$ (e.g., the Perspective API) to obtain Often, researchers use the $N^*$ observations of $\mathbf{x}$ to build $\hat{\mathbf{w}}=g(\mathbf{Z})$. Other times they may use a different ``black-box'' model $g(\mathbf{Z})$ that is perhaps trained on a larger dataset different from that used to estimate $B$. + + +% Although it is often claimed that this bias is a conservative ``attenuation'' of estimates toward zero, this is only necessarily the case of ordinary linear regression with 2 variables when the bias is uncorrelated with $\mathbf{x}$ and $\mathbf{y}$ \citep{carroll_measurement_2006}. What's more, in conditions likely to occur in social scientific research, such as when the explained variance of the regression model is very low, the estimate of $\hat{B}^*$ can be \emph{more precise} than that of $\hat{B}$. As a result, the measurement error of a machine learning classifier is not always conservative but can result in false discovery \citep{carroll_measurement_2006}. + + + Note that specific forms of statistical bias are of particular concern for scientific measurement and although these may often be related to biases against social groups \cite[][e.g.]{obermeyer_dissecting_2019}, these notions of bias are not equivalent \cite{kleinberg_algorithmic_2018}. Introduce multi-lingual text classification as an example. 
+ +(attenuation bias / correlation dilution), but this bias towards zero defeats the purpose of automated content analysis in the first place! +\subsection{Rationale} +\begin{itemize} + \item Automated content analysis is all the rage. Tons of people are doing it, but they all have the same problem: their models are inaccurate. They don't know if the model is accurate enough to trust their inferences. + + \item Social scientists often adopt performance criteria and standards for machine learning predictors used in computer science. These criteria do not tell how well a predictor works as a measurement device for a given scientific study. + + \item In general, prediction errors result in biased estimates of regression coefficients. In simple models with optimistic assumptions this bias will be conservative (attenuation bias / correlation dilution), but this bias towards zero defeats the purpose of automated content analysis in the first place! + + \item In more general scenarios (e.g., GLMs, differential error, multivariate regression), prediction errors can create bias that is not conservative. + + \item Statisticians have studied measurement error for a long time, and have developed several methods, but the settings they consider most often lack features of automated content analysis. Specifically: + + \begin{itemize} + \item The availability of (potentially inaccurate) validation data. (Most methods are designed for \emph{sensors} where the distribution of the error can be known, but error can be assumed to be nondifferential). + \item Differential error—the amount of noise is not independent of observations. + + \item The possibility of bias in addition to noise. + \end{itemize} + + \item Conducting simulations to evaluate existing methods including regression calibration, the extension of regression calibration by Fong and Taylor (2021) \cite{fong_machine_2021}, multiple imputation, and simulation extrapolation. 
+
+
+  \item These issues become even more important, and also more complex, in important research designs such as those involving multiple languages.
+
+
+\subsection{Imperfect human-coded validation data}
+
+All approaches stated above depend on the human-coded validation data $X^*$. Most often, ACs are also trained on human-coded material. The content analysis literature has long documented how unreliable human coding can be, and manual content analysis papers routinely report intercoder reliability as a result \citep{krippendorff_content_2018}. Intercoder reliability metrics typically assume that human coders are interchangeable and the only source of disagreement is ``coder idiosyncrasies'' \citep{krippendorff_reliability_2004}. A previous Monte Carlo simulation operationalizes these ``coder idiosyncrasies'' as a fixed probability that a coder makes a random guess independent of the coder and of the material \citep{geis_statistical_2021}. In this work, we accept this ``interchangeable coders making random errors'' (ICMRE) assumption. Under this optimistic assumption, only ``coder idiosyncrasies'' cause misclassification error in the validation data.
+
+\citet{song_validations_2020}'s Monte Carlo simulation demonstrates that human-coded $X^*$ with a lower intercoder reliability generates more biased classification accuracy of the AC. So even if manual annotation errors are only due to the ICMRE assumption, they may bias results. None of the above correction approaches account for the imperfect human coding of $X^*$, although \citet{zhang_how_2021} identifies the omission of this as a weakness of his proposed approach. Even in the context of manual content analysis, these ``coder idiosyncrasies'' are not routinely adjusted for (although methods are available, e.g. \citet{bachl_correcting_2017}).
+
+An advantage of our proposed method over prior approaches is that it automatically accounts for imperfection of human coding under the ICMRE assumption because the random errors in validation data are independent of the AC errors.
+
+Precision of estimates can be improved using more than one independent coder. With two coders, for example, two sets of validation data are generated, $X^*_{1}$, $X^*_{2}$. We then list-wise delete all data where $X^*_{1} \neq X^*_{2}$. If the ICMRE assumption holds, the deleted data, where two coders disagree, can only be due to ``coder idiosyncrasies''. As coders are assumed to be interchangeable, the probability of two interchangeable coders both making the same misclassification error is much less than the probability that one makes a misclassification error. Using such ``labeled-only, coherent-only'' (LOCO) data improves the precision of consistent estimates in our simulation.
+
+
+\subsection{Measurement error in validation data}
+
+The simulations above assume that validation data is perfectly accurate. This is obviously unrealistic because validation data, such as that obtained from human classifiers, normally has inaccuracies.
+To evaluate the robustness of correction methods to imperfect validation data, we extend our scenarios with nondifferential error with simulated validation data that is misclassified \Sexpr{format.percent(med.loco.accuracy)} of the time at random.
+
+\subsubsection{Recommendation II: Employ at Least Two Manual Coders, not One}
+
+Independent of whether researchers use manually annotated data for the feasible approach or an AC, principles of manual content analysis, including justifying one's sample size, still apply.
+%\citep[for details]{krippendorff_content_2018}.
+%TODO uncomment below after ICA
+Arguably, the most important problem in traditional content
+analysis is whether human coders are capable of reliably classifying content into the categories under study.
With multiple human coders labelling the same data, metrics such as Krippendorff's $\alpha$
+%and Gwet's $AC$
+can quantify ``intercoder reliability'' in terms of how often coders agree and disagree \citep{krippendorff_reliability_2004}.
+These metrics all assume that disagreements are due to
+``coder idiosyncrasies'' that are independent of the data \citep{krippendorff_reliability_2004}.
+
+We recommend that such metrics also be used to establish intercoder reliability in all of the human-labeled data, not only a smaller subset for intercoder testing.
+Moreover, the gold standard data is also reused in later steps and those steps can be influenced by these ``coder idiosyncrasies'' \citep{song_validations_2020}.
+We recommend that the gold standard data should be manually coded by two coders, not one. This allows the calculation of interrater reliability, a more accurate validation of the AC's performance, and better correction. Using additional independent coders would eliminate even more of these ``coder idiosyncrasies'' than two coders.
+
+
+
+However, the gains from introducing additional coders are diminishing, so using more than two coders may not be cost effective.
+\end{itemize} + + +\section{Accounting for errors in the validation data} + +In this section, we extend \emph{Simulation 1b} and \emph{Simulation 2b} with + + +\begin{figure} +<>= +#plot.df <- +p <- plot.simulation.irr(plot.df.example.5,'z') +grid.draw(p) +@ +\caption{Estimates of $B_Z$ in multivariate regression with $X$ measured using machine learning, with validation data collected by 2 independent coders that make random errors.} +\end{figure} +\begin{figure} +<>= +#plot.df <- +p <- plot.simulation.irr(plot.df.example.5,'x') +grid.draw(p) +@ +\caption{Estimates of $B_X$ in multivariate regression with $X$ measured using machine learning, with validation data collected by 2 independent coders that make random errors.} +\end{figure} + +\begin{figure} +<>= +#plot.df <- +p <- plot.simulation.irr.dv(plot.df.example.6,'z') +grid.draw(p) +@ +\caption{Estimates of $B_Z$ in multivariate regression with $Y$ measured using machine learning, with validation data collected by 2 independent coders that make random errors.} +\end{figure} +\begin{figure} +<>= +#plot.df <- +p <- plot.simulation.irr.dv(plot.df.example.6,'x') +grid.draw(p) +@ +\caption{Estimates of $B_X$ in multivariate regression with $Y$ measured using machine learning, with validation data collected by 2 independent coders that make random errors.} +\end{figure} diff --git a/#article.tex# b/#article.tex# new file mode 100644 index 0000000..8ef7de5 --- /dev/null +++ b/#article.tex# @@ -0,0 +1,36 @@ +\documentclass[floatsintext, mask, man]{apa7} +\begin{knitrout} +\definecolor{shadecolor}{rgb}{0.969, 0.969, 0.969}\color{fgcolor}\begin{kframe} + + +{\ttfamily\noindent\bfseries\color{errorcolor}{\#\# Error in unlist(example.1["{}med.cor.xz"{}]): object 'example.1' not found}} + +{\ttfamily\noindent\color{warningcolor}{\#\# Warning in file(filename, "{}r"{}, encoding = encoding): cannot open file 'resource/real\_data\_example.R': No such file or directory}} + +{\ttfamily\noindent\bfseries\color{errorcolor}{\#\# 
Error in file(filename, "{}r"{}, encoding = encoding): cannot open the (connection}}\end{kframe} +\end{knitrout} + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/Bibliography.bib b/Bibliography.bib new file mode 100644 index 0000000..9123037 --- /dev/null +++ b/Bibliography.bib @@ -0,0 +1,2116 @@ +@article{adcock_measurement_2001, + title = {Measurement {{Validity}}: {{A Shared Standard}} for {{Qualitative}} and {{Quantitative Research}}}, + shorttitle = {Measurement {{Validity}}}, + author = {Adcock, Robert and Collier, David}, + date = {2001-09}, + journaltitle = {American Political Science Review}, + volume = {95}, + number = {3}, + pages = {529--546}, + publisher = {{Cambridge University Press}}, + issn = {0003-0554, 1537-5943}, + abstract = {Scholars routinely make claims that presuppose the validity of the observations and measurements that operationalize their concepts. Yet, despite recent advances in political science methods, surprisingly little attention has been devoted to measurement validity. We address this gap by exploring four themes. First, we seek to establish a shared framework that allows quantitative and qualitative scholars to assess more effectively, and communicate about, issues of valid measurement. Second, we underscore the need to draw a clear distinction between measurement issues and disputes about concepts. Third, we discuss the contextual specificity of measurement claims, exploring a variety of measurement strategies that seek to combine generality and validity by devoting greater attention to context. 
Fourth, we address the proliferation of terms for alternative measurement validation procedures and offer an account of the three main types of validation most relevant to political scientists.}, + langid = {english}, + file = {/home/nathante/Zotero/storage/9XTPCM8L/Adcock_Collier_2001_Measurement Validity.pdf;/home/nathante/Zotero/storage/PNSZW6MZ/91C7A9800DB26A76EBBABC5889A50C8B.html} +} + +@unpublished{aliapoulios_gospel_2021, + title = {The {{Gospel According}} to {{Q}}: {{Understanding}} the {{QAnon Conspiracy}} from the {{Perspective}} of {{Canonical Information}}}, + shorttitle = {The {{Gospel According}} to {{Q}}}, + author = {Aliapoulios, Max and Papasavva, Antonis and Ballard, Cameron and De Cristofaro, Emiliano and Stringhini, Gianluca and Zannettou, Savvas and Blackburn, Jeremy}, + date = {2021-05-20}, + eprint = {2101.08750}, + eprinttype = {arxiv}, + primaryclass = {cs}, + abstract = {The QAnon conspiracy theory claims that a cabal of (literally) blood-thirsty politicians and media personalities are engaged in a war to destroy society. By interpreting cryptic "drops" of information from an anonymous insider calling themself Q, adherents of the conspiracy theory believe that Donald Trump is leading them in an active fight against this cabal. QAnon has been covered extensively by the media, as its adherents have been involved in multiple violent acts, including the January 6th, 2021 seditious storming of the US Capitol building. Nevertheless, we still have relatively little understanding of how the theory evolved and spread on the Web, and the role played in that by multiple platforms. To address this gap, we study QAnon from the perspective of "Q" themself. We build a dataset of 4,949 canonical Q drops collected from six "aggregation sites," which curate and archive them from their original posting to anonymous and ephemeral image boards. 
We expose that these sites have a relatively low (overall) agreement, and thus at least some Q drops should probably be considered apocryphal. We then analyze the Q drops' contents to identify topics of discussion and find statistically significant indications that drops were not authored by a single individual. Finally, we look at how posts on Reddit are used to disseminate Q drops to wider audiences. We find that dissemination was (initially) limited to a few sub-communities and that, while heavy-handed moderation decisions have reduced the overall issue, the "gospel" of Q persists on the Web.}, + archiveprefix = {arXiv}, + keywords = {Computer Science - Computers and Society,Computer Science - Social and Information Networks}, + file = {/home/nathante/Zotero/storage/V96424CW/Aliapoulios et al_2021_The Gospel According to Q.pdf;/home/nathante/Zotero/storage/USF2Z7ZX/2101.html} +} + +@article{araujo_automated_2020, + title = {Automated {{Visual Content Analysis}} ({{AVCA}}) in {{Communication Research}}: {{A Protocol}} for {{Large Scale Image Classification}} with {{Pre-Trained Computer Vision Models}}}, + shorttitle = {Automated {{Visual Content Analysis}} ({{AVCA}}) in {{Communication Research}}}, + author = {Araujo, Theo and Lock, Irina and van de Velde, Bob}, + options = {useprefix=true}, + date = {2020-10-01}, + journaltitle = {Communication Methods and Measures}, + volume = {14}, + number = {4}, + pages = {239--265}, + publisher = {{Routledge}}, + issn = {1931-2458}, + abstract = {The increasing volume of images published online in a wide variety of contexts requires communication researchers to address this reality by analyzing visual content at a large scale. Ongoing advances in computer vision to automatically detect objects, concepts, and features in images provide a promising opportunity for communication research. We propose a research protocol for Automated Visual Content Analysis (AVCA) to enable large-scale content analysis of images. 
It offers inductive and deductive ways to use commercial pre-trained models for theory building in communication science. Using the example of corporations’ website images on sustainability, we show in a step-by-step fashion how to classify a large sample (N = 21,876) of images with unsupervised and supervised machine learning, as well as custom models. The possibilities and pitfalls of these approaches are discussed, ethical issues are addressed, and application examples for future communication research are detailed.}, + annotation = {\_eprint: https://doi.org/10.1080/19312458.2020.1810648}, + file = {/home/nathante/Zotero/storage/YUAKMGKV/Araujo et al_2020_Automated Visual Content Analysis (AVCA) in Communication Research.pdf} +} + +@article{bachl_correcting_2017, + title = {Correcting {{Measurement Error}} in {{Content Analysis}}}, + author = {Bachl, Marko and Scharkow, Michael}, + date = {2017-04-03}, + journaltitle = {Communication Methods and Measures}, + shortjournal = {Communication Methods and Measures}, + volume = {11}, + number = {2}, + pages = {87--104}, + issn = {1931-2458, 1931-2466}, + langid = {english}, + file = {/home/nathante/Zotero/storage/3D3G9IP7/Bachl & Scharkow (2017) Correcting Measurement Error in CA.pdf;/home/nathante/Zotero/storage/76CKDXD8/Bachl und Scharkow - 2017 - Correcting Measurement Error in Content Analysis.pdf} +} + +@article{baden_three_2022, + title = {Three {{Gaps}} in {{Computational Text Analysis Methods}} for {{Social Sciences}}: {{A Research Agenda}}}, + shorttitle = {Three {{Gaps}} in {{Computational Text Analysis Methods}} for {{Social Sciences}}}, + author = {Baden, Christian and Pipal, Christian and Schoonvelde, Martijn and van der Velden, Mariken A. C. 
G.}, + options = {useprefix=true}, + date = {2022-01-02}, + journaltitle = {Communication Methods and Measures}, + shortjournal = {Communication Methods and Measures}, + volume = {16}, + number = {1}, + pages = {1--18}, + issn = {1931-2458, 1931-2466}, + abstract = {We identify three gaps that limit the utility and obstruct the progress of computational text analysis methods (CTAM) for social science research. First, we contend that CTAM development has prioritized technological over validity concerns, giving limited attention to the operationalization of social scientific measurements. Second, we identify a mismatch between CTAMs’ focus on extracting specific contents and document-level patterns, and social science researchers’ need for measuring multiple, often complex contents in the text. Third, we argue that the dominance of English language tools depresses comparative research and inclusivity toward scholarly communities examining languages other than English. We substantiate our claims by drawing upon a broad review of methodological work in the computational social sciences, as well as an inventory of leading research publications using quantitative textual analysis. Subsequently, we discuss implications of these three gaps for social scientists’ uneven uptake of CTAM, as well as the field of computational social science text research as a whole. Finally, we propose a research agenda intended to bridge the identified gaps and improve the validity, utility, and inclusiveness of CTAM.}, + langid = {english}, + file = {/home/nathante/Zotero/storage/4HHJ9VCN/Baden et al.
- 2022 - Three Gaps in Computational Text Analysis Methods .pdf} +} + +@book{barocas_fairness_2019, + title = {Fairness in {{Machine Learning}}}, + author = {Barocas, Solon and Hardt, Moritz and Narayanan, Arvind}, + date = {2019}, + publisher = {{fairmlbook.org}}, + langid = {english}, + file = {/home/nathante/Zotero/storage/UJ59IVEC/Barocas et al_2019_Fairness in Machine Learning.pdf} +} + +@inproceedings{bender_dangers_2021, + title = {On the {{Dangers}} of {{Stochastic Parrots}}: {{Can Language Models Be Too Big}}? 🦜}, + shorttitle = {On the {{Dangers}} of {{Stochastic Parrots}}}, + booktitle = {Proceedings of the 2021 {{ACM Conference}} on {{Fairness}}, {{Accountability}}, and {{Transparency}}}, + author = {Bender, Emily M. and Gebru, Timnit and McMillan-Major, Angelina and Shmitchell, Shmargaret}, + date = {2021-03-03}, + series = {{{FAccT}} '21}, + pages = {610--623}, + publisher = {{Association for Computing Machinery}}, + location = {{New York, NY, USA}}, + abstract = {The past 3 years of work in NLP have been characterized by the development and deployment of ever larger language models, especially for English. BERT, its variants, GPT-2/3, and others, most recently Switch-C, have pushed the boundaries of the possible both through architectural innovations and through sheer size. Using these pretrained models and the methodology of fine-tuning them for specific tasks, researchers have extended the state of the art on a wide array of tasks as measured by leaderboards on specific benchmarks for English. In this paper, we take a step back and ask: How big is too big? What are the possible risks associated with this technology and what paths are available for mitigating those risks? 
We provide recommendations including weighing the environmental and financial costs first, investing resources into curating and carefully documenting datasets rather than ingesting everything on the web, carrying out pre-development exercises evaluating how the planned approach fits into research and development goals and supports stakeholder values, and encouraging research directions beyond ever larger language models.}, + isbn = {978-1-4503-8309-7}, + file = {/home/nathante/Zotero/storage/VIEBVAWK/Bender et al_2021_On the Dangers of Stochastic Parrots.pdf} +} + +@article{blackwell_multiple_2012, + title = {Multiple {{Overimputation}}: {{A Unified Approach}} to {{Measurement Error}} and {{Missing Data}}}, + author = {Blackwell, Matthew and Honaker, James and King, Gary}, + date = {2012}, + pages = {50}, + abstract = {Although social scientists devote considerable effort to mitigating measurement error during data collection, they usually ignore the issue during data analysis. And although many statistical methods have been proposed for reducing measurement error-induced biases, few have been widely used because of implausible assumptions, high levels of model dependence, difficult computation, or inapplicability with multiple mismeasured variables. We develop an easy-to-use alternative without these problems; it generalizes the popular multiple imputation (mi) framework by treating missing data problems as a special case of extreme measurement error and corrects for both. Like mi, the proposed “multiple overimputation” (mo) framework is a simple two-step procedure. 
First, multiple (≈ 5) completed copies of the data set are created where cells measured without error are held constant, those missing are imputed from the distribution of predicted values, and cells (or entire variables) with measurement error are “overimputed,” that is imputed from the predictive distribution with observation-level priors defined by the mismeasured values and available external information, if any. In the second step, analysts can then run whatever statistical method they would have run on each of the overimputed data sets as if there had been no missingness or measurement error; the results are then combined via a simple averaging procedure. We also offer easy-to-use open source software that implements all the methods described herein.}, + langid = {english}, + file = {/home/nathante/Zotero/storage/CZAYZNIY/Blackwell et al_2012_Multiple Overimputation.pdf} +} + +@article{blackwell_unified_2017, + title = {A {{Unified Approach}} to {{Measurement Error}} and {{Missing Data}}: {{Overview}} and {{Applications}}}, + shorttitle = {A {{Unified Approach}} to {{Measurement Error}} and {{Missing Data}}}, + author = {Blackwell, Matthew and Honaker, James and King, Gary}, + date = {2017-08}, + journaltitle = {Sociological Methods \& Research}, + shortjournal = {Sociological Methods \& Research}, + volume = {46}, + number = {3}, + pages = {303--341}, + issn = {0049-1241, 1552-8294}, + abstract = {Although social scientists devote considerable effort to mitigating measurement error during data collection, they often ignore the issue during data analysis. And although many statistical methods have been proposed for reducing measurement error-induced biases, few have been widely used because of implausible assumptions, high levels of model dependence, difficult computation, or inapplicability with multiple mismeasured variables. 
We develop an easy-to-use alternative without these problems; it generalizes the popular multiple imputation (MI) framework by treating missing data problems as a limiting special case of extreme measurement error and corrects for both. Like MI, the proposed framework is a simple two-step procedure, so that in the second step researchers can use whatever statistical method they would have if there had been no problem in the first place. We also offer empirical illustrations, open source software that implements all the methods described herein, and a companion article with technical details and extensions.}, + langid = {english}, + file = {/home/nathante/Zotero/storage/AWQFGUYU/Blackwell et al. - 2017 - A Unified Approach to Measurement Error and Missin.pdf} +} + +@article{blackwell_unified_2017-1, + ids = {blackwell_unified_2017}, + title = {A {{Unified Approach}} to {{Measurement Error}} and {{Missing Data}}: {{Details}} and {{Extensions}}}, + shorttitle = {A {{Unified Approach}} to {{Measurement Error}} and {{Missing Data}}}, + author = {Blackwell, Matthew and Honaker, James and King, Gary}, + date = {2017-08-01}, + journaltitle = {Sociological Methods \& Research}, + shortjournal = {Sociological Methods \& Research}, + volume = {46}, + number = {3}, + pages = {342--369}, + publisher = {{SAGE Publications Inc}}, + issn = {0049-1241}, + abstract = {We extend a unified and easy-to-use approach to measurement error and missing data. In our companion article, Blackwell, Honaker, and King give an intuitive overview of the new technique, along with practical suggestions and empirical applications. Here, we offer more precise technical details, more sophisticated measurement error model specifications and estimation procedures, and analyses to assess the approach’s robustness to correlated measurement errors and to errors in categorical variables. 
These results support using the technique to reduce bias and increase efficiency in a wide variety of empirical research.}, + langid = {english}, + keywords = {imputation,inference,measurement error,missing data,modeling,multiple overimputation,selection}, + file = {/home/nathante/Zotero/storage/DNEG8WCP/Blackwell et al_2017_A Unified Approach to Measurement Error and Missing Data.pdf;/home/nathante/Zotero/storage/XH3BDRFS/Blackwell et al_2017_A Unified Approach to Measurement Error and Missing Data.pdf} +} + +@article{boukes_whats_2020, + title = {What’s the {{Tone}}? {{Easy Doesn}}’t {{Do It}}: {{Analyzing Performance}} and {{Agreement Between Off-the-Shelf Sentiment Analysis Tools}}}, + shorttitle = {What’s the {{Tone}}?}, + author = {Boukes, Mark and van de Velde, Bob and Araujo, Theo and Vliegenthart, Rens}, + options = {useprefix=true}, + date = {2020-04-02}, + journaltitle = {Communication Methods and Measures}, + volume = {14}, + number = {2}, + pages = {83--104}, + publisher = {{Routledge}}, + issn = {1931-2458}, + abstract = {This article scrutinizes the method of automated content analysis to measure the tone of news coverage. We compare a range of off-the-shelf sentiment analysis tools to manually coded economic news as well as examine the agreement between these dictionary approaches themselves. We assess the performance of five off-the-shelf sentiment analysis tools and two tailor-made dictionary-based approaches. The analyses result in five conclusions. First, there is little overlap between the off-the-shelf tools; causing wide divergence in terms of tone measurement. Second, there is no stronger overlap with manual coding for short texts (i.e., headlines) than for long texts (i.e., full articles). Third, an approach that combines individual dictionaries achieves a comparably good performance. Fourth, precision may increase to acceptable levels at higher levels of granularity. 
Fifth, performance of dictionary approaches depends more on the number of relevant keywords in the dictionary than on the number of valenced words as such; a small tailor-made lexicon was not inferior to large established dictionaries. Altogether, we conclude that off-the-shelf sentiment analysis tools are mostly unreliable and unsuitable for research purposes – at least in the context of Dutch economic news – and manual validation for the specific language, domain, and genre of the research project at hand is always warranted.}, + annotation = {\_eprint: https://doi.org/10.1080/19312458.2019.1671966}, + file = {/home/nathante/Zotero/storage/HXRTCXAZ/Boukes et al_2020_What’s the Tone.pdf} +} + +@article{boumans_taking_2015, + title = {Taking {{Stock}} of the {{Toolkit}}}, + author = {Boumans, Jelle W. and Trilling, Damian}, + date = {2015-11}, + journaltitle = {Digital Journalism}, + volume = {4}, + number = {1}, + pages = {8--23}, + publisher = {{Informa UK Limited}}, + issn = {2167-082X} +} + +@article{breiman_statistical_2001, + title = {Statistical {{Modeling}}: {{The Two Cultures}} (with Comments and a Rejoinder by the Author)}, + shorttitle = {Statistical {{Modeling}}}, + author = {Breiman, Leo}, + date = {2001-08}, + journaltitle = {Statistical Science}, + volume = {16}, + number = {3}, + pages = {199--231}, + publisher = {{Institute of Mathematical Statistics}}, + issn = {0883-4237, 2168-8745}, + abstract = {There are two cultures in the use of statistical modeling to reach conclusions from data. One assumes that the data are generated by a given stochastic data model. The other uses algorithmic models and treats the data mechanism as unknown. The statistical community has been committed to the almost exclusive use of data models. This commitment has led to irrelevant theory, questionable conclusions, and has kept statisticians from working on a large range of interesting current problems. 
Algorithmic modeling, both in theory and practice, has developed rapidly in fields outside statistics. It can be used both on large complex data sets and as a more accurate and informative alternative to data modeling on smaller data sets. If our goal as a field is to use data to solve problems, then we need to move away from exclusive dependence on data models and adopt a more diverse set of tools.}, + file = {/home/nathante/Zotero/storage/7ANK3STI/Breiman_2001_Statistical Modeling.pdf;/home/nathante/Zotero/storage/CHU57W33/1009213726.html} +} + +@article{budak_better_2021, + title = {Better {{Crowdcoding}}: {{Strategies}} for {{Promoting Accuracy}} in {{Crowdsourced Content Analysis}}}, + shorttitle = {Better {{Crowdcoding}}}, + author = {Budak, Ceren and Garrett, R. Kelly and Sude, Daniel}, + date = {2021-04-03}, + journaltitle = {Communication Methods and Measures}, + volume = {15}, + number = {2}, + pages = {141--155}, + publisher = {{Routledge}}, + issn = {1931-2458}, + abstract = {In this work, we evaluate different instruction strategies to improve the quality of crowdcoding for the concept of civility. We test the effectiveness of training, codebooks, and their combination through 2 × 2 experiments conducted on two different populations – students and Amazon Mechanical Turk workers. In addition, we perform simulations to evaluate the trade-off between cost and performance associated with different instructional strategies and the number of human coders. We find that training improves crowdcoding quality, while codebooks do not. 
We further show that relying on several human coders and applying majority rule to their assessments significantly improves performance.}, + annotation = {\_eprint: https://doi.org/10.1080/19312458.2021.1895977} +} + +@book{buonaccorsi_measurement_2010, + title = {Measurement {{Error}}: {{Models}}, {{Methods}}, and {{Applications}}}, + shorttitle = {Measurement {{Error}}}, + author = {Buonaccorsi, John P.}, + date = {2010-07-19}, + publisher = {{Chapman and Hall/CRC}}, + location = {{New York}}, + abstract = {Over the last 20 years, comprehensive strategies for treating measurement error in complex models and accounting for the use of extra data to estimate measurement error parameters have emerged. Focusing on both established and novel approaches, Measurement Error: Models, Methods, and Applications provides an overview of the main techniques and illu}, + isbn = {978-0-429-15035-7}, + pagetotal = {464}, + file = {/home/nathante/Zotero/storage/E8KV2QMH/Buonaccorsi_2010_Measurement Error.pdf} +} + +@article{burggraaff_through_2020, + title = {Through a Different Gate: {{An}} Automated Content Analysis of How Online News and Print News Differ}, + shorttitle = {Through a Different Gate}, + author = {Burggraaff, Christiaan and Trilling, Damian}, + date = {2020-01}, + journaltitle = {Journalism}, + shortjournal = {Journalism}, + volume = {21}, + number = {1}, + pages = {112--129}, + issn = {1464-8849, 1741-3001}, + abstract = {We investigate how news values differ between online and print news articles. We hypothesize that print and online articles differ in terms of news values because of differences in the routines used to produce them. Based on a quantitative automated content analysis of N\,=\,762,095 Dutch news items, we show that online news items are more likely to be follow-up items than print items, and that there are further differences regarding news values like references to persons, the power elite, negativity, and positivity. 
In order to conduct this large-scale analysis, we developed innovative methods to automatically code a wide range of news values. In particular, this article demonstrates how techniques such as sentiment analysis, named entity recognition, supervised machine learning, and automated queries of external databases can be combined and used to study journalistic content. Possible explanations for the difference found between online and offline news are discussed.}, + langid = {english}, + file = {/home/nathante/Zotero/storage/3DN8D8QJ/Burggraaff und Trilling - 2020 - Through a different gate An automated content ana.pdf} +} + +@article{burscher_teaching_2014, + title = {Teaching the {{Computer}} to {{Code Frames}} in {{News}}: {{Comparing Two Supervised Machine Learning Approaches}} to {{Frame Analysis}}}, + shorttitle = {Teaching the {{Computer}} to {{Code Frames}} in {{News}}}, + author = {Burscher, Björn and Odijk, Daan and Vliegenthart, Rens and de Rijke, Maarten and de Vreese, Claes H.}, + options = {useprefix=true}, + date = {2014-07-03}, + journaltitle = {Communication Methods and Measures}, + shortjournal = {Communication Methods and Measures}, + volume = {8}, + number = {3}, + pages = {190--206}, + issn = {1931-2458, 1931-2466}, + langid = {english} +} + +@article{burscher_using_2015, + title = {Using {{Supervised Machine Learning}} to {{Code Policy Issues}}: {{Can Classifiers Generalize}} across {{Contexts}}?}, + shorttitle = {Using {{Supervised Machine Learning}} to {{Code Policy Issues}}}, + author = {Burscher, Bjorn and Vliegenthart, Rens and De Vreese, Claes H.}, + date = {2015-05}, + journaltitle = {The ANNALS of the American Academy of Political and Social Science}, + shortjournal = {The ANNALS of the American Academy of Political and Social Science}, + volume = {659}, + number = {1}, + pages = {122--131}, + issn = {0002-7162, 1552-3349}, + abstract = {Content analysis of political communication usually covers large amounts of material and makes the 
study of dynamics in issue salience a costly enterprise. In this article, we present a supervised machine learning approach for the automatic coding of policy issues, which we apply to news articles and parliamentary questions. Comparing computer-based annotations with human annotations shows that our method approaches the performance of human coders. Furthermore, we investigate the capability of an automatic coding tool, which is based on supervised machine learning, to generalize across contexts. We conclude by highlighting implications for methodological advances and empirical theory testing.}, + langid = {english}, + file = {/home/nathante/Zotero/storage/697KE5ZI/Burscher et al. - 2015 - Using Supervised Machine Learning to Code Policy I.pdf} +} + +@article{calude_deluge_2017, + title = {The {{Deluge}} of {{Spurious Correlations}} in {{Big Data}}}, + author = {Calude, Cristian S. and Longo, Giuseppe}, + date = {2017-09-01}, + journaltitle = {Foundations of Science}, + shortjournal = {Found Sci}, + volume = {22}, + number = {3}, + pages = {595--612}, + issn = {1572-8471}, + abstract = {Very large databases are a major opportunity for science and data analytics is a remarkable new field of investigation in computer science. The effectiveness of these tools is used to support a “philosophy” against the scientific method as developed throughout history. According to this view, computer-discovered correlations should replace understanding and guide prediction and action. Consequently, there will be no need to give scientific meaning to phenomena, by proposing, say, causal relations, since regularities in very large databases are enough: “with enough data, the numbers speak for themselves”. The “end of science” is proclaimed. Using classical results from ergodic theory, Ramsey theory and algorithmic information theory, we show that this “philosophy” is wrong. For example, we prove that very large databases have to contain arbitrary correlations. 
These correlations appear only due to the size, not the nature, of data. They can be found in “randomly” generated, large enough databases, which—as we will prove—implies that most correlations are spurious. Too much information tends to behave like very little information. The scientific method can be enriched by computer mining in immense databases, but not replaced by it.}, + langid = {english}, + file = {/home/nathante/Zotero/storage/QKXXLRSH/Calude_Longo_2017_The Deluge of Spurious Correlations in Big Data.pdf} +} + +@book{carroll_measurement_2006, + title = {Measurement {{Error}} in {{Nonlinear Models}}}, + author = {Carroll, Raymond J and Ruppert, David and Stefanski, Leonard A and Crainiceanu, Ciprian M}, + date = {2006}, + series = {Monographs on {{Statistics}} and {{Applied Probability}}}, + edition = {2}, + number = {105}, + publisher = {{Chapman \& Hall/CRC}}, + location = {{Boca Raton}}, + langid = {english}, + pagetotal = {484}, + file = {/home/nathante/Zotero/storage/K4V878P6/Carroll et al_2006_Measurement Error in Nonlinear Models.pdf} +} + +@report{chan_automation-coerced_2022, + type = {preprint}, + title = {Automation-Coerced, Increased Dilution of Correlation}, + author = {Chan, Chung-hong}, + date = {2022-05-19}, + institution = {{SocArXiv}}, + abstract = {Automated data-making methods in content analysis —like all measurements— are fallible. The purpose of this simulation study is to show this fallibility can lead to the correlation dilution effect: the biased estimation of true effect size towards zero, or, in other words, the unexpected reduction in statistical power. An alternative way to measure the performance of automated procedures, which focuses on the retention of statistical power, is proposed. 
This paper ends with best practices regarding planning, executing, and reporting of automated content analyses.}, + file = {/home/nathante/Zotero/storage/JGAH7KJS/Chan - 2022 - Automation-coerced, increased dilution of correlat.pdf} +} + +@article{chen_alternative_2015, + title = {Alternative Errors-in-Variables Models and Their Applications in Finance Research}, + author = {Chen, Hong-Yi and Lee, Alice C. and Lee, Cheng-Few}, + date = {2015-11-01}, + journaltitle = {The Quarterly Review of Economics and Finance}, + shortjournal = {The Quarterly Review of Economics and Finance}, + volume = {58}, + pages = {213--227}, + issn = {1062-9769}, + abstract = {Specification error and measurement error are two major issues in finance research. The main purpose of this paper is (i) to review and extend existing errors-in-variables (EIV) estimation methods, including classical method, grouping method, instrumental variable method, mathematical programming method, maximum likelihood method, LISREL method, and the Bayesian approach; (ii) to investigate how EIV estimation methods have been used to finance related studies, such as cost of capital, capital structure, investment equation, and test capital asset pricing models; and (iii) to give a more detailed explanation of the methods used by Almeida et al. 
(2010).}, + langid = {english}, + keywords = {Capital asset pricing model,Capital structure,Cost of capital,Errors-in-variables,Investment equation,Measurement error}, + file = {/home/nathante/Zotero/storage/DQQQVE8K/S1062976914001057.html} +} + +@article{chiu_spin_2017, + title = {‘{{Spin}}’ in Published Biomedical Literature: {{A}} Methodological Systematic Review}, + shorttitle = {‘{{Spin}}’ in Published Biomedical Literature}, + author = {Chiu, Kellia and Grundy, Quinn and Bero, Lisa}, + editor = {Boutron, Isabelle}, + date = {2017-09-11}, + journaltitle = {PLOS Biology}, + shortjournal = {PLoS Biol}, + volume = {15}, + number = {9}, + pages = {e2002173}, + issn = {1545-7885}, + langid = {english}, + file = {/home/nathante/Zotero/storage/5ZPKBUV9/Chiu et al. (2017).pdf;/home/nathante/Zotero/storage/CWU2AZI6/Chiu et al. - 2017 - ‘Spin’ in published biomedical literature A metho.pdf} +} + +@misc{cjadams_jigsaw_2019, + title = {Jigsaw {{Unintended Bias}} in {{Toxicity Classification}}}, + author = {{cjadams} and {Daniel Borkan} and {inversion} and {Jeffery Sorensen} and {Lucas Dixon} and {Lucy Vasserman} and {nithum}}, + date = {2019}, + publisher = {{Kaggle}} +} + +@article{colleoni_echo_2014, + title = {Echo {{Chamber}} or {{Public Sphere}}? {{Predicting Political Orientation}} and {{Measuring Political Homophily}} in {{Twitter Using Big Data}}}, + shorttitle = {Echo {{Chamber}} or {{Public Sphere}}?}, + author = {Colleoni, Elanor and Rozza, Alessandro and Arvidsson, Adam}, + date = {2014-04-01}, + journaltitle = {Journal of Communication}, + shortjournal = {Journal of Communication}, + volume = {64}, + number = {2}, + pages = {317--332}, + issn = {0021-9916}, + abstract = {This paper investigates political homophily on Twitter. Using a combination of machine learning and social network analysis we classify users as Democrats or as Republicans based on the political content shared. 
We then investigate political homophily both in the network of reciprocated and nonreciprocated ties. We find that structures of political homophily differ strongly between Democrats and Republicans. In general, Democrats exhibit higher levels of political homophily. But Republicans who follow official Republican accounts exhibit higher levels of homophily than Democrats. In addition, levels of homophily are higher in the network of reciprocated followers than in the nonreciprocated network. We suggest that research on political homophily on the Internet should take the political culture and practices of users seriously.}, + file = {/home/nathante/Zotero/storage/T9R2UPEF/Colleoni et al_2014_Echo Chamber or Public Sphere.pdf;/home/nathante/Zotero/storage/IVJ4I8CA/4085994.html} +} + +@article{courtney_automatic_2020, + title = {Automatic Translation, Context, and Supervised Learning in Comparative Politics}, + author = {Courtney, Michael and Breen, Michael and McMenamin, Iain and McNulty, Gemma}, + date = {2020}, + journaltitle = {Journal of Information Technology \& Politics}, + volume = {17}, + number = {3}, + pages = {208--217}, + publisher = {{Taylor \& Francis}} +} + +@article{dobbrick_enhancing_2021, + title = {Enhancing {{Theory-Informed Dictionary Approaches}} with “{{Glass-box}}” {{Machine Learning}}: {{The Case}} of {{Integrative Complexity}} in {{Social Media Comments}}}, + shorttitle = {Enhancing {{Theory-Informed Dictionary Approaches}} with “{{Glass-box}}” {{Machine Learning}}}, + author = {Dobbrick, Timo and Jakob, Julia and Chan, Chung-Hong and Wessler, Hartmut}, + date = {2021-11-17}, + journaltitle = {Communication Methods and Measures}, + volume = {0}, + number = {0}, + pages = {1--18}, + publisher = {{Routledge}}, + issn = {1931-2458}, + abstract = {Dictionary-based approaches to computational text analysis have been shown to perform relatively poorly, particularly when the dictionaries rely on simple bags of words, are not specified for the 
domain under study, and add word scores without weighting. While machine learning approaches usually perform better, they offer little insight into (a) which of the assumptions underlying dictionary approaches (bag-of-words, domain transferability, or additivity) impedes performance most, and (b) which language features drive the algorithmic classification most strongly. To fill both gaps, we offer a systematic assumption-based error analysis, using the integrative complexity of social media comments as our case in point. We show that attacking the additivity assumption offers the strongest potential for improving dictionary performance. We also propose to combine off-the-shelf dictionaries with supervised “glass box” machine learning algorithms (as opposed to the usual “black box” machine learning approaches) to classify texts and learn about the most important features for classification. This dictionary-plus-supervised-learning approach performs similarly well as classic full-text machine learning or deep learning approaches, but yields interpretable results in addition, which can inform theory development on top of enabling a valid classification.}, + annotation = {\_eprint: https://doi.org/10.1080/19312458.2021.1999913}, + file = {/home/nathante/Zotero/storage/TVUYGPSE/Dobbrick et al_2021_Enhancing Theory-Informed Dictionary Approaches with “Glass-box” Machine.pdf} +} + +@article{elsherief_hate_2018, + title = {Hate {{Lingo}}: {{A Target-Based Linguistic Analysis}} of {{Hate Speech}} in {{Social Media}}}, + shorttitle = {Hate {{Lingo}}}, + author = {ElSherief, Mai and Kulkarni, Vivek and Nguyen, Dana and Wang, William Yang and Belding, Elizabeth}, + date = {2018-06-15}, + journaltitle = {Proceedings of the International AAAI Conference on Web and Social Media}, + volume = {12}, + number = {1}, + issn = {2334-0770}, + abstract = {While social media empowers freedom of expression and individual voices, it also enables anti-social behavior, online harassment, 
cyberbullying, and hate speech. In this paper, we deepen our understanding of online hate speech by focusing on a largely neglected but crucial aspect of hate speech -- its target: either directed towards a specific person or entity, or generalized towards a group of people sharing a common protected characteristic. We perform the first linguistic and psycholinguistic analysis of these two forms of hate speech and reveal the presence of interesting markers that distinguish these types of hate speech. Our analysis reveals that Directed hate speech, in addition to being more personal and directed, is more informal, angrier, and often explicitly attacks the target (via name calling) with fewer analytic words and more words suggesting authority and influence. Generalized hate speech, on the other hand, is dominated by religious hate, is characterized by the use of lethal words such as murder, exterminate, and kill; and quantity words such as million and many. Altogether, our work provides a data-driven analysis of the nuances of online-hate speech that enables not only a deepened understanding of hate speech and its social implications, but also its detection.}, + issue = {1}, + langid = {english}, + keywords = {generalized hate}, + file = {/home/nathante/Zotero/storage/6RZTDKS4/ElSherief et al_2018_Hate Lingo.pdf} +} + +@article{erickson_two-step_2002, + title = {Two-{{Step GMM Estimation}} of the {{Errors-in-Variables Model Using High-Order Moments}}}, + author = {Erickson, Timothy and Whited, Toni M.}, + date = {2002}, + journaltitle = {Econometric Theory}, + volume = {18}, + number = {3}, + eprint = {3533649}, + eprinttype = {jstor}, + pages = {776--799}, + publisher = {{Cambridge University Press}}, + issn = {0266-4666}, + abstract = {We consider a multiple mismeasured regressor errors-in-variables model where the measurement and equation errors are independent and have moments of every order but otherwise are arbitrarily distributed. 
We present parsimonious two-step generalized method of moments (GMM) estimators that exploit overidentifying information contained in the high-order moments of residuals obtained by "partialling out" perfectly measured regressors. Using high-order moments requires that the GMM covariance matrices be adjusted to account for the use of estimated residuals instead of true residuals defined by population projections. This adjustment is also needed to determine the optimal GMM estimator. The estimators perform well in Monte Carlo simulations and in some cases minimize mean absolute error by using moments up to seventh order. We also determine the distributions for functions that depend on both a GMM estimate and a statistic not jointly estimated with the GMM estimate.}, + file = {/home/nathante/Zotero/storage/WV3FS83S/Erickson_Whited_2002_Two-Step GMM Estimation of the Errors-in-Variables Model Using High-Order.pdf} +} + +@article{felderer_using_nodate, + title = {Using {{Double Machine Learning}} to {{Understand Nonresponse}} in the {{Recruitment}} of a {{Mixed-Mode Online Panel}}}, + author = {Felderer, Barbara and Kueck, Jannis and Spindler, Martin}, + journaltitle = {Social Science Computer Review}, + pages = {21}, + abstract = {Survey scientists increasingly face the problem of high-dimensionality in their research as digitization makes it much easier to construct high-dimensional (or “big”) data sets through tools such as online surveys and mobile applications. Machine learning methods are able to handle such data, and they have been successfully applied to solve predictive problems. However, in many situations, survey statisticians want to learn about causal relationships to draw conclusions and be able to transfer the findings of one survey to another. Standard machine learning methods provide biased estimates of such relationships. 
We introduce into survey statistics the double machine learning approach, which gives approximately unbiased estimators of parameters of interest, and show how it can be used to analyze survey nonresponse in a high-dimensional panel setting. The double machine learning approach here assumes unconfoundedness of variables as its identification strategy. In high-dimensional settings, where the number of potential confounders to include in the model is too large, the double machine learning approach secures valid inference by selecting the relevant confounding variables.},
+  langid = {english},
+  file = {/home/nathante/Zotero/storage/V36TN6SF/Felderer et al. - Using Double Machine Learning to Understand Nonres.pdf}
+}
+
+@inproceedings{fiesler_reddit_2018,
+  title = {Reddit rules! {{Characterizing}} an ecosystem of governance.},
+  booktitle = {Proceedings of the {{International AAAI Conference}} on {{Web}} and {{Social Media}}},
+  author = {Fiesler, Casey and Jiang, Jialun "Aaron" and McCann, Joshua and Frye, Kyle and Brubaker, Jed R.},
+  date = {2018},
+  pages = {72--81},
+  publisher = {{AAAI}},
+  location = {{Stanford, CA}},
+  eventtitle = {{{ICWSM}}},
+  file = {/home/nathante/Zotero/storage/65MQFFUB/Fiesler et al. - 2018 - Reddit rules! Characterizing an ecosystem of gover.pdf;/home/nathante/Zotero/storage/75956PAL/Fiesler et al. - Reddit Rules! Characterizing an Ecosystem of Gover.pdf;/home/nathante/Zotero/storage/HHY4DJB6/Fiesler - Reddit Rules! 
Characterizing an Ecosystem of Gover.pdf} +} + +@article{fong_machine_2021, + title = {Machine {{Learning Predictions}} as {{Regression Covariates}}}, + author = {Fong, Christian and Tyler, Matthew}, + date = {2021-10}, + journaltitle = {Political Analysis}, + volume = {29}, + number = {4}, + pages = {467--484}, + issn = {1047-1987, 1476-4989}, + abstract = {In text, images, merged surveys, voter files, and elsewhere, data sets are often missing important covariates, either because they are latent features of observations (such as sentiment in text) or because they are not collected (such as race in voter files). One promising approach for coping with this missing data is to find the true values of the missing covariates for a subset of the observations and then train a machine learning algorithm to predict the values of those covariates for the rest. However, plugging in these predictions without regard for prediction error renders regression analyses biased, inconsistent, and overconfident. We characterize the severity of the problem posed by prediction error, describe a procedure to avoid these inconsistencies under comparatively general assumptions, and demonstrate the performance of our estimators through simulations and a study of hostile political dialogue on the Internet. 
We provide software implementing our approach.}, + langid = {english}, + keywords = {classification,inference,instrumental variables,machine learning}, + file = {/home/nathante/Zotero/storage/D52UU9YC/Fong - Online Appendix for Machine Learning Predictions a.pdf;/home/nathante/Zotero/storage/RF8VGCKM/Fong_Tyler_2021_Machine Learning Predictions as Regression Covariates.pdf;/home/nathante/Zotero/storage/SUIH8GNP/Fong und Tyler - 2021 - Machine Learning Predictions as Regression Covaria.pdf;/home/nathante/Zotero/storage/WP4QKQL2/462A74A46A97C20A17CF640BDA72B826.html} +} + +@article{fong_online_nodate, + title = {Online {{Appendix}} for {{Machine Learning Predictions}} as {{Regression Covariates}}}, + author = {Fong, Christian}, + pages = {38}, + langid = {english}, + file = {/home/nathante/Zotero/storage/R78H4JIM/Fong - Online Appendix for Machine Learning Predictions a.pdf} +} + +@inproceedings{fortuna_toxic_2020, + title = {Toxic, {{Hateful}}, {{Offensive}} or {{Abusive}}? {{What Are We Really Classifying}}? {{An Empirical Analysis}} of {{Hate Speech Datasets}}}, + shorttitle = {Toxic, {{Hateful}}, {{Offensive}} or {{Abusive}}?}, + booktitle = {Proceedings of the 12th {{Language Resources}} and {{Evaluation Conference}}}, + author = {Fortuna, Paula and Soler, Juan and Wanner, Leo}, + date = {2020-05}, + pages = {6786--6794}, + publisher = {{European Language Resources Association}}, + location = {{Marseille, France}}, + abstract = {The field of the automatic detection of hate speech and related concepts has raised a lot of interest in the last years. Different datasets were annotated and classified by means of applying different machine learning algorithms. However, few efforts were done in order to clarify the applied categories and homogenize different datasets. Our study takes up this demand. We analyze six different publicly available datasets in this field with respect to their similarity and compatibility. We conduct two different experiments. 
First, we try to make the datasets compatible and represent the dataset classes as Fast Text word vectors analyzing the similarity between different classes in a intra and inter dataset manner. Second, we submit the chosen datasets to the Perspective API Toxicity classifier, achieving different performances depending on the categories and datasets. One of the main conclusions of these experiments is that many different definitions are being used for equivalent concepts, which makes most of the publicly available datasets incompatible. Grounded in our analysis, we provide guidelines for future dataset collection and annotation.}, + eventtitle = {{{LREC}} 2020}, + isbn = {979-10-95546-34-4}, + langid = {english}, + file = {/home/nathante/Zotero/storage/D4ZXDYTH/Fortuna et al_2020_Toxic, Hateful, Offensive or Abusive.pdf} +} + +@book{fuller_measurement_1987, + title = {Measurement Error Models}, + author = {Fuller, Wayne A.}, + date = {1987}, + series = {Wiley Series in Probability and Mathematical Statistics}, + publisher = {{Wiley}}, + location = {{New York}}, + isbn = {978-0-471-86187-4}, + langid = {english}, + pagetotal = {440}, + keywords = {Error analysis (Mathematics),Regression analysis}, + file = {/home/nathante/Zotero/storage/HD88JCCY/Fuller_1987_Measurement error models.pdf} +} + +@article{geis_statistical_2021, + title = {Statistical {{Power}} in {{Content Analysis Designs}}: {{How Effect Size}}, {{Sample Size}} and {{Coding Accuracy Jointly Affect Hypothesis Testing}} – {{A Monte Carlo Simulation Approach}}.}, + shorttitle = {Statistical {{Power}} in {{Content Analysis Designs}}}, + author = {Geiß, Stefan}, + date = {2021-03-01}, + journaltitle = {Computational Communication Research}, + volume = {3}, + number = {1}, + pages = {61--89}, + issn = {2665-9085, 2665-9085}, + abstract = {This study uses Monte Carlo simulation techniques to estimate the minimum required levels of intercoder reliability in content analysis data for testing correlational 
hypotheses, depending on sample size, effect size and coder behavior under uncertainty. The ensuing procedure is analogous to power calculations for experimental designs. In most widespread sample size/effect size settings, the rule-of-thumb that chance-adjusted agreement should be ≥.80 or ≥.667 corresponds to the simulation results, resulting in acceptable α and β error rates. However, this simulation allows making precise power calculations that can consider the specifics of each study’s context, moving beyond one-size-fits-all recommendations. Studies with low sample sizes and/or low expected effect sizes may need coder agreement above .800 to test a hypothesis with sufficient statistical power. In studies with high sample sizes and/or high expected effect sizes, coder agreement below .667 may suffice. Such calculations can help in both evaluating and in designing studies. Particularly in pre-registered research, higher sample sizes may be used to compensate for low expected effect sizes and/or borderline coding reliability (e.g. when constructs are hard to measure). I supply equations, easy-to-use tables and R functions to facilitate use of this framework, along with example code as online appendix.}, + langid = {english}, + file = {/home/nathante/Zotero/storage/QJNTR5AU/Geiß - 2021 - Statistical Power in Content Analysis Designs How.pdf} +} + +@article{gilardi_social_2022, + title = {Social {{Media}} and {{Political Agenda Setting}}}, + author = {Gilardi, Fabrizio and Gessler, Theresa and Kubli, Maël and Müller, Stefan}, + date = {2022-01-02}, + journaltitle = {Political Communication}, + shortjournal = {Political Communication}, + volume = {39}, + number = {1}, + pages = {39--60}, + issn = {1058-4609, 1091-7675}, + langid = {english}, + file = {/home/nathante/Zotero/storage/S7BXIGP3/Gilardi et al. 
- 2022 - Social Media and Political Agenda Setting.pdf} +} + +@book{gillespie_custodians_2018, + title = {Custodians of the {{Internet}}: Platforms, Content Moderation, and the Hidden Decisions That Shape Social Media}, + shorttitle = {Custodians of the Internet}, + author = {Gillespie, Tarleton}, + date = {2018}, + publisher = {{Yale University Press}}, + location = {{New Haven}}, + abstract = {"Most users want their Twitter feed, Facebook page, and YouTube comments to be free of harassment and porn. Whether faced with 'fake news' or livestreamed violence, 'content moderators'--who censor or promote user-posted content--have never been more important. This is especially true when the tools that social media platforms use to curb trolling, ban hate speech, and censor pornography can also silence the speech you need to hear. [The author] provides an overview of current social media practices and explains the underlying rationales for how, when, and why these policies are enforced. In doing so, [the author] highlights that content moderation receives too little public scrutiny even as it is shapes social norms and creates consequences for public discourse, cultural production, and the fabric of society. 
Based on interviews with content moderators, creators, and consumers, this...book is...for anyone who's ever clicked 'like' or 'retweet.'"--}, + isbn = {978-0-300-17313-0}, + pagetotal = {288}, + keywords = {Business & Economics / Industries / Media & Communications,Censorship,Computers / Web / Social Media,Political Science / Censorship,Social media,Social Science / Media Studies}, + annotation = {OCLC: on1005113962}, + file = {/home/nathante/Zotero/storage/I84YKU5K/Gillespie_2018_Custodians of the Internet.pdf} +} + +@article{gonzalez-bailon_signals_2015, + title = {Signals of {{Public Opinion}} in {{Online Communication}}: {{A Comparison}} of {{Methods}} and {{Data Sources}}}, + shorttitle = {Signals of {{Public Opinion}} in {{Online Communication}}}, + author = {González-Bailón, Sandra and Paltoglou, Georgios}, + date = {2015-05-01}, + journaltitle = {The ANNALS of the American Academy of Political and Social Science}, + shortjournal = {The ANNALS of the American Academy of Political and Social Science}, + volume = {659}, + number = {1}, + pages = {95--107}, + publisher = {{SAGE Publications Inc}}, + issn = {0002-7162}, + abstract = {This study offers a systematic comparison of automated content analysis tools. The ability of different lexicons to correctly identify affective tone (e.g., positive vs. negative) is assessed in different social media environments. Our comparisons examine the reliability and validity of publicly available, off-the-shelf classifiers. We use datasets from a range of online sources that vary in the diversity and formality of the language used, and we apply different classifiers to extract information about the affective tone in these datasets. We first measure agreement (reliability test) and then compare their classifications with the benchmark of human coding (validity test). 
Our analyses show that validity and reliability vary with the formality and diversity of the text; we also show that ready-to-use methods leave much space for improvement when analyzing domain-specific content and that a machine-learning approach offers more accurate predictions across communication domains.}, + langid = {english}, + keywords = {content analysis,information diversity,language formality,lexicon-based methods,machine learning,sentiment analysis,text mining} +} + +@article{gorwa_algorithmic_2020, + title = {Algorithmic Content Moderation: {{Technical}} and Political Challenges in the Automation of Platform Governance}, + shorttitle = {Algorithmic Content Moderation}, + author = {Gorwa, Robert and Binns, Reuben and Katzenbach, Christian}, + date = {2020-01-01}, + journaltitle = {Big Data \& Society}, + shortjournal = {Big Data \& Society}, + volume = {7}, + number = {1}, + pages = {2053951719897945}, + publisher = {{SAGE Publications Ltd}}, + issn = {2053-9517}, + abstract = {As government pressure on major technology companies builds, both firms and legislators are searching for technical solutions to difficult platform governance puzzles such as hate speech and misinformation. Automated hash-matching and predictive machine learning tools – what we define here as algorithmic moderation systems – are increasingly being deployed to conduct content moderation at scale by major platforms for user-generated content such as Facebook, YouTube and Twitter. This article provides an accessible technical primer on how algorithmic moderation works; examines some of the existing automated tools used by major platforms to handle copyright infringement, terrorism and toxic speech; and identifies key political and ethical issues for these systems as the reliance on them grows. 
Recent events suggest that algorithmic moderation has become necessary to manage growing public expectations for increased platform responsibility, safety and security on the global stage; however, as we demonstrate, these systems remain opaque, unaccountable and poorly understood. Despite the potential promise of algorithms or ‘AI’, we show that even ‘well optimized’ moderation systems could exacerbate, rather than relieve, many existing problems with content policy as enacted by platforms for three main reasons: automated moderation threatens to (a) further increase opacity, making a famously non-transparent set of practices even more difficult to understand or audit, (b) further complicate outstanding issues of fairness and justice in large-scale sociotechnical systems and (c) re-obscure the fundamentally political nature of speech decisions being executed at scale.}, + langid = {english}, + keywords = {algorithms,artificial intelligence,content moderation,copyright,Platform governance,toxic speech}, + file = {/home/nathante/Zotero/storage/HKY4DC38/Gorwa et al_2020_Algorithmic content moderation.pdf} +} + +@article{grimmer_machine_2021, + title = {Machine {{Learning}} for {{Social Science}}: {{An Agnostic Approach}}}, + shorttitle = {Machine {{Learning}} for {{Social Science}}}, + author = {Grimmer, Justin and Roberts, Margaret E. and Stewart, Brandon M.}, + date = {2021}, + journaltitle = {Annual Review of Political Science}, + volume = {24}, + number = {1}, + pages = {395--419}, + abstract = {Social scientists are now in an era of data abundance, and machine learning tools are increasingly used to extract meaning from data sets both massive and small. We explain how the inclusion of machine learning in the social sciences requires us to rethink not only applications of machine learning methods but also best practices in the social sciences. 
In contrast to the traditional tasks for machine learning in computer science and statistics, when machine learning is applied to social scientific data, it is used to discover new concepts, measure the prevalence of those concepts, assess causal effects, and make predictions. The abundance of data and resources facilitates the move away from a deductive social science to a more sequential, interactive, and ultimately inductive approach to inference. We explain how an agnostic approach to machine learning methods focused on the social science tasks facilitates progress across a wide range of questions.}, + keywords = {machine learning,research design,text as data}, + annotation = {\_eprint: https://doi.org/10.1146/annurev-polisci-053119-015921}, + file = {/home/nathante/Zotero/storage/N4PR8YCM/Grimmer et al_2021_Machine Learning for Social Science.pdf} +} + +@article{grimmer_machine_2021-1, + title = {Machine {{Learning}} for {{Social Science}}: {{An Agnostic Approach}}}, + shorttitle = {Machine {{Learning}} for {{Social Science}}}, + author = {Grimmer, Justin and Roberts, Margaret E. and Stewart, Brandon M.}, + date = {2021-05-11}, + journaltitle = {Annual Review of Political Science}, + shortjournal = {Annu. Rev. Polit. Sci.}, + volume = {24}, + number = {1}, + pages = {395--419}, + issn = {1094-2939, 1545-1577}, + abstract = {Social scientists are now in an era of data abundance, and machine learning tools are increasingly used to extract meaning from data sets both massive and small. We explain how the inclusion of machine learning in the social sciences requires us to rethink not only applications of machine learning methods but also best practices in the social sciences. In contrast to the traditional tasks for machine learning in computer science and statistics, when machine learning is applied to social scientific data, it is used to discover new concepts, measure the prevalence of those concepts, assess causal effects, and make predictions. 
The abundance of data and resources facilitates the move away from a deductive social science to a more sequential, interactive, and ultimately inductive approach to inference. We explain how an agnostic approach to machine learning methods focused on the social science tasks facilitates progress across a wide range of questions.}, + langid = {english}, + file = {/home/nathante/Zotero/storage/NTS5M7DS/Grimmer et al. - 2021 - Machine Learning for Social Science An Agnostic A.pdf} +} + +@article{grimmer_text_2013, + title = {Text as {{Data}}: {{The Promise}} and {{Pitfalls}} of {{Automatic Content Analysis Methods}} for {{Political Texts}}}, + shorttitle = {Text as {{Data}}}, + author = {Grimmer, Justin and Stewart, Brandon M.}, + date = {2013}, + journaltitle = {Political Analysis}, + volume = {21}, + number = {3}, + pages = {267--297}, + issn = {1047-1987, 1476-4989}, + abstract = {Politics and political conflict often occur in the written and spoken word. Scholars have long recognized this, but the massive costs of analyzing even moderately sized collections of texts have hindered their use in political science research. Here lies the promise of automated text analysis: it substantially reduces the costs of analyzing large collections of text. We provide a guide to this exciting new area of research and show how, in many instances, the methods have already obtained part of their promise. But there are pitfalls to using automated methods—they are no substitute for careful thought and close reading and require extensive and problem-specific validation. We survey a wide range of new methods, provide guidance on how to validate the output of the models, and clarify misconceptions and errors in the literature. 
To conclude, we argue that for automated text methods to become a standard tool for political scientists, methodologists must contribute new methods and new methods of validation.}, + langid = {english}, + file = {/home/nathante/Zotero/storage/TZULVLRZ/Grimmer_Stewart_2013_Text as Data.pdf;/home/nathante/Zotero/storage/P8HGL73A/F7AAC8B2909441603FEB25C156448F20.html} +} + +@book{grimmer_text_2022, + title = {Text as {{Data}}: {{A New Framework}} for {{Machine Learning}} and the {{Social Sciences}}}, + shorttitle = {Text as {{Data}}}, + author = {Grimmer, Justin and Roberts, Margaret E. and Stewart, Brandon M.}, + date = {2022-01-04}, + eprint = {dL40EAAAQBAJ}, + eprinttype = {googlebooks}, + publisher = {{Princeton University Press}}, + abstract = {A guide for using computational text analysis to learn about the social world From social media posts and text messages to digital government documents and archives, researchers are bombarded with a deluge of text reflecting the social world. This textual data gives unprecedented insights into fundamental questions in the social sciences, humanities, and industry. Meanwhile new machine learning tools are rapidly transforming the way science and business are conducted. Text as Data shows how to combine new sources of data, machine learning tools, and social science research design to develop and evaluate new insights.Text as Data is organized around the core tasks in research projects using text—representation, discovery, measurement, prediction, and causal inference. The authors offer a sequential, iterative, and inductive approach to research design. 
Each research task is presented complete with real-world applications, example methods, and a distinct style of task-focused research. Bridging many divides—computer science and social science, the qualitative and the quantitative, and industry and academia—Text as Data is an ideal resource for anyone wanting to analyze large collections of text in an era when data is abundant and computation is cheap, but the enduring challenges of social science remain. Overview of how to use text as data. Research design for a world of data deluge. Examples from across the social sciences and industry},
+  isbn = {978-0-691-20799-5},
+  langid = {english},
+  pagetotal = {360},
+  keywords = {Computers / Data Science / Data Analytics,Computers / Data Science / Data Modeling & Design,Computers / Data Science / Machine Learning,Social Science / Methodology,Social Science / Sociology / General}
+}
+
+@article{guess_how_2019,
+  title = {How {{Accurate Are Survey Responses}} on {{Social Media}} and {{Politics}}?},
+  author = {Guess, Andrew and Munger, Kevin and Nagler, Jonathan and Tucker, Joshua},
+  date = {2019-04-03},
+  journaltitle = {Political Communication},
+  shortjournal = {Political Communication},
+  volume = {36},
+  number = {2},
+  pages = {241--258},
+  issn = {1058-4609, 1091-7675},
+  langid = {english}
+}
+
+@article{gummer_using_2022,
+  title = {Using {{Google Trends Data}} to {{Learn More About Survey Participation}}},
+  author = {Gummer, Tobias and Oehrlein, Anne-Sophie},
+  date = {2022-09-20},
+  journaltitle = {Social Science Computer Review},
+  shortjournal = {Social Science Computer Review},
+  pages = {089443932211291},
+  issn = {0894-4393, 1552-8286},
+  abstract = {As response rates continue to decline, the need to learn more about the survey participation process remains an important task for survey researchers. 
Search engine data may be one possible source for learning about what information some potential respondents are looking up about a survey when they are making a participation decision. In the present study, we explored the potential of search engine data for learning about survey participation and how it can inform survey design decisions. We drew on freely available Google Trends (GT) data to learn about the use of Google Search with respect to our case study: participation in the Family Research and Demographic Analysis (FReDA) panel survey. Our results showed that some potential respondents were using Google Search to gather information on the FReDA survey. We also showed that the additional data obtained via GT can help survey researchers to discover topics of interest to respondents and geographically stratified search patterns. Moreover, we introduced different approaches for obtaining data via GT, discussed the challenges that come with these data, and closed with practical recommendations on how survey researchers might utilize GT data to learn about survey participation.}, + langid = {english} +} + +@article{guo_who_2020, + title = {Who Is Responsible for {{Twitter}}’s Echo Chamber Problem? {{Evidence}} from 2016 {{U}}.{{S}}. Election Networks}, + shorttitle = {Who Is Responsible for {{Twitter}}’s Echo Chamber Problem?}, + author = {Guo, Lei and A. Rohde, Jacob and Wu, H. 
Denis}, + date = {2020-01-28}, + journaltitle = {Information, Communication \& Society}, + shortjournal = {Information, Communication \& Society}, + volume = {23}, + number = {2}, + pages = {234--251}, + issn = {1369-118X, 1468-4462}, + langid = {english} +} + +@article{gwet_computing_2008, + title = {Computing Inter-Rater Reliability and Its Variance in the Presence of High Agreement}, + author = {Gwet, Kilem Li}, + date = {2008}, + journaltitle = {British Journal of Mathematical and Statistical Psychology}, + volume = {61}, + number = {1}, + pages = {29--48}, + issn = {2044-8317}, + abstract = {Pi (π) and kappa (κ) statistics are widely used in the areas of psychiatry and psychological testing to compute the extent of agreement between raters on nominally scaled data. It is a fact that these coefficients occasionally yield unexpected results in situations known as the paradoxes of kappa. This paper explores the origin of these limitations, and introduces an alternative and more stable agreement coefficient referred to as the AC1 coefficient. Also proposed are new variance estimators for the multiple-rater generalized π and AC1 statistics, whose validity does not depend upon the hypothesis of independence between raters. This is an improvement over existing alternative variances, which depend on the independence assumption. 
A Monte-Carlo simulation study demonstrates the validity of these variance estimators for confidence interval construction, and confirms the value of AC1 as an improved alternative to existing inter-rater reliability statistics.}, + langid = {english}, + annotation = {\_eprint: https://onlinelibrary.wiley.com/doi/pdf/10.1348/000711006X126600}, + file = {/home/nathante/Zotero/storage/2Y58TMMP/000711006X126600.html} +} + +@article{haber_causal_2018, + title = {Causal Language and Strength of Inference in Academic and Media Articles Shared in Social Media ({{CLAIMS}}): {{A}} Systematic Review}, + shorttitle = {Causal Language and Strength of Inference in Academic and Media Articles Shared in Social Media ({{CLAIMS}})}, + author = {Haber, Noah and Smith, Emily R. and Moscoe, Ellen and Andrews, Kathryn and Audy, Robin and Bell, Winnie and Brennan, Alana T. and Breskin, Alexander and Kane, Jeremy C. and Karra, Mahesh and McClure, Elizabeth S. and Suarez, Elizabeth A. and {on behalf of the CLAIMS research team}}, + editor = {Dorta-González, Pablo}, + date = {2018-05-30}, + journaltitle = {PLOS ONE}, + shortjournal = {PLoS ONE}, + volume = {13}, + number = {5}, + pages = {e0196346}, + issn = {1932-6203}, + langid = {english}, + file = {/home/nathante/Zotero/storage/79U7LRPJ/Haber et al. (2018) Causal language and strength of inference in academic and media articles shared in social media (CLAIMS).pdf;/home/nathante/Zotero/storage/RSPSPK2X/Haber et al. - 2018 - Causal language and strength of inference in acade.pdf} +} + +@article{hand_classifier_2006, + title = {Classifier {{Technology}} and the {{Illusion}} of {{Progress}}}, + author = {Hand, David J.}, + date = {2006-02-01}, + journaltitle = {Statistical Science}, + shortjournal = {Statist. 
Sci.}, + volume = {21}, + number = {1}, + issn = {0883-4237}, + file = {/home/nathante/Zotero/storage/2PPZII9T/Hand - 2006 - Classifier Technology and the Illusion of Progress.pdf} +} + +@article{hardin_regression-calibration_2003, + title = {The {{Regression-calibration Method}} for {{Fitting Generalized Linear Models}} with {{Additive Measurement Error}}}, + author = {Hardin, James W. and Schmiediche, Henrik and Carroll, Raymond J.}, + date = {2003-12-01}, + journaltitle = {The Stata Journal}, + shortjournal = {The Stata Journal}, + volume = {3}, + number = {4}, + pages = {361--372}, + publisher = {{SAGE Publications}}, + issn = {1536-867X}, + abstract = {This paper discusses and illustrates the method of regression calibration. This is a straightforward technique for fitting models with additive measurement error. We present this discussion in terms of generalized linear models (GLMs) following the notation defined in Hardin and Carroll (2003). Discussion will include specified measurement error, measurement error estimated by replicate error-prone proxies, and measurement error estimated by instrumental variables. The discussion focuses on software developed as part of a small business innovation research (SBIR) grant from the National Institutes of Health (NIH).}, + langid = {english}, + keywords = {generalized linear models,instrumental variables,measurement error,regression calibration,replicate measures,st0050}, + file = {/home/nathante/Zotero/storage/5WZVFPXQ/Hardin et al_2003_The Regression-calibration Method for Fitting Generalized Linear Models with.pdf} +} + +@article{hase_computational_2022, + title = {Der „{{Computational Turn}}“: Ein „interdisziplinärer {{Turn}}“? 
{{Ein}} Systematischer {{Überblick}} Zur {{Nutzung}} Der Automatisierten {{Inhaltsanalyse}} in Der {{Journalismusforschung}}}, + shorttitle = {Der „{{Computational Turn}}“}, + author = {Hase, Valerie and Mahl, Daniela and Schäfer, Mike S.}, + date = {2022}, + journaltitle = {Medien \& Kommunikationswissenschaft}, + shortjournal = {M\&K}, + volume = {70}, + number = {1-2}, + pages = {60--78}, + issn = {1615-634X}, + abstract = {Themen journalistischer Berichterstattung durch maschinelles Lernen identifizieren oder Nachrichtendiffusion automatisiert messen: Die Anwendungsmöglichkeiten der automatisierten Inhaltsanalyse in der Journalismusforschung scheinen vielfältig. Aber wie wird die computerbasierte Methode bisher eingesetzt - und welche Konsequenzen hat der „Computational Turn“ der Kommunikationswissenschaft, besonders im Hinblick auf Interdisziplinarität? Dieser Beitrag fasst auf Basis eines systematischen Literaturüberblicks zusammen, wie die automatisierte Inhaltsanalyse im Forschungsfeld der Journalismusforschung genutzt wird. Dabei zeigt sich, dass die zunehmende Nutzung der Methode ein Indikator für methodische Interdisziplinarität in der ohnehin interdisziplinären Kommunikationswissenschaft ist. Gleichzeitig finden sich kaum Hinweise auf eine Zunahme theoretischer Interdisziplinarität, z. B. Rückgriffe auf fachfremde Theorien. Auch im Hinblick auf praktische Interdisziplinarität, z. B. Kooperationen mit anderen Disziplinen, wird unser Fach keineswegs interdisziplinärer. Vielmehr findet eine Verschiebung zugunsten technischer Disziplinen statt. Der „Computational Turn“ der Kommunikationswissenschaft ist daher zumindest bisher nur teils als „interdisziplinärer Turn“ zu verstehen. , Possibilities of applying automated content analysis in journalism research include, for example, machine learning to identify topics in journalistic coverage or measuring news diffusion via automated approaches. But how has the computational method been applied thus far? 
And what are consequences of the “computational turn” in communication research, especially concerning interdisciplinarity? Based on a systematic literature review, this article summarizes the use of automated content analysis in journalism research. Results illustrate an increasing use of the method by communication scientists as yet another indicator of methodological interdisciplinarity in communication research. However, there is little evidence of an increase in theoretical interdisciplinarity: Studies relying on computational methods do not increasingly refer to theories from other disciplines. With respect to practical interdisciplinarity, for instance collaborations, our discipline is by no means becoming more interdisciplinary. Instead, we find a shift in favor of technical disciplines. At least up to now, the “computational turn” in communication research should not be equated with an “interdisciplinary turn”.}, + file = {/home/nathante/Zotero/storage/IWVHZAWM/Hase et al. - 2022 - Der „Computational Turn“ ein „interdisziplinärer .pdf} +} + +@article{hausman_mismeasured_2001, + title = {Mismeasured {{Variables}} in {{Econometric Analysis}}: {{Problems}} from the {{Right}} and {{Problems}} from the {{Left}}}, + shorttitle = {Mismeasured {{Variables}} in {{Econometric Analysis}}}, + author = {Hausman, Jerry}, + date = {2001-12}, + journaltitle = {Journal of Economic Perspectives}, + volume = {15}, + number = {4}, + pages = {57--67}, + issn = {0895-3309}, + abstract = {The effect of mismeasured variables in the most straightforward regression analysis with a single regressor variable leads to a least squares estimate that is downward biased in magnitude toward zero. I begin by reviewing classical issues involving mismeasured variables. I then consider three recent developments for mismeasurement econometric models. The first issue involves difficulties in using instrumental variables. 
A second involves the consistent estimators that have recently been developed for mismeasured nonlinear regression models. Finally, I return to mismeasured left hand side variables, where I will focus on issues in binary choice models and duration models.}, + langid = {english}, + keywords = {Multiple or Simultaneous Equation Models: General}, + file = {/home/nathante/Zotero/storage/3M539ACE/Hausman_2001_Mismeasured Variables in Econometric Analysis.pdf;/home/nathante/Zotero/storage/4BN25KNR/articles.html} +} + +@article{hayes_answering_2007, + title = {Answering the {{Call}} for a {{Standard Reliability Measure}} for {{Coding Data}}}, + author = {Hayes, Andrew F. and Krippendorff, Klaus}, + date = {2007-04-01}, + journaltitle = {Communication Methods and Measures}, + volume = {1}, + number = {1}, + pages = {77--89}, + publisher = {{Routledge}}, + issn = {1931-2458}, + abstract = {In content analysis and similar methods, data are typically generated by trained human observers who record or transcribe textual, pictorial, or audible matter in terms suitable for analysis. Conclusions from such data can be trusted only after demonstrating their reliability. Unfortunately, the content analysis literature is full of proposals for so-called reliability coefficients, leaving investigators easily confused, not knowing which to choose. After describing the criteria for a good measure of reliability, we propose Krippendorff's alpha as the standard reliability measure. It is general in that it can be used regardless of the number of observers, levels of measurement, sample sizes, and presence or absence of missing data. 
To facilitate the adoption of this recommendation, we describe a freely available macro written for SPSS and SAS to calculate Krippendorff's alpha and illustrate its use with a simple example.}, + annotation = {\_eprint: https://doi.org/10.1080/19312450709336664} +} + +@inproceedings{hede_toxicity_2021, + title = {From {{Toxicity}} in {{Online Comments}} to {{Incivility}} in {{American News}}: {{Proceed}} with {{Caution}}}, + shorttitle = {From {{Toxicity}} in {{Online Comments}} to {{Incivility}} in {{American News}}}, + booktitle = {Proceedings of the 16th {{Conference}} of the {{European Chapter}} of the {{Association}} for {{Computational Linguistics}}: {{Main Volume}}}, + author = {Hede, Anushree and Agarwal, Oshin and Lu, Linda and Mutz, Diana C. and Nenkova, Ani}, + date = {2021}, + pages = {2620--2630}, + publisher = {{Association for Computational Linguistics}}, + location = {{Online}}, + eventtitle = {Proceedings of the 16th {{Conference}} of the {{European Chapter}} of the {{Association}} for {{Computational Linguistics}}: {{Main Volume}}}, + langid = {english}, + file = {/home/nathante/Zotero/storage/53RFCQSU/Hede et al. 
- 2021 - From Toxicity in Online Comments to Incivility in .pdf} +} + +@article{heidenreich_discontentment_2022, + title = {Discontentment Trumps {{Euphoria}}: {{Interacting}} with {{European Politicians}}’ Migration-Related Messages on Social Media}, + author = {Heidenreich, Tobias and Eberl, Jakob-Moritz and Lind, Fabienne and Boomgaarden, Hajo G}, + date = {2022}, + journaltitle = {new media \& society}, + pages = {14614448221074648}, + publisher = {{SAGE Publications Sage UK: London, England}} +} + +@article{hillard_computer-assisted_2008, + title = {Computer-{{Assisted Topic Classification}} for {{Mixed-Methods Social Science Research}}}, + author = {Hillard, Dustin and Purpura, Stephen and Wilkerson, John}, + date = {2008-05-15}, + journaltitle = {Journal of Information Technology \& Politics}, + shortjournal = {Journal of Information Technology \& Politics}, + volume = {4}, + number = {4}, + pages = {31--46}, + issn = {1933-1681, 1933-169X}, + langid = {english} +} + +@article{hopkins_method_2010, + title = {A {{Method}} of {{Automated Nonparametric Content Analysis}} for {{Social Science}}}, + author = {Hopkins, Daniel J. and King, Gary}, + date = {2010-01}, + journaltitle = {American Journal of Political Science}, + volume = {54}, + number = {1}, + pages = {229--247}, + issn = {00925853, 15405907}, + langid = {english}, + file = {/home/nathante/Zotero/storage/55EKSIUK/Hopkins und King - 2010 - A Method of Automated Nonparametric Content Analys.pdf} +} + +@article{hopp_correlating_2020, + title = {Correlating {{Self-Report}} and {{Trace Data Measures}} of {{Incivility}}: {{A Proof}} of {{Concept}}}, + shorttitle = {Correlating {{Self-Report}} and {{Trace Data Measures}} of {{Incivility}}}, + author = {Hopp, Toby and Vargo, Chris J. 
and Dixon, Lucas and Thain, Nithum}, + date = {2020-10-01}, + journaltitle = {Social Science Computer Review}, + shortjournal = {Social Science Computer Review}, + volume = {38}, + number = {5}, + pages = {584--599}, + publisher = {{SAGE Publications Inc}}, + issn = {0894-4393}, + abstract = {This study correlated self-report and trace data measures of political incivility. Specifically, we asked respondents to provide estimates of the degree to which they engage in uncivil political communication online. These estimates were then compared to computational measures of uncivil social media discussion behavior. The results indicated that those who self-disclose uncivil online behavior also tend to generate content on social media that is uncivil as identified by Google’s Perspective application programming interface. Taken as a whole, this work suggests that combining self-report and behavioral trace data may be a fruitful means of developing multimethod measures of complex communication behaviors.}, + langid = {english}, + keywords = {computational social sciences,incivility,political discussion,survey,toxicity}, + file = {/home/nathante/Zotero/storage/I6YWVQW4/Hopp et al_2020_Correlating Self-Report and Trace Data Measures of Incivility.pdf} +} + +@article{hopp_social_2019, + title = {Social {{Capital}} as an {{Inhibitor}} of {{Online Political Incivility}}: {{An Analysis}} of {{Behavioral Patterns Among Politically Active Facebook Users}}}, + shorttitle = {Social {{Capital}} as an {{Inhibitor}} of {{Online Political Incivility}}}, + author = {Hopp, Toby and Vargo, Chris J.}, + date = {2019-09-13}, + journaltitle = {International Journal of Communication}, + volume = {13}, + number = {0}, + pages = {21}, + issn = {1932-8036}, + abstract = {This study examines the relationship between social capital and uncivil political communication online using a sample of politically active Facebook users and their Facebook post data. 
Theory suggests that social capital, in both its bonded and bridged forms, may inhibit the frequency and severity of online political incivility. The results here indicate that bonded social capital is negatively associated with political incivility on Facebook. Bridged capital is not, however, statistically related to posting uncivil content on Facebook.}, + issue = {0}, + langid = {english}, + keywords = {bonded social capital,bridged social capital,incivility,political discussion}, + file = {/home/nathante/Zotero/storage/UREW3WG6/Hopp_Vargo_2019_Social Capital as an Inhibitor of Online Political Incivility.pdf} +} + +@misc{hosseini_deceiving_2017, + title = {Deceiving {{Google}}'s {{Perspective API Built}} for {{Detecting Toxic Comments}}}, + author = {Hosseini, Hossein and Kannan, Sreeram and Zhang, Baosen and Poovendran, Radha}, + date = {2017-02-26}, + number = {arXiv:1702.08138}, + eprint = {1702.08138}, + eprinttype = {arxiv}, + primaryclass = {cs}, + publisher = {{arXiv}}, + abstract = {Social media platforms provide an environment where people can freely engage in discussions. Unfortunately, they also enable several problems, such as online harassment. Recently, Google and Jigsaw started a project called Perspective, which uses machine learning to automatically detect toxic language. A demonstration website has been also launched, which allows anyone to type a phrase in the interface and instantaneously see the toxicity score [1]. In this paper, we propose an attack on the Perspective toxic detection system based on the adversarial examples. We show that an adversary can subtly modify a highly toxic phrase in a way that the system assigns significantly lower toxicity score to it. We apply the attack on the sample phrases provided in the Perspective website and show that we can consistently reduce the toxicity scores to the level of the non-toxic phrases. 
The existence of such adversarial examples is very harmful for toxic detection systems and seriously undermines their usability.}, + archiveprefix = {arXiv}, + keywords = {Computer Science - Computers and Society,Computer Science - Machine Learning,Computer Science - Social and Information Networks}, + file = {/home/nathante/Zotero/storage/7DNERYPW/Hosseini et al_2017_Deceiving Google's Perspective API Built for Detecting Toxic Comments.pdf;/home/nathante/Zotero/storage/AJM3CAWA/1702.html} +} + +@incollection{hua_characterizing_2020, + title = {Characterizing {{Twitter Users Who Engage}} in {{Adversarial Interactions}} against {{Political Candidates}}}, + booktitle = {Proceedings of the 2020 {{CHI Conference}} on {{Human Factors}} in {{Computing Systems}}}, + author = {Hua, Yiqing and Naaman, Mor and Ristenpart, Thomas}, + date = {2020-04-21}, + pages = {1--13}, + publisher = {{Association for Computing Machinery}}, + location = {{New York, NY, USA}}, + abstract = {Social media provides a critical communication platform for political figures, but also makes them easy targets for harassment. In this paper, we characterize users who adversarially interact with political figures on Twitter using mixed-method techniques. The analysis is based on a dataset of 400 thousand users' 1.2 million replies to 756 candidates for the U.S. House of Representatives in the two months leading up to the 2018 midterm elections. We show that among moderately active users, adversarial activity is associated with decreased centrality in the social graph and increased attention to candidates from the opposing party. When compared to users who are similarly active, highly adversarial users tend to engage in fewer supportive interactions with their own party's candidates and express negativity in their user profiles. 
Our results can inform the design of platform moderation mechanisms to support political figures countering online harassment.}, + isbn = {978-1-4503-6708-0}, + keywords = {online harassment,political candidates,twitter,user behavior}, + file = {/home/nathante/Zotero/storage/LJMBWTZH/Hua et al_2020_Characterizing Twitter Users Who Engage in Adversarial Interactions against.pdf} +} + +@inproceedings{hullman_worst_2022, + title = {The {{Worst}} of {{Both Worlds}}: {{A Comparative Analysis}} of {{Errors}} in {{Learning}} from {{Data}} in {{Psychology}} and {{Machine Learning}}}, + shorttitle = {The {{Worst}} of {{Both Worlds}}}, + booktitle = {Proceedings of the 2022 {{AAAI}}/{{ACM Conference}} on {{AI}}, {{Ethics}}, and {{Society}}}, + author = {Hullman, Jessica and Kapoor, Sayash and Nanayakkara, Priyanka and Gelman, Andrew and Narayanan, Arvind}, + date = {2022-07-26}, + pages = {335--348}, + publisher = {{ACM}}, + location = {{Oxford United Kingdom}}, + eventtitle = {{{AIES}} '22: {{AAAI}}/{{ACM Conference}} on {{AI}}, {{Ethics}}, and {{Society}}}, + isbn = {978-1-4503-9247-1}, + langid = {english}, + file = {/home/nathante/Zotero/storage/D5R2AWJG/Hullman et al. - 2022 - The Worst of Both Worlds A Comparative Analysis o.pdf;/home/nathante/Zotero/storage/TVV3M3QL/Hullman et al. (2022) The Worst of Both Worlds.pdf} +} + +@incollection{im_synthesized_2020, + title = {Synthesized {{Social Signals}}: {{Computationally-Derived Social Signals}} from {{Account Histories}}}, + shorttitle = {Synthesized {{Social Signals}}}, + booktitle = {Proceedings of the 2020 {{CHI Conference}} on {{Human Factors}} in {{Computing Systems}}}, + author = {Im, Jane and Tandon, Sonali and Chandrasekharan, Eshwar and Denby, Taylor and Gilbert, Eric}, + date = {2020-04-21}, + pages = {1--12}, + publisher = {{Association for Computing Machinery}}, + location = {{New York, NY, USA}}, + abstract = {Social signals are crucial when we decide if we want to interact with someone online. 
However, social signals are typically limited to the few that platform designers provide, and most can be easily manipulated. In this paper, we propose a new idea called synthesized social signals (S3s): social signals computationally derived from an account's history, and then rendered into the profile. Unlike conventional social signals such as profile bios, S3s use computational summarization to reduce receiver costs and raise the cost of faking signals. To demonstrate and explore the concept, we built Sig, an extensible Chrome extension that computes and visualizes S3s. After a formative study, we conducted a field deployment of Sig on Twitter, targeting two well-known problems on social media: toxic accounts and misinformation. Results show that Sig reduced receiver costs, added important signals beyond conventionally available ones, and that a few users felt safer using Twitter as a result. We conclude by reflecting on the opportunities and challenges S3s provide for augmenting interaction on social platforms.}, + isbn = {978-1-4503-6708-0}, + keywords = {social computing,social media,social platform,social signals}, + file = {/home/nathante/Zotero/storage/RY5ENJPR/Im et al_2020_Synthesized Social Signals.pdf} +} + +@article{jacobucci_machine_2020, + title = {Machine {{Learning}} and {{Psychological Research}}: {{The Unexplored Effect}} of {{Measurement}}}, + shorttitle = {Machine {{Learning}} and {{Psychological Research}}}, + author = {Jacobucci, Ross and Grimm, Kevin J.}, + date = {2020-05-01}, + journaltitle = {Perspectives on Psychological Science}, + shortjournal = {Perspect Psychol Sci}, + volume = {15}, + number = {3}, + pages = {809--816}, + publisher = {{SAGE Publications Inc}}, + issn = {1745-6916}, + abstract = {Machine learning (i.e., data mining, artificial intelligence, big data) has been increasingly applied in psychological science. 
Although some areas of research have benefited tremendously from a new set of statistical tools, most often in the use of biological or genetic variables, the hype has not been substantiated in more traditional areas of research. We argue that this phenomenon results from measurement errors that prevent machine-learning algorithms from accurately modeling nonlinear relationships, if indeed they exist. This shortcoming is showcased across a set of simulated examples, demonstrating that model selection between a machine-learning algorithm and regression depends on the measurement quality, regardless of sample size. We conclude with a set of recommendations and a discussion of ways to better integrate machine learning with statistics as traditionally practiced in psychological science.}, + langid = {english}, + keywords = {data mining,machine learning,measurement error,psychometrics,structural-equation modeling}, + file = {/home/nathante/Zotero/storage/IYLNBMSN/Jacobucci_Grimm_2020_Machine Learning and Psychological Research.pdf} +} + +@inproceedings{jain_adversarial_2018, + title = {Adversarial {{Text Generation}} for {{Google}}'s {{Perspective API}}}, + booktitle = {2018 {{International Conference}} on {{Computational Science}} and {{Computational Intelligence}} ({{CSCI}})}, + author = {Jain, Edwin and Brown, Stephan and Chen, Jeffery and Neaton, Erin and Baidas, Mohammad and Dong, Ziqian and Gu, Huanying and Artan, Nabi Sertac}, + date = {2018-12}, + pages = {1136--1141}, + abstract = {With the preponderance of harassment and abuse, social media platforms and online discussion platforms seek to curb toxic comments. Google's Perspective aims to help platforms classify toxic comments. We have created a pipeline to modify toxic comments to evade Perspective. This pipeline uses existing adversarial machine learning attacks to find the optimal perturbation which will evade the model. 
Since these attacks typically target images, as opposed to discrete text data, we include a process to generate text candidates from perturbed features and select candidates to retain syntactic similarity. We demonstrated that using a model with just 10,000 queries, changing three words in each comment evades Perspective 25\% of the time, suggesting that building a surrogate model may not require many queries and a more robust approach is needed to improve the toxic comment classifier accuracy.}, + eventtitle = {2018 {{International Conference}} on {{Computational Science}} and {{Computational Intelligence}} ({{CSCI}})}, + keywords = {Adversarial,Adversarial machine learning,Deep Learning,Google,Google Perspective,Machine Learning,Natural Language Processing,Perturbation methods,Semantics,Syntactics,Task analysis}, + file = {/home/nathante/Zotero/storage/HND429IV/Jain et al_2018_Adversarial Text Generation for Google's Perspective API.pdf;/home/nathante/Zotero/storage/3BH3ARY2/8947631.html} +} + +@article{jiang_addressing_2021, + ids = {jiang_addressing_2021-1}, + title = {Addressing {{Measurement Error}} in {{Random Forests Using Quantitative Bias Analysis}}}, + author = {Jiang, Tammy and Gradus, Jaimie L and Lash, Timothy L and Fox, Matthew P}, + date = {2021-09-01}, + journaltitle = {American Journal of Epidemiology}, + shortjournal = {American Journal of Epidemiology}, + volume = {190}, + number = {9}, + pages = {1830--1840}, + issn = {0002-9262}, + abstract = {Although variables are often measured with error, the impact of measurement error on machine-learning predictions is seldom quantified. The purpose of this study was to assess the impact of measurement error on the performance of random-forest models and variable importance. 
First, we assessed the impact of misclassification (i.e., measurement error of categorical variables) of predictors on random-forest model performance (e.g., accuracy, sensitivity) and variable importance (mean decrease in accuracy) using data from the National Comorbidity Survey Replication (2001–2003). Second, we created simulated data sets in which we knew the true model performance and variable importance measures and could verify that quantitative bias analysis was recovering the truth in misclassified versions of the data sets. Our findings showed that measurement error in the data used to construct random forests can distort model performance and variable importance measures and that bias analysis can recover the correct results. This study highlights the utility of applying quantitative bias analysis in machine learning to quantify the impact of measurement error on study results.}, + file = {/home/nathante/Zotero/storage/3BRTJHQW/Jiang et al. - 2021 - Addressing Measurement Error in Random Forests Usi.pdf;/home/nathante/Zotero/storage/4XIUHUVG/Jiang et al_2021_Addressing Measurement Error in Random Forests Using Quantitative Bias Analysis.pdf;/home/nathante/Zotero/storage/UK34NAWR/6123935.html} +} + +@article{johnson_learning_2020, + title = {Learning from the Past and Considering the Future of Chemicals in the Environment}, + author = {Johnson, Andrew C. and Jin, Xiaowei and Nakada, Norihide and Sumpter, John P.}, + date = {2020-01-24}, + journaltitle = {Science}, + shortjournal = {Science}, + volume = {367}, + number = {6476}, + pages = {384--387}, + issn = {0036-8075, 1095-9203}, + abstract = {Knowledge of the hazards and associated risks from chemicals discharged to the environment has grown considerably over the past 40 years. This improving awareness stems from advances in our ability to measure chemicals at low environmental concentrations, recognition of a range of effects on organisms, and a worldwide growth in expertise. 
Environmental scientists and companies have learned from the experiences of the past; in theory, the next generation of chemicals will cause less acute toxicity and be less environmentally persistent and bioaccumulative. However, researchers still struggle to establish whether the nonlethal effects associated with some modern chemicals and substances will have serious consequences for wildlife. Obtaining the resources to address issues associated with chemicals in the environment remains a challenge.}, + langid = {english}, + file = {/home/nathante/Zotero/storage/KWTMTJBA/Johnson et al. (2017) Measurement error and the replication crisis.pdf;/home/nathante/Zotero/storage/VDIKYHGM/Johnson et al. - 2020 - Learning from the past and considering the future .pdf} +} + +@article{junger_unboxing_2022, + title = {Unboxing {{Computational Social Media Research From}} a {{Datahermeneutical Perspective}}: {{How Do Scholars Address}} the {{Tension Between Automation}} and {{Interpretation}}?}, + author = {Jünger, Jakob and Geise, Stephanie and Hännelt, Maria}, + date = {2022}, + journaltitle = {International Journal of Communication}, + volume = {16}, + pages = {1482--1505}, + issn = {1932-8036}, + abstract = {Communication researchers have fruitfully applied computational methods in their analysis of communication processes. However, the automation of scientific data collection and analysis confronts scholars with fundamental epistemological and practical challenges. Particularly, automation implies that the processing of data is highly standardized for all cases. In the context of social science research, this contrasts with the expectation that meaning is always attributed in individual interaction processes. Based on a literature review of peer-reviewed journal articles, our study explores the resulting tension between automated and interpretive research. We first analyze the extent to which automated methods play a role in social media research. 
We then identify the challenges and limitations researchers addressed in their studies. On this basis, we propose steps for a data hermeneutical perspective that combines computational methods with interpretive approaches.}, + keywords = {automated data collection,computational communication science,computational methods,computational social science,data hermeneutics,interpretive paradigm,process-generated data} +} + +@article{kaplan_big_2014, + title = {Big {{Data}} and {{Large Sample Size}}: {{A Cautionary Note}} on the {{Potential}} for {{Bias}}}, + shorttitle = {Big {{Data}} and {{Large Sample Size}}}, + author = {Kaplan, Robert M. and Chambers, David A. and Glasgow, Russell E.}, + date = {2014}, + journaltitle = {Clinical and Translational Science}, + volume = {7}, + number = {4}, + pages = {342--346}, + issn = {1752-8062}, + abstract = {A number of commentaries have suggested that large studies are more reliable than smaller studies and there is a growing interest in the analysis of “big data” that integrates information from many thousands of persons and/or different data sources. We consider a variety of biases that are likely in the era of big data, including sampling error, measurement error, multiple comparisons errors, aggregation error, and errors associated with the systematic exclusion of information. Using examples from epidemiology, health services research, studies on determinants of health, and clinical trials, we conclude that it is necessary to exercise greater caution to be sure that big sample size does not lead to big inferential errors. Despite the advantages of big studies, large sample size can magnify the bias associated with error resulting from sampling or study design. 
Clin Trans Sci 2014; Volume \#: 1–5}, + langid = {english}, + keywords = {bias,big data,research methods,sampling}, + annotation = {\_eprint: https://ascpt.onlinelibrary.wiley.com/doi/pdf/10.1111/cts.12178}, + file = {/home/nathante/Zotero/storage/PTGVP2WW/Kaplan et al_2014_Big Data and Large Sample Size.pdf;/home/nathante/Zotero/storage/KBURTV5N/cts.html} +} + +@article{kim_distorting_2021, + title = {The {{Distorting Prism}} of {{Social Media}}: {{How Self-Selection}} and {{Exposure}} to {{Incivility Fuel Online Comment Toxicity}}}, + shorttitle = {The {{Distorting Prism}} of {{Social Media}}}, + author = {Kim, Jin Woo and Guess, Andrew and Nyhan, Brendan and Reifler, Jason}, + date = {2021-12-01}, + journaltitle = {Journal of Communication}, + shortjournal = {Journal of Communication}, + volume = {71}, + number = {6}, + pages = {922--946}, + issn = {0021-9916}, + abstract = {Though prior studies have analyzed the textual characteristics of online comments about politics, less is known about how selection into commenting behavior and exposure to other people’s comments changes the tone and content of political discourse. This article makes three contributions. First, we show that frequent commenters on Facebook are more likely to be interested in politics, to have more polarized opinions, and to use toxic language in comments in an elicitation task. Second, we find that people who comment on articles in the real world use more toxic language on average than the public as a whole; levels of toxicity in comments scraped from media outlet Facebook pages greatly exceed what is observed in comments we elicit on the same articles from a nationally representative sample. 
Finally, we demonstrate experimentally that exposure to toxic language in comments increases the toxicity of subsequent comments.}, + file = {/home/nathante/Zotero/storage/T89NGBE4/Kim et al_2021_The Distorting Prism of Social Media.pdf;/home/nathante/Zotero/storage/Q8JWJ7LZ/6363640.html} +} + +@article{king_analyzing_2001, + title = {Analyzing {{Incomplete Political Science Data}}: {{An Alternative Algorithm}} for {{Multiple Imputation}}}, + shorttitle = {Analyzing {{Incomplete Political Science Data}}}, + author = {King, Gary and Honaker, James and Joseph, Anne and Scheve, Kenneth}, + date = {2001-03}, + journaltitle = {American Political Science Review}, + volume = {95}, + number = {1}, + pages = {49--69}, + publisher = {{Cambridge University Press}}, + issn = {1537-5943, 0003-0554}, + abstract = {We propose a remedy for the discrepancy between the way political scientists analyze data with missing values and the recommendations of the statistics community. Methodologists and statisticians agree that “multiple imputation” is a superior approach to the problem of missing data scattered through one’s explanatory and dependent variables than the methods currently used in applied data analysis. The discrepancy occurs because the computational algorithms used to apply the best multiple imputation models have been slow, difficult to implement, impossible to run with existing commercial statistical packages, and have demanded considerable expertise. We adapt an algorithm and use it to implement a general-purpose, multiple imputation model for missing data. This algorithm is considerably faster and easier to use than the leading method recommended in the statistics literature. We also quantify the risks of current missing data practices, illustrate how to use the new procedure, and evaluate this alternative through simulated data as well as actual empirical examples. 
Finally, we offer easy-to-use software that implements all methods discussed.}, + langid = {english}, + file = {/home/nathante/Zotero/storage/SRZTVUSY/King et al_2001_Analyzing Incomplete Political Science Data.pdf;/home/nathante/Zotero/storage/EJP7I7RQ/9E712982CCE2DE79A574FE98488F212B.html} +} + +@article{kleinberg_algorithmic_2018, + title = {Algorithmic {{Fairness}}}, + author = {Kleinberg, Jon and Ludwig, Jens and Mullainathan, Sendhil and Rambachan, Ashesh}, + date = {2018}, + journaltitle = {AEA Papers and Proceedings}, + volume = {108}, + pages = {22--27}, + issn = {2574-0768}, + abstract = {Concerns that algorithms may discriminate against certain groups have led to numerous efforts to 'blind' the algorithm to race. We argue that this intuitive perspective is misleading and may do harm. Our primary result is exceedingly simple, yet often overlooked. A preference for fairness should not change the choice of estimator. Equity preferences can change how the estimated prediction function is used (e.g., different threshold for different groups) but the function itself should not change. We show in an empirical example for college admissions that the inclusion of variables such as race can increase both equity and efficiency.}, + langid = {english}, + keywords = {Cluster Analysis,Factor Models; Equity; Justice; Inequality; and Other Normative Criteria and Measurement; Higher Education,Multiple or Simultaneous Equation Models: Classification Methods,Non-labor Discrimination,Principal Components,Research Institutions; Economics of Minorities; Races; Indigenous Peoples; and Immigrants}, + file = {/home/nathante/Zotero/storage/67KVXZIU/Kleinberg et al_2018_Algorithmic Fairness.pdf;/home/nathante/Zotero/storage/TSV3T4KE/articles.html} +} + +@article{knox_testing_2022, + title = {Testing {{Causal Theories}} with {{Learned Proxies}}}, + author = {Knox, Dean and Lucas, Christopher and Cho, Wendy K. 
Tam}, + date = {2022-05-12}, + journaltitle = {Annual Review of Political Science}, + shortjournal = {Annu. Rev. Polit. Sci.}, + volume = {25}, + number = {1}, + pages = {419--441}, + issn = {1094-2939, 1545-1577}, + abstract = {Social scientists commonly use computational models to estimate proxies of unobserved concepts, then incorporate these proxies into subsequent tests of their theories. The consequences of this practice, which occurs in over two-thirds of recent computational work in political science, are underappreciated. Imperfect proxies can reflect noise and contamination from other concepts, producing biased point estimates and standard errors. We demonstrate how analysts can use causal diagrams to articulate theoretical concepts and their relationships to estimated proxies, then apply straightforward rules to assess which conclusions are rigorously supportable. We formalize and extend common heuristics for “signing the bias”—a technique for reasoning about unobserved confounding—to scenarios with imperfect proxies. Using these tools, we demonstrate how, in often-encountered research settings, proxy-based analyses allow for valid tests for the existence and direction of theorized effects. We conclude with best-practice recommendations for the rapidly growing literature using learned proxies to test causal theories.}, + langid = {english}, + file = {/home/nathante/Zotero/storage/T64YP7NJ/Knox et al. 
- 2022 - Testing Causal Theories with Learned Proxies.pdf} +} + +@book{krippendorff_content_2018, + title = {Content Analysis: {{An}} Introduction to Its Methodology}, + author = {Krippendorff, Klaus}, + date = {2018}, + publisher = {{SAGE}} +} + +@article{krippendorff_estimating_1970, + title = {Estimating the {{Reliability}}, {{Systematic Error}} and {{Random Error}} of {{Interval Data}}}, + author = {Krippendorff, Klaus}, + date = {1970-04-01}, + journaltitle = {Educational and Psychological Measurement}, + shortjournal = {Educational and Psychological Measurement}, + volume = {30}, + number = {1}, + pages = {61--70}, + publisher = {{SAGE Publications Inc}}, + issn = {0013-1644}, + langid = {english}, + file = {/home/nathante/Zotero/storage/YSDM7Z7Q/Krippendorff_1970_Estimating the Reliability, Systematic Error and Random Error of Interval Data.pdf} +} + +@article{krippendorff_reliability_2004, + title = {Reliability in {{Content Analysis}}}, + author = {Krippendorff, Klaus}, + date = {2004}, + journaltitle = {Human Communication Research}, + volume = {30}, + number = {3}, + pages = {411--433}, + issn = {1468-2958}, + abstract = {In a recent article in this journal, Lombard, Snyder-Duch, and Bracken (2002) surveyed 200 content analyses for their reporting of reliability tests, compared the virtues and drawbacks of five popular reliability measures, and proposed guidelines and standards for their use. Their discussion revealed that numerous misconceptions circulate in the content analysis literature regarding how these measures behave and can aid or deceive content analysts in their effort to ensure the reliability of their data. This article proposes three conditions for statistical measures to serve as indices of the reliability of data and examines the mathematical structure and the behavior of the five coefficients discussed by the authors, as well as two others. 
It compares common beliefs about these coefficients with what they actually do and concludes with alternative recommendations for testing reliability in content analysis and similar data-making efforts.}, + langid = {english}, + annotation = {\_eprint: https://onlinelibrary.wiley.com/doi/pdf/10.1111/j.1468-2958.2004.tb00738.x}, + file = {/home/nathante/Zotero/storage/GBK9844Z/j.1468-2958.2004.tb00738.html} +} + +@article{kroon_beyond_2022, + title = {Beyond {{Counting Words}}: {{Assessing Performance}} of {{Dictionaries}}, {{Supervised Machine Learning}}, and {{Embeddings}} in {{Topic}} and {{Frame Classification}}}, + shorttitle = {Beyond {{Counting Words}}}, + author = {Kroon, Anne C. and van der Meer, Toni and Vliegenthart, Rens}, + options = {useprefix=true}, + date = {2022-10-01}, + journaltitle = {Computational Communication Research}, + volume = {4}, + number = {2}, + pages = {528--570}, + issn = {2665-9085, 2665-9085}, + langid = {english} +} + +@inproceedings{kurrek_towards_2020, + title = {Towards a {{Comprehensive Taxonomy}} and {{Large-Scale Annotated Corpus}} for {{Online Slur Usage}}}, + booktitle = {Proceedings of the {{Fourth Workshop}} on {{Online Abuse}} and {{Harms}}}, + author = {Kurrek, Jana and Saleem, Haji Mohammad and Ruths, Derek}, + date = {2020-11}, + pages = {138--149}, + publisher = {{Association for Computational Linguistics}}, + location = {{Online}}, + abstract = {Abusive language classifiers have been shown to exhibit bias against women and racial minorities. Since these models are trained on data that is collected using keywords, they tend to exhibit a high sensitivity towards pejoratives. As a result, comments written by victims of abuse are frequently labelled as hateful, even if they discuss or reclaim slurs. Any attempt to address bias in keyword-based corpora requires a better understanding of pejorative language, as well as an equitable representation of targeted users in data collection. 
We make two main contributions to this end. First, we provide an annotation guide that outlines 4 main categories of online slur usage, which we further divide into a total of 12 sub-categories. Second, we present a publicly available corpus based on our taxonomy, with 39.8k human annotated comments extracted from Reddit. This corpus was annotated by a diverse cohort of coders, with Shannon equitability indices of 0.90, 0.92, and 0.87 across sexuality, ethnicity, and gender. Taken together, our taxonomy and corpus allow researchers to evaluate classifiers on a wider range of speech containing slurs.}, + eventtitle = {{{ALW-EMNLP}} 2020}, + file = {/home/nathante/Zotero/storage/8EURY5H3/Kurrek et al_2020_Towards a Comprehensive Taxonomy and Large-Scale Annotated Corpus for Online.pdf} +} + +@article{lazer_meaningful_2021, + title = {Meaningful Measures of Human Society in the Twenty-First Century}, + author = {Lazer, David and Hargittai, Eszter and Freelon, Deen and Gonzalez-Bailon, Sandra and Munger, Kevin and Ognyanova, Katherine and Radford, Jason}, + date = {2021-07}, + journaltitle = {Nature}, + volume = {595}, + number = {7866}, + pages = {189--196}, + publisher = {{Nature Publishing Group}}, + issn = {1476-4687}, + abstract = {Science rarely proceeds beyond what scientists can observe and measure, and sometimes what can be observed proceeds far ahead of scientific understanding. The twenty-first century offers such a moment in the study of human societies. A vastly larger share of behaviours is observed today than would have been imaginable at the close of the twentieth century. Our interpersonal communication, our movements and many of our everyday actions, are all potentially accessible for scientific research; sometimes through purposive instrumentation for scientific objectives (for example, satellite imagery), but far more often these objectives are, literally, an afterthought (for example, Twitter data streams). 
Here we evaluate the potential of this massive instrumentation—the creation of techniques for the structured representation and quantification—of human behaviour through the lens of scientific measurement and its principles. In particular, we focus on the question of how we extract scientific meaning from data that often were not created for such purposes. These data present conceptual, computational and ethical challenges that require a rejuvenation of our scientific theories to keep up with the rapidly changing social realities and our capacities to capture them. We require, in other words, new approaches to manage, use and analyse data.}, + issue = {7866}, + langid = {english}, + keywords = {Scientific community} +} + +@article{lazer_meaningful_2021-1, + title = {Meaningful Measures of Human Society in the Twenty-First Century}, + author = {Lazer, David and Hargittai, Eszter and Freelon, Deen and Gonzalez-Bailon, Sandra and Munger, Kevin and Ognyanova, Katherine and Radford, Jason}, + date = {2021-07}, + journaltitle = {Nature}, + volume = {595}, + number = {7866}, + pages = {189--196}, + publisher = {{Nature Publishing Group}}, + issn = {1476-4687}, + abstract = {Science rarely proceeds beyond what scientists can observe and measure, and sometimes what can be observed proceeds far ahead of scientific understanding. The twenty-first century offers such a moment in the study of human societies. A vastly larger share of behaviours is observed today than would have been imaginable at the close of the twentieth century. Our interpersonal communication, our movements and many of our everyday actions, are all potentially accessible for scientific research; sometimes through purposive instrumentation for scientific objectives (for example, satellite imagery), but far more often these objectives are, literally, an afterthought (for example, Twitter data streams). 
Here we evaluate the potential of this massive instrumentation—the creation of techniques for the structured representation and quantification—of human behaviour through the lens of scientific measurement and its principles. In particular, we focus on the question of how we extract scientific meaning from data that often were not created for such purposes. These data present conceptual, computational and ethical challenges that require a rejuvenation of our scientific theories to keep up with the rapidly changing social realities and our capacities to capture them. We require, in other words, new approaches to manage, use and analyse data.}, + issue = {7866}, + langid = {english}, + keywords = {Scientific community} +} + +@article{lederer_short_nodate, + title = {A Short Introduction to the {{SIMEX}} and {{MCSIMEX}}}, + author = {Lederer, Wolfgang and Küchenhoff, Helmut}, + pages = {5}, + langid = {english}, + file = {/home/nathante/Zotero/storage/5SPGL6VF/Lederer und Küchenhoff - A short Introduction to the SIMEX and.pdf} +} + +@inproceedings{lima_characterizing_2020, + title = {Characterizing ({{Un}})Moderated {{Textual Data}} in {{Social Systems}}}, + booktitle = {2020 {{IEEE}}/{{ACM International Conference}} on {{Advances}} in {{Social Networks Analysis}} and {{Mining}} ({{ASONAM}})}, + author = {Lima, Lucas and Reis, Julio C. S. and Melo, Philipe and Murai, Fabrício and Benevenuto, Fabrício}, + date = {2020-12}, + pages = {430--434}, + issn = {2473-991X}, + abstract = {Despite the valuable social interactions that online media promote, these systems provide space for speech that would be potentially detrimental to different groups of people. The moderation of content imposed by many social media has motivated the emergence of a new social system for free speech named Gab, which lacks moderation of content. This article characterizes and compares moderated textual data from Twitter with a set of unmoderated data from Gab. 
In particular, we analyze distinguishing characteristics of moderated and unmoderated content in terms of linguistic features, evaluate hate speech and its different forms in both environments. Our work shows that unmoderated content presents different psycholinguistic features, more negative sentiment and higher toxicity. Our findings support that unmoderated environments may have proportionally more online hate speech. We hope our analysis and findings contribute to the debate about hate speech and benefit systems aiming at deploying hate speech detection approaches.}, + eventtitle = {2020 {{IEEE}}/{{ACM International Conference}} on {{Advances}} in {{Social Networks Analysis}} and {{Mining}} ({{ASONAM}})}, + keywords = {Blogs,Gab,Hate Speech,Linguistics,Media,Moderated Content,Social Network,Social networking (online),Statistical analysis,Toxicology,Twitter,Unmoderated Content,Voice activity detection}, + file = {/home/nathante/Zotero/storage/MK4JUJR4/Lima et al_2020_Characterizing (Un)moderated Textual Data in Social Systems.pdf} +} + +@article{lind_greasing_2021, + title = {Greasing the Wheels for Comparative Communication Research: {{Supervised}} Text Classification for Multilingual Corpora}, + author = {Lind, Fabienne and Heidenreich, Tobias and Kralj, Christoph and Boomgaarden, Hajo G}, + date = {2021}, + journaltitle = {Computational Communication Research}, + volume = {3}, + number = {3}, + publisher = {{Amsterdam University Press}} +} + +@report{lockhart_whats_2022, + type = {preprint}, + title = {What’s in a {{Name}}? {{Name-Based Demographic Inference}} and the {{Unequal Distribution}} of {{Misrecognition}}}, + shorttitle = {What’s in a {{Name}}?}, + author = {Lockhart, Jeffrey W and King, Molly M. and Munsch, Christin}, + date = {2022-09-06}, + institution = {{SocArXiv}}, + abstract = {Academics and companies increasingly draw on large datasets to understand the social world. 
Name-based demographic ascription tools are widespread for imputing information like gender and race that are often missing from these large datasets, but these approaches have drawn criticism on ethical, empirical, and theoretical grounds. Employing a survey of all authors listed on articles in sociology, economics, and communications journals in the Web of Science between 2015 and 2020, we compared self-identified demographics with name-based imputations of gender and race/ethnicity for 19,924 scholars across four gender ascription tools (genderize.io, M3-inference, R’s `predictrace` and `gender` packages) and four race/ethnicity ascription tools (ethnicolor’s Florida and North Carolina voter models, and R’s `predictrace` and wru packages). We find substantial inequalities in how these tools misgender and misrecognize the race/ethnicity of authors, distributing erroneous ascriptions unevenly along other demographic traits. Because of the empirical and ethical consequences of these errors, scholars need to be cautious with the use of name-based demographic imputation, particularly when studying subgroups. We recommend five principles for the responsible use of name-based demographic ascription.}, + langid = {english}, + file = {/home/nathante/Zotero/storage/HC5MCC2C/Lockhart et al. - 2022 - What’s in a Name Name-Based Demographic Inference.pdf} +} + +@article{loken_measurement_2017, + title = {Measurement Error and the Replication Crisis}, + author = {Loken, Eric and Gelman, Andrew}, + date = {2017-02-10}, + journaltitle = {Science}, + volume = {355}, + number = {6325}, + pages = {584--585}, + publisher = {{American Association for the Advancement of Science}}, + file = {/home/nathante/Zotero/storage/EK6AQXQE/Loken_Gelman_2017_Measurement error and the replication crisis.pdf} +} + +@article{lorcher_discussing_2017, + title = {Discussing Climate Change Online. 
{{Topics}} and Perceptions in Online Climate Change Communication in Different Online Public Arenas}, + author = {Lörcher, Ines and Taddicken, Monika}, + date = {2017-05-18}, + journaltitle = {Journal of Science Communication}, + shortjournal = {JCOM}, + volume = {16}, + number = {02}, + pages = {A03}, + issn = {1824-2049}, + abstract = {How users discuss climate change online is one of the crucial questions (science) communication scholars address nowadays. This study contributes by approaching the issue through the theoretical concept of online public arenas. The diversity of topics and perceptions in the climate change discourse is explored by comparing different arenas. German journalistic articles and their reader comments as well as scientific expert blogs are analyzed by quantitative manual and automated content analysis (n=5,301). Findings demonstrate a larger diversity of topics and interpretations in arenas with low barriers to communication. Overall, climate change skepticism is rare, but mostly present in lay publics.}, + file = {/home/nathante/Zotero/storage/QPTH7GQZ/Lörcher und Taddicken - 2017 - Discussing climate change online. Topics and perce.pdf} +} + +@article{lovejoy_assessing_2014, + title = {Assessing the {{Reporting}} of {{Reliability}} in {{Published Content Analyses}}: 1985–2010}, + shorttitle = {Assessing the {{Reporting}} of {{Reliability}} in {{Published Content Analyses}}}, + author = {Lovejoy, Jennette and Watson, Brendan R. 
and Lacy, Stephen and Riffe, Daniel}, + date = {2014-07-03}, + journaltitle = {Communication Methods and Measures}, + shortjournal = {Communication Methods and Measures}, + volume = {8}, + number = {3}, + pages = {207--221}, + issn = {1931-2458, 1931-2466}, + langid = {english} +} + +@article{mahl_noise_2022, + title = {Noise {{Pollution}}: {{A Multi-Step Approach}} to {{Assessing}} the {{Consequences}} of ({{Not}}) {{Validating Search Terms}} on {{Automated Content Analyses}}}, + author = {Mahl, Daniela and von Nordheim, Gerret and Guenther, Lars}, + options = {useprefix=true}, + date = {2022-09-23}, + journaltitle = {Digital Journalism}, + shortjournal = {Digital Journalism}, + pages = {1--23}, + publisher = {{Routledge}}, + issn = {2167-0811} +} + +@article{maier_applying_2018, + title = {Applying {{LDA Topic Modeling}} in {{Communication Research}}: {{Toward}} a {{Valid}} and {{Reliable Methodology}}}, + author = {Maier, Daniel and Waldherr, A. and Miltner, P. and Wiedemann, G. and Niekler, A. and Keinert, A. and Pfetsch, B. and Heyer, G. and Reber, U. and Häussler, T. 
and others}, + date = {2018-02}, + journaltitle = {Communication Methods and Measures}, + volume = {12}, + number = {2-3}, + pages = {93--118}, + publisher = {{Informa UK Limited}}, + issn = {1931-2466} +} + +@incollection{mall_four_2020, + title = {Four {{Types}} of {{Toxic People}}: {{Characterizing Online Users}}' {{Toxicity}} over {{Time}}}, + shorttitle = {Four {{Types}} of {{Toxic People}}}, + booktitle = {Proceedings of the 11th {{Nordic Conference}} on {{Human-Computer Interaction}}: {{Shaping Experiences}}, {{Shaping Society}}}, + author = {Mall, Raghvendra and Nagpal, Mridul and Salminen, Joni and Almerekhi, Hind and Jung, Soon-Gyo and Jansen, Bernard J.}, + date = {2020-10-25}, + number = {37}, + pages = {1--11}, + publisher = {{Association for Computing Machinery}}, + location = {{New York, NY, USA}}, + abstract = {Identifying types of online users’ toxic behavior reveals important insights from social media interactions, including whether a user becomes “radicalized” (more toxic) or “pacified” (less toxic) over time. In this research, we design two metrics to identify toxic user types: F score that captures the changes in a user’s toxicity, and G score that captures the direction of the shift taking place in the user’s toxicity pattern. We apply these metrics to a dataset of 4M user comments from Reddit by defining four toxic user types based on the toxicity scores of a user’s comments: (a) Steady Users whose toxicity scores are steady over time, (b) Fickle-Minded Users that switch between toxic and non-toxic commenting, (c) Pacified Users whose commenting becomes less toxic in time, and (d) Radicalized Users that become gradually toxic. Findings from the Reddit dataset indicate that fickle-minded users form the largest group (31.2\%), followed by pacified (25.8\%), radicalized (25.4\%), and steadily toxic users (17.6\%). The results suggest that the most typical behavior type of toxicity is switching between toxic and non-toxic commenting. 
This research has implications for preserving the user-friendliness of online communities by identifying continuously toxic users and users in danger of becoming radicalized (in terms of their toxic behavior), and designing interventions to mitigate these behavior types. Using the metrics we have defined, identifying these user types becomes possible. More research is needed to understand why these patterns take place and how they could be mitigated.}, + isbn = {978-1-4503-7579-5}, + keywords = {online toxicity,Reddit,social media behavior,user analysis}, + file = {/home/nathante/Zotero/storage/27PC9H72/Mall et al_2020_Four Types of Toxic People.pdf} +} + +@article{malloch_estimation_2021, + title = {Estimation with {{Errors}} in {{Variables}} via the {{Characteristic Function}}*}, + author = {Malloch, H and Philip, R and Satchell, S}, + date = {2021-10-18}, + journaltitle = {Journal of Financial Econometrics}, + shortjournal = {Journal of Financial Econometrics}, + pages = {nbab011}, + issn = {1479-8409}, + abstract = {Errors in variables in linear regression continue to be a significant empirical issue in financial econometrics. We propose using the characteristic function (CF) to obtain estimates for linear models with errors in the variables. By assuming that the explanatory variable follows a flexible double gamma distribution, we obtain closed-form expressions for the analytic CF of the data generating process. We show that our method performs well relative to existing techniques that address error-in-variables (EIVs) through simulations. We further extend our CF technique to a multivariate setting where it continues to produce accurate estimates. 
We illustrate the performance of our procedure by estimating the capital asset pricing model and a two-factor model.}, + file = {/home/nathante/Zotero/storage/FFQ9NE3N/6400037.html} +} + +@article{mangold_metrics_2016, + title = {Metrics of {{News Audience Polarization}}: {{Same}} or {{Different}}?}, + author = {Mangold, Frank and Scharkow, Michael}, + date = {2016}, + pages = {26}, + abstract = {Although media and communication scholars have suggested various analytical methods for measuring and comparing news audience polarization across countries, we lack a systematic assessment of the metrics produced by these techniques. Using survey data from the 2016 Reuters Institute Digital News Report on news use in 26 countries, we address this gap through a resampling simulation experiment. Our simulation revealed a strong impact of analytical choices, which invited disparate interpretations in terms of how polarized news audiences are, how strongly audience polarization structurally varies between news environments, and how news audience polarization is distributed cross-nationally. Alternative choices led to profound differences in the compatibility, consistency, and validity of the empirical news audience polarization estimates. We conclude from these results that a more precise methodological understanding of news audience polarization metrics informs our capability to draw meaningful inferences from empirical work.}, + langid = {english}, + file = {/home/nathante/Zotero/storage/C33ZMIUK/Mangold und Scharkow - 2016 - Metrics of News Audience Polarization Same or Dif.pdf} +} + +@article{mcnamara_not_2022, + title = {Not Just “Big” Data: {{Importance}} of Sample Size, Measurement Error, and Uninformative Predictors for Developing Prognostic Models for Digital Interventions}, + shorttitle = {Not Just “Big” Data}, + author = {McNamara, Mary E. and Zisser, Mackenzie and Beevers, Christopher G. 
and Shumake, Jason}, + date = {2022-06}, + journaltitle = {Behaviour Research and Therapy}, + shortjournal = {Behaviour Research and Therapy}, + volume = {153}, + pages = {104086}, + issn = {00057967}, + abstract = {There is strong interest in developing a more efficient mental health care system. Digital interventions and predictive models of treatment prognosis will likely play an important role in this endeavor. This article reviews the application of popular machine learning models to the prediction of treatment prognosis, with a particular focus on digital interventions. Assuming that the prediction of treatment prognosis will involve modeling a complex combination of interacting features with measurement error in both the predictors and outcomes, our simulations suggest that to optimize complex prediction models, sample sizes in the thousands will be required. Machine learning methods capable of discovering complex interactions and nonlinear effects (e.g., decision tree ensembles such as gradient boosted machines) perform particularly well in large samples when the predictors and outcomes have virtually no measurement error. However, in the presence of moderate measurement error, these methods provide little or no benefit over regularized linear regression, even with very large sample sizes (N = 100,000) and a non-linear ground truth. Given these sample size requirements, we argue that the scalability of digital interventions, especially when used in combination with optimal measurement practices, provides one of the most effective ways to study treatment prediction models. We conclude with suggestions about how to implement these algorithms into clinical practice.}, + langid = {english}, + file = {/home/nathante/Zotero/storage/VF7RGRD4/McNamara et al. - 2022 - Not just “big” data Importance of sample size, me.pdf} +} + +@article{merkley_are_2020, + title = {Are {{Experts}} ({{News}}){{Worthy}}? 
{{Balance}}, {{Conflict}}, and {{Mass Media Coverage}} of {{Expert Consensus}}}, + shorttitle = {Are {{Experts}} ({{News}}){{Worthy}}?}, + author = {Merkley, Eric}, + date = {2020-07-03}, + journaltitle = {Political Communication}, + shortjournal = {Political Communication}, + volume = {37}, + number = {4}, + pages = {530--549}, + issn = {1058-4609, 1091-7675}, + langid = {english}, + file = {/home/nathante/Zotero/storage/U8ZU363W/Merkley - 2020 - Are Experts (News)Worthy Balance, Conflict, and M.pdf} +} + +@article{millimet_accounting_2022, + title = {Accounting for {{Skewed}} or {{One-Sided Measurement Error}} in the {{Dependent Variable}}}, + author = {Millimet, Daniel L. and Parmeter, Christopher F.}, + date = {2022-01}, + journaltitle = {Political Analysis}, + shortjournal = {Polit. Anal.}, + volume = {30}, + number = {1}, + pages = {66--88}, + issn = {1047-1987, 1476-4989}, + abstract = {While classical measurement error in the dependent variable in a linear regression framework results only in a loss of precision, nonclassical measurement error can lead to estimates, which are biased and inference which lacks power. Here, we consider a particular type of nonclassical measurement error: skewed errors. Unfortunately, skewed measurement error is likely to be a relatively common feature of many outcomes of interest in political science research. This study highlights the bias that can result even from relatively “small” amounts of skewed measurement error, particularly, if the measurement error is heteroskedastic. We also assess potential solutions to this problem, focusing on the stochastic frontier model and Nonlinear Least Squares. 
Simulations and three replications highlight the importance of thinking carefully about skewed measurement error as well as appropriate solutions.}, + langid = {english}, + file = {/home/nathante/Zotero/storage/2W869JT8/Millimet und Parmeter - 2022 - Accounting for Skewed or One-Sided Measurement Err.pdf} +} + +@article{mittos_and_2020, + title = {“{{And We Will Fight}} for {{Our Race}}!” {{A Measurement Study}} of {{Genetic Testing Conversations}} on {{Reddit}} and 4chan}, + author = {Mittos, Alexandros and Zannettou, Savvas and Blackburn, Jeremy and Cristofaro, Emiliano De}, + date = {2020-05-26}, + journaltitle = {Proceedings of the International AAAI Conference on Web and Social Media}, + volume = {14}, + pages = {452--463}, + issn = {2334-0770}, + abstract = {Progress in genomics has enabled the emergence of a booming market for “direct-to-consumer” genetic testing. Nowadays, companies like 23andMe and AncestryDNA provide affordable health, genealogy, and ancestry reports, and have already tested tens of millions of customers. At the same time, alt- and far-right groups have also taken an interest in genetic testing, using them to attack minorities and prove their genetic “purity.” In this paper, we present a measurement study shedding light on how genetic testing is being discussed on Web communities in Reddit and 4chan. We collect 1.3M comments posted over 27 months on the two platforms, using a set of 280 keywords related to genetic testing. We then use NLP and computer vision tools to identify trends, themes, and topics of discussion. Our analysis shows that genetic testing attracts a lot of attention on Reddit and 4chan, with discussions often including highly toxic language expressed through hateful, racist, and misogynistic comments. In particular, on 4chan's politically incorrect board (/pol/), content from genetic testing conversations involves several alt-right personalities and openly antisemitic rhetoric, often conveyed through memes. 
Finally, we find that discussions build around user groups, from technology enthusiasts to communities promoting fringe political views.}, + langid = {english}, + file = {/home/nathante/Zotero/storage/MJ4ZR4IJ/Mittos et al_2020_“And We Will Fight for Our Race.pdf;/home/nathante/Zotero/storage/XRRGGABM/Fong & Tyler (2020).pdf} +} + +@book{mooney_monte_1997, + title = {Monte {{Carlo}} Simulation}, + author = {Mooney, Christopher Z.}, + date = {1997}, + series = {Monte {{Carlo}} Simulation}, + pages = {viii, 103}, + publisher = {{Sage Publications, Inc}}, + location = {{Thousand Oaks, CA, US}}, + abstract = {The statistics of classical parametric inference inform us about how the world works to the extent necessary assumptions are met. When certain regression assumptions are violated, or are under suspicion of violation, Monte Carlo simulation can be a way out. For example, it allows exploration of parameter estimation granting a variety of distributions—uniform, Pareto, exponential, normal, lognormal, chi-square, Student's t, mixture or beta. Monte Carlo simulation can be used to compare estimator properties from multiequation systems, for example, 2-stage vs 3-stage estimators. Furthermore, it promises considerable payoff in the study of valuable statistics that are simply calculated but about which little is known inferentially, for example, the median or the absolute average deviation. The logic of Monte Carlo simulation is presented, a population of interest is simulated, and how to prepare the computer algorithm is explained. 
(PsycINFO Database Record (c) 2016 APA, all rights reserved)}, + isbn = {978-0-8039-5943-9}, + pagetotal = {viii, 103}, + keywords = {Social Sciences,Statistical Analysis}, + file = {/home/nathante/Zotero/storage/N7PS594X/1997-08836-000.html} +} + +@article{muddiman_reclaiming_2019, + ids = {muddiman_reclaiming_2019-1}, + title = {({{Re}}){{Claiming Our Expertise}}: {{Parsing Large Text Corpora With Manually Validated}} and {{Organic Dictionaries}}}, + shorttitle = {({{Re}}){{Claiming Our Expertise}}}, + author = {Muddiman, Ashley and McGregor, Shannon C. and Stroud, Natalie Jomini}, + date = {2019-04-03}, + journaltitle = {Political Communication}, + volume = {36}, + number = {2}, + pages = {214--226}, + publisher = {{Routledge}}, + issn = {1058-4609}, + abstract = {Content analysis of large-scale textual data sets poses myriad problems, particularly when researchers seek to analyze content that is both theoretically derived and context dependent. In this piece, we detail the approach we developed to tackle the analysis of the context-dependent content of political incivility. After describing our manually validated organic dictionaries approach, we compare the method to others we could have used and then replicate the method in a different—but still context-dependent—project examining political issue content on social media. We conclude by summarizing the strengths and weaknesses of the approach and offering suggestions for future research that can refine and expand the method.}, + keywords = {computer-aided content analysis,incivility,news comments,news issues,Twitter}, + annotation = {\_eprint: https://doi.org/10.1080/10584609.2018.1517843}, + file = {/home/nathante/Zotero/storage/MKDWDL4K/Muddiman et al_2019_(Re)Claiming Our Expertise.pdf} +} + +@article{mueller_twitter_2020, + title = {Twitter Made Me Do It! 
{{Twitter}}'s Tonal Platform Incentive and Its Effect on Online Campaigning}, + author = {Mueller, Samuel David and Saeltzer, Marius}, + date = {2020-12-12}, + journaltitle = {Information, Communication \& Society}, + shortjournal = {Information, Communication \& Society}, + pages = {1--26}, + issn = {1369-118X, 1468-4462}, + langid = {english}, + file = {/home/nathante/Zotero/storage/XKGW5R5V/Mueller und Saeltzer - 2020 - Twitter made me do it! Twitter's tonal platform in.pdf} +} + +@article{nab_mecor_2021, + title = {Mecor: {{An R}} Package for Measurement Error Correction in Linear Regression Models with a Continuous Outcome}, + shorttitle = {Mecor}, + author = {Nab, Linda and van Smeden, Maarten and Keogh, Ruth H. and Groenwold, Rolf H. H.}, + options = {useprefix=true}, + date = {2021-09-01}, + journaltitle = {Computer Methods and Programs in Biomedicine}, + shortjournal = {Computer Methods and Programs in Biomedicine}, + volume = {208}, + pages = {106238}, + issn = {0169-2607}, + abstract = {Measurement error in a covariate or the outcome of regression models is common, but is often ignored, even though measurement error can lead to substantial bias in the estimated covariate-outcome association. While several texts on measurement error correction methods are available, these methods remain seldomly applied. To improve the use of measurement error correction methodology, we developed mecor, an R package that implements measurement error correction methods for regression models with a continuous outcome. Measurement error correction requires information about the measurement error model and its parameters. This information can be obtained from four types of studies, used to estimate the parameters of the measurement error model: an internal validation study, a replicates study, a calibration study and an external validation study. 
In the package mecor, regression calibration methods and a maximum likelihood method are implemented to correct for measurement error in a continuous covariate in regression analyses. Additionally, methods of moments methods are implemented to correct for measurement error in the continuous outcome in regression analyses. Variance estimation of the corrected estimators is provided in closed form and using the bootstrap.}, + langid = {english}, + keywords = {Maximum likelihood,Measurement error correction,Method of moments,Regression calibration}, + file = {/home/nathante/Zotero/storage/P95Z6A7N/Nab et al_2021_Mecor.pdf} +} + +@article{nab_quantitative_2020, + title = {Quantitative {{Bias Analysis}} for a {{Misclassified Confounder}}: {{A Comparison Between Marginal Structural Models}} and {{Conditional Models}} for {{Point Treatments}}}, + shorttitle = {Quantitative {{Bias Analysis}} for a {{Misclassified Confounder}}}, + author = {Nab, Linda and Groenwold, Rolf H. H. and van Smeden, Maarten and Keogh, Ruth H.}, + options = {useprefix=true}, + date = {2020-11}, + journaltitle = {Epidemiology}, + volume = {31}, + number = {6}, + pages = {796--805}, + issn = {1044-3983}, + abstract = {Observational data are increasingly used with the aim of estimating causal effects of treatments, through careful control for confounding. Marginal structural models estimated using inverse probability weighting (MSMs-IPW), like other methods to control for confounding, assume that confounding variables are measured without error. The average treatment effect in an MSM-IPW may however be biased when a confounding variable is error prone. Using the potential outcome framework, we derive expressions for the bias due to confounder misclassification in analyses that aim to estimate the average treatment effect using an marginal structural model estimated using inverse probability weighting (MSM-IPW). 
We compare this bias with the bias due to confounder misclassification in analyses based on a conditional regression model. Focus is on a point-treatment study with a continuous outcome. Compared with bias in the average treatment effect in a conditional model, the bias in an MSM-IPW can be different in magnitude but is equal in sign. Also, we use a simulation study to investigate the finite sample performance of MSM-IPW and conditional models when a confounding variable is misclassified. Simulation results indicate that confidence intervals of the treatment effect obtained from MSM-IPW are generally wider, and coverage of the true treatment effect is higher compared with a conditional model, ranging from overcoverage if there is no confounder misclassification to undercoverage when there is confounder misclassification. Further, we illustrate in a study of blood pressure-lowering therapy, how the bias expressions can be used to inform a quantitative bias analysis to study the impact of confounder misclassification, supported by an online tool.}, + langid = {american}, + file = {/home/nathante/Zotero/storage/TIKY8Z49/Nab et al_2020_Quantitative Bias Analysis for a Misclassified Confounder.pdf;/home/nathante/Zotero/storage/YPZQ4NGF/Quantitative_Bias_Analysis_for_a_Misclassified.7.html} +} + +@misc{nicholls_deep_nodate, + title = {Deep Learning Models for Multilingual Supervised Political Text Classification}, + author = {Nicholls, Thomas and Culpepper, Pepper D} +} + +@online{noauthor_jigsaw_nodate, + title = {Jigsaw {{Unintended Bias}} in {{Toxicity Classification}}}, + abstract = {Detect toxicity across a diverse range of conversations}, + langid = {english}, + file = {/home/nathante/Zotero/storage/7A9N58UN/data.html} +} + +@online{noauthor_place_nodate, + title = {Place {{Your Order}} - {{Amazon}}.Com {{Checkout}}} +} + +@book{noble_algorithms_2018, + title = {Algorithms of {{Oppression}}: {{How Search Engines Reinforce Racism}}}, + shorttitle = {Algorithms of 
{{Oppression}}}, + author = {Noble, Safiya Umoja}, + date = {2018-02-20}, + edition = {Illustrated edition}, + publisher = {{NYU Press}}, + location = {{New York}}, + isbn = {978-1-4798-3724-3}, + langid = {english}, + pagetotal = {248} +} + +@article{obermeyer_dissecting_2019, + title = {Dissecting Racial Bias in an Algorithm Used to Manage the Health of Populations}, + author = {Obermeyer, Ziad and Powers, Brian and Vogeli, Christine and Mullainathan, Sendhil}, + date = {2019-10-25}, + journaltitle = {Science}, + volume = {366}, + number = {6464}, + eprint = {31649194}, + eprinttype = {pmid}, + pages = {447--453}, + issn = {0036-8075, 1095-9203}, + abstract = {Racial bias in health algorithms The U.S. health care system uses commercial algorithms to guide health decisions. Obermeyer et al. find evidence of racial bias in one widely used algorithm, such that Black patients assigned the same level of risk by the algorithm are sicker than White patients (see the Perspective by Benjamin). The authors estimated that this racial bias reduces the number of Black patients identified for extra care by more than half. Bias occurs because the algorithm uses health costs as a proxy for health needs. Less money is spent on Black patients who have the same level of need, and the algorithm thus falsely concludes that Black patients are healthier than equally sick White patients. Reformulating the algorithm so that it no longer uses costs as a proxy for needs eliminates the racial bias in predicting who needs extra care. Science, this issue p. 447; see also p. 421 Health systems rely on commercial prediction algorithms to identify and help patients with complex health needs. We show that a widely used algorithm, typical of this industry-wide approach and affecting millions of patients, exhibits significant racial bias: At a given risk score, Black patients are considerably sicker than White patients, as evidenced by signs of uncontrolled illnesses. 
Remedying this disparity would increase the percentage of Black patients receiving additional help from 17.7 to 46.5\%. The bias arises because the algorithm predicts health care costs rather than illness, but unequal access to care means that we spend less money caring for Black patients than for White patients. Thus, despite health care cost appearing to be an effective proxy for health by some measures of predictive accuracy, large racial biases arise. We suggest that the choice of convenient, seemingly effective proxies for ground truth can be an important source of algorithmic bias in many contexts. A health algorithm that uses health costs as a proxy for health needs leads to racial bias against Black patients. A health algorithm that uses health costs as a proxy for health needs leads to racial bias against Black patients.}, + langid = {english}, + file = {/home/nathante/Zotero/storage/HWSVCC4M/447.html} +} + +@incollection{oehmer-pedrazzi_automated_2023, + title = {Automated Content Analysis}, + booktitle = {Standardisierte Inhaltsanalyse in der Kommunikationswissenschaft – Standardized Content Analysis in Communication Research}, + author = {Hase, Valerie}, + editor = {Oehmer-Pedrazzi, Franziska and Kessler, Sabrina Heike and Humprecht, Edda and Sommer, Katharina and Castro, Laia}, + date = {2023}, + pages = {23--36}, + publisher = {{Springer Fachmedien Wiesbaden}}, + location = {{Wiesbaden}}, + abstract = {Abstract Due to the rise in processing power, advancements in machine learning, and the availability of large text corpora online, the use of computational methods including automated content analysis has rapidly increased. Automated content analysis is applied and developed across disciplines such as computer science, linguistics, political science, economics and – increasingly – communication science. 
This chapter offers a theoretical and applied introduction to the method, including promises and pitfalls associated with the method.}, + isbn = {978-3-658-36178-5 978-3-658-36179-2}, + langid = {ngerman}, + file = {/home/nathante/Zotero/storage/EYMUDQCL/Hase - 2023 - Automated Content Analysis.pdf} +} + +@article{opperhuizen_framing_2019, + ids = {opperhuizen_framing_2019-1}, + title = {Framing a {{Conflict}}! {{How Media Report}} on {{Earthquake Risks Caused}} by {{Gas Drilling}}: {{A Longitudinal Analysis Using Machine Learning Techniques}} of {{Media Reporting}} on {{Gas Drilling}} from 1990 to 2015}, + shorttitle = {Framing a {{Conflict}}! {{How Media Report}} on {{Earthquake Risks Caused}} by {{Gas Drilling}}}, + author = {Opperhuizen, Alette Eva and Schouten, Kim and Klijn, Erik Hans}, + date = {2019-04-04}, + journaltitle = {Journalism Studies}, + shortjournal = {Journalism Studies}, + volume = {20}, + number = {5}, + pages = {714--734}, + issn = {1461-670X, 1469-9699}, + langid = {english}, + file = {/home/nathante/Zotero/storage/DJMSRL6B/Opperhuizen et al. - 2019 - Framing a Conflict! 
How Media Report on Earthquake.pdf} +} + +@article{papasavva_is_2020, + title = {"{{Is}} It a {{Qoincidence}}?": {{A First Step Towards Understanding}} and {{Characterizing}} the {{QAnon Movement}} on {{Voat}}.Co}, + shorttitle = {"{{Is}} It a {{Qoincidence}}?}, + author = {Papasavva, Antonis and Blackburn, Jeremy and Stringhini, Gianluca and Zannettou, Savvas and De Cristofaro, Emiliano}, + date = {2020}, + langid = {english}, + keywords = {Computer Science,Computers and Society,cs.CY}, + file = {/home/nathante/Zotero/storage/U8M5WSNH/Papasavva et al_2020_Is it a Qoincidence.pdf} +} + +@article{papasavva_raiders_2020, + title = {Raiders of the {{Lost Kek}}: 3.5 {{Years}} of {{Augmented}} 4chan {{Posts}} from the {{Politically Incorrect Board}}}, + shorttitle = {Raiders of the {{Lost Kek}}}, + author = {Papasavva, Antonis and Zannettou, Savvas and Cristofaro, Emiliano De and Stringhini, Gianluca and Blackburn, Jeremy}, + date = {2020-05-26}, + journaltitle = {Proceedings of the International AAAI Conference on Web and Social Media}, + volume = {14}, + pages = {885--894}, + issn = {2334-0770}, + abstract = {This paper presents a dataset with over 3.3M threads and 134.5M posts from the Politically Incorrect board (/pol/) of the imageboard forum 4chan, posted over a period of almost 3.5 years (June 2016-November 2019). To the best of our knowledge, this represents the largest publicly available 4chan dataset, providing the community with an archive of posts that have been permanently deleted from 4chan and are otherwise inaccessible. We augment the data with a set of additional labels, including toxicity scores and the named entities mentioned in each post. We also present a statistical analysis of the dataset, providing an overview of what researchers interested in using it can expect, as well as a simple content analysis, shedding light on the most prominent discussion topics, the most popular entities mentioned, and the toxicity level of each post. 
Overall, we are confident that our work will motivate and assist researchers in studying and understanding 4chan, as well as its role on the greater Web. For instance, we hope this dataset may be used for cross-platform studies of social media, as well as being useful for other types of research like natural language processing. Finally, our dataset can assist qualitative work focusing on in-depth case studies of specific narratives, events, or social theories.}, + langid = {english}, + file = {/home/nathante/Zotero/storage/VI53KWD3/Papasavva et al_2020_Raiders of the Lost Kek.pdf} +} + +@article{pearl_fusion_1986, + title = {Fusion, Propagation, and Structuring in Belief Networks}, + author = {Pearl, Judea}, + date = {1986-09-01}, + journaltitle = {Artificial Intelligence}, + shortjournal = {Artificial Intelligence}, + volume = {29}, + number = {3}, + pages = {241--288}, + issn = {0004-3702}, + abstract = {Belief networks are directed acyclic graphs in which the nodes represent propositions (or variables), the arcs signify direct dependencies between the linked propositions, and the strengths of these dependencies are quantified by conditional probabilities. A network of this sort can be used to represent the generic knowledge of a domain expert, and it turns into a computational architecture if the links are used not merely for storing factual knowledge but also for directing and activating the data flow in the computations which manipulate this knowledge. The first part of the paper deals with the task of fusing and propagating the impacts of new information through the networks in such a way that, when equilibrium is reached, each proposition will be assigned a measure of belief consistent with the axioms of probability theory. It is shown that if the network is singly connected (e.g. 
tree-structured), then probabilities can be updated by local propagation in an isomorphic network of parallel and autonomous processors and that the impact of new information can be imparted to all propositions in time proportional to the longest path in the network. The second part of the paper deals with the problem of finding a tree-structured representation for a collection of probabilistically coupled propositions using auxiliary (dummy) variables, colloquially called “hidden causes.” It is shown that if such a tree-structured representation exists, then it is possible to uniquely uncover the topology of the tree by observing pairwise dependencies among the available propositions (i.e., the leaves of the tree). The entire tree structure, including the strengths of all internal relationships, can be reconstructed in time proportional to n log n, where n is the number of leaves.}, + langid = {english}, + file = {/home/nathante/Zotero/storage/RQ4HHQDE/Pearl_1986_Fusion, propagation, and structuring in belief networks.pdf;/home/nathante/Zotero/storage/TJGHM5D8/000437028690072X.html} +} + +@article{pepe_insights_2007, + title = {Insights into Latent Class Analysis of Diagnostic Test Performance}, + author = {Pepe, Margaret Sullivan and Janes, Holly}, + date = {2007-04-01}, + journaltitle = {Biostatistics}, + shortjournal = {Biostatistics}, + volume = {8}, + number = {2}, + pages = {474--484}, + issn = {1465-4644}, + abstract = {Latent class analysis is used to assess diagnostic test accuracy when a gold standard assessment of disease is not available but results of multiple imperfect tests are. We consider the simplest setting, where 3 tests are observed and conditional independence (CI) is assumed. Closed-form expressions for maximum likelihood parameter estimates are derived. They show explicitly how observed 2- and 3-way associations between test results are used to infer disease prevalence and test true- and false-positive rates. 
Although interesting and reasonable under CI, the estimators clearly have no basis when it fails. Intuition for bias induced by conditional dependence follows from the analytic expressions. Further intuition derives from an Expectation Maximization (EM) approach to calculating the estimates. We discuss implications of our results and related work for settings where more than 3 tests are available. We conclude that careful justification of assumptions about the dependence between tests in diseased and nondiseased subjects is necessary in order to ensure unbiased estimates of prevalence and test operating characteristics and to provide these estimates clinical interpretations. Such justification must be based in part on a clear clinical definition of disease and biological knowledge about mechanisms giving rise to test results.}, + file = {/home/nathante/Zotero/storage/MI5DX4GP/Pepe_Janes_2007_Insights into latent class analysis of diagnostic test performance.pdf;/home/nathante/Zotero/storage/4HJEMBH2/232752.html} +} + +@article{pilny_using_2019, + ids = {pilny_using_2019-1}, + title = {Using {{Supervised Machine Learning}} in {{Automated Content Analysis}}: {{An Example Using Relational Uncertainty}}}, + shorttitle = {Using {{Supervised Machine Learning}} in {{Automated Content Analysis}}}, + author = {Pilny, Andrew and McAninch, Kelly and Slone, Amanda and Moore, Kelsey}, + date = {2019-10-02}, + journaltitle = {Communication Methods and Measures}, + volume = {13}, + number = {4}, + pages = {287--304}, + publisher = {{Routledge}}, + issn = {1931-2458}, + abstract = {The goal of this research is to make progress towards using supervised machine learning for automated content analysis dealing with complex interpretations of text. For Step 1, two humans coded a sub-sample of online forum posts for relational uncertainty. For Step 2, we evaluated reliability, in which we trained three different classifiers to learn from those subjective human interpretations. 
Reliability was established when two different metrics of inter-coder reliability could not distinguish whether a human or a machine coded the text on a separate hold-out set. Finally, in Step 3 we assessed validity. To accomplish this, we administered a survey in which participants described their own relational uncertainty/certainty via text and completed a questionnaire. After classifying the text, the machine’s classifications of the participants’ text positively correlated with the subjects’ own self-reported relational uncertainty and relational satisfaction. We discuss our results in line with areas of computational communication science, content analysis, and interpersonal communication.}, + annotation = {\_eprint: https://doi.org/10.1080/19312458.2019.1650166}, + file = {/home/nathante/Zotero/storage/6W4S82UP/Pilny et al_2019_Using Supervised Machine Learning in Automated Content Analysis.pdf;/home/nathante/Zotero/storage/VZHKQWIE/19312458.2019.html} +} + +@article{pipal_if_2022, + title = {If {{You Have Choices}}, {{Why Not Choose}} (and {{Share}}) {{All}} of {{Them}}? 
{{A Multiverse Approach}} to {{Understanding News Engagement}} on {{Social Media}}}, + shorttitle = {If {{You Have Choices}}, {{Why Not Choose}} (and {{Share}}) {{All}} of {{Them}}?}, + author = {Pipal, Christian and Song, Hyunjin and Boomgaarden, Hajo G.}, + date = {2022-03-02}, + journaltitle = {Digital Journalism}, + shortjournal = {Digital Journalism}, + pages = {1--21}, + issn = {2167-0811, 2167-082X}, + langid = {english} +} + +@article{rajadesingan_quick_2020, + title = {Quick, {{Community-Specific Learning}}: {{How Distinctive Toxicity Norms Are Maintained}} in {{Political Subreddits}}}, + shorttitle = {Quick, {{Community-Specific Learning}}}, + author = {Rajadesingan, Ashwin and Resnick, Paul and Budak, Ceren}, + date = {2020-05-26}, + journaltitle = {Proceedings of the International AAAI Conference on Web and Social Media}, + volume = {14}, + pages = {557--568}, + issn = {2334-0770}, + abstract = {Online communities about similar topics may maintain very different norms of interaction. Past research identifies many processes that contribute to maintaining stable norms, including self-selection, pre-entry learning, post-entry learning, and retention. We analyzed political subreddits that had distinctive, stable levels of toxic comments on Reddit, in order to identify the relative contribution of these four processes. Surprisingly, we find that the largest source of norm stability is pre-entry learning. That is, newcomers' first comments in these distinctive subreddits differ from those same people's prior behavior in other subreddits. Through this adjustment, they nearly match the toxicity level of the subreddit they are joining. We also show that behavior adjustments are community-specific and not broadly transformative. That is, people continue to post toxic comments at their previous rates in other political subreddits. 
Thus, we conclude that in political subreddits, compatible newcomers are neither born nor made– they make local adjustments on their own.}, + langid = {english}, + file = {/home/nathante/Zotero/storage/CTW2MSVA/Rajadesingan et al_2020_Quick, Community-Specific Learning.pdf} +} + +@report{ralund_measurement_2022, + type = {preprint}, + title = {Measurement Error and Model Instability in Automated Text Analysis: The Case of Topic Models}, + shorttitle = {Measurement Error and Model Instability in Automated Text Analysis}, + author = {Ralund, Snorre and Carlsen, Hjalmar Bang and Klemmensen, Robert and Lassen, David Dreyer}, + date = {2022-09-29}, + institution = {{SocArXiv}}, + abstract = {Topic models (TMs) have become the de facto standard for automated content analysis in the social sciences. Some problematic aspects of the models, however, have been recently identified. These mainly concern the high variability of solutions that is the result of both preprocessing and non-deterministic inference in high-dimensional and very large solution spaces. Whether current model selection and validation practices are sufficient to ensure precise and unbiased measurement has been the subject of debate. This paper argues that the prevalent practices of model selection and validation for ensuring unbiased measurement are indeed insufficient. This paper focus is on the use of summarization techniques during labeling, indirect validation techniques, and the employment of heuristics for model selection. Our results document non-trivial biases and show that current model validation techniques lead to arbitrary research outcomes. 
The study concludes by providing recommendations on ways to validate work based on topic models.} +} + +@article{rauchfleisch_false_2020, + title = {The {{False}} Positive Problem of Automatic Bot Detection in Social Science Research}, + author = {Rauchfleisch, Adrian and Kaiser, Jonas}, + date = {2020-10-22}, + journaltitle = {PLOS ONE}, + shortjournal = {PLOS ONE}, + volume = {15}, + number = {10}, + pages = {e0241045}, + publisher = {{Public Library of Science}}, + issn = {1932-6203}, + abstract = {The identification of bots is an important and complicated task. The bot classifier "Botometer" was successfully introduced as a way to estimate the number of bots in a given list of accounts and, as a consequence, has been frequently used in academic publications. Given its relevance for academic research and our understanding of the presence of automated accounts in any given Twitter discourse, we are interested in Botometer’s diagnostic ability over time. To do so, we collected the Botometer scores for five datasets (three verified as bots, two verified as human; n = 4,134) in two languages (English/German) over three months. We show that the Botometer scores are imprecise when it comes to estimating bots; especially in a different language. We further show in an analysis of Botometer scores over time that Botometer's thresholds, even when used very conservatively, are prone to variance, which, in turn, will lead to false negatives (i.e., bots being classified as humans) and false positives (i.e., humans being classified as bots). This has immediate consequences for academic research as most studies in social science using the tool will unknowingly count a high number of human users as bots and vice versa. 
We conclude our study with a discussion about how computational social scientists should evaluate machine learning systems that are developed for identifying bots.}, + langid = {english}, + keywords = {Automation,Machine learning,Scientists,Social communication,Social media,Social research,Social sciences,Twitter}, + file = {/home/nathante/Zotero/storage/CSEHIDQE/Rauchfleisch_Kaiser_2020_The False positive problem of automatic bot detection in social science research.pdf;/home/nathante/Zotero/storage/37AK3T2Q/article.html} +} + +@unpublished{reiss_reporting_2022, + title = {Reporting {{Supervised Text Analysis}} for {{Communication Science}}}, + author = {Reiss, Michael and Kobilke, Lara and Stoll, Anke}, + date = {2022-06-10}, + venue = {{Annual Conference of the Methods Section of the German Communication Section, Munich}} +} + +@article{rettberg_algorithmic_2022-1, + title = {Algorithmic Failure as a Humanities Methodology: {{Machine}} Learning's Mispredictions Identify Rich Cases for Qualitative Analysis}, + shorttitle = {Algorithmic Failure as a Humanities Methodology}, + author = {Rettberg, Jill Walker}, + date = {2022-07}, + journaltitle = {Big Data \& Society}, + shortjournal = {Big Data \& Society}, + volume = {9}, + number = {2}, + pages = {205395172211312}, + issn = {2053-9517, 2053-9517}, + abstract = {This commentary tests a methodology proposed by Munk et al. (2022) for using failed predictions in machine learning as a method to identify ambiguous and rich cases for qualitative analysis. Using a dataset describing actions performed by fictional characters interacting with machine vision technologies in 500 artworks, movies, novels and videogames, I trained a simple machine learning algorithm (using the kNN algorithm in R) to predict whether or not an action was active or passive using only information about the fictional characters. 
Predictable actions were generally unemotional and unambiguous activities where machine vision technologies were treated as simple tools. Unpredictable actions, that is, actions that the algorithm could not correctly predict, were more ambivalent and emotionally loaded, with more complex power relationships between characters and technologies. The results thus support Munk et al.'s theory that failed predictions can be productively used to identify rich cases for qualitative analysis. This test goes beyond simply replicating Munk et al.'s results by demonstrating that the method can be applied to a broader humanities domain, and that it does not require complex neural networks but can also work with a simpler machine learning algorithm. Further research is needed to develop an understanding of what kinds of data the method is useful for and which kinds of machine learning are most generative. To support this, the R code required to produce the results is included so the test can be replicated. The code can also be reused or adapted to test the method on other datasets.}, + langid = {english}, + file = {/home/nathante/Zotero/storage/MPCYENAB/Rettberg - 2022 - Algorithmic failure as a humanities methodology M.pdf} +} + +@article{rice_machine_2021, + title = {Machine Coding of Policy Texts with the {{Institutional Grammar}}}, + author = {Rice, Douglas and Siddiki, Saba and Frey, Seth and Kwon, Jay H. and Sawyer, Adam}, + date = {2021}, + journaltitle = {Public Administration}, + volume = {99}, + number = {2}, + pages = {248--262}, + issn = {1467-9299}, + abstract = {The Institutional Grammar (IG) is used to analyse the syntactic structure of statements constituting institutions (e.g., policies, regulations, and norms) that indicate behavioural constraints and parameterize features of institutionally governed domains. 
Policy and administration scholars have made considerable progress in methodologically developing the IG, offering increasingly clear guidelines for IG-based coding, identifying unique considerations for applying the IG to different types of institutions, and expanding its syntactic scope. However, while validated as a robust institutional analysis approach, the resource and time commitment associated with its application has precipitated concerns over whether the IG might ever enjoy widespread use. Needed now in the methodological development of the IG are reliable and accessible (i.e., open source) approaches that reduce the costs associated with its application. We propose an automated approach leveraging computational text analysis and natural language processing. We then present results from an evaluation in the context of food system regulations.}, + langid = {english}, + annotation = {\_eprint: https://onlinelibrary.wiley.com/doi/pdf/10.1111/padm.12711}, + file = {/home/nathante/Zotero/storage/C7ZBPYPY/padm.html} +} + +@article{rieder_fabrics_2021, + title = {The Fabrics of Machine Moderation: {{Studying}} the Technical, Normative, and Organizational Structure of {{Perspective API}}}, + shorttitle = {The Fabrics of Machine Moderation}, + author = {Rieder, Bernhard and Skop, Yarden}, + date = {2021-07-01}, + journaltitle = {Big Data \& Society}, + shortjournal = {Big Data \& Society}, + volume = {8}, + number = {2}, + pages = {20539517211046181}, + publisher = {{SAGE Publications Ltd}}, + issn = {2053-9517}, + abstract = {Over recent years, the stakes and complexity of online content moderation have been steadily raised, swelling from concerns about personal conflict in smaller communities to worries about effects on public life and democracy. Because of the massive growth in online expressions, automated tools based on machine learning are increasingly used to moderate speech. 
While ‘design-based governance’ through complex algorithmic techniques has come under intense scrutiny, critical research covering algorithmic content moderation is still rare. To add to our understanding of concrete instances of machine moderation, this article examines Perspective API, a system for the automated detection of ‘toxicity’ developed and run by the Google unit Jigsaw that can be used by websites to help moderate their forums and comment sections. The article proceeds in four steps. First, we present our methodological strategy and the empirical materials we were able to draw on, including interviews, documentation, and GitHub repositories. We then summarize our findings along five axes to identify the various threads Perspective API brings together to deliver a working product. The third section discusses two conflicting organizational logics within the project, paying attention to both critique and what can be learned from the specific case at hand. We conclude by arguing that the opposition between ‘human’ and ‘machine’ in speech moderation obscures the many ways these two come together in concrete systems, and suggest that the way forward requires proactive engagement with the design of technologies as well as the institutions they are embedded in.}, + langid = {english}, + keywords = {Algorithmic content moderation,Google Jigsaw,machine learning,moral engineering,Perspective API,platformization}, + file = {/home/nathante/Zotero/storage/XQZZZJU6/Rieder_Skop_2021_The fabrics of machine moderation.pdf} +} + +@inproceedings{salminen_online_2018, + title = {Online {{Hate Interpretation Varies}} by {{Country}}, {{But More}} by {{Individual}}: {{A Statistical Analysis Using Crowdsourced Ratings}}}, + shorttitle = {Online {{Hate Interpretation Varies}} by {{Country}}, {{But More}} by {{Individual}}}, + booktitle = {2018 {{Fifth International Conference}} on {{Social Networks Analysis}}, {{Management}} and {{Security}} ({{SNAMS}})}, + author = {Salminen, 
Joni and Veronesi, Fabio and Almerekhi, Hind and Jung, Soon-gyo and Jansen, Bernard J.}, + date = {2018-10}, + pages = {88--94}, + abstract = {Hate is prevalent in online social media. This has resulted in a considerable amount of research in detecting and scoring it. Most computational efforts involve machine learning with crowdsourced ratings as training data. A prominent example of this is the Perspective API, a tool by Google to score toxicity of online comments. However, a major issue in the existing approaches is the lack of consideration for the subjective nature of online hate. While there is research that shows the intensity of hate varies and the hate depends on the context, there is no research that systematically investigates how hate interpretation varies by country or individual. In this exploratory research, we undertake this challenge. We sample crowd workers from 50 countries, have them score the same social media comments for toxicity and then evaluate the differences in the scores, altogether 18,125 ratings. We find that the interpretation score differences among countries are highly significant. However, the hate interpretations vary more by the individual raters than by countries. 
These findings suggest that hate scoring systems should consider user-level features when scoring and automating the processing of online hate.}, + eventtitle = {2018 {{Fifth International Conference}} on {{Social Networks Analysis}}, {{Management}} and {{Security}} ({{SNAMS}})}, + keywords = {Dictionaries,Facebook,hateinterpretation,Media,Online hate,Security,social media,Task analysis,YouTube}, + file = {/home/nathante/Zotero/storage/WWS3JFLS/Salminen et al_2018_Online Hate Interpretation Varies by Country, But More by Individual.pdf;/home/nathante/Zotero/storage/7IY8BXP4/8554954.html} +} + +@article{salminen_topic-driven_2020, + title = {Topic-Driven Toxicity: {{Exploring}} the Relationship between Online Toxicity and News Topics}, + shorttitle = {Topic-Driven Toxicity}, + author = {Salminen, Joni and Sengün, Sercan and Corporan, Juan and Jung, Soon-gyo and Jansen, Bernard J.}, + date = {2020-02-21}, + journaltitle = {PLOS ONE}, + shortjournal = {PLOS ONE}, + volume = {15}, + number = {2}, + pages = {e0228723}, + publisher = {{Public Library of Science}}, + issn = {1932-6203}, + abstract = {Hateful commenting, also known as ‘toxicity’, frequently takes place within news stories in social media. Yet, the relationship between toxicity and news topics is poorly understood. To analyze how news topics relate to the toxicity of user comments, we classify topics of 63,886 online news videos of a large news channel using a neural network and topical tags used by journalists to label content. We score 320,246 user comments from those videos for toxicity and compare how the average toxicity of comments varies by topic. Findings show that topics like Racism, Israel-Palestine, and War \& Conflict have more toxicity in the comments, and topics such as Science \& Technology, Environment \& Weather, and Arts \& Culture have less toxic commenting. 
Qualitative analysis reveals five themes: Graphic videos, Humanistic stories, History and historical facts, Media as a manipulator, and Religion. We also observe cases where a typically more toxic topic becomes non-toxic and where a typically less toxic topic becomes “toxicified” when it involves sensitive elements, such as politics and religion. Findings suggest that news comment toxicity can be characterized as topic-driven toxicity that targets topics rather than as vindictive toxicity that targets users or groups. Practical implications suggest that humanistic framing of the news story (i.e., reporting stories through real everyday people) can reduce toxicity in the comments of an otherwise toxic topic.}, + langid = {english}, + keywords = {Internet,Language,Machine learning,Racial discrimination,Religion,Russia,Social media,Toxicity}, + file = {/home/nathante/Zotero/storage/V8AU2PHU/Salminen et al_2020_Topic-driven toxicity.pdf;/home/nathante/Zotero/storage/GZMK7WD6/article.html} +} + +@article{scharkow_content_2017, + title = {Content Analysis, Automatic}, + author = {Scharkow, Michael}, + date = {2017}, + journaltitle = {The international encyclopedia of communication research methods}, + pages = {1--14}, + publisher = {{John Wiley \& Sons, Inc. 
Hoboken, NJ, USA}}, + file = {/home/nathante/Zotero/storage/VU8JC2YH/Scharkow_2017_Content analysis, automatic.pdf} +} + +@article{scharkow_how_2017, + title = {How {{Measurement Error}} in {{Content Analysis}} and {{Self-Reported Media Use Leads}} to {{Minimal Media Effect Findings}} in {{Linkage Analyses}}: {{A Simulation Study}}}, + shorttitle = {How {{Measurement Error}} in {{Content Analysis}} and {{Self-Reported Media Use Leads}} to {{Minimal Media Effect Findings}} in {{Linkage Analyses}}}, + author = {Scharkow, Michael and Bachl, Marko}, + date = {2017-07-03}, + journaltitle = {Political Communication}, + volume = {34}, + number = {3}, + pages = {323--343}, + publisher = {{Routledge}}, + issn = {1058-4609}, + abstract = {In the debate on minimal media effects and their causes, methodological concerns about measurement are rarely discussed. We argue that even in state-of-the-art media-effects studies that combine measures of media messages and media use (i.e., linkage analyses), measurement error in both the media content analysis and the media use self-reports will typically lead to severely downward-biased effect estimates. We demonstrate this phenomenon using a large Monte Carlo simulation with varying parameters of the content analysis and the survey study. Results show that measurement error in the content analysis and media use variables does indeed lead to smaller effect estimates, especially when the media messages of interest are relatively rare. 
We discuss these findings as well as possible remedies and implications for future research.}, + keywords = {content analysis,Corrigendum,linkage analysis,media effects,media use,Monte Carlo simulation,reliability}, + annotation = {\_eprint: https://doi.org/10.1080/10584609.2016.1235640}, + file = {/home/nathante/Zotero/storage/M5A6LIZQ/Scharkow_Bachl_2017_How Measurement Error in Content Analysis and Self-Reported Media Use Leads to.pdf} +} + +@article{scharkow_thematic_2013, + title = {Thematic Content Analysis Using Supervised Machine Learning: {{An}} Empirical Evaluation Using {{German}} Online News}, + shorttitle = {Thematic Content Analysis Using Supervised Machine Learning}, + author = {Scharkow, Michael}, + date = {2013-02-01}, + journaltitle = {Quality \& Quantity}, + shortjournal = {Qual Quant}, + volume = {47}, + number = {2}, + pages = {761--773}, + issn = {1573-7845}, + abstract = {In recent years, two approaches to automatic content analysis have been introduced in the social sciences: semantic network analysis and supervised text classification. We argue that, although less linguistically sophisticated than semantic parsing techniques, statistical machine learning offers many advantages for applied communication research. By using manually coded material for training, supervised classification seamlessly bridges the gap between traditional and automatic content analysis. In this paper, we briefly introduce the conceptual foundations of machine learning approaches to text classification and discuss their application in social science research. We then evaluate their potential in an experimental study in which German online news was coded with established thematic categories. Moreover, we investigate whether and how linguistic preprocessing can improve classification quality. 
Results indicate that supervised text classification is generally robust and reliable for some categories, but may even be useful when it fails.}, + langid = {english}, + keywords = {Bayesian classifier,Content analysis,Machine learning,Online news}, + file = {/home/nathante/Zotero/storage/L6G36ZJV/Scharkow_2013_Thematic content analysis using supervised machine learning.pdf} +} + +@article{schwartz_neglected_1985, + title = {The {{Neglected Problem}} of {{Measurement Error}} in {{Categorical Data}}}, + author = {Schwartz, Joseph E.}, + date = {1985-05}, + journaltitle = {Sociological Methods \& Research}, + shortjournal = {Sociological Methods \& Research}, + volume = {13}, + number = {4}, + pages = {435--466}, + issn = {0049-1241, 1552-8294}, + abstract = {The problems created by measurement error are entirely ignored in the vast majority of statistical analyses. To adjust for the effects of measurement error requires both a theory, or model, of measurement and estimates of the relevant measurement parameters (e.g., reliability coefficients). A fairly well-developed measurement theory for interval level data has been known for quite some time. A corresponding measurement theory for categorical data is not widely known even though such data are at least as important in the social sciences as interval data. Nevertheless, such a theory exists in the statistical journals. The primary purpose of this article is pedagogical: that is, to present the foundation of this theory for binary variables, the simplest type of categorical variable, and to demonstrate that the consequences of measurement errors in binary data are different from and probably more serious than the effects of measurement errors in interval level data. The principal reason for this is that measurement errors in a binary variable are likely to have a nonzero mean and will always be negatively correlated with the underlying true scores. 
The former has the effect of biasing the sample estimate of the mean, often to such a degree that the likelihood that a 95\% confidence interval will contain the population mean is almost nil.}, + langid = {english}, + file = {/home/nathante/Zotero/storage/3HPFWPK6/Schwartz (1985) The Neglected Problem of Measurement Error in Categorical Data.pdf} +} + +@article{song_validations_2020, + title = {In {{Validations We Trust}}? {{The Impact}} of {{Imperfect Human Annotations}} as a {{Gold Standard}} on the {{Quality}} of {{Validation}} of {{Automated Content Analysis}}}, + shorttitle = {In {{Validations We Trust}}?}, + author = {Song, Hyunjin and Tolochko, Petro and Eberl, Jakob-Moritz and Eisele, Olga and Greussing, Esther and Heidenreich, Tobias and Lind, Fabienne and Galyga, Sebastian and Boomgaarden, Hajo G.}, + date = {2020-07-03}, + journaltitle = {Political Communication}, + shortjournal = {Political Communication}, + volume = {37}, + number = {4}, + pages = {550--572}, + issn = {1058-4609, 1091-7675}, + abstract = {Political communication has become one of the central arenas of innovation in the application of automated analysis approaches to ever-growing quantities of digitized texts. However, although researchers routinely and conveniently resort to certain forms of human coding to validate the results derived from automated procedures, in practice the actual “quality assurance” of such a “gold standard” often goes unchecked. Contemporary practices of validation via manual annotations are far from being acknowledged as best practices in the literature, and the reporting and interpretation of validation procedures differ greatly. We systematically assess the connection between the quality of human judgment in manual annotations and the relative performance evaluations of automated procedures against true standards by relying on large-scale Monte Carlo simulations. 
The results from the simulations confirm that there is a substantially greater risk of a researcher reaching an incorrect conclusion regarding the performance of automated procedures when the quality of manual annotations used for validation is not properly ensured. Our contribution should therefore be regarded as a call for the systematic application of high-quality manual validation materials in any political communication study, drawing on automated text analysis procedures.}, + langid = {english}, + file = {/home/nathante/Zotero/storage/FIX97HQ6/Song et al. - 2020 - In Validations We Trust The Impact of Imperfect H.pdf} +} + +@article{stoll_developing_2023, + title = {Developing an {{Incivility Dictionary}} for {{German Online Discussions}} – a {{Semi-Automated Approach Combining Human}} and {{Artificial Knowledge}}}, + author = {Stoll, Anke and Wilms, Lena and Ziegele, Marc}, + date = {2023-02-05}, + journaltitle = {Communication Methods and Measures}, + shortjournal = {Communication Methods and Measures}, + pages = {1--19}, + issn = {1931-2458, 1931-2466}, + langid = {english} +} + +@article{stoll_supervised_2020, + title = {Supervised Machine Learning mit Nutzergenerierten Inhalten: Oversampling für nicht balancierte Trainingsdaten}, + shorttitle = {Supervised Machine Learning mit Nutzergenerierten Inhalten}, + author = {Stoll, Anke}, + date = {2020-05}, + journaltitle = {Publizistik}, + shortjournal = {Publizistik}, + volume = {65}, + number = {2}, + pages = {233--251}, + issn = {0033-4006, 1862-2569}, + langid = {ngerman}, + file = {/home/nathante/Zotero/storage/LYDJHHFJ/Stoll - 2020 - Supervised Machine Learning mit Nutzergenerierten .pdf} +} + +@article{su_uncivil_2018, + title = {Uncivil and Personal? 
{{Comparing}} Patterns of Incivility in Comments on the {{Facebook}} Pages of News Outlets}, + shorttitle = {Uncivil and Personal?}, + author = {Su, Leona Yi-Fan and Xenos, Michael A and Rose, Kathleen M and Wirz, Christopher and Scheufele, Dietram A and Brossard, Dominique}, + date = {2018-10}, + journaltitle = {New Media \& Society}, + shortjournal = {New Media \& Society}, + volume = {20}, + number = {10}, + pages = {3678--3699}, + issn = {1461-4448, 1461-7315}, + abstract = {Social media and its embedded user commentary are playing increasingly influential roles in the news process. However, researchers’ understanding of the social media commenting environment remains limited, despite rising concerns over uncivil comments. Accordingly, this study used a supervised machine learning–based method of content analysis to examine the extent and patterns of incivility in the comment sections of 42 US news outlets’ Facebook pages over an 18-month period in 2015–2016. These outlets were selected as being broadly representative of national, local, conservative, and liberal-news media. The findings provide the first empirical evidence that both the level and the targets of incivility in the comments posted on news outlets’ Facebook pages vary greatly according to such entities’ general type and ideological stance.}, + langid = {english}, + file = {/home/nathante/Zotero/storage/ZSEXMRA3/Su et al. 
- 2018 - Uncivil and personal Comparing patterns of incivi.pdf} +} + +@unpublished{tay_conceptual_2022, + title = {A {{Conceptual Framework}} for {{Investigating}} and {{Mitigating Machine Learning Measurement Bias}} ({{MLMB}}) in {{Psychological Assessment}}}, + author = {Tay, Louis and Woo, Sang Eun and Hickman, Louis and Booth, Brandon and D'Mello, Sidney K.}, + date = {2022}, + howpublished = {Working Draft}, + file = {/home/nathante/Zotero/storage/ELRHYNKL/Tayetal.2021MachineLearningMeasurementBias.pdf} +} + +@article{teblunthuis_effects_2021, + ids = {teblunthuis_effects_2020}, + title = {Effects of {{Algorithmic Flagging}} on {{Fairness}}: {{Quasi-experimental Evidence}} from {{Wikipedia}}}, + shorttitle = {Effects of {{Algorithmic Flagging}} on {{Fairness}}}, + author = {TeBlunthuis, Nathan and Hill, Benjamin Mako and Halfaker, Aaron}, + date = {2021-04-22}, + journaltitle = {Proceedings of the ACM on Human-Computer Interaction}, + shortjournal = {Proc. ACM Hum.-Comput. Interact.}, + volume = {5}, + eprint = {2006.03121}, + eprinttype = {arxiv}, + pages = {56:1--56:27}, + abstract = {Online community moderators often rely on social signals such as whether or not a user has an account or a profile page as clues that users may cause problems. Reliance on these clues can lead to "overprofiling'' bias when moderators focus on these signals but overlook the misbehavior of others. We propose that algorithmic flagging systems deployed to improve the efficiency of moderation work can also make moderation actions more fair to these users by reducing reliance on social signals and making norm violations by everyone else more visible. We analyze moderator behavior in Wikipedia as mediated by RCFilters, a system which displays social signals and algorithmic flags, and estimate the causal effect of being flagged on moderator actions. 
We show that algorithmically flagged edits are reverted more often, especially those by established editors with positive social signals, and that flagging decreases the likelihood that moderation actions will be undone. Our results suggest that algorithmic flagging systems can lead to increased fairness in some contexts but that the relationship is complex and contingent.}, + archiveprefix = {arXiv}, + issue = {CSCW1}, + keywords = {ai,causal inference,community norms,fairness,machine learning,moderation,online communities,peer production,sociotechnical systems,wikipedia}, + file = {/home/nathante/Zotero/storage/8KVI8QKZ/TeBlunthuis et al. - 2021 - Effects of Algorithmic Flagging on Fairness Quasi.pdf;/home/nathante/Zotero/storage/E2RPTEMM/TeBlunthuis et al_2021_Effects of Algorithmic Flagging on Fairness.pdf;/home/nathante/Zotero/storage/LAJEZ9JV/TeBlunthuis et al. - 2021 - Effects of Algorithmic Flagging on Fairness Quasi.pdf;/home/nathante/Zotero/storage/NWM56G48/TeBlunthuis et al_2020_The effects of algorithmic flagging on fairness.pdf;/home/nathante/Zotero/storage/YBYI7VSP/2006.html} +} + +@inproceedings{teblunthuis_measuring_2021, + title = {Measuring {{Wikipedia Article Quality}} in {{One Dimension}} by {{Extending ORES}} with {{Ordinal Regression}}}, + booktitle = {17th {{International Symposium}} on {{Open Collaboration}}}, + author = {Teblunthuis, Nathan}, + date = {2021-09-15}, + series = {{{OpenSym}} 2021}, + pages = {1--10}, + publisher = {{Association for Computing Machinery}}, + location = {{New York, NY, USA}}, + abstract = {Organizing complex peer production projects and advancing scientific knowledge of open collaboration each depend on the ability to measure quality. Wikipedia community members and academic researchers have used article quality ratings for purposes like tracking knowledge gaps and studying how political polarization shapes collaboration. Even so, measuring quality presents many methodological challenges. 
The most widely used systems use quality assesements on discrete ordinal scales, but such labels can be inconvenient for statistics and machine learning. Prior work handles this by assuming that different levels of quality are “evenly spaced” from one another. This assumption runs counter to intuitions about degrees of effort needed to raise Wikipedia articles to different quality levels. I describe a technique extending the Wikimedia Foundations’ ORES article quality model to address these limitations. My method uses weighted ordinal regression models to construct one-dimensional continuous measures of quality. While scores from my technique and from prior approaches are correlated, my approach improves accuracy for research datasets and provides evidence that the “evenly spaced” assumption is unfounded in practice on English Wikipedia. I conclude with recommendations for using quality scores in future research and include the full code, data, and models.}, + isbn = {978-1-4503-8500-8}, + keywords = {datasets,machine learning,measurement,methods,online communities,peer production,quality,sociotechnical systems,statistics,Wikipedia}, + file = {/home/nathante/Zotero/storage/5PU87696/Teblunthuis_2021_Measuring Wikipedia Article Quality in One Dimension by Extending ORES with.pdf} +} + +@article{theocharis_dynamics_2020, + title = {The {{Dynamics}} of {{Political Incivility}} on {{Twitter}}}, + author = {Theocharis, Yannis and Barberá, Pablo and Fazekas, Zoltán and Popa, Sebastian Adrian}, + date = {2020-04}, + journaltitle = {SAGE Open}, + shortjournal = {SAGE Open}, + volume = {10}, + number = {2}, + pages = {215824402091944}, + issn = {2158-2440, 2158-2440}, + abstract = {Online incivility and harassment in political communication have become an important topic of concern among politicians, journalists, and academics. This study provides a descriptive account of uncivil interactions between citizens and politicians on Twitter. 
We develop a conceptual framework for understanding the dynamics of incivility at three distinct levels: macro (temporal), meso (contextual), and micro (individual). Using longitudinal data from the Twitter communication mentioning Members of Congress in the United States across a time span of over a year and relying on supervised machine learning methods and topic models, we offer new insights about the prevalence and dynamics of incivility toward legislators. We find that uncivil tweets represent consistently around 18\% of all tweets mentioning legislators, but with spikes that correspond to controversial policy debates and political events. Although we find evidence of coordinated attacks, our analysis reveals that the use of uncivil language is common to a large number of users.}, + langid = {english}, + file = {/home/nathante/Zotero/storage/6W97WXV2/Theocharis et al. - 2020 - The Dynamics of Political Incivility on Twitter.pdf} +} + +@article{trilling_scaling_2018, + title = {Scaling up {{Content Analysis}}}, + author = {Trilling, Damian and Jonkman, Jeroen G. F.}, + date = {2018-04-03}, + journaltitle = {Communication Methods and Measures}, + volume = {12}, + number = {2-3}, + pages = {158--174}, + publisher = {{Routledge}}, + issn = {1931-2458}, + abstract = {Employing a number of different standalone programs is a prevalent approach among communication scholars who use computational methods to analyze media content. For instance, a researcher might use a specific program or a paid service to scrape some content from the Web, then use another program to process the resulting data, and finally conduct statistical analysis or produce some visualizations in yet another program. This makes it hard to build reproducible workflows, and even harder to build on the work of earlier studies. 
To improve this situation, we propose and discuss four criteria that a framework for automated content analysis should fulfill: scalability, free and open source, adaptability, and accessibility via multiple interfaces. We also describe how to put these considerations into practice, discuss their feasibility, and point toward future developments.}, + annotation = {\_eprint: https://doi.org/10.1080/19312458.2018.1447655}, + file = {/home/nathante/Zotero/storage/8EAAYQQE/Trilling_Jonkman_2018_Scaling up Content Analysis.pdf} +} + +@article{van_atteveldt_validity_2021, + title = {The {{Validity}} of {{Sentiment Analysis}}: {{Comparing Manual Annotation}}, {{Crowd-Coding}}, {{Dictionary Approaches}}, and {{Machine Learning Algorithms}}}, + shorttitle = {The {{Validity}} of {{Sentiment Analysis}}}, + author = {van Atteveldt, Wouter and van der Velden, Mariken A. C. G. and Boukes, Mark}, + options = {useprefix=true}, + date = {2021-04-03}, + journaltitle = {Communication Methods and Measures}, + volume = {15}, + number = {2}, + pages = {121--140}, + issn = {1931-2458}, + abstract = {Sentiment is central to many studies of communication science, from negativity and polarization in political communication to analyzing product reviews and social media comments in other sub-fields. This study provides an exhaustive comparison of sentiment analysis methods, using a validation set of Dutch economic headlines to compare the performance of manual annotation, crowd coding, numerous dictionaries and machine learning using both traditional and deep learning algorithms. The three main conclusions of this article are that: (1) The best performance is still attained with trained human or crowd coding; (2) None of the used dictionaries come close to acceptable levels of validity; and (3) machine learning, especially deep learning, substantially outperforms dictionary-based methods but falls short of human performance. 
From these findings, we stress the importance of always validating automatic text analysis methods before usage. Moreover, we provide a recommended step-by-step approach for (automated) text analysis projects to ensure both efficiency and validity.}, + keywords = {Automated Approaches,Evaluation,Manual Annotation,Measurement,Sentiment Analysis,Validity}, + file = {/home/nathante/Zotero/storage/M658DYHG/van Atteveldt et al_2021_The Validity of Sentiment Analysis.pdf} +} + +@article{van_smeden_reflection_2020, + title = {Reflection on Modern Methods: Five Myths about Measurement Error in Epidemiological Research}, + shorttitle = {Reflection on Modern Methods}, + author = {van Smeden, Maarten and Lash, Timothy L and Groenwold, Rolf H H}, + options = {useprefix=true}, + date = {2020-02-01}, + journaltitle = {International Journal of Epidemiology}, + shortjournal = {International Journal of Epidemiology}, + volume = {49}, + number = {1}, + pages = {338--347}, + issn = {0300-5771}, + abstract = {Epidemiologists are often confronted with datasets to analyse which contain measurement error due to, for instance, mistaken data entries, inaccurate recordings and measurement instrument or procedural errors. If the effect of measurement error is misjudged, the data analyses are hampered and the validity of the study’s inferences may be affected. In this paper, we describe five myths that contribute to misjudgments about measurement error, regarding expected structure, impact and solutions to mitigate the problems resulting from mismeasurements. The aim is to clarify these measurement error misconceptions. We show that the influence of measurement error in an epidemiological data analysis can play out in ways that go beyond simple heuristics, such as heuristics about whether or not to expect attenuation of the effect estimates. 
Whereas we encourage epidemiologists to deliberate about the structure and potential impact of measurement error in their analyses, we also recommend exercising restraint when making claims about the magnitude or even direction of effect of measurement error if not accompanied by statistical measurement error corrections or quantitative bias analysis. Suggestions for alleviating the problems or investigating the structure and magnitude of measurement error are given.}, + file = {/home/nathante/Zotero/storage/GPMMPFYB/van Smeden et al_2020_Reflection on modern methods.pdf;/home/nathante/Zotero/storage/9FVJBERI/5671729.html} +} + +@article{vermeer_online_2020, + title = {Online {{News User Journeys}}: {{The Role}} of {{Social Media}}, {{News Websites}}, and {{Topics}}}, + shorttitle = {Online {{News User Journeys}}}, + author = {Vermeer, Susan and Trilling, Damian and Kruikemeier, Sanne and de Vreese, Claes}, + options = {useprefix=true}, + date = {2020-10-20}, + journaltitle = {Digital Journalism}, + shortjournal = {Digital Journalism}, + volume = {8}, + number = {9}, + pages = {1114--1141}, + issn = {2167-0811, 2167-082X}, + langid = {english}, + file = {/home/nathante/Zotero/storage/NPE7CB6S/Vermeer et al. 
- 2020 - Online News User Journeys The Role of Social Medi.pdf} +} + +@article{votta_going_2023, + title = {Going {{Micro}} to {{Go Negative}}?: {{Targeting Toxicity}} Using {{Facebook}} and {{Instagram Ads}}}, + shorttitle = {Going {{Micro}} to {{Go Negative}}?}, + author = {Votta, Fabio and Noroozian, Arman and Dobber, Tom and Helberger, Natali and de Vreese, Claes}, + options = {useprefix=true}, + date = {2023-02-01}, + journaltitle = {Computational Communication Research}, + volume = {5}, + number = {1}, + pages = {1--50}, + issn = {2665-9085, 2665-9085}, + langid = {english} +} + +@article{wallach_big_2019, + title = {Big {{Data}}, {{Machine Learning}}, and the {{Social Sciences}}: {{Fairness}}, {{Accountability}}, and {{Transparency}}}, + shorttitle = {Big {{Data}}, {{Machine Learning}}, and the {{Social Sciences}}}, + author = {Wallach, Hanna}, + date = {2019-01-16}, + journaltitle = {Medium}, + abstract = {This essay is a (near) transcript of a talk I recently gave at a NIPS 2014 workshop on “Fairness, Accountability, and Transparency in Machine Learning,” organized by Solon Barocas and Moritz Hardt.}, + langid = {american}, + file = {/home/nathante/Zotero/storage/XYTVY7WV/big-data-machine-learning-and-the-social-sciences-fairness-accountability-and-transparency.html} +} + +@article{weber_extracting_2018, + title = {Extracting {{Latent Moral Information}} from {{Text Narratives}}: {{Relevance}}, {{Challenges}}, and {{Solutions}}}, + shorttitle = {Extracting {{Latent Moral Information}} from {{Text Narratives}}}, + author = {Weber, René and Mangus, J. Michael and Huskey, Richard and Hopp, Frederic R. 
and Amir, Ori and Swanson, Reid and Gordon, Andrew and Khooshabeh, Peter and Hahn, Lindsay and Tamborini, Ron}, + date = {2018-04-03}, + journaltitle = {Communication Methods and Measures}, + volume = {12}, + number = {2-3}, + pages = {119--139}, + publisher = {{Routledge}}, + issn = {1931-2458}, + abstract = {Moral Foundations Theory (MFT) and the Model of Intuitive Morality and Exemplars (MIME) contend that moral judgments are built on a universal set of basic moral intuitions. A large body of research has supported many of MFT’s and the MIME’s central hypotheses. Yet, an important prerequisite of this research—the ability to extract latent moral content represented in media stimuli with a reliable procedure—has not been systematically studied. In this article, we subject different extraction procedures to rigorous tests, underscore challenges by identifying a range of reliabilities, develop new reliability test and coding procedures employing computational methods, and provide solutions that maximize the reliability and validity of moral intuition extraction. In six content analytical studies, including a large crowd-based study, we demonstrate that: (1) traditional content analytical approaches lead to rather low reliabilities; (2) variation in coding reliabilities can be predicted by both text features and characteristics of the human coders; and (3) reliability is largely unaffected by the detail of coder training. We show that a coding task with simplified training and a coding technique that treats moral foundations as fast, spontaneous intuitions leads to acceptable inter-rater agreement, and potentially to more valid moral intuition extractions. While this study was motivated by issues related to MFT and MIME research, the methods and findings in this study have implications for extracting latent content from text narratives that go beyond moral information. 
Accordingly, we provide a tool for researchers interested in applying this new approach in their own work.}, + annotation = {\_eprint: https://doi.org/10.1080/19312458.2018.1447656} +} + +@article{weld_adjusting_2022, + title = {Adjusting for {{Confounders}} with {{Text}}: {{Challenges}} and an {{Empirical Evaluation Framework}} for {{Causal Inference}}}, + shorttitle = {Adjusting for {{Confounders}} with {{Text}}}, + author = {Weld, Galen and West, Peter and Glenski, Maria and Arbour, David and Rossi, Ryan A. and Althoff, Tim}, + date = {2022-05-31}, + journaltitle = {Proceedings of the International AAAI Conference on Web and Social Media}, + volume = {16}, + pages = {1109--1120}, + issn = {2334-0770}, + abstract = {Causal inference studies using textual social media data can provide actionable insights on human behavior. Making accurate causal inferences with text requires controlling for confounding which could otherwise impart bias. Recently, many different methods for adjusting for confounders have been proposed, and we show that these existing methods disagree with one another on two datasets inspired by previous social media studies. Evaluating causal methods is challenging, as ground truth counterfactuals are almost never available. Presently, no empirical evaluation framework for causal methods using text exists, and as such, practitioners must select their methods without guidance. We contribute the first such framework, which consists of five tasks drawn from real world studies. Our framework enables the evaluation of any casual inference method using text. Across 648 experiments and two datasets, we evaluate every commonly used causal inference method and identify their strengths and weaknesses to inform social media researchers seeking to use such methods, and guide future improvements. 
We make all tasks, data, and models public to inform applications and encourage additional research.}, + langid = {english}, + keywords = {Web and Social Media}, + file = {/home/nathante/Zotero/storage/LD3DS8GA/Weld et al_2022_Adjusting for Confounders with Text.pdf} +} + +@article{wiernik_obtaining_2020, + title = {Obtaining {{Unbiased Results}} in {{Meta-Analysis}}: {{The Importance}} of {{Correcting}} for {{Statistical Artifacts}}}, + shorttitle = {Obtaining {{Unbiased Results}} in {{Meta-Analysis}}}, + author = {Wiernik, Brenton M. and Dahlke, Jeffrey A.}, + date = {2020-03}, + journaltitle = {Advances in Methods and Practices in Psychological Science}, + shortjournal = {Advances in Methods and Practices in Psychological Science}, + volume = {3}, + number = {1}, + pages = {94--123}, + issn = {2515-2459, 2515-2467}, + abstract = {Most published meta-analyses address only artifactual variance due to sampling error and ignore the role of other statistical and psychometric artifacts, such as measurement error variance (due to factors including unreliability of measurements, group misclassification, and variable treatment strength) and selection effects (including range restriction or enhancement and collider biases). These artifacts can have severe biasing effects on the results of individual studies and meta-analyses. Failing to account for these artifacts can lead to inaccurate conclusions about the mean effect size and between-studies effect-size heterogeneity, and can influence the results of meta-regression, publication-bias, and sensitivity analyses. In this article, we provide a brief introduction to the biasing effects of measurement error variance and selection effects and their relevance to a variety of research designs. We describe how to estimate the effects of these artifacts in different research designs and correct for their impacts in primary studies and meta-analyses. 
We consider meta-analyses of correlations, observational group differences, and experimental effects. We provide R code to implement the corrections described.}, + langid = {english}, + file = {/home/nathante/Zotero/storage/HNEUY89U/Wiernik und Dahlke - 2020 - Obtaining Unbiased Results in Meta-Analysis The I.pdf} + } + +@article{williams_bayesian_1998, + title = {Bayesian classification with {{Gaussian}} processes}, + author = {Williams, C.K.I. and Barber, D.}, + date = {1998-12}, + journaltitle = {IEEE Transactions on Pattern Analysis and Machine Intelligence}, + volume = {20}, + number = {12}, + pages = {1342--1351}, + issn = {1939-3539}, + abstract = {We consider the problem of assigning an input vector to one of m classes by predicting P(c|x) for c=1,...,m. For a two-class problem, the probability of class one given x is estimated by {$\sigma(y(x))$}, where {$\sigma(y)=1/(1+e^{-y})$}. A Gaussian process prior is placed on y(x), and is combined with the training data to obtain predictions for new x points. We provide a Bayesian treatment, integrating over uncertainty in y and in the parameters that control the Gaussian process prior; the necessary integration over y is carried out using Laplace's approximation. The method is generalized to multiclass problems (m{$>$}2) using the softmax function. 
We demonstrate the effectiveness of the method on a number of datasets.}, + eventtitle = {{{IEEE Transactions}} on {{Pattern Analysis}} and {{Machine Intelligence}}}, + keywords = {Bayesian methods,Computer Society,Gaussian noise,Gaussian processes,Logistics,Monte Carlo methods,Process control,Training data,Uncertain systems,Uncertainty}, + file = {/home/nathante/Zotero/storage/BL4LP5X2/Williams_Barber_1998_Bayesian classification with Gaussian processes.pdf;/home/nathante/Zotero/storage/TX4DTGA2/735807.html} +} + +@article{wozniak_event-centered_2021, + title = {The {{Event-Centered Nature}} of {{Global Public Spheres}}: {{The UN Climate Change Conferences}}, {{Fridays}} for {{Future}}, and the ({{Limited}}) {{Transnationalization}} of {{Media Debates}}}, + author = {Wozniak, Antal and Wessler, Hartmut and Chan, Chung-hong and Lück, Julia}, + date = {2021-01-14}, + journaltitle = {International Journal of Communication}, + volume = {15}, + pages = {688--714}, + abstract = {Research has shown how unpremeditated events can influence media attention and media framing. But how do staged political events influence patterns of news coverage across countries, and are such changes sustainable beyond the immediate event context? We examined whether the UN climate change conferences are conducive to an emergence of a transnational public sphere by triggering issue convergence and increased transnational interconnectedness across national media debates. An automated content analysis of climate change coverage in newspapers from Germany, India, South Africa, and the United States between 2012 and 2019 revealed largely event-focused reporting. Media coverage quickly returned to preconference patterns after each conference. References to foreign countries showed almost no relationship to the climate change conferences’ coverage. We found similar results for the effects of the Fridays for Future movement. 
The significance of these events lies less in long-term changes in media reporting but more in short-term attention generation and coordinated message production.}, + keywords = {climate change coverage,comparative research,media content analysis,media events,time series analysis,transnational public sphere}, + file = {/home/nathante/Zotero/storage/GR8HDAYJ/Wozniak et al. - 2021 - The Event-Centered Nature of Global Public Spheres.pdf} +} + +@book{yi_handbook_2021, + title = {Handbook of {{Measurement Error Models}}}, + editor = {Yi, Grace Y. and Delaigle, Aurore and Gustafson, Paul}, + date = {2021-10-17}, + publisher = {{Chapman and Hall/CRC}}, + location = {{New York}}, + abstract = {Measurement error arises ubiquitously in applications and has been of long-standing concern in a variety of fields, including medical research, epidemiological studies, economics, environmental studies, and survey research. While several research monographs are available to summarize methods and strategies of handling different measurement error problems, research in this area continues to attract extensive attention. The Handbook of Measurement Error Models provides overviews of various topics on measurement error problems. It collects carefully edited chapters concerning issues of measurement error and evolving statistical methods, with a good balance of methodology and applications. It is prepared for readers who wish to start research and gain insights into challenges, methods, and applications related to error-prone data. It also serves as a reference text on statistical methods and applications pertinent to measurement error models, for researchers and data analysts alike. 
Features: Provides an account of past development and modern advancement concerning measurement error problems Highlights the challenges induced by error-contaminated data Introduces off-the-shelf methods for mitigating deleterious impacts of measurement error Describes state-of-the-art strategies for conducting in-depth research}, + isbn = {978-1-315-10127-9}, + pagetotal = {592}, + file = {/home/nathante/Zotero/storage/47CS3UND/Yi et al_2021_Handbook of Measurement Error Models.pdf} +} + +@inproceedings{zannettou_measuring_2020, + title = {Measuring and {{Characterizing Hate Speech}} on {{News}}\&\#xa0;{{Websites}}}, + booktitle = {12th {{ACM Conference}} on {{Web Science}}}, + author = {Zannettou, Savvas and Elsherief, Mai and Belding, Elizabeth and Nilizadeh, Shirin and Stringhini, Gianluca}, + date = {2020-07-06}, + series = {{{WebSci}} '20}, + pages = {125--134}, + publisher = {{Association for Computing Machinery}}, + location = {{New York, NY, USA}}, + abstract = {The Web has become the main source for news acquisition. At the same time, news discussion has become more social: users can post comments on news articles or discuss news articles on other platforms like Reddit. These features empower and enable discussions among the users; however, they also act as the medium for the dissemination of toxic discourse and hate speech. The research community lacks a general understanding on what type of content attracts hateful discourse and the possible effects of social networks on the commenting activity on news articles. In this work, we perform a large-scale quantitative analysis of 125M comments posted on 412K news articles over the course of 19 months. We analyze the content of the collected articles and their comments using temporal analysis, user-based analysis, and linguistic analysis, to shed light on what elements attract hateful comments on news articles. 
We also investigate commenting activity when an article is posted on either 4chan’s Politically Incorrect board (/pol/) or six selected subreddits. We find statistically significant increases in hateful commenting activity around real-world divisive events like the “Unite the Right” rally in Charlottesville and political events like the second and third 2016 US presidential debates. Also, we find that articles that attract a substantial number of hateful comments have different linguistic characteristics when compared to articles that do not attract hateful comments. Furthermore, we observe that the post of a news articles on either /pol/ or the six subreddits is correlated with an increase of (hateful) commenting activity on the news articles.}, + isbn = {978-1-4503-7989-2}, + file = {/home/nathante/Zotero/storage/GPCWVQLY/Zannettou et al_2020_Measuring and Characterizing Hate Speech on News \;Websites.pdf} +} + +@report{zhang_how_2021, + type = {preprint}, + title = {How {{Using Machine Learning Classification}} as a {{Variable}} in {{Regression Leads}} to {{Attenuation Bias}} and {{What}} to {{Do About It}}}, + author = {Zhang, Han}, + date = {2021-05-29}, + institution = {{SocArXiv}}, + abstract = {Social scientists have increasingly been applying machine learning algorithms to big data to measure theoretical concepts and then using these machinepredicted variables in regression. This article rst demonstrates that directly inserting binary predictions (i.e. classi cation) without regard for prediction error will generally lead to the attenuation bias of slope coe cients or marginal e ect estimates. We then propose ve estimators with which to obtain consistent estimates of the coe cients. The estimators require validation data; both machine prediction and true values can be used. Monte Carlo simulations are used to demonstrate the e ectiveness and robustness of the proposed estimators. 
We summarize the pattern of usage of machine learning predictions in 12 recent publications in the top social science journals, apply our proposed estimators to four of them, and o er some practical recommendations. We develop an R package (CCER) to help researchers use the proposed estimators.}, + langid = {english}, + file = {/home/nathante/Zotero/storage/HYJ5LBR6/Zhang - 2021 - How Using Machine Learning Classification as a Var.pdf} +} + +@article{zhao_assumptions_2013, + title = {Assumptions behind {{Intercoder Reliability Indices}}}, + author = {Zhao, Xinshu and Liu, Jun S. and Deng, Ke}, + date = {2013-01-01}, + journaltitle = {Annals of the International Communication Association}, + volume = {36}, + number = {1}, + pages = {419--480}, + publisher = {{Routledge}}, + issn = {2380-8985}, + annotation = {\_eprint: https://doi.org/10.1080/23808985.2013.11679142}, + file = {/home/nathante/Zotero/storage/TDF2I55Y/Zhao et al_2013_Assumptions behind Intercoder Reliability Indices.pdf;/home/nathante/Zotero/storage/64NWAITD/23808985.2013.html} +} diff --git a/CMM_query_letter b/CMM_query_letter new file mode 100644 index 0000000..a55a34c --- /dev/null +++ b/CMM_query_letter @@ -0,0 +1,13 @@ +Dear Marko, + +On behalf of myself and my collaborators, I am writing to inquire if the attached manuscript is of interest to Communication Methods and Measures. With the continuing rise of attention to machine learning and the rapid adoption of related methods in Communication Science, we feel a rigorous understanding of the statistical problems with misclassification and error correction methods as a potential solution is urgently needed in our field. + +We aim to accomplish this in an accessible way via a real-data example using Google's perspective API, a systematic literature review, and Monte-Carlo simulations evaluating our proposed approach for error correction in comparison to others. 
As we could find no open-source software implementing the most effective solution we tested, we are developing an R package to make it possible for our field to routinely correct for misclassification bias in automated content analysis. + +Our study reveals that misclassification bias can easily distort statistical analysis into giving misleading results, that this threat to validity is not normally recognized in communication research applying supervised text classification, but that, perhaps surprisingly, it is possible to correct bias with data from manual content analysis. Although the statistical literature on the topic is well-developed, it is also quite technical, as I am sure you are aware. As a result, our contribution in this article is targeted to raise social scientists' awareness and comprehension of the problem and how it can be solved. + +Based on your recent work on the issue of measurement error in content analysis, you are clearly an ideal associate editor for this article. In short, we are wondering whether you think our article will be a good fit for CMM. If so, we will submit it through the system in the next days. + +Sincerely, +Nathan TeBlunthuis + diff --git a/Flowchart.png b/Flowchart.png new file mode 100644 index 0000000..91ea14c Binary files /dev/null and b/Flowchart.png differ diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..f438ad8 --- /dev/null +++ b/Makefile @@ -0,0 +1,50 @@ +#!/usr/bin/make +all: $(patsubst %.Rtex,%.pdf,$(wildcard *.Rtex)) + +# refs.bib: + +# wget -r -q -O refs.bib "http://127.0.0.1:23119/better-bibtex/export/collection?/2/Nate//Change - Population Ecology.bibtex" + +sync.remember: + scp klone:/gscratch/comdata/users/nathante/ml_measurement_error_public/simulations/remembr.RDS . + scp klone:/gscratch/comdata/users/nathante/ml_measurement_error_public/simulations/remember_irr.RDS . + scp klone:/gscratch/comdata/users/nathante/ml_measurement_error_public/simulations/remember_grid_sweep.RDS . 
+ scp klone:/gscratch/comdata/users/nathante/ml_measurement_error_public/simulations/robustness_1.RDS . + scp klone:/gscratch/comdata/users/nathante/ml_measurement_error_public/simulations/robustness_2.RDS . + scp klone:/gscratch/comdata/users/nathante/ml_measurement_error_public/simulations/robustness_3.RDS . + scp klone:/gscratch/comdata/users/nathante/ml_measurement_error_public/simulations/robustness_3_proflik.RDS . + scp klone:/gscratch/comdata/users/nathante/ml_measurement_error_public/simulations/robustness_4.RDS . + scp klone:/gscratch/comdata/users/nathante/ml_measurement_error_public/simulations/robustness_1_dv.RDS . + scp klone:/gscratch/comdata/users/nathante/ml_measurement_error_public/simulations/robustness_2_dv.RDS . + scp klone:/gscratch/comdata/users/nathante/ml_measurement_error_public/simulations/robustness_3_dv.RDS . + scp klone:/gscratch/comdata/users/nathante/ml_measurement_error_public/simulations/robustness_3_dv_proflik.RDS . + scp klone:/gscratch/comdata/users/nathante/ml_measurement_error_public/simulations/robustness_4_dv.RDS . + scp klone:/gscratch/comdata/users/nathante/ml_measurement_error_public/civil_comments/dv_perspective_example.RDS . + scp klone:/gscratch/comdata/users/nathante/ml_measurement_error_public/civil_comments/iv_perspective_example.RDS . 
+ + +%.tex: %.Rtex remembr.RDS resources/*.R # refs.bib + Rscript -e "library(knitr); knit('$<')" + +autoupdate: + latexmk -f -xelatex -pvc $< + +%.pdf: %.tex + latexmk -f -pdf $< + +clean: + latexmk -C *.tex + rm -f article.tex + rm -f *.bbl + rm -f *.run.xml + +viewpdf: all + evince *.pdf + +spell: + aspell -c -t --tex-check-comments -b text.tex + +pdf: all + +.PHONY: clean all refs.bib autoupdate +.PRECIOUS: %.tex diff --git a/Recommendations.PNG b/Recommendations.PNG new file mode 100644 index 0000000..ece5c5f Binary files /dev/null and b/Recommendations.PNG differ diff --git a/TODO b/TODO new file mode 100644 index 0000000..71b45dd --- /dev/null +++ b/TODO @@ -0,0 +1,35 @@ + +DONE 1. As IRR is not even a major focus now: on Page 8, just keep the citation to "the Bible" (Krippendorf 2004) and remove all the other single/double-serving references: Gwat; Krippendor +2007; Hayes & Krippendof. + +DONE 2. On page 9: I know you are super animated by the Wikipedia example. But I would argue reviewers would be more animated by the Perspective API. But whatever your choice, I would suggest +keeping just one: Wikipedia / Perspective. + +DONE 3. I don't mind you cut Dobbrick et al. and keep the big name: Boukes et al., 2020 + +DONE 4. pilny_using_2019 doesn't sound very important. + + +5. This "citation dragon": \citep{obermeyer_dissecting_2019, kleinberg_algorithmic_2018, bender_dangers_2021, wallach_big_2019, noble_algorithms_2018, gillespie_custodians_2018} + +bender_dangers_2021, wallach_big_2019, noble_algorithms_2018, gillespie_custodians_2018 are single-serving. We must cut Tarleton Gillespie at least to save space, he has enough fame and +doesn't need our citation. Maybe we can keep bender_dangers_2021, it's meaningful. Yes, bender_dangers is my favorite of these. + + +6. another dragon: \citep[see]{carroll_measurement_2006, yi_handbook_2021, fuller_measurement_1987, buonaccorsi_measurement_2010} + +?cut fuller_measurement_1987 + +7. 
I don't recommending removing the DAGs. Those lazy reviewers can skip the descriptions of the simulations and read the DAGs and pretend to be understanding the whole paper. + +By considering the above, probably we can cut around 1 to 2 pages (~37p). + +8. There are also some room to cut in the recommendations + +8a: Could we cut the caveat about the API and PL in R I? We don't want to promote the usage of PL and it would save 1/3 page. + +8b: I trimmed some words in R II. I think R II is quite compact already and it is super important. + +8c: Strategically for this ICA submission: Should we just promote MLE in R III, i.e. don't say anything about adding another adjacent method? Other than that, R III is quite compact also. + +8c: Recommendation IV: Cut the last paragraph for this submission. We are facing the same problem and learn from us. diff --git a/appendix.Rtex b/appendix.Rtex new file mode 100644 index 0000000..d4fd435 --- /dev/null +++ b/appendix.Rtex @@ -0,0 +1,111 @@ +\documentclass[floatsintext, man, draftfirst]{apa7} +<>= +library(knitr) +library(ggplot2) +library(data.table) +knitr::opts_chunk$set(fig.show='hold') +f <- function (x) {formatC(x, format="d", big.mark=',')} +format.percent <- function(x) {paste(f(x*100),"\\%",sep='')} + +theme_set(theme_bw()) +source('resources/functions.R') +source('resources/variables.R') +@ + +% maxwidth is the original width if it is less than linewidth +% otherwise use linewidth (to make sure the graphics do not exceed the margin) +\makeatletter +\def\maxwidth{ % + \ifdim\Gin@nat@width>\linewidth + \linewidth + \else + \Gin@nat@width + \fi +} + +\usepackage{alltt} + + +\usepackage{epstopdf}% To incorporate .eps illustrations using PDFLaTeX, etc. 
+\usepackage{subcaption}% Support for small, `sub' figures and tables +\usepackage{tikz} +\usetikzlibrary{positioning, shapes, arrows, shadows} + +\def \parrotpdf {\includegraphics[]{parrot.pdf}} +\DeclareUnicodeCharacter{1F99C}{\parrotpdf} +\usepackage{tabularx} +\usepackage[utf8]{inputenc} +\usepackage{wrapfig} +\usepackage[T1]{fontenc} +\usepackage{textcomp} +% \usepackage[garamond]{mathdesign} + +% \usepackage[letterpaper,left=1in,right=1in,top=1in,bottom=1in]{geometry} + +% packages i use in essentially every document +\usepackage{graphicx} +\usepackage{enumerate} + +% packages i use in many documents but leave off by default +\usepackage{amsmath}%}, amsthm, amssymb} +\DeclareMathOperator*{\argmin}{arg\,min} % thin space, limits underneath in displays +\DeclareMathOperator*{\argmax}{arg\,max} % thin space, limits underneath in displays + + +\usepackage{subcaption} +% import and customize urls +% \usepackage[usenames,dvipsnames]{color} +% \usepackage[breaklinks]{hyperref} + +\hypersetup{colorlinks=true, linkcolor=black, citecolor=black, filecolor=blue, + urlcolor=blue, unicode=true} + +% add bibliographic stuff +\usepackage[american]{babel} +\usepackage{csquotes} +\usepackage[natbib=true, style=apa, sortcites=true, backend=biber]{biblatex} +\addbibresource{Bibliography.bib} +\DeclareLanguageMapping{american}{american-apa} + +\defbibheading{secbib}[\bibname]{% + \section*{#1}% + \markboth{#1}{#1}% + \baselineskip 14.2pt% + \prebibhook} + +\def\citepos#1{\citeauthor{#1}'s (\citeyear{#1})} +\def\citespos#1{\citeauthor{#1}' (\citeyear{#1})} +\newcommand\TODO[1]{\textsc{\color{red} #1}} + +% I've gotten advice to make this as general as possible to attract the widest possible audience. 
+\title{Appendices for: What to do about prediction errors in automated content analysis} + +\shorttitle{Appendices: Prediction errors in automated content analysis} + +\authorsnames[1,2,3]{Nathan TeBlunthuis, Valerie Hase, Chung-hong Chan} +\authorsaffiliations{{Northwestern University}, {LMU Munich}, {GESIS - Leibniz-Institut für Sozialwissenschaften}} +\leftheader{TeBlunthuis, Hase \& Chan} + +\keywords{ +Content Analysis; Machine Learning; Classification Error; Attenuation Bias; Simulation; Computational Methods; Big Data; AI; +} + +\IfFileExists{upquote.sty}{\usepackage{upquote}}{} + +\abstract{} +\begin{document} +\maketitle +\section{Additional plots from Simulations 1 and 2} +\label{appendix:main.sim.plots} +\begin{figure} +<>= + +p <- plot.simulation.iv(plot.df.example.1,iv='z') + +grid.draw(p) +@ +\caption{Estimates of $B_z$ in multivariate regression with $X$ measured using machine learning and model accuracy independent of $X$, $Y$, and $Z$. All methods obtain precise and accurate estimates given sufficient validation data.} +\end{figure} + + +\end{document} \ No newline at end of file diff --git a/appendix.bcf b/appendix.bcf new file mode 100644 index 0000000..be33e3a --- /dev/null +++ b/appendix.bcf @@ -0,0 +1,2866 @@ + + + + + + output_encoding + utf8 + + + input_encoding + utf8 + + + debug + 0 + + + mincrossrefs + 999 + + + minxrefs + 2 + + + sortcase + 1 + + + sortupper + 1 + + + + + + + alphaothers + + + + + labelalpha + 0 + + + labelnamespec + shortauthor + author + shorteditor + editor + + + labeltitle + 0 + + + labeltitlespec + shorttitle + title + maintitle + + + labeltitleyear + 0 + + + labeldateparts + 1 + + + labeldatespec + pubstate + date + eventdate + year + nodate + + + julian + 0 + + + gregorianstart + 1582-10-15 + + + maxalphanames + 3 + + + maxbibnames + 20 + + + maxcitenames + 2 + + + maxsortnames + 20 + + + maxitems + 999 + + + minalphanames + 1 + + + minbibnames + 19 + + + mincitenames + 1 + + + minsortnames + 19 + + + minitems + 
1 + + + nohashothers + 0 + + + noroman + 0 + + + nosortothers + 0 + + + pluralothers + 1 + + + singletitle + 0 + + + skipbib + 0 + + + skipbiblist + 0 + + + skiplab + 0 + + + sortalphaothers + + + + + sortlocale + american + + + sortingtemplatename + apa + + + sortsets + 0 + + + uniquelist + minyear + + + uniquename + init + + + uniqueprimaryauthor + 1 + + + uniquetitle + 0 + + + uniquebaretitle + 0 + + + uniquework + 0 + + + useprefix + 1 + + + useafterword + 1 + + + useannotator + 1 + + + useauthor + 1 + + + usebookauthor + 1 + + + usecommentator + 1 + + + useeditor + 1 + + + useeditora + 1 + + + useeditorb + 1 + + + useeditorc + 1 + + + useforeword + 1 + + + useholder + 1 + + + useintroduction + 1 + + + usenamea + 1 + + + usenameb + 1 + + + usenamec + 1 + + + usetranslator + 1 + + + useshortauthor + 1 + + + useshorteditor + 1 + + + usenarrator + 1 + + + useexecproducer + 1 + + + useexecdirector + 1 + + + usewith + 1 + + + + + + labelalpha + 0 + + + labelnamespec + shortauthor + author + shorteditor + editor + + + labeltitle + 0 + + + labeltitlespec + shorttitle + title + maintitle + + + labeltitleyear + 0 + + + labeldateparts + 1 + + + labeldatespec + pubstate + date + eventdate + year + nodate + + + maxalphanames + 3 + + + maxbibnames + 20 + + + maxcitenames + 2 + + + maxsortnames + 20 + + + maxitems + 999 + + + minalphanames + 1 + + + minbibnames + 19 + + + mincitenames + 1 + + + minsortnames + 19 + + + minitems + 1 + + + nohashothers + 0 + + + noroman + 0 + + + nosortothers + 0 + + + singletitle + 0 + + + skipbib + 0 + + + skipbiblist + 0 + + + skiplab + 0 + + + uniquelist + minyear + + + uniquename + init + + + uniqueprimaryauthor + 1 + + + uniquetitle + 0 + + + uniquebaretitle + 0 + + + uniquework + 0 + + + useprefix + 1 + + + useafterword + 1 + + + useannotator + 1 + + + useauthor + 1 + + + usebookauthor + 1 + + + usecommentator + 1 + + + useeditor + 1 + + + useeditora + 1 + + + useeditorb + 1 + + + useeditorc + 1 + + + useforeword + 1 + + + useholder + 1 
+ + + useintroduction + 1 + + + usenamea + 1 + + + usenameb + 1 + + + usenamec + 1 + + + usetranslator + 1 + + + useshortauthor + 1 + + + useshorteditor + 1 + + + usenarrator + 1 + + + useexecproducer + 1 + + + useexecdirector + 1 + + + usewith + 1 + + + + + + labelalpha + 0 + + + labelnamespec + shortauthor + author + shorteditor + editor + + + labeltitle + 0 + + + labeltitlespec + shorttitle + title + maintitle + + + labeltitleyear + 0 + + + labeldateparts + 1 + + + labeldatespec + date + + + maxalphanames + 3 + + + maxbibnames + 20 + + + maxcitenames + 2 + + + maxsortnames + 20 + + + maxitems + 999 + + + minalphanames + 1 + + + minbibnames + 19 + + + mincitenames + 1 + + + minsortnames + 19 + + + minitems + 1 + + + nohashothers + 0 + + + noroman + 0 + + + nosortothers + 0 + + + singletitle + 0 + + + skipbib + 0 + + + skipbiblist + 0 + + + skiplab + 0 + + + uniquelist + minyear + + + uniquename + init + + + uniqueprimaryauthor + 1 + + + uniquetitle + 0 + + + uniquebaretitle + 0 + + + uniquework + 0 + + + useprefix + 1 + + + useafterword + 1 + + + useannotator + 1 + + + useauthor + 1 + + + usebookauthor + 1 + + + usecommentator + 1 + + + useeditor + 1 + + + useeditora + 1 + + + useeditorb + 1 + + + useeditorc + 1 + + + useforeword + 1 + + + useholder + 1 + + + useintroduction + 1 + + + usenamea + 1 + + + usenameb + 1 + + + usenamec + 1 + + + usetranslator + 1 + + + useshortauthor + 1 + + + useshorteditor + 1 + + + usenarrator + 1 + + + useexecproducer + 1 + + + useexecdirector + 1 + + + usewith + 1 + + + + + datamodel + labelalphanametemplate + labelalphatemplate + inheritance + translit + uniquenametemplate + sortingnamekeytemplate + sortingtemplate + extradatespec + labelnamespec + labeltitlespec + labeldatespec + controlversion + alphaothers + sortalphaothers + presort + texencoding + bibencoding + sortingtemplatename + sortlocale + language + autolang + langhook + indexing + hyperref + backrefsetstyle + block + pagetracker + citecounter + citetracker + 
ibidtracker + idemtracker + opcittracker + loccittracker + labeldate + labeltime + dateera + date + time + eventdate + eventtime + origdate + origtime + urldate + urltime + alldatesusetime + alldates + alltimes + gregorianstart + autocite + notetype + uniquelist + uniquename + refsection + refsegment + citereset + sortlos + babel + datelabel + backrefstyle + arxiv + familyinits + giveninits + prefixinits + suffixinits + useafterword + useannotator + useauthor + usebookauthor + usecommentator + useeditor + useeditora + useeditorb + useeditorc + useforeword + useholder + useintroduction + usenamea + usenameb + usenamec + usetranslator + useshortauthor + useshorteditor + usenarrator + useexecproducer + useexecdirector + usewith + debug + loadfiles + safeinputenc + sortcase + sortupper + terseinits + abbreviate + dateabbrev + clearlang + sortcites + sortsets + backref + backreffloats + trackfloats + parentracker + labeldateusetime + datecirca + dateuncertain + dateusetime + eventdateusetime + origdateusetime + urldateusetime + julian + datezeros + timezeros + timezones + seconds + autopunct + punctfont + labelnumber + labelalpha + labeltitle + labeltitleyear + labeldateparts + pluralothers + nohashothers + nosortothers + noroman + singletitle + uniquetitle + uniquebaretitle + uniquework + uniqueprimaryauthor + defernumbers + locallabelwidth + bibwarn + useprefix + skipbib + skipbiblist + skiplab + dataonly + defernums + firstinits + sortfirstinits + sortgiveninits + labelyear + isbn + url + doi + eprint + related + apamaxprtauth + annotation + dashed + bibtexcaseprotection + mincrossrefs + minxrefs + maxnames + minnames + maxbibnames + minbibnames + maxcitenames + mincitenames + maxsortnames + minsortnames + maxitems + minitems + maxalphanames + minalphanames + maxparens + dateeraauto + + + alphaothers + sortalphaothers + presort + indexing + citetracker + ibidtracker + idemtracker + opcittracker + loccittracker + uniquelist + uniquename + familyinits + giveninits + 
prefixinits + suffixinits + useafterword + useannotator + useauthor + usebookauthor + usecommentator + useeditor + useeditora + useeditorb + useeditorc + useforeword + useholder + useintroduction + usenamea + usenameb + usenamec + usetranslator + useshortauthor + useshorteditor + usenarrator + useexecproducer + useexecdirector + usewith + terseinits + abbreviate + dateabbrev + clearlang + labelnumber + labelalpha + labeltitle + labeltitleyear + labeldateparts + nohashothers + nosortothers + noroman + singletitle + uniquetitle + uniquebaretitle + uniquework + uniqueprimaryauthor + useprefix + skipbib + skipbiblist + skiplab + dataonly + skiplos + labelyear + isbn + url + doi + eprint + related + annotation + bibtexcaseprotection + labelalphatemplate + translit + sortexclusion + sortinclusion + labelnamespec + labeltitlespec + labeldatespec + maxnames + minnames + maxbibnames + minbibnames + maxcitenames + mincitenames + maxsortnames + minsortnames + maxitems + minitems + maxalphanames + minalphanames + + + noinherit + nametemplates + labelalphanametemplatename + uniquenametemplatename + sortingnamekeytemplatename + presort + indexing + citetracker + ibidtracker + idemtracker + opcittracker + loccittracker + uniquelist + uniquename + familyinits + giveninits + prefixinits + suffixinits + useafterword + useannotator + useauthor + usebookauthor + usecommentator + useeditor + useeditora + useeditorb + useeditorc + useforeword + useholder + useintroduction + usenamea + usenameb + usenamec + usetranslator + useshortauthor + useshorteditor + usenarrator + useexecproducer + useexecdirector + usewith + terseinits + abbreviate + dateabbrev + clearlang + labelnumber + labelalpha + labeltitle + labeltitleyear + labeldateparts + nohashothers + nosortothers + noroman + singletitle + uniquetitle + uniquebaretitle + uniquework + uniqueprimaryauthor + useprefix + skipbib + skipbiblist + skiplab + dataonly + skiplos + isbn + url + doi + eprint + related + annotation + 
bibtexcaseprotection + maxnames + minnames + maxbibnames + minbibnames + maxcitenames + mincitenames + maxsortnames + minsortnames + maxitems + minitems + maxalphanames + minalphanames + + + nametemplates + labelalphanametemplatename + uniquenametemplatename + sortingnamekeytemplatename + uniquelist + uniquename + familyinits + giveninits + prefixinits + suffixinits + terseinits + nohashothers + nosortothers + useprefix + + + nametemplates + labelalphanametemplatename + uniquenametemplatename + sortingnamekeytemplatename + uniquename + familyinits + giveninits + prefixinits + suffixinits + terseinits + useprefix + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + proceedings + + + + + inproceedings + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + prefix + family + + + + + shorthand + label + labelname + labelname + + + year + + + + + + labelyear + year + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + prefix + family + given + suffix + + + + + prefix + family + + + given + + + suffix + + + prefix + + + + + prefix + family + + + given + + + suffix + + + prefix + + + mm + + + + sf,sm,sn,pf,pm,pn,pp + family,given,prefix,suffix + boolean,integer,string,xml + default,transliteration,transcription,translation + + + article + artwork + audio + bibnote + book + bookinbook + booklet + collection + commentary + customa + customb + customc + customd + custome + customf + dataset + inbook + incollection + inproceedings + inreference + image + jurisdiction + legal + legislation + letter + manual + misc + movie + music + mvcollection + mvreference + mvproceedings + mvbook + online + patent + performance + periodical + proceedings + reference + report + review + set + 
software + standard + suppbook + suppcollection + suppperiodical + thesis + unpublished + video + xdata + presentation + constitution + legmaterial + legadminmaterial + nameonly + + + sortyear + volume + volumes + abstract + addendum + annotation + booksubtitle + booktitle + booktitleaddon + chapter + edition + eid + entrysubtype + eprintclass + eprinttype + eventtitle + eventtitleaddon + gender + howpublished + indexsorttitle + indextitle + isan + isbn + ismn + isrn + issn + issue + issuesubtitle + issuetitle + issuetitleaddon + iswc + journalsubtitle + journaltitle + journaltitleaddon + label + langid + langidopts + library + mainsubtitle + maintitle + maintitleaddon + nameaddon + note + number + origtitle + pagetotal + part + relatedstring + relatedtype + reprinttitle + series + shorthandintro + subtitle + title + titleaddon + usera + userb + userc + userd + usere + userf + venue + version + shorthand + shortjournal + shortseries + shorttitle + sorttitle + sortshorthand + sortkey + presort + institution + lista + listb + listc + listd + liste + listf + location + organization + origlocation + origpublisher + publisher + afterword + annotator + author + bookauthor + commentator + editor + editora + editorb + editorc + foreword + holder + introduction + namea + nameb + namec + translator + shortauthor + shorteditor + sortname + authortype + editoratype + editorbtype + editorctype + editortype + bookpagination + nameatype + namebtype + namectype + pagination + pubstate + type + language + origlanguage + crossref + xref + date + endyear + year + month + day + hour + minute + second + timezone + yeardivision + endmonth + endday + endhour + endminute + endsecond + endtimezone + endyeardivision + eventdate + eventendyear + eventyear + eventmonth + eventday + eventhour + eventminute + eventsecond + eventtimezone + eventyeardivision + eventendmonth + eventendday + eventendhour + eventendminute + eventendsecond + eventendtimezone + eventendyeardivision + origdate + 
origendyear + origyear + origmonth + origday + orighour + origminute + origsecond + origtimezone + origyeardivision + origendmonth + origendday + origendhour + origendminute + origendsecond + origendtimezone + origendyeardivision + urldate + urlendyear + urlyear + urlmonth + urlday + urlhour + urlminute + urlsecond + urltimezone + urlyeardivision + urlendmonth + urlendday + urlendhour + urlendminute + urlendsecond + urlendtimezone + urlendyeardivision + doi + eprint + file + verba + verbb + verbc + url + xdata + ids + entryset + related + keywords + options + relatedoptions + pages + execute + narrator + execproducer + execdirector + with + citation + source + article + section + amendment + appentry + + + abstract + annotation + authortype + bookpagination + crossref + day + doi + eprint + eprintclass + eprinttype + endday + endhour + endminute + endmonth + endsecond + endtimezone + endyear + endyeardivision + entryset + entrysubtype + execute + file + gender + hour + ids + indextitle + indexsorttitle + isan + ismn + iswc + keywords + label + langid + langidopts + library + lista + listb + listc + listd + liste + listf + minute + month + namea + nameb + namec + nameatype + namebtype + namectype + nameaddon + options + origday + origendday + origendhour + origendminute + origendmonth + origendsecond + origendtimezone + origendyear + origendyeardivision + orighour + origminute + origmonth + origsecond + origtimezone + origyear + origyeardivision + origlocation + origpublisher + origtitle + pagination + presort + related + relatedoptions + relatedstring + relatedtype + second + shortauthor + shorteditor + shorthand + shorthandintro + shortjournal + shortseries + shorttitle + sortkey + sortname + sortshorthand + sorttitle + sortyear + timezone + url + urlday + urlendday + urlendhour + urlendminute + urlendmonth + urlendsecond + urlendtimezone + urlendyear + urlhour + urlminute + urlmonth + urlsecond + urltimezone + urlyear + usera + userb + userc + userd + usere + 
userf + verba + verbb + verbc + xdata + xref + year + yeardivision + + + set + entryset + + + article + addendum + annotator + author + commentator + editor + editora + editorb + editorc + editortype + editoratype + editorbtype + editorctype + eid + issn + issue + issuetitle + issuesubtitle + issuetitleaddon + journalsubtitle + journaltitle + journaltitleaddon + language + note + number + origlanguage + pages + pubstate + series + subtitle + title + titleaddon + translator + version + volume + + + bibnote + note + + + book + author + addendum + afterword + annotator + chapter + commentator + edition + editor + editora + editorb + editorc + editortype + editoratype + editorbtype + editorctype + eid + foreword + introduction + isbn + language + location + maintitle + maintitleaddon + mainsubtitle + note + number + origlanguage + pages + pagetotal + part + publisher + pubstate + series + subtitle + title + titleaddon + translator + volume + volumes + + + mvbook + addendum + afterword + annotator + author + commentator + edition + editor + editora + editorb + editorc + editortype + editoratype + editorbtype + editorctype + foreword + introduction + isbn + language + location + note + number + origlanguage + pagetotal + publisher + pubstate + series + subtitle + title + titleaddon + translator + volume + volumes + + + inbook + bookinbook + suppbook + addendum + afterword + annotator + author + booktitle + bookauthor + booksubtitle + booktitleaddon + chapter + commentator + edition + editor + editora + editorb + editorc + editortype + editoratype + editorbtype + editorctype + eid + foreword + introduction + isbn + language + location + mainsubtitle + maintitle + maintitleaddon + note + number + origlanguage + part + publisher + pages + pubstate + series + subtitle + title + titleaddon + translator + volume + volumes + + + booklet + addendum + author + chapter + editor + editortype + eid + howpublished + language + location + note + pages + pagetotal + pubstate + subtitle 
+ title + titleaddon + type + + + collection + reference + addendum + afterword + annotator + chapter + commentator + edition + editor + editora + editorb + editorc + editortype + editoratype + editorbtype + editorctype + eid + foreword + introduction + isbn + language + location + mainsubtitle + maintitle + maintitleaddon + note + number + origlanguage + pages + pagetotal + part + publisher + pubstate + series + subtitle + title + titleaddon + translator + volume + volumes + + + mvcollection + mvreference + addendum + afterword + annotator + author + commentator + edition + editor + editora + editorb + editorc + editortype + editoratype + editorbtype + editorctype + foreword + introduction + isbn + language + location + note + number + origlanguage + publisher + pubstate + subtitle + title + titleaddon + translator + volume + volumes + + + incollection + suppcollection + inreference + addendum + afterword + annotator + author + booksubtitle + booktitle + booktitleaddon + chapter + commentator + edition + editor + editora + editorb + editorc + editortype + editoratype + editorbtype + editorctype + eid + foreword + introduction + isbn + language + location + mainsubtitle + maintitle + maintitleaddon + note + number + origlanguage + pages + part + publisher + pubstate + series + subtitle + title + titleaddon + translator + volume + volumes + + + dataset + addendum + author + edition + editor + editortype + language + location + note + number + organization + publisher + pubstate + series + subtitle + title + titleaddon + type + version + + + manual + addendum + author + chapter + edition + editor + editortype + eid + isbn + language + location + note + number + organization + pages + pagetotal + publisher + pubstate + series + subtitle + title + titleaddon + type + version + + + misc + software + addendum + author + editor + editortype + howpublished + language + location + note + organization + pubstate + subtitle + title + titleaddon + type + version + + + online + 
addendum + author + editor + editortype + language + note + organization + pubstate + subtitle + title + titleaddon + version + + + patent + addendum + author + holder + location + note + number + pubstate + subtitle + title + titleaddon + type + version + + + periodical + addendum + editor + editora + editorb + editorc + editortype + editoratype + editorbtype + editorctype + issn + issue + issuesubtitle + issuetitle + issuetitleaddon + language + note + number + pubstate + series + subtitle + title + titleaddon + volume + yeardivision + + + mvproceedings + addendum + editor + editortype + eventday + eventendday + eventendhour + eventendminute + eventendmonth + eventendsecond + eventendtimezone + eventendyear + eventendyeardivision + eventhour + eventminute + eventmonth + eventsecond + eventtimezone + eventyear + eventyeardivision + eventtitle + eventtitleaddon + isbn + language + location + note + number + organization + pagetotal + publisher + pubstate + series + subtitle + title + titleaddon + venue + volumes + + + proceedings + addendum + chapter + editor + editortype + eid + eventday + eventendday + eventendhour + eventendminute + eventendmonth + eventendsecond + eventendtimezone + eventendyear + eventendyeardivision + eventhour + eventminute + eventmonth + eventsecond + eventtimezone + eventyear + eventyeardivision + eventtitle + eventtitleaddon + isbn + language + location + mainsubtitle + maintitle + maintitleaddon + note + number + organization + pages + pagetotal + part + publisher + pubstate + series + subtitle + title + titleaddon + venue + volume + volumes + + + inproceedings + addendum + author + booksubtitle + booktitle + booktitleaddon + chapter + editor + editortype + eid + eventday + eventendday + eventendhour + eventendminute + eventendmonth + eventendsecond + eventendtimezone + eventendyear + eventendyeardivision + eventhour + eventminute + eventmonth + eventsecond + eventtimezone + eventyear + eventyeardivision + eventtitle + eventtitleaddon + 
isbn + language + location + mainsubtitle + maintitle + maintitleaddon + note + number + organization + pages + part + publisher + pubstate + series + subtitle + title + titleaddon + venue + volume + volumes + + + report + addendum + author + chapter + eid + institution + isrn + language + location + note + number + pages + pagetotal + pubstate + subtitle + title + titleaddon + type + version + + + thesis + addendum + author + chapter + eid + institution + language + location + note + pages + pagetotal + pubstate + subtitle + title + titleaddon + type + + + unpublished + addendum + author + eventday + eventendday + eventendhour + eventendminute + eventendmonth + eventendsecond + eventendtimezone + eventendyear + eventendyeardivision + eventhour + eventminute + eventmonth + eventsecond + eventtimezone + eventyear + eventyeardivision + eventtitle + eventtitleaddon + howpublished + language + location + note + pubstate + subtitle + title + titleaddon + type + venue + + + with + narrator + execproducer + execdirector + + + jurisdiction + organization citation + + + legmaterial + source + + + legadminmaterial + citation + source + + + constitution + article + section + amendment + + + software + appentry + + + report + addendum + author + authortype + chapter + doi + eprint + eprintclass + eprinttype + institution + isrn + language + location + note + number + pages + pagetotal + pubstate + subtitle + title + titleaddon + type + version + + + presentation + addendum + author + booksubtitle + booktitle + booktitleaddon + chapter + doi + editor + editortype + eprint + eprintclass + eprinttype + eventday + eventendday + eventendhour + eventendminute + eventendmonth + eventendseason + eventendsecond + eventendtimezone + eventendyear + eventhour + eventminute + eventmonth + eventseason + eventsecond + eventtimezone + eventyear + eventtitle + eventtitleaddon + isbn + language + location + mainsubtitle + maintitle + maintitleaddon + note + number + organization + pages + part 
+ publisher + pubstate + series + subtitle + title + titleaddon + venue + volume + volumes + + + abstract + addendum + afterword + annotator + author + bookauthor + booksubtitle + booktitle + booktitleaddon + chapter + commentator + editor + editora + editorb + editorc + foreword + holder + institution + introduction + issuesubtitle + issuetitle + issuetitleaddon + journalsubtitle + journaltitle + journaltitleaddon + location + mainsubtitle + maintitle + maintitleaddon + nameaddon + note + organization + origlanguage + origlocation + origpublisher + origtitle + part + publisher + relatedstring + series + shortauthor + shorteditor + shorthand + shortjournal + shortseries + shorttitle + sortname + sortshorthand + sorttitle + subtitle + title + titleaddon + translator + venue + + + article + book + inbook + bookinbook + suppbook + booklet + collection + incollection + suppcollection + manual + misc + mvbook + mvcollection + online + patent + periodical + suppperiodical + proceedings + inproceedings + reference + inreference + report + set + thesis + unpublished + + + date + year + + + + + set + + entryset + + + + article + + author + journaltitle + title + + + + book + mvbook + + author + title + + + + inbook + bookinbook + suppbook + + author + title + booktitle + + + + booklet + + + author + editor + + title + + + + collection + reference + mvcollection + mvreference + + editor + title + + + + incollection + suppcollection + inreference + + author + editor + title + booktitle + + + + dataset + + title + + + + manual + + title + + + + misc + software + + title + + + + online + + title + + url + doi + eprint + + + + + patent + + author + title + number + + + + periodical + + editor + title + + + + proceedings + mvproceedings + + title + + + + inproceedings + + author + title + booktitle + + + + report + + author + title + type + institution + + + + thesis + + author + title + type + institution + + + + unpublished + + author + title + + + + + isbn + + + issn + + + 
ismn + + + gender + + + + book + inbook + article + report + + author + title + + + + + + + Bibliography.bib + + + + + + + presort + + + sortkey + + + sortname + author + editor + sorttitle + title + + + pubstate + + + sortyear + year + -2000000000 + + + month + -2000000000 + + + day + -2000000000 + + + sorttitle + title + + + volume + 0 + + + + + + diff --git a/appendix.fdb_latexmk b/appendix.fdb_latexmk new file mode 100644 index 0000000..3e08948 --- /dev/null +++ b/appendix.fdb_latexmk @@ -0,0 +1,257 @@ +# Fdb version 3 +["biber appendix"] 1665102555 "appendix.bcf" "appendix.bbl" "appendix" 1667261231 + "appendix.bcf" 1667261231 124448 076a2157e0a773217c16a962cfb54936 "pdflatex" + (generated) + "appendix.bbl" + "appendix.blg" +["pdflatex"] 1667261228 "appendix.tex" "appendix.pdf" "appendix" 1667261231 + "/etc/texmf/web2c/texmf.cnf" 1666310162 475 c0e671620eb5563b2130f56340a5fde8 "" + "/usr/share/texlive/texmf-dist/fonts/map/fontname/texfonts.map" 1577235249 3524 cb3e574dea2d1052e39280babc910dc8 "" + "/usr/share/texlive/texmf-dist/fonts/tfm/public/cm/cmr12.tfm" 1136768653 1288 655e228510b4c2a1abe905c368440826 "" + "/usr/share/texlive/texmf-dist/tex/context/base/mkii/supp-pdf.mkii" 1461363279 71627 94eb9990bed73c364d7f53f960cc8c5b "" + "/usr/share/texlive/texmf-dist/tex/generic/atbegshi/atbegshi.sty" 1575674566 24708 5584a51a7101caf7e6bbf1fc27d8f7b1 "" + "/usr/share/texlive/texmf-dist/tex/generic/babel-english/american.ldf" 1496785618 2768 564633551858ab4a7568c71525151d11 "" + "/usr/share/texlive/texmf-dist/tex/generic/babel-english/english.ldf" 1496785618 7008 9ff5fdcc865b01beca2b0fe4a46231d4 "" + "/usr/share/texlive/texmf-dist/tex/generic/babel/babel.sty" 1658348618 151308 f48d89beb96c9b108345f21bd476da55 "" + "/usr/share/texlive/texmf-dist/tex/generic/babel/locale/en/babel-american.tex" 1656274800 339 4c91b3e348320102d0fa5bf0680df231 "" + "/usr/share/texlive/texmf-dist/tex/generic/babel/locale/en/babel-en-US.ini" 1654547330 4191 48296e139c650c07eee6beafc5250bf6 
"" + "/usr/share/texlive/texmf-dist/tex/generic/babel/txtbabel.def" 1643231327 5233 d5e383ed66bf272b71b1a90b596e21c6 "" + "/usr/share/texlive/texmf-dist/tex/generic/bigintcalc/bigintcalc.sty" 1576625341 40635 c40361e206be584d448876bba8a64a3b "" + "/usr/share/texlive/texmf-dist/tex/generic/bitset/bitset.sty" 1576016050 33961 6b5c75130e435b2bfdb9f480a09a39f9 "" + "/usr/share/texlive/texmf-dist/tex/generic/etexcmds/etexcmds.sty" 1576625273 7734 b98cbb34c81f667027c1e3ebdbfce34b "" + "/usr/share/texlive/texmf-dist/tex/generic/gettitlestring/gettitlestring.sty" 1576625223 8371 9d55b8bd010bc717624922fb3477d92e "" + "/usr/share/texlive/texmf-dist/tex/generic/iftex/iftex.sty" 1644112042 7237 bdd120a32c8fdb4b433cf9ca2e7cd98a "" + "/usr/share/texlive/texmf-dist/tex/generic/iftex/ifvtex.sty" 1572645307 1057 525c2192b5febbd8c1f662c9468335bb "" + "/usr/share/texlive/texmf-dist/tex/generic/infwarerr/infwarerr.sty" 1575499628 8356 7bbb2c2373aa810be568c29e333da8ed "" + "/usr/share/texlive/texmf-dist/tex/generic/intcalc/intcalc.sty" 1576625065 31769 002a487f55041f8e805cfbf6385ffd97 "" + "/usr/share/texlive/texmf-dist/tex/generic/kvdefinekeys/kvdefinekeys.sty" 1576878844 5412 d5a2436094cd7be85769db90f29250a6 "" + "/usr/share/texlive/texmf-dist/tex/generic/kvsetkeys/kvsetkeys.sty" 1576624944 13807 952b0226d4efca026f0e19dd266dcc22 "" + "/usr/share/texlive/texmf-dist/tex/generic/ltxcmds/ltxcmds.sty" 1600895880 17859 4409f8f50cd365c68e684407e5350b1b "" + "/usr/share/texlive/texmf-dist/tex/generic/pdfescape/pdfescape.sty" 1576015897 19007 15924f7228aca6c6d184b115f4baa231 "" + "/usr/share/texlive/texmf-dist/tex/generic/pdftexcmds/pdftexcmds.sty" 1593379760 20089 80423eac55aa175305d35b49e04fe23b "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcore.code.tex" 1601326656 992 855ff26741653ab54814101ca36e153c "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcorearrows.code.tex" 1601326656 43820 1fef971b75380574ab35a0d37fd92608 "" + 
"/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcoreexternal.code.tex" 1601326656 19324 f4e4c6403dd0f1605fd20ed22fa79dea "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcoregraphicstate.code.tex" 1601326656 6038 ccb406740cc3f03bbfb58ad504fe8c27 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcoreimage.code.tex" 1601326656 6944 e12f8f7a7364ddf66f93ba30fb3a3742 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcorelayers.code.tex" 1601326656 4883 42daaf41e27c3735286e23e48d2d7af9 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcoreobjects.code.tex" 1601326656 2544 8c06d2a7f0f469616ac9e13db6d2f842 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcorepathconstruct.code.tex" 1601326656 44195 5e390c414de027626ca5e2df888fa68d "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcorepathprocessing.code.tex" 1601326656 17311 2ef6b2e29e2fc6a2fc8d6d652176e257 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcorepathusage.code.tex" 1601326656 21302 788a79944eb22192a4929e46963a3067 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcorepatterns.code.tex" 1601326656 9690 01feb7cde25d4293ef36eef45123eb80 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcorepoints.code.tex" 1601326656 33335 dd1fa4814d4e51f18be97d88bf0da60c "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcorequick.code.tex" 1601326656 2965 4c2b1f4e0826925746439038172e5d6f "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcorerdf.code.tex" 1601326656 5196 2cc249e0ee7e03da5f5f6589257b1e5b "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcorescopes.code.tex" 1601326656 20726 d4c8db1e2e53b72721d29916314a22ea "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcoreshade.code.tex" 1601326656 35249 abd4adf948f960299a4b3d27c5dddf46 "" + 
"/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcoretransformations.code.tex" 1601326656 21989 fdc867d05d228316de137a9fc5ec3bbe "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcoretransparency.code.tex" 1601326656 8893 e851de2175338fdf7c17f3e091d94618 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryarrows.code.tex" 1601326656 319 225dfe354ba678ff3c194968db39d447 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryfadings.code.tex" 1601326656 1179 5483d86c1582c569e665c74efab6281f "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibrarypositioning.code.tex" 1601326656 3937 3f208572dd82c71103831da976d74f1a "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshadows.code.tex" 1601326656 2889 d698e3a959304efa342d47e3bb86da5b "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshapes.arrows.code.tex" 1601326656 410 048d1174dabde96757a5387b8f23d968 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshapes.callouts.code.tex" 1601326656 1201 8bd51e254d3ecf0cd2f21edd9ab6f1bb "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshapes.code.tex" 1601326656 494 8de62576191924285b021f4fc4292e16 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshapes.geometric.code.tex" 1601326656 339 be0fe46d92a80e3385dd6a83511a46f2 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshapes.misc.code.tex" 1601326656 329 ba6d5440f8c16779c2384e0614158266 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshapes.multipart.code.tex" 1601326656 919 938802205ca20d7c36615aabc4d34be2 "" + 
"/usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshapes.symbols.code.tex" 1601326656 475 4b4056fe07caa0603fede9a162fe666d "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibrarysvg.path.code.tex" 1601326656 911 6574fc8fd117350d2b19ffbc21415df7 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibrarytopaths.code.tex" 1608933718 11518 738408f795261b70ce8dd47459171309 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/tikz.code.tex" 1621110968 186007 6e7dfe0bd57520fd5f91641aa72dcac8 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/pgflibraryarrows.code.tex" 1601326656 31874 89148c383c49d4c72114a76fd0062299 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/pgflibraryfadings.code.tex" 1601326656 2563 d5b174eb7709fd6bdcc2f70953dbdf8e "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/pgflibraryplothandlers.code.tex" 1601326656 32995 ac577023e12c0e4bd8aa420b2e852d1a "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/pgflibrarysvg.path.code.tex" 1601326656 24742 2664b65ba02d7355a10bbd4e3a69b2e7 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/shapes/pgflibraryshapes.arrows.code.tex" 1601326656 91587 e30123381f7b9bcf1341c31c6be18b94 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/shapes/pgflibraryshapes.callouts.code.tex" 1601326656 33336 427c354e28a4802ffd781da22ae9f383 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/shapes/pgflibraryshapes.geometric.code.tex" 1606168878 160993 6a81d63e475cc43874b46ed32a0a37c8 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/shapes/pgflibraryshapes.misc.code.tex" 1601326656 46241 588910a2f1e0a99f2c3e14490683c20d "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/shapes/pgflibraryshapes.multipart.code.tex" 1601326656 62281 aff261ef10ba6cbe8e3c872a38c05a61 "" + 
"/usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/shapes/pgflibraryshapes.symbols.code.tex" 1601326656 90515 e30b2c9c93aacc373e47917c0c2a48ed "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfint.code.tex" 1557692582 3063 8c415c68a0f3394e45cfeca0b65f6ee6 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmath.code.tex" 1601326656 521 8e224a7af69b7fee4451d1bf76b46654 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathcalc.code.tex" 1601326656 13391 84d29568c13bdce4133ab4a214711112 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathfloat.code.tex" 1601326656 104935 184ed87524e76d4957860df4ce0cd1c3 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathfunctions.base.code.tex" 1601326656 10165 cec5fa73d49da442e56efc2d605ef154 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathfunctions.basic.code.tex" 1601326656 28178 41c17713108e0795aac6fef3d275fbca "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathfunctions.code.tex" 1601326656 9989 c55967bf45126ff9b061fa2ca0c4694f "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathfunctions.comparison.code.tex" 1601326656 3865 ac538ab80c5cf82b345016e474786549 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathfunctions.integerarithmetics.code.tex" 1557692582 3177 27d85c44fbfe09ff3b2cf2879e3ea434 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathfunctions.misc.code.tex" 1621110968 11024 0179538121bc2dba172013a3ef89519f "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathfunctions.random.code.tex" 1608933718 7854 4176998eeefd8745ac6d2d4bd9c98451 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathfunctions.round.code.tex" 1601326656 3379 781797a101f647bab82741a99944a229 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathfunctions.trigonometric.code.tex" 1601326656 92405 f515f31275db273f97b9d8f52e1b0736 "" + 
"/usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathparser.code.tex" 1601326656 37376 11cd75aac3da1c1b152b2848f30adc14 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathutil.code.tex" 1601326656 8471 c2883569d03f69e8e1cabfef4999cfd7 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/modules/pgfmodulematrix.code.tex" 1601326656 21201 08d231a2386e2b61d64641c50dc15abd "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/modules/pgfmoduleparser.code.tex" 1601326656 19581 c8cc0eb77d3c8a725f41ccfbc23bbb9d "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/modules/pgfmoduleplot.code.tex" 1601326656 16121 346f9013d34804439f7436ff6786cef7 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/modules/pgfmoduleshapes.code.tex" 1621110968 44784 cedaa399d15f95e68e22906e2cc09ef8 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/pgf.revision.tex" 1621110968 465 d68603f8b820ea4a08cce534944db581 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/systemlayer/pgf.cfg" 1601326656 926 2963ea0dcf6cc6c0a770b69ec46a477b "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/systemlayer/pgfsys-common-pdf.def" 1601326656 5546 f3f24d7898386cb7daac70bdd2c4d6dc "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/systemlayer/pgfsys-pdftex.def" 1601326656 12601 4786e597516eddd82097506db7cfa098 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/systemlayer/pgfsys.code.tex" 1621110968 61163 9b2eefc24e021323e0fc140e9826d016 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/systemlayer/pgfsysprotocol.code.tex" 1601326656 1896 b8e0ca0ac371d74c0ca05583f6313c91 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/systemlayer/pgfsyssoftpath.code.tex" 1601326656 7778 53c8b5623d80238f6a20aa1df1868e63 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgffor.code.tex" 1606168878 23997 a4bed72405fa644418bea7eac2887006 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgfkeys.code.tex" 1621110968 37060 797782f0eb50075c9bc952374d9a659a "" + 
"/usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgfkeysfiltered.code.tex" 1601326656 37431 9abe862035de1b29c7a677f3205e3d9f "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgfrcs.code.tex" 1601326656 4494 af17fb7efeafe423710479858e42fa7e "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgfutil-common-lists.tex" 1601326656 7251 fb18c67117e09c64de82267e12cd8aa4 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgfutil-common.tex" 1621110968 29274 e15c5b7157d21523bd9c9f1dfa146b8e "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgfutil-latex.def" 1621110968 6825 a2b0ea5b539dda0625e99dd15785ab59 "" + "/usr/share/texlive/texmf-dist/tex/generic/uniquecounter/uniquecounter.sty" 1576624663 7008 f92eaa0a3872ed622bbf538217cd2ab7 "" + "/usr/share/texlive/texmf-dist/tex/latex/amsmath/amsbsy.sty" 1654720880 2222 78b930a5a6e3dc2ac69b78c2057b94d7 "" + "/usr/share/texlive/texmf-dist/tex/latex/amsmath/amsgen.sty" 1654720880 4173 c989ee3ced31418e3593916ab26c793a "" + "/usr/share/texlive/texmf-dist/tex/latex/amsmath/amsmath.sty" 1654720880 88393 1adf6fa3f245270d06e3d4f8910f7fc5 "" + "/usr/share/texlive/texmf-dist/tex/latex/amsmath/amsopn.sty" 1654720880 4474 f04cd1cc7bd76eb033e6fb12eb6a0d77 "" + "/usr/share/texlive/texmf-dist/tex/latex/amsmath/amstext.sty" 1654720880 2444 70065bddd85997dc1fd0bb7ae634e5fa "" + "/usr/share/texlive/texmf-dist/tex/latex/apa7/apa7.cls" 1642541407 60920 30ce14ae2be740aa48eef81166b905b3 "" + "/usr/share/texlive/texmf-dist/tex/latex/apa7/config/APA7american.txt" 1642541407 3217 0343f6cfdd853e7dea4ca37ce622214f "" + "/usr/share/texlive/texmf-dist/tex/latex/atveryend/atveryend.sty" 1576191570 19336 ce7ae9438967282886b3b036cfad1e4d "" + "/usr/share/texlive/texmf-dist/tex/latex/auxhook/auxhook.sty" 1576625391 3935 57aa3c3e203a5c2effb4d2bd2efbc323 "" + "/usr/share/texlive/texmf-dist/tex/latex/base/alltt.sty" 1654720880 3137 837d2e4f1defd7c190a44408f494ec95 "" + 
"/usr/share/texlive/texmf-dist/tex/latex/base/article.cls" 1654720880 20144 7555b7429d80bef287ebb82117811acc "" + "/usr/share/texlive/texmf-dist/tex/latex/base/atbegshi-ltx.sty" 1654720880 3122 8df402c6591ccc8ed35ce64c1c49c50b "" + "/usr/share/texlive/texmf-dist/tex/latex/base/atveryend-ltx.sty" 1654720880 2462 2ab3964e30f8e7a2977395016edcbbc6 "" + "/usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty" 1654720880 5119 4ce42f43368f652f9c9522d943cce8e4 "" + "/usr/share/texlive/texmf-dist/tex/latex/base/ifthen.sty" 1654720880 5319 48d7f3cfa322abd2788e3c09d624b922 "" + "/usr/share/texlive/texmf-dist/tex/latex/base/inputenc.sty" 1654720880 5048 84b05796b49b69e2d4257d537721c960 "" + "/usr/share/texlive/texmf-dist/tex/latex/base/size12.clo" 1654720880 8449 7fbdc9c8596083427317c1e525489c81 "" + "/usr/share/texlive/texmf-dist/tex/latex/base/textcomp.sty" 1654720880 2894 f2f8ee7d4fb94263f9f255fa22cab2d3 "" + "/usr/share/texlive/texmf-dist/tex/latex/biblatex-apa/american-apa.lbx" 1656163846 15338 f9a26813c7ed13105ae11354f255582b "" + "/usr/share/texlive/texmf-dist/tex/latex/biblatex-apa/apa.bbx" 1656163846 68091 f4525fa4793d60658a3e9448826fb7fe "" + "/usr/share/texlive/texmf-dist/tex/latex/biblatex-apa/apa.cbx" 1656163846 20185 96097b806b41a55551394c1afad28064 "" + "/usr/share/texlive/texmf-dist/tex/latex/biblatex-apa/apa.dbx" 1656163846 2676 5880b0b8d6c12bbdfb64146ad361fe84 "" + "/usr/share/texlive/texmf-dist/tex/latex/biblatex-apa/english-apa.lbx" 1656163846 9935 19aa139f6a8ff2d300f300a6fb282646 "" + "/usr/share/texlive/texmf-dist/tex/latex/biblatex/bbx/standard.bbx" 1609451401 25680 409c3f3d570418bc545e8065bebd0688 "" + "/usr/share/texlive/texmf-dist/tex/latex/biblatex/biblatex.cfg" 1342308459 69 249fa6df04d948e51b6d5c67bea30c42 "" + "/usr/share/texlive/texmf-dist/tex/latex/biblatex/biblatex.def" 1656017808 92456 21e687f013958a6cb57adaa61a04572a "" + "/usr/share/texlive/texmf-dist/tex/latex/biblatex/biblatex.sty" 1657655400 526811 a1f8c6dfa1788c26d4b7587a2e99a625 "" + 
"/usr/share/texlive/texmf-dist/tex/latex/biblatex/blx-case-expl3.sty" 1609451401 8433 72f8188742e7214b7068f345cd0287ac "" + "/usr/share/texlive/texmf-dist/tex/latex/biblatex/blx-compat.def" 1643926307 13919 5426dbe90e723f089052b4e908b56ef9 "" + "/usr/share/texlive/texmf-dist/tex/latex/biblatex/blx-dm.def" 1643926307 32455 8d3e554836db11aab80a8e11be62e1b1 "" + "/usr/share/texlive/texmf-dist/tex/latex/biblatex/blx-natbib.def" 1541279461 2190 4b4fcc6752fa7201177431e523c40d74 "" + "/usr/share/texlive/texmf-dist/tex/latex/biblatex/lbx/american.lbx" 1342308459 169 40f2892b6b9cee1ffa9c07b78605a5a1 "" + "/usr/share/texlive/texmf-dist/tex/latex/biblatex/lbx/english.lbx" 1643926307 39965 48ce9ce3350aba9457f1020b1deba5cf "" + "/usr/share/texlive/texmf-dist/tex/latex/booktabs/booktabs.sty" 1579038678 6078 f1cb470c9199e7110a27851508ed7a5c "" + "/usr/share/texlive/texmf-dist/tex/latex/caption/caption.sty" 1647548653 54291 b8e5c600d4aa37b48a740dd2a6c26163 "" + "/usr/share/texlive/texmf-dist/tex/latex/caption/caption3.sty" 1647548653 71241 d2cd3a1c5acef9cb31f945b93c0bb6e3 "" + "/usr/share/texlive/texmf-dist/tex/latex/caption/subcaption.sty" 1645391520 11546 6c5257d230d8c5626812b45bc2f31212 "" + "/usr/share/texlive/texmf-dist/tex/latex/csquotes/csquotes.cfg" 1429144587 7068 06f8d141725d114847527a66439066b6 "" + "/usr/share/texlive/texmf-dist/tex/latex/csquotes/csquotes.def" 1614030765 20781 dc1bec6693d5466d8972ecc6b81f9f0b "" + "/usr/share/texlive/texmf-dist/tex/latex/csquotes/csquotes.sty" 1614030765 62518 6e0d74482f5cb16b3b0755031e72faf1 "" + "/usr/share/texlive/texmf-dist/tex/latex/draftwatermark/draftwatermark.sty" 1607465154 6990 c19f0e2ee25970cd4873c23ce67e5a94 "" + "/usr/share/texlive/texmf-dist/tex/latex/endfloat/endfloat.sty" 1557078193 17055 97f1b7400dd9c8c5e7d57643a8807fd7 "" + "/usr/share/texlive/texmf-dist/tex/latex/epstopdf-pkg/epstopdf-base.sty" 1579991033 13886 d1306dcf79a944f6988e688c1785f9ce "" + "/usr/share/texlive/texmf-dist/tex/latex/epstopdf-pkg/epstopdf.sty" 
1579991033 4393 47f27fd4d95928d20b1885ba77de11d2 "" + "/usr/share/texlive/texmf-dist/tex/latex/etoolbox/etoolbox.sty" 1601931149 46845 3b58f70c6e861a13d927bff09d35ecbc "" + "/usr/share/texlive/texmf-dist/tex/latex/fancyhdr/fancyhdr.sty" 1652903436 17280 7856508378dfe40ed74280f5b81e31b6 "" + "/usr/share/texlive/texmf-dist/tex/latex/float/float.sty" 1137110151 6749 16d2656a1984957e674b149555f1ea1d "" + "/usr/share/texlive/texmf-dist/tex/latex/framed/framed.sty" 1338588508 22449 7ec15c16d0d66790f28e90343c5434a3 "" + "/usr/share/texlive/texmf-dist/tex/latex/geometry/geometry.sty" 1578002852 41601 9cf6c5257b1bc7af01a58859749dd37a "" + "/usr/share/texlive/texmf-dist/tex/latex/graphics-cfg/color.cfg" 1459978653 1213 620bba36b25224fa9b7e1ccb4ecb76fd "" + "/usr/share/texlive/texmf-dist/tex/latex/graphics-cfg/graphics.cfg" 1465944070 1224 978390e9c2234eab29404bc21b268d1e "" + "/usr/share/texlive/texmf-dist/tex/latex/graphics-def/pdftex.def" 1601931164 19103 48d29b6e2a64cb717117ef65f107b404 "" + "/usr/share/texlive/texmf-dist/tex/latex/graphics/color.sty" 1654720880 7233 e46ce9241d2b2ca2a78155475fdd557a "" + "/usr/share/texlive/texmf-dist/tex/latex/graphics/graphics.sty" 1654720880 18387 8f900a490197ebaf93c02ae9476d4b09 "" + "/usr/share/texlive/texmf-dist/tex/latex/graphics/graphicx.sty" 1654720880 8010 a8d949cbdbc5c983593827c9eec252e1 "" + "/usr/share/texlive/texmf-dist/tex/latex/graphics/keyval.sty" 1654720880 2671 7e67d78d9b88c845599a85b2d41f2e39 "" + "/usr/share/texlive/texmf-dist/tex/latex/graphics/mathcolor.ltx" 1654720880 3171 1cf0d440b5464e2f034398ce4ef36f75 "" + "/usr/share/texlive/texmf-dist/tex/latex/graphics/trig.sty" 1654720880 4023 293ea1c16429fc0c4cf605f4da1791a9 "" + "/usr/share/texlive/texmf-dist/tex/latex/grfext/grfext.sty" 1575499774 7133 b94bbacbee6e4fdccdc7f810b2aec370 "" + "/usr/share/texlive/texmf-dist/tex/latex/hycolor/hycolor.sty" 1580250785 17914 4c28a13fc3d975e6e81c9bea1d697276 "" + "/usr/share/texlive/texmf-dist/tex/latex/hyperref/hpdftex.def" 
1655759286 47964 eeb2a5ee738d9e82276d44d01b7f8855 "" + "/usr/share/texlive/texmf-dist/tex/latex/hyperref/hyperref.sty" 1655759286 222567 bf49823ea499fb02153a3135548e8552 "" + "/usr/share/texlive/texmf-dist/tex/latex/hyperref/nameref.sty" 1652818262 12951 45609f529c67717a6d5046d7f3d77f03 "" + "/usr/share/texlive/texmf-dist/tex/latex/hyperref/pd1enc.def" 1655759286 14249 5722edfd0a97304b67eaad1229597886 "" + "/usr/share/texlive/texmf-dist/tex/latex/hyperref/puenc.def" 1655759286 117125 21f7791400296a3ca7ace2461e9f1794 "" + "/usr/share/texlive/texmf-dist/tex/latex/kvoptions/kvoptions.sty" 1655478651 22555 6d8e155cfef6d82c3d5c742fea7c992e "" + "/usr/share/texlive/texmf-dist/tex/latex/l3backend/l3backend-pdftex.def" 1656792629 31050 293f2cc98a3575c4228b5157bf850b8f "" + "/usr/share/texlive/texmf-dist/tex/latex/l3kernel/expl3.sty" 1657921463 6107 b3c06bf83accea84563c47d52b03b82f "" + "/usr/share/texlive/texmf-dist/tex/latex/l3packages/xparse/xparse.sty" 1656017767 6812 d2f733947d73940b228845829d585700 "" + "/usr/share/texlive/texmf-dist/tex/latex/latexconfig/epstopdf-sys.cfg" 1279039959 678 4792914a8f45be57bb98413425e4c7af "" + "/usr/share/texlive/texmf-dist/tex/latex/letltxmacro/letltxmacro.sty" 1575499565 5766 13a9e8766c47f30327caf893ece86ac8 "" + "/usr/share/texlive/texmf-dist/tex/latex/logreq/logreq.def" 1284153563 1620 fb1c32b818f2058eca187e5c41dfae77 "" + "/usr/share/texlive/texmf-dist/tex/latex/logreq/logreq.sty" 1284153563 6187 b27afc771af565d3a9ff1ca7d16d0d46 "" + "/usr/share/texlive/texmf-dist/tex/latex/pgf/basiclayer/pgf.sty" 1601326656 1090 bae35ef70b3168089ef166db3e66f5b2 "" + "/usr/share/texlive/texmf-dist/tex/latex/pgf/basiclayer/pgfcore.sty" 1601326656 410 615550c46f918fcbee37641b02a862d9 "" + "/usr/share/texlive/texmf-dist/tex/latex/pgf/compatibility/pgfcomp-version-0-65.sty" 1601326656 21013 f4ff83d25bb56552493b030f27c075ae "" + "/usr/share/texlive/texmf-dist/tex/latex/pgf/compatibility/pgfcomp-version-1-18.sty" 1601326656 989 
c49c8ae06d96f8b15869da7428047b1e "" + "/usr/share/texlive/texmf-dist/tex/latex/pgf/frontendlayer/tikz.sty" 1601326656 339 c2e180022e3afdb99c7d0ea5ce469b7d "" + "/usr/share/texlive/texmf-dist/tex/latex/pgf/math/pgfmath.sty" 1601326656 306 c56a323ca5bf9242f54474ced10fca71 "" + "/usr/share/texlive/texmf-dist/tex/latex/pgf/systemlayer/pgfsys.sty" 1601326656 443 8c872229db56122037e86bcda49e14f3 "" + "/usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgffor.sty" 1601326656 348 ee405e64380c11319f0e249fed57e6c5 "" + "/usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfkeys.sty" 1601326656 274 5ae372b7df79135d240456a1c6f2cf9a "" + "/usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfrcs.sty" 1601326656 325 f9f16d12354225b7dd52a3321f085955 "" + "/usr/share/texlive/texmf-dist/tex/latex/refcount/refcount.sty" 1576624809 9878 9e94e8fa600d95f9c7731bb21dfb67a4 "" + "/usr/share/texlive/texmf-dist/tex/latex/rerunfilecheck/rerunfilecheck.sty" 1657483315 9714 ba3194bd52c8499b3f1e3eb91d409670 "" + "/usr/share/texlive/texmf-dist/tex/latex/scalerel/scalerel.sty" 1483104048 7825 43f8c26a0a3916d218f4f48c29aa92e4 "" + "/usr/share/texlive/texmf-dist/tex/latex/threeparttable/threeparttable.sty" 1267981840 13506 a4e71a27db1a69b6fabada5beebf0844 "" + "/usr/share/texlive/texmf-dist/tex/latex/tools/array.sty" 1654720880 12694 4770336659ba563be5de2e0739d61ddc "" + "/usr/share/texlive/texmf-dist/tex/latex/tools/bm.sty" 1654720880 13231 b52297489a0e9d929aae403417d92a02 "" + "/usr/share/texlive/texmf-dist/tex/latex/tools/calc.sty" 1654720880 10214 de3e21cfc0eccc98ca7f8dac0ef263d2 "" + "/usr/share/texlive/texmf-dist/tex/latex/tools/enumerate.sty" 1654720880 3468 46ba9177f0f0a79fe79845d3eebff113 "" + "/usr/share/texlive/texmf-dist/tex/latex/tools/tabularx.sty" 1654720880 7147 be6981d9f5d866a5634048c4a11814a9 "" + "/usr/share/texlive/texmf-dist/tex/latex/upquote/upquote.sty" 1334873510 1048 517e01cde97c1c0baf72e69d43aa5a2e "" + "/usr/share/texlive/texmf-dist/tex/latex/url/url.sty" 
1388531844 12796 8edb7d69a20b857904dd0ea757c14ec9 "" + "/usr/share/texlive/texmf-dist/tex/latex/wrapfig/wrapfig.sty" 1137111090 26220 3701aebf80ccdef248c0c20dd062fea9 "" + "/usr/share/texlive/texmf-dist/tex/latex/xcolor/xcolor.sty" 1655066402 56148 51a9a8571c07b9921892ae11063ae853 "" + "/usr/share/texlive/texmf-dist/web2c/texmf.cnf" 1658757727 39561 34c98e380bf7c7201ee6a7909aff625a "" + "/usr/share/texmf/fonts/enc/dvips/lm/lm-ec.enc" 1254269338 2375 baa924870cfb487815765f9094cf3728 "" + "/usr/share/texmf/fonts/enc/dvips/lm/lm-mathit.enc" 1202520719 2405 5dcf2c1b967ee25cc46c58cd52244aed "" + "/usr/share/texmf/fonts/tfm/public/lm/ec-lmbx12.tfm" 1254269338 12088 d750ac78274fa7c9f73ba09914c04f8a "" + "/usr/share/texmf/fonts/tfm/public/lm/ec-lmr12.tfm" 1254269338 12092 7b1546e2d096cfd5dcbd4049b0b1ec2e "" + "/usr/share/texmf/fonts/tfm/public/lm/ec-lmr17.tfm" 1254269338 12156 ca1ae6a3c8564e89597f1f993fba1608 "" + "/usr/share/texmf/fonts/tfm/public/lm/ec-lmr8.tfm" 1254269338 12064 a35db870f0b76c338d749c56dc030ef5 "" + "/usr/share/texmf/fonts/tfm/public/lm/ec-lmri12.tfm" 1254269338 17144 271aaf9ebb339934b04110dc5211fba4 "" + "/usr/share/texmf/fonts/tfm/public/lm/lmbsy10.tfm" 1148093231 1300 2df9da0fc09d4a8c772b3dd386a47c6a "" + "/usr/share/texmf/fonts/tfm/public/lm/lmbsy7.tfm" 1148093231 1304 535e0954c1961c817723e44bc6a9662c "" + "/usr/share/texmf/fonts/tfm/public/lm/lmex10.tfm" 1148093231 992 ce925c9346c7613270a79afbee98c070 "" + "/usr/share/texmf/fonts/tfm/public/lm/lmmi12.tfm" 1148093231 1524 753b192b18f2991794f9d41a8228510b "" + "/usr/share/texmf/fonts/tfm/public/lm/lmmi6.tfm" 1148093231 1512 94a3fd88c6f27dbd9ecb46987e297a4e "" + "/usr/share/texmf/fonts/tfm/public/lm/lmmi8.tfm" 1148093231 1520 a3fe5596932db2db2cbda300920dd4e9 "" + "/usr/share/texmf/fonts/tfm/public/lm/lmmib10.tfm" 1148093231 1524 94d8ba2701edc3d8c3337e16e222f220 "" + "/usr/share/texmf/fonts/tfm/public/lm/lmmib7.tfm" 1148093231 1508 e1d41318430466dfe2207ded55ef3af5 "" + 
"/usr/share/texmf/fonts/tfm/public/lm/lmsy10.tfm" 1148093231 1308 02cc510f9dd6012e5815d0c0ffbf6869 "" + "/usr/share/texmf/fonts/tfm/public/lm/lmsy6.tfm" 1148093231 1300 b0605d44c16c22d99dc001808e4f24ea "" + "/usr/share/texmf/fonts/tfm/public/lm/lmsy8.tfm" 1148093231 1304 cdc9a17df9ef0d2dc320eff37bbab1c4 "" + "/usr/share/texmf/fonts/tfm/public/lm/rm-lmbx12.tfm" 1254269338 11880 ea60d06924270684e6f852f3141c992b "" + "/usr/share/texmf/fonts/tfm/public/lm/rm-lmbx6.tfm" 1254269338 11852 eda7061aa4cc8552ba736dae866e4460 "" + "/usr/share/texmf/fonts/tfm/public/lm/rm-lmbx8.tfm" 1254269338 11868 731e03b24d399279cf9609d002110394 "" + "/usr/share/texmf/fonts/tfm/public/lm/rm-lmr12.tfm" 1254269338 11888 6841b91e46b65cf41a49b160e6e74130 "" + "/usr/share/texmf/fonts/tfm/public/lm/rm-lmr6.tfm" 1254269338 11836 e3b6ce3e601aec94f64a536e7f4224d5 "" + "/usr/share/texmf/fonts/tfm/public/lm/rm-lmr8.tfm" 1254269338 11864 309fd7f43e4a0ba39f6f7644d76e8edf "" + "/usr/share/texmf/fonts/type1/public/lm/lmbx12.pfb" 1255129361 116908 1fca96723793882c2e0160350c192fc8 "" + "/usr/share/texmf/fonts/type1/public/lm/lmmi12.pfb" 1254269338 30696 2654571912f9cd384da9f7cb8a60c568 "" + "/usr/share/texmf/fonts/type1/public/lm/lmmi8.pfb" 1254269338 30635 833ec815d446ec453a4913fc26d24cbc "" + "/usr/share/texmf/fonts/type1/public/lm/lmr12.pfb" 1255129361 113634 f99c44d58bae0863375faf0e1d74d612 "" + "/usr/share/texmf/fonts/type1/public/lm/lmr17.pfb" 1255129361 119752 1bd8d06e4079df624bf59ce3ad7c9aa6 "" + "/usr/share/texmf/fonts/type1/public/lm/lmr8.pfb" 1255129361 122174 a7a08406857c9530a0320a2517f60370 "" + "/usr/share/texmf/fonts/type1/public/lm/lmri12.pfb" 1255129361 109265 32320cb6133d4d76bf83e27b5eb4009b "" + "/usr/share/texmf/tex/latex/lm/lmodern.sty" 1616454256 1608 b00724785a9e9c599e5181bb8729160b "" + "/usr/share/texmf/tex/latex/lm/omllmm.fd" 1616454256 890 57f5adccd504fb5c98bdf99ed7e7f195 "" + "/usr/share/texmf/tex/latex/lm/omslmsy.fd" 1616454256 807 3de192f3efa968913bd2f096a7b430d8 "" + 
"/usr/share/texmf/tex/latex/lm/omxlmex.fd" 1616454256 568 a5494d810f2680caf10205cd1226c76c "" + "/usr/share/texmf/tex/latex/lm/ot1lmr.fd" 1616454256 1882 28c08db1407ebff35a658fd141753d16 "" + "/usr/share/texmf/tex/latex/lm/t1lmr.fd" 1616454256 1867 996fe743d88a01aca041ed22cc10e1bb "" + "/usr/share/texmf/web2c/texmf.cnf" 1658757727 39561 34c98e380bf7c7201ee6a7909aff625a "" + "/var/lib/texmf/fonts/map/pdftex/updmap/pdftex.map" 1666310487 4578607 d069bc70ba6abc8a4382683ea17c45fb "" + "/var/lib/texmf/web2c/pdftex/pdflatex.fmt" 1666310189 1628849 80260b62ec39921905d6c2cfa2d7ef0f "" + "appendix.aux" 1667261231 1316 73658d86e9736a13e22c1e8dd5223f76 "pdflatex" + "appendix.bbl" 1665102556 466 ee04f40f768daeed26442158e95aba61 "biber appendix" + "appendix.out" 1667261231 267 d306f7b588e73d1e14310c90d67dd0d6 "pdflatex" + "appendix.run.xml" 1667261231 2491 7032f44bda5f9b46bb7e133f2b29e81f "pdflatex" + "appendix.tex" 1667261228 5069 607c6e04261e88d20eed9450dc3d3598 "" + "figure/example1_g-1.pdf" 1667261228 9062 eb9f61c7ca29002f294d15de07e997f2 "" + (generated) + "appendix.aux" + "appendix.bcf" + "appendix.log" + "appendix.out" + "appendix.pdf" + "appendix.run.xml" diff --git a/appendix.pdf b/appendix.pdf new file mode 100644 index 0000000..1ba0a28 Binary files /dev/null and b/appendix.pdf differ diff --git a/appendix.run.xml b/appendix.run.xml new file mode 100644 index 0000000..b425501 --- /dev/null +++ b/appendix.run.xml @@ -0,0 +1,90 @@ + + + + + + + + + + + + + + + + + + + + + + + + +]> + + + latex + + appendix.bcf + + + appendix.bbl + + + blx-dm.def + apa.dbx + blx-compat.def + biblatex.def + blx-natbib.def + standard.bbx + apa.bbx + apa.cbx + biblatex.cfg + english.lbx + english-apa.lbx + american.lbx + american-apa.lbx + + + + biber + + biber + appendix + + + appendix.bcf + + + appendix.bbl + + + appendix.bbl + + + appendix.bcf + + + Bibliography.bib + + + diff --git a/appendix.tex b/appendix.tex new file mode 100644 index 0000000..2e21574 --- /dev/null +++ b/appendix.tex 
@@ -0,0 +1,149 @@ +\documentclass[floatsintext, man, draftfirst]{apa7}\usepackage[]{graphicx}\usepackage[]{xcolor} +% maxwidth is the original width if it is less than linewidth +% otherwise use linewidth (to make sure the graphics do not exceed the margin) +\makeatletter +\def\maxwidth{ % + \ifdim\Gin@nat@width>\linewidth + \linewidth + \else + \Gin@nat@width + \fi +} +\makeatother + +\definecolor{fgcolor}{rgb}{0.345, 0.345, 0.345} +\newcommand{\hlnum}[1]{\textcolor[rgb]{0.686,0.059,0.569}{#1}}% +\newcommand{\hlstr}[1]{\textcolor[rgb]{0.192,0.494,0.8}{#1}}% +\newcommand{\hlcom}[1]{\textcolor[rgb]{0.678,0.584,0.686}{\textit{#1}}}% +\newcommand{\hlopt}[1]{\textcolor[rgb]{0,0,0}{#1}}% +\newcommand{\hlstd}[1]{\textcolor[rgb]{0.345,0.345,0.345}{#1}}% +\newcommand{\hlkwa}[1]{\textcolor[rgb]{0.161,0.373,0.58}{\textbf{#1}}}% +\newcommand{\hlkwb}[1]{\textcolor[rgb]{0.69,0.353,0.396}{#1}}% +\newcommand{\hlkwc}[1]{\textcolor[rgb]{0.333,0.667,0.333}{#1}}% +\newcommand{\hlkwd}[1]{\textcolor[rgb]{0.737,0.353,0.396}{\textbf{#1}}}% +\let\hlipl\hlkwb + +\usepackage{framed} +\makeatletter +\newenvironment{kframe}{% + \def\at@end@of@kframe{}% + \ifinner\ifhmode% + \def\at@end@of@kframe{\end{minipage}}% + \begin{minipage}{\columnwidth}% + \fi\fi% + \def\FrameCommand##1{\hskip\@totalleftmargin \hskip-\fboxsep + \colorbox{shadecolor}{##1}\hskip-\fboxsep + % There is no \\@totalrightmargin, so: + \hskip-\linewidth \hskip-\@totalleftmargin \hskip\columnwidth}% + \MakeFramed {\advance\hsize-\width + \@totalleftmargin\z@ \linewidth\hsize + \@setminipage}}% + {\par\unskip\endMakeFramed% + \at@end@of@kframe} +\makeatother + +\definecolor{shadecolor}{rgb}{.97, .97, .97} +\definecolor{messagecolor}{rgb}{0, 0, 0} +\definecolor{warningcolor}{rgb}{1, 0, 1} +\definecolor{errorcolor}{rgb}{1, 0, 0} +\newenvironment{knitrout}{}{} % an empty environment to be redefined in TeX + +\usepackage{alltt} + + +% maxwidth is the original width if it is less than linewidth +% otherwise use linewidth (to make 
sure the graphics do not exceed the margin) +\makeatletter +\def\maxwidth{ % + \ifdim\Gin@nat@width>\linewidth + \linewidth + \else + \Gin@nat@width + \fi +} + +\usepackage{alltt} + + +\usepackage{epstopdf}% To incorporate .eps illustrations using PDFLaTeX, etc. +\usepackage{subcaption}% Support for small, `sub' figures and tables +\usepackage{tikz} +\usetikzlibrary{positioning, shapes, arrows, shadows} + +\def \parrotpdf {\includegraphics[]{parrot.pdf}} +\DeclareUnicodeCharacter{1F99C}{\parrotpdf} +\usepackage{tabularx} +\usepackage[utf8]{inputenc} +\usepackage{wrapfig} +\usepackage[T1]{fontenc} +\usepackage{textcomp} +% \usepackage[garamond]{mathdesign} + +% \usepackage[letterpaper,left=1in,right=1in,top=1in,bottom=1in]{geometry} + +% packages i use in essentially every document +\usepackage{graphicx} +\usepackage{enumerate} + +% packages i use in many documents but leave off by default +\usepackage{amsmath}%}, amsthm, amssymb} +\DeclareMathOperator*{\argmin}{arg\,min} % thin space, limits underneath in displays +\DeclareMathOperator*{\argmax}{arg\,max} % thin space, limits underneath in displays + + +\usepackage{subcaption} +% import and customize urls +% \usepackage[usenames,dvipsnames]{color} +% \usepackage[breaklinks]{hyperref} + +\hypersetup{colorlinks=true, linkcolor=black, citecolor=black, filecolor=blue, + urlcolor=blue, unicode=true} + +% add bibliographic stuff +\usepackage[american]{babel} +\usepackage{csquotes} +\usepackage[natbib=true, style=apa, sortcites=true, backend=biber]{biblatex} +\addbibresource{Bibliography.bib} +\DeclareLanguageMapping{american}{american-apa} + +\defbibheading{secbib}[\bibname]{% + \section*{#1}% + \markboth{#1}{#1}% + \baselineskip 14.2pt% + \prebibhook} + +\def\citepos#1{\citeauthor{#1}'s (\citeyear{#1})} +\def\citespos#1{\citeauthor{#1}' (\citeyear{#1})} +\newcommand\TODO[1]{\textsc{\color{red} #1}} + +% I've gotten advice to make this as general as possible to attract the widest possible audience. 
+\title{Appendices for: What to do about prediction errors in automated content analysis} + +\shorttitle{Appendices: Prediction errors in automated content analysis} + +\authorsnames[1,2,3]{Nathan TeBlunthuis, Valerie Hase, Chung-hong Chan} +\authorsaffiliations{{Northwestern University}, {LMU Munich}, {GESIS - Leibniz-Institut für Sozialwissenschaften}} +\leftheader{TeBlunthuis, Hase \& Chan} + +\keywords{ +Content Analysis; Machine Learning; Classification Error; Attenuation Bias; Simulation; Computational Methods; Big Data; AI; +} + +\IfFileExists{upquote.sty}{\usepackage{upquote}}{} + +\abstract{} +\IfFileExists{upquote.sty}{\usepackage{upquote}}{} +\begin{document} +\maketitle +\section{Additional plots from Simulations 1 and 2} +\label{appendix:main.sim.plots} +\begin{figure} +\begin{knitrout} +\definecolor{shadecolor}{rgb}{0.969, 0.969, 0.969}\color{fgcolor} +\includegraphics[width=\maxwidth]{figure/example1_g-1} +\end{knitrout} +\caption{Estimates of $B_z$ in multivariate regression with $X$ measured using machine learning and model accuracy independent of $X$, $Y$, and $Z$. All methods obtain precise and accurate estimates given sufficient validation data.} +\end{figure} + + +\end{document} diff --git a/article.Rtex b/article.Rtex new file mode 100644 index 0000000..f9e3243 --- /dev/null +++ b/article.Rtex @@ -0,0 +1,1226 @@ +\documentclass[floatsintext, draftfirst, man]{apa7} +<>= +library(knitr) +library(ggplot2) +library(data.table) +knitr::opts_chunk$set(fig.show='hold') +f <- function (x) {formatC(x, format="d", big.mark=',')} +format.percent <- function(x) {paste(f(x*100),"\\%",sep='')} + +theme_set(theme_bw()) +source('resources/functions.R') +source('resources/variables.R') +source('resources/real_data_example.R') +@ + + +\usepackage{epstopdf}% To incorporate .eps illustrations using PDFLaTeX, etc. 
+\usepackage{subcaption}% Support for small, `sub' figures and tables +\usepackage{tikz} +\usetikzlibrary{positioning, shapes, arrows, shadows, arrows.meta} + +\def \parrotpdf {\includegraphics[]{parrot.pdf}} +\DeclareUnicodeCharacter{1F99C}{\parrotpdf} +\usepackage{tabularx} +\usepackage[utf8]{inputenc} +\usepackage{wrapfig} +\usepackage[T1]{fontenc} +\usepackage{textcomp} +\usepackage{listings} +\usepackage{xcolor} + +%New colors defined below +\definecolor{codegreen}{rgb}{0,0.6,0} +\definecolor{codegray}{rgb}{0.5,0.5,0.5} +\definecolor{codepurple}{rgb}{0.58,0,0.82} +\definecolor{backcolour}{rgb}{0.95,0.95,0.92} + +%Code listing style named "mystyle" +\lstdefinestyle{mystyle}{ + backgroundcolor=\color{backcolour}, commentstyle=\color{codegreen}, + keywordstyle=\color{magenta}, + numberstyle=\tiny\color{codegray}, + stringstyle=\color{codepurple}, + basicstyle=\ttfamily\footnotesize, + breakatwhitespace=false, + breaklines=true, + captionpos=b, + keepspaces=true, + numbers=left, + numbersep=5pt, + showspaces=false, + showstringspaces=false, + showtabs=false, + tabsize=2 +} + +% \usepackage[garamond]{mathdesign} + +% \usepackage[letterpaper,left=1in,right=1in,top=1in,bottom=1in]{geometry} + +% packages i use in essentially every document +\usepackage{graphicx} +\usepackage{enumerate} + +% packages i use in many documents but leave off by default +\usepackage{amsmath}%}, amsthm, amssymb} +\DeclareMathOperator*{\argmin}{arg\,min} % thin space, limits underneath in displays +\DeclareMathOperator*{\argmax}{arg\,max} % thin space, limits underneath in displays + + +\usepackage{subcaption} +% import and customize urls +% \usepackage[usenames,dvipsnames]{color} +% \usepackage[breaklinks]{hyperref} + +\hypersetup{colorlinks=true, linkcolor=black, citecolor=black, filecolor=blue, + urlcolor=blue, unicode=true} + +% add bibliographic stuff +\usepackage[american]{babel} +\usepackage{csquotes} +\usepackage[natbib=true, style=apa, sortcites=true, backend=biber]{biblatex} 
+\addbibresource{Bibliography.bib} +\DeclareLanguageMapping{american}{american-apa} + +\defbibheading{secbib}[\bibname]{% + \section*{#1}% + \markboth{#1}{#1}% + \baselineskip 14.2pt% + \prebibhook} + +\def\citepos#1{\citeauthor{#1}'s (\citeyear{#1})} +\def\citespos#1{\citeauthor{#1}' (\citeyear{#1})} +\newcommand\TODO[1]{\textsc{\color{red} #1}} + +% I've gotten advice to make this as general as possible to attract the widest possible audience. +\title{Misclassification in Automated Content Analysis Causes Bias in Regression. Can We Fix It? Yes We Can!} + +\shorttitle{Automated Content Misclassification} + +\authorsnames[1,2,3]{Nathan TeBlunthuis, Valerie Hase, Chung-hong Chan} + +\authorsaffiliations{{{School of Information, University of Michigan},{Department of Communication Studies, Northwestern University}}, {Department of Media and Communication, LMU Munich}, {GESIS - Leibniz-Institut für Sozialwissenschaften}} +\leftheader{TeBlunthuis, Hase \& Chan} + +\keywords{ +Automated Content Analysis; Machine Learning; Classification Error; Attenuation Bias; Simulation; Computational Methods; Big Data; AI +} + +\abstract{ + + +We show how automated classifiers (ACs), even biased ACs without high accuracy, can be statistically useful in communication research. +These classifiers, often built via supervised machine learning (SML), can categorize large, statistically powerful samples of data ranging from text to images and video, and have become widely popular measurement devices in communication science and related fields. +Despite this popularity, even highly accurate classifiers make errors that cause misclassification bias and misleading results in downstream analyses—unless such analyses account for these errors. +As we show in a systematic literature review of SML applications, +communication scholars largely ignore misclassification bias. 
+In principle, existing statistical methods can use ``gold standard'' validation data, such as that created by human annotators, to correct misclassification bias and produce consistent estimates. +We introduce and test such methods, including a new method we design and implement in the R package \texttt{misclassificationmodels}, via Monte-Carlo simulations designed to reveal each method's limitations. Based on our results, we provide recommendations for correcting misclassification bias. In sum, automated classifiers, even those below common accuracy standards or making systematic misclassifications, can be useful for measurement with careful study design and appropriate error correction methods. +} +\begin{document} +\maketitle +%\section{Introduction} + + +\emph{Automated classifiers} (ACs) based on supervised machine learning (SML) have rapidly gained popularity +as part of the \emph{automated content analysis} toolkit in communication science \citep{baden_three_2022}. With ACs, researchers can categorize large samples of text, images, video or other types of data into predefined categories \citep{scharkow_thematic_2013}. Studies for instance use SML-based classifiers to study frames \citep{burscher_teaching_2014}, tonality \citep{van_atteveldt_validity_2021}, %even ones as seemingly straightforward as sentiment \citep{van_atteveldt_validity_2021}, toxicity \citep{fortuna_toxic_2020} +or civility \citep{hede_toxicity_2021} in news media texts or social media posts. +%and institutional frameworks \citep{rice_machine_2021} + +% TODO: restore citation to fortuna_toxic_2020 below +However, there is increasing concern about the validity of automated content analysis for studying theories and concepts from communication science \citep{baden_three_2022, hase_computational_2022}. We add to this debate by analyzing \emph{misclassification bias}---how misclassifications by ACs distort statistical findings—unless correctly modeled \citep{fong_machine_2021}. 
Research areas where ACs have the greatest potential—e.g., content moderation, social media bots, affective polarization, or radicalization—are haunted by the specter of methodological questions related to misclassification bias \citep{rauchfleisch_false_2020}: How accurate must an AC be to measure a variable? Can an AC built for one context be used in another \citep{burscher_using_2015,hede_toxicity_2021}? Is comparing automated classifications to some external ground truth sufficient to claim validity? How do biases in AC-based measurements affect downstream statistical analyses \citep{millimet_accounting_2022}? + +%Knowing that high classification accuracy limits the risks of misleading inference, careful researchers might use only ACs with excellent predictive performance. + +Our study begins with a demonstration of misclassification bias in a real-world example based on the Perspective toxicity classifier. +Next, we provide a systematic literature review of \emph{N} = 48 studies employing SML-based text classification. +Although communication scholars have long scrutinized related questions about manual content analysis for which they have recently proposed statistical corrections \citep{bachl_correcting_2017, geis_statistical_2021}, misclassification bias in automated content analysis is largely ignored. +Our review demonstrates a troubling lack of attention to the threats ACs introduce and virtually no mitigation of such threats. As a result, in the current state of affairs, researchers are likely to either draw misleading conclusions from inaccurate ACs or avoid ACs in favor of costly methods such as manually coding large samples \citep{van_atteveldt_validity_2021}. + +Our primary contribution, an effort to rescue ACs from this dismal state, is to \emph{introduce and test methods for correcting misclassification bias} \citep{carroll_measurement_2006, buonaccorsi_measurement_2010, yi_handbook_2021}. 
We consider three recently proposed methods: \citet{fong_machine_2021}'s generalized method of moments calibration method, \citet{zhang_how_2021}'s pseudo-likelihood models, and \citet{blackwell_unified_2017-1}'s application of imputation methods. To overcome these methods' limitations, we draw a general likelihood modeling framework from the statistical literature on measurement error \citep{carroll_measurement_2006} and tailor it to the problem of misclassification bias. Our novel implementation is the experimental R package \texttt{misclassificationmodels}.\footnote{The code for the experimental package can be found here: \url{https://osf.io/pyqf8/?view_only=c80e7b76d94645bd9543f04c2a95a87e}.} + + We test these four error correction methods and compare them against ignoring misclassification (the naïve approach) and refraining from automated content analysis by only using manual coding (the feasible approach). We use Monte Carlo simulations to model four prototypical situations identified by our review: Using ACs to measure either (1) an independent or (2) a dependent variable where the classifier makes misclassifications that are either (a) easy to correct (when an AC is unbiased and misclassifications are uncorrelated with covariates i.e., \emph{nonsystematic misclassification}) or (b) more difficult (when an AC is biased and misclassifications are correlated with covariates i.e., \emph{systematic misclassification}). +%The more difficult cases are important. +%As the real-data example we provide demonstrates, even modest biases in very accurate ACs can cause misleading statistical findings. + +% Such biases can easily result when classifier errors affect human behavior, such as that of social media moderators \maskparencite{teblunthuis_effects_2021}. 
Studies using classifiers from APIs that are also used in sociotechnical systems may therefore be particularly prone to differential error, which can cause misleading statistics even when classification accuracy is high. + +% Our Supplementary Materials present numerous extensions of these scenarios. We show that none of the existing error correction methods are effective in all scenarios. +%— multiple imputation fails in scenario 2; GMM calibration fails in scenario 1b and is not designed for scenario 2; and the pseudo-likelihood method fails in scenario 1 and in scenario 2b. When correctly applied, our likelihood modeling is the only correction method recovering the true parameters in all scenarios. %We provide our implementation as an R package. + +% , and our approach based on maximum likelihood methods \citep{carroll_measurement_2006} . + + %By doing so, we follow a handful of recent studies in which social scientists have used samples of human-labeled \emph{validation data} to account for misclassification by automated classifiers. + + % This paragraph is likely to get cut, but it's useful so that we have a working outline: In what follows, we begin with an overview of automated content analysis to describe how AC-based measures can affect downstream analyses and how these errors thus threaten progress in automated text classification often used in the field of Computational Social Science (CSS). We substantiate our claims via a systematic literature review of \emph{N}=49 empirical studies employing SML for classification (see \nameref{appendix:lit.review} for details). + + +% Although the methods above are all effective in bivariate least squares regression when an AC is used to measure a covariate, validation data are error-free, and measurement error is \emph{nondifferential} (conditionally independent of the outcome given other covariates), +% these methods all have limitations in more general cases. 
Below, we present simulated scenarios in which each of these methods fails to recover the true parameters. + +% so long as the coders' errors are conditionally independent given observable variables. + +% In our discussion section, we provide detailed recommendations based on our literature review and our simulations. +According to our simulations, even biased classifiers without high predictive performance can be useful in conjunction with appropriate validation data and error correction methods. +As a result, we are optimistic about the potential of ACs and automated content analysis for communication science and related fields—if researchers correct for misclassification. +Current practices of ``validating'' ACs by making misclassification rates transparent via metrics such as the F1 score, however, provide little safeguard against misclassification bias. + +In sum, we make a methodological contribution by introducing the often-ignored problem of misclassification bias in automated content analysis, testing error correction methods to address this problem via Monte Carlo simulations, and introducing a new method for error correction. +%The required assumptions for error correction methods are no more difficult than those already commonly adopted in traditional content analyses—and much more reasonable than the current default approach. +%This method can succeed where others fail, is easily applied by experienced regression modelers, and is straightforward to extend. +Profoundly, we conclude that automated content analysis will progress not only---or even primarily---by building more accurate classifiers but by rigorous human annotation and statistical error modeling. + +\section{Why Misclassification is a Problem: an Example Based on the Perspective API} + +There is no perfect AC. All ACs make errors. 
+This inevitable misclassification causes bias in statistical inference \citep{carroll_measurement_2006, scharkow_how_2017}, leading researchers to make both type-1 (false discovery) and type-2 errors (failure to reject the null) in hypotheses tests. To illustrate the problematic consequences of this misclassification bias, we focus on real-world data and a specific research area in communication research: detecting and understanding harmful social media content. Communication researchers often employ automated tools such as the Perspective toxicity classifier \citep{cjadams_jigsaw_2019} to detect toxicity in online content \citep[e.g.,][]{hopp_social_2019, kim_distorting_2021, votta_going_2023}. +As shown next, however, relying on toxicity scores created by ACs such as the Perspective API as (in-)dependent variables produces different results than using measurements created via manual annotation. + +To illustrate this, we use the Civil Comments dataset released in 2019 by Jigsaw, the Alphabet corporation subsidiary behind the Perspective API. Methodological details on the data and our example are available in Appendix \ref{appendix:perspective}. The dataset has \Sexpr{f(dv.example[['n.annotated.comments']])} English-language comments made on independent news sites. It also includes manual annotations of each comment concerning its toxicity (\emph{toxicity}), whether it discloses aspects of personal identity like race or ethnicity \emph{(identity disclosure)}, and the number of likes it received \emph{(number of likes)}. +%As obtaining manual annotations for 448,000 comments is impractical for all but the most well-resourced research teams, our subsequent analyses rely on the full dataset and, as a more realistic and feasible approach, a smaller random sample of \Sexpr{f(iv.sample.count)} (\Sexpr{format.percent(iv.sample.prop)}) manually annotated comments. 
+ +In addition to manual annotations of each comment, we obtained AC-based toxicity classifications from the Perspective API in November 2022. Perspective's toxicity classifier performs very well, with an accuracy of \Sexpr{format.percent(iv.example[['civil_comments_accuracies']][['toxicity_acc']])} and an F1 score of \Sexpr{round(iv.example[['civil_comments_f1s']][['toxicity_f1']],2)}. Nevertheless, if we treat human annotations as the ground-truth, the classifier makes systematic misclassifications for it is modestly biased and disproportionately misclassifies comments disclosing racial or ethnic identity as toxic (Pearson's $\rho=\Sexpr{round(dv.example[['civil_comments_cortab']]['toxicity_error','race_disclosed'],2)}$). + +First, let us consider \emph{misclassification in an independent variable}. As an example, we use a logistic regression model to predict whether a comment contains \emph{identity disclosure} using \emph{number of likes}, \emph{toxicity}, and their interaction as independent variables. Although this is a toy example, it resembles a realistic investigation of how disclosing aspects of one's identity online relates to normative reception of one's behavior. +\begin{figure}[htbp!] +\centering +\begin{subfigure}{\linewidth} +<>= +p <- plot.civilcomments.iv.example() +print(p) +@ +\subcaption{\emph{Example 1}: Misclassification in an independent variable.\label{fig:real.data.example.iv}} +\end{subfigure} + +\begin{subfigure}{\linewidth} +<>= +p <- plot.civilcomments.dv.example() +print(p) +@ +\subcaption{\emph{Example 2}: Misclassification in a dependent variable. \label{fig:real.data.example.dv}} + +\end{subfigure} + +\caption{Bias through Misclassification: a Real-World Example Using the Perspective API and the Civil Comments Dataset. 
+%Figure \ref{fig:real.data.example.iv} compares a model using automatic toxicity classifications to a model using human toxicity annotations and shows that the 95\% confidence interval of the coefficient for likes contains 0.
%In Figure \ref{fig:real.data.example.dv}, a model predicting automatic toxicity classifications for toxicity detects a negative correlation between likes and toxicity that is not found when human annotations are used instead. A \Sexpr{format.percent(iv.sample.prop)} random sample of \Sexpr{f(iv.sample.count)} annotations does not provide sufficient statistical power to distinguish the false discovery from 0.
%In both examples, a random \Sexpr{format.percent(iv.sample.prop)} sample of \Sexpr{f(iv.sample.count)} annotations does not provide sufficient statistical power to distinguish the coefficient for likes from 0. Yet the methods we introduce can use this sample to model the misclassifications and obtain results close to those using the full dataset of annotations.
\label{fig:real.data.example}
}
\end{figure}
As shown in Figure \ref{fig:real.data.example.iv}, relying on AC-based toxicity classifications may lead researchers to reject a hypothesized direct relationship between likes and identity disclosure. Instead, the model suggests that their correlation is entirely mediated by toxicity.
%This is because the coefficient for likes is statistically indistinguishable from 0 and the coefficient for the interaction between likes and toxicity is positive and well-estimated.
In contrast, using human annotations would lead researchers to conclude that there is a subtle positive direct relationship between likes and identity disclosure. %Using a smaller sample of manually annotated data, as will often be more feasible due to limited resources, lacks sufficient statistical power to detect any such relationship. 
+%However, our method can use this sample of annotations to correct the bias introduced by Perspective's misclassifications while preserving enough statistical power to detect the direct relationship between likes and identity disclosure at the 95\% confidence level with estimates similar to those in the model using all \Sexpr{f(dv.example[['n.annotated.comments']])} annotations.
This demonstrates that even a very accurate AC can introduce type-2 error, i.e., researchers failing to reject a null hypothesis due to misclassification.

Second, let us consider \emph{misclassification in a dependent variable}. We now predict the \emph{toxicity} of a comment with \emph{number of likes}, \emph{identity disclosure} in a comment, and their interaction as independent variables.
As shown in Figure \ref{fig:real.data.example.dv}, using Perspective's classification of toxicity results in a small negative direct effect of likes. However, there is no detectable relationship when using manual annotations. As such, misclassification can also lead to type-1 error, i.e., false discovery of a nonzero relationship.

%The model using a more feasible sample of \Sexpr{format.percent(dv.sample.prop) } of manual annotations cannot rule out such a weak relationship.
%(the estimated effect using the AC is in the 95\% confidence interval), but our error correction method using this sample and Perspective's automatic classifications together can do so.

\section{Why Transparency about Misclassification Is Not Enough}

Although the Perspective API is no doubt accurate enough to be useful to content moderators, the example above demonstrates that this does not imply usefulness for social science \citep{grimmer_machine_2021-1}.
Machine learning takes the opposite position on the bias-variance trade-off from conventional statistics and achieves high predictiveness at the cost of unbiased inference \citep{breiman_statistical_2001}.
As a growing body of scholarship critical of the hasty adoption of machine learning in criminal justice, healthcare, or content moderation demonstrates,
ACs boasting high performance often have biases related to social categories \citep{barocas_fairness_2019}. Such biases in machine learning often result from non-representative training data and spurious correlations that neither reflect causal mechanisms nor generalize to different populations \citep{bender_dangers_2021}.
Much of this critique targets unjust consequences of these biases for individuals. Our example shows that these biases can also contaminate scientific studies using ACs as measurement devices. Even very accurate ACs can cause both type-1 and type-2 errors, which become more likely when classifiers are less accurate or more biased, or when effect sizes are small.

We argue that current common practices to address such limitations are insufficient. These practices assert validity by reporting classifier performance on manually annotated data quantified as metrics including accuracy, precision, recall, or the F1 score \citep{hase_computational_2022, baden_three_2022, song_validations_2020}.
These steps promote confidence in results by making misclassification transparent, but our example indicates that high predictiveness may not protect researchers from biases flowing downstream into statistical inferences.
Instead of practicing transparency and hoping not to be misled by misclassification bias, researchers can and should use validation data to correct misclassification bias.

% \citep{obermeyer_dissecting_2019, kleinberg_algorithmic_2018, bender_dangers_2021, wallach_big_2019, noble_algorithms_2018}.
%For example, \citet{hede_toxicity_2021} show that, when applied to news datasets, the Perspective API overestimates incivility related to topics such as racial identity, violence, and sex. 
+%These automatic classifications will likely introduce differential measurement error to a regression model of an outcome related to such topics.
%Although the effect sizes in these cases are rather subtle and would not be detectable in smaller datasets, such small effects commonly found using large datasets can easily result from subtle biases in observational study designs \citep{kaplan_big_2014}. Such small effect sizes may not appear practically or theoretically important, but note that the consequences of bias from automatic classification for coefficients in these examples (i.e., the interaction term in the first example and \emph{identity disclosure} in the second) are larger.
%Importantly, these errors are correctable using human annotations. Although this example required \Sexpr{iv.sample.count} annotations, a large number representing considerable effort, to consistently do so, this is a small fraction of the entire dataset.

These claims may be surprising because of the widespread misconception that misclassification causes only conservative bias (i.e., bias towards null effects). This is believed because it is true for bivariate least squares regression when misclassifications are nonsystematic
\citep{carroll_measurement_2006, loken_measurement_2017, van_smeden_reflection_2020}.\footnote{Measurement error is \emph{classical} when $W = X + \xi$ because the variance of an AC's predictions is greater than the variance of the true value \citep{carroll_measurement_2006}.
Non-classical measurement error in an independent variable can be ``differential'' if it is not conditionally independent of the dependent variable given the other independent variables.
Measurement error in an independent variable can be nondifferential and not classical. This is called Berkson and has the form $X = W + \xi$. In general, Berkson measurement error is easier to deal with than classical error. 
It is hard to imagine how an AC would have Berkson errors as its predictions would then have lower variance than the training data. Following prior work, we thus do not consider Berkson errors \citep{fong_machine_2021, zhang_how_2021}. We call measurement error in the dependent variable \emph{systematic} when it is correlated with an independent variable.} As a result, researchers interested in a hypothesis of a statistically significant relationship may not consider misclassification an important threat to validity \citep{loken_measurement_2017}.

However, as shown in our example, misclassification bias can be anti-conservative \citep{carroll_measurement_2006, loken_measurement_2017, van_smeden_reflection_2020}. In regression models with more than one independent variable, or in nonlinear models, such as the logistic regression we used in our example, even nonsystematic misclassification can cause bias away from 0.
Moreover, systematic misclassification can bias inference in any direction.

%Researchers can check the assumption of nondifferential measurement error via graphical and statistical conditional independence tests \citep{carroll_measurement_2006, fong_machine_2021}.

%Users of ACs should be especially conscious of differential error due to the nonlinear behavior of many ACs \citep{breiman_statistical_2001}.
ACs designed in one context and applied in another are likely to commit systematic misclassification. For example, the Perspective API used to classify toxic content was developed for social media comments but performs much worse when applied to news data \citep{hede_toxicity_2021}. Systematic misclassification may also arise when an AC used for measurement shapes behavior in a sociotechnical system under study. As examples, the Perspective API is used for online forum moderation \citep{hede_toxicity_2021}, as is the ORES API for Wikipedia moderators \citep{teblunthuis_effects_2021}. 
+Misclassifications from such classifiers can be systematic because they have causal effects on outcomes related to moderation. + +%TODO: uncomment citation below +If ACs become standard measurement devices, for instance +%the LIWC dictionary to measure sentiment \citep{boukes_whats_2020}, +%\citep{dobbrick_enhancing_2021} +Google's Perspective API for measuring toxicity \citep[see critically][]{hosseini_deceiving_2017} or Botometer for classifying social media bots \citep[see critically][]{rauchfleisch_false_2020}, entire literatures may have systematic biases. +Even if misclassification bias is usually conservative, it can slow progress in a research area. Consider how \citet{scharkow_how_2017} argue that media's ``minimal effects'' on political opinions and behavior in linkage studies may be an artifact of measurement errors both in manual content analyses and self-reported media use in surveys. Conversely, if researchers selectively report statistically significant hypothesis tests, misclassification can introduce an upward bias in the magnitude of reported effect sizes and contribute to a replication crisis \citep{loken_measurement_2017}. + + +% First, we note that when the anticipated effect size is large enough, traditional content analysis of a random sample has the advantage over the considerable complexity of automated content analysis. +% ACs should be used when costs prohibit traditional content analysis of sample size sufficient to detect anticipated effect sizes, but where collective a relatively small sample of validation data is tractable. + +% When the data used to train an AC is not representative of the study population, as is the case with commercial APIs or other black-box classifiers, this increases the risk of differential measurement error, which can introduce extremely misleading forms of statistical bias. Even this form of error can be addressed. 
+ + +% Therefore, we recommend reporting (and preregistering) at least two aforementioned corrective methods in addition to uncorrected estimates. When machine learning classification is used for an independent variable, we recommend multiple imputation because it is robust to differential error and it simple to implement. However, our simulations show that multiple imputation does not work well when machine learning classification is used for the dependent variable. Greater care may be required if measurement error may be differential, because specifying the error model may open many degrees of research freedom and plausible error moe +\section{Quantifying the Problem: Error Correction Methods in SML-based Text Classification} + +% In traditional content analysis, humans use their judgement to classify messages, and automated content analysis uses computers as an instrument to + +% % can be defined either as a research approach or as an instrument. + +% In this paper, automated content analysis is defined as a research approach, which is a sub-type of content analysis for +% In contrast to manual content analysis, the difference is that the instrument used to code messages shifts from human judgment to computer algorithms \citep{scharkow2017content}. These computer algorithms, which can also be confusingly defined as ``automated content analysis" in the instrumental sense, are called automated coding techniques (versus manual coding techniques) in this paper. + + +% Social scientists have long recognized that measurement error can be an important methodological concern, but this concern has often been neglected \citep{schwartz_neglected_1985}. + + +% There have been several papers outlining what automated coding techniques are in the "toolbox" of communication researchers (key papers are \citep{scharkow2017content} and \citep{boumans:2015:tst}). +% Unsupervised and supervised machine learning procedures are deployed for coding. 
+% There has been discussion on the best practices for deploying unsupervised machine learning for communication research \citep{maier:2018:ALT}. +% This paper is going to focus only on classification. +% Researchers have raised concerns about validity issues of the approach \citep{scharkow2017content}. And by definition, the coding made by this technique is an imperfect surrogate of manual coding \citep{boumans:2015:tst}. When machine-classified surrogates are used in regression analyses for ``making replicable and valid inferences from texts", measurement errors are introduced \citep{fong_machine_2021}. A formal mathematical definition of these measurement errors is available later. + +% In the next section, all communication research studies with SML are reviewed to show how researchers deals with these measurement errors. + +% Furthermore, human classifiers also make errors and none of the prior methods consider how errors in the validation data can bias statistical results \citep{geis_statistical_2021, song_validations_2020, bachl_correcting_2017, scharkow_how_2017}. + + % Changeme to bring back citations after ICA + +%Content analysis focuses on ``\emph{making replicable and valid inferences from texts (or other meaningful matter) to the contexts of their use}'' \citep[p. 24, emphasis in original]{krippendorff_content_2018}. +To understand how social scientists, including communication scholars, engage with the problem of misclassification in automated content analysis, +%SML classifiers enable researchers to inexpensively measure categorical variables in large data sets. This promises to be useful for study designs requiring large samples such as to infer effect sizes smaller than would be possible using smaller samples humans could feasibly classify. +%But are scholars aware that misclassification by ACs poses threats to the validity of downstream analyses? 
we conducted a systematic literature review of studies using supervised machine learning (SML) for text classification (see Appendix \ref{appendix:lit.review} in our Supplement for details).\footnote{Automated content analysis includes a range of methods both for assigning content to predefined categories (e.g., dictionaries) and for assigning content to unknown categories (e.g., topic modeling) \citep{grimmer_text_2013, oehmer-pedrazzi_automated_2023}. While we focus on SML, our arguments extend to other approaches such as dictionary-based classification and even beyond the specific context of text classification.}
Although such issues in the context of manual content analysis have attracted much debate \citep{bachl_correcting_2017}, this is less true for misclassification by newly popular automatic classifiers.
Our sample consists of studies identified by similar reviews on automated content analysis \citep{baden_three_2022, hase_computational_2022, junger_unboxing_2022, song_validations_2020}. Our goal is not to comprehensively review all SML studies
%\footnote{In fact, our review likely underestimates the use of the method, as we focused on text-based SML methods in the social science domain employed for empirical analyses.}
but to provide a picture of common practices, with an eye toward awareness of misclassification and its statistical implications.

We identified a total of 48 empirical studies published between 2013 and 2021, more than half of which were published in communication journals. Studies used SML-based text classification for purposes such as to measure frames \citep{opperhuizen_framing_2019} or topics \citep{vermeer_online_2020}. 
They often employed SML-based ACs to create dichotomous (50\%) or other categorical variables (23\%).\footnote{Metric variables were created in 35\% of studies, mostly via the non-parametric method by \citet{hopkins_method_2010}.} Of these empirical studies, many used SML-based ACs as independent variables (44\%) or dependent variables (40\%) in multivariate analyses, and 90\% reported univariate statistics such as proportions. +%— from the prevalence of topics in online news \citep{vermeer_online_2020} to incivility in social media posts \citep{su_uncivil_2018} —, + +Overall, our review reveals a \emph{lack of transparency when reporting SML-based text classification}, similar to that previously reported \citep{reiss_reporting_2022}: A large share of studies do not report important methodological decisions related to sampling and sizes of training and test sets (see Appendix \ref{appendix:lit.review}). This lack of transparency concerning model validation not only limits the degree to which researchers can evaluate studies, but also makes replicating such analyses to correct misclassification bias nearly impossible. Most important, our review finds that \emph{studies almost never reflected upon nor corrected misclassification bias}. According to our review, 85\% of studies reported metrics such as recall or precision, but only 19\% of studies explicitly stated that an AC misclassified texts which may introduce measurement error. Only a single article reported using error correction methods. To address the clear need for methods for understanding misclassification bias and correcting it, we now introduce and discuss existing methods to do so. 
+

%yi_handbook_2021,buonaccorsi_measurement_2010
\section{Addressing the Problem: Existing Approaches for Correcting Misclassification}
Statisticians have extensively studied measurement error (including misclassification), the problems it causes for statistical inference, and methods for correcting these problems \citep[see][]{carroll_measurement_2006, fuller_measurement_1987}.
We narrow our focus to three existing methods recently proposed for dealing with misclassification bias in the context of automated content analysis: \citet{fong_machine_2021}'s GMM calibration method, multiple imputation \citep{blackwell_unified_2017-1}, and \citet{zhang_how_2021}'s pseudo-likelihood model.\footnote{Statisticians have studied other methods including simulation extrapolation, Bayesian estimation, and score function methods. As we argue in Appendix \ref{appendix:other.methods}, these error correction methods are not advantageous when manually annotated data is available, as is often the case with ACs.}
%Measurement error is a vast and deep subject in statistics. We recommend \citet{carroll_measurement_2006} as a graduate-level textbook on the subject.

In the interest of clarity, we introduce some notation. Say we want to estimate a regression model $Y = B_0 + B_1 X + B_2 Z + \varepsilon$ where $X$ is an independent variable for which a small sample of manually annotated data $X^*$ and automated classifications $W$ are observed. Fully observed are $Z$, a second independent variable, and $Y$, the dependent variable.
To illustrate, in our first real-world example, $X$ is toxicity, $X^*$ are the civil comment annotations, $W$ are the Perspective API's toxicity classifications, $Z$ are likes, and $Y$ is identity disclosure.

Say the sample of annotated data $X^*$ is too small to convincingly test a hypothesis, but collecting additional annotations is too expensive. 
+In contrast, an AC can make classifications $W$ for the entire dataset but introduces misclassification bias. How can we correct this bias in an automated content analysis? + +\emph{Regression calibration} uses observable variables, including automated classifications $W$ and other variables measured without error $Z$, to approximate the true value of $X$ \citep{carroll_measurement_2006}. \citet{fong_machine_2021} propose a regression calibration procedure designed for SML that we refer to as \emph{GMM calibration} or GMM.\footnote{\citet{fong_machine_2021} describe their method within an instrumental variable framework, but it is equivalent to regression calibration, the standard term in measurement error literature.} For their calibration model, \citet{fong_machine_2021} use 2-stage least squares (2SLS). They regress the observed variables $Z$ and AC predictions $W$ onto the manually annotated data and then use the resulting model to approximate $X$ as $\hat{X}$. They then use the generalized method of moments (gmm) to combine estimates based on the approximated independent variable $\hat{X}$ and estimates based on the manually annotated data $X^*$. This method makes efficient use of manually annotated data and provides an asymptotic theory for deriving confidence intervals. The GMM approach does not make strong assumptions about the distribution of the outcome $Y$, but can be invalidated by systematic misclassification \citep{fong_machine_2021}. GMM, like other regression calibration techniques, is not designed to correct for misclassification in the outcome. + +\emph{Multiple imputation} (MI) treats misclassification as a missing data problem. It understands the true value of $X$ to be observed in manually annotated data $X^*$ and missing otherwise \citep{blackwell_unified_2017-1}. +%For example, the regression calibration step in \citet{fong_machine_2021}'s GMM method uses least squares regression to impute unobserved values of the covariate $X$. 
Indeed, \citet{carroll_measurement_2006} describe regression calibration when validation data are available as ``simply a poor person's imputation methodology'' (p. 70).
Like regression calibration, multiple imputation uses a model to infer likely values of possibly misclassified variables. The difference is that multiple imputation samples several (hence \emph{multiple} imputation) entire datasets filling in the missing data from the predictive probability distribution of $X$ conditional on other variables $\{W,Y,Z\}$, then runs a statistical analysis on each of these sampled datasets and pools the results of each of these analyses \citep{blackwell_unified_2017-1}. Note that $Y$ is included among the imputing variables, giving the MI approach the potential to address differential error. \citet{blackwell_unified_2017-1} claim that the MI method is relatively robust when it comes to small violations of the assumption of nondifferential error. Moreover, in theory, the MI approach can be used for correcting misclassifications both in independent and dependent variables.

\emph{``Pseudo-likelihood''} methods (PL)—even if not always explicitly labeled this way—are another approach for correcting misclassification bias. \citet{zhang_how_2021} proposes a method that approximates the error model using quantities from the AC's confusion matrix—the positive and negative predictive values in the case of a mismeasured independent variable and the AC's false positive and false negative rates in the case of a mismeasured dependent variable. Because quantities from the confusion matrix are neither data nor model parameters, \citet{zhang_how_2021}'s method is technically a ``pseudo-likelihood'' method. A clear benefit is that this method only requires summary quantities derived from manually annotated data, for instance via a confusion matrix. %We will discuss likelihood methods in greater depth in the presentation of our MLE framework below. 
+

\subsection{Proposing a Likelihood Modeling Approach to Correct Misclassification}

% This section basically translates Carroll et al. for a technically advanced 1st year graduate student.
We now elaborate on a new \emph{Maximum Likelihood Method} (MLE) we propose for correcting misclassification bias. Our method tailors \citet{carroll_measurement_2006}'s presentation of the general statistical theory of likelihood modeling for measurement error correction to the context of automated content analysis.\footnote{In particular see Chapter 8 (especially example 8.4) and Chapter 15 (especially Section 15.4.2).} The MLE approach deals with misclassification bias by maximizing a likelihood that correctly specifies an \emph{error model} of the probability of the automated classifications conditional on the true value and the outcome \citep{carroll_measurement_2006}.
In contrast to the GMM and the MI approach, which predict values of the mismeasured variable, the MLE method accounts for all possible values of the variable by ``integrating them out'' of the likelihood.
``Integrating out'' means adding possible values of a variable to the likelihood, weighted by the likelihood of the error model.

MLE methods have four advantages in the context of ACs. First, they are general in that they can be applied to any model with a convex likelihood including generalized linear models (GLMs) and generalized additive models (GAMs).
Second, assuming the model is correctly specified, MLE estimators are fully consistent whereas regression calibration estimators are only approximately consistent \citep{carroll_measurement_2006}. Practically, this means that MLE methods can have greater statistical efficiency and require less manually annotated data to make precise estimates.
%The MLE approach is conceptually different from the GMM one. The GMM approach first imputes likely values and then runs the main analysis on imputed values. 
By contrast, MLE approaches estimate—all in one step—the main analysis using the full dataset and the error model estimated using only the validation data \citep{carroll_measurement_2006}. +Third, the MLE approach is applicable both for correcting for misclassification in a dependent and an independent variable. +Fourth, and most important, this approach is effective when misclassification is systematic. + +%The idea is to use an \emph{error model} of the conditional probability of the automatic classifications given the true classifications and other variables on which automatic classifications depend. +%In other words, the error model estimates the conditional probability mass function of the automatic classifications. + +% When a variable is measured with error, this error introduces uncertainty. The overall idea of correcting an analysis with a mismeasured variable through likelihood modeling is to use + +%Including the error model in the likelihood effectively accounts for uncertainty of the true classifications and, assuming the error model gives consistent estimates of the conditional probability of the automatic classifications given the true values, is sufficient to obtain consistent estimates using MLE \citep{carroll_measurement_2006}. + +\subsubsection{When an Automated Classifier Predicts an Independent Variable} + +In general, if we want to estimate a model $P(Y|\Theta_Y, X, Z)$ for $Y$ given $X$ and $Z$ with parameters $\Theta_Y$, we can use AC classifications $W$ predicting $X$ to gain statistical power without introducing misclassification bias by maximizing ($\mathcal{L}(\Theta|Y,W)$), the likelihood of the parameters $\Theta = \{\Theta_Y, \Theta_W, \Theta_X\}$ in a joint model of $Y$ and the error model of $W$ \citep{carroll_measurement_2006}. 
+
The joint probability of $Y$ and $W$ can be factored into the product of three terms: $P(Y|X,Z,\Theta_Y)$, the model we want to estimate, $P(W|X,Y, \Theta_W)$, a model for $W$ having parameters $\Theta_W$, and $P(X|Z, \Theta_X)$, a model for $X$ having parameters $\Theta_X$.
Calculating these three conditional probabilities is sufficient to calculate the joint probability of the dependent variable and automated classifications and thereby obtain a consistent estimate despite misclassification. $P(W|X,Y, \Theta_W)$ is called the \emph{error model} and $P(X|Z, \Theta_X)$ is called the \emph{exposure model} \citep{carroll_measurement_2006}.

To illustrate, consider the regression model $Y=B_0 + B_1 X + B_2 Z + \varepsilon$, where $X$ is a discrete independent variable.
We can assume that the probability of $W$ follows a logistic regression model of $Y$, $X$ and $Z$ and that the probability of $X$ follows a logistic regression model of $Z$. In this case, the likelihood model below is sufficient to consistently estimate the parameters $\Theta = \{\Theta_Y, \Theta_W, \Theta_X\} = \{\{B_0, B_1, B_2\}, \{\alpha_0, \alpha_1, \alpha_2\}, \{\gamma_0, \gamma_1\}\}$.

\begin{align}
  \mathcal{L}(\Theta | Y, W) &= \prod_{i=0}^{N}\sum_{x} {P(Y_i| X_i, Z_i, \Theta_Y)P(W_i|X_i, Y_i, Z_i, \Theta_W)P(X_i|Z_i, \Theta_X)} \label{eq:covariate.reg.general}\\
  P(Y_i| X_i, Z_i, \Theta_Y) &= \phi(B_0 + B_1 X_i + B_2 Z_i) \\
  P(W_i| X_i, Y_i, Z_i, \Theta_W) &= \frac{1}{1 + e^{-(\alpha_0 + \alpha_1 Y_i + \alpha_2 X_i)}} \label{eq:covariate.logisticreg.w} \\
  P(X_i| Z_i, \Theta_X) &= \frac{1}{1 + e^{-(\gamma_0 + \gamma_1 Z_i)}}
\end{align}


\noindent where $\phi$ is the normal probability density function. Note that Equation \ref{eq:covariate.reg.general} models differential error (i.e., $Y$ is not independent of $W$ conditional on $X$ and $Z$) via a linear relationship between $W$ and $Y$. 
When error is nondifferential, the dependence between $W$ and $Y$ can be removed from Equations \ref{eq:covariate.reg.general} and \ref{eq:covariate.logisticreg.w}.

Calculating the three conditional probabilities in practice requires specifying models on which validity of the method depends.
This framework is very general and a wide range of probability models, such as generalized additive models (GAMs) or Gaussian process classification, may be used to estimate $P(W| X, Y, Z, \Theta_W)$ and $P(X|Z,\Theta_X)$ \citep{williams_bayesian_1998}.

\subsubsection{When an Automated Classifier Predicts a Dependent Variable}

We now turn to the case when an AC makes classifications $W$ that predict a discrete dependent variable $Y$.
In our second real-data example, $W$ are the Perspective API's toxicity classifications and $Y$ is the true value of toxicity.
This case is simpler than the case above where an AC is used to measure an independent variable $X$ because there is no need to specify a model for the probability of $X$.

If we assume that the probability of $Y$ follows a logistic regression model of $X$ and $Z$ and allow $W$ to be biased and to directly depend on $X$ and $Z$, then maximizing the following likelihood is sufficient to consistently estimate the parameters $\Theta = \{\Theta_Y, \Theta_W\} = \{\{B_0, B_1, B_2\},\{\alpha_0, \alpha_1, \alpha_2, \alpha_3\}\}$.

\begin{align}
  \mathcal{L}(\Theta|Y,W) &= \prod_{i=0}^{N} {\sum_{x}{P(Y_i | X_i, Z_i, \Theta_Y)P(W_i|X_i, Z_i, Y_i, \Theta_W)}} \label{eq:depvar.general}\\
  P(Y_i| X_i, Z_i, \Theta_Y) &= \frac{1}{1 + e^{-(B_0 + B_1 X_i + B_2 Z_i)}} \\
  P(W_i | Y_i, X_i, Z_i, \Theta_W) &= \frac{1}{1 + e^{-(\alpha_0 + \alpha_1 Y_i + \alpha_2 X_i + \alpha_3 Z_i)}} \label{eq:depvar.w}
\end{align}

If the AC's errors are conditionally independent of $X$ and $Z$ given $Y$, the dependence of $W$ on $X$ and $Z$ can be omitted from Equations \ref{eq:depvar.general} and \ref{eq:depvar.w}. 
+Additional details on the likelihood modeling approach available in Appendix \ref{appendix:derivation} of the Supplement. + + +\section{Evaluating Misclassification Models: Monte-Carlo Simulations} + +% \TODO{Create a table summarizing the simulations and the parameters.} + +We now present four Monte Carlo simulations (\emph{Simulations 1a}, \emph{1b}, \emph{2a}, and \emph{2b}) with which we evaluate existing methods (GMM, MI, PL) and our approach (MLE) for correcting misclassification bias. + +Monte Carlo simulations are a tool for evaluating statistical methods, including (automated) content analysis \citep[e.g.,][]{song_validations_2020,bachl_correcting_2017,geis_statistical_2021, fong_machine_2021,zhang_how_2021}. +They are defined by a data generating process from which datasets are repeatedly sampled. Repeating an analyses for each of these datasets provides an empirical distribution of results the analysis would obtain over study replications. Monte-carlo simulation affords exploration of finite-sample performance, robustness to assumption violations, comparison across several methods, and ease of interpretability \citep{mooney_monte_1997}. + +\subsection{Parameters of the Monte Carlo Simulations} + +In our simulations, we tested four error correction methods: \emph{GMM calibration} (GMM) \citep{fong_machine_2021}, \emph{multiple imputation} (MI) \citep{blackwell_unified_2017-1}, \emph{Zhang's pseudo-likelihood model} (PL) \citep{zhang_how_2021}, and our \emph{likelihood modeling} approach (MLE). We use the \texttt{predictionError} R package \citep{fong_machine_2021} for the GMM method, the \texttt{Amelia} R package for the MI approach, and our own implementation of \citet{zhang_how_2021}'s PL approach in R. +We develop our MLE approach in the R package \texttt{misclassificationmodels}. +For PL and MLE, we quantify uncertainty using the fisher information quadratic approximation. 
+ +In addition, we compare these error correction methods to two common approaches in communication science: the \emph{feasible} estimator (i.e., conventional content analysis that uses only manually annotated data and not ACs) +%and illustrates the motivation for using an AC in these scenarios—validation alone provide insufficient statistical power for a sufficiently precise hypothesis test. +and the \emph{naïve} estimator (i.e., using AC-based classifications $W$ as stand-ins for $X$, thereby ignoring misclassifications). According to our systematic review, the \emph{naïve} approach reflects standard practice in studies employing SML for text classification. + +We evaluate each of the six analytical approaches in terms of \emph{consistency} (whether the estimates of parameters $\hat{B_X}$ and $\hat{B_Z}$ have expected values nearly equal to the true values $B_X$ and $B_Z$), \emph{efficiency} (how precisely the parameters are estimated and how precision improves with additional data), and \emph{uncertainty quantification} (how well the 95\% confidence intervals approximate the range including 95\% of parameter estimates across simulations). +To evaluate efficiency, we repeat each simulation with different amounts of total observations, i.e., unlabeled data to be classified by an AC (ranging from \Sexpr{min(N.sizes)} to \Sexpr{max(N.sizes)} observations), and manually annotated observations (ranging from \Sexpr{min(m.sizes)} to \Sexpr{max(m.sizes)} +observations). Since our review indicated that ACs are most often used to create binary variables, we restrict our simulations to misclassifications related to a binary (in-)dependent variable. 
+ +%\begin{equation} +% Y= B_0^* + B_1^*W + B_2^*Z + \varepsilon^* = B_0^* + B_1^*(X + \xi) + B_2^*Z +%\label{mod:measerr.ols} +%\end{equation} + + +%These simulations are designed to verify that error correction methods from prior work are effective in ideal scenarios and to create the simplest possible cases where these methods are inconsistent. Showing how prior methods fail is instructive for understanding how our MLE approach does better both in these artificial simulations and in practical projects. + +\subsection{Four Prototypical Scenarios for our Monte Carlo Simulations} + +We simulate regression models with two independent variables ($X$ and $Z$). This sufficiently constrains our study's scope but the scenario is general enough to be applied in a wide range of research studies. +%Simulating studies with two covariates lets us study how measurement error in one covariate can cause bias in coefficient estimates of other covariates. +Whether the methods we evaluate below are effective or not depends on the conditional dependence structure among independent variables, the dependent variable $Y$, and automated classifications $W$. +This structure determines if systematic misclassifications in an independent variable cause differential error and if systematic misclassifications in a dependent variable should be modeled + \citep{carroll_measurement_2006}. +In Figure \ref{bayesnets}, we illustrate our scenarios via Bayesian networks representing the conditional dependence structure of variables \citep{pearl_fusion_1986}: +%In these figures, an edge between two variables indicates that they have a direct relationship. Two nodes that are not neighbors are statistically independent given the variables between them on the graph. For example, in Figure \ref{fig:simulation.1a}, the automatic classifications $W$ are conditionally independent of $Y$ given $X$ because all paths between $W$ and $Y$ contain $X$. 
This indicates that the model $Y=B_0 +B_1 W+ B_2 Z$ (the \emph{naïve estimator}) has non-differential error because the automatic classifications $W$ are conditionally independent of $Y$ given $X$. However, in Figure \ref{fig:simulation.1b}, there is an edge between $W$ and $Y$ to indicate that $W$ is not conditionally independent of $Y$ given other variables. Therefore, the naïve estimator has differential error. +We first simulate two cases where an AC measures an independent variable without (\emph{Simulation 1a}) and with differential error (\emph{Simulation 1b}). Then, we simulate using an AC to measure the dependent variable, either one with misclassifications that are uncorrelated (\emph{Simulation 2a}) or correlated with an independent variable (\emph{Simulation 2b}). GMM is not designed to correct misclassifications in dependent variables, so we omit this method in \emph{Simulations 2a} and \emph{2b}. + +\input{bayesnets.tex} + +\subsubsection{Misclassification in an Independent Variable (\emph{Simulations 1a} and \emph{1b})} + +We first consider studies with the goal of testing hypotheses about the coefficients $B_1$ and $B_2$ in a least squares regression: +\begin{equation} +Y=B_0 + B_1 X + B_2 Z + \varepsilon + \label{mod:true.ols} +\end{equation} +In our first real-data example, $Y$ was a discrete variable-whether a comment self-disclosed a racial or ethnic identity, $X$ was if a comment was toxic, and $Z$ was the number of likes. +In this simulated example, $Y$ is continuous variable, $X$ is a binary variable measured with an AC, and $Z$ is a normally distributed variable with mean 0 and standard deviation \Sexpr{sim1.z.sd} measured without error. +%The simulated example could represent a study of $Y$, the time until an social media account is banned, $X$ if the account posted a comment including toxicity, and $Z$ the account's reputation score. 
$X$ and $Z$ are negatively correlated because high-reputation accounts may be less likely to post comments including toxicity. + +%$Z$ can indicate if the message is in German or English, the two possible languages in the hypothetical study. +%Say that human content coders can observe $X$ perfectly, but each observation is so expensive that observing $X$ for a large sample is infeasible. +%Instead, the human coders can measure $X$ without error for a subsample of size $m << N$. +%To scale up content analysis, a SML-based AC makes predictions $W$ of $X$—for instance predicting if any comments from a social media user include toxicity. +Both simulations have a normally distributed dependent variable $Y$ and two binary independent variables $X$ and $Z$, which are balanced ($P(X)=P(Z)=0.5$) and correlated (Pearson's $\rho=\Sexpr{round(sim1a.cor.xz,2)}$). %Simulating balanced covariates serves simplicity so that accuracy is adequate to quantify the predictive performance of our simulated classifier. Simulating correlated covariates is helpful to study how misclassification in one variable affects parameter inference in other covariates. +To represent a study design where an AC is needed to obtain sufficient statistical power, $Z$ and $X$ can explain only \Sexpr{format.percent(sim1.R2)} of the variance in $Y$. +% TODO, bring back when these simulations are in the appendix. +%Additional simulations in appendix \ref{appendix:sim1.imbalanced} show results for variations of \emph{Simulation 1} with imbalanced covariates explaining a range of variances, different classifier accuracies, heteroskedastic misclassifications and deviance from normality in the an outcome $Y$. 
+ +In \emph{Simulation 1a} (Figure \ref{fig:simulation.1a}), we simulate an AC with \Sexpr{format.percent(sim1a.acc)} accuracy.\footnote{Classifier accuracy varies between our simulations because it is difficult to jointly specify classifier accuracy and the required correlations among variables and due to random variation between simulation runs. We report the median accuracy over simulation runs.} This reflects a situation where $X$ may be difficult to predict, but the AC, represented as a logistic regression model having linear predictor $W^*$, provides a useful signal. +We simulate nondifferential misclassification because $W=X+\xi$, $\xi$ is normally distributed with mean $0$, and $\xi$ and $W$ are conditionally independent of $Y$ given $X$ and $Z$. + +%($P(\xi| Y,X,Z) = P(\xi|X,Z)$). +%For simplicity, the AC's errors $\xi$ are independent of all other variables. In Appendix F, we demonstrate that the methods we study perform similarly when $\xi$ is heteroskedastic, correlated with $X$ or $Z$. Note that heteroskedasticity does not imply differential error. Suppose, for example, that AC's accuracy predicting rule violations $W$ depends on language $Z$. As a result, $\xi$ and $Z$ are correlated, and since time-till-ban $Y$ and repuation $Z$ are also correlated, $\xi$ is in turn correlated with $Y$. Despite this, the error in Model \ref{mod:measerr.ols} remains nondifferential, because $Y$ is conditionally independent of $\xi$ given $Z$ and $X$. + +% Measuring $X$ is expensive, perhaps requiring trained human annotators, but an automated classifier can predict $X$ with We choose this level of accuracy to reflect a situation where $X$ may be difficult to predict + +% The classifier, perhaps a proprietary API, has unobservable features $K$. The classifier's predictions $W=X + \xi$ are unbiased—the errors $\xi$ are not correlated with $Y$,$X$ or $Z$. 
Figure \ref{fig:simulation.1} shows a Bayesian network representing \emph{Simulation 1}'s conditional dependencies of $Z$, $Y$, $K$, $Z$ and $W$ as a directed acyclic graph (DAG). + +% \emph{Simulation 2} extends \emph{Simulation 1} by making the automated classifier classification errors $\xi$ that are correlated with $Y$ even after accounting for $Z$ and $x$. + +In our first real-data example, the Perspective API predicted comment toxicity, which was an independent variable of a regression model in which racial/ethnic identity disclosure was the dependent variable. The API disproportionately misclassified as toxic comments disclosing such identities which toxic which resulted in differential misclassification. + +In \emph{Simulation 1b} (Figure \ref{fig:simulation.1b}), we test how error correction methods can handle differential error by making AC predictions similarly depend on the dependent variable $Y$. +This simulated AC has $\Sexpr{format.percent(sim1b.acc)}$ accuracy and makes predictions $W$ that are negatively correlated with the residuals of the linear regression of $X$ and $Z$ on $Y$ (Pearson's $\rho=\Sexpr{round(sim1b.cor.resid.w_pred,2)}$). As a result, this AC makes fewer false-positives and more false-negatives at greater levels of $Y$. + +%Although the false-negative rate of the AC is \Sexpr{format.percent(sim1b.fnr)} overall, when $Y<=0$ the false-negative rate is only \Sexpr{format.percent(sim1b.fnr.y0)}, but when $Y>=0$ it rises to \Sexpr{format.percent(sim1b.fnr.y1)}. +%Figure \ref{fig:simulation.1b} shows a Bayesian network representing conditional dependencies of $Z$, $Y$, $Z$ and $W$ in \emph{Simulation 1b}. +%This is prototypical of an AC that influences behavior in a system under study. + + +% False negatives may cause delays in moderation increasing $Y$ (time-until-ban), while false-positives could draw moderator scrutiny and cause them to issue speedy bans. 
+% This mechanism is not mediated by observable variables such as reputation ($Z$) or the true use of toxicity ($X$). Therefore, we expect differential error. + +\subsubsection{Measurement Error in a Dependent Variable (Simulation 2a and 2b)} + +We then simulate using an AC to measure the dependent variable $Y$, a binary independent variable $X$, and a continuous independent variable $Z$. The goal is to estimate $B_1$ and $B_2$ in the following logistic regression model: + +\begin{equation} + P(Y) = \frac{1}{1 + e^{-(B_0 + B_1 X + B_2 Z)}} + \label{mod:measerr.logit} +\end{equation} + +%As was true for $X$ in \emph{Simulation 1}, human coders can observe $Y$ but doing so may be costly. We may thus instead use an AC that makes predictions $W = Y + \xi$ . + +\noindent In our second real-data example, $Y$ is if a comment contains toxicity, $X$ is if the comment discloses racial or ethnic identity, and $Z$ is the number of times the comment was ``liked''. + +In \emph{Simulation 2a} (see Figure \ref{fig:simulation.2a}) and \emph{Simulation 2b} (see Figure \ref{fig:simulation.2b}) $X$ and $Z$ are, again, balanced ($P(X)=P(Z)=0.5$) and correlated + (Pearson's $\rho=\Sexpr{round(sim2a.cor.xz,2)}$). +%As in \emph{Simulation 1} we simulate scenarios where an AC is of practical use to estimate subtle relationships. +In \emph{Simulation 1}, we chose the variance of the normally distributed outcome given our chosen coefficients $B_X$ and $B_Z$, but this is not appropriate for \emph{Simulation 2}'s logistic regression. We therefore choose, somewhat arbitrarily, $B_X=\Sexpr{sim2.Bx}$ and $B_Z=\Sexpr{sim2.Bz}$. We again simulate ACs with moderate predictive performance. +The AC in \emph{Simulation 2a} is \Sexpr{format.percent(sim2a.AC.acc)} accurate and the AC in \emph{Simulation 2b} is \Sexpr{format.percent(sim2b.AC.acc)} accurate. In \emph{Simulation 2a}, the misclassifications are nonsystematic as $\xi$ has mean $0$ and is independent of $X$ and $Z$. 
However, in \emph{Simulation 2b} the misclassifications $\xi$ are systematic and correlated with $Z$ (Pearson's $\rho = \Sexpr{round(sim2b.error.cor.z,2)}$). + +% Such differential error may arise if social media users are adept at skirting the rules without violating them. Such members are both likely to be warned by moderators and to leave comments misclassified as toxic. + +\section{Simulation Results} + +For each method, we visualize the consistency, efficiency, and the accuracy of uncertainty quantification of estimates across prototypical scenarios. +%Our main results are presented as plots visualizing the consistency (i.e., does the method, on average, recover the true parameter?), efficiency (i.e., how precise are estimates and does precision improve as sample size increases?), and the accuracy of uncertainty quantification of each method in each scenario. +For example, Figure \ref{fig:sim1a.x} visualizes results for \emph{Simulation 1a}. Each subplot shows a simulation with a given total sample size (No. observations) and a given sample of manually annotated observations (No. manually annotated observations). +To assess a method's consistency, we locate the expected value of the point estimate across simulations with the center of the black circle. As an example, see the leftmost column in the bottom-left subplot of Figure \ref{fig:sim1a.x}. For the naïve estimator, the circle is far below the dashed line indicating the true value of $B_X$. Here, ignoring misclassification causes bias toward 0 and the estimator is inconsistent. To assess a method's efficiency, we mark the region in which point estimate falls in 95\% of the simulations with black lines. +The black lines in the bottom-left subplot of Figure \ref{fig:sim1a.x} for example show that the feasible estimator, which uses only manually annotated data, is consistent but less precise than estimates from error correction methods. 
To assess each method's uncertainty quantification, compare the gray lines, which show the expected value of a method's approximate 95\% confidence intervals across simulations, to the neighboring black lines. + The \emph{PL} column in the bottom-left subplot of Figure \ref{fig:sim1a.x} for instance shows that the method's 95\% confidence interval is biased towards 0 when the number of manually annotated observations is smaller. This is to be expected because the PL estimator does not account for uncertainty in misclassification probabilities estimated using the sample of manually annotated observations. + %Now that we have explained how to interpret our plots, we unpack them for each simulated scenario. + +\subsection{Simulation 1a: Nonsystematic Misclassification of an Independent Variable} + +Figure \ref{fig:sim1a.x} illustrates \emph{Simulation 1a}. Here, the naïve estimator is severely biased in its estimation of $B_X$. +Fortunately, error correction methods (GMM, MI, MLE) produce consistent estimates and acceptably accurate confidence intervals. +Notably, the PL method is inconsistent and considerable bias remains when the sample of annotations is much smaller than the entire dataset. This is likely due to $P(X=x)$ missing from the PL estimation.\footnote{Compare Equation \ref{eq:mle.covariate.chainrule.4} in Appendix \ref{appendix:derivation} to Equations 24-28 from \citet{zhang_how_2021}.} Figure +\ref{fig:sim1a.x} also shows that MLE and GMM estimates become more precise in larger datasets. +This is is less pronounced for MI estimates, indicating that +GMM and MLE use automated classifications more efficiently than MI. + +\begin{figure} +<>= +p <- plot.simulation.iv(plot.df.example.1, iv='x') +grid.draw(p) +@ +\caption{Simulation 1a: Nonsystematic misclassification of an independent variable. Error correction methods, except for PL, obtain precise and accurate estimates given sufficient manually annotated data. 
\label{fig:sim1a.x}} +\end{figure} +%It is important to correct misclassification error even when an AC is only used as a statistical control \citep[for example]{weld_adjusting_2022}, because when a covariate $Z$ is correlated with $X$, misclassifications of $X$ cause bias in the \emph{naïve} estimates of $B_Z$, the regression coefficient of $Z$ on $Y$. As Figure \ref{fig:sim1a.z} in Appendix \ref{appendix:main.sim.plots} shows, methods that effectively correct estimates of $X$ in \emph{Simulation 1a} also correct estimates of $B_Z$. +In brief, when misclassifications cause nondifferential error, MLE and GMM are effective, efficient, and provide accurate uncertainty quantification. They complement each other due to different assumptions: MLE depends on correctly specifying the likelihood but its robustness to incorrect specifications is difficult to analyze \citep{carroll_measurement_2006}. The GMM approach depends on the exclusion restriction instead of distributional assumptions \citep{fong_machine_2021}. +MLE's advantage over GMM come from the relative ease with which it can be extended to for instance generalized linear models (GLMs) or generalized additive models (GAMs). +In cases similar to \emph{Simulation 1a}, we therefore recommend both the GMM and an appropriately specified MLE approach to correct for misclassification. + +\subsection{Simulation 1b: Systematic Misclassification of an Independent Variable} + +Figure \ref{fig:sim1b.x} illustrates \emph{Simulation 1b}. Here, systematic misclassification gives rise to differential error and creates more extreme misclassification bias that is more difficult to correct. +As Figure \ref{fig:sim1b.x} shows, the naïve estimator is opposite in sign to the true parameter. +Of the four methods we test, only the MLE and the MI approach provide consistent estimates. This is expected because they use $Y$ to adjust for misclassifications. 
The bottom row of Figure \ref{fig:sim1b.x} shows how the precision of the MI and MLE estimates increase with additional observations. As in \emph{Simulation 1a}, MLE uses this data more efficiently than MI does. However, due to the low accuracy and bias of the AC, additional unlabeled data improves precision less than one might expect. Both methods provide acceptably accurate confidence intervals. Figure \ref{fig:sim1b.z} in Appendix \ref{appendix:main.sim.plots} shows that, as in \emph{Simulation 1a}, effective correction for misclassifications of $X$ is required to consistently estimate $B_Z$, the coefficient of $Z$ on $Y$. Inspecting results from methods that do not correct for differential error is useful for understanding their limitations. When few annotations of $X$ are observed, GMM is nearly as bad as the naïve estimator. PL is also visibly biased. Both improve when a greater proportion of the data is labeled since they combine AC-based estimates with the feasible estimator. + +In sum, our simulations suggest that the MLE approach is superior in conditions of differential error. Although estimations by the MI approach are consistent, the method's practicality is limited by its inefficiency. + +\begin{figure} +<>= +p <- plot.simulation.iv(plot.df.example.2, iv='x') +grid.draw(p) +@ +\caption{Simulation 1b: Systematic misclassification of an independent variable. Only the the MLE approach obtains consistent estimates of $B_X$. \label{fig:sim1b.x}} +\end{figure} + +\subsection{Simulation 2a: Nonsystematic Misclassification of a Dependent Variable} + +Figure \ref{fig:sim2a.x} illustrates \emph{Simulation 2a}: nonsystematic misclassification of a dependent variable. This also introduces bias as evidenced by the naïve estimator's inaccuracy. Our MLE method +is able to correct this error and provide consistent estimates. +Surprisingly, the MI estimator is inconsistent and does not improve with more human-labeled data. 
+%Note that the GMM estimator is not designed to correct misclassifications in the outcome. +The PL approach is also inconsistent, especially when only few of all observations are annotated manually. It is closer to recovering the true parameter than the MI or the naïve estimator, but provides only modest improvements in precision compared to the feasible estimator. +It is clear that the precision of the MLE estimator improves with more observations data to a greater extent than the PL estimator. +When the amount of human-labled data is low, inaccuracies in the 95\% confidence intervals of both the MLE and PL become visible due to the poor finite-sample properties of the quadradic approximation for standard errors. +%As before, PL's inaccurate confidence intervals are due to its use of finite-sample estimates of automated classification probabilities. +%In both cases, the poor finite-sample properties of the fischer-information quadratic approximation contribute to this inaccuracy. In Appendix \ref{appendix:sim1.profile}, we show that the MLE method's inaccuracy vanishes when using the profile-likelihood method instead. + + In brief, our simulations suggest that MLE is the best error correction method when random misclassifications affect the dependent variable. It is the only consistent option and more efficient than the PL method, which is almost consistent. + + \begin{figure} +<>= +#plot.df <- +p <- plot.simulation.dv(plot.df.example.3,'z') +grid.draw(p) +@ +\caption{Simulation 2a: Nonsystematic misclassification of a dependent variable. Only the MLE approach obtains consistent estimates. \label{fig:sim2a.x}} +\end{figure} + +\subsection{Simulation 2b: Systematic Misclassification of a Dependent Variable} + +\begin{figure} +<>= + +p <- plot.simulation.dv(plot.df.example.4,'z') +grid.draw(p) +@ +\caption{Simulation 2b: Systematic misclassification of a dependent variable. Only the MLE approach obtains consistent estimates. 
\label{fig:sim2b.x}} +\end{figure} + +In \emph{Simulation 2b}, misclassifiations of the dependent variable $Y$ are correlated with an independent variable $X$. As shown in Figure \ref{fig:sim2b.x}, this causes dramatic bias in the naïve estimator. +Similar to \emph{Simulation 2a}, MI is inconsistent. PL is also inconsistent because it does not account for $Y$ when correcting for misclassifications. +As in \emph{Simulation 1b}, our MLE method obtains consistent estimates, but only does much better than the feasible estimator when the dataset is large. +Figure \ref{fig:sim2b.z} in Appendix \ref{appendix:main.sim.plots} shows that the precision of estimates for the coefficient for $X$ improves with additional data to a greater extent. As such, this imprecision is mainly in estimating the coefficient for $Z$, the variable correlated with misclassification. + +Therefore, our simulations suggest that MLE is the best method when misclassifications in the dependent variable are correlated with an independent variable. + +\section{Transparency about Misclassification Is Not Enough—We Have To Fix It! Recommendations for Automated Content Analysis} + +``Validate, Validate, Validate'' \citep[p. 269]{grimmer_text_2013} is one of the guiding mantras for automated content analysis. It reminds us that ACs can produce misleading results and of the importance of steps to ascertain validity, for instance by making misclassification transparent. +%\citet[p.5]{grimmer_text_2013} write that +%``when categories are known [...], scholars must demonstrate that the supervised methods are able to reliably replicate human coding.'' +%This suggests that quantifying an AC's predictive performance by comparing human-labeled validation data to automated classifications sufficiently establishes an AC's validity and thereby the validity of downstream analyses. +Like \citet{grimmer_text_2013}, we are deeply concerned that computational methods may produce invalid evidence. 
In this sense, their validation mantra animates this paper. But transparency about misclassification rates via metrics such as precision or recall leaves unanswered an important question: Is comparing automated classifications to some external ground truth sufficient to claim that results are valid? Or is there something else we can do and should do? + +We think there is: Using statistical methods to not only quantify but also correct for misclassification. Our study provides several recommendations in this regard, with an overview of recommendations provided in Figure \ref{fig:FigureRecommendations}. + +\begin{figure}[hbt!] +\centering +\input{flowchart_recommendations.tex} + \caption{Recommendations for Automated Content Analysis Study Design} + \label{fig:FigureRecommendations} +\end{figure} + +% \includegraphics{Recommendations.PNG} + + + +%Similar to recent work in communication science \citep{mahl_noise_2022, stoll_supervised_2020}, our goal is not only to \textit{highlight} and \textit{quantify} common pitfalls in automated content analysis applications of ACs but to also \textit{propose} constructive guidelines on the road ahead. + +\subsubsection{Step 1: Attempt Manual Content Analysis} + +Manual content annotation is often done \textit{post facto}, for instance to calculate predictiveness of an already existing AC such as Google's Perspective classifier. We propose to instead use manually annotated data \textit{ante facto}, i.e. before building or validating an AC. +Practically speaking, the main reason to use an AC is feasibility: to avoid the costs of manual coding a large dataset. +One may for example need a large dataset to study an effect one assumes to be small. Manually labeling such a dataset is expensive. +Often, ACs are seen as a cost-saving procedure without consideration of the threats to validity posed by misclassification. 
+Moreover, validating an existing AC or building a new AC is also expensive, for instance due to costs of computational resources or manual annotation of (perhaps smaller) test and training datasets. + +We therefore caution researchers against preferring automated over manual content analysis unless doing so is necessary to obtain useful evidence. We agree with \citet{baden_three_2022} who argue that ``social science researchers may be well-advised to eschew the promises of computational tools and invest instead into carefully researcher-controlled, limited-scale manual studies'' (p. 11). In particular, we recommend to use manually annotated data \textit{ante facto}: Researchers should begin by statistical modeling human-annotated data so to discern if an AC is necessary. In our simulations, the feasible estimator is less precise but consistent in all cases. So if fortune shines and this estimate sufficiently answers one's research question, manual coding is sufficient. Here, scholars should rely on existing recommendations for descriptive and inferential statistics in the context of manual content analysis \citep{geis_statistical_2021, bachl_correcting_2017}. If the feasible estimator however fails to provide convincing evidence, for example by not rejecting the null, manually annotated data is not wasted. It can be reused to build an AC or correct misclassification bias. +%One potential problem of this \textit{ante facto} approach is that conducting two statistical tests of the same hypothesis increases the chances of false discover. A simple solution to this is to adjust the significance threshold $\alpha$ for drawing conclusions from the feasible estimate. %We recommend p < .01. %That said, it might useful use an AC in a preliminary analysis, prior to collecting validation data when an AC such as one available from an API, is available for reuse and confusion matrix quantities necessary for the pseudo-likelihood (PL) method are published. 
Although (PL) is inconsistent when used for a covariate, this can be corrected if the true rate of $X$ can be estimated. +%Caution is still warranted because ACs can perform quite differently from one dataset to another so we recommend collecting validation representative of your study's dataset and using another appropriate method for published studies. + +\subsubsection{Step 2: Use Manually Annotated Data to Detect Systematic Misclassification} + +% Let's suppose an AC is used to the feasible estimator is insufficiently informative +%There are many guides on how to train and validate ACs \citep[e.g.][]{grimmer_text_2013,van_atteveldt_validity_2021}. However, they mostly refer to performance metrics such as the F1-score or Area under the Curve (AUC). The problem with this approach is that such criteria make misclassifications transparent but do not provide information on how misclassification will affect downstream analyses and how to correct for such effects. +%One reason for this is that such criterion do not account for differential error or for correlation between misclassifications in the outcome and a regression covariate—both of which can give rise to extremely misleading statistics. +As demonstrated in our simulations, knowing whether an AC makes systematic misclassifications is important: It determines which correction methods can work. +Fortunately, manually annotated data can be used to detect systematic misclassification. +For example, \citet{fong_machine_2021} suggest using Sargan's J-test of the null hypothesis that the product of the AC's predictions and regression residuals have an expected value of 0. +More generally, one can test if the data's conditional independence structures can represented by Figures \ref{fig:simulation.1a} or \ref{fig:simulation.2a}. 
This can be done, for example, via likelihood ratio tests of $P(W|X,Z) = P(W|X,Y,Z)$ (if an AC measures an independent variable $X$) or of $P(W|Y) = P(W|Y,Z,X)$ (if an AC measures a dependent variable $Y$) or by visual inspection of plots relating misclassifications to other variables \citep{carroll_measurement_2006}. + We strongly recommend using such methods to test for differential error and to design an appropriate correction. + +% For example, ``algorithmic audits'' \citep[e.g.,][]{rauchfleisch_false_2020, kleinberg_algorithmic_2018} evaluate the performance of AC across different subgroups in the data. + +% This may be important when using different ACs for corpora of different languages or data from different social media platforms. If the accuracy of an AC varies with language or platforms, we may expect differential error. + +% In turn, differential misclassification can be ruled out if the performance of an AC is the same across all analytically relevant subgroups and other variables. + + +\subsubsection{Step 3: Correct for Misclassification Bias Instead of Being Naïve} + +Across our simulations, we showed that the naïve estimator is biased. Testing different error correction methods, we found that these generate different levels of consistency, efficiency, and accuracy in uncertainty quantification. That said, our proposed MLE method should be considered as a versatile method because it is the only method capable of producing consistent estimates in prototypical situations studied here. We recommend the MLE method as the first ``go-to'' method. As shown in Appendix \ref{appendix:noz}, this method requires specifying a valid error model to obtain consistent estimates. This may not be too difficult in practice because if one can assume the primary model for $Y$, this implies that an error model for $W$ that includes all observed variables is sufficient. Still, one should take care to correctly model nonlinearities and interactions. 
+Our \textbf{misclassificationmodels} R package provides reasonable default error models and a user-friendly interface to facilitate adoption of our MLE method (see Appendix \ref{appendix:misclassificationmodels}). + +When feasible, we recommend comparing the MLE approach to another error correction method. Consistency between two correction methods shows that results are robust independent of the correction method. If the AC is used to predict an independent variable, GMM is a good choice if error is nondifferential. Otherwise, MI can be considered. +Unfortunately, if the AC is used to predict a dependent variable, our simulations do not support a strong suggestion for a second method. +PL might be a reasonable choice with enough manually annotated data and non-differential error. +This range of viable choices in error correction methods also motivates our next recommendation. + +\subsubsection{Step 4: Provide a Full Account of Methodological Decisions} + +Finally, we add our voices to those +recommending that researchers report methodological decisions so others can understand and replicate their design \citep{pipal_if_2022, reiss_reporting_2022}. These decisions include but are not limited to choices concerning test and training data (e.g., size, sampling, split in cross-validation procedures, balance), manual annotations (size, number of annotators, intercoder values, size of data annotated for intercoder testing), and the classifier itself (choice of algorithm or ensemble, different accuracy metrics). They extend to reporting different error correction methods as proposed by our third recommendation. +In our review, we found that reporting such decisions is not yet common, at least in the context of SML-based text classification. +When correcting for misclassification, uncorrected results will often provide a lower-bound on effect sizes; corrected analyses will provide more accurate but less conservative results. 
+Therefore, both corrected and uncorrected estimates should be presented as part of making potential multiverses of findings transparent. +% we +% To report instead of hiding methodological decisions and related uncertainty that may emerge in generated results, +%We realize that researchers might need to cut methodological information, especially for empirical studies, to conform to either word limits or reviewers. If word limitations are the problem, this information could be reported in appendices. +% Here, the field might consider adopting ---or adapting--- machine learning reporting standards such as DOME (Computational Biology) and PRIME (Diagnostic medicine). + + +\section{Conclusion and Limitations} + +In this study, we discuss the problem of misclassification in automated content analysis which may introduce misclassification bias in statistical models. We believe this is a topic that has not attracted enough attention within communication science \citep[but see][]{bachl_correcting_2017} and even in the broader computational social science community. After illustrating biases emerging from automated classifiers such as the Perspective API, we quantify how aware researchers are of the issue of misclassification. In a systematic review of studies using SML-based text classification, we show that scholars rarely acknowledge this problem and almost never address it. We therefore discuss a range of statistical methods that use manually annotated data as a ``gold standard'' to account for misclassification and produce correct statistical results, including a new MLE method we design. Using Monte-Carlo simulations, we show that our method provides consistent estimates, especially in situations involving differential error. 
Based on these results, we provide four recommendations for the future of automated content analysis: Researchers should (1) attempt manual content analysis before building or validating ACs to see whether human-labeled data is sufficient, (2) use manually annotated data to test for systematic misclassification and choose appropriate error correction methods, (3) correct for misclassifications via error correction methods, and (4) be transparent about the methodological decisions involved in AC-based classifications and error correction. + +Our study has several limitations. First, the simulations and methods we introduce focus on misclassification by automated tools. They provisionally assume that human annotators do not make errors, especially systematic ones. +This assumption can be reasonable if intercoder reliability is very high but, as with ACs, this may not always be the case. +%Alternatively, validation data can be treated as a gold standard if the goal is measuring \emph{how a person categorizes content}, as opposed to the more common approach of measuring presumably objective content categories. That said, the prevailing approaches in content analysis use human coders to measure a latent category who are prone to misclassification. +Thus, it may be important to account for measurement error by human coders \citep{bachl_correcting_2017} and by automated classifiers simultaneously. In theory, it is possible to extend our MLE approach in order to do so \citep{carroll_measurement_2006}. +However, because the true values of content categories are never observed, accounting for automated and human misclassification at once requires latent variable methods that bear considerable additional complexity and assumptions \citep{pepe_insights_2007}. We leave the integration of such methods into our MLE framework for future work. 
Second, the simulations we present do not consider all possible factors that may influence the performance and robustness of error correction methods including classifier accuracy, heteroskedasticity, and violations of distributional assumptions. We are working to investigate such factors, as shown in Appendix \ref{appendix:main.sim.plots}, by extending our simulations. Third, we simulated datasets with balanced variables, but classifiers are often used to measure rare occurrences. Imbalanced covariates will require greater sample sizes of validation data to correct for misclassification. +In such cases, validation data may be collected more efficiently using approaches that provide balanced, but unrepresentative samples. However, non-representative sampling requires correction methods to account for the probability that a data point will be sampled. + +\setcounter{biburlnumpenalty}{9001} +\printbibliography[title = {References}] + +\clearpage +\appendix + +\section{Perspective API Example}\label{appendix:perspective} + +Our example relies on the publicly available Civil Comments dataset \citep{cjadams_jigsaw_2019}. The dataset contains around 2 million comments collected from independent English-language news sites between 2015 and 2017. We rely on a subset of \Sexpr{f(dv.example[['n.annotated.comments']])} comments which were manually annotated both for toxicity (\emph{toxicity}) and disclosure of identity (\emph{disclosure}) in a comment. The dataset also includes counts of likes each comment received (\emph{number of likes}). + +Each comment was labeled by up to ten manual annotators (although selected comments were labeled by even more annotators). Originally, the dataset represents \emph{toxicity} and \emph{disclosure} as proportions of annotators who labeled a comment as toxic or as disclosing aspects of personal identity including race and ethnicity. 
+For our analysis, we converted these proportions into indicators of the majority view to transform both variables to a binary scale. + +\begin{figure}[htbp!] +\centering +\begin{subfigure}{\linewidth} +<>= +p <- plot.civilcomments.iv.example(include.models=c("Automatic Classification", "All Annotations", "Annotation Sample", "Error Correction")) +print(p) +@ +\subcaption{\emph{Example 1}: Misclassification in an independent variable.\label{fig:real.data.example.iv.app}} +\end{subfigure} + +\begin{subfigure}{\linewidth} +<>= +p <- plot.civilcomments.dv.example(include.models=c("Automatic Classification", "All Annotations", "Annotation Sample", "Error Correction")) +print(p) +@ +\subcaption{\emph{Example 2}: Misclassification in a dependent variable. \label{fig:real.data.example.dv.app}} + +\end{subfigure} +\caption{Real-data example including correction using MLE.} +\end{figure} + +% Our maximum-likelihood based error correction technique in this example requires specifying models for the Perspective's scores and, in the case where these scores are used as a covariate, a model for the human annotations. In our first example, where toxicity was used as a covariate, we used the \emph{human annotations}, \emph{identity disclosure}, and the interaction of these two variables in the model for scores. We omitted \emph{likes} from this model because they are virtually uncorrelated with misclassifications (Pearson's $\rho=\Sexpr{iv.example[['civil_comments_cortab']]['toxicity_error','likes']}$). Our model for the human annotations is an intercept-only model. + +% In our second example, where toxicity is the outcome, we use the fully interacted model of the \emph{human annotations}, \emph{identity disclosure}, and \emph{likes} in our model for the human annotations because all three variables are correlated with the Perspective scores. 
+ +\section{Systematic Literature Review} \label{appendix:lit.review} + +To inform our simulations, we reviewed studies using SML for text classification. + +\subsection{Identification of Relevant Studies} +Our sample was drawn from four recent reviews on the use of AC within the context of communication science and the social sciences more broadly \citep{baden_three_2022, hase_computational_2022, junger_unboxing_2022, song_validations_2020}. Authors of respective studies had either already published their data in an open-science approach or thankfully shared their data with us when contacted. +From their reviews, we collected \emph{N} = 110 studies that included some type of SML (for an overview, see Figure \ref{fig:FigureA1}). + +\begin{figure} + \centering + \includegraphics{measurement_flow.pdf} + \caption{Identifying relevant studies for the literature review} + \label{fig:FigureA1} +\end{figure} + +We first removed 8 duplicate studies identified by several reviews. Two coders then coded the remaining \emph{N} = 102 studies of our preliminary sample for relevance. After an intercoder test (\emph{N} = 10, $\alpha$ = .89), we excluded studies not fulfilling inclusion criteria, here studies not including any SML approach and studies only using SML for data cleaning, not data analysis—for instance to sort out topically irrelevant articles. Next, we removed studies focusing on methodologically advancing SML-based ACs since these studies often include far more robustness and validity tests than commonly employed in empirical settings. Subsequently, all relevant empirical studies (\emph{N} = 48) were coded in further detail. + +\subsection{Manual Coding of Relevant Empirical Studies} +For manual coding, we created a range of variables (for an overview, see Table \ref{tab:TableA1}). 
Based on data from the Social Sciences Citation Index (SSCI), we identified whether studies were published in journals classified as belonging to \emph{Communication} and their \emph{Impact} according to their H index. In addition, two authors manually coded... +\begin{itemize} + \item the type of variables created via SML-based ACs using the variables \emph{Dichotomous} (0 = No, 1 = Yes), \emph{Categorical} (0 = No, 1 = Yes), \emph{Ordinal} (0 = No, 1 = Yes), and \emph{Metric} (0 = No, 1 = Yes), + \item whether variables were used in descriptive or multivariate analyses using the variables \emph{Descriptive} (0 = No, 1 = Yes), \emph{Independent} (0 = No, 1 = Yes), and \emph{Dependent} (0 = No, 1 = Yes), + \item how classifiers were trained and validated via manually annotated data using the variables \emph{Size Training Data} (Open String), \emph{Size Test Data} (Open String), \emph{Size Data Intercoder Test} (Open String), \emph{Intercoder Reliability} (Open String), and \emph{Accuracy of Classifier} (Open String), + \item whether articles mentioned and/or corrected for misclassifications using the variables \emph{Error Mentioned} (0 = No, 1 = Yes) and \emph{Error Corrected} (0 = No, 1 = Yes). +\end{itemize} + +\begin{table} + \caption{Variables Coded for Relevant Empirical Studies} + \label{tab:TableA1} + \begin{tabular}{l l l l} \toprule + Category & Variable & Krippendorff's $\alpha$ & \% or \emph{M} (\emph{SD}) \\ \midrule + Type of Journal & \emph{Communication} & n.a. & 67\% \\ + & \emph{Impact} & n.a. 
& \emph{M = 4} \\ + Type of Variable & \emph{Dichotomous} & 0.86 & 50\% \\ + & \emph{Categorical} & 1 & 23\% \\ + & \emph{Ordinal} & 0.85 & 10\% \\ + & \emph{Metric} & 1 & 35\% \\ + Use of Variable & \emph{Descriptive} & 0.89 & 90\% \\ + & \emph{Independent} & 1 & 44\% \\ + & \emph{Dependent} & 1 & 40\% \\ + Information on Classifier & \emph{Size Training Data} & 0.95 & 67\% \\ + & \emph{Size Test Data} & 0.79 & 52\% \\ + & \emph{Size Data Intercoder Test} & 1 & 44\% \\ + & \emph{Intercoder Reliability} & 0.8 & 56\% \\ + & \emph{Accuracy of Classifier} & 0.77 & 85\% \\ + Measurement Error & \emph{Error Mentioned} & 1 & 19\% \\ + & \emph{Error Corrected} & 1 & 2\% \\ \bottomrule + \end{tabular} +\end{table} + +\subsection{Results} + +SML-based ACs were most often used to create dichotomous measurements (\emph{Dichotomous}: 50\%), followed by variables on a metric (\emph{Metric}: 35\%), categorical (\emph{Categorical}: 23\%), or ordinal scale (\emph{Ordinal}: 10\%). Almost all studies used SML-based classifications to report descriptive statistics on created variables (\emph{Descriptive}: 90\%). However, many also used these in downstream analyses, either as dependent variables (\emph{Dependent}: 40\%) or independent variables (\emph{Independent}: 44\%) in statistical models. + +Only slightly more than half of all studies included information on the size of training or test sets (\emph{Size Training Data}: 67\%, \emph{Size Test Data}: 52\%). Even fewer included information on the size of manually annotated data for intercoder testing (\emph{Size Data Intercoder Test}: 44\%) or respective reliability values (\emph{Intercoder Reliability}: 56\%). Lastly, not all studies reported how well their classifier performed by using metrics such as precision, recall, or F1-scores (\emph{Accuracy of Classifier}: 85\%). 
Lastly, few studies explicitly mentioned the issue of misclassification (\emph{Error Mentioned}: 19\%), with only a single study correcting for such (\emph{Error Corrected}: 2\%). + +\section{Other Error Correction Methods} +\label{appendix:other.methods} +Statisticians have introduced a range of other error correction methods which we did not test in our simulations. Here, we briefly discuss three additional methods and explain why we did not include them in our simulations. + +\emph{Simulation extrapolation} (SIMEX) simulates the process generating measurement error to model how measurement error affects an analysis and ultimately to approximate an analysis with no measurement error \citep{carroll_measurement_2006}. SIMEX is a very powerful and general method that can be used without manually annotated data, but may be more complicated than necessary to correct measurement error from ACs when manually annotated data is available. Likelihood methods are easy to apply to misclassification so SIMEX seems unnecessary \citep{carroll_measurement_2006}. + +\emph{Score function methods} derive estimating equations for models without measurement error and then solve them either exactly or using numerical integration \citep{carroll_measurement_2006, yi_handbook_2021}. +The main advantage score function methods may have over likelihood-based methods is that they do not require distributional assumptions about mismeasured independent variables. This advantage has limited use in the context of ACs because binary classifications must follow Bernoulli distributions. + +We also do not consider \emph{Bayesian methods} (aside from the Amelia implementation of the MI approach) because we expect these to have similar limitations to the maximum likelihood methods we consider. Bayesian methods may have other advantages resulting from posterior inference and may generalize to a wide range of applications. 
However, specifying prior distributions introduces additional methodological complexity and posterior inference is computationally intensive, making Bayesian methods less convenient for Monte-Carlo simulation. + + +\section{Deriving the Maximum Likelihood Approach} +\label{appendix:derivation} +In the following, we derive our MLE approach for addressing misclassifications. +\subsection{When an AC Measures an Independent Variable} +To show why $L(\theta|Y,W)$ can be factored, we follow \citet{carroll_measurement_2006} and begin by observing the following fact from basic probability theory. + +\begin{align} + P(Y,W) &= \sum_{x}{P(Y,W,X=x)} + \label{eq:mle.covariate.chainrule.1}\\ + &= \sum_{x}{P(Y|W,X=x)P(W,X=x)} + \label{eq:mle.covariate.chainrule.2}\\ + &= \sum_{x}{P(Y,X=x)P(W|Y,X=x)} \label{eq:mle.covariate.chainrule.3} \\ + &= \sum_{x}{P(Y|X=x)P(W|Y,X=x)P(X=x)} \label{eq:mle.covariate.chainrule.4} +\end{align} +\noindent +Equation \ref{eq:mle.covariate.chainrule.1} integrates $X$ out of the joint probability of $Y$ and $W$ by summing over its possible values $x$. If $X$ is binary, this means adding the probability given $x=1$ to the probability given $x=0$. When $X$ is observed, say $x=0$, then $P(X=0)=1$ and $P(X=1)=0$. As a result, only the true value of $X$ contributes to the likelihood. However, when $X$ is unobserved, all of its possible values contribute. In this way, integrating out $X$ allows us to include data where $X$ is not observed to the likelihood. + +Equation \ref{eq:mle.covariate.chainrule.2} uses the chain rule of probability to factor the joint probability $P(Y,W)$ of $Y$ and $W$ from $P(Y|W,X)$, the conditional probability of $Y$ given $W$ and $X$, and $P(W,X=x)$, the joint probability of $W$ and $X$. This lets us see how maximizing $\mathcal{L}(\Theta|Y,W)$, the joint likelihood of $\Theta$ given $Y$ and $W$ accounts for the uncertainty of automated classifications. 
For each possible value $x$ of $X$, it weights the model of the outcome $Y$ by the probability that $x$ is the true value and that the AC outputs $W$. + +Equation \ref{eq:mle.covariate.chainrule.3} shows a different way to factor the joint probability $P(Y,W)$ so that $W$ is not in the model of $Y$. Since $X$ and $W$ are correlated, if $W$ is in the model for $Y$, the estimation of $B_1$ will be biased. By including $Y$ in the model for $W$, Equation \ref{eq:mle.covariate.chainrule.3} can account for differential measurement error. + +Equation \ref{eq:mle.covariate.chainrule.4} factors $P(Y,X=x)$ the joint probability of $Y$ and $X$ into $P(Y|X=x)$, the conditional probability of $Y$ given $X$, $P(W|X=x,Y)$, the conditional probability of $W$ given $X$ and $Y$, and $P(X=x)$ the probability of $X$. This shows that fitting a model $Y$ given $X$ in this framework, such as the regression model $Y = B_0 + B_1 X + B_2 Z$ requires including $X$. Without validation data, $P(X=x)$ is difficult to calculate without strong assumptions \citep{carroll_measurement_2006}, but $P(X=x)$ can easily be estimated using a sample of validation data. + +%Our appendix includes supplementary simulations that explore how robust our method to model mispecification. +Equations \ref{eq:mle.covariate.chainrule.1}--\ref{eq:mle.covariate.chainrule.4} demonstrate the generality of this method because the conditional probabilities may be calculated using a wide range of probability models. + For simplicity, we have focused on linear regression for the probability of $Y$ and logistic regression for the probability of $W$ and the probability of $X$. However, more flexible probability models such as generalized additive models (GAMs) or Gaussian process classification may be useful for modeling nonlinear conditional probability functions \citep{williams_bayesian_1998}. 
+ + +\subsection{When an AC Measures the Dependent Variable} + +Again, we will maximize $\mathcal{L}(\Theta|Y,W)$, the joint likelihood of the parameters $\Theta$ given the outcome $Y$ and automated classifications $W$ measuring the dependent variable $Y$ \citep{carroll_measurement_2006}. +We use the law of total probability to integrate out $Y$ and the chain rule of probability to factor the joint probability into $P(Y)$, the probability of $Y$, and $P(W|Y)$, the conditional probability of $W$ given $Y$. + +\begin{align} + P(Y,W) &= \sum_{y}{P(Y=y,W)} \\ + &= \sum_{y}{P(Y)P(W|Y)} +\end{align} + +As above, the conditional probability of $W$ given $Y$ must be calculated using a model. The range of possible models is vast and analysts must choose a model that accurately describes the conditional dependence of $W$ on $Y$. + +We implement these methods in \texttt{R} using the \texttt{optim} library for maximum likelihood estimation. Our implementation supports models specified using \texttt{R}'s formula syntax. It can fit linear and logistic regression models when an AC measures an independent variable and logistic regression models when an AC measures the dependent variable. Our implementation provides two methods for approximating confidence intervals: The Fisher information quadratic approximation and the profile likelihood method provided in the \texttt{R} package \texttt{bbmle}. The Fisher approximation usually works well in simple models fit to large samples and is fast enough for practical use for the large number of simulations we present. However, the profile likelihood method provides more accurate confidence intervals \citep{carroll_measurement_2006}. + + +\subsection{Comment on model assumptions} + +How burdensome is the assumption that the error model be able to consistently estimate the conditional probability of $W$ given $Y$? 
If this assumption were much more difficult than those already accepted by the model for $Y$ given $X$ and $Z$, one would fear that using the MLE correction method introduces greater validity threats than it removes. However, if we believe our model for $Y$ given $X$ is consistent this makes it unlikely that we have omitted variables from the error model. Any such variables must be correlated with both $W$ and $X$ or $Z$, but not with $Y$. + +To see why, first suppose $U$ is an omitted variable from $P(W|X,Y,Z)$. Then $U$ is correlated with $W$ and at least one of $X$, $Y$, $Z$. If $U$ is correlated with $Y$, then it is either an omitted variable from $P(Y|X,Z)$ or otherwise $P(Y|X,Z)=P(Y|U,X,Z)$. + +Assuming the latter, observe by conditional probability, +\begin{align} +P(W|U,X,Y,Z)&=\frac{P(U,W,X,Y,Z)}{P(U,X,Y,Z)} = \frac{P(Y|U,W,X,Z)P(U,W,X,Z)} +{P(Y|U,X,Z)P(U,X,Z)}\\ &= \frac{P(U,W,X,Z)}{P(U,X,Z)} = P(W|X,Y,Z) +\end{align} +\noindent Note that $W$ is not an omitted variable from $P(Y|X,Z)$. As a result, $P(W|U,X,Y,Z) = P(W|X,Y,Z)$ and $U$ is not omitted from our model for $P(W|X,Y,Z)$. + +In sum, if one can assume a model for $P(Y|X,Z)$, it is often reasonable to assume the variables needed to model $P(W|X,Y,Z)$ are observed. Any such variables that are unobserved must be independent from $Y$. +As demonstrated in Appendix \ref{appendix:misspec}, the method is less effective when variables are omitted from the error model. + +\section{misclassificationmodels: The R package} \label{appendix:misclassificationmodels} + +The package provides a function to conduct regression analysis but also corrects for misclassification using information from manually annotated data. The function is very similar to \textbf{glm()} but with two changes: + +\begin{itemize} +\item The formula interface has been extended with the double-pipe operator to denote a proxy variable. For example, \textbf{x || w} indicates that \textit{w} is the proxy of the ground truth \textit{x}. 
+\item The manually annotated data must be provided via the argument \textit{data2} +\end{itemize} + +The following snippet shows a typical scenario, here for correcting misclassifications in an independent variable: +\lstset{style=mystyle} +\begin{lstlisting}[language=R, caption=A demo of misclassificationmodels] +library(misclassificationmodels) +## research_data contains the following columns: y, w, z +## val_data contains the following columns: y, w, x, z +# w is a proxy of x +res <- glm_fixit(formula = y ~ x || w + z, + data = research_data, + data2 = val_data) +summary(res) +\end{lstlisting} + +For more information about the package, please see here: \url{https://osf.io/pyqf8/?view_only=c80e7b76d94645bd9543f04c2a95a87e}. + + +\section{Additional Plots and Simulations} + +In addition to the results reported in the main paper, we include in the next section auxiliary plots from the main simulations. Below, we present results from further simulations that show what happens when the error model is misspecified, how results vary with classifier predictiveness or when the classified variable is not balanced, but skewed, and as the degree to which misclassification is systematic varies. + +\subsection{Additional plots for Simulations 1 and 2} +\label{appendix:main.sim.plots} + +\begin{figure} +<>= + +p <- plot.simulation.iv(plot.df.example.1,iv='z') + +grid.draw(p) +@ +\caption{Estimates of $B_Z$ in \emph{simulation 1a}, multivariate regression with $X$ measured using machine learning and model accuracy independent of $X$, $Y$, and $Z$. All methods obtain precise and accurate estimates given sufficient validation data.} +\end{figure} + +\begin{figure} +<>= +p <- plot.simulation.iv(plot.df.example.2, iv='z') +grid.draw(p) +@ +\caption{Estimates of $B_Z$ in multivariate regression with $X$ measured using machine learning and model accuracy correlated with $X$ and $Y$ and error is differential. 
Only multiple imputation and our MLE model with a full specification of the error model obtain consistent estimates of $B_Z$.\label{fig:sim1b.z}} +\end{figure} + +\begin{figure} +<>= +#plot.df <- +p <- plot.simulation.dv(plot.df.example.3,'z') +grid.draw(p) +@ +\caption{Estimates of $B_Z$ in \emph{simulation 2a}, multivariate regression with $Y$ measured using an AC that makes errors. Only our MLE model with a full specification of the error model obtains consistent estimates.} +\end{figure} + +\begin{figure} +<>= +#plot.df <- +p <- plot.simulation.dv(plot.df.example.4,'x') +grid.draw(p) +@ +\caption{Estimates of $B_X$ in \emph{simulation 2b} multivariate regression with $Y$ measured using machine learning, model accuracy correlated with $Z$ and $Y$ and differential error. Only our MLE model with a full specification of the error model obtains consistent estimates. \label{fig:sim2b.z}} +\end{figure} + +\subsection{Simulating what happens when an error model is misspecified.} +\label{appendix:misspec} +In simulations 1b and 2b, the MLE method was able to correct systematic misclassification using the error models in equations \ref{eq:covariate.reg.general} and \ref{eq:depvar.general}. +However, this depends on the error model consistently estimating the conditional probability of automatic classifications given the true value and the outcome. +If the misclassifications and the outcome are conditionally dependent given a variable $Z$ that is omitted from the model, this will not be possible. +Here, we demonstrate how such misspecification of the error model can affect results. + +\subsubsection{Systematic Misclassification of an Independent Variable with $Z$ omitted from the error model} + +What happens in simulation 1b, representing systematic misclassification of an independent variable, when the error model is missing variable $Z$? As shown in Figure \ref{fig:iv.noz} this incorrect MLE model is unable to fully correct misclassification bias. 
Although the estimate of $B_X$ is close to correct, estimation of $B_Z$ is clearly biased, if improved compared to the naïve estimator. +%Here we refer to $P(Y|X,Z,\Theta_Y)$ as the ``outcome model'', $P(W|Y,X,Z,\Theta_W)$ as the ``proxy model'', and $P(X|Z,\Theta_X)$ as the ``truth model''. + + +\begin{figure}[htpb!] +\begin{subfigure}{0.95\textwidth} +<>= +source('resources/robustness_check_plots.R') +p <- plot.robustness.1('x') +grid.draw(p) +@ +\label{fig:iv.noz.x} +\caption{Estimates of $B_X$ with a misspecified error correction model that omits $Z$ are still close to the true value.} +\end{subfigure} + +\begin{subfigure}{0.95\textwidth} +<>= +source('resources/robustness_check_plots.R') +p <- plot.robustness.1('z') +grid.draw(p) +@ +\label{fig:iv.noz.z} +\caption{Estimates of $B_Z$ with a misspecified error correction model that omits $Z$ are noticeably biased but better than the naïve estimator.} +\end{subfigure} +\caption{Failure to correct for misclassification in an independent variable when the error model is misspecified. } +\label{fig:iv.noz} +\end{figure} + +\subsubsection{Systematic misclassification of a dependent variable with $Z$ omitted from the error model.} + +Similarly, as shown in Figure \ref{fig:dv.noz}, in the case a dependent variable is systematically misclassified, an error model omitting a variable $Z$ required to make $W$ and $Y$ conditionally independent is unable to obtain consistent estimates. Again, the estimate of $B_X$ is close to the true value, but the estimate of $B_Z$ is biased, if less so than the naïve estimate. + +\begin{figure}[htpb!] 
+\begin{subfigure}{0.95\textwidth} +<>= +source('resources/robustness_check_plots.R') +p <- plot.robustness.1.dv('x') +grid.draw(p) +@ +\label{fig:dv.noz.x} +\caption{Estimates of $B_X$ with a misspecified error correction model that omits $Z$ are still close to the true value.} +\end{subfigure} + +\begin{subfigure}{0.95\textwidth} +<>= +source('resources/robustness_check_plots.R') +p <- plot.robustness.1.dv('z') +grid.draw(p) +@ +\label{fig:dv.noz.z} +\caption{Estimates of $B_Z$ with a misspecified error correction model that omits $Z$ are noticeably biased but better than the naïve estimator.} +\end{subfigure} +\caption{Failure to correct for misclassification in a dependent variable when the error model is misspecified. } +\label{fig:dv.noz} +\end{figure} + + + +\subsection{Simulating varying automatic classifier accuracy} + +<>= +source('resources/robustness_check_plots.R') +@ + +To explore how misclassification bias and correction methods depend on classifier performance, we repeat Simulations 1a and 1b with levels of classifier accuracy ranging +from \Sexpr{format.percent(min(robust_2_min_acc))} to \Sexpr{format.percent(max(robust_2_max_acc))}. +We present results with a sample size of 5,000 classifications and 100 annotations. +As expected, in both scenarios a more accurate classifier causes less misclassification bias. All the error correction methods provide more precise estimates when used with more accurate classifiers. + +\begin{figure}[htpb!] +\begin{subfigure}{0.95\textwidth} +<>= +p <- plot.robustness.2.iv('x') +grid.draw(p) +@ +\label{fig:iv.predacc.x} +\caption{Estimates of $B_X$ from simulation 1a with 5,000 classifications and 100 annotations with varying levels of classifier accuracy indicated by the facet labels. 
} +\end{subfigure} + +\begin{subfigure}{0.95\textwidth} +<>= +p <- plot.robustness.2.iv('z') +grid.draw(p) +@ +\label{fig:iv.predacc.z} +\caption{Estimates of $B_Z$ from simulation 1a with 5,000 classifications and 100 annotations with varying levels of classifier accuracy indicated by the facet labels.} +\end{subfigure} +\caption{Misclassification in an independent variable with more and less accurate automatic classifiers. More accurate classifiers cause less misclassification bias and more efficient estimates when used with error correction methods.} +\label{fig:iv.predacc} +\end{figure} + +\begin{figure}[htpb!] +\begin{subfigure}{0.95\textwidth} +<>= +p <- plot.robustness.2.dv('x') +grid.draw(p) +@ +\label{fig:dv.predacc.x} +\caption{Estimates of $B_X$ from simulation 2a with 5,000 classifications and 100 annotations with varying levels of classifier accuracy indicated by the facet labels.} +\end{subfigure} + +\begin{subfigure}{0.95\textwidth} +<>= +p <- plot.robustness.2.dv('z') +grid.draw(p) +@ +\label{fig:dv.predacc.z} +\caption{Estimates of $B_Z$ from simulation 2a with 5,000 classifications and 100 annotations with varying levels of classifier accuracy indicated by the facet labels.} +\end{subfigure} +\caption{Dependent variable with more and less accurate automatic classifiers. More accurate classifiers cause less misclassification bias and more efficient estimates when used with error correction methods.} +\label{fig:dv.predacc} +\end{figure} + +\subsection{Simulating misclassification in skewed variables} + +For simplicity, our main simulations have balanced classified variables. But classifiers are often used to measure imbalanced or skewed variables, which can be more difficult to predict. Here, we show that MLE correction performs similarly well with skewed classified variables. Although the Fisher approximation for confidence intervals performs poorly, the profile likelihood method works well. 
+ + +%However, if one can assume the model for $Y$, then one believes that $Y$ and $X$ are conditionally independent given other observed variables. + +% \section{Additional simulations} +% \subsection{Heteroskedastic but nondifferential misclassifications}\label{appendix:sim1.hetero} + +% \subsection{Imbalanced covariates} +% \label{appendix:sim1.imbalanced} +\end{document} + +\subsection{Profile likelihood improves uncertainty quantification} +\label{appendix:sim1.profile} + +\section{Four prototypical scenarios} + +We must clearly distinguish four types of measurement error that arise in this context. +The first type occurs when a covariate is measured with error and this error can be made statistically independent of the outcome by conditioning on other covariates. In this case the error is called nondifferential. +The second type, differential error, occurs when a covariate is measured with error that is systematically correlated with the outcome, even after accounting for the other covariates \citep{carroll_measurement_2006}. +These two types of error apply when an AC is used to measure a covariate. +When an AC is used to measure an outcome, errors can be random—uncorrelated with the covariates—or they can be systematic—correlated with a covariate. + +Nondifferential measurement error and random error in the outcome are relatively straightforward to correct. We will argue below that differential measurement error can be avoided when an AC is carefully designed. Yet the risk of differential measurement error is considerable in such cases as multilingual text classification, because the ease of classification may systematically vary in relation to the outcome and covariates, or when a model trained in one context is applied in another. + +Research using ACs based on supervised machine learning may be particularly prone to differential and systematic measurement error. 
Problems of bias and generalizability have been widely documented in the field of machine learning more generally. + + + +%Statistical theory and simulations have shown that all these methods are effective (though some are more efficient) when ``ground-truth'' observations are unproblematic and when classifiers only make random, but not systematic, errors. We contribute by testing these methods in more difficult cases likely to arise in text-as-data studies. + +% +% All prior methods for correcting measurement error using validation data presume that the validation data is error-free. However, the methodological content analysis literature has extensively studied the difficulties in human-labeling theoretically and substantively significant content categories through the lens of inter-coder reliability. We contribute novel methods that account for both inter-coder reliability and machine classification error. + +Our Monte Carlo simulations show that different error-correction methods fail in different cases and that none is always the best. For example, methods that can correct for differential error will be inefficient when none is present. In addition, \citet{fong_machine_2021}'s method-of-moments estimator exchanges distributional assumptions for an exclusion restriction and fails in different cases from methods based on parametric models, such as ours. + + +\subsection{Our Contributions} + +\begin{itemize} + \item Introduce this methodological problem to Communication Research; argue that this is not too far from ignoring disagreement in manual codings. + \item Document the prevalence of automated content analysis to show the importance of the problem. + \item Summarize available statistical methods for adjusting for measurement error and bias. + \item Evaluate these methods in realistic scenarios to show when they work and when they do not. + \item Recommend best practices for applied automated content analysis. 
+ \item Chart directions for future research to advance methods for automated content analysis. +\end{itemize} + +\section{Background} + +\subsection{Methods used to correct measurement error in simulation scenarios} + +We'll compare the performance of these methods in terms of: + +\begin{itemize} + \item Consistency: Does the method recover the true parameter on average? + \item Efficiency: How precise are the estimates? Does precision improve with sample size? + \item Robustness: Does the method work when parametric assumptions are violated? +\end{itemize} + +We'll run simulations that vary along these dimensions: + +\begin{itemize} + \item Explained variance (function of $B_XZ$ and $\varepsilon$) + \item Predictor accuracy (we'll always have balanced classes). + \item Interrater reliability + \item Data type of measured variable: binary / Likert + \item Distribution of other variable: normal, lognormal, binary + \item Unlabeled sample size + \item Labeled sample size +\end{itemize} + + +\subsection{Explanation of Bayesian Networks / Causal DAGs for representing scenarios} + +In this section we present the design of our simulation studies. So far I have designed the following three scenarios (though I have some work to do to polish them and fix bugs): + +\subsection{Definition of MLE Models} + +We model example 1 and 2, +\section{Discussion} + +\citet{fong_machine_2021} argue, and we agree, that a carefully designed AC can avoid forms of measurement error that are more difficult to deal with. However, tailoring an AC from scratch requires considerable effort and expense compared to reusing an AC developed for common purposes, as the wide popularity that classifiers like LIWC and Perspective enjoy demonstrates. Our recommended approaches of GMM calibration, multiple imputation and likelihood modeling can all be conceived as fine-tuning steps that transform general purpose classifiers into tailored classifiers capable of providing reliable inferences. 
+ +A natural response to the above extended meditation on measurement error in the context of automatic classifiers is to question the purpose of using ACs at all. It seems strange to think that by using a model's predictions of a variable to build another model predicting that same variable we can solve the problems introduced by the first model. Indeed, the more complex modeling strategies we propose are only necessary to correct the shortcomings of an AC. We envision ACs such as commercial APIs, widely used dictionaries, or ACs that are generalized to new contexts as likely to have such shortcomings, because such ACs may provide information about a variable that would be difficult to obtain otherwise. + +Even though machine learning algorithms such as random forests might obtain greater performance at automatic classification, this comes at the expense of bias that may be difficult to model using validation data \citep{breiman_statistical_2001}. +Instead of tailoring an AC for a research study, using predictive features directly to infer missing validation data using multiple imputation or to model the probability of a variable in the likelihood modeling framework may be simpler and more likely to result in valid inferences. + +% A common strategy is to use a machine learning classifier $g(\mathbf{K})$ (e.g., the Perspective API) to obtain Often, researchers use the $N^*$ observations of $\mathbf{x}$ to build $\hat{\mathbf{w}}=g(\mathbf{Z})$. Other times they may use a different ``black-box'' model $g(\mathbf{Z})$ that is perhaps trained on a larger dataset different from that used to estimate $B$. + + +% Although it is often claimed that this bias is a conservative ``attenuation'' of estimates toward zero, this is only necessarily the case of ordinary linear regression with 2 variables when the bias is uncorrelated with $\mathbf{x}$ and $\mathbf{y}$ \citep{carroll_measurement_2006}. 
What's more, in conditions likely to occur in social scientific research, such as when the explained variance of the regression model is very low, the estimate of $\hat{B}^*$ can be \emph{more precise} than that of $\hat{B}$. As a result, the measurement error of a machine learning classifier is not always conservative but can result in false discovery \citep{carroll_measurement_2006}. + + + Note that specific forms of statistical bias are of particular concern for scientific measurement and although these may often be related to biases against social groups \citep[e.g.][]{obermeyer_dissecting_2019}, these notions of bias are not equivalent \citep{kleinberg_algorithmic_2018}. Introduce multi-lingual text classification as an example. + +(attenuation bias / correlation dilution), but this bias towards zero defeats the purpose of automated content analysis in the first place! +\subsection{Rationale} +\begin{itemize} + \item Automated content analysis is all the rage. Tons of people are doing it, but they all have the same problem: their models are inaccurate. They don't know if the model is accurate enough to trust their inferences. + + \item Social scientists often adopt performance criteria and standards for machine learning predictors used in computer science. These criteria do not tell us how well a predictor works as a measurement device for a given scientific study. + + \item In general, prediction errors result in biased estimates of regression coefficients. In simple models with optimistic assumptions this bias will be conservative (attenuation bias / correlation dilution), but this bias towards zero defeats the purpose of automated content analysis in the first place! + + \item In more general scenarios (e.g., GLMs, differential error, multivariate regression), prediction errors can create bias that is not conservative. 
+ + \item Statisticians have studied measurement error for a long time, and have developed several methods, but the settings they consider most often lack features of automated content analysis. Specifically: + + \begin{itemize} + \item The availability of (potentially inaccurate) validation data. (Most methods are designed for \emph{sensors} where the distribution of the error can be known, but error can be assumed to be nondifferential). + \item Differential error—the amount of noise is not independent of observations. + + \item The possibility of bias in addition to noise. + \end{itemize} + + \item Conducting simulations to evaluate existing methods including regression calibration, the extension of regression calibration by \citet{fong_machine_2021}, multiple imputation, and simulation extrapolation. + + \item These issues become even more important, and also more complex, in important research designs such as those involving multiple languages. + + +\subsection{Imperfect human-coded validation data} + +All approaches stated above depend on the human-coded validation data $X^*$. Most often, ACs are also trained on human-coded material. The content analysis literature has long documented how unreliable human coding can be, and manual content analysis papers routinely report intercoder reliability as a result \citep{krippendorff_content_2018}. Intercoder reliability metrics typically assume that human coders are interchangeable and the only source of disagreement is ``coder idiosyncrasies'' \citep{krippendorff_reliability_2004}. A previous Monte Carlo simulation operationalizes these ``coder idiosyncrasies'' as a fixed probability that a coder makes a random guess independent of the coder and of the material \citep{geis_statistical_2021}. In this work, we accept this ``interchangeable coders making random errors'' (ICMRE) assumption. 
Under this optimistic assumption, only ``coder idiosyncrasies'' cause misclassification error in the validation data. + +\citet{song_validations_2020}'s Monte Carlo simulation demonstrates that human-coded $X^*$ with a lower intercoder reliability generates more biased classification accuracy of the AC. So even if manual annotation errors are only due to the ICMRE assumption, they may bias results. None of the above correction approaches account for the imperfect human coding of $X^*$, although \citet{zhang_how_2021} identifies the omission of this as a weakness of their proposed approach. Even in the context of manual content analysis, these ``coder idiosyncrasies'' are not routinely adjusted for (although methods are available, e.g., \citet{bachl_correcting_2017}). +An advantage of our proposed method over prior approaches is that it automatically accounts for imperfection of human coding under the ICMRE assumption because the random errors in validation data are independent from the AC errors. + +Precision of estimates can be improved using more than one independent coder. With two coders, for example, two sets of validation data are generated, $X^*_{1}$, $X^*_{2}$. We then list-wise delete all data for which $X^*_{1} \neq X^*_{2}$. If the ICMRE assumption holds, the deleted data, where two coders disagree, can only be due to ``coder idiosyncrasies''. As coders are assumed to be interchangeable, the probability of two interchangeable coders both making the same misclassification error is much less than the probability that one makes a misclassification error. Using such ``labeled-only, coherent-only'' (LOCO) data improves the precision of consistent estimates in our simulation. + + +\subsection{Measurement error in validation data} + +The simulations above assume that validation data is perfectly accurate. This is obviously unrealistic because validation data, such as that obtained from human classifiers, normally has inaccuracies. 
+To evaluate the robustness of correction methods to imperfect validation data, we extend our scenarios with nondifferential error with simulated validation data that is misclassified \Sexpr{format.percent(med.loco.accuracy)} of the time at random. + +\subsubsection{Recommendation II: Employ at Least Two Manual Coders, not One} + +Independent of whether researchers use manually annotated data for the feasible approach or AC, principles of manual content analysis, including justifying one's sample size, still apply. +%\citep[for details]{krippendorff_content_2018}. +%TODO uncomment below after ICA +Arguably, the most important problem in traditional content +analysis is whether human coders are capable of reliably classifying content into the categories under study. With multiple human coders labelling the same data, metrics such as Krippendorff's $\alpha$ +%and Gwet's $AC$ +can quantify ``intercoder reliability'' in terms of how often coders agree and disagree \citep{krippendorff_reliability_2004}. +These metrics all assume that disagreements are due to +``coder idiosyncrasies'' that are independent of the data \citep{krippendorff_reliability_2004}. + +We recommend that such metrics also be used to establish intercoder reliability in all of the human-labeled data, not only a smaller subset for intercoder testing. +Moreover, the gold standard data is also reused in later steps, and those steps can be influenced by these ``coder idiosyncrasies'' \citep{song_validations_2020}. +We recommend that the gold standard data should be manually coded by two coders, not one. This allows the calculation of interrater reliability, a more accurate validation of the AC's performance, and better correction. Additional independent coders would eliminate even more of these ``coder idiosyncrasies'' than two coders. + + + +However, the gains from introducing additional coders are diminishing, so using more than two coders may not be cost effective. 
+\end{itemize} + + +\section{Accounting for errors in the validation data} + +In this section, we extend \emph{Simulation 1b} and \emph{Simulation 2b} with + + +\begin{figure} +<>= +#plot.df <- +p <- plot.simulation.irr(plot.df.example.5,'z') +grid.draw(p) +@ +\caption{Estimates of $B_Z$ in multivariate regression with $X$ measured using machine learning, with validation data collected by 2 independent coders that make random errors.} +\end{figure} +\begin{figure} +<>= +#plot.df <- +p <- plot.simulation.irr(plot.df.example.5,'x') +grid.draw(p) +@ +\caption{Estimates of $B_X$ in multivariate regression with $X$ measured using machine learning, with validation data collected by 2 independent coders that make random errors.} +\end{figure} + +\begin{figure} +<>= +#plot.df <- +p <- plot.simulation.irr.dv(plot.df.example.6,'z') +grid.draw(p) +@ +\caption{Estimates of $B_Z$ in multivariate regression with $Y$ measured using machine learning, with validation data collected by 2 independent coders that make random errors.} +\end{figure} +\begin{figure} +<>= +#plot.df <- +p <- plot.simulation.irr.dv(plot.df.example.6,'x') +grid.draw(p) +@ +\caption{Estimates of $B_X$ in multivariate regression with $Y$ measured using machine learning, with validation data collected by 2 independent coders that make random errors.} +\end{figure} diff --git a/article.bcf b/article.bcf new file mode 100644 index 0000000..9651dda --- /dev/null +++ b/article.bcf @@ -0,0 +1,3021 @@ + + + + + + output_encoding + utf8 + + + input_encoding + utf8 + + + debug + 0 + + + mincrossrefs + 999 + + + minxrefs + 2 + + + sortcase + 1 + + + sortupper + 1 + + + + + + + alphaothers + + + + + labelalpha + 0 + + + labelnamespec + shortauthor + author + shorteditor + editor + + + labeltitle + 0 + + + labeltitlespec + shorttitle + title + maintitle + + + labeltitleyear + 0 + + + labeldateparts + 1 + + + labeldatespec + pubstate + date + eventdate + year + nodate + + + julian + 0 + + + gregorianstart + 1582-10-15 + + 
+ maxalphanames + 3 + + + maxbibnames + 20 + + + maxcitenames + 2 + + + maxsortnames + 20 + + + maxitems + 999 + + + minalphanames + 1 + + + minbibnames + 19 + + + mincitenames + 1 + + + minsortnames + 19 + + + minitems + 1 + + + nohashothers + 0 + + + noroman + 0 + + + nosortothers + 0 + + + pluralothers + 1 + + + singletitle + 0 + + + skipbib + 0 + + + skipbiblist + 0 + + + skiplab + 0 + + + sortalphaothers + + + + + sortlocale + american + + + sortingtemplatename + apa + + + sortsets + 0 + + + uniquelist + minyear + + + uniquename + init + + + uniqueprimaryauthor + 1 + + + uniquetitle + 0 + + + uniquebaretitle + 0 + + + uniquework + 0 + + + useprefix + 1 + + + useafterword + 1 + + + useannotator + 1 + + + useauthor + 1 + + + usebookauthor + 1 + + + usecommentator + 1 + + + useeditor + 1 + + + useeditora + 1 + + + useeditorb + 1 + + + useeditorc + 1 + + + useforeword + 1 + + + useholder + 1 + + + useintroduction + 1 + + + usenamea + 1 + + + usenameb + 1 + + + usenamec + 1 + + + usetranslator + 1 + + + useshortauthor + 1 + + + useshorteditor + 1 + + + usenarrator + 1 + + + useexecproducer + 1 + + + useexecdirector + 1 + + + usewith + 1 + + + + + + labelalpha + 0 + + + labelnamespec + shortauthor + author + shorteditor + editor + + + labeltitle + 0 + + + labeltitlespec + shorttitle + title + maintitle + + + labeltitleyear + 0 + + + labeldateparts + 1 + + + labeldatespec + pubstate + date + eventdate + year + nodate + + + maxalphanames + 3 + + + maxbibnames + 20 + + + maxcitenames + 2 + + + maxsortnames + 20 + + + maxitems + 999 + + + minalphanames + 1 + + + minbibnames + 19 + + + mincitenames + 1 + + + minsortnames + 19 + + + minitems + 1 + + + nohashothers + 0 + + + noroman + 0 + + + nosortothers + 0 + + + singletitle + 0 + + + skipbib + 0 + + + skipbiblist + 0 + + + skiplab + 0 + + + uniquelist + minyear + + + uniquename + init + + + uniqueprimaryauthor + 1 + + + uniquetitle + 0 + + + uniquebaretitle + 0 + + + uniquework + 0 + + + useprefix + 1 + + + useafterword 
+ 1 + + + useannotator + 1 + + + useauthor + 1 + + + usebookauthor + 1 + + + usecommentator + 1 + + + useeditor + 1 + + + useeditora + 1 + + + useeditorb + 1 + + + useeditorc + 1 + + + useforeword + 1 + + + useholder + 1 + + + useintroduction + 1 + + + usenamea + 1 + + + usenameb + 1 + + + usenamec + 1 + + + usetranslator + 1 + + + useshortauthor + 1 + + + useshorteditor + 1 + + + usenarrator + 1 + + + useexecproducer + 1 + + + useexecdirector + 1 + + + usewith + 1 + + + + + + labelalpha + 0 + + + labelnamespec + shortauthor + author + shorteditor + editor + + + labeltitle + 0 + + + labeltitlespec + shorttitle + title + maintitle + + + labeltitleyear + 0 + + + labeldateparts + 1 + + + labeldatespec + date + + + maxalphanames + 3 + + + maxbibnames + 20 + + + maxcitenames + 2 + + + maxsortnames + 20 + + + maxitems + 999 + + + minalphanames + 1 + + + minbibnames + 19 + + + mincitenames + 1 + + + minsortnames + 19 + + + minitems + 1 + + + nohashothers + 0 + + + noroman + 0 + + + nosortothers + 0 + + + singletitle + 0 + + + skipbib + 0 + + + skipbiblist + 0 + + + skiplab + 0 + + + uniquelist + minyear + + + uniquename + init + + + uniqueprimaryauthor + 1 + + + uniquetitle + 0 + + + uniquebaretitle + 0 + + + uniquework + 0 + + + useprefix + 1 + + + useafterword + 1 + + + useannotator + 1 + + + useauthor + 1 + + + usebookauthor + 1 + + + usecommentator + 1 + + + useeditor + 1 + + + useeditora + 1 + + + useeditorb + 1 + + + useeditorc + 1 + + + useforeword + 1 + + + useholder + 1 + + + useintroduction + 1 + + + usenamea + 1 + + + usenameb + 1 + + + usenamec + 1 + + + usetranslator + 1 + + + useshortauthor + 1 + + + useshorteditor + 1 + + + usenarrator + 1 + + + useexecproducer + 1 + + + useexecdirector + 1 + + + usewith + 1 + + + + + datamodel + labelalphanametemplate + labelalphatemplate + inheritance + translit + uniquenametemplate + sortingnamekeytemplate + sortingtemplate + extradatespec + labelnamespec + labeltitlespec + labeldatespec + controlversion + alphaothers + 
sortalphaothers + presort + texencoding + bibencoding + sortingtemplatename + sortlocale + language + autolang + langhook + indexing + hyperref + backrefsetstyle + block + pagetracker + citecounter + citetracker + ibidtracker + idemtracker + opcittracker + loccittracker + labeldate + labeltime + dateera + date + time + eventdate + eventtime + origdate + origtime + urldate + urltime + alldatesusetime + alldates + alltimes + gregorianstart + autocite + notetype + uniquelist + uniquename + refsection + refsegment + citereset + sortlos + babel + datelabel + backrefstyle + arxiv + familyinits + giveninits + prefixinits + suffixinits + useafterword + useannotator + useauthor + usebookauthor + usecommentator + useeditor + useeditora + useeditorb + useeditorc + useforeword + useholder + useintroduction + usenamea + usenameb + usenamec + usetranslator + useshortauthor + useshorteditor + usenarrator + useexecproducer + useexecdirector + usewith + debug + loadfiles + safeinputenc + sortcase + sortupper + terseinits + abbreviate + dateabbrev + clearlang + sortcites + sortsets + backref + backreffloats + trackfloats + parentracker + labeldateusetime + datecirca + dateuncertain + dateusetime + eventdateusetime + origdateusetime + urldateusetime + julian + datezeros + timezeros + timezones + seconds + autopunct + punctfont + labelnumber + labelalpha + labeltitle + labeltitleyear + labeldateparts + pluralothers + nohashothers + nosortothers + noroman + singletitle + uniquetitle + uniquebaretitle + uniquework + uniqueprimaryauthor + defernumbers + locallabelwidth + bibwarn + useprefix + skipbib + skipbiblist + skiplab + dataonly + defernums + firstinits + sortfirstinits + sortgiveninits + labelyear + isbn + url + doi + eprint + related + apamaxprtauth + annotation + dashed + bibtexcaseprotection + mincrossrefs + minxrefs + maxnames + minnames + maxbibnames + minbibnames + maxcitenames + mincitenames + maxsortnames + minsortnames + maxitems + minitems + maxalphanames + minalphanames 
+ maxparens + dateeraauto + + + alphaothers + sortalphaothers + presort + indexing + citetracker + ibidtracker + idemtracker + opcittracker + loccittracker + uniquelist + uniquename + familyinits + giveninits + prefixinits + suffixinits + useafterword + useannotator + useauthor + usebookauthor + usecommentator + useeditor + useeditora + useeditorb + useeditorc + useforeword + useholder + useintroduction + usenamea + usenameb + usenamec + usetranslator + useshortauthor + useshorteditor + usenarrator + useexecproducer + useexecdirector + usewith + terseinits + abbreviate + dateabbrev + clearlang + labelnumber + labelalpha + labeltitle + labeltitleyear + labeldateparts + nohashothers + nosortothers + noroman + singletitle + uniquetitle + uniquebaretitle + uniquework + uniqueprimaryauthor + useprefix + skipbib + skipbiblist + skiplab + dataonly + skiplos + labelyear + isbn + url + doi + eprint + related + annotation + bibtexcaseprotection + labelalphatemplate + translit + sortexclusion + sortinclusion + labelnamespec + labeltitlespec + labeldatespec + maxnames + minnames + maxbibnames + minbibnames + maxcitenames + mincitenames + maxsortnames + minsortnames + maxitems + minitems + maxalphanames + minalphanames + + + noinherit + nametemplates + labelalphanametemplatename + uniquenametemplatename + sortingnamekeytemplatename + presort + indexing + citetracker + ibidtracker + idemtracker + opcittracker + loccittracker + uniquelist + uniquename + familyinits + giveninits + prefixinits + suffixinits + useafterword + useannotator + useauthor + usebookauthor + usecommentator + useeditor + useeditora + useeditorb + useeditorc + useforeword + useholder + useintroduction + usenamea + usenameb + usenamec + usetranslator + useshortauthor + useshorteditor + usenarrator + useexecproducer + useexecdirector + usewith + terseinits + abbreviate + dateabbrev + clearlang + labelnumber + labelalpha + labeltitle + labeltitleyear + labeldateparts + nohashothers + nosortothers + noroman + 
singletitle + uniquetitle + uniquebaretitle + uniquework + uniqueprimaryauthor + useprefix + skipbib + skipbiblist + skiplab + dataonly + skiplos + isbn + url + doi + eprint + related + annotation + bibtexcaseprotection + maxnames + minnames + maxbibnames + minbibnames + maxcitenames + mincitenames + maxsortnames + minsortnames + maxitems + minitems + maxalphanames + minalphanames + + + nametemplates + labelalphanametemplatename + uniquenametemplatename + sortingnamekeytemplatename + uniquelist + uniquename + familyinits + giveninits + prefixinits + suffixinits + terseinits + nohashothers + nosortothers + useprefix + + + nametemplates + labelalphanametemplatename + uniquenametemplatename + sortingnamekeytemplatename + uniquename + familyinits + giveninits + prefixinits + suffixinits + terseinits + useprefix + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + proceedings + + + + + inproceedings + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + prefix + family + + + + + shorthand + label + labelname + labelname + + + year + + + + + + labelyear + year + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + prefix + family + given + suffix + + + + + prefix + family + + + given + + + suffix + + + prefix + + + + + prefix + family + + + given + + + suffix + + + prefix + + + mm + + + + sf,sm,sn,pf,pm,pn,pp + family,given,prefix,suffix + boolean,integer,string,xml + default,transliteration,transcription,translation + + + article + artwork + audio + bibnote + book + bookinbook + booklet + collection + commentary + customa + customb + customc + customd + custome + customf + dataset + inbook + incollection + inproceedings + inreference + image + jurisdiction + legal + 
legislation + letter + manual + misc + movie + music + mvcollection + mvreference + mvproceedings + mvbook + online + patent + performance + periodical + proceedings + reference + report + review + set + software + standard + suppbook + suppcollection + suppperiodical + thesis + unpublished + video + xdata + presentation + constitution + legmaterial + legadminmaterial + nameonly + + + sortyear + volume + volumes + abstract + addendum + annotation + booksubtitle + booktitle + booktitleaddon + chapter + edition + eid + entrysubtype + eprintclass + eprinttype + eventtitle + eventtitleaddon + gender + howpublished + indexsorttitle + indextitle + isan + isbn + ismn + isrn + issn + issue + issuesubtitle + issuetitle + issuetitleaddon + iswc + journalsubtitle + journaltitle + journaltitleaddon + label + langid + langidopts + library + mainsubtitle + maintitle + maintitleaddon + nameaddon + note + number + origtitle + pagetotal + part + relatedstring + relatedtype + reprinttitle + series + shorthandintro + subtitle + title + titleaddon + usera + userb + userc + userd + usere + userf + venue + version + shorthand + shortjournal + shortseries + shorttitle + sorttitle + sortshorthand + sortkey + presort + institution + lista + listb + listc + listd + liste + listf + location + organization + origlocation + origpublisher + publisher + afterword + annotator + author + bookauthor + commentator + editor + editora + editorb + editorc + foreword + holder + introduction + namea + nameb + namec + translator + shortauthor + shorteditor + sortname + authortype + editoratype + editorbtype + editorctype + editortype + bookpagination + nameatype + namebtype + namectype + pagination + pubstate + type + language + origlanguage + crossref + xref + date + endyear + year + month + day + hour + minute + second + timezone + yeardivision + endmonth + endday + endhour + endminute + endsecond + endtimezone + endyeardivision + eventdate + eventendyear + eventyear + eventmonth + eventday + eventhour 
+ eventminute + eventsecond + eventtimezone + eventyeardivision + eventendmonth + eventendday + eventendhour + eventendminute + eventendsecond + eventendtimezone + eventendyeardivision + origdate + origendyear + origyear + origmonth + origday + orighour + origminute + origsecond + origtimezone + origyeardivision + origendmonth + origendday + origendhour + origendminute + origendsecond + origendtimezone + origendyeardivision + urldate + urlendyear + urlyear + urlmonth + urlday + urlhour + urlminute + urlsecond + urltimezone + urlyeardivision + urlendmonth + urlendday + urlendhour + urlendminute + urlendsecond + urlendtimezone + urlendyeardivision + doi + eprint + file + verba + verbb + verbc + url + xdata + ids + entryset + related + keywords + options + relatedoptions + pages + execute + narrator + execproducer + execdirector + with + citation + source + article + section + amendment + appentry + + + abstract + annotation + authortype + bookpagination + crossref + day + doi + eprint + eprintclass + eprinttype + endday + endhour + endminute + endmonth + endsecond + endtimezone + endyear + endyeardivision + entryset + entrysubtype + execute + file + gender + hour + ids + indextitle + indexsorttitle + isan + ismn + iswc + keywords + label + langid + langidopts + library + lista + listb + listc + listd + liste + listf + minute + month + namea + nameb + namec + nameatype + namebtype + namectype + nameaddon + options + origday + origendday + origendhour + origendminute + origendmonth + origendsecond + origendtimezone + origendyear + origendyeardivision + orighour + origminute + origmonth + origsecond + origtimezone + origyear + origyeardivision + origlocation + origpublisher + origtitle + pagination + presort + related + relatedoptions + relatedstring + relatedtype + second + shortauthor + shorteditor + shorthand + shorthandintro + shortjournal + shortseries + shorttitle + sortkey + sortname + sortshorthand + sorttitle + sortyear + timezone + url + urlday + urlendday + 
urlendhour + urlendminute + urlendmonth + urlendsecond + urlendtimezone + urlendyear + urlhour + urlminute + urlmonth + urlsecond + urltimezone + urlyear + usera + userb + userc + userd + usere + userf + verba + verbb + verbc + xdata + xref + year + yeardivision + + + set + entryset + + + article + addendum + annotator + author + commentator + editor + editora + editorb + editorc + editortype + editoratype + editorbtype + editorctype + eid + issn + issue + issuetitle + issuesubtitle + issuetitleaddon + journalsubtitle + journaltitle + journaltitleaddon + language + note + number + origlanguage + pages + pubstate + series + subtitle + title + titleaddon + translator + version + volume + + + bibnote + note + + + book + author + addendum + afterword + annotator + chapter + commentator + edition + editor + editora + editorb + editorc + editortype + editoratype + editorbtype + editorctype + eid + foreword + introduction + isbn + language + location + maintitle + maintitleaddon + mainsubtitle + note + number + origlanguage + pages + pagetotal + part + publisher + pubstate + series + subtitle + title + titleaddon + translator + volume + volumes + + + mvbook + addendum + afterword + annotator + author + commentator + edition + editor + editora + editorb + editorc + editortype + editoratype + editorbtype + editorctype + foreword + introduction + isbn + language + location + note + number + origlanguage + pagetotal + publisher + pubstate + series + subtitle + title + titleaddon + translator + volume + volumes + + + inbook + bookinbook + suppbook + addendum + afterword + annotator + author + booktitle + bookauthor + booksubtitle + booktitleaddon + chapter + commentator + edition + editor + editora + editorb + editorc + editortype + editoratype + editorbtype + editorctype + eid + foreword + introduction + isbn + language + location + mainsubtitle + maintitle + maintitleaddon + note + number + origlanguage + part + publisher + pages + pubstate + series + subtitle + title + 
titleaddon + translator + volume + volumes + + + booklet + addendum + author + chapter + editor + editortype + eid + howpublished + language + location + note + pages + pagetotal + pubstate + subtitle + title + titleaddon + type + + + collection + reference + addendum + afterword + annotator + chapter + commentator + edition + editor + editora + editorb + editorc + editortype + editoratype + editorbtype + editorctype + eid + foreword + introduction + isbn + language + location + mainsubtitle + maintitle + maintitleaddon + note + number + origlanguage + pages + pagetotal + part + publisher + pubstate + series + subtitle + title + titleaddon + translator + volume + volumes + + + mvcollection + mvreference + addendum + afterword + annotator + author + commentator + edition + editor + editora + editorb + editorc + editortype + editoratype + editorbtype + editorctype + foreword + introduction + isbn + language + location + note + number + origlanguage + publisher + pubstate + subtitle + title + titleaddon + translator + volume + volumes + + + incollection + suppcollection + inreference + addendum + afterword + annotator + author + booksubtitle + booktitle + booktitleaddon + chapter + commentator + edition + editor + editora + editorb + editorc + editortype + editoratype + editorbtype + editorctype + eid + foreword + introduction + isbn + language + location + mainsubtitle + maintitle + maintitleaddon + note + number + origlanguage + pages + part + publisher + pubstate + series + subtitle + title + titleaddon + translator + volume + volumes + + + dataset + addendum + author + edition + editor + editortype + language + location + note + number + organization + publisher + pubstate + series + subtitle + title + titleaddon + type + version + + + manual + addendum + author + chapter + edition + editor + editortype + eid + isbn + language + location + note + number + organization + pages + pagetotal + publisher + pubstate + series + subtitle + title + titleaddon + type + 
version + + + misc + software + addendum + author + editor + editortype + howpublished + language + location + note + organization + pubstate + subtitle + title + titleaddon + type + version + + + online + addendum + author + editor + editortype + language + note + organization + pubstate + subtitle + title + titleaddon + version + + + patent + addendum + author + holder + location + note + number + pubstate + subtitle + title + titleaddon + type + version + + + periodical + addendum + editor + editora + editorb + editorc + editortype + editoratype + editorbtype + editorctype + issn + issue + issuesubtitle + issuetitle + issuetitleaddon + language + note + number + pubstate + series + subtitle + title + titleaddon + volume + yeardivision + + + mvproceedings + addendum + editor + editortype + eventday + eventendday + eventendhour + eventendminute + eventendmonth + eventendsecond + eventendtimezone + eventendyear + eventendyeardivision + eventhour + eventminute + eventmonth + eventsecond + eventtimezone + eventyear + eventyeardivision + eventtitle + eventtitleaddon + isbn + language + location + note + number + organization + pagetotal + publisher + pubstate + series + subtitle + title + titleaddon + venue + volumes + + + proceedings + addendum + chapter + editor + editortype + eid + eventday + eventendday + eventendhour + eventendminute + eventendmonth + eventendsecond + eventendtimezone + eventendyear + eventendyeardivision + eventhour + eventminute + eventmonth + eventsecond + eventtimezone + eventyear + eventyeardivision + eventtitle + eventtitleaddon + isbn + language + location + mainsubtitle + maintitle + maintitleaddon + note + number + organization + pages + pagetotal + part + publisher + pubstate + series + subtitle + title + titleaddon + venue + volume + volumes + + + inproceedings + addendum + author + booksubtitle + booktitle + booktitleaddon + chapter + editor + editortype + eid + eventday + eventendday + eventendhour + eventendminute + eventendmonth + 
eventendsecond + eventendtimezone + eventendyear + eventendyeardivision + eventhour + eventminute + eventmonth + eventsecond + eventtimezone + eventyear + eventyeardivision + eventtitle + eventtitleaddon + isbn + language + location + mainsubtitle + maintitle + maintitleaddon + note + number + organization + pages + part + publisher + pubstate + series + subtitle + title + titleaddon + venue + volume + volumes + + + report + addendum + author + chapter + eid + institution + isrn + language + location + note + number + pages + pagetotal + pubstate + subtitle + title + titleaddon + type + version + + + thesis + addendum + author + chapter + eid + institution + language + location + note + pages + pagetotal + pubstate + subtitle + title + titleaddon + type + + + unpublished + addendum + author + eventday + eventendday + eventendhour + eventendminute + eventendmonth + eventendsecond + eventendtimezone + eventendyear + eventendyeardivision + eventhour + eventminute + eventmonth + eventsecond + eventtimezone + eventyear + eventyeardivision + eventtitle + eventtitleaddon + howpublished + language + location + note + pubstate + subtitle + title + titleaddon + type + venue + + + with + narrator + execproducer + execdirector + + + jurisdiction + organization citation + + + legmaterial + source + + + legadminmaterial + citation + source + + + constitution + article + section + amendment + + + software + appentry + + + report + addendum + author + authortype + chapter + doi + eprint + eprintclass + eprinttype + institution + isrn + language + location + note + number + pages + pagetotal + pubstate + subtitle + title + titleaddon + type + version + + + presentation + addendum + author + booksubtitle + booktitle + booktitleaddon + chapter + doi + editor + editortype + eprint + eprintclass + eprinttype + eventday + eventendday + eventendhour + eventendminute + eventendmonth + eventendseason + eventendsecond + eventendtimezone + eventendyear + eventhour + eventminute + eventmonth 
+ eventseason + eventsecond + eventtimezone + eventyear + eventtitle + eventtitleaddon + isbn + language + location + mainsubtitle + maintitle + maintitleaddon + note + number + organization + pages + part + publisher + pubstate + series + subtitle + title + titleaddon + venue + volume + volumes + + + abstract + addendum + afterword + annotator + author + bookauthor + booksubtitle + booktitle + booktitleaddon + chapter + commentator + editor + editora + editorb + editorc + foreword + holder + institution + introduction + issuesubtitle + issuetitle + issuetitleaddon + journalsubtitle + journaltitle + journaltitleaddon + location + mainsubtitle + maintitle + maintitleaddon + nameaddon + note + organization + origlanguage + origlocation + origpublisher + origtitle + part + publisher + relatedstring + series + shortauthor + shorteditor + shorthand + shortjournal + shortseries + shorttitle + sortname + sortshorthand + sorttitle + subtitle + title + titleaddon + translator + venue + + + article + book + inbook + bookinbook + suppbook + booklet + collection + incollection + suppcollection + manual + misc + mvbook + mvcollection + online + patent + periodical + suppperiodical + proceedings + inproceedings + reference + inreference + report + set + thesis + unpublished + + + date + year + + + + + set + + entryset + + + + article + + author + journaltitle + title + + + + book + mvbook + + author + title + + + + inbook + bookinbook + suppbook + + author + title + booktitle + + + + booklet + + + author + editor + + title + + + + collection + reference + mvcollection + mvreference + + editor + title + + + + incollection + suppcollection + inreference + + author + editor + title + booktitle + + + + dataset + + title + + + + manual + + title + + + + misc + software + + title + + + + online + + title + + url + doi + eprint + + + + + patent + + author + title + number + + + + periodical + + editor + title + + + + proceedings + mvproceedings + + title + + + + inproceedings + + 
author + title + booktitle + + + + report + + author + title + type + institution + + + + thesis + + author + title + type + institution + + + + unpublished + + author + title + + + + + isbn + + + issn + + + ismn + + + gender + + + + book + inbook + article + report + + author + title + + + + + + + Bibliography.bib + + + baden_three_2022 + scharkow_thematic_2013 + burscher_teaching_2014 + van_atteveldt_validity_2021 + hede_toxicity_2021 + baden_three_2022 + hase_computational_2022 + fong_machine_2021 + rauchfleisch_false_2020 + burscher_using_2015 + hede_toxicity_2021 + millimet_accounting_2022 + bachl_correcting_2017 + geis_statistical_2021 + van_atteveldt_validity_2021 + carroll_measurement_2006 + buonaccorsi_measurement_2010 + yi_handbook_2021 + fong_machine_2021 + fong_machine_2021 + zhang_how_2021 + zhang_how_2021 + blackwell_unified_2017-1 + blackwell_unified_2017-1 + carroll_measurement_2006 + carroll_measurement_2006 + scharkow_how_2017 + cjadams_jigsaw_2019 + hopp_social_2019 + kim_distorting_2021 + votta_going_2023 + grimmer_machine_2021-1 + breiman_statistical_2001 + barocas_fairness_2019 + bender_dangers_2021 + hase_computational_2022 + baden_three_2022 + song_validations_2020 + carroll_measurement_2006 + loken_measurement_2017 + van_smeden_reflection_2020 + carroll_measurement_2006 + fong_machine_2021 + zhang_how_2021 + loken_measurement_2017 + carroll_measurement_2006 + loken_measurement_2017 + van_smeden_reflection_2020 + hede_toxicity_2021 + hede_toxicity_2021 + teblunthuis_effects_2021 + hosseini_deceiving_2017 + rauchfleisch_false_2020 + scharkow_how_2017 + scharkow_how_2017 + loken_measurement_2017 + grimmer_text_2013 + oehmer-pedrazzi_automated_2023 + baden_three_2022 + hase_computational_2022 + junger_unboxing_2022 + song_validations_2020 + opperhuizen_framing_2019 + vermeer_online_2020 + hopkins_method_2010 + hopkins_method_2010 + reiss_reporting_2022 + carroll_measurement_2006 + fuller_measurement_1987 + fong_machine_2021 + fong_machine_2021 
+ blackwell_unified_2017-1 + zhang_how_2021 + zhang_how_2021 + carroll_measurement_2006 + fong_machine_2021 + fong_machine_2021 + fong_machine_2021 + fong_machine_2021 + fong_machine_2021 + fong_machine_2021 + fong_machine_2021 + blackwell_unified_2017-1 + blackwell_unified_2017-1 + blackwell_unified_2017-1 + blackwell_unified_2017-1 + zhang_how_2021 + zhang_how_2021 + zhang_how_2021 + zhang_how_2021 + carroll_measurement_2006 + carroll_measurement_2006 + carroll_measurement_2006 + carroll_measurement_2006 + carroll_measurement_2006 + carroll_measurement_2006 + williams_bayesian_1998 + song_validations_2020 + bachl_correcting_2017 + geis_statistical_2021 + fong_machine_2021 + zhang_how_2021 + mooney_monte_1997 + fong_machine_2021 + blackwell_unified_2017-1 + zhang_how_2021 + fong_machine_2021 + zhang_how_2021 + zhang_how_2021 + carroll_measurement_2006 + pearl_fusion_1986 + zhang_how_2021 + zhang_how_2021 + carroll_measurement_2006 + fong_machine_2021 + grimmer_text_2013 + grimmer_text_2013 + grimmer_text_2013 + baden_three_2022 + baden_three_2022 + geis_statistical_2021 + bachl_correcting_2017 + fong_machine_2021 + fong_machine_2021 + carroll_measurement_2006 + pipal_if_2022 + reiss_reporting_2022 + bachl_correcting_2017 + bachl_correcting_2017 + carroll_measurement_2006 + pepe_insights_2007 + cjadams_jigsaw_2019 + baden_three_2022 + hase_computational_2022 + junger_unboxing_2022 + song_validations_2020 + carroll_measurement_2006 + carroll_measurement_2006 + carroll_measurement_2006 + yi_handbook_2021 + carroll_measurement_2006 + carroll_measurement_2006 + carroll_measurement_2006 + williams_bayesian_1998 + carroll_measurement_2006 + carroll_measurement_2006 + + + + + presort + + + sortkey + + + sortname + author + editor + sorttitle + title + + + pubstate + + + sortyear + year + -2000000000 + + + month + -2000000000 + + + day + -2000000000 + + + sorttitle + title + + + volume + 0 + + + + + + + + diff --git a/article.fdb_latexmk b/article.fdb_latexmk new file mode 
100644 index 0000000..61e8602 --- /dev/null +++ b/article.fdb_latexmk @@ -0,0 +1,311 @@ +# Fdb version 3 +["biber article"] 1667261561 "article.bcf" "article.bbl" "article" 1668898249 + "Bibliography.bib" 1667585526 195120 cfbc019b29ac89d1f76bfddaf79b2d04 "" + "article.bcf" 1668898249 137080 765275b281dbe74e467dc4835cfe345a "pdflatex" + (generated) + "article.bbl" + "article.blg" +["pdflatex"] 1668898244 "article.tex" "article.pdf" "article" 1668898249 + "/etc/texmf/web2c/texmf.cnf" 1666310162 475 c0e671620eb5563b2130f56340a5fde8 "" + "/usr/share/texlive/texmf-dist/fonts/map/fontname/texfonts.map" 1577235249 3524 cb3e574dea2d1052e39280babc910dc8 "" + "/usr/share/texlive/texmf-dist/fonts/tfm/public/cm/cmr12.tfm" 1136768653 1288 655e228510b4c2a1abe905c368440826 "" + "/usr/share/texlive/texmf-dist/tex/context/base/mkii/supp-pdf.mkii" 1461363279 71627 94eb9990bed73c364d7f53f960cc8c5b "" + "/usr/share/texlive/texmf-dist/tex/generic/atbegshi/atbegshi.sty" 1575674566 24708 5584a51a7101caf7e6bbf1fc27d8f7b1 "" + "/usr/share/texlive/texmf-dist/tex/generic/babel-english/american.ldf" 1496785618 2768 564633551858ab4a7568c71525151d11 "" + "/usr/share/texlive/texmf-dist/tex/generic/babel-english/english.ldf" 1496785618 7008 9ff5fdcc865b01beca2b0fe4a46231d4 "" + "/usr/share/texlive/texmf-dist/tex/generic/babel/babel.sty" 1658348618 151308 f48d89beb96c9b108345f21bd476da55 "" + "/usr/share/texlive/texmf-dist/tex/generic/babel/locale/en/babel-american.tex" 1656274800 339 4c91b3e348320102d0fa5bf0680df231 "" + "/usr/share/texlive/texmf-dist/tex/generic/babel/locale/en/babel-en-US.ini" 1654547330 4191 48296e139c650c07eee6beafc5250bf6 "" + "/usr/share/texlive/texmf-dist/tex/generic/babel/txtbabel.def" 1643231327 5233 d5e383ed66bf272b71b1a90b596e21c6 "" + "/usr/share/texlive/texmf-dist/tex/generic/bigintcalc/bigintcalc.sty" 1576625341 40635 c40361e206be584d448876bba8a64a3b "" + "/usr/share/texlive/texmf-dist/tex/generic/bitset/bitset.sty" 1576016050 33961 6b5c75130e435b2bfdb9f480a09a39f9 
"" + "/usr/share/texlive/texmf-dist/tex/generic/etexcmds/etexcmds.sty" 1576625273 7734 b98cbb34c81f667027c1e3ebdbfce34b "" + "/usr/share/texlive/texmf-dist/tex/generic/gettitlestring/gettitlestring.sty" 1576625223 8371 9d55b8bd010bc717624922fb3477d92e "" + "/usr/share/texlive/texmf-dist/tex/generic/iftex/iftex.sty" 1644112042 7237 bdd120a32c8fdb4b433cf9ca2e7cd98a "" + "/usr/share/texlive/texmf-dist/tex/generic/iftex/ifvtex.sty" 1572645307 1057 525c2192b5febbd8c1f662c9468335bb "" + "/usr/share/texlive/texmf-dist/tex/generic/infwarerr/infwarerr.sty" 1575499628 8356 7bbb2c2373aa810be568c29e333da8ed "" + "/usr/share/texlive/texmf-dist/tex/generic/intcalc/intcalc.sty" 1576625065 31769 002a487f55041f8e805cfbf6385ffd97 "" + "/usr/share/texlive/texmf-dist/tex/generic/kvdefinekeys/kvdefinekeys.sty" 1576878844 5412 d5a2436094cd7be85769db90f29250a6 "" + "/usr/share/texlive/texmf-dist/tex/generic/kvsetkeys/kvsetkeys.sty" 1576624944 13807 952b0226d4efca026f0e19dd266dcc22 "" + "/usr/share/texlive/texmf-dist/tex/generic/ltxcmds/ltxcmds.sty" 1600895880 17859 4409f8f50cd365c68e684407e5350b1b "" + "/usr/share/texlive/texmf-dist/tex/generic/pdfescape/pdfescape.sty" 1576015897 19007 15924f7228aca6c6d184b115f4baa231 "" + "/usr/share/texlive/texmf-dist/tex/generic/pdftexcmds/pdftexcmds.sty" 1593379760 20089 80423eac55aa175305d35b49e04fe23b "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcore.code.tex" 1601326656 992 855ff26741653ab54814101ca36e153c "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcorearrows.code.tex" 1601326656 43820 1fef971b75380574ab35a0d37fd92608 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcoreexternal.code.tex" 1601326656 19324 f4e4c6403dd0f1605fd20ed22fa79dea "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcoregraphicstate.code.tex" 1601326656 6038 ccb406740cc3f03bbfb58ad504fe8c27 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcoreimage.code.tex" 1601326656 6944 
e12f8f7a7364ddf66f93ba30fb3a3742 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcorelayers.code.tex" 1601326656 4883 42daaf41e27c3735286e23e48d2d7af9 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcoreobjects.code.tex" 1601326656 2544 8c06d2a7f0f469616ac9e13db6d2f842 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcorepathconstruct.code.tex" 1601326656 44195 5e390c414de027626ca5e2df888fa68d "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcorepathprocessing.code.tex" 1601326656 17311 2ef6b2e29e2fc6a2fc8d6d652176e257 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcorepathusage.code.tex" 1601326656 21302 788a79944eb22192a4929e46963a3067 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcorepatterns.code.tex" 1601326656 9690 01feb7cde25d4293ef36eef45123eb80 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcorepoints.code.tex" 1601326656 33335 dd1fa4814d4e51f18be97d88bf0da60c "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcorequick.code.tex" 1601326656 2965 4c2b1f4e0826925746439038172e5d6f "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcorerdf.code.tex" 1601326656 5196 2cc249e0ee7e03da5f5f6589257b1e5b "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcorescopes.code.tex" 1601326656 20726 d4c8db1e2e53b72721d29916314a22ea "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcoreshade.code.tex" 1601326656 35249 abd4adf948f960299a4b3d27c5dddf46 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcoretransformations.code.tex" 1601326656 21989 fdc867d05d228316de137a9fc5ec3bbe "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcoretransparency.code.tex" 1601326656 8893 e851de2175338fdf7c17f3e091d94618 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryarrows.code.tex" 1601326656 319 225dfe354ba678ff3c194968db39d447 "" 
+ "/usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryfadings.code.tex" 1601326656 1179 5483d86c1582c569e665c74efab6281f "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibrarypositioning.code.tex" 1601326656 3937 3f208572dd82c71103831da976d74f1a "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshadows.code.tex" 1601326656 2889 d698e3a959304efa342d47e3bb86da5b "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshapes.arrows.code.tex" 1601326656 410 048d1174dabde96757a5387b8f23d968 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshapes.callouts.code.tex" 1601326656 1201 8bd51e254d3ecf0cd2f21edd9ab6f1bb "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshapes.code.tex" 1601326656 494 8de62576191924285b021f4fc4292e16 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshapes.geometric.code.tex" 1601326656 339 be0fe46d92a80e3385dd6a83511a46f2 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshapes.misc.code.tex" 1601326656 329 ba6d5440f8c16779c2384e0614158266 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshapes.multipart.code.tex" 1601326656 919 938802205ca20d7c36615aabc4d34be2 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshapes.symbols.code.tex" 1601326656 475 4b4056fe07caa0603fede9a162fe666d "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibrarysvg.path.code.tex" 1601326656 911 6574fc8fd117350d2b19ffbc21415df7 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibrarytopaths.code.tex" 1608933718 11518 738408f795261b70ce8dd47459171309 "" + 
"/usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/tikz.code.tex" 1621110968 186007 6e7dfe0bd57520fd5f91641aa72dcac8 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/pgflibraryarrows.code.tex" 1601326656 31874 89148c383c49d4c72114a76fd0062299 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/pgflibraryfadings.code.tex" 1601326656 2563 d5b174eb7709fd6bdcc2f70953dbdf8e "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/pgflibraryplothandlers.code.tex" 1601326656 32995 ac577023e12c0e4bd8aa420b2e852d1a "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/pgflibrarysvg.path.code.tex" 1601326656 24742 2664b65ba02d7355a10bbd4e3a69b2e7 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/shapes/pgflibraryshapes.arrows.code.tex" 1601326656 91587 e30123381f7b9bcf1341c31c6be18b94 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/shapes/pgflibraryshapes.callouts.code.tex" 1601326656 33336 427c354e28a4802ffd781da22ae9f383 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/shapes/pgflibraryshapes.geometric.code.tex" 1606168878 160993 6a81d63e475cc43874b46ed32a0a37c8 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/shapes/pgflibraryshapes.misc.code.tex" 1601326656 46241 588910a2f1e0a99f2c3e14490683c20d "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/shapes/pgflibraryshapes.multipart.code.tex" 1601326656 62281 aff261ef10ba6cbe8e3c872a38c05a61 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/shapes/pgflibraryshapes.symbols.code.tex" 1601326656 90515 e30b2c9c93aacc373e47917c0c2a48ed "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfint.code.tex" 1557692582 3063 8c415c68a0f3394e45cfeca0b65f6ee6 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmath.code.tex" 1601326656 521 8e224a7af69b7fee4451d1bf76b46654 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathcalc.code.tex" 1601326656 13391 84d29568c13bdce4133ab4a214711112 "" 
+ "/usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathfloat.code.tex" 1601326656 104935 184ed87524e76d4957860df4ce0cd1c3 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathfunctions.base.code.tex" 1601326656 10165 cec5fa73d49da442e56efc2d605ef154 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathfunctions.basic.code.tex" 1601326656 28178 41c17713108e0795aac6fef3d275fbca "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathfunctions.code.tex" 1601326656 9989 c55967bf45126ff9b061fa2ca0c4694f "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathfunctions.comparison.code.tex" 1601326656 3865 ac538ab80c5cf82b345016e474786549 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathfunctions.integerarithmetics.code.tex" 1557692582 3177 27d85c44fbfe09ff3b2cf2879e3ea434 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathfunctions.misc.code.tex" 1621110968 11024 0179538121bc2dba172013a3ef89519f "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathfunctions.random.code.tex" 1608933718 7854 4176998eeefd8745ac6d2d4bd9c98451 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathfunctions.round.code.tex" 1601326656 3379 781797a101f647bab82741a99944a229 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathfunctions.trigonometric.code.tex" 1601326656 92405 f515f31275db273f97b9d8f52e1b0736 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathparser.code.tex" 1601326656 37376 11cd75aac3da1c1b152b2848f30adc14 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathutil.code.tex" 1601326656 8471 c2883569d03f69e8e1cabfef4999cfd7 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/modules/pgfmodulematrix.code.tex" 1601326656 21201 08d231a2386e2b61d64641c50dc15abd "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/modules/pgfmoduleparser.code.tex" 1601326656 19581 c8cc0eb77d3c8a725f41ccfbc23bbb9d "" + 
"/usr/share/texlive/texmf-dist/tex/generic/pgf/modules/pgfmoduleplot.code.tex" 1601326656 16121 346f9013d34804439f7436ff6786cef7 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/modules/pgfmoduleshapes.code.tex" 1621110968 44784 cedaa399d15f95e68e22906e2cc09ef8 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/pgf.revision.tex" 1621110968 465 d68603f8b820ea4a08cce534944db581 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/systemlayer/pgf.cfg" 1601326656 926 2963ea0dcf6cc6c0a770b69ec46a477b "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/systemlayer/pgfsys-common-pdf.def" 1601326656 5546 f3f24d7898386cb7daac70bdd2c4d6dc "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/systemlayer/pgfsys-pdftex.def" 1601326656 12601 4786e597516eddd82097506db7cfa098 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/systemlayer/pgfsys.code.tex" 1621110968 61163 9b2eefc24e021323e0fc140e9826d016 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/systemlayer/pgfsysprotocol.code.tex" 1601326656 1896 b8e0ca0ac371d74c0ca05583f6313c91 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/systemlayer/pgfsyssoftpath.code.tex" 1601326656 7778 53c8b5623d80238f6a20aa1df1868e63 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgffor.code.tex" 1606168878 23997 a4bed72405fa644418bea7eac2887006 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgfkeys.code.tex" 1621110968 37060 797782f0eb50075c9bc952374d9a659a "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgfkeysfiltered.code.tex" 1601326656 37431 9abe862035de1b29c7a677f3205e3d9f "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgfrcs.code.tex" 1601326656 4494 af17fb7efeafe423710479858e42fa7e "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgfutil-common-lists.tex" 1601326656 7251 fb18c67117e09c64de82267e12cd8aa4 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgfutil-common.tex" 1621110968 29274 e15c5b7157d21523bd9c9f1dfa146b8e "" + 
"/usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgfutil-latex.def" 1621110968 6825 a2b0ea5b539dda0625e99dd15785ab59 "" + "/usr/share/texlive/texmf-dist/tex/generic/uniquecounter/uniquecounter.sty" 1576624663 7008 f92eaa0a3872ed622bbf538217cd2ab7 "" + "/usr/share/texlive/texmf-dist/tex/latex/amsmath/amsbsy.sty" 1654720880 2222 78b930a5a6e3dc2ac69b78c2057b94d7 "" + "/usr/share/texlive/texmf-dist/tex/latex/amsmath/amsgen.sty" 1654720880 4173 c989ee3ced31418e3593916ab26c793a "" + "/usr/share/texlive/texmf-dist/tex/latex/amsmath/amsmath.sty" 1654720880 88393 1adf6fa3f245270d06e3d4f8910f7fc5 "" + "/usr/share/texlive/texmf-dist/tex/latex/amsmath/amsopn.sty" 1654720880 4474 f04cd1cc7bd76eb033e6fb12eb6a0d77 "" + "/usr/share/texlive/texmf-dist/tex/latex/amsmath/amstext.sty" 1654720880 2444 70065bddd85997dc1fd0bb7ae634e5fa "" + "/usr/share/texlive/texmf-dist/tex/latex/apa7/apa7.cls" 1642541407 60920 30ce14ae2be740aa48eef81166b905b3 "" + "/usr/share/texlive/texmf-dist/tex/latex/apa7/config/APA7american.txt" 1642541407 3217 0343f6cfdd853e7dea4ca37ce622214f "" + "/usr/share/texlive/texmf-dist/tex/latex/atveryend/atveryend.sty" 1576191570 19336 ce7ae9438967282886b3b036cfad1e4d "" + "/usr/share/texlive/texmf-dist/tex/latex/auxhook/auxhook.sty" 1576625391 3935 57aa3c3e203a5c2effb4d2bd2efbc323 "" + "/usr/share/texlive/texmf-dist/tex/latex/base/alltt.sty" 1654720880 3137 837d2e4f1defd7c190a44408f494ec95 "" + "/usr/share/texlive/texmf-dist/tex/latex/base/article.cls" 1654720880 20144 7555b7429d80bef287ebb82117811acc "" + "/usr/share/texlive/texmf-dist/tex/latex/base/atbegshi-ltx.sty" 1654720880 3122 8df402c6591ccc8ed35ce64c1c49c50b "" + "/usr/share/texlive/texmf-dist/tex/latex/base/atveryend-ltx.sty" 1654720880 2462 2ab3964e30f8e7a2977395016edcbbc6 "" + "/usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty" 1654720880 5119 4ce42f43368f652f9c9522d943cce8e4 "" + "/usr/share/texlive/texmf-dist/tex/latex/base/ifthen.sty" 1654720880 5319 48d7f3cfa322abd2788e3c09d624b922 "" + 
"/usr/share/texlive/texmf-dist/tex/latex/base/inputenc.sty" 1654720880 5048 84b05796b49b69e2d4257d537721c960 "" + "/usr/share/texlive/texmf-dist/tex/latex/base/size12.clo" 1654720880 8449 7fbdc9c8596083427317c1e525489c81 "" + "/usr/share/texlive/texmf-dist/tex/latex/base/textcomp.sty" 1654720880 2894 f2f8ee7d4fb94263f9f255fa22cab2d3 "" + "/usr/share/texlive/texmf-dist/tex/latex/biblatex-apa/american-apa.lbx" 1656163846 15338 f9a26813c7ed13105ae11354f255582b "" + "/usr/share/texlive/texmf-dist/tex/latex/biblatex-apa/apa.bbx" 1656163846 68091 f4525fa4793d60658a3e9448826fb7fe "" + "/usr/share/texlive/texmf-dist/tex/latex/biblatex-apa/apa.cbx" 1656163846 20185 96097b806b41a55551394c1afad28064 "" + "/usr/share/texlive/texmf-dist/tex/latex/biblatex-apa/apa.dbx" 1656163846 2676 5880b0b8d6c12bbdfb64146ad361fe84 "" + "/usr/share/texlive/texmf-dist/tex/latex/biblatex-apa/english-apa.lbx" 1656163846 9935 19aa139f6a8ff2d300f300a6fb282646 "" + "/usr/share/texlive/texmf-dist/tex/latex/biblatex/bbx/standard.bbx" 1609451401 25680 409c3f3d570418bc545e8065bebd0688 "" + "/usr/share/texlive/texmf-dist/tex/latex/biblatex/biblatex.cfg" 1342308459 69 249fa6df04d948e51b6d5c67bea30c42 "" + "/usr/share/texlive/texmf-dist/tex/latex/biblatex/biblatex.def" 1656017808 92456 21e687f013958a6cb57adaa61a04572a "" + "/usr/share/texlive/texmf-dist/tex/latex/biblatex/biblatex.sty" 1657655400 526811 a1f8c6dfa1788c26d4b7587a2e99a625 "" + "/usr/share/texlive/texmf-dist/tex/latex/biblatex/blx-case-expl3.sty" 1609451401 8433 72f8188742e7214b7068f345cd0287ac "" + "/usr/share/texlive/texmf-dist/tex/latex/biblatex/blx-compat.def" 1643926307 13919 5426dbe90e723f089052b4e908b56ef9 "" + "/usr/share/texlive/texmf-dist/tex/latex/biblatex/blx-dm.def" 1643926307 32455 8d3e554836db11aab80a8e11be62e1b1 "" + "/usr/share/texlive/texmf-dist/tex/latex/biblatex/blx-natbib.def" 1541279461 2190 4b4fcc6752fa7201177431e523c40d74 "" + "/usr/share/texlive/texmf-dist/tex/latex/biblatex/lbx/american.lbx" 1342308459 169 
40f2892b6b9cee1ffa9c07b78605a5a1 "" + "/usr/share/texlive/texmf-dist/tex/latex/biblatex/lbx/english.lbx" 1643926307 39965 48ce9ce3350aba9457f1020b1deba5cf "" + "/usr/share/texlive/texmf-dist/tex/latex/booktabs/booktabs.sty" 1579038678 6078 f1cb470c9199e7110a27851508ed7a5c "" + "/usr/share/texlive/texmf-dist/tex/latex/caption/caption.sty" 1647548653 54291 b8e5c600d4aa37b48a740dd2a6c26163 "" + "/usr/share/texlive/texmf-dist/tex/latex/caption/caption3.sty" 1647548653 71241 d2cd3a1c5acef9cb31f945b93c0bb6e3 "" + "/usr/share/texlive/texmf-dist/tex/latex/caption/subcaption.sty" 1645391520 11546 6c5257d230d8c5626812b45bc2f31212 "" + "/usr/share/texlive/texmf-dist/tex/latex/csquotes/csquotes.cfg" 1429144587 7068 06f8d141725d114847527a66439066b6 "" + "/usr/share/texlive/texmf-dist/tex/latex/csquotes/csquotes.def" 1614030765 20781 dc1bec6693d5466d8972ecc6b81f9f0b "" + "/usr/share/texlive/texmf-dist/tex/latex/csquotes/csquotes.sty" 1614030765 62518 6e0d74482f5cb16b3b0755031e72faf1 "" + "/usr/share/texlive/texmf-dist/tex/latex/endfloat/endfloat.sty" 1557078193 17055 97f1b7400dd9c8c5e7d57643a8807fd7 "" + "/usr/share/texlive/texmf-dist/tex/latex/epstopdf-pkg/epstopdf-base.sty" 1579991033 13886 d1306dcf79a944f6988e688c1785f9ce "" + "/usr/share/texlive/texmf-dist/tex/latex/epstopdf-pkg/epstopdf.sty" 1579991033 4393 47f27fd4d95928d20b1885ba77de11d2 "" + "/usr/share/texlive/texmf-dist/tex/latex/etoolbox/etoolbox.sty" 1601931149 46845 3b58f70c6e861a13d927bff09d35ecbc "" + "/usr/share/texlive/texmf-dist/tex/latex/fancyhdr/fancyhdr.sty" 1652903436 17280 7856508378dfe40ed74280f5b81e31b6 "" + "/usr/share/texlive/texmf-dist/tex/latex/float/float.sty" 1137110151 6749 16d2656a1984957e674b149555f1ea1d "" + "/usr/share/texlive/texmf-dist/tex/latex/framed/framed.sty" 1338588508 22449 7ec15c16d0d66790f28e90343c5434a3 "" + "/usr/share/texlive/texmf-dist/tex/latex/geometry/geometry.sty" 1578002852 41601 9cf6c5257b1bc7af01a58859749dd37a "" + 
"/usr/share/texlive/texmf-dist/tex/latex/graphics-cfg/color.cfg" 1459978653 1213 620bba36b25224fa9b7e1ccb4ecb76fd "" + "/usr/share/texlive/texmf-dist/tex/latex/graphics-cfg/graphics.cfg" 1465944070 1224 978390e9c2234eab29404bc21b268d1e "" + "/usr/share/texlive/texmf-dist/tex/latex/graphics-def/pdftex.def" 1601931164 19103 48d29b6e2a64cb717117ef65f107b404 "" + "/usr/share/texlive/texmf-dist/tex/latex/graphics/color.sty" 1654720880 7233 e46ce9241d2b2ca2a78155475fdd557a "" + "/usr/share/texlive/texmf-dist/tex/latex/graphics/graphics.sty" 1654720880 18387 8f900a490197ebaf93c02ae9476d4b09 "" + "/usr/share/texlive/texmf-dist/tex/latex/graphics/graphicx.sty" 1654720880 8010 a8d949cbdbc5c983593827c9eec252e1 "" + "/usr/share/texlive/texmf-dist/tex/latex/graphics/keyval.sty" 1654720880 2671 7e67d78d9b88c845599a85b2d41f2e39 "" + "/usr/share/texlive/texmf-dist/tex/latex/graphics/mathcolor.ltx" 1654720880 3171 1cf0d440b5464e2f034398ce4ef36f75 "" + "/usr/share/texlive/texmf-dist/tex/latex/graphics/trig.sty" 1654720880 4023 293ea1c16429fc0c4cf605f4da1791a9 "" + "/usr/share/texlive/texmf-dist/tex/latex/grfext/grfext.sty" 1575499774 7133 b94bbacbee6e4fdccdc7f810b2aec370 "" + "/usr/share/texlive/texmf-dist/tex/latex/hycolor/hycolor.sty" 1580250785 17914 4c28a13fc3d975e6e81c9bea1d697276 "" + "/usr/share/texlive/texmf-dist/tex/latex/hyperref/hpdftex.def" 1655759286 47964 eeb2a5ee738d9e82276d44d01b7f8855 "" + "/usr/share/texlive/texmf-dist/tex/latex/hyperref/hyperref.sty" 1655759286 222567 bf49823ea499fb02153a3135548e8552 "" + "/usr/share/texlive/texmf-dist/tex/latex/hyperref/nameref.sty" 1652818262 12951 45609f529c67717a6d5046d7f3d77f03 "" + "/usr/share/texlive/texmf-dist/tex/latex/hyperref/pd1enc.def" 1655759286 14249 5722edfd0a97304b67eaad1229597886 "" + "/usr/share/texlive/texmf-dist/tex/latex/hyperref/puenc.def" 1655759286 117125 21f7791400296a3ca7ace2461e9f1794 "" + "/usr/share/texlive/texmf-dist/tex/latex/kvoptions/kvoptions.sty" 1655478651 22555 6d8e155cfef6d82c3d5c742fea7c992e 
"" + "/usr/share/texlive/texmf-dist/tex/latex/l3backend/l3backend-pdftex.def" 1656792629 31050 293f2cc98a3575c4228b5157bf850b8f "" + "/usr/share/texlive/texmf-dist/tex/latex/l3kernel/expl3.sty" 1657921463 6107 b3c06bf83accea84563c47d52b03b82f "" + "/usr/share/texlive/texmf-dist/tex/latex/l3packages/xparse/xparse.sty" 1656017767 6812 d2f733947d73940b228845829d585700 "" + "/usr/share/texlive/texmf-dist/tex/latex/latexconfig/epstopdf-sys.cfg" 1279039959 678 4792914a8f45be57bb98413425e4c7af "" + "/usr/share/texlive/texmf-dist/tex/latex/letltxmacro/letltxmacro.sty" 1575499565 5766 13a9e8766c47f30327caf893ece86ac8 "" + "/usr/share/texlive/texmf-dist/tex/latex/listings/listings.cfg" 1585170648 1830 e31effa752c61538383451ae21332364 "" + "/usr/share/texlive/texmf-dist/tex/latex/listings/listings.sty" 1585170648 80964 64e57373f36316e4a09b517cbf1aba2e "" + "/usr/share/texlive/texmf-dist/tex/latex/listings/lstlang1.sty" 1585170648 204271 bae5b2d457283e99567249c1990510be "" + "/usr/share/texlive/texmf-dist/tex/latex/listings/lstlang2.sty" 1585170648 93649 5c560d0867c5758cf33716b703b23a26 "" + "/usr/share/texlive/texmf-dist/tex/latex/listings/lstlang3.sty" 1585170648 90075 280a31c119f13eac0a3dfaff2137635b "" + "/usr/share/texlive/texmf-dist/tex/latex/listings/lstmisc.sty" 1585170648 77022 ee25ce086f4a79d8cf73bac6f94c02a5 "" + "/usr/share/texlive/texmf-dist/tex/latex/logreq/logreq.def" 1284153563 1620 fb1c32b818f2058eca187e5c41dfae77 "" + "/usr/share/texlive/texmf-dist/tex/latex/logreq/logreq.sty" 1284153563 6187 b27afc771af565d3a9ff1ca7d16d0d46 "" + "/usr/share/texlive/texmf-dist/tex/latex/pgf/basiclayer/pgf.sty" 1601326656 1090 bae35ef70b3168089ef166db3e66f5b2 "" + "/usr/share/texlive/texmf-dist/tex/latex/pgf/basiclayer/pgfcore.sty" 1601326656 410 615550c46f918fcbee37641b02a862d9 "" + "/usr/share/texlive/texmf-dist/tex/latex/pgf/compatibility/pgfcomp-version-0-65.sty" 1601326656 21013 f4ff83d25bb56552493b030f27c075ae "" + 
"/usr/share/texlive/texmf-dist/tex/latex/pgf/compatibility/pgfcomp-version-1-18.sty" 1601326656 989 c49c8ae06d96f8b15869da7428047b1e "" + "/usr/share/texlive/texmf-dist/tex/latex/pgf/frontendlayer/tikz.sty" 1601326656 339 c2e180022e3afdb99c7d0ea5ce469b7d "" + "/usr/share/texlive/texmf-dist/tex/latex/pgf/math/pgfmath.sty" 1601326656 306 c56a323ca5bf9242f54474ced10fca71 "" + "/usr/share/texlive/texmf-dist/tex/latex/pgf/systemlayer/pgfsys.sty" 1601326656 443 8c872229db56122037e86bcda49e14f3 "" + "/usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgffor.sty" 1601326656 348 ee405e64380c11319f0e249fed57e6c5 "" + "/usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfkeys.sty" 1601326656 274 5ae372b7df79135d240456a1c6f2cf9a "" + "/usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfrcs.sty" 1601326656 325 f9f16d12354225b7dd52a3321f085955 "" + "/usr/share/texlive/texmf-dist/tex/latex/refcount/refcount.sty" 1576624809 9878 9e94e8fa600d95f9c7731bb21dfb67a4 "" + "/usr/share/texlive/texmf-dist/tex/latex/rerunfilecheck/rerunfilecheck.sty" 1657483315 9714 ba3194bd52c8499b3f1e3eb91d409670 "" + "/usr/share/texlive/texmf-dist/tex/latex/scalerel/scalerel.sty" 1483104048 7825 43f8c26a0a3916d218f4f48c29aa92e4 "" + "/usr/share/texlive/texmf-dist/tex/latex/substr/substr.sty" 1258848659 5724 d3505925e87b345f11a5f17d58550dea "" + "/usr/share/texlive/texmf-dist/tex/latex/threeparttable/threeparttable.sty" 1267981840 13506 a4e71a27db1a69b6fabada5beebf0844 "" + "/usr/share/texlive/texmf-dist/tex/latex/tools/array.sty" 1654720880 12694 4770336659ba563be5de2e0739d61ddc "" + "/usr/share/texlive/texmf-dist/tex/latex/tools/bm.sty" 1654720880 13231 b52297489a0e9d929aae403417d92a02 "" + "/usr/share/texlive/texmf-dist/tex/latex/tools/calc.sty" 1654720880 10214 de3e21cfc0eccc98ca7f8dac0ef263d2 "" + "/usr/share/texlive/texmf-dist/tex/latex/tools/enumerate.sty" 1654720880 3468 46ba9177f0f0a79fe79845d3eebff113 "" + "/usr/share/texlive/texmf-dist/tex/latex/tools/tabularx.sty" 1654720880 7147 
be6981d9f5d866a5634048c4a11814a9 "" + "/usr/share/texlive/texmf-dist/tex/latex/upquote/upquote.sty" 1334873510 1048 517e01cde97c1c0baf72e69d43aa5a2e "" + "/usr/share/texlive/texmf-dist/tex/latex/url/url.sty" 1388531844 12796 8edb7d69a20b857904dd0ea757c14ec9 "" + "/usr/share/texlive/texmf-dist/tex/latex/wrapfig/wrapfig.sty" 1137111090 26220 3701aebf80ccdef248c0c20dd062fea9 "" + "/usr/share/texlive/texmf-dist/tex/latex/xcolor/xcolor.sty" 1655066402 56148 51a9a8571c07b9921892ae11063ae853 "" + "/usr/share/texlive/texmf-dist/web2c/texmf.cnf" 1658757727 39561 34c98e380bf7c7201ee6a7909aff625a "" + "/usr/share/texmf/fonts/enc/dvips/lm/lm-ec.enc" 1254269338 2375 baa924870cfb487815765f9094cf3728 "" + "/usr/share/texmf/fonts/enc/dvips/lm/lm-mathex.enc" 1202520719 3486 c7eadf5dcc57b3b2d11736679f6636ba "" + "/usr/share/texmf/fonts/enc/dvips/lm/lm-mathit.enc" 1202520719 2405 5dcf2c1b967ee25cc46c58cd52244aed "" + "/usr/share/texmf/fonts/enc/dvips/lm/lm-mathsy.enc" 1202520719 2840 216e6e45ad352e2456e1149f28885bee "" + "/usr/share/texmf/fonts/enc/dvips/lm/lm-rm.enc" 1202520719 2327 9d6df24f9c4f7368395224341a95523a "" + "/usr/share/texmf/fonts/enc/dvips/lm/lm-ts1.enc" 1254269338 3031 6c4d3515bf7115d8518af1c9ab97ca44 "" + "/usr/share/texmf/fonts/tfm/public/lm/ec-lmbx12.tfm" 1254269338 12088 d750ac78274fa7c9f73ba09914c04f8a "" + "/usr/share/texmf/fonts/tfm/public/lm/ec-lmbxi10.tfm" 1254269338 17180 a5723008921cdcb0c5f4ebe997919b73 "" + "/usr/share/texmf/fonts/tfm/public/lm/ec-lmr10.tfm" 1254269338 12056 7e13df7fe4cbce21b072ba7c4f4deb6e "" + "/usr/share/texmf/fonts/tfm/public/lm/ec-lmr12.tfm" 1254269338 12092 7b1546e2d096cfd5dcbd4049b0b1ec2e "" + "/usr/share/texmf/fonts/tfm/public/lm/ec-lmr6.tfm" 1254269338 12048 c1068d0f4772be9b0ec447692e1d6d82 "" + "/usr/share/texmf/fonts/tfm/public/lm/ec-lmr7.tfm" 1254269338 12064 09aa3eeac96bf141d673bb1b0385ce55 "" + "/usr/share/texmf/fonts/tfm/public/lm/ec-lmr8.tfm" 1254269338 12064 a35db870f0b76c338d749c56dc030ef5 "" + 
"/usr/share/texmf/fonts/tfm/public/lm/ec-lmri10.tfm" 1254269338 17148 9556e1b5f936b77a796f68d2d559ba99 "" + "/usr/share/texmf/fonts/tfm/public/lm/ec-lmri12.tfm" 1254269338 17144 271aaf9ebb339934b04110dc5211fba4 "" + "/usr/share/texmf/fonts/tfm/public/lm/ec-lmtt10.tfm" 1254269338 1372 2ef2c2b492b3c4cd7879fe083abbb061 "" + "/usr/share/texmf/fonts/tfm/public/lm/ec-lmtt12.tfm" 1254269338 1368 6a60e6a5e029141041d64d339b87e533 "" + "/usr/share/texmf/fonts/tfm/public/lm/lmbsy10.tfm" 1148093231 1300 2df9da0fc09d4a8c772b3dd386a47c6a "" + "/usr/share/texmf/fonts/tfm/public/lm/lmbsy5.tfm" 1148093231 1304 2ff0a255ae754422adbc0e6519ed2658 "" + "/usr/share/texmf/fonts/tfm/public/lm/lmbsy7.tfm" 1148093231 1304 535e0954c1961c817723e44bc6a9662c "" + "/usr/share/texmf/fonts/tfm/public/lm/lmex10.tfm" 1148093231 992 ce925c9346c7613270a79afbee98c070 "" + "/usr/share/texmf/fonts/tfm/public/lm/lmmi10.tfm" 1148093231 1528 6d36b2385e0ca062a654de6ac59cb34f "" + "/usr/share/texmf/fonts/tfm/public/lm/lmmi12.tfm" 1148093231 1524 753b192b18f2991794f9d41a8228510b "" + "/usr/share/texmf/fonts/tfm/public/lm/lmmi5.tfm" 1148093231 1508 198f5b7b99b5769126de3a533f6fc334 "" + "/usr/share/texmf/fonts/tfm/public/lm/lmmi6.tfm" 1148093231 1512 94a3fd88c6f27dbd9ecb46987e297a4e "" + "/usr/share/texmf/fonts/tfm/public/lm/lmmi7.tfm" 1148093231 1528 d5b028dd23da623848ef0645c96a1ed7 "" + "/usr/share/texmf/fonts/tfm/public/lm/lmmi8.tfm" 1148093231 1520 a3fe5596932db2db2cbda300920dd4e9 "" + "/usr/share/texmf/fonts/tfm/public/lm/lmmib10.tfm" 1148093231 1524 94d8ba2701edc3d8c3337e16e222f220 "" + "/usr/share/texmf/fonts/tfm/public/lm/lmmib5.tfm" 1148093231 1496 026e52505574e5c5b11a8037b8db27d0 "" + "/usr/share/texmf/fonts/tfm/public/lm/lmmib7.tfm" 1148093231 1508 e1d41318430466dfe2207ded55ef3af5 "" + "/usr/share/texmf/fonts/tfm/public/lm/lmsy10.tfm" 1148093231 1308 02cc510f9dd6012e5815d0c0ffbf6869 "" + "/usr/share/texmf/fonts/tfm/public/lm/lmsy5.tfm" 1148093231 1296 54ed1a711e2303d5282575278e3620b0 "" + 
"/usr/share/texmf/fonts/tfm/public/lm/lmsy6.tfm" 1148093231 1300 b0605d44c16c22d99dc001808e4f24ea "" + "/usr/share/texmf/fonts/tfm/public/lm/lmsy7.tfm" 1148093231 1304 32f22a15acc296b2a4e15698403dcb88 "" + "/usr/share/texmf/fonts/tfm/public/lm/lmsy8.tfm" 1148093231 1304 cdc9a17df9ef0d2dc320eff37bbab1c4 "" + "/usr/share/texmf/fonts/tfm/public/lm/rm-lmbx10.tfm" 1254269338 11880 35fcf136a2198418dfc53c83e9e2a07f "" + "/usr/share/texmf/fonts/tfm/public/lm/rm-lmbx12.tfm" 1254269338 11880 ea60d06924270684e6f852f3141c992b "" + "/usr/share/texmf/fonts/tfm/public/lm/rm-lmbx5.tfm" 1254269338 11828 9b1880528bdbe7e6035fd1b46bff1bbb "" + "/usr/share/texmf/fonts/tfm/public/lm/rm-lmbx6.tfm" 1254269338 11852 eda7061aa4cc8552ba736dae866e4460 "" + "/usr/share/texmf/fonts/tfm/public/lm/rm-lmbx7.tfm" 1254269338 11864 44cdb751af976143ebc0bed7eb1df9f4 "" + "/usr/share/texmf/fonts/tfm/public/lm/rm-lmbx8.tfm" 1254269338 11868 731e03b24d399279cf9609d002110394 "" + "/usr/share/texmf/fonts/tfm/public/lm/rm-lmr10.tfm" 1254269338 11868 4f81e9b6033c032bdaf9884f4d7ef412 "" + "/usr/share/texmf/fonts/tfm/public/lm/rm-lmr12.tfm" 1254269338 11888 6841b91e46b65cf41a49b160e6e74130 "" + "/usr/share/texmf/fonts/tfm/public/lm/rm-lmr5.tfm" 1254269338 11804 aefb10c002e6492c25236524a447f969 "" + "/usr/share/texmf/fonts/tfm/public/lm/rm-lmr6.tfm" 1254269338 11836 e3b6ce3e601aec94f64a536e7f4224d5 "" + "/usr/share/texmf/fonts/tfm/public/lm/rm-lmr7.tfm" 1254269338 11852 5a9022f105fd1ee2797df861e79ae9a0 "" + "/usr/share/texmf/fonts/tfm/public/lm/rm-lmr8.tfm" 1254269338 11864 309fd7f43e4a0ba39f6f7644d76e8edf "" + "/usr/share/texmf/fonts/tfm/public/lm/ts1-lmr12.tfm" 1254269338 1596 1d4548788e389ded56d0b01b28377882 "" + "/usr/share/texmf/fonts/type1/public/lm/lmbx12.pfb" 1255129361 116908 1fca96723793882c2e0160350c192fc8 "" + "/usr/share/texmf/fonts/type1/public/lm/lmbxi10.pfb" 1255129361 112766 bdd4fa8b13a0d7dd137624085bd31d40 "" + "/usr/share/texmf/fonts/type1/public/lm/lmex10.pfb" 1254269338 23055 
2e5b42921de910eaa97b85df04ca4891 "" + "/usr/share/texmf/fonts/type1/public/lm/lmmi10.pfb" 1254269338 30388 702fae6a5f0e6e9c48a1d872b442ffcf "" + "/usr/share/texmf/fonts/type1/public/lm/lmmi12.pfb" 1254269338 30696 2654571912f9cd384da9f7cb8a60c568 "" + "/usr/share/texmf/fonts/type1/public/lm/lmmi6.pfb" 1254269338 31113 04b711c9a4c7f8cb4ed784c6fc4cc1c5 "" + "/usr/share/texmf/fonts/type1/public/lm/lmmi8.pfb" 1254269338 30635 833ec815d446ec453a4913fc26d24cbc "" + "/usr/share/texmf/fonts/type1/public/lm/lmr10.pfb" 1255129361 119235 f35b44530a1d90eb90fe15d9cba67ea0 "" + "/usr/share/texmf/fonts/type1/public/lm/lmr12.pfb" 1255129361 113634 f99c44d58bae0863375faf0e1d74d612 "" + "/usr/share/texmf/fonts/type1/public/lm/lmr6.pfb" 1255129361 123394 d390152bb30feeb496aaaa93299ee9ba "" + "/usr/share/texmf/fonts/type1/public/lm/lmr7.pfb" 1255129361 121145 68312a933e2c689ed40ec0aba373e279 "" + "/usr/share/texmf/fonts/type1/public/lm/lmr8.pfb" 1255129361 122174 a7a08406857c9530a0320a2517f60370 "" + "/usr/share/texmf/fonts/type1/public/lm/lmri10.pfb" 1255129361 112593 fda2373ba4420af33949610de4c28fe8 "" + "/usr/share/texmf/fonts/type1/public/lm/lmri12.pfb" 1255129361 109265 32320cb6133d4d76bf83e27b5eb4009b "" + "/usr/share/texmf/fonts/type1/public/lm/lmsy10.pfb" 1254269338 27863 09ce3735688ffde955e72da27c95b61a "" + "/usr/share/texmf/fonts/type1/public/lm/lmsy8.pfb" 1254269338 27802 5c876bb2c4040caaf035d60bd74a86bd "" + "/usr/share/texmf/fonts/type1/public/lm/lmtt10.pfb" 1255129361 113227 1010e11451afc2822c95dae77c390042 "" + "/usr/share/texmf/fonts/type1/public/lm/lmtt12.pfb" 1255129361 110323 92daea7ca7b4120bd2b54b047c93be27 "" + "/usr/share/texmf/tex/latex/lm/lmodern.sty" 1616454256 1608 b00724785a9e9c599e5181bb8729160b "" + "/usr/share/texmf/tex/latex/lm/omllmm.fd" 1616454256 890 57f5adccd504fb5c98bdf99ed7e7f195 "" + "/usr/share/texmf/tex/latex/lm/omslmsy.fd" 1616454256 807 3de192f3efa968913bd2f096a7b430d8 "" + "/usr/share/texmf/tex/latex/lm/omxlmex.fd" 1616454256 568 
a5494d810f2680caf10205cd1226c76c "" + "/usr/share/texmf/tex/latex/lm/ot1lmr.fd" 1616454256 1882 28c08db1407ebff35a658fd141753d16 "" + "/usr/share/texmf/tex/latex/lm/t1lmr.fd" 1616454256 1867 996fe743d88a01aca041ed22cc10e1bb "" + "/usr/share/texmf/tex/latex/lm/t1lmtt.fd" 1616454256 2682 555da1faa2e266801e4b221d01a42cb5 "" + "/usr/share/texmf/tex/latex/lm/ts1lmr.fd" 1616454256 1914 884882d7ebb0fd65cea93fca77ff6f5a "" + "/usr/share/texmf/web2c/texmf.cnf" 1658757727 39561 34c98e380bf7c7201ee6a7909aff625a "" + "/var/lib/texmf/fonts/map/pdftex/updmap/pdftex.map" 1666310487 4578607 d069bc70ba6abc8a4382683ea17c45fb "" + "/var/lib/texmf/web2c/pdftex/pdflatex.fmt" 1666310189 1628849 80260b62ec39921905d6c2cfa2d7ef0f "" + "Flowchart.png" 1666253385 594952 50601e09cb6e0a0a37098aad24b31fbc "" + "article.aux" 1668898249 40263 5e4db7c7045d875d309dc708ce858a17 "pdflatex" + "article.bbl" 1667261564 238362 da0a525a8d6fb451ee76d8c69699194e "biber article" + "article.out" 1668898249 9536 04078e275515bad7341e4de757e4a170 "pdflatex" + "article.run.xml" 1668898249 2484 1a0cd662b1695e7507b5de2c2ac493be "pdflatex" + "article.tex" 1667262630 117805 bd283cc5998d19be5f74625aa537323f "" + "bayesnets.tex" 1665085904 4270 e014b8a16424c7a11d3a1a724a6ae68a "" + "figure/example1_g-1.pdf" 1667262627 8994 7daad9b38b2c372eb4fa8f6b27c34087 "" + "figure/example1_x-1.pdf" 1667262625 9014 f7c3ce13d5511195106b53d6d30613be "" + "figure/example2_g-1.pdf" 1667262628 9012 ba9f2fe77f441edef51b76c72d94e7d4 "" + "figure/example2_x-1.pdf" 1667262625 8898 3dfdca456b7bae859c39603a6f0b0f30 "" + "figure/example3_x-1.pdf" 1667262626 8249 7c043bc270ea46cd5325c67cfc722f24 "" + "figure/example3_z-1.pdf" 1667262628 8249 dedd76fc2cdea9440e8d186e2bc178f0 "" + "figure/example_4_x-1.pdf" 1667262629 8275 523f1cf079d3a1ea707fe17fdd7a1264 "" + "figure/example_4_z-1.pdf" 1667262627 8282 7cbd97c9015390950e8342c70f74018e "" + "parrot.pdf" 1662686581 4525 873ee3e92e2293597173d885283b4e70 "" + (generated) + "article.aux" + 
"article.bcf" + "article.log" + "article.out" + "article.pdf" + "article.run.xml" diff --git a/article.pdf b/article.pdf new file mode 100644 index 0000000..ab57397 Binary files /dev/null and b/article.pdf differ diff --git a/article.run.xml b/article.run.xml new file mode 100644 index 0000000..65a800b --- /dev/null +++ b/article.run.xml @@ -0,0 +1,90 @@ + + + + + + + + + + + + + + + + + + + + + + + + +]> + + + latex + + article.bcf + + + article.bbl + + + blx-dm.def + apa.dbx + blx-compat.def + biblatex.def + blx-natbib.def + standard.bbx + apa.bbx + apa.cbx + biblatex.cfg + english.lbx + english-apa.lbx + american.lbx + american-apa.lbx + + + + biber + + biber + article + + + article.bcf + + + article.bbl + + + article.bbl + + + article.bcf + + + Bibliography.bib + + + diff --git a/auto/article.el b/auto/article.el new file mode 100644 index 0000000..1d7ec45 --- /dev/null +++ b/auto/article.el @@ -0,0 +1,75 @@ +(TeX-add-style-hook + "article" + (lambda () + (TeX-add-to-alist 'LaTeX-provided-class-options + '(("apa7" "floatsintext" "mask" "man"))) + (TeX-add-to-alist 'LaTeX-provided-package-options + '(("inputenc" "utf8") ("fontenc" "T1") ("babel" "american") ("biblatex" "natbib=true" "style=apa" "sortcites=true" "backend=biber"))) + (add-to-list 'LaTeX-verbatim-environments-local "lstlisting") + (add-to-list 'LaTeX-verbatim-macros-with-braces-local "url") + (add-to-list 'LaTeX-verbatim-macros-with-braces-local "path") + (add-to-list 'LaTeX-verbatim-macros-with-braces-local "lstinline") + (add-to-list 'LaTeX-verbatim-macros-with-delims-local "url") + (add-to-list 'LaTeX-verbatim-macros-with-delims-local "path") + (add-to-list 'LaTeX-verbatim-macros-with-delims-local "lstinline") + (TeX-run-style-hooks + "latex2e" + "bayesnets" + "apa7" + "apa710" + "epstopdf" + "subcaption" + "tikz" + "tabularx" + "inputenc" + "wrapfig" + "fontenc" + "textcomp" + "listings" + "xcolor" + "graphicx" + "enumerate" + "amsmath" + "babel" + "csquotes" + "biblatex") + (TeX-add-symbols + 
'("TODO" 1) + "citepos" + "citespos") + (LaTeX-add-labels + "eq:covariate.reg.general" + "eq:covariate.logisticreg.w" + "eq:depvar.general" + "eq:depvar.w" + "mod:measerr.ols" + "mod:true.ols" + "mod:measerr.logit" + "fig:sim1a.x" + "fig:sim1b.x" + "fig:sim2a.x" + "fig:sim2b.x" + "appendix:lit.review" + "fig:FigureA1" + "tab:TableA1" + "appendix:other.methods" + "appendix:derivation" + "eq:mle.covariate.chainrule.1" + "eq:mle.covariate.chainrule.2" + "eq:mle.covariate.chainrule.3" + "eq:mle.covariate.chainrule.4" + "appendix:misclassificationmodels" + "appendix:main.sim.plots" + "fig:sim1b.z" + "appendix:sim1.profile") + (LaTeX-add-bibliographies + "Bibliography") + (LaTeX-add-listings-lstdefinestyles + "mystyle") + (LaTeX-add-xcolor-definecolors + "codegreen" + "codegray" + "codepurple" + "backcolour")) + :latex) + diff --git a/auto/article_local.el b/auto/article_local.el new file mode 100644 index 0000000..3c62e39 --- /dev/null +++ b/auto/article_local.el @@ -0,0 +1,32 @@ +(TeX-add-style-hook + "article_local" + (lambda () + (TeX-add-to-alist 'LaTeX-provided-package-options + '(("natbib" "numbers" "sort&compress" "merge"))) + (TeX-run-style-hooks + "latex2e" + "interact" + "interact10" + "epstopdf" + "subcaption" + "tikz" + "natbib") + (TeX-add-symbols + '("bibnumfmt" 1) + '("citenumfont" 1)) + (LaTeX-add-labels + "fig:simulation.1" + "fig:simulation.2") + (LaTeX-add-environments + "theorem" + "lemma" + "corollary" + "proposition" + "definition" + "example" + "remark" + "notation") + (LaTeX-add-bibliographies + "Bibliography")) + :latex) + diff --git a/auto/flowchart_recommendations.el b/auto/flowchart_recommendations.el new file mode 100644 index 0000000..c3884d8 --- /dev/null +++ b/auto/flowchart_recommendations.el @@ -0,0 +1,7 @@ +(TeX-add-style-hook + "flowchart_recommendations" + (lambda () + (TeX-add-symbols + "myindent")) + :latex) + diff --git a/auto/iv_perspective_example.el b/auto/iv_perspective_example.el new file mode 100644 index 0000000..8a64373 
--- /dev/null +++ b/auto/iv_perspective_example.el @@ -0,0 +1,13 @@ +(TeX-add-style-hook + "iv_perspective_example" + (lambda () + (TeX-run-style-hooks + "latex2e" + "standalone" + "standalone10" + "tikz" + "makecell") + (TeX-add-symbols + "myindent")) + :latex) + diff --git a/bayesnets.tex b/bayesnets.tex new file mode 100644 index 0000000..2b30d7f --- /dev/null +++ b/bayesnets.tex @@ -0,0 +1,130 @@ +\tikzset{ + observed/.style={circle, draw}, + partly observed/.style 2 args={draw, fill=#2, path picture={ + \fill[#1, sharp corners] (path picture bounding box.south west) -| + (path picture bounding box.north east) -- cycle;}, + circle}, + unobserved/.style={draw, circle, fill=gray!40}, + residual/.style={draw, rectangle} +} +\begin{figure}[htbp!] +\centering +\begin{subfigure}[t]{0.48\textwidth} +\centering +\begin{tikzpicture} + + \node[observed] (y) {$Y$}; + \node[unobserved, above=of y] (x) {$X$}; + \node[observed, left=of x] (w) {$W$}; + +% \node[unobserved, above=of w] (k) {$K$}; + \node[observed,right=of x] (z) {$Z$}; +% \node[residual,below=of y] (e) {$\varepsilon$}; +% \node[residual,below=of w] (xi) {$\xi$}; + + \draw[-] (z) to (y); + \draw[-] (z) -- (x); + \draw[-] (x) -- (y); + \draw[-] (x) -- (w); +% \draw[-] (y) -- (w); +% \draw[-] (x) -- (xi); + % \draw[-] (w) -- (xi); + +\end{tikzpicture} +\caption{In \emph{Simulation 1a}, classifications $W$ are conditionally independent of $Y$ so a model using $W$ as a proxy for $X$ has non-differential error. 
\label{fig:simulation.1a}} +\end{subfigure} +\hfill +\begin{subfigure}[t]{0.48\textwidth} +\centering +\begin{tikzpicture} + + \node[observed] (y) {$Y$}; + \node[unobserved, above=of y] (x) {$X$}; + \node[observed, left=of x] (w) {$W$}; + +% \node[unobserved, above=of w] (k) {$K$}; + \node[observed,right=of x] (z) {$Z$}; +% \node[residual,below=of y] (e) {$\varepsilon$}; +% \node[residual,below=of w] (xi) {$\xi$}; + + \draw[-] (z) to (y); + \draw[-] (z) -- (x); + \draw[-] (x) -- (y); + \draw[-] (x) -- (w); +% \draw[-] (k) -- (w); + + \draw[-] (x) to (y); + + \draw[-] (w) -- (y); + +% \draw[-] (x) -- (xi); +% \draw[-] (z) -- (xi); +% \draw[-] (w) -- (xi); +\end{tikzpicture} +\caption{In \emph{Simulation 1b}, the edge from $W$ to $Y$ signifies that the automatic classifications $W$ are not conditionally independent of $Y$ given $X$, indicating differential error. +\label{fig:simulation.1b} +} +\end{subfigure} +\\ +\hfill +\begin{subfigure}[t]{0.48\textwidth} +\centering +\begin{tikzpicture} + \node[unobserved] (y) {$Y$}; + + \node[observed, above=of y] (x) {$X$}; + \node[observed, left=of y] (w) {$W$}; + +% \node[unobserved, above=of w] (k) {$K$}; + \node[observed,right=of x] (z) {$Z$}; +% \node[residual,below=of y] (e) {$\varepsilon$}; + % \node[residual,below=of w] (xi) {$\xi$}; + \draw[-] (z) to (y); + \draw[-] (x) -- (y); + \draw[-] (y) -- (w); + \draw[-] (x) -- (z); +% \draw[-] (k) -- (w); + % \draw[-] (w) -- (xi); + +\end{tikzpicture} +\caption{In \emph{Simulation 2a}, an unbiased classifier measures the outcome. 
\label{fig:simulation.2a}} +\end{subfigure} \hfill +\begin{subfigure}[t]{0.48\textwidth} +\centering +\begin{tikzpicture} + \node[unobserved] (y) {$Y$}; + + \node[observed={white}{gray!40}, above=of y] (x) {$X$}; + \node[observed, left=of y] (w) {$W$}; + +% \node[unobserved, above=of w] (k) {$K$}; + \node[observed,right=of x] (z) {$Z$}; +% \node[residual,below=of y] (e) {$\varepsilon$}; +% \node[residual,below=of w] (xi) {$\xi$}; + \draw[-] (x) -- (y); + \draw[-] (x) -- (w); + \draw[-] (y) -- (w); + \draw[-] (x) -- (z); +% \draw[-] (k) -- (w); + \draw[-] (z) -- (y); +% \draw[-] (z) -- (k); +% \draw[-] (y) -- (xi); +% \draw[-] (w) -- (xi); +\end{tikzpicture} +\caption{In \emph{Simulation 2b}, the edge connecting $W$ and $X$ signifies that the predictions $W$ are not conditionally independent of $X$ given $Y$, indicating systematic misclassification. \label{fig:simulation.2b}} +\end{subfigure} +\vspace{1em} +\begin{subfigure}[t]{0.2\textwidth} +\centering +\begin{tikzpicture} + \matrix [draw, below, font=\small, align=center, column sep=2\pgflinewidth, inner sep=0.4em, outer sep=0em, nodes={align=center, anchor=center}] at (current bounding box.south){ + \node[observed,label=right:observed] {}; \\ + \node[unobserved,label=right:automatically classified]{}; \\ +% \node[residual,label=right:error term]{}; \\ + }; +\end{tikzpicture} +\end{subfigure} +\caption{ +Bayesnet networks representing the conditional independence structure of our simulations. 
\label{bayesnets} +} +\end{figure} \ No newline at end of file diff --git a/charts/example_1_dag/Makefile b/charts/example_1_dag/Makefile new file mode 100644 index 0000000..75fef1f --- /dev/null +++ b/charts/example_1_dag/Makefile @@ -0,0 +1,28 @@ +#!/usr/bin/make + +all: $(patsubst %.tex,%.svg,$(wildcard *.tex)) $(patsubst %.tex,%.png,$(wildcard *.tex)) + +%.png: %.pdf + convert -density 300 -transparent white $< $@ + +%.svg: %.pdf + /usr/bin/inkscape $< --export-plain-svg=$@ + +%.pdf: %.tex + latexmk -f -pdf $< + +clean: + latexmk -C *.tex + rm -f *.tmp + rm -f vc + rm *.svg + +viewpdf: all + evince *.pdf + +vc: + vc-git + +pdf: all + +.PHONY: clean all diff --git a/charts/example_1_dag/auto/example_1_dag.el b/charts/example_1_dag/auto/example_1_dag.el new file mode 100644 index 0000000..0054822 --- /dev/null +++ b/charts/example_1_dag/auto/example_1_dag.el @@ -0,0 +1,27 @@ +(TeX-add-style-hook + "example_1_dag" + (lambda () + (TeX-add-to-alist 'LaTeX-provided-class-options + '(("standalone" "12pt"))) + (TeX-add-to-alist 'LaTeX-provided-package-options + '(("inputenc" "utf8x") ("fontenc" "T1") ("mathdesign" "garamond"))) + (add-to-list 'LaTeX-verbatim-macros-with-braces-local "href") + (add-to-list 'LaTeX-verbatim-macros-with-braces-local "hyperref") + (add-to-list 'LaTeX-verbatim-macros-with-braces-local "hyperimage") + (add-to-list 'LaTeX-verbatim-macros-with-braces-local "hyperbaseurl") + (add-to-list 'LaTeX-verbatim-macros-with-braces-local "nolinkurl") + (add-to-list 'LaTeX-verbatim-macros-with-braces-local "url") + (add-to-list 'LaTeX-verbatim-macros-with-braces-local "path") + (add-to-list 'LaTeX-verbatim-macros-with-delims-local "path") + (TeX-run-style-hooks + "latex2e" + "standalone" + "standalone12" + "ucs" + "inputenc" + "fontenc" + "textcomp" + "mathdesign" + "tikz")) + :latex) + diff --git a/charts/example_1_dag/auto/example_2_dag.el b/charts/example_1_dag/auto/example_2_dag.el new file mode 100644 index 0000000..25d5265 --- /dev/null +++ 
b/charts/example_1_dag/auto/example_2_dag.el @@ -0,0 +1,27 @@ +(TeX-add-style-hook + "example_2_dag" + (lambda () + (TeX-add-to-alist 'LaTeX-provided-class-options + '(("standalone" "12pt"))) + (TeX-add-to-alist 'LaTeX-provided-package-options + '(("inputenc" "utf8x") ("fontenc" "T1") ("mathdesign" "garamond"))) + (add-to-list 'LaTeX-verbatim-macros-with-braces-local "path") + (add-to-list 'LaTeX-verbatim-macros-with-braces-local "url") + (add-to-list 'LaTeX-verbatim-macros-with-braces-local "nolinkurl") + (add-to-list 'LaTeX-verbatim-macros-with-braces-local "hyperbaseurl") + (add-to-list 'LaTeX-verbatim-macros-with-braces-local "hyperimage") + (add-to-list 'LaTeX-verbatim-macros-with-braces-local "hyperref") + (add-to-list 'LaTeX-verbatim-macros-with-braces-local "href") + (add-to-list 'LaTeX-verbatim-macros-with-delims-local "path") + (TeX-run-style-hooks + "latex2e" + "standalone" + "standalone12" + "ucs" + "inputenc" + "fontenc" + "textcomp" + "mathdesign" + "tikz")) + :latex) + diff --git a/charts/example_1_dag/example_1_dag.fdb_latexmk b/charts/example_1_dag/example_1_dag.fdb_latexmk new file mode 100644 index 0000000..7e7a955 --- /dev/null +++ b/charts/example_1_dag/example_1_dag.fdb_latexmk @@ -0,0 +1,146 @@ +# Fdb version 3 +["pdflatex"] 1653334315 "example_1_dag.tex" "example_1_dag.pdf" "example_1_dag" 1653334316 + "/dev/null" 0 -1 0 "" + "/etc/texmf/web2c/texmf.cnf" 1650342257 475 c0e671620eb5563b2130f56340a5fde8 "" + "/usr/local/share/texmf/fonts/type1/urw/garamond/ugmr8a.pfb" 1094499120 70277 baa4b3da2c0fdc989d3b086e9f92a592 "" + "/usr/share/texlive/texmf-dist/fonts/enc/dvips/ly1/texnansi.enc" 1276641224 6938 5135be6a8802f13a7faf296d1959a1ab "" + "/usr/share/texlive/texmf-dist/fonts/map/fontname/texfonts.map" 1577235249 3524 cb3e574dea2d1052e39280babc910dc8 "" + "/usr/share/texlive/texmf-dist/fonts/tfm/jknappen/ec/ecrm1200.tfm" 1136768653 3584 f80ddd985bd00e29e9a6047ebd9d4781 "" + "/usr/share/texlive/texmf-dist/fonts/tfm/public/cm/cmr12.tfm" 
1136768653 1288 655e228510b4c2a1abe905c368440826 "" + "/usr/share/texlive/texmf-dist/fonts/tfm/public/mathdesign/mdugm/md-gmr8t.tfm" 1379030001 988 7af44ec9a0d02adcdeac5730bfa6c900 "" + "/usr/share/texlive/texmf-dist/fonts/tfm/public/mathdesign/mdugm/md-gmr8y.tfm" 1379030001 1420 c083b7ec6e57bd0ed303204f3ed5498f "" + "/usr/share/texlive/texmf-dist/fonts/tfm/public/mathdesign/mdugm/mdugmr8t.tfm" 1379030001 19500 8b5a37a37d0abbdc70524f1b83751a16 "" + "/usr/share/texlive/texmf-dist/fonts/vf/public/mathdesign/mdugm/mdugmr8t.vf" 1379030001 2232 00d038d9916ec0c6921fcb4bc893379c "" + "/usr/share/texlive/texmf-dist/tex/context/base/mkii/supp-pdf.mkii" 1461363279 71627 94eb9990bed73c364d7f53f960cc8c5b "" + "/usr/share/texlive/texmf-dist/tex/generic/iftex/ifluatex.sty" 1572645307 492 1994775aa15b0d1289725a0b1bbc2d4c "" + "/usr/share/texlive/texmf-dist/tex/generic/iftex/iftex.sty" 1583617216 6501 4011d89d9621e0b0901138815ba5ff29 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcore.code.tex" 1601326656 992 855ff26741653ab54814101ca36e153c "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcorearrows.code.tex" 1601326656 43820 1fef971b75380574ab35a0d37fd92608 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcoreexternal.code.tex" 1601326656 19324 f4e4c6403dd0f1605fd20ed22fa79dea "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcoregraphicstate.code.tex" 1601326656 6038 ccb406740cc3f03bbfb58ad504fe8c27 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcoreimage.code.tex" 1601326656 6944 e12f8f7a7364ddf66f93ba30fb3a3742 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcorelayers.code.tex" 1601326656 4883 42daaf41e27c3735286e23e48d2d7af9 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcoreobjects.code.tex" 1601326656 2544 8c06d2a7f0f469616ac9e13db6d2f842 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcorepathconstruct.code.tex" 1601326656 44195 
5e390c414de027626ca5e2df888fa68d "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcorepathprocessing.code.tex" 1601326656 17311 2ef6b2e29e2fc6a2fc8d6d652176e257 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcorepathusage.code.tex" 1601326656 21302 788a79944eb22192a4929e46963a3067 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcorepatterns.code.tex" 1601326656 9690 01feb7cde25d4293ef36eef45123eb80 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcorepoints.code.tex" 1601326656 33335 dd1fa4814d4e51f18be97d88bf0da60c "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcorequick.code.tex" 1601326656 2965 4c2b1f4e0826925746439038172e5d6f "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcorerdf.code.tex" 1601326656 5196 2cc249e0ee7e03da5f5f6589257b1e5b "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcorescopes.code.tex" 1601326656 20726 d4c8db1e2e53b72721d29916314a22ea "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcoreshade.code.tex" 1601326656 35249 abd4adf948f960299a4b3d27c5dddf46 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcoretransformations.code.tex" 1601326656 21989 fdc867d05d228316de137a9fc5ec3bbe "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcoretransparency.code.tex" 1601326656 8893 e851de2175338fdf7c17f3e091d94618 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryarrows.code.tex" 1601326656 319 225dfe354ba678ff3c194968db39d447 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryfadings.code.tex" 1601326656 1179 5483d86c1582c569e665c74efab6281f "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibrarypositioning.code.tex" 1601326656 3937 3f208572dd82c71103831da976d74f1a "" + 
"/usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshadows.code.tex" 1601326656 2889 d698e3a959304efa342d47e3bb86da5b "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshapes.arrows.code.tex" 1601326656 410 048d1174dabde96757a5387b8f23d968 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshapes.callouts.code.tex" 1601326656 1201 8bd51e254d3ecf0cd2f21edd9ab6f1bb "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshapes.code.tex" 1601326656 494 8de62576191924285b021f4fc4292e16 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshapes.geometric.code.tex" 1601326656 339 be0fe46d92a80e3385dd6a83511a46f2 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshapes.misc.code.tex" 1601326656 329 ba6d5440f8c16779c2384e0614158266 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshapes.multipart.code.tex" 1601326656 919 938802205ca20d7c36615aabc4d34be2 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshapes.symbols.code.tex" 1601326656 475 4b4056fe07caa0603fede9a162fe666d "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibrarytopaths.code.tex" 1608933718 11518 738408f795261b70ce8dd47459171309 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/tikz.code.tex" 1621110968 186007 6e7dfe0bd57520fd5f91641aa72dcac8 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/pgflibraryarrows.code.tex" 1601326656 31874 89148c383c49d4c72114a76fd0062299 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/pgflibraryfadings.code.tex" 1601326656 2563 d5b174eb7709fd6bdcc2f70953dbdf8e "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/pgflibraryplothandlers.code.tex" 1601326656 32995 
ac577023e12c0e4bd8aa420b2e852d1a "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/shapes/pgflibraryshapes.arrows.code.tex" 1601326656 91587 e30123381f7b9bcf1341c31c6be18b94 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/shapes/pgflibraryshapes.callouts.code.tex" 1601326656 33336 427c354e28a4802ffd781da22ae9f383 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/shapes/pgflibraryshapes.geometric.code.tex" 1606168878 160993 6a81d63e475cc43874b46ed32a0a37c8 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/shapes/pgflibraryshapes.misc.code.tex" 1601326656 46241 588910a2f1e0a99f2c3e14490683c20d "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/shapes/pgflibraryshapes.multipart.code.tex" 1601326656 62281 aff261ef10ba6cbe8e3c872a38c05a61 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/shapes/pgflibraryshapes.symbols.code.tex" 1601326656 90515 e30b2c9c93aacc373e47917c0c2a48ed "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfint.code.tex" 1557692582 3063 8c415c68a0f3394e45cfeca0b65f6ee6 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmath.code.tex" 1601326656 521 8e224a7af69b7fee4451d1bf76b46654 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathcalc.code.tex" 1601326656 13391 84d29568c13bdce4133ab4a214711112 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathfloat.code.tex" 1601326656 104935 184ed87524e76d4957860df4ce0cd1c3 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathfunctions.base.code.tex" 1601326656 10165 cec5fa73d49da442e56efc2d605ef154 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathfunctions.basic.code.tex" 1601326656 28178 41c17713108e0795aac6fef3d275fbca "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathfunctions.code.tex" 1601326656 9989 c55967bf45126ff9b061fa2ca0c4694f "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathfunctions.comparison.code.tex" 1601326656 3865 
ac538ab80c5cf82b345016e474786549 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathfunctions.integerarithmetics.code.tex" 1557692582 3177 27d85c44fbfe09ff3b2cf2879e3ea434 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathfunctions.misc.code.tex" 1621110968 11024 0179538121bc2dba172013a3ef89519f "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathfunctions.random.code.tex" 1608933718 7854 4176998eeefd8745ac6d2d4bd9c98451 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathfunctions.round.code.tex" 1601326656 3379 781797a101f647bab82741a99944a229 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathfunctions.trigonometric.code.tex" 1601326656 92405 f515f31275db273f97b9d8f52e1b0736 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathparser.code.tex" 1601326656 37376 11cd75aac3da1c1b152b2848f30adc14 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathutil.code.tex" 1601326656 8471 c2883569d03f69e8e1cabfef4999cfd7 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/modules/pgfmodulematrix.code.tex" 1601326656 21201 08d231a2386e2b61d64641c50dc15abd "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/modules/pgfmoduleplot.code.tex" 1601326656 16121 346f9013d34804439f7436ff6786cef7 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/modules/pgfmoduleshapes.code.tex" 1621110968 44784 cedaa399d15f95e68e22906e2cc09ef8 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/pgf.revision.tex" 1621110968 465 d68603f8b820ea4a08cce534944db581 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/systemlayer/pgf.cfg" 1601326656 926 2963ea0dcf6cc6c0a770b69ec46a477b "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/systemlayer/pgfsys-common-pdf.def" 1601326656 5546 f3f24d7898386cb7daac70bdd2c4d6dc "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/systemlayer/pgfsys-pdftex.def" 1601326656 12601 4786e597516eddd82097506db7cfa098 "" + 
"/usr/share/texlive/texmf-dist/tex/generic/pgf/systemlayer/pgfsys.code.tex" 1621110968 61163 9b2eefc24e021323e0fc140e9826d016 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/systemlayer/pgfsysprotocol.code.tex" 1601326656 1896 b8e0ca0ac371d74c0ca05583f6313c91 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/systemlayer/pgfsyssoftpath.code.tex" 1601326656 7778 53c8b5623d80238f6a20aa1df1868e63 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgffor.code.tex" 1606168878 23997 a4bed72405fa644418bea7eac2887006 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgfkeys.code.tex" 1621110968 37060 797782f0eb50075c9bc952374d9a659a "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgfkeysfiltered.code.tex" 1601326656 37431 9abe862035de1b29c7a677f3205e3d9f "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgfrcs.code.tex" 1601326656 4494 af17fb7efeafe423710479858e42fa7e "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgfutil-common-lists.tex" 1601326656 7251 fb18c67117e09c64de82267e12cd8aa4 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgfutil-common.tex" 1621110968 29274 e15c5b7157d21523bd9c9f1dfa146b8e "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgfutil-latex.def" 1621110968 6825 a2b0ea5b539dda0625e99dd15785ab59 "" + "/usr/share/texlive/texmf-dist/tex/generic/xkeyval/keyval.tex" 1605910342 2725 9f5d0b27f1f9a620c6ea983d6d41501d "" + "/usr/share/texlive/texmf-dist/tex/generic/xkeyval/xkeyval.tex" 1605910342 19231 3cbf682090baecad8e17a66b7a271ed1 "" + "/usr/share/texlive/texmf-dist/tex/generic/xkeyval/xkvutils.tex" 1605910342 7677 cf3e6aa6a8d444f55327f61df80bfa0c "" + "/usr/share/texlive/texmf-dist/tex/latex/base/article.cls" 1636758526 20144 8a7de377ae7a11ee924a7499611f5a9d "" + "/usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty" 1622581934 4946 461cc78f6f26901410d9f1d725079cc6 "" + "/usr/share/texlive/texmf-dist/tex/latex/base/ifthen.sty" 1622581934 5157 
f308c7c04889e16c588e78aa42599fae "" + "/usr/share/texlive/texmf-dist/tex/latex/base/inputenc.sty" 1622581934 5049 969aec05d5f39c43f8005910498fcf90 "" + "/usr/share/texlive/texmf-dist/tex/latex/base/size12.clo" 1636758526 8449 bc7344e882df4d7e51c046514dee83e4 "" + "/usr/share/texlive/texmf-dist/tex/latex/base/textcomp.sty" 1622581934 2894 55431114fc0e491ecee275edafd6c881 "" + "/usr/share/texlive/texmf-dist/tex/latex/epstopdf-pkg/epstopdf-base.sty" 1579991033 13886 d1306dcf79a944f6988e688c1785f9ce "" + "/usr/share/texlive/texmf-dist/tex/latex/graphics-cfg/color.cfg" 1459978653 1213 620bba36b25224fa9b7e1ccb4ecb76fd "" + "/usr/share/texlive/texmf-dist/tex/latex/graphics-cfg/graphics.cfg" 1465944070 1224 978390e9c2234eab29404bc21b268d1e "" + "/usr/share/texlive/texmf-dist/tex/latex/graphics-def/pdftex.def" 1601931164 19103 48d29b6e2a64cb717117ef65f107b404 "" + "/usr/share/texlive/texmf-dist/tex/latex/graphics/graphics.sty" 1622581934 18399 7e40f80366dffb22c0e7b70517db5cb4 "" + "/usr/share/texlive/texmf-dist/tex/latex/graphics/graphicx.sty" 1636758526 7996 a8fb260d598dcaf305a7ae7b9c3e3229 "" + "/usr/share/texlive/texmf-dist/tex/latex/graphics/keyval.sty" 1622581934 2671 4de6781a30211fe0ea4c672e4a2a8166 "" + "/usr/share/texlive/texmf-dist/tex/latex/graphics/trig.sty" 1636758526 4009 187ea2dc3194cd5a76cd99a8d7a6c4d0 "" + "/usr/share/texlive/texmf-dist/tex/latex/l3backend/l3backend-pdftex.def" 1642022539 29921 f0f4f870357ebfb8fe58ed9ed4ee9b92 "" + "/usr/share/texlive/texmf-dist/tex/latex/latexconfig/epstopdf-sys.cfg" 1279039959 678 4792914a8f45be57bb98413425e4c7af "" + "/usr/share/texlive/texmf-dist/tex/latex/mathdesign/mathdesign.sty" 1377991452 9111 d865fc87f99dbc5273fb00f1d7091d76 "" + "/usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdacmr.fd" 1153697689 579 116e648415099e5e059da594ef56c9f0 "" + "/usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdbcmr.fd" 1153697689 579 ddcbed007a246f2b5a98aedc86efeed0 "" + 
"/usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdfont.def" 1377991452 5878 bba53c9220a1555c41919107cf6f41c3 "" + "/usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdsffont.def" 1377991452 16973 0d74f58659233f7bbf4e2551e5c1b6ba "" + "/usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdttfont.def" 1377991452 9718 378a12581d907c0af4433a9e908339df "" + "/usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdugm/mdugm.cfg" 1379030001 3290 359d6e75cf1deff239ae04f0de11a9cd "" + "/usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdugm/mdugm.sty" 1379030001 44894 0dc5cc17cbd2a8c871ce80ca701d85a5 "" + "/usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdugm/t1mdugm.fd" 1379030001 1522 66675d9aa6eb36761b2c579a644335b4 "" + "/usr/share/texlive/texmf-dist/tex/latex/pgf/basiclayer/pgf.sty" 1601326656 1090 bae35ef70b3168089ef166db3e66f5b2 "" + "/usr/share/texlive/texmf-dist/tex/latex/pgf/basiclayer/pgfcore.sty" 1601326656 410 615550c46f918fcbee37641b02a862d9 "" + "/usr/share/texlive/texmf-dist/tex/latex/pgf/compatibility/pgfcomp-version-0-65.sty" 1601326656 21013 f4ff83d25bb56552493b030f27c075ae "" + "/usr/share/texlive/texmf-dist/tex/latex/pgf/compatibility/pgfcomp-version-1-18.sty" 1601326656 989 c49c8ae06d96f8b15869da7428047b1e "" + "/usr/share/texlive/texmf-dist/tex/latex/pgf/frontendlayer/tikz.sty" 1601326656 339 c2e180022e3afdb99c7d0ea5ce469b7d "" + "/usr/share/texlive/texmf-dist/tex/latex/pgf/math/pgfmath.sty" 1601326656 306 c56a323ca5bf9242f54474ced10fca71 "" + "/usr/share/texlive/texmf-dist/tex/latex/pgf/systemlayer/pgfsys.sty" 1601326656 443 8c872229db56122037e86bcda49e14f3 "" + "/usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgffor.sty" 1601326656 348 ee405e64380c11319f0e249fed57e6c5 "" + "/usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfkeys.sty" 1601326656 274 5ae372b7df79135d240456a1c6f2cf9a "" + "/usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfrcs.sty" 1601326656 325 f9f16d12354225b7dd52a3321f085955 "" + 
"/usr/share/texlive/texmf-dist/tex/latex/standalone/standalone.cfg" 1522098998 1015 662b4d7ad816b857a598284525f5c75e "" + "/usr/share/texlive/texmf-dist/tex/latex/standalone/standalone.cls" 1522098998 28890 df75e6d37f47b7e27bff3f37375336b3 "" + "/usr/share/texlive/texmf-dist/tex/latex/tools/shellesc.sty" 1622581934 4118 0f286eca74ee36b7743ff20320e5479f "" + "/usr/share/texlive/texmf-dist/tex/latex/ucs/data/uni-global.def" 1368571634 1375 8a855db83af5d6753ccbbd32e6a8a901 "" + "/usr/share/texlive/texmf-dist/tex/latex/ucs/ucs.sty" 1368571634 27982 5723d81d568db410592a59b85fb3eaae "" + "/usr/share/texlive/texmf-dist/tex/latex/ucs/ucsencs.def" 1368571634 22368 c53c9d0d16c65bef2b157515c9d9f658 "" + "/usr/share/texlive/texmf-dist/tex/latex/ucs/utf8x.def" 1368571634 8036 21f7ac37aafb6cfeddbb196b8bfd6280 "" + "/usr/share/texlive/texmf-dist/tex/latex/xcolor/xcolor.sty" 1635798903 56029 3f7889dab51d620aa43177c391b7b190 "" + "/usr/share/texlive/texmf-dist/tex/latex/xkeyval/xkeyval.sty" 1605910342 4902 efb3d66683a2da2a232f71e3a571a899 "" + "/usr/share/texlive/texmf-dist/web2c/texmf.cnf" 1644012257 39432 7155514e09a3d69036fac785183a21c2 "" + "/usr/share/texmf/tex/latex/preview/preview.sty" 1598018759 13747 d074c8555b22976c3890effbb5ce8ed7 "" + "/usr/share/texmf/web2c/texmf.cnf" 1644012257 39432 7155514e09a3d69036fac785183a21c2 "" + "/var/lib/texmf/fonts/map/pdftex/updmap/pdftex.map" 1650342827 4362114 c882e0fb2b702d36ed2d6636759689e2 "" + "/var/lib/texmf/web2c/pdftex/pdflatex.fmt" 1652219592 1590085 c669746cfe044505095fd475f9edf6a3 "" + "example_1_dag.aux" 1653334316 32 3985256e7290058c681f74d7a3565a19 "pdflatex" + "example_1_dag.tex" 1653333596 1363 a1f0edc181ffd1d7e24cc3b4a0095fda "" + (generated) + "example_1_dag.aux" + "example_1_dag.log" + "example_1_dag.pdf" diff --git a/charts/example_1_dag/example_1_dag.fls b/charts/example_1_dag/example_1_dag.fls new file mode 100644 index 0000000..173db4d --- /dev/null +++ b/charts/example_1_dag/example_1_dag.fls @@ -0,0 +1,573 @@ +PWD 
/home/nathante/ml_measurement_error/charts/example_1_dag +INPUT /etc/texmf/web2c/texmf.cnf +INPUT /usr/share/texmf/web2c/texmf.cnf +INPUT /usr/share/texlive/texmf-dist/web2c/texmf.cnf +INPUT /var/lib/texmf/web2c/pdftex/pdflatex.fmt +INPUT example_1_dag.tex +OUTPUT example_1_dag.log +INPUT /usr/share/texlive/texmf-dist/tex/latex/standalone/standalone.cls +INPUT /usr/share/texlive/texmf-dist/tex/latex/standalone/standalone.cls +INPUT /usr/share/texlive/texmf-dist/tex/latex/standalone/standalone.cls +INPUT /usr/share/texlive/texmf-dist/tex/latex/standalone/standalone.cls +INPUT /usr/share/texlive/texmf-dist/tex/latex/standalone/standalone.cls +INPUT /usr/share/texlive/texmf-dist/tex/latex/standalone/standalone.cls +INPUT /usr/share/texlive/texmf-dist/tex/latex/standalone/standalone.cls +INPUT /usr/share/texlive/texmf-dist/tex/latex/standalone/standalone.cls +INPUT /usr/share/texlive/texmf-dist/tex/latex/standalone/standalone.cls +INPUT /usr/share/texlive/texmf-dist/tex/latex/standalone/standalone.cls +INPUT /usr/share/texlive/texmf-dist/tex/latex/standalone/standalone.cls +INPUT /usr/share/texlive/texmf-dist/tex/latex/tools/shellesc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/tools/shellesc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/tools/shellesc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/tools/shellesc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/tools/shellesc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/tools/shellesc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/tools/shellesc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/tools/shellesc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/tools/shellesc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/tools/shellesc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/tools/shellesc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/tools/shellesc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/tools/shellesc.sty +INPUT 
/usr/share/texlive/texmf-dist/tex/generic/iftex/ifluatex.sty +INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/ifluatex.sty +INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/ifluatex.sty +INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/ifluatex.sty +INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/ifluatex.sty +INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/ifluatex.sty +INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/ifluatex.sty +INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/ifluatex.sty +INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/ifluatex.sty +INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/ifluatex.sty +INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/ifluatex.sty +INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/ifluatex.sty +INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/ifluatex.sty +INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/iftex.sty +INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/iftex.sty +INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/iftex.sty +INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/iftex.sty +INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/iftex.sty +INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/iftex.sty +INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/iftex.sty +INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/iftex.sty +INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/iftex.sty +INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/iftex.sty +INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/iftex.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/xkeyval/xkeyval.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/xkeyval/xkeyval.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/xkeyval/xkeyval.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/xkeyval/xkeyval.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/xkeyval/xkeyval.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/xkeyval/xkeyval.sty 
+INPUT /usr/share/texlive/texmf-dist/tex/latex/xkeyval/xkeyval.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/xkeyval/xkeyval.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/xkeyval/xkeyval.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/xkeyval/xkeyval.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/xkeyval/xkeyval.sty +INPUT /usr/share/texlive/texmf-dist/tex/generic/xkeyval/xkeyval.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/xkeyval/xkvutils.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/xkeyval/keyval.tex +INPUT /dev/null +INPUT /dev/null +INPUT /dev/null +INPUT /usr/share/texlive/texmf-dist/tex/latex/standalone/standalone.cfg +INPUT /usr/share/texlive/texmf-dist/tex/latex/standalone/standalone.cfg +INPUT /usr/share/texlive/texmf-dist/tex/latex/standalone/standalone.cfg +INPUT /usr/share/texlive/texmf-dist/tex/latex/standalone/standalone.cfg +INPUT /usr/share/texmf/tex/latex/preview/preview.sty +INPUT /usr/share/texmf/tex/latex/preview/preview.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/article.cls +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/article.cls +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/article.cls +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/article.cls +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/article.cls +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/article.cls +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/article.cls +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/article.cls +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/article.cls +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/article.cls +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/article.cls +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/size12.clo +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/size12.clo +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/size12.clo +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/size12.clo +INPUT 
/usr/share/texlive/texmf-dist/fonts/map/fontname/texfonts.map +INPUT /usr/share/texlive/texmf-dist/fonts/tfm/public/cm/cmr12.tfm +INPUT /usr/share/texlive/texmf-dist/tex/latex/ucs/ucs.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/ucs/ucs.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/ucs/ucs.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/ucs/ucs.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/ucs/ucs.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/ucs/ucs.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/ucs/ucs.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/ucs/ucs.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/ucs/ucs.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/ucs/ucs.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/ucs/ucs.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/ucs/data/uni-global.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/ucs/data/uni-global.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/ucs/data/uni-global.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/ucs/data/uni-global.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/inputenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/inputenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/inputenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/inputenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/inputenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/inputenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/inputenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/inputenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/inputenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/inputenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/inputenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/ucs/utf8x.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/ucs/utf8x.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/ucs/utf8x.def +INPUT 
/usr/share/texlive/texmf-dist/tex/latex/ucs/utf8x.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/ucs/ucs.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty +INPUT /usr/share/texlive/texmf-dist/fonts/tfm/jknappen/ec/ecrm1200.tfm +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/textcomp.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/textcomp.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/textcomp.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/textcomp.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/textcomp.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/textcomp.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/textcomp.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/textcomp.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/textcomp.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/textcomp.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/textcomp.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mathdesign.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mathdesign.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mathdesign.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mathdesign.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mathdesign.sty +INPUT 
/usr/share/texlive/texmf-dist/tex/latex/mathdesign/mathdesign.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mathdesign.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mathdesign.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mathdesign.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mathdesign.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mathdesign.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/keyval.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/ifthen.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/ifthen.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/ifthen.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/ifthen.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/ifthen.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/ifthen.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/ifthen.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/ifthen.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/ifthen.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/ifthen.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/ifthen.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdugm/mdugm.cfg +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdugm/mdugm.cfg +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdugm/mdugm.cfg +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdugm/mdugm.cfg +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdugm/mdugm.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdugm/mdugm.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdugm/mdugm.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdugm/mdugm.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdugm/mdugm.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdugm/mdugm.sty +INPUT 
/usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdugm/mdugm.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdugm/mdugm.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdugm/mdugm.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdugm/mdugm.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdugm/mdugm.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdfont.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdfont.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdfont.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdfont.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdsffont.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdsffont.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdsffont.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdsffont.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdttfont.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdttfont.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdttfont.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdttfont.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/xkeyval/xkeyval.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdugm/t1mdugm.fd +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdugm/t1mdugm.fd +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdugm/t1mdugm.fd +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdugm/t1mdugm.fd +INPUT /usr/share/texlive/texmf-dist/fonts/tfm/public/mathdesign/mdugm/mdugmr8t.tfm +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty +INPUT 
/usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/frontendlayer/tikz.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/frontendlayer/tikz.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/frontendlayer/tikz.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/frontendlayer/tikz.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/frontendlayer/tikz.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/frontendlayer/tikz.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/frontendlayer/tikz.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/frontendlayer/tikz.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/frontendlayer/tikz.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/frontendlayer/tikz.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/frontendlayer/tikz.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/basiclayer/pgf.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/basiclayer/pgf.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/basiclayer/pgf.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/basiclayer/pgf.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/basiclayer/pgf.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/basiclayer/pgf.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/basiclayer/pgf.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/basiclayer/pgf.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/basiclayer/pgf.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/basiclayer/pgf.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/basiclayer/pgf.sty +INPUT 
/usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfrcs.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfrcs.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfrcs.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfrcs.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfrcs.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfrcs.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfrcs.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfrcs.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfrcs.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfrcs.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfrcs.sty +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgfutil-common.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgfutil-common-lists.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgfutil-latex.def +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgfrcs.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgfrcs.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgfrcs.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgfrcs.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/pgf.revision.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/pgf.revision.tex +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/basiclayer/pgfcore.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/basiclayer/pgfcore.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/basiclayer/pgfcore.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/basiclayer/pgfcore.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/basiclayer/pgfcore.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/basiclayer/pgfcore.sty +INPUT 
/usr/share/texlive/texmf-dist/tex/latex/pgf/basiclayer/pgfcore.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/basiclayer/pgfcore.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/basiclayer/pgfcore.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/basiclayer/pgfcore.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/basiclayer/pgfcore.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphicx.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphicx.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphicx.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphicx.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphicx.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphicx.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphicx.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphicx.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphicx.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphicx.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphicx.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/keyval.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphics.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphics.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphics.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphics.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphics.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphics.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphics.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphics.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphics.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphics.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphics.sty +INPUT 
/usr/share/texlive/texmf-dist/tex/latex/graphics/trig.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/trig.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/trig.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/trig.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/trig.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/trig.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/trig.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/trig.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/trig.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/trig.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/trig.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics-cfg/graphics.cfg +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics-cfg/graphics.cfg +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics-cfg/graphics.cfg +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics-cfg/graphics.cfg +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics-def/pdftex.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics-def/pdftex.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics-def/pdftex.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics-def/pdftex.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/systemlayer/pgfsys.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/systemlayer/pgfsys.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/systemlayer/pgfsys.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/systemlayer/pgfsys.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/systemlayer/pgfsys.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/systemlayer/pgfsys.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/systemlayer/pgfsys.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/systemlayer/pgfsys.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/systemlayer/pgfsys.sty +INPUT 
/usr/share/texlive/texmf-dist/tex/latex/pgf/systemlayer/pgfsys.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/systemlayer/pgfsys.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfrcs.sty +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/systemlayer/pgfsys.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/systemlayer/pgfsys.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/systemlayer/pgfsys.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/systemlayer/pgfsys.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgfkeys.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgfkeysfiltered.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/systemlayer/pgf.cfg +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/systemlayer/pgfsys-pdftex.def +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/systemlayer/pgfsys-pdftex.def +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/systemlayer/pgfsys-common-pdf.def +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/systemlayer/pgfsyssoftpath.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/systemlayer/pgfsyssoftpath.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/systemlayer/pgfsyssoftpath.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/systemlayer/pgfsyssoftpath.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/systemlayer/pgfsysprotocol.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/systemlayer/pgfsysprotocol.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/systemlayer/pgfsysprotocol.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/systemlayer/pgfsysprotocol.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/keyval.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/xcolor/xcolor.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/xcolor/xcolor.sty +INPUT 
/usr/share/texlive/texmf-dist/tex/latex/xcolor/xcolor.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/xcolor/xcolor.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/xcolor/xcolor.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/xcolor/xcolor.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/xcolor/xcolor.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/xcolor/xcolor.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/xcolor/xcolor.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/xcolor/xcolor.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/xcolor/xcolor.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics-cfg/color.cfg +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics-cfg/color.cfg +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics-cfg/color.cfg +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics-cfg/color.cfg +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcore.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcore.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcore.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcore.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmath.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathcalc.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathutil.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathparser.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathfunctions.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathfunctions.basic.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathfunctions.trigonometric.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathfunctions.random.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathfunctions.comparison.code.tex +INPUT 
/usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathfunctions.base.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathfunctions.round.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathfunctions.misc.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathfunctions.integerarithmetics.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathfloat.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfint.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcorepoints.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcorepathconstruct.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcorepathusage.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcorescopes.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcoregraphicstate.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcoretransformations.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcorequick.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcoreobjects.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcorepathprocessing.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcorearrows.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcoreshade.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcoreimage.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcoreexternal.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcorelayers.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcoretransparency.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcorepatterns.code.tex +INPUT 
/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcorerdf.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/modules/pgfmoduleshapes.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/modules/pgfmoduleplot.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/compatibility/pgfcomp-version-0-65.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/compatibility/pgfcomp-version-0-65.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/compatibility/pgfcomp-version-0-65.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/compatibility/pgfcomp-version-0-65.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/compatibility/pgfcomp-version-0-65.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/compatibility/pgfcomp-version-0-65.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/compatibility/pgfcomp-version-0-65.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/compatibility/pgfcomp-version-0-65.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/compatibility/pgfcomp-version-0-65.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/compatibility/pgfcomp-version-0-65.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/compatibility/pgfcomp-version-0-65.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfrcs.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/compatibility/pgfcomp-version-1-18.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/compatibility/pgfcomp-version-1-18.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/compatibility/pgfcomp-version-1-18.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/compatibility/pgfcomp-version-1-18.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/compatibility/pgfcomp-version-1-18.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/compatibility/pgfcomp-version-1-18.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/compatibility/pgfcomp-version-1-18.sty +INPUT 
/usr/share/texlive/texmf-dist/tex/latex/pgf/compatibility/pgfcomp-version-1-18.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/compatibility/pgfcomp-version-1-18.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/compatibility/pgfcomp-version-1-18.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/compatibility/pgfcomp-version-1-18.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfrcs.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgffor.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgffor.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgffor.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgffor.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgffor.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgffor.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgffor.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgffor.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgffor.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgffor.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgffor.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfrcs.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfkeys.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfkeys.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfkeys.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfkeys.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfkeys.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfkeys.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfkeys.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfkeys.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfkeys.sty +INPUT 
/usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfkeys.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfkeys.sty +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgfkeys.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgfkeys.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgfkeys.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgfkeys.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/math/pgfmath.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/math/pgfmath.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/math/pgfmath.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/math/pgfmath.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/math/pgfmath.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/math/pgfmath.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/math/pgfmath.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/math/pgfmath.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/math/pgfmath.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/math/pgfmath.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/math/pgfmath.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfrcs.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfkeys.sty +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmath.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmath.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmath.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmath.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgffor.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgffor.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgffor.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgffor.code.tex +INPUT 
/usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmath.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/tikz.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/tikz.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/tikz.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/tikz.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/pgflibraryplothandlers.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/pgflibraryplothandlers.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/modules/pgfmodulematrix.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibrarytopaths.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibrarytopaths.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibrarypositioning.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibrarypositioning.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshapes.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshapes.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshapes.geometric.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshapes.geometric.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/shapes/pgflibraryshapes.geometric.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/shapes/pgflibraryshapes.geometric.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshapes.misc.code.tex +INPUT 
/usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshapes.misc.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/shapes/pgflibraryshapes.misc.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/shapes/pgflibraryshapes.misc.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshapes.symbols.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshapes.symbols.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/shapes/pgflibraryshapes.symbols.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/shapes/pgflibraryshapes.symbols.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshapes.arrows.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshapes.arrows.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/shapes/pgflibraryshapes.arrows.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/shapes/pgflibraryshapes.arrows.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshapes.callouts.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshapes.callouts.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/shapes/pgflibraryshapes.callouts.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/shapes/pgflibraryshapes.callouts.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshapes.multipart.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshapes.multipart.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/shapes/pgflibraryshapes.multipart.code.tex +INPUT 
/usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/shapes/pgflibraryshapes.multipart.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryarrows.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryarrows.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/pgflibraryarrows.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/pgflibraryarrows.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshadows.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshadows.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryfadings.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryfadings.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/pgflibraryfadings.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/pgflibraryfadings.code.tex +OUTPUT example_1_dag.pdf +INPUT /usr/share/texlive/texmf-dist/tex/latex/l3backend/l3backend-pdftex.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/l3backend/l3backend-pdftex.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/l3backend/l3backend-pdftex.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/l3backend/l3backend-pdftex.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/l3backend/l3backend-pdftex.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/l3backend/l3backend-pdftex.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/l3backend/l3backend-pdftex.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/l3backend/l3backend-pdftex.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/l3backend/l3backend-pdftex.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/l3backend/l3backend-pdftex.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/l3backend/l3backend-pdftex.def 
+INPUT ./example_1_dag.aux +INPUT example_1_dag.aux +INPUT example_1_dag.aux +OUTPUT example_1_dag.aux +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdacmr.fd +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdacmr.fd +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdacmr.fd +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdacmr.fd +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdbcmr.fd +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdbcmr.fd +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdbcmr.fd +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdbcmr.fd +INPUT /usr/share/texlive/texmf-dist/tex/latex/ucs/ucsencs.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/ucs/ucsencs.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/ucs/ucsencs.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/ucs/ucsencs.def +INPUT /usr/share/texlive/texmf-dist/tex/context/base/mkii/supp-pdf.mkii +INPUT /usr/share/texlive/texmf-dist/tex/context/base/mkii/supp-pdf.mkii +INPUT /usr/share/texlive/texmf-dist/tex/context/base/mkii/supp-pdf.mkii +INPUT /usr/share/texlive/texmf-dist/tex/context/base/mkii/supp-pdf.mkii +INPUT /usr/share/texlive/texmf-dist/tex/latex/epstopdf-pkg/epstopdf-base.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/epstopdf-pkg/epstopdf-base.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/epstopdf-pkg/epstopdf-base.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/epstopdf-pkg/epstopdf-base.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/epstopdf-pkg/epstopdf-base.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/epstopdf-pkg/epstopdf-base.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/epstopdf-pkg/epstopdf-base.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/epstopdf-pkg/epstopdf-base.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/epstopdf-pkg/epstopdf-base.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/epstopdf-pkg/epstopdf-base.sty +INPUT 
/usr/share/texlive/texmf-dist/tex/latex/epstopdf-pkg/epstopdf-base.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/latexconfig/epstopdf-sys.cfg +INPUT /usr/share/texlive/texmf-dist/tex/latex/latexconfig/epstopdf-sys.cfg +INPUT /usr/share/texlive/texmf-dist/tex/latex/latexconfig/epstopdf-sys.cfg +INPUT /usr/share/texlive/texmf-dist/tex/latex/latexconfig/epstopdf-sys.cfg +INPUT /usr/share/texlive/texmf-dist/fonts/tfm/public/mathdesign/mdugm/mdugmr8t.tfm +INPUT /usr/share/texlive/texmf-dist/fonts/vf/public/mathdesign/mdugm/mdugmr8t.vf +INPUT /usr/share/texlive/texmf-dist/fonts/tfm/public/mathdesign/mdugm/md-gmr8y.tfm +INPUT /usr/share/texlive/texmf-dist/fonts/tfm/public/mathdesign/mdugm/md-gmr8t.tfm +INPUT /var/lib/texmf/fonts/map/pdftex/updmap/pdftex.map +INPUT /usr/share/texlive/texmf-dist/fonts/vf/public/mathdesign/mdugm/mdugmr8t.vf +INPUT /usr/share/texlive/texmf-dist/fonts/tfm/public/mathdesign/mdugm/md-gmr8y.tfm +INPUT /usr/share/texlive/texmf-dist/fonts/tfm/public/mathdesign/mdugm/md-gmr8t.tfm +INPUT example_1_dag.aux +INPUT /usr/share/texlive/texmf-dist/fonts/enc/dvips/ly1/texnansi.enc +INPUT /usr/local/share/texmf/fonts/type1/urw/garamond/ugmr8a.pfb diff --git a/charts/example_1_dag/example_1_dag.png b/charts/example_1_dag/example_1_dag.png new file mode 100644 index 0000000..c5e12a9 Binary files /dev/null and b/charts/example_1_dag/example_1_dag.png differ diff --git a/charts/example_1_dag/example_1_dag.svg b/charts/example_1_dag/example_1_dag.svg new file mode 100644 index 0000000..f7e307b --- /dev/null +++ b/charts/example_1_dag/example_1_dag.svg @@ -0,0 +1,202 @@ + + + +YXWKobservedunobserved diff --git a/charts/example_1_dag/example_1_dag.tex b/charts/example_1_dag/example_1_dag.tex new file mode 100644 index 0000000..75df0a2 --- /dev/null +++ b/charts/example_1_dag/example_1_dag.tex @@ -0,0 +1,46 @@ +\documentclass[12pt]{standalone} + +\usepackage{ucs} +\usepackage[utf8x]{inputenc} + +\usepackage[T1]{fontenc} +\usepackage{textcomp} 
+\renewcommand{\rmdefault}{ugm} +\renewcommand{\sfdefault}{phv} +\usepackage[garamond]{mathdesign} +\usepackage{tikz} +\usetikzlibrary{positioning, shapes, arrows, shadows, } + +\begin{document} + +\tikzset{>=latex} +\begin{tikzpicture}[ + observed/.style={circle, draw}, + unobserved/.style={draw, circle, fill=gray!40}, + legend/.style={rectangle, draw}, + partly observed/.style 2 args={draw, fill=#2, path picture={ + \fill[#1, sharp corners] (path picture bounding box.south west) -| + (path picture bounding box.north east) -- cycle;}, + circle} + ] + + + \node[observed] (y) {Y}; +% \node[observed,above=of y, xshift=1cm] (r) {R}; + \node[partly observed={white}{gray!40}, above=of y] (x) {X}; + \node[observed, left=of x,xshift=-1] (w) {W}; + \node[unobserved, above=of w, xshift=1cm] (k) {K}; + + \matrix [draw, below, yshift=-0.2cm, font=\small, align=center, column sep=2\pgflinewidth, inner sep=0.6em, outer sep=0em, nodes={align=center, anchor=center}] at (current bounding box.south){ + % \node[observed,label=right:observed] {}; \\ + % \node[unobserved,label=right:unobserved]{}; \\ + }; + + \draw[->] (x) -- (y); + \draw[->] (x) -- (w); + \draw[-] (k) -- (x); + \draw[->] (k) -- (w); + +\end{tikzpicture} +\end{document} + diff --git a/charts/example_2_dag/#Makefile# b/charts/example_2_dag/#Makefile# new file mode 100644 index 0000000..a046bd4 --- /dev/null +++ b/charts/example_2_dag/#Makefile# @@ -0,0 +1,25 @@ +#!/usr/bin/make + +all: $(patsubst %.tex,%.svg,$(wildcard *.tex)) + +%.svg: %.pdf + /usr/bin/inkscape $< --export-plain-svg=$@ + +%.pdf: %.tex + latexmk -f -pdf $< + +clean: + latexmk -C *.tex + rm -f *.tmp + rm -f vc + rm *.svg + +viewpdf: all + evince *.pdf + +vc: + vc-git + +pdf: all + +.PHONY: clean all diff --git a/charts/example_2_dag/Makefile b/charts/example_2_dag/Makefile new file mode 100644 index 0000000..75fef1f --- /dev/null +++ b/charts/example_2_dag/Makefile @@ -0,0 +1,28 @@ +#!/usr/bin/make + +all: $(patsubst %.tex,%.svg,$(wildcard *.tex)) 
$(patsubst %.tex,%.png,$(wildcard *.tex)) + +%.png: %.pdf + convert -density 300 -transparent white $< $@ + +%.svg: %.pdf + /usr/bin/inkscape $< --export-plain-svg=$@ + +%.pdf: %.tex + latexmk -f -pdf $< + +clean: + latexmk -C *.tex + rm -f *.tmp + rm -f vc + rm *.svg + +viewpdf: all + evince *.pdf + +vc: + vc-git + +pdf: all + +.PHONY: clean all diff --git a/charts/example_2_dag/auto/example_2_dag.el b/charts/example_2_dag/auto/example_2_dag.el new file mode 100644 index 0000000..b8fb791 --- /dev/null +++ b/charts/example_2_dag/auto/example_2_dag.el @@ -0,0 +1,27 @@ +(TeX-add-style-hook + "example_2_dag" + (lambda () + (TeX-add-to-alist 'LaTeX-provided-class-options + '(("standalone" "12pt"))) + (TeX-add-to-alist 'LaTeX-provided-package-options + '(("inputenc" "utf8x") ("fontenc" "T1") ("mathdesign" "garamond"))) + (add-to-list 'LaTeX-verbatim-macros-with-braces-local "href") + (add-to-list 'LaTeX-verbatim-macros-with-braces-local "hyperref") + (add-to-list 'LaTeX-verbatim-macros-with-braces-local "hyperimage") + (add-to-list 'LaTeX-verbatim-macros-with-braces-local "hyperbaseurl") + (add-to-list 'LaTeX-verbatim-macros-with-braces-local "nolinkurl") + (add-to-list 'LaTeX-verbatim-macros-with-braces-local "url") + (add-to-list 'LaTeX-verbatim-macros-with-braces-local "path") + (add-to-list 'LaTeX-verbatim-macros-with-delims-local "path") + (TeX-run-style-hooks + "latex2e" + "standalone" + "standalone12" + "ucs" + "inputenc" + "fontenc" + "textcomp" + "mathdesign" + "tikz")) + :latex) + diff --git a/charts/example_2_dag/example_2_dag.fdb_latexmk b/charts/example_2_dag/example_2_dag.fdb_latexmk new file mode 100644 index 0000000..91eab2e --- /dev/null +++ b/charts/example_2_dag/example_2_dag.fdb_latexmk @@ -0,0 +1,146 @@ +# Fdb version 3 +["pdflatex"] 1653334323 "example_2_dag.tex" "example_2_dag.pdf" "example_2_dag" 1653334325 + "/dev/null" 0 -1 0 "" + "/etc/texmf/web2c/texmf.cnf" 1650342257 475 c0e671620eb5563b2130f56340a5fde8 "" + 
"/usr/local/share/texmf/fonts/type1/urw/garamond/ugmr8a.pfb" 1094499120 70277 baa4b3da2c0fdc989d3b086e9f92a592 "" + "/usr/share/texlive/texmf-dist/fonts/enc/dvips/ly1/texnansi.enc" 1276641224 6938 5135be6a8802f13a7faf296d1959a1ab "" + "/usr/share/texlive/texmf-dist/fonts/map/fontname/texfonts.map" 1577235249 3524 cb3e574dea2d1052e39280babc910dc8 "" + "/usr/share/texlive/texmf-dist/fonts/tfm/jknappen/ec/ecrm1200.tfm" 1136768653 3584 f80ddd985bd00e29e9a6047ebd9d4781 "" + "/usr/share/texlive/texmf-dist/fonts/tfm/public/cm/cmr12.tfm" 1136768653 1288 655e228510b4c2a1abe905c368440826 "" + "/usr/share/texlive/texmf-dist/fonts/tfm/public/mathdesign/mdugm/md-gmr8t.tfm" 1379030001 988 7af44ec9a0d02adcdeac5730bfa6c900 "" + "/usr/share/texlive/texmf-dist/fonts/tfm/public/mathdesign/mdugm/md-gmr8y.tfm" 1379030001 1420 c083b7ec6e57bd0ed303204f3ed5498f "" + "/usr/share/texlive/texmf-dist/fonts/tfm/public/mathdesign/mdugm/mdugmr8t.tfm" 1379030001 19500 8b5a37a37d0abbdc70524f1b83751a16 "" + "/usr/share/texlive/texmf-dist/fonts/vf/public/mathdesign/mdugm/mdugmr8t.vf" 1379030001 2232 00d038d9916ec0c6921fcb4bc893379c "" + "/usr/share/texlive/texmf-dist/tex/context/base/mkii/supp-pdf.mkii" 1461363279 71627 94eb9990bed73c364d7f53f960cc8c5b "" + "/usr/share/texlive/texmf-dist/tex/generic/iftex/ifluatex.sty" 1572645307 492 1994775aa15b0d1289725a0b1bbc2d4c "" + "/usr/share/texlive/texmf-dist/tex/generic/iftex/iftex.sty" 1583617216 6501 4011d89d9621e0b0901138815ba5ff29 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcore.code.tex" 1601326656 992 855ff26741653ab54814101ca36e153c "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcorearrows.code.tex" 1601326656 43820 1fef971b75380574ab35a0d37fd92608 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcoreexternal.code.tex" 1601326656 19324 f4e4c6403dd0f1605fd20ed22fa79dea "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcoregraphicstate.code.tex" 1601326656 6038 
ccb406740cc3f03bbfb58ad504fe8c27 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcoreimage.code.tex" 1601326656 6944 e12f8f7a7364ddf66f93ba30fb3a3742 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcorelayers.code.tex" 1601326656 4883 42daaf41e27c3735286e23e48d2d7af9 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcoreobjects.code.tex" 1601326656 2544 8c06d2a7f0f469616ac9e13db6d2f842 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcorepathconstruct.code.tex" 1601326656 44195 5e390c414de027626ca5e2df888fa68d "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcorepathprocessing.code.tex" 1601326656 17311 2ef6b2e29e2fc6a2fc8d6d652176e257 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcorepathusage.code.tex" 1601326656 21302 788a79944eb22192a4929e46963a3067 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcorepatterns.code.tex" 1601326656 9690 01feb7cde25d4293ef36eef45123eb80 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcorepoints.code.tex" 1601326656 33335 dd1fa4814d4e51f18be97d88bf0da60c "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcorequick.code.tex" 1601326656 2965 4c2b1f4e0826925746439038172e5d6f "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcorerdf.code.tex" 1601326656 5196 2cc249e0ee7e03da5f5f6589257b1e5b "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcorescopes.code.tex" 1601326656 20726 d4c8db1e2e53b72721d29916314a22ea "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcoreshade.code.tex" 1601326656 35249 abd4adf948f960299a4b3d27c5dddf46 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcoretransformations.code.tex" 1601326656 21989 fdc867d05d228316de137a9fc5ec3bbe "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcoretransparency.code.tex" 1601326656 8893 e851de2175338fdf7c17f3e091d94618 "" + 
"/usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryarrows.code.tex" 1601326656 319 225dfe354ba678ff3c194968db39d447 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryfadings.code.tex" 1601326656 1179 5483d86c1582c569e665c74efab6281f "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibrarypositioning.code.tex" 1601326656 3937 3f208572dd82c71103831da976d74f1a "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshadows.code.tex" 1601326656 2889 d698e3a959304efa342d47e3bb86da5b "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshapes.arrows.code.tex" 1601326656 410 048d1174dabde96757a5387b8f23d968 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshapes.callouts.code.tex" 1601326656 1201 8bd51e254d3ecf0cd2f21edd9ab6f1bb "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshapes.code.tex" 1601326656 494 8de62576191924285b021f4fc4292e16 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshapes.geometric.code.tex" 1601326656 339 be0fe46d92a80e3385dd6a83511a46f2 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshapes.misc.code.tex" 1601326656 329 ba6d5440f8c16779c2384e0614158266 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshapes.multipart.code.tex" 1601326656 919 938802205ca20d7c36615aabc4d34be2 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshapes.symbols.code.tex" 1601326656 475 4b4056fe07caa0603fede9a162fe666d "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibrarytopaths.code.tex" 1608933718 11518 738408f795261b70ce8dd47459171309 "" + 
"/usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/tikz.code.tex" 1621110968 186007 6e7dfe0bd57520fd5f91641aa72dcac8 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/pgflibraryarrows.code.tex" 1601326656 31874 89148c383c49d4c72114a76fd0062299 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/pgflibraryfadings.code.tex" 1601326656 2563 d5b174eb7709fd6bdcc2f70953dbdf8e "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/pgflibraryplothandlers.code.tex" 1601326656 32995 ac577023e12c0e4bd8aa420b2e852d1a "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/shapes/pgflibraryshapes.arrows.code.tex" 1601326656 91587 e30123381f7b9bcf1341c31c6be18b94 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/shapes/pgflibraryshapes.callouts.code.tex" 1601326656 33336 427c354e28a4802ffd781da22ae9f383 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/shapes/pgflibraryshapes.geometric.code.tex" 1606168878 160993 6a81d63e475cc43874b46ed32a0a37c8 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/shapes/pgflibraryshapes.misc.code.tex" 1601326656 46241 588910a2f1e0a99f2c3e14490683c20d "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/shapes/pgflibraryshapes.multipart.code.tex" 1601326656 62281 aff261ef10ba6cbe8e3c872a38c05a61 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/shapes/pgflibraryshapes.symbols.code.tex" 1601326656 90515 e30b2c9c93aacc373e47917c0c2a48ed "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfint.code.tex" 1557692582 3063 8c415c68a0f3394e45cfeca0b65f6ee6 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmath.code.tex" 1601326656 521 8e224a7af69b7fee4451d1bf76b46654 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathcalc.code.tex" 1601326656 13391 84d29568c13bdce4133ab4a214711112 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathfloat.code.tex" 1601326656 104935 184ed87524e76d4957860df4ce0cd1c3 "" + 
"/usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathfunctions.base.code.tex" 1601326656 10165 cec5fa73d49da442e56efc2d605ef154 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathfunctions.basic.code.tex" 1601326656 28178 41c17713108e0795aac6fef3d275fbca "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathfunctions.code.tex" 1601326656 9989 c55967bf45126ff9b061fa2ca0c4694f "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathfunctions.comparison.code.tex" 1601326656 3865 ac538ab80c5cf82b345016e474786549 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathfunctions.integerarithmetics.code.tex" 1557692582 3177 27d85c44fbfe09ff3b2cf2879e3ea434 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathfunctions.misc.code.tex" 1621110968 11024 0179538121bc2dba172013a3ef89519f "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathfunctions.random.code.tex" 1608933718 7854 4176998eeefd8745ac6d2d4bd9c98451 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathfunctions.round.code.tex" 1601326656 3379 781797a101f647bab82741a99944a229 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathfunctions.trigonometric.code.tex" 1601326656 92405 f515f31275db273f97b9d8f52e1b0736 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathparser.code.tex" 1601326656 37376 11cd75aac3da1c1b152b2848f30adc14 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathutil.code.tex" 1601326656 8471 c2883569d03f69e8e1cabfef4999cfd7 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/modules/pgfmodulematrix.code.tex" 1601326656 21201 08d231a2386e2b61d64641c50dc15abd "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/modules/pgfmoduleplot.code.tex" 1601326656 16121 346f9013d34804439f7436ff6786cef7 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/modules/pgfmoduleshapes.code.tex" 1621110968 44784 cedaa399d15f95e68e22906e2cc09ef8 "" + 
"/usr/share/texlive/texmf-dist/tex/generic/pgf/pgf.revision.tex" 1621110968 465 d68603f8b820ea4a08cce534944db581 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/systemlayer/pgf.cfg" 1601326656 926 2963ea0dcf6cc6c0a770b69ec46a477b "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/systemlayer/pgfsys-common-pdf.def" 1601326656 5546 f3f24d7898386cb7daac70bdd2c4d6dc "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/systemlayer/pgfsys-pdftex.def" 1601326656 12601 4786e597516eddd82097506db7cfa098 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/systemlayer/pgfsys.code.tex" 1621110968 61163 9b2eefc24e021323e0fc140e9826d016 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/systemlayer/pgfsysprotocol.code.tex" 1601326656 1896 b8e0ca0ac371d74c0ca05583f6313c91 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/systemlayer/pgfsyssoftpath.code.tex" 1601326656 7778 53c8b5623d80238f6a20aa1df1868e63 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgffor.code.tex" 1606168878 23997 a4bed72405fa644418bea7eac2887006 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgfkeys.code.tex" 1621110968 37060 797782f0eb50075c9bc952374d9a659a "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgfkeysfiltered.code.tex" 1601326656 37431 9abe862035de1b29c7a677f3205e3d9f "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgfrcs.code.tex" 1601326656 4494 af17fb7efeafe423710479858e42fa7e "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgfutil-common-lists.tex" 1601326656 7251 fb18c67117e09c64de82267e12cd8aa4 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgfutil-common.tex" 1621110968 29274 e15c5b7157d21523bd9c9f1dfa146b8e "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgfutil-latex.def" 1621110968 6825 a2b0ea5b539dda0625e99dd15785ab59 "" + "/usr/share/texlive/texmf-dist/tex/generic/xkeyval/keyval.tex" 1605910342 2725 9f5d0b27f1f9a620c6ea983d6d41501d "" + 
"/usr/share/texlive/texmf-dist/tex/generic/xkeyval/xkeyval.tex" 1605910342 19231 3cbf682090baecad8e17a66b7a271ed1 "" + "/usr/share/texlive/texmf-dist/tex/generic/xkeyval/xkvutils.tex" 1605910342 7677 cf3e6aa6a8d444f55327f61df80bfa0c "" + "/usr/share/texlive/texmf-dist/tex/latex/base/article.cls" 1636758526 20144 8a7de377ae7a11ee924a7499611f5a9d "" + "/usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty" 1622581934 4946 461cc78f6f26901410d9f1d725079cc6 "" + "/usr/share/texlive/texmf-dist/tex/latex/base/ifthen.sty" 1622581934 5157 f308c7c04889e16c588e78aa42599fae "" + "/usr/share/texlive/texmf-dist/tex/latex/base/inputenc.sty" 1622581934 5049 969aec05d5f39c43f8005910498fcf90 "" + "/usr/share/texlive/texmf-dist/tex/latex/base/size12.clo" 1636758526 8449 bc7344e882df4d7e51c046514dee83e4 "" + "/usr/share/texlive/texmf-dist/tex/latex/base/textcomp.sty" 1622581934 2894 55431114fc0e491ecee275edafd6c881 "" + "/usr/share/texlive/texmf-dist/tex/latex/epstopdf-pkg/epstopdf-base.sty" 1579991033 13886 d1306dcf79a944f6988e688c1785f9ce "" + "/usr/share/texlive/texmf-dist/tex/latex/graphics-cfg/color.cfg" 1459978653 1213 620bba36b25224fa9b7e1ccb4ecb76fd "" + "/usr/share/texlive/texmf-dist/tex/latex/graphics-cfg/graphics.cfg" 1465944070 1224 978390e9c2234eab29404bc21b268d1e "" + "/usr/share/texlive/texmf-dist/tex/latex/graphics-def/pdftex.def" 1601931164 19103 48d29b6e2a64cb717117ef65f107b404 "" + "/usr/share/texlive/texmf-dist/tex/latex/graphics/graphics.sty" 1622581934 18399 7e40f80366dffb22c0e7b70517db5cb4 "" + "/usr/share/texlive/texmf-dist/tex/latex/graphics/graphicx.sty" 1636758526 7996 a8fb260d598dcaf305a7ae7b9c3e3229 "" + "/usr/share/texlive/texmf-dist/tex/latex/graphics/keyval.sty" 1622581934 2671 4de6781a30211fe0ea4c672e4a2a8166 "" + "/usr/share/texlive/texmf-dist/tex/latex/graphics/trig.sty" 1636758526 4009 187ea2dc3194cd5a76cd99a8d7a6c4d0 "" + "/usr/share/texlive/texmf-dist/tex/latex/l3backend/l3backend-pdftex.def" 1642022539 29921 f0f4f870357ebfb8fe58ed9ed4ee9b92 "" 
+ "/usr/share/texlive/texmf-dist/tex/latex/latexconfig/epstopdf-sys.cfg" 1279039959 678 4792914a8f45be57bb98413425e4c7af "" + "/usr/share/texlive/texmf-dist/tex/latex/mathdesign/mathdesign.sty" 1377991452 9111 d865fc87f99dbc5273fb00f1d7091d76 "" + "/usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdacmr.fd" 1153697689 579 116e648415099e5e059da594ef56c9f0 "" + "/usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdbcmr.fd" 1153697689 579 ddcbed007a246f2b5a98aedc86efeed0 "" + "/usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdfont.def" 1377991452 5878 bba53c9220a1555c41919107cf6f41c3 "" + "/usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdsffont.def" 1377991452 16973 0d74f58659233f7bbf4e2551e5c1b6ba "" + "/usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdttfont.def" 1377991452 9718 378a12581d907c0af4433a9e908339df "" + "/usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdugm/mdugm.cfg" 1379030001 3290 359d6e75cf1deff239ae04f0de11a9cd "" + "/usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdugm/mdugm.sty" 1379030001 44894 0dc5cc17cbd2a8c871ce80ca701d85a5 "" + "/usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdugm/t1mdugm.fd" 1379030001 1522 66675d9aa6eb36761b2c579a644335b4 "" + "/usr/share/texlive/texmf-dist/tex/latex/pgf/basiclayer/pgf.sty" 1601326656 1090 bae35ef70b3168089ef166db3e66f5b2 "" + "/usr/share/texlive/texmf-dist/tex/latex/pgf/basiclayer/pgfcore.sty" 1601326656 410 615550c46f918fcbee37641b02a862d9 "" + "/usr/share/texlive/texmf-dist/tex/latex/pgf/compatibility/pgfcomp-version-0-65.sty" 1601326656 21013 f4ff83d25bb56552493b030f27c075ae "" + "/usr/share/texlive/texmf-dist/tex/latex/pgf/compatibility/pgfcomp-version-1-18.sty" 1601326656 989 c49c8ae06d96f8b15869da7428047b1e "" + "/usr/share/texlive/texmf-dist/tex/latex/pgf/frontendlayer/tikz.sty" 1601326656 339 c2e180022e3afdb99c7d0ea5ce469b7d "" + "/usr/share/texlive/texmf-dist/tex/latex/pgf/math/pgfmath.sty" 1601326656 306 c56a323ca5bf9242f54474ced10fca71 "" + 
"/usr/share/texlive/texmf-dist/tex/latex/pgf/systemlayer/pgfsys.sty" 1601326656 443 8c872229db56122037e86bcda49e14f3 "" + "/usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgffor.sty" 1601326656 348 ee405e64380c11319f0e249fed57e6c5 "" + "/usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfkeys.sty" 1601326656 274 5ae372b7df79135d240456a1c6f2cf9a "" + "/usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfrcs.sty" 1601326656 325 f9f16d12354225b7dd52a3321f085955 "" + "/usr/share/texlive/texmf-dist/tex/latex/standalone/standalone.cfg" 1522098998 1015 662b4d7ad816b857a598284525f5c75e "" + "/usr/share/texlive/texmf-dist/tex/latex/standalone/standalone.cls" 1522098998 28890 df75e6d37f47b7e27bff3f37375336b3 "" + "/usr/share/texlive/texmf-dist/tex/latex/tools/shellesc.sty" 1622581934 4118 0f286eca74ee36b7743ff20320e5479f "" + "/usr/share/texlive/texmf-dist/tex/latex/ucs/data/uni-global.def" 1368571634 1375 8a855db83af5d6753ccbbd32e6a8a901 "" + "/usr/share/texlive/texmf-dist/tex/latex/ucs/ucs.sty" 1368571634 27982 5723d81d568db410592a59b85fb3eaae "" + "/usr/share/texlive/texmf-dist/tex/latex/ucs/ucsencs.def" 1368571634 22368 c53c9d0d16c65bef2b157515c9d9f658 "" + "/usr/share/texlive/texmf-dist/tex/latex/ucs/utf8x.def" 1368571634 8036 21f7ac37aafb6cfeddbb196b8bfd6280 "" + "/usr/share/texlive/texmf-dist/tex/latex/xcolor/xcolor.sty" 1635798903 56029 3f7889dab51d620aa43177c391b7b190 "" + "/usr/share/texlive/texmf-dist/tex/latex/xkeyval/xkeyval.sty" 1605910342 4902 efb3d66683a2da2a232f71e3a571a899 "" + "/usr/share/texlive/texmf-dist/web2c/texmf.cnf" 1644012257 39432 7155514e09a3d69036fac785183a21c2 "" + "/usr/share/texmf/tex/latex/preview/preview.sty" 1598018759 13747 d074c8555b22976c3890effbb5ce8ed7 "" + "/usr/share/texmf/web2c/texmf.cnf" 1644012257 39432 7155514e09a3d69036fac785183a21c2 "" + "/var/lib/texmf/fonts/map/pdftex/updmap/pdftex.map" 1650342827 4362114 c882e0fb2b702d36ed2d6636759689e2 "" + "/var/lib/texmf/web2c/pdftex/pdflatex.fmt" 1652219592 1590085 
c669746cfe044505095fd475f9edf6a3 "" + "example_2_dag.aux" 1653334325 32 3985256e7290058c681f74d7a3565a19 "pdflatex" + "example_2_dag.tex" 1653333623 1364 5305e471ef10398f88dd8d9067f782d9 "" + (generated) + "example_2_dag.aux" + "example_2_dag.log" + "example_2_dag.pdf" diff --git a/charts/example_2_dag/example_2_dag.fls b/charts/example_2_dag/example_2_dag.fls new file mode 100644 index 0000000..a233a0d --- /dev/null +++ b/charts/example_2_dag/example_2_dag.fls @@ -0,0 +1,573 @@ +PWD /home/nathante/ml_measurement_error/charts/example_2_dag +INPUT /etc/texmf/web2c/texmf.cnf +INPUT /usr/share/texmf/web2c/texmf.cnf +INPUT /usr/share/texlive/texmf-dist/web2c/texmf.cnf +INPUT /var/lib/texmf/web2c/pdftex/pdflatex.fmt +INPUT example_2_dag.tex +OUTPUT example_2_dag.log +INPUT /usr/share/texlive/texmf-dist/tex/latex/standalone/standalone.cls +INPUT /usr/share/texlive/texmf-dist/tex/latex/standalone/standalone.cls +INPUT /usr/share/texlive/texmf-dist/tex/latex/standalone/standalone.cls +INPUT /usr/share/texlive/texmf-dist/tex/latex/standalone/standalone.cls +INPUT /usr/share/texlive/texmf-dist/tex/latex/standalone/standalone.cls +INPUT /usr/share/texlive/texmf-dist/tex/latex/standalone/standalone.cls +INPUT /usr/share/texlive/texmf-dist/tex/latex/standalone/standalone.cls +INPUT /usr/share/texlive/texmf-dist/tex/latex/standalone/standalone.cls +INPUT /usr/share/texlive/texmf-dist/tex/latex/standalone/standalone.cls +INPUT /usr/share/texlive/texmf-dist/tex/latex/standalone/standalone.cls +INPUT /usr/share/texlive/texmf-dist/tex/latex/standalone/standalone.cls +INPUT /usr/share/texlive/texmf-dist/tex/latex/tools/shellesc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/tools/shellesc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/tools/shellesc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/tools/shellesc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/tools/shellesc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/tools/shellesc.sty +INPUT 
/usr/share/texlive/texmf-dist/tex/latex/tools/shellesc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/tools/shellesc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/tools/shellesc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/tools/shellesc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/tools/shellesc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/tools/shellesc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/tools/shellesc.sty +INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/ifluatex.sty +INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/ifluatex.sty +INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/ifluatex.sty +INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/ifluatex.sty +INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/ifluatex.sty +INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/ifluatex.sty +INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/ifluatex.sty +INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/ifluatex.sty +INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/ifluatex.sty +INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/ifluatex.sty +INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/ifluatex.sty +INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/ifluatex.sty +INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/ifluatex.sty +INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/iftex.sty +INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/iftex.sty +INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/iftex.sty +INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/iftex.sty +INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/iftex.sty +INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/iftex.sty +INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/iftex.sty +INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/iftex.sty +INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/iftex.sty +INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/iftex.sty +INPUT 
/usr/share/texlive/texmf-dist/tex/generic/iftex/iftex.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/xkeyval/xkeyval.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/xkeyval/xkeyval.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/xkeyval/xkeyval.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/xkeyval/xkeyval.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/xkeyval/xkeyval.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/xkeyval/xkeyval.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/xkeyval/xkeyval.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/xkeyval/xkeyval.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/xkeyval/xkeyval.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/xkeyval/xkeyval.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/xkeyval/xkeyval.sty +INPUT /usr/share/texlive/texmf-dist/tex/generic/xkeyval/xkeyval.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/xkeyval/xkvutils.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/xkeyval/keyval.tex +INPUT /dev/null +INPUT /dev/null +INPUT /dev/null +INPUT /usr/share/texlive/texmf-dist/tex/latex/standalone/standalone.cfg +INPUT /usr/share/texlive/texmf-dist/tex/latex/standalone/standalone.cfg +INPUT /usr/share/texlive/texmf-dist/tex/latex/standalone/standalone.cfg +INPUT /usr/share/texlive/texmf-dist/tex/latex/standalone/standalone.cfg +INPUT /usr/share/texmf/tex/latex/preview/preview.sty +INPUT /usr/share/texmf/tex/latex/preview/preview.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/article.cls +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/article.cls +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/article.cls +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/article.cls +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/article.cls +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/article.cls +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/article.cls +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/article.cls +INPUT 
/usr/share/texlive/texmf-dist/tex/latex/base/article.cls +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/article.cls +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/article.cls +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/size12.clo +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/size12.clo +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/size12.clo +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/size12.clo +INPUT /usr/share/texlive/texmf-dist/fonts/map/fontname/texfonts.map +INPUT /usr/share/texlive/texmf-dist/fonts/tfm/public/cm/cmr12.tfm +INPUT /usr/share/texlive/texmf-dist/tex/latex/ucs/ucs.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/ucs/ucs.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/ucs/ucs.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/ucs/ucs.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/ucs/ucs.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/ucs/ucs.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/ucs/ucs.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/ucs/ucs.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/ucs/ucs.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/ucs/ucs.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/ucs/ucs.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/ucs/data/uni-global.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/ucs/data/uni-global.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/ucs/data/uni-global.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/ucs/data/uni-global.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/inputenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/inputenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/inputenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/inputenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/inputenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/inputenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/inputenc.sty +INPUT 
/usr/share/texlive/texmf-dist/tex/latex/base/inputenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/inputenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/inputenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/inputenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/ucs/utf8x.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/ucs/utf8x.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/ucs/utf8x.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/ucs/utf8x.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/ucs/ucs.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty +INPUT /usr/share/texlive/texmf-dist/fonts/tfm/jknappen/ec/ecrm1200.tfm +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/textcomp.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/textcomp.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/textcomp.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/textcomp.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/textcomp.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/textcomp.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/textcomp.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/textcomp.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/textcomp.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/textcomp.sty +INPUT 
/usr/share/texlive/texmf-dist/tex/latex/base/textcomp.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mathdesign.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mathdesign.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mathdesign.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mathdesign.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mathdesign.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mathdesign.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mathdesign.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mathdesign.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mathdesign.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mathdesign.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mathdesign.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/keyval.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/ifthen.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/ifthen.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/ifthen.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/ifthen.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/ifthen.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/ifthen.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/ifthen.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/ifthen.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/ifthen.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/ifthen.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/ifthen.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdugm/mdugm.cfg +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdugm/mdugm.cfg +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdugm/mdugm.cfg +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdugm/mdugm.cfg +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdugm/mdugm.sty +INPUT 
/usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdugm/mdugm.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdugm/mdugm.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdugm/mdugm.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdugm/mdugm.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdugm/mdugm.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdugm/mdugm.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdugm/mdugm.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdugm/mdugm.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdugm/mdugm.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdugm/mdugm.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdfont.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdfont.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdfont.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdfont.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdsffont.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdsffont.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdsffont.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdsffont.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdttfont.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdttfont.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdttfont.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdttfont.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/xkeyval/xkeyval.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdugm/t1mdugm.fd +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdugm/t1mdugm.fd +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdugm/t1mdugm.fd +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdugm/t1mdugm.fd +INPUT 
/usr/share/texlive/texmf-dist/fonts/tfm/public/mathdesign/mdugm/mdugmr8t.tfm +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/frontendlayer/tikz.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/frontendlayer/tikz.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/frontendlayer/tikz.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/frontendlayer/tikz.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/frontendlayer/tikz.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/frontendlayer/tikz.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/frontendlayer/tikz.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/frontendlayer/tikz.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/frontendlayer/tikz.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/frontendlayer/tikz.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/frontendlayer/tikz.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/basiclayer/pgf.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/basiclayer/pgf.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/basiclayer/pgf.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/basiclayer/pgf.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/basiclayer/pgf.sty +INPUT 
/usr/share/texlive/texmf-dist/tex/latex/pgf/basiclayer/pgf.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/basiclayer/pgf.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/basiclayer/pgf.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/basiclayer/pgf.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/basiclayer/pgf.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/basiclayer/pgf.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfrcs.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfrcs.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfrcs.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfrcs.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfrcs.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfrcs.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfrcs.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfrcs.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfrcs.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfrcs.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfrcs.sty +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgfutil-common.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgfutil-common-lists.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgfutil-latex.def +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgfrcs.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgfrcs.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgfrcs.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgfrcs.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/pgf.revision.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/pgf.revision.tex +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/basiclayer/pgfcore.sty +INPUT 
/usr/share/texlive/texmf-dist/tex/latex/pgf/basiclayer/pgfcore.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/basiclayer/pgfcore.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/basiclayer/pgfcore.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/basiclayer/pgfcore.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/basiclayer/pgfcore.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/basiclayer/pgfcore.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/basiclayer/pgfcore.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/basiclayer/pgfcore.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/basiclayer/pgfcore.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/basiclayer/pgfcore.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphicx.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphicx.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphicx.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphicx.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphicx.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphicx.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphicx.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphicx.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphicx.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphicx.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphicx.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/keyval.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphics.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphics.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphics.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphics.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphics.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphics.sty +INPUT 
/usr/share/texlive/texmf-dist/tex/latex/graphics/graphics.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphics.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphics.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphics.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphics.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/trig.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/trig.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/trig.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/trig.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/trig.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/trig.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/trig.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/trig.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/trig.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/trig.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/trig.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics-cfg/graphics.cfg +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics-cfg/graphics.cfg +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics-cfg/graphics.cfg +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics-cfg/graphics.cfg +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics-def/pdftex.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics-def/pdftex.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics-def/pdftex.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics-def/pdftex.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/systemlayer/pgfsys.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/systemlayer/pgfsys.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/systemlayer/pgfsys.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/systemlayer/pgfsys.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/systemlayer/pgfsys.sty 
+INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/systemlayer/pgfsys.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/systemlayer/pgfsys.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/systemlayer/pgfsys.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/systemlayer/pgfsys.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/systemlayer/pgfsys.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/systemlayer/pgfsys.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfrcs.sty +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/systemlayer/pgfsys.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/systemlayer/pgfsys.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/systemlayer/pgfsys.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/systemlayer/pgfsys.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgfkeys.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgfkeysfiltered.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/systemlayer/pgf.cfg +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/systemlayer/pgfsys-pdftex.def +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/systemlayer/pgfsys-pdftex.def +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/systemlayer/pgfsys-common-pdf.def +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/systemlayer/pgfsyssoftpath.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/systemlayer/pgfsyssoftpath.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/systemlayer/pgfsyssoftpath.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/systemlayer/pgfsyssoftpath.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/systemlayer/pgfsysprotocol.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/systemlayer/pgfsysprotocol.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/systemlayer/pgfsysprotocol.code.tex +INPUT 
/usr/share/texlive/texmf-dist/tex/generic/pgf/systemlayer/pgfsysprotocol.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/keyval.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/xcolor/xcolor.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/xcolor/xcolor.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/xcolor/xcolor.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/xcolor/xcolor.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/xcolor/xcolor.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/xcolor/xcolor.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/xcolor/xcolor.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/xcolor/xcolor.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/xcolor/xcolor.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/xcolor/xcolor.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/xcolor/xcolor.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics-cfg/color.cfg +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics-cfg/color.cfg +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics-cfg/color.cfg +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics-cfg/color.cfg +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcore.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcore.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcore.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcore.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmath.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathcalc.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathutil.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathparser.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathfunctions.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathfunctions.basic.code.tex +INPUT 
/usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathfunctions.trigonometric.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathfunctions.random.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathfunctions.comparison.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathfunctions.base.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathfunctions.round.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathfunctions.misc.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathfunctions.integerarithmetics.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathfloat.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfint.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcorepoints.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcorepathconstruct.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcorepathusage.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcorescopes.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcoregraphicstate.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcoretransformations.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcorequick.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcoreobjects.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcorepathprocessing.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcorearrows.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcoreshade.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcoreimage.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcoreexternal.code.tex +INPUT 
/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcorelayers.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcoretransparency.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcorepatterns.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcorerdf.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/modules/pgfmoduleshapes.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/modules/pgfmoduleplot.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/compatibility/pgfcomp-version-0-65.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/compatibility/pgfcomp-version-0-65.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/compatibility/pgfcomp-version-0-65.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/compatibility/pgfcomp-version-0-65.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/compatibility/pgfcomp-version-0-65.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/compatibility/pgfcomp-version-0-65.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/compatibility/pgfcomp-version-0-65.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/compatibility/pgfcomp-version-0-65.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/compatibility/pgfcomp-version-0-65.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/compatibility/pgfcomp-version-0-65.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/compatibility/pgfcomp-version-0-65.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfrcs.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/compatibility/pgfcomp-version-1-18.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/compatibility/pgfcomp-version-1-18.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/compatibility/pgfcomp-version-1-18.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/compatibility/pgfcomp-version-1-18.sty +INPUT 
/usr/share/texlive/texmf-dist/tex/latex/pgf/compatibility/pgfcomp-version-1-18.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/compatibility/pgfcomp-version-1-18.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/compatibility/pgfcomp-version-1-18.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/compatibility/pgfcomp-version-1-18.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/compatibility/pgfcomp-version-1-18.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/compatibility/pgfcomp-version-1-18.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/compatibility/pgfcomp-version-1-18.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfrcs.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgffor.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgffor.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgffor.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgffor.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgffor.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgffor.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgffor.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgffor.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgffor.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgffor.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgffor.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfrcs.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfkeys.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfkeys.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfkeys.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfkeys.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfkeys.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfkeys.sty 
+INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfkeys.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfkeys.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfkeys.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfkeys.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfkeys.sty +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgfkeys.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgfkeys.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgfkeys.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgfkeys.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/math/pgfmath.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/math/pgfmath.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/math/pgfmath.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/math/pgfmath.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/math/pgfmath.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/math/pgfmath.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/math/pgfmath.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/math/pgfmath.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/math/pgfmath.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/math/pgfmath.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/math/pgfmath.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfrcs.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfkeys.sty +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmath.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmath.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmath.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmath.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgffor.code.tex +INPUT 
/usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgffor.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgffor.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgffor.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmath.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/tikz.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/tikz.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/tikz.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/tikz.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/pgflibraryplothandlers.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/pgflibraryplothandlers.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/modules/pgfmodulematrix.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibrarytopaths.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibrarytopaths.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibrarypositioning.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibrarypositioning.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshapes.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshapes.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshapes.geometric.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshapes.geometric.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/shapes/pgflibraryshapes.geometric.code.tex +INPUT 
/usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/shapes/pgflibraryshapes.geometric.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshapes.misc.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshapes.misc.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/shapes/pgflibraryshapes.misc.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/shapes/pgflibraryshapes.misc.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshapes.symbols.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshapes.symbols.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/shapes/pgflibraryshapes.symbols.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/shapes/pgflibraryshapes.symbols.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshapes.arrows.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshapes.arrows.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/shapes/pgflibraryshapes.arrows.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/shapes/pgflibraryshapes.arrows.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshapes.callouts.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshapes.callouts.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/shapes/pgflibraryshapes.callouts.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/shapes/pgflibraryshapes.callouts.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshapes.multipart.code.tex +INPUT 
/usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshapes.multipart.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/shapes/pgflibraryshapes.multipart.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/shapes/pgflibraryshapes.multipart.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryarrows.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryarrows.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/pgflibraryarrows.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/pgflibraryarrows.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshadows.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshadows.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryfadings.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryfadings.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/pgflibraryfadings.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/pgflibraryfadings.code.tex +OUTPUT example_2_dag.pdf +INPUT /usr/share/texlive/texmf-dist/tex/latex/l3backend/l3backend-pdftex.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/l3backend/l3backend-pdftex.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/l3backend/l3backend-pdftex.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/l3backend/l3backend-pdftex.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/l3backend/l3backend-pdftex.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/l3backend/l3backend-pdftex.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/l3backend/l3backend-pdftex.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/l3backend/l3backend-pdftex.def +INPUT 
/usr/share/texlive/texmf-dist/tex/latex/l3backend/l3backend-pdftex.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/l3backend/l3backend-pdftex.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/l3backend/l3backend-pdftex.def +INPUT ./example_2_dag.aux +INPUT example_2_dag.aux +INPUT example_2_dag.aux +OUTPUT example_2_dag.aux +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdacmr.fd +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdacmr.fd +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdacmr.fd +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdacmr.fd +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdbcmr.fd +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdbcmr.fd +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdbcmr.fd +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdbcmr.fd +INPUT /usr/share/texlive/texmf-dist/tex/latex/ucs/ucsencs.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/ucs/ucsencs.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/ucs/ucsencs.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/ucs/ucsencs.def +INPUT /usr/share/texlive/texmf-dist/tex/context/base/mkii/supp-pdf.mkii +INPUT /usr/share/texlive/texmf-dist/tex/context/base/mkii/supp-pdf.mkii +INPUT /usr/share/texlive/texmf-dist/tex/context/base/mkii/supp-pdf.mkii +INPUT /usr/share/texlive/texmf-dist/tex/context/base/mkii/supp-pdf.mkii +INPUT /usr/share/texlive/texmf-dist/tex/latex/epstopdf-pkg/epstopdf-base.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/epstopdf-pkg/epstopdf-base.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/epstopdf-pkg/epstopdf-base.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/epstopdf-pkg/epstopdf-base.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/epstopdf-pkg/epstopdf-base.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/epstopdf-pkg/epstopdf-base.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/epstopdf-pkg/epstopdf-base.sty +INPUT 
/usr/share/texlive/texmf-dist/tex/latex/epstopdf-pkg/epstopdf-base.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/epstopdf-pkg/epstopdf-base.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/epstopdf-pkg/epstopdf-base.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/epstopdf-pkg/epstopdf-base.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/latexconfig/epstopdf-sys.cfg +INPUT /usr/share/texlive/texmf-dist/tex/latex/latexconfig/epstopdf-sys.cfg +INPUT /usr/share/texlive/texmf-dist/tex/latex/latexconfig/epstopdf-sys.cfg +INPUT /usr/share/texlive/texmf-dist/tex/latex/latexconfig/epstopdf-sys.cfg +INPUT /usr/share/texlive/texmf-dist/fonts/tfm/public/mathdesign/mdugm/mdugmr8t.tfm +INPUT /usr/share/texlive/texmf-dist/fonts/vf/public/mathdesign/mdugm/mdugmr8t.vf +INPUT /usr/share/texlive/texmf-dist/fonts/tfm/public/mathdesign/mdugm/md-gmr8y.tfm +INPUT /usr/share/texlive/texmf-dist/fonts/tfm/public/mathdesign/mdugm/md-gmr8t.tfm +INPUT /var/lib/texmf/fonts/map/pdftex/updmap/pdftex.map +INPUT /usr/share/texlive/texmf-dist/fonts/vf/public/mathdesign/mdugm/mdugmr8t.vf +INPUT /usr/share/texlive/texmf-dist/fonts/tfm/public/mathdesign/mdugm/md-gmr8y.tfm +INPUT /usr/share/texlive/texmf-dist/fonts/tfm/public/mathdesign/mdugm/md-gmr8t.tfm +INPUT example_2_dag.aux +INPUT /usr/share/texlive/texmf-dist/fonts/enc/dvips/ly1/texnansi.enc +INPUT /usr/local/share/texmf/fonts/type1/urw/garamond/ugmr8a.pfb diff --git a/charts/example_2_dag/example_2_dag.png b/charts/example_2_dag/example_2_dag.png new file mode 100644 index 0000000..263b56b Binary files /dev/null and b/charts/example_2_dag/example_2_dag.png differ diff --git a/charts/example_2_dag/example_2_dag.svg b/charts/example_2_dag/example_2_dag.svg new file mode 100644 index 0000000..c0d44bf --- /dev/null +++ b/charts/example_2_dag/example_2_dag.svg @@ -0,0 +1,245 @@ + + + +YRXWKobservedunobserved diff --git a/charts/example_2_dag/example_2_dag.tex b/charts/example_2_dag/example_2_dag.tex new file mode 100644 index 
0000000..07ec19c --- /dev/null +++ b/charts/example_2_dag/example_2_dag.tex @@ -0,0 +1,47 @@ +\documentclass[12pt]{standalone} + +\usepackage{ucs} +\usepackage[utf8x]{inputenc} + +\usepackage[T1]{fontenc} +\usepackage{textcomp} +\renewcommand{\rmdefault}{ugm} +\renewcommand{\sfdefault}{phv} +\usepackage[garamond]{mathdesign} +\usepackage{tikz} +\usetikzlibrary{positioning, shapes, arrows, shadows} + +\begin{document} + +\tikzset{>=latex} +\begin{tikzpicture}[ + observed/.style={circle, draw}, + partly observed/.style 2 args={draw, fill=#2, path picture={ + \fill[#1, sharp corners] (path picture bounding box.south west) -| + (path picture bounding box.north east) -- cycle;}, + circle}, + unobserved/.style={draw, circle, fill=gray!40} + ] + + \node[observed] (y) {Y}; + \node[observed,above=of y, xshift=1cm] (r) {R}; + \node[partly observed={white}{gray!40}, left=of r] (x) {X}; + \node[observed, left=of x] (w) {W}; + \node[unobserved, above=of w, xshift=1cm] (k) {K}; + + \matrix [draw, below, yshift=-0.2cm, font=\small, align=center, column sep=2\pgflinewidth, inner sep=0.6em, outer sep=0em, nodes={align=center, anchor=center}] at (current bounding box.south){ + \node[observed,label=right:observed] {}; \\ + \node[unobserved,label=right:unobserved]{}; \\ + }; + + + \draw[->] (r) -- (y); + \draw[->] (r) -- (x); + \draw[->] (x) -- (y); + \draw[->] (x) -- (w); + \draw[->] (k) -- (x); + \draw[->] (k) -- (w); + +\end{tikzpicture} +\end{document} + diff --git a/charts/example_3_dag/#Makefile# b/charts/example_3_dag/#Makefile# new file mode 100644 index 0000000..a046bd4 --- /dev/null +++ b/charts/example_3_dag/#Makefile# @@ -0,0 +1,25 @@ +#!/usr/bin/make + +all: $(patsubst %.tex,%.svg,$(wildcard *.tex)) + +%.svg: %.pdf + /usr/bin/inkscape $< --export-plain-svg=$@ + +%.pdf: %.tex + latexmk -f -pdf $< + +clean: + latexmk -C *.tex + rm -f *.tmp + rm -f vc + rm *.svg + +viewpdf: all + evince *.pdf + +vc: + vc-git + +pdf: all + +.PHONY: clean all diff --git 
a/charts/example_3_dag/Makefile b/charts/example_3_dag/Makefile new file mode 100644 index 0000000..4bcb5ad --- /dev/null +++ b/charts/example_3_dag/Makefile @@ -0,0 +1,29 @@ +#!/usr/bin/make + +all: $(patsubst %.tex,%.svg,$(wildcard *.tex)) $(patsubst %.tex,%.png,$(wildcard *.tex)) + +%.png: %.pdf + convert -density 300 -transparent white $< $@ + +%.svg: %.pdf + /usr/bin/inkscape $< --export-plain-svg=$@ + + +%.pdf: %.tex + latexmk -f -pdf $< + +clean: + latexmk -C *.tex + rm -f *.tmp + rm -f vc + rm *.svg + +viewpdf: all + evince *.pdf + +vc: + vc-git + +pdf: all + +.PHONY: clean all diff --git a/charts/example_3_dag/auto/example_2_dag.el b/charts/example_3_dag/auto/example_2_dag.el new file mode 100644 index 0000000..25d5265 --- /dev/null +++ b/charts/example_3_dag/auto/example_2_dag.el @@ -0,0 +1,27 @@ +(TeX-add-style-hook + "example_2_dag" + (lambda () + (TeX-add-to-alist 'LaTeX-provided-class-options + '(("standalone" "12pt"))) + (TeX-add-to-alist 'LaTeX-provided-package-options + '(("inputenc" "utf8x") ("fontenc" "T1") ("mathdesign" "garamond"))) + (add-to-list 'LaTeX-verbatim-macros-with-braces-local "path") + (add-to-list 'LaTeX-verbatim-macros-with-braces-local "url") + (add-to-list 'LaTeX-verbatim-macros-with-braces-local "nolinkurl") + (add-to-list 'LaTeX-verbatim-macros-with-braces-local "hyperbaseurl") + (add-to-list 'LaTeX-verbatim-macros-with-braces-local "hyperimage") + (add-to-list 'LaTeX-verbatim-macros-with-braces-local "hyperref") + (add-to-list 'LaTeX-verbatim-macros-with-braces-local "href") + (add-to-list 'LaTeX-verbatim-macros-with-delims-local "path") + (TeX-run-style-hooks + "latex2e" + "standalone" + "standalone12" + "ucs" + "inputenc" + "fontenc" + "textcomp" + "mathdesign" + "tikz")) + :latex) + diff --git a/charts/example_3_dag/auto/example_3_dag.el b/charts/example_3_dag/auto/example_3_dag.el new file mode 100644 index 0000000..edffc52 --- /dev/null +++ b/charts/example_3_dag/auto/example_3_dag.el @@ -0,0 +1,27 @@ 
+(TeX-add-style-hook + "example_3_dag" + (lambda () + (TeX-add-to-alist 'LaTeX-provided-class-options + '(("standalone" "12pt"))) + (TeX-add-to-alist 'LaTeX-provided-package-options + '(("inputenc" "utf8x") ("fontenc" "T1") ("mathdesign" "garamond"))) + (add-to-list 'LaTeX-verbatim-macros-with-braces-local "path") + (add-to-list 'LaTeX-verbatim-macros-with-braces-local "url") + (add-to-list 'LaTeX-verbatim-macros-with-braces-local "nolinkurl") + (add-to-list 'LaTeX-verbatim-macros-with-braces-local "hyperbaseurl") + (add-to-list 'LaTeX-verbatim-macros-with-braces-local "hyperimage") + (add-to-list 'LaTeX-verbatim-macros-with-braces-local "hyperref") + (add-to-list 'LaTeX-verbatim-macros-with-braces-local "href") + (add-to-list 'LaTeX-verbatim-macros-with-delims-local "path") + (TeX-run-style-hooks + "latex2e" + "standalone" + "standalone12" + "ucs" + "inputenc" + "fontenc" + "textcomp" + "mathdesign" + "tikz")) + :latex) + diff --git a/charts/example_3_dag/example_3_dag.fdb_latexmk b/charts/example_3_dag/example_3_dag.fdb_latexmk new file mode 100644 index 0000000..3e15d1e --- /dev/null +++ b/charts/example_3_dag/example_3_dag.fdb_latexmk @@ -0,0 +1,146 @@ +# Fdb version 3 +["pdflatex"] 1653334811 "example_3_dag.tex" "example_3_dag.pdf" "example_3_dag" 1653334812 + "/dev/null" 0 -1 0 "" + "/etc/texmf/web2c/texmf.cnf" 1650342257 475 c0e671620eb5563b2130f56340a5fde8 "" + "/usr/local/share/texmf/fonts/type1/urw/garamond/ugmr8a.pfb" 1094499120 70277 baa4b3da2c0fdc989d3b086e9f92a592 "" + "/usr/share/texlive/texmf-dist/fonts/enc/dvips/ly1/texnansi.enc" 1276641224 6938 5135be6a8802f13a7faf296d1959a1ab "" + "/usr/share/texlive/texmf-dist/fonts/map/fontname/texfonts.map" 1577235249 3524 cb3e574dea2d1052e39280babc910dc8 "" + "/usr/share/texlive/texmf-dist/fonts/tfm/jknappen/ec/ecrm1200.tfm" 1136768653 3584 f80ddd985bd00e29e9a6047ebd9d4781 "" + "/usr/share/texlive/texmf-dist/fonts/tfm/public/cm/cmr12.tfm" 1136768653 1288 655e228510b4c2a1abe905c368440826 "" + 
"/usr/share/texlive/texmf-dist/fonts/tfm/public/mathdesign/mdugm/md-gmr8t.tfm" 1379030001 988 7af44ec9a0d02adcdeac5730bfa6c900 "" + "/usr/share/texlive/texmf-dist/fonts/tfm/public/mathdesign/mdugm/md-gmr8y.tfm" 1379030001 1420 c083b7ec6e57bd0ed303204f3ed5498f "" + "/usr/share/texlive/texmf-dist/fonts/tfm/public/mathdesign/mdugm/mdugmr8t.tfm" 1379030001 19500 8b5a37a37d0abbdc70524f1b83751a16 "" + "/usr/share/texlive/texmf-dist/fonts/vf/public/mathdesign/mdugm/mdugmr8t.vf" 1379030001 2232 00d038d9916ec0c6921fcb4bc893379c "" + "/usr/share/texlive/texmf-dist/tex/context/base/mkii/supp-pdf.mkii" 1461363279 71627 94eb9990bed73c364d7f53f960cc8c5b "" + "/usr/share/texlive/texmf-dist/tex/generic/iftex/ifluatex.sty" 1572645307 492 1994775aa15b0d1289725a0b1bbc2d4c "" + "/usr/share/texlive/texmf-dist/tex/generic/iftex/iftex.sty" 1583617216 6501 4011d89d9621e0b0901138815ba5ff29 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcore.code.tex" 1601326656 992 855ff26741653ab54814101ca36e153c "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcorearrows.code.tex" 1601326656 43820 1fef971b75380574ab35a0d37fd92608 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcoreexternal.code.tex" 1601326656 19324 f4e4c6403dd0f1605fd20ed22fa79dea "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcoregraphicstate.code.tex" 1601326656 6038 ccb406740cc3f03bbfb58ad504fe8c27 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcoreimage.code.tex" 1601326656 6944 e12f8f7a7364ddf66f93ba30fb3a3742 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcorelayers.code.tex" 1601326656 4883 42daaf41e27c3735286e23e48d2d7af9 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcoreobjects.code.tex" 1601326656 2544 8c06d2a7f0f469616ac9e13db6d2f842 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcorepathconstruct.code.tex" 1601326656 44195 5e390c414de027626ca5e2df888fa68d "" + 
"/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcorepathprocessing.code.tex" 1601326656 17311 2ef6b2e29e2fc6a2fc8d6d652176e257 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcorepathusage.code.tex" 1601326656 21302 788a79944eb22192a4929e46963a3067 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcorepatterns.code.tex" 1601326656 9690 01feb7cde25d4293ef36eef45123eb80 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcorepoints.code.tex" 1601326656 33335 dd1fa4814d4e51f18be97d88bf0da60c "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcorequick.code.tex" 1601326656 2965 4c2b1f4e0826925746439038172e5d6f "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcorerdf.code.tex" 1601326656 5196 2cc249e0ee7e03da5f5f6589257b1e5b "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcorescopes.code.tex" 1601326656 20726 d4c8db1e2e53b72721d29916314a22ea "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcoreshade.code.tex" 1601326656 35249 abd4adf948f960299a4b3d27c5dddf46 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcoretransformations.code.tex" 1601326656 21989 fdc867d05d228316de137a9fc5ec3bbe "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcoretransparency.code.tex" 1601326656 8893 e851de2175338fdf7c17f3e091d94618 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryarrows.code.tex" 1601326656 319 225dfe354ba678ff3c194968db39d447 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryfadings.code.tex" 1601326656 1179 5483d86c1582c569e665c74efab6281f "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibrarypositioning.code.tex" 1601326656 3937 3f208572dd82c71103831da976d74f1a "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshadows.code.tex" 1601326656 2889 
d698e3a959304efa342d47e3bb86da5b "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshapes.arrows.code.tex" 1601326656 410 048d1174dabde96757a5387b8f23d968 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshapes.callouts.code.tex" 1601326656 1201 8bd51e254d3ecf0cd2f21edd9ab6f1bb "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshapes.code.tex" 1601326656 494 8de62576191924285b021f4fc4292e16 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshapes.geometric.code.tex" 1601326656 339 be0fe46d92a80e3385dd6a83511a46f2 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshapes.misc.code.tex" 1601326656 329 ba6d5440f8c16779c2384e0614158266 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshapes.multipart.code.tex" 1601326656 919 938802205ca20d7c36615aabc4d34be2 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshapes.symbols.code.tex" 1601326656 475 4b4056fe07caa0603fede9a162fe666d "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibrarytopaths.code.tex" 1608933718 11518 738408f795261b70ce8dd47459171309 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/tikz.code.tex" 1621110968 186007 6e7dfe0bd57520fd5f91641aa72dcac8 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/pgflibraryarrows.code.tex" 1601326656 31874 89148c383c49d4c72114a76fd0062299 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/pgflibraryfadings.code.tex" 1601326656 2563 d5b174eb7709fd6bdcc2f70953dbdf8e "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/pgflibraryplothandlers.code.tex" 1601326656 32995 ac577023e12c0e4bd8aa420b2e852d1a "" + 
"/usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/shapes/pgflibraryshapes.arrows.code.tex" 1601326656 91587 e30123381f7b9bcf1341c31c6be18b94 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/shapes/pgflibraryshapes.callouts.code.tex" 1601326656 33336 427c354e28a4802ffd781da22ae9f383 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/shapes/pgflibraryshapes.geometric.code.tex" 1606168878 160993 6a81d63e475cc43874b46ed32a0a37c8 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/shapes/pgflibraryshapes.misc.code.tex" 1601326656 46241 588910a2f1e0a99f2c3e14490683c20d "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/shapes/pgflibraryshapes.multipart.code.tex" 1601326656 62281 aff261ef10ba6cbe8e3c872a38c05a61 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/shapes/pgflibraryshapes.symbols.code.tex" 1601326656 90515 e30b2c9c93aacc373e47917c0c2a48ed "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfint.code.tex" 1557692582 3063 8c415c68a0f3394e45cfeca0b65f6ee6 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmath.code.tex" 1601326656 521 8e224a7af69b7fee4451d1bf76b46654 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathcalc.code.tex" 1601326656 13391 84d29568c13bdce4133ab4a214711112 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathfloat.code.tex" 1601326656 104935 184ed87524e76d4957860df4ce0cd1c3 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathfunctions.base.code.tex" 1601326656 10165 cec5fa73d49da442e56efc2d605ef154 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathfunctions.basic.code.tex" 1601326656 28178 41c17713108e0795aac6fef3d275fbca "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathfunctions.code.tex" 1601326656 9989 c55967bf45126ff9b061fa2ca0c4694f "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathfunctions.comparison.code.tex" 1601326656 3865 ac538ab80c5cf82b345016e474786549 "" + 
"/usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathfunctions.integerarithmetics.code.tex" 1557692582 3177 27d85c44fbfe09ff3b2cf2879e3ea434 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathfunctions.misc.code.tex" 1621110968 11024 0179538121bc2dba172013a3ef89519f "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathfunctions.random.code.tex" 1608933718 7854 4176998eeefd8745ac6d2d4bd9c98451 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathfunctions.round.code.tex" 1601326656 3379 781797a101f647bab82741a99944a229 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathfunctions.trigonometric.code.tex" 1601326656 92405 f515f31275db273f97b9d8f52e1b0736 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathparser.code.tex" 1601326656 37376 11cd75aac3da1c1b152b2848f30adc14 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathutil.code.tex" 1601326656 8471 c2883569d03f69e8e1cabfef4999cfd7 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/modules/pgfmodulematrix.code.tex" 1601326656 21201 08d231a2386e2b61d64641c50dc15abd "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/modules/pgfmoduleplot.code.tex" 1601326656 16121 346f9013d34804439f7436ff6786cef7 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/modules/pgfmoduleshapes.code.tex" 1621110968 44784 cedaa399d15f95e68e22906e2cc09ef8 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/pgf.revision.tex" 1621110968 465 d68603f8b820ea4a08cce534944db581 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/systemlayer/pgf.cfg" 1601326656 926 2963ea0dcf6cc6c0a770b69ec46a477b "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/systemlayer/pgfsys-common-pdf.def" 1601326656 5546 f3f24d7898386cb7daac70bdd2c4d6dc "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/systemlayer/pgfsys-pdftex.def" 1601326656 12601 4786e597516eddd82097506db7cfa098 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/systemlayer/pgfsys.code.tex" 1621110968 61163 
9b2eefc24e021323e0fc140e9826d016 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/systemlayer/pgfsysprotocol.code.tex" 1601326656 1896 b8e0ca0ac371d74c0ca05583f6313c91 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/systemlayer/pgfsyssoftpath.code.tex" 1601326656 7778 53c8b5623d80238f6a20aa1df1868e63 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgffor.code.tex" 1606168878 23997 a4bed72405fa644418bea7eac2887006 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgfkeys.code.tex" 1621110968 37060 797782f0eb50075c9bc952374d9a659a "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgfkeysfiltered.code.tex" 1601326656 37431 9abe862035de1b29c7a677f3205e3d9f "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgfrcs.code.tex" 1601326656 4494 af17fb7efeafe423710479858e42fa7e "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgfutil-common-lists.tex" 1601326656 7251 fb18c67117e09c64de82267e12cd8aa4 "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgfutil-common.tex" 1621110968 29274 e15c5b7157d21523bd9c9f1dfa146b8e "" + "/usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgfutil-latex.def" 1621110968 6825 a2b0ea5b539dda0625e99dd15785ab59 "" + "/usr/share/texlive/texmf-dist/tex/generic/xkeyval/keyval.tex" 1605910342 2725 9f5d0b27f1f9a620c6ea983d6d41501d "" + "/usr/share/texlive/texmf-dist/tex/generic/xkeyval/xkeyval.tex" 1605910342 19231 3cbf682090baecad8e17a66b7a271ed1 "" + "/usr/share/texlive/texmf-dist/tex/generic/xkeyval/xkvutils.tex" 1605910342 7677 cf3e6aa6a8d444f55327f61df80bfa0c "" + "/usr/share/texlive/texmf-dist/tex/latex/base/article.cls" 1636758526 20144 8a7de377ae7a11ee924a7499611f5a9d "" + "/usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty" 1622581934 4946 461cc78f6f26901410d9f1d725079cc6 "" + "/usr/share/texlive/texmf-dist/tex/latex/base/ifthen.sty" 1622581934 5157 f308c7c04889e16c588e78aa42599fae "" + "/usr/share/texlive/texmf-dist/tex/latex/base/inputenc.sty" 
1622581934 5049 969aec05d5f39c43f8005910498fcf90 "" + "/usr/share/texlive/texmf-dist/tex/latex/base/size12.clo" 1636758526 8449 bc7344e882df4d7e51c046514dee83e4 "" + "/usr/share/texlive/texmf-dist/tex/latex/base/textcomp.sty" 1622581934 2894 55431114fc0e491ecee275edafd6c881 "" + "/usr/share/texlive/texmf-dist/tex/latex/epstopdf-pkg/epstopdf-base.sty" 1579991033 13886 d1306dcf79a944f6988e688c1785f9ce "" + "/usr/share/texlive/texmf-dist/tex/latex/graphics-cfg/color.cfg" 1459978653 1213 620bba36b25224fa9b7e1ccb4ecb76fd "" + "/usr/share/texlive/texmf-dist/tex/latex/graphics-cfg/graphics.cfg" 1465944070 1224 978390e9c2234eab29404bc21b268d1e "" + "/usr/share/texlive/texmf-dist/tex/latex/graphics-def/pdftex.def" 1601931164 19103 48d29b6e2a64cb717117ef65f107b404 "" + "/usr/share/texlive/texmf-dist/tex/latex/graphics/graphics.sty" 1622581934 18399 7e40f80366dffb22c0e7b70517db5cb4 "" + "/usr/share/texlive/texmf-dist/tex/latex/graphics/graphicx.sty" 1636758526 7996 a8fb260d598dcaf305a7ae7b9c3e3229 "" + "/usr/share/texlive/texmf-dist/tex/latex/graphics/keyval.sty" 1622581934 2671 4de6781a30211fe0ea4c672e4a2a8166 "" + "/usr/share/texlive/texmf-dist/tex/latex/graphics/trig.sty" 1636758526 4009 187ea2dc3194cd5a76cd99a8d7a6c4d0 "" + "/usr/share/texlive/texmf-dist/tex/latex/l3backend/l3backend-pdftex.def" 1642022539 29921 f0f4f870357ebfb8fe58ed9ed4ee9b92 "" + "/usr/share/texlive/texmf-dist/tex/latex/latexconfig/epstopdf-sys.cfg" 1279039959 678 4792914a8f45be57bb98413425e4c7af "" + "/usr/share/texlive/texmf-dist/tex/latex/mathdesign/mathdesign.sty" 1377991452 9111 d865fc87f99dbc5273fb00f1d7091d76 "" + "/usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdacmr.fd" 1153697689 579 116e648415099e5e059da594ef56c9f0 "" + "/usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdbcmr.fd" 1153697689 579 ddcbed007a246f2b5a98aedc86efeed0 "" + "/usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdfont.def" 1377991452 5878 bba53c9220a1555c41919107cf6f41c3 "" + 
"/usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdsffont.def" 1377991452 16973 0d74f58659233f7bbf4e2551e5c1b6ba "" + "/usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdttfont.def" 1377991452 9718 378a12581d907c0af4433a9e908339df "" + "/usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdugm/mdugm.cfg" 1379030001 3290 359d6e75cf1deff239ae04f0de11a9cd "" + "/usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdugm/mdugm.sty" 1379030001 44894 0dc5cc17cbd2a8c871ce80ca701d85a5 "" + "/usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdugm/t1mdugm.fd" 1379030001 1522 66675d9aa6eb36761b2c579a644335b4 "" + "/usr/share/texlive/texmf-dist/tex/latex/pgf/basiclayer/pgf.sty" 1601326656 1090 bae35ef70b3168089ef166db3e66f5b2 "" + "/usr/share/texlive/texmf-dist/tex/latex/pgf/basiclayer/pgfcore.sty" 1601326656 410 615550c46f918fcbee37641b02a862d9 "" + "/usr/share/texlive/texmf-dist/tex/latex/pgf/compatibility/pgfcomp-version-0-65.sty" 1601326656 21013 f4ff83d25bb56552493b030f27c075ae "" + "/usr/share/texlive/texmf-dist/tex/latex/pgf/compatibility/pgfcomp-version-1-18.sty" 1601326656 989 c49c8ae06d96f8b15869da7428047b1e "" + "/usr/share/texlive/texmf-dist/tex/latex/pgf/frontendlayer/tikz.sty" 1601326656 339 c2e180022e3afdb99c7d0ea5ce469b7d "" + "/usr/share/texlive/texmf-dist/tex/latex/pgf/math/pgfmath.sty" 1601326656 306 c56a323ca5bf9242f54474ced10fca71 "" + "/usr/share/texlive/texmf-dist/tex/latex/pgf/systemlayer/pgfsys.sty" 1601326656 443 8c872229db56122037e86bcda49e14f3 "" + "/usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgffor.sty" 1601326656 348 ee405e64380c11319f0e249fed57e6c5 "" + "/usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfkeys.sty" 1601326656 274 5ae372b7df79135d240456a1c6f2cf9a "" + "/usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfrcs.sty" 1601326656 325 f9f16d12354225b7dd52a3321f085955 "" + "/usr/share/texlive/texmf-dist/tex/latex/standalone/standalone.cfg" 1522098998 1015 662b4d7ad816b857a598284525f5c75e "" + 
"/usr/share/texlive/texmf-dist/tex/latex/standalone/standalone.cls" 1522098998 28890 df75e6d37f47b7e27bff3f37375336b3 "" + "/usr/share/texlive/texmf-dist/tex/latex/tools/shellesc.sty" 1622581934 4118 0f286eca74ee36b7743ff20320e5479f "" + "/usr/share/texlive/texmf-dist/tex/latex/ucs/data/uni-global.def" 1368571634 1375 8a855db83af5d6753ccbbd32e6a8a901 "" + "/usr/share/texlive/texmf-dist/tex/latex/ucs/ucs.sty" 1368571634 27982 5723d81d568db410592a59b85fb3eaae "" + "/usr/share/texlive/texmf-dist/tex/latex/ucs/ucsencs.def" 1368571634 22368 c53c9d0d16c65bef2b157515c9d9f658 "" + "/usr/share/texlive/texmf-dist/tex/latex/ucs/utf8x.def" 1368571634 8036 21f7ac37aafb6cfeddbb196b8bfd6280 "" + "/usr/share/texlive/texmf-dist/tex/latex/xcolor/xcolor.sty" 1635798903 56029 3f7889dab51d620aa43177c391b7b190 "" + "/usr/share/texlive/texmf-dist/tex/latex/xkeyval/xkeyval.sty" 1605910342 4902 efb3d66683a2da2a232f71e3a571a899 "" + "/usr/share/texlive/texmf-dist/web2c/texmf.cnf" 1644012257 39432 7155514e09a3d69036fac785183a21c2 "" + "/usr/share/texmf/tex/latex/preview/preview.sty" 1598018759 13747 d074c8555b22976c3890effbb5ce8ed7 "" + "/usr/share/texmf/web2c/texmf.cnf" 1644012257 39432 7155514e09a3d69036fac785183a21c2 "" + "/var/lib/texmf/fonts/map/pdftex/updmap/pdftex.map" 1650342827 4362114 c882e0fb2b702d36ed2d6636759689e2 "" + "/var/lib/texmf/web2c/pdftex/pdflatex.fmt" 1652219592 1590085 c669746cfe044505095fd475f9edf6a3 "" + "example_3_dag.aux" 1653334812 32 3985256e7290058c681f74d7a3565a19 "pdflatex" + "example_3_dag.tex" 1653334695 1422 04e6602d1b98c8c446cb012c9111f5fe "" + (generated) + "example_3_dag.aux" + "example_3_dag.log" + "example_3_dag.pdf" diff --git a/charts/example_3_dag/example_3_dag.fls b/charts/example_3_dag/example_3_dag.fls new file mode 100644 index 0000000..318ac67 --- /dev/null +++ b/charts/example_3_dag/example_3_dag.fls @@ -0,0 +1,569 @@ +PWD /home/nathante/ml_measurement_error/charts/example_3_dag +INPUT /etc/texmf/web2c/texmf.cnf +INPUT 
/usr/share/texmf/web2c/texmf.cnf +INPUT /usr/share/texlive/texmf-dist/web2c/texmf.cnf +INPUT /var/lib/texmf/web2c/pdftex/pdflatex.fmt +INPUT example_3_dag.tex +OUTPUT example_3_dag.log +INPUT /usr/share/texlive/texmf-dist/tex/latex/standalone/standalone.cls +INPUT /usr/share/texlive/texmf-dist/tex/latex/standalone/standalone.cls +INPUT /usr/share/texlive/texmf-dist/tex/latex/standalone/standalone.cls +INPUT /usr/share/texlive/texmf-dist/tex/latex/standalone/standalone.cls +INPUT /usr/share/texlive/texmf-dist/tex/latex/standalone/standalone.cls +INPUT /usr/share/texlive/texmf-dist/tex/latex/standalone/standalone.cls +INPUT /usr/share/texlive/texmf-dist/tex/latex/standalone/standalone.cls +INPUT /usr/share/texlive/texmf-dist/tex/latex/standalone/standalone.cls +INPUT /usr/share/texlive/texmf-dist/tex/latex/standalone/standalone.cls +INPUT /usr/share/texlive/texmf-dist/tex/latex/standalone/standalone.cls +INPUT /usr/share/texlive/texmf-dist/tex/latex/standalone/standalone.cls +INPUT /usr/share/texlive/texmf-dist/tex/latex/tools/shellesc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/tools/shellesc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/tools/shellesc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/tools/shellesc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/tools/shellesc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/tools/shellesc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/tools/shellesc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/tools/shellesc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/tools/shellesc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/tools/shellesc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/tools/shellesc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/tools/shellesc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/tools/shellesc.sty +INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/ifluatex.sty +INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/ifluatex.sty +INPUT 
/usr/share/texlive/texmf-dist/tex/generic/iftex/ifluatex.sty +INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/ifluatex.sty +INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/ifluatex.sty +INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/ifluatex.sty +INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/ifluatex.sty +INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/ifluatex.sty +INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/ifluatex.sty +INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/ifluatex.sty +INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/ifluatex.sty +INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/ifluatex.sty +INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/ifluatex.sty +INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/iftex.sty +INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/iftex.sty +INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/iftex.sty +INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/iftex.sty +INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/iftex.sty +INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/iftex.sty +INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/iftex.sty +INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/iftex.sty +INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/iftex.sty +INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/iftex.sty +INPUT /usr/share/texlive/texmf-dist/tex/generic/iftex/iftex.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/xkeyval/xkeyval.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/xkeyval/xkeyval.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/xkeyval/xkeyval.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/xkeyval/xkeyval.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/xkeyval/xkeyval.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/xkeyval/xkeyval.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/xkeyval/xkeyval.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/xkeyval/xkeyval.sty +INPUT 
/usr/share/texlive/texmf-dist/tex/latex/xkeyval/xkeyval.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/xkeyval/xkeyval.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/xkeyval/xkeyval.sty +INPUT /usr/share/texlive/texmf-dist/tex/generic/xkeyval/xkeyval.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/xkeyval/xkvutils.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/xkeyval/keyval.tex +INPUT /dev/null +INPUT /dev/null +INPUT /dev/null +INPUT /usr/share/texlive/texmf-dist/tex/latex/standalone/standalone.cfg +INPUT /usr/share/texlive/texmf-dist/tex/latex/standalone/standalone.cfg +INPUT /usr/share/texlive/texmf-dist/tex/latex/standalone/standalone.cfg +INPUT /usr/share/texlive/texmf-dist/tex/latex/standalone/standalone.cfg +INPUT /usr/share/texmf/tex/latex/preview/preview.sty +INPUT /usr/share/texmf/tex/latex/preview/preview.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/article.cls +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/article.cls +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/article.cls +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/article.cls +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/article.cls +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/article.cls +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/article.cls +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/article.cls +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/article.cls +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/article.cls +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/article.cls +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/size12.clo +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/size12.clo +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/size12.clo +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/size12.clo +INPUT /usr/share/texlive/texmf-dist/fonts/map/fontname/texfonts.map +INPUT /usr/share/texlive/texmf-dist/fonts/tfm/public/cm/cmr12.tfm +INPUT 
/usr/share/texlive/texmf-dist/tex/latex/ucs/ucs.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/ucs/ucs.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/ucs/ucs.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/ucs/ucs.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/ucs/ucs.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/ucs/ucs.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/ucs/ucs.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/ucs/ucs.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/ucs/ucs.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/ucs/ucs.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/ucs/ucs.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/ucs/data/uni-global.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/ucs/data/uni-global.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/ucs/data/uni-global.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/ucs/data/uni-global.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/inputenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/inputenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/inputenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/inputenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/inputenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/inputenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/inputenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/inputenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/inputenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/inputenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/inputenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/ucs/utf8x.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/ucs/utf8x.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/ucs/utf8x.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/ucs/utf8x.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/ucs/ucs.sty +INPUT 
/usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty +INPUT /usr/share/texlive/texmf-dist/fonts/tfm/jknappen/ec/ecrm1200.tfm +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/textcomp.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/textcomp.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/textcomp.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/textcomp.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/textcomp.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/textcomp.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/textcomp.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/textcomp.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/textcomp.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/textcomp.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/textcomp.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mathdesign.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mathdesign.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mathdesign.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mathdesign.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mathdesign.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mathdesign.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mathdesign.sty 
+INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mathdesign.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mathdesign.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mathdesign.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mathdesign.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/keyval.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/ifthen.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/ifthen.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/ifthen.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/ifthen.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/ifthen.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/ifthen.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/ifthen.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/ifthen.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/ifthen.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/ifthen.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/ifthen.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdugm/mdugm.cfg +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdugm/mdugm.cfg +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdugm/mdugm.cfg +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdugm/mdugm.cfg +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdugm/mdugm.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdugm/mdugm.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdugm/mdugm.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdugm/mdugm.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdugm/mdugm.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdugm/mdugm.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdugm/mdugm.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdugm/mdugm.sty +INPUT 
/usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdugm/mdugm.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdugm/mdugm.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdugm/mdugm.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdfont.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdfont.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdfont.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdfont.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdsffont.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdsffont.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdsffont.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdsffont.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdttfont.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdttfont.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdttfont.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdttfont.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/xkeyval/xkeyval.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdugm/t1mdugm.fd +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdugm/t1mdugm.fd +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdugm/t1mdugm.fd +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdugm/t1mdugm.fd +INPUT /usr/share/texlive/texmf-dist/fonts/tfm/public/mathdesign/mdugm/mdugmr8t.tfm +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty +INPUT 
/usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/frontendlayer/tikz.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/frontendlayer/tikz.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/frontendlayer/tikz.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/frontendlayer/tikz.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/frontendlayer/tikz.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/frontendlayer/tikz.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/frontendlayer/tikz.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/frontendlayer/tikz.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/frontendlayer/tikz.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/frontendlayer/tikz.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/frontendlayer/tikz.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/basiclayer/pgf.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/basiclayer/pgf.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/basiclayer/pgf.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/basiclayer/pgf.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/basiclayer/pgf.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/basiclayer/pgf.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/basiclayer/pgf.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/basiclayer/pgf.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/basiclayer/pgf.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/basiclayer/pgf.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/basiclayer/pgf.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfrcs.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfrcs.sty +INPUT 
/usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfrcs.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfrcs.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfrcs.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfrcs.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfrcs.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfrcs.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfrcs.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfrcs.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfrcs.sty +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgfutil-common.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgfutil-common-lists.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgfutil-latex.def +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgfrcs.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgfrcs.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgfrcs.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgfrcs.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/pgf.revision.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/pgf.revision.tex +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/basiclayer/pgfcore.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/basiclayer/pgfcore.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/basiclayer/pgfcore.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/basiclayer/pgfcore.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/basiclayer/pgfcore.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/basiclayer/pgfcore.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/basiclayer/pgfcore.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/basiclayer/pgfcore.sty +INPUT 
/usr/share/texlive/texmf-dist/tex/latex/pgf/basiclayer/pgfcore.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/basiclayer/pgfcore.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/basiclayer/pgfcore.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphicx.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphicx.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphicx.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphicx.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphicx.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphicx.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphicx.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphicx.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphicx.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphicx.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphicx.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/keyval.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphics.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphics.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphics.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphics.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphics.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphics.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphics.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphics.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphics.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphics.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/graphics.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/trig.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/trig.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/trig.sty 
+INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/trig.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/trig.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/trig.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/trig.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/trig.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/trig.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/trig.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/trig.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics-cfg/graphics.cfg +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics-cfg/graphics.cfg +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics-cfg/graphics.cfg +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics-cfg/graphics.cfg +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics-def/pdftex.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics-def/pdftex.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics-def/pdftex.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics-def/pdftex.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/systemlayer/pgfsys.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/systemlayer/pgfsys.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/systemlayer/pgfsys.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/systemlayer/pgfsys.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/systemlayer/pgfsys.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/systemlayer/pgfsys.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/systemlayer/pgfsys.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/systemlayer/pgfsys.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/systemlayer/pgfsys.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/systemlayer/pgfsys.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/systemlayer/pgfsys.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfrcs.sty +INPUT 
/usr/share/texlive/texmf-dist/tex/generic/pgf/systemlayer/pgfsys.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/systemlayer/pgfsys.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/systemlayer/pgfsys.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/systemlayer/pgfsys.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgfkeys.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgfkeysfiltered.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/systemlayer/pgf.cfg +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/systemlayer/pgfsys-pdftex.def +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/systemlayer/pgfsys-pdftex.def +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/systemlayer/pgfsys-common-pdf.def +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/systemlayer/pgfsyssoftpath.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/systemlayer/pgfsyssoftpath.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/systemlayer/pgfsyssoftpath.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/systemlayer/pgfsyssoftpath.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/systemlayer/pgfsysprotocol.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/systemlayer/pgfsysprotocol.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/systemlayer/pgfsysprotocol.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/systemlayer/pgfsysprotocol.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics/keyval.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/xcolor/xcolor.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/xcolor/xcolor.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/xcolor/xcolor.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/xcolor/xcolor.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/xcolor/xcolor.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/xcolor/xcolor.sty +INPUT 
/usr/share/texlive/texmf-dist/tex/latex/xcolor/xcolor.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/xcolor/xcolor.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/xcolor/xcolor.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/xcolor/xcolor.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/xcolor/xcolor.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics-cfg/color.cfg +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics-cfg/color.cfg +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics-cfg/color.cfg +INPUT /usr/share/texlive/texmf-dist/tex/latex/graphics-cfg/color.cfg +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcore.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcore.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcore.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcore.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmath.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathcalc.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathutil.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathparser.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathfunctions.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathfunctions.basic.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathfunctions.trigonometric.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathfunctions.random.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathfunctions.comparison.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathfunctions.base.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathfunctions.round.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathfunctions.misc.code.tex +INPUT 
/usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathfunctions.integerarithmetics.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmathfloat.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfint.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcorepoints.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcorepathconstruct.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcorepathusage.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcorescopes.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcoregraphicstate.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcoretransformations.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcorequick.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcoreobjects.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcorepathprocessing.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcorearrows.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcoreshade.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcoreimage.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcoreexternal.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcorelayers.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcoretransparency.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcorepatterns.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/basiclayer/pgfcorerdf.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/modules/pgfmoduleshapes.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/modules/pgfmoduleplot.code.tex +INPUT 
/usr/share/texlive/texmf-dist/tex/latex/pgf/compatibility/pgfcomp-version-0-65.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/compatibility/pgfcomp-version-0-65.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/compatibility/pgfcomp-version-0-65.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/compatibility/pgfcomp-version-0-65.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/compatibility/pgfcomp-version-0-65.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/compatibility/pgfcomp-version-0-65.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/compatibility/pgfcomp-version-0-65.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/compatibility/pgfcomp-version-0-65.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/compatibility/pgfcomp-version-0-65.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/compatibility/pgfcomp-version-0-65.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/compatibility/pgfcomp-version-0-65.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfrcs.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/compatibility/pgfcomp-version-1-18.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/compatibility/pgfcomp-version-1-18.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/compatibility/pgfcomp-version-1-18.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/compatibility/pgfcomp-version-1-18.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/compatibility/pgfcomp-version-1-18.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/compatibility/pgfcomp-version-1-18.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/compatibility/pgfcomp-version-1-18.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/compatibility/pgfcomp-version-1-18.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/compatibility/pgfcomp-version-1-18.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/compatibility/pgfcomp-version-1-18.sty +INPUT 
/usr/share/texlive/texmf-dist/tex/latex/pgf/compatibility/pgfcomp-version-1-18.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfrcs.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgffor.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgffor.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgffor.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgffor.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgffor.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgffor.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgffor.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgffor.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgffor.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgffor.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgffor.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfrcs.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfkeys.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfkeys.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfkeys.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfkeys.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfkeys.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfkeys.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfkeys.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfkeys.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfkeys.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfkeys.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfkeys.sty +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgfkeys.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgfkeys.code.tex +INPUT 
/usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgfkeys.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgfkeys.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/math/pgfmath.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/math/pgfmath.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/math/pgfmath.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/math/pgfmath.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/math/pgfmath.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/math/pgfmath.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/math/pgfmath.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/math/pgfmath.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/math/pgfmath.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/math/pgfmath.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/math/pgfmath.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfrcs.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/pgf/utilities/pgfkeys.sty +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmath.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmath.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmath.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmath.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgffor.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgffor.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgffor.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/utilities/pgffor.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/math/pgfmath.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/tikz.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/tikz.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/tikz.code.tex 
+INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/tikz.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/pgflibraryplothandlers.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/pgflibraryplothandlers.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/modules/pgfmodulematrix.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibrarytopaths.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibrarytopaths.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibrarypositioning.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibrarypositioning.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshapes.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshapes.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshapes.geometric.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshapes.geometric.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/shapes/pgflibraryshapes.geometric.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/shapes/pgflibraryshapes.geometric.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshapes.misc.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshapes.misc.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/shapes/pgflibraryshapes.misc.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/shapes/pgflibraryshapes.misc.code.tex +INPUT 
/usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshapes.symbols.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshapes.symbols.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/shapes/pgflibraryshapes.symbols.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/shapes/pgflibraryshapes.symbols.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshapes.arrows.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshapes.arrows.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/shapes/pgflibraryshapes.arrows.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/shapes/pgflibraryshapes.arrows.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshapes.callouts.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshapes.callouts.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/shapes/pgflibraryshapes.callouts.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/shapes/pgflibraryshapes.callouts.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshapes.multipart.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshapes.multipart.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/shapes/pgflibraryshapes.multipart.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/shapes/pgflibraryshapes.multipart.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryarrows.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryarrows.code.tex +INPUT 
/usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/pgflibraryarrows.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/pgflibraryarrows.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshadows.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryshadows.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryfadings.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/frontendlayer/tikz/libraries/tikzlibraryfadings.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/pgflibraryfadings.code.tex +INPUT /usr/share/texlive/texmf-dist/tex/generic/pgf/libraries/pgflibraryfadings.code.tex +OUTPUT example_3_dag.pdf +INPUT /usr/share/texlive/texmf-dist/tex/latex/l3backend/l3backend-pdftex.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/l3backend/l3backend-pdftex.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/l3backend/l3backend-pdftex.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/l3backend/l3backend-pdftex.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/l3backend/l3backend-pdftex.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/l3backend/l3backend-pdftex.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/l3backend/l3backend-pdftex.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/l3backend/l3backend-pdftex.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/l3backend/l3backend-pdftex.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/l3backend/l3backend-pdftex.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/l3backend/l3backend-pdftex.def +INPUT ./example_3_dag.aux +INPUT example_3_dag.aux +INPUT example_3_dag.aux +OUTPUT example_3_dag.aux +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdacmr.fd +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdacmr.fd +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdacmr.fd +INPUT 
/usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdacmr.fd +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdbcmr.fd +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdbcmr.fd +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdbcmr.fd +INPUT /usr/share/texlive/texmf-dist/tex/latex/mathdesign/mdbcmr.fd +INPUT /usr/share/texlive/texmf-dist/tex/latex/ucs/ucsencs.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/ucs/ucsencs.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/ucs/ucsencs.def +INPUT /usr/share/texlive/texmf-dist/tex/latex/ucs/ucsencs.def +INPUT /usr/share/texlive/texmf-dist/tex/context/base/mkii/supp-pdf.mkii +INPUT /usr/share/texlive/texmf-dist/tex/context/base/mkii/supp-pdf.mkii +INPUT /usr/share/texlive/texmf-dist/tex/context/base/mkii/supp-pdf.mkii +INPUT /usr/share/texlive/texmf-dist/tex/context/base/mkii/supp-pdf.mkii +INPUT /usr/share/texlive/texmf-dist/tex/latex/epstopdf-pkg/epstopdf-base.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/epstopdf-pkg/epstopdf-base.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/epstopdf-pkg/epstopdf-base.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/epstopdf-pkg/epstopdf-base.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/epstopdf-pkg/epstopdf-base.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/epstopdf-pkg/epstopdf-base.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/epstopdf-pkg/epstopdf-base.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/epstopdf-pkg/epstopdf-base.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/epstopdf-pkg/epstopdf-base.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/epstopdf-pkg/epstopdf-base.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/epstopdf-pkg/epstopdf-base.sty +INPUT /usr/share/texlive/texmf-dist/tex/latex/latexconfig/epstopdf-sys.cfg +INPUT /usr/share/texlive/texmf-dist/tex/latex/latexconfig/epstopdf-sys.cfg +INPUT /usr/share/texlive/texmf-dist/tex/latex/latexconfig/epstopdf-sys.cfg +INPUT 
/usr/share/texlive/texmf-dist/tex/latex/latexconfig/epstopdf-sys.cfg +INPUT /usr/share/texlive/texmf-dist/fonts/vf/public/mathdesign/mdugm/mdugmr8t.vf +INPUT /usr/share/texlive/texmf-dist/fonts/tfm/public/mathdesign/mdugm/md-gmr8y.tfm +INPUT /usr/share/texlive/texmf-dist/fonts/tfm/public/mathdesign/mdugm/md-gmr8t.tfm +INPUT /var/lib/texmf/fonts/map/pdftex/updmap/pdftex.map +INPUT example_3_dag.aux +INPUT /usr/share/texlive/texmf-dist/fonts/enc/dvips/ly1/texnansi.enc +INPUT /usr/local/share/texmf/fonts/type1/urw/garamond/ugmr8a.pfb diff --git a/charts/example_3_dag/example_3_dag.png b/charts/example_3_dag/example_3_dag.png new file mode 100644 index 0000000..49e40ce Binary files /dev/null and b/charts/example_3_dag/example_3_dag.png differ diff --git a/charts/example_3_dag/example_3_dag.svg b/charts/example_3_dag/example_3_dag.svg new file mode 100644 index 0000000..53b2e97 --- /dev/null +++ b/charts/example_3_dag/example_3_dag.svg @@ -0,0 +1,246 @@ + + + +YXWKUobservedunobserved diff --git a/charts/example_3_dag/example_3_dag.tex b/charts/example_3_dag/example_3_dag.tex new file mode 100644 index 0000000..01f94e1 --- /dev/null +++ b/charts/example_3_dag/example_3_dag.tex @@ -0,0 +1,51 @@ +\documentclass[12pt]{standalone} + +\usepackage{ucs} +\usepackage[utf8x]{inputenc} + +\usepackage[T1]{fontenc} +\usepackage{textcomp} +\renewcommand{\rmdefault}{ugm} +\renewcommand{\sfdefault}{phv} +\usepackage[garamond]{mathdesign} +\usepackage{tikz} +\usetikzlibrary{positioning, shapes, arrows, shadows} + +\begin{document} + +\tikzset{>=latex} +\begin{tikzpicture}[ + observed/.style={circle, draw}, + partly observed/.style 2 args={draw, fill=#2, path picture={ + \fill[#1, sharp corners] (path picture bounding box.south west) -| + (path picture bounding box.north east) -- cycle;}, + circle}, + unobserved/.style={draw, circle, fill=gray!40} + ] + + \node[observed] (y) {Y}; +% \node[observed,above=of y, xshift=1cm] (r) {R}; + \node[partly observed={white}{gray!40}, above = of y] 
(x) {X}; + \node[observed, left=of x] (w) {W}; + \node[unobserved, above=of w, xshift=1cm] (k) {K}; + \node[unobserved, right=of k] (u) {U}; + +% \draw[->] (r) -- (y); +% \draw[->] (r) -- (x); + \draw[->] (x) -- (y); + \draw[->] (x) -- (w); + \draw[->] (x) -- (k); + \draw[->] (k) -- (w); + \draw[->] (u) to [out=270,in=30] (y); + \draw[->] (u) -- (k); + + + \matrix [draw, below, yshift=-0.2cm, inner sep=0.6em, outer sep=0em, nodes={align=center, anchor=center}] at (current bounding box.south){ + \node[observed,label=right:observed] {}; \\ + \node[unobserved,label=right:unobserved]{}; \\ + }; + + +\end{tikzpicture} +\end{document} + diff --git a/dv_perspective_example.RDS b/dv_perspective_example.RDS new file mode 100644 index 0000000..6328acd Binary files /dev/null and b/dv_perspective_example.RDS differ diff --git a/example.png b/example.png new file mode 100644 index 0000000..f5f1001 Binary files /dev/null and b/example.png differ diff --git a/figure/dv_noz_x-1.pdf b/figure/dv_noz_x-1.pdf new file mode 100644 index 0000000..39a09d2 Binary files /dev/null and b/figure/dv_noz_x-1.pdf differ diff --git a/figure/dv_predacc_x-1.pdf b/figure/dv_predacc_x-1.pdf new file mode 100644 index 0000000..24b2c3d Binary files /dev/null and b/figure/dv_predacc_x-1.pdf differ diff --git a/figure/example1-1.svg b/figure/example1-1.svg new file mode 100644 index 0000000..8a770a0 --- /dev/null +++ b/figure/example1-1.svg @@ -0,0 +1,1160 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/figure/example1_g-1.pdf b/figure/example1_g-1.pdf new file mode 100644 index 0000000..20adb8c Binary files /dev/null and b/figure/example1_g-1.pdf differ diff --git a/figure/example1_g-2.pdf b/figure/example1_g-2.pdf new 
file mode 100644 index 0000000..a0f5e27 Binary files /dev/null and b/figure/example1_g-2.pdf differ diff --git a/figure/example1_x-1.pdf b/figure/example1_x-1.pdf new file mode 100644 index 0000000..8a5a6a1 Binary files /dev/null and b/figure/example1_x-1.pdf differ diff --git a/figure/example1_x-2.pdf b/figure/example1_x-2.pdf new file mode 100644 index 0000000..b41faa9 Binary files /dev/null and b/figure/example1_x-2.pdf differ diff --git a/figure/example2-1.svg b/figure/example2-1.svg new file mode 100644 index 0000000..9a63181 --- /dev/null +++ b/figure/example2-1.svg @@ -0,0 +1,1163 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/figure/example2_g-1.pdf b/figure/example2_g-1.pdf new file mode 100644 index 0000000..7811865 Binary files /dev/null and b/figure/example2_g-1.pdf differ diff --git a/figure/example2_g-2.pdf b/figure/example2_g-2.pdf new file mode 100644 index 0000000..de769d1 Binary files /dev/null and b/figure/example2_g-2.pdf differ diff --git a/figure/example2_x-1.pdf b/figure/example2_x-1.pdf new file mode 100644 index 0000000..4d58b45 Binary files /dev/null and b/figure/example2_x-1.pdf differ diff --git a/figure/example2_x-2.pdf b/figure/example2_x-2.pdf new file mode 100644 index 0000000..fe10225 Binary files /dev/null and b/figure/example2_x-2.pdf differ diff --git a/figure/example3-1.svg b/figure/example3-1.svg new file mode 100644 index 0000000..9d724d9 --- /dev/null +++ b/figure/example3-1.svg @@ -0,0 +1,1019 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/figure/example3_g-1.pdf b/figure/example3_g-1.pdf new file mode 100644 index 0000000..1a8fe92 Binary files /dev/null and b/figure/example3_g-1.pdf differ diff --git a/figure/example3_x-1.pdf 
b/figure/example3_x-1.pdf new file mode 100644 index 0000000..4ee63d6 Binary files /dev/null and b/figure/example3_x-1.pdf differ diff --git a/figure/example3_x-2.pdf b/figure/example3_x-2.pdf new file mode 100644 index 0000000..7b1788d Binary files /dev/null and b/figure/example3_x-2.pdf differ diff --git a/figure/example3_z-1.pdf b/figure/example3_z-1.pdf new file mode 100644 index 0000000..2f4ee07 Binary files /dev/null and b/figure/example3_z-1.pdf differ diff --git a/figure/example3_z-2.pdf b/figure/example3_z-2.pdf new file mode 100644 index 0000000..674bd96 Binary files /dev/null and b/figure/example3_z-2.pdf differ diff --git a/figure/example4_x-1.pdf b/figure/example4_x-1.pdf new file mode 100644 index 0000000..b9b4bdf Binary files /dev/null and b/figure/example4_x-1.pdf differ diff --git a/figure/example4_z-1.pdf b/figure/example4_z-1.pdf new file mode 100644 index 0000000..70c324b Binary files /dev/null and b/figure/example4_z-1.pdf differ diff --git a/figure/example_4_x-1.pdf b/figure/example_4_x-1.pdf new file mode 100644 index 0000000..c6b0b29 Binary files /dev/null and b/figure/example_4_x-1.pdf differ diff --git a/figure/example_4_x-2.pdf b/figure/example_4_x-2.pdf new file mode 100644 index 0000000..9556436 Binary files /dev/null and b/figure/example_4_x-2.pdf differ diff --git a/figure/example_4_z-1.pdf b/figure/example_4_z-1.pdf new file mode 100644 index 0000000..5602a6e Binary files /dev/null and b/figure/example_4_z-1.pdf differ diff --git a/figure/example_4_z-2.pdf b/figure/example_4_z-2.pdf new file mode 100644 index 0000000..7cb4fc7 Binary files /dev/null and b/figure/example_4_z-2.pdf differ diff --git a/figure/example_5_x-1.pdf b/figure/example_5_x-1.pdf new file mode 100644 index 0000000..faef6da Binary files /dev/null and b/figure/example_5_x-1.pdf differ diff --git a/figure/example_5_x-2.pdf b/figure/example_5_x-2.pdf new file mode 100644 index 0000000..12c459b Binary files /dev/null and b/figure/example_5_x-2.pdf differ diff --git 
a/figure/example_5_z-1.pdf b/figure/example_5_z-1.pdf new file mode 100644 index 0000000..5d65c83 Binary files /dev/null and b/figure/example_5_z-1.pdf differ diff --git a/figure/example_5_z-2.pdf b/figure/example_5_z-2.pdf new file mode 100644 index 0000000..91d1893 Binary files /dev/null and b/figure/example_5_z-2.pdf differ diff --git a/figure/example_6_x-1.pdf b/figure/example_6_x-1.pdf new file mode 100644 index 0000000..f382469 Binary files /dev/null and b/figure/example_6_x-1.pdf differ diff --git a/figure/example_6_x-2.pdf b/figure/example_6_x-2.pdf new file mode 100644 index 0000000..b317aae Binary files /dev/null and b/figure/example_6_x-2.pdf differ diff --git a/figure/example_6_z-1.pdf b/figure/example_6_z-1.pdf new file mode 100644 index 0000000..f382469 Binary files /dev/null and b/figure/example_6_z-1.pdf differ diff --git a/figure/example_6_z-2.pdf b/figure/example_6_z-2.pdf new file mode 100644 index 0000000..bac23e0 Binary files /dev/null and b/figure/example_6_z-2.pdf differ diff --git a/figure/misspec_y_z-1.pdf b/figure/misspec_y_z-1.pdf new file mode 100644 index 0000000..84f5660 Binary files /dev/null and b/figure/misspec_y_z-1.pdf differ diff --git a/figure/predacc_x_y-1.pdf b/figure/predacc_x_y-1.pdf new file mode 100644 index 0000000..4654e8a Binary files /dev/null and b/figure/predacc_x_y-1.pdf differ diff --git a/figure/predacc_x_z-1.pdf b/figure/predacc_x_z-1.pdf new file mode 100644 index 0000000..33506b9 Binary files /dev/null and b/figure/predacc_x_z-1.pdf differ diff --git a/figure/predacc_y_z-1.pdf b/figure/predacc_y_z-1.pdf new file mode 100644 index 0000000..788afe8 Binary files /dev/null and b/figure/predacc_y_z-1.pdf differ diff --git a/figure/real_data_example_1-1.pdf b/figure/real_data_example_1-1.pdf new file mode 100644 index 0000000..0e12659 Binary files /dev/null and b/figure/real_data_example_1-1.pdf differ diff --git a/figure/real_data_example_dv-1.pdf b/figure/real_data_example_dv-1.pdf new file mode 100644 index 
0000000..4dbf0d8 Binary files /dev/null and b/figure/real_data_example_dv-1.pdf differ diff --git a/figure/real_data_example_dv_full-1.pdf b/figure/real_data_example_dv_full-1.pdf new file mode 100644 index 0000000..51ecbd4 Binary files /dev/null and b/figure/real_data_example_dv_full-1.pdf differ diff --git a/figure/real_data_example_iv-1.pdf b/figure/real_data_example_iv-1.pdf new file mode 100644 index 0000000..acd9c1a Binary files /dev/null and b/figure/real_data_example_iv-1.pdf differ diff --git a/figure/real_data_example_iv_full-1.pdf b/figure/real_data_example_iv_full-1.pdf new file mode 100644 index 0000000..4c24540 Binary files /dev/null and b/figure/real_data_example_iv_full-1.pdf differ diff --git a/figure/truth_intercept_only_x-1.pdf b/figure/truth_intercept_only_x-1.pdf new file mode 100644 index 0000000..15b1c7c Binary files /dev/null and b/figure/truth_intercept_only_x-1.pdf differ diff --git a/figure/truth_intercept_only_z-1.pdf b/figure/truth_intercept_only_z-1.pdf new file mode 100644 index 0000000..c7f6079 Binary files /dev/null and b/figure/truth_intercept_only_z-1.pdf differ diff --git a/flowchart_recommendations (Nathan E Tegrotenhuis's conflicted copy 2023-02-08).pdf b/flowchart_recommendations (Nathan E Tegrotenhuis's conflicted copy 2023-02-08).pdf new file mode 100644 index 0000000..c6b8585 Binary files /dev/null and b/flowchart_recommendations (Nathan E Tegrotenhuis's conflicted copy 2023-02-08).pdf differ diff --git a/flowchart_recommendations.pdf b/flowchart_recommendations.pdf new file mode 100644 index 0000000..09f588c Binary files /dev/null and b/flowchart_recommendations.pdf differ diff --git a/flowchart_recommendations.tex b/flowchart_recommendations.tex new file mode 100644 index 0000000..5bc9638 --- /dev/null +++ b/flowchart_recommendations.tex @@ -0,0 +1,98 @@ +\tikzset{ + observed/.style={circle, draw}, + partly observed/.style 2 args={draw, fill=#2, path picture={ + \fill[#1, sharp corners] (path picture bounding box.south 
west) -| + (path picture bounding box.north east) -- cycle;}, + circle}, + unobserved/.style={draw, circle, fill=gray!40}, + residual/.style={draw, rectangle}, + step box/.style={draw, rectangle, fill=gray!25, text width=6.7in, inner sep=0.2in, align=left, anchor=south west, execute at begin node=\setlength{\baselineskip}{4ex}},, + decision box/.style={draw, diamond, fill=blue!20, text width=2in, align=center, inner sep=0.1in, anchor=south west, execute at begin node=\setlength{\baselineskip}{1ex},aspect=3}, + outcome box/.style={draw, rectangle, fill=gray!5, text width=2in, inner sep=0.25in, align=center, anchor=south west, execute at begin node=\setlength{\baselineskip}{4ex}}, + show curve controls/.style={ + postaction={ + decoration={ + show path construction, + curveto code={ + \draw [blue] + (\tikzinputsegmentfirst) -- (\tikzinputsegmentsupporta) + (\tikzinputsegmentlast) -- (\tikzinputsegmentsupportb); + \fill [red, opacity=0.5] + (\tikzinputsegmentsupporta) circle [radius=.2ex] + (\tikzinputsegmentsupportb) circle [radius=.2ex]; + } + }, + decorate + }}, + myarrow/.style={ + arrows={-Stealth[round, open]}, + scale=1, + line width=1pt + }, + myarrownotip/.style={ + scale=1, + line width=1pt + }, + mylabel/.style={ + text width=2.2in, + align=center, + inner sep=1ex, + font={\mdseries\itshape\sffamily #1} + } +} +\newcommand{\myindent}{\hspace{1em}} + +\begin{tikzpicture}[every node/.style={transform shape}, scale=0.68] + + \node[step box] (manual) {\textbf{Step 1. Attempt Manual Content Analysis}\\ \myindent a. Analyze an affordable annotated dataset (\textit{feasible estimator}) \\\myindent b. Follow manual content analysis recommendations (Bachl \& Scharkow, 2017; Gei\ss, 2021) }; + + \node[outcome box] (report_manual) [below right=0.2in of manual] {Report evidence from manual content analysis.}; + + \node[step box] (test_systematic) [below left=0.2in of report_manual] {\textbf{Step 2. 
Use Manually Annotated Data to Detect Systematic Misclassification}\\ \myindent Test if ACs are conditionally independent of (in-)dependent variables given annotations. \\ \myindent For an independent variable test that $P(W|X,Z) = P(W|X,Y,Z)$ \\ \myindent For a dependent variable test that $P(W|Y) = P(W|Y,Z,X)$.}; + + \node[step box] (correct) [below=0.55in of test_systematic] {\textbf{Step 3. Correct Misclassification Bias Instead of Being Naive}}; + + \node[step box] (report) [below=2.8in of correct] {\textbf{Step 4. Provide a Full Account Of Methodological Decisions}\\ \myindent a. Information on AC design (e.g., sample size; sampling; cross-validation; balance)\\ \myindent b. Information on manual annotation (e.g., sample size; intercoder reliability)\\ \myindent c. Information on AC performance (e.g., predictiveness metrics on manual annotations) \\ \myindent d. Information on error correction (e.g., systematic error; correction methods)}; + + \node[mylabel, anchor=south west] (independent) [below=8ex of correct,xshift=1.23in] {Independent\\ variable}; + \node[mylabel, anchor=south west] (dependent) [below=10ex of independent] {Dependent\\ variable}; + + \node[outcome box] (outcome_systematic_iv) [below =3.8in of report_manual] {Use MLE or MI.}; + \node[outcome box] (outcome_nonsystematic_iv) [above =3ex of outcome_systematic_iv] {Use GMM or MLE.}; + + % \node[outcome box] (outcome_systematic_dv) [below =2in of outcome_nonsystematic_iv] {Use MLE.}; + + \node[outcome box] (outcome_dv) [below =3ex of outcome_systematic_iv] {Use MLE.}; + + % & \node[] (iv_1) {Independent variable}; & \node[decision box] (dv_1) {Dependent variable}; \\ + + \draw[myarrow] (manual.south) to [controls=+(280:2) and +(165:1.5)] (report_manual.west) { node [mylabel, pos=0.5, yshift=-5ex, xshift=5.5in] {Found convincing evidence}}; + + \draw[myarrow] (test_systematic) to (correct); + \draw[myarrow] (manual.south) to (test_systematic) {node [mylabel, pos=0.5, yshift=-5ex,xshift=2.5in] 
{Require stronger evidence\\ via an AC}}; + + \draw[myarrownotip] (correct.south) to [controls=+(280:1.4) and +(160:1.4)] (independent); + + \draw[myarrow] (independent.north)+(3ex,0) to [xshift=17ex, yshift=-4.5ex, controls=+(80:1.2) and +(170:1.2)] (outcome_nonsystematic_iv.west) {node [mylabel] {Nonsystematic misclassification}}; + + \draw[myarrow] (independent.south)+(3ex,0) to [xshift=15ex, yshift=-4ex, controls=+(290:0.4) and +(180:0.8)] (outcome_systematic_iv.west) {node [mylabel] { Systematic misclassification}}; + + \draw[myarrownotip] (correct.south) to [controls=+(280:3) and +(150:2)] (dependent); + + \draw[myarrow] (dependent)+(8ex,2ex) to [xshift=15ex,yshift=-4ex, controls=+(80:0.8) and +(170:1)] (outcome_dv.west); + + % \draw[myarrow] (systematic)to [xshift=15ex,yshift=-4ex, controls=+(270:1) and +(180:1)] (outcome_systematic_dv.west) {node [mylabel] { Dependent variable}}; + + \draw[myarrow] (correct) to (report); + +% \draw[myarrow] (convincing_evidence.east) to (report_manual); +% \draw[myarrow] (convincing_evidence.south) to (test_systematic); +% \draw[myarrow] (test_systematic) to (iv_1); + +\end{tikzpicture} +\vspace{1ex} +% \end{figure} +%%% Local Variables: +%%% mode: latex +%%% TeX-master: t +%%% End: diff --git a/graph1.eps b/graph1.eps new file mode 100644 index 0000000..bec0fe9 --- /dev/null +++ b/graph1.eps @@ -0,0 +1,2113 @@ +%!PS-Adobe-2.0 EPSF-2.0 +%%BoundingBox: 91.5625 3.1875 321.938 190. 
+/MISOfy +{ + /newfontname exch def + /oldfontname exch def + + oldfontname findfont + dup length dict begin + {1 index /FID ne {def} {pop pop} ifelse} forall + /Encoding ISOLatin1Encoding def + currentdict + end + + newfontname exch definefont pop +} def + +0 193.188 translate 1 -1 scale +gsave +150 dict begin +/Mfixwid true def +/Mrot 0 def +/Mpstart { + MathPictureStart +} bind def +/Mpend { + MathPictureEnd +} bind def +/Mscale { + 0 1 0 1 + 5 -1 roll + MathScale +} bind def +/Plain /Courier findfont def +/Bold /Courier-Bold findfont def +/Italic /Courier-Oblique findfont def +/MathPictureStart { + /Mimatrix + matrix currentmatrix + def + gsave + newpath + Mleft + Mbottom + translate + /Mtmatrix + matrix currentmatrix + def + Plain + Mfontsize scalefont + setfont + 0 setgray + 0 setlinewidth +} bind def +/MathPictureEnd { + grestore +} bind def +/MathSubStart { + Momatrix + Mgmatrix Mtmatrix + Mleft Mbottom + Mwidth Mheight + 9 -2 roll + moveto + Mtmatrix setmatrix + currentpoint + Mgmatrix setmatrix + 11 -2 roll + moveto + Mtmatrix setmatrix + currentpoint + 2 copy translate + /Mtmatrix matrix currentmatrix def + /Mleft 0 def + /Mbottom 0 def + 3 -1 roll + exch sub + /Mheight exch def + sub + /Mwidth exch def +} bind def +/MathSubEnd { + /Mheight exch def + /Mwidth exch def + /Mbottom exch def + /Mleft exch def + /Mtmatrix exch def + dup setmatrix + /Mgmatrix exch def + /Momatrix exch def +} bind def +/Mdot { + moveto + 0 0 rlineto + stroke +} bind def +/Mtetra { + moveto + lineto + lineto + lineto + fill +} bind def +/Metetra { + moveto + lineto + lineto + lineto + closepath + gsave + fill + grestore + 0 setgray + stroke +} bind def +/Mistroke { + flattenpath + 0 0 0 + { + 4 2 roll + pop pop + } + { + 4 -1 roll + 2 index + sub dup mul + 4 -1 roll + 2 index + sub dup mul + add sqrt + 4 -1 roll + add + 3 1 roll + } + { + stop + } + { + stop + } + pathforall + pop pop + currentpoint + stroke + moveto + currentdash + 3 -1 roll + add + setdash +} bind def 
+/Mfstroke { + stroke + currentdash + pop 0 + setdash +} bind def +/Mrotsboxa { + gsave + dup + /Mrot + exch def + Mrotcheck + Mtmatrix + dup + setmatrix + 7 1 roll + 4 index + 4 index + translate + rotate + 3 index + -1 mul + 3 index + -1 mul + translate + /Mtmatrix + matrix + currentmatrix + def + grestore + Msboxa + 3 -1 roll + /Mtmatrix + exch def + /Mrot + 0 def +} bind def +/Msboxa { + newpath + 5 -1 roll + Mvboxa + pop + Mboxout + 6 -1 roll + 5 -1 roll + 4 -1 roll + Msboxa1 + 5 -3 roll + Msboxa1 + Mboxrot + [ + 7 -2 roll + 2 copy + [ + 3 1 roll + 10 -1 roll + 9 -1 roll + ] + 6 1 roll + 5 -2 roll + ] +} bind def +/Msboxa1 { + sub + 2 div + dup + 2 index + 1 add + mul + 3 -1 roll + -1 add + 3 -1 roll + mul +} bind def +/Mvboxa { + Mfixwid + { + Mvboxa1 + } + { + dup + Mwidthcal + 0 exch + { + add + } + forall + exch + Mvboxa1 + 4 index + 7 -1 roll + add + 4 -1 roll + pop + 3 1 roll + } + ifelse +} bind def +/Mvboxa1 { + gsave + newpath + [ true + 3 -1 roll + { + Mbbox + 5 -1 roll + { + 0 + 5 1 roll + } + { + 7 -1 roll + exch sub + (m) stringwidth pop + .3 mul + sub + 7 1 roll + 6 -1 roll + 4 -1 roll + Mmin + 3 -1 roll + 5 index + add + 5 -1 roll + 4 -1 roll + Mmax + 4 -1 roll + } + ifelse + false + } + forall + { stop } if + counttomark + 1 add + 4 roll + ] + grestore +} bind def +/Mbbox { + 0 0 moveto + false charpath + flattenpath + pathbbox + newpath +} bind def +/Mmin { + 2 copy + gt + { exch } if + pop +} bind def +/Mmax { + 2 copy + lt + { exch } if + pop +} bind def +/Mrotshowa { + dup + /Mrot + exch def + Mrotcheck + Mtmatrix + dup + setmatrix + 7 1 roll + 4 index + 4 index + translate + rotate + 3 index + -1 mul + 3 index + -1 mul + translate + /Mtmatrix + matrix + currentmatrix + def + Mgmatrix setmatrix + Mshowa + /Mtmatrix + exch def + /Mrot 0 def +} bind def +/Mshowa { + 4 -2 roll + moveto + 2 index + Mtmatrix setmatrix + Mvboxa + 7 1 roll + Mboxout + 6 -1 roll + 5 -1 roll + 4 -1 roll + Mshowa1 + 4 1 roll + Mshowa1 + rmoveto + currentpoint + 
Mfixwid + { + Mshowax + } + { + Mshoway + } + ifelse + pop pop pop pop + Mgmatrix setmatrix +} bind def +/Mshowax { + 0 1 + 4 index length + -1 add + { + 2 index + 4 index + 2 index + get + 3 index + add + moveto + 4 index + exch get + show + } for +} bind def +/Mshoway { + 3 index + Mwidthcal + 5 1 roll + 0 1 + 4 index length + -1 add + { + 2 index + 4 index + 2 index + get + 3 index + add + moveto + 4 index + exch get + [ + 6 index + aload + length + 2 add + -1 roll + { + pop + Strform + stringwidth + pop + neg + exch + add + 0 rmoveto + } + exch + kshow + cleartomark + } for + pop +} bind def +/Mwidthcal { + [ + exch + { + Mwidthcal1 + } + forall + ] + [ + exch + dup + Maxlen + -1 add + 0 1 + 3 -1 roll + { + [ + exch + 2 index + { + 1 index + Mget + exch + } + forall + pop + Maxget + exch + } + for + pop + ] + Mreva +} bind def +/Mreva { + [ + exch + aload + length + -1 1 + {1 roll} + for + ] +} bind def +/Mget { + 1 index + length + -1 add + 1 index + ge + { + get + } + { + pop pop + 0 + } + ifelse +} bind def +/Maxlen { + [ + exch + { + length + } + forall + Maxget +} bind def +/Maxget { + counttomark + -1 add + 1 1 + 3 -1 roll + { + pop + Mmax + } + for + exch + pop +} bind def +/Mwidthcal1 { + [ + exch + { + Strform + stringwidth + pop + } + forall + ] +} bind def +/Strform { + /tem (x) def + tem 0 + 3 -1 roll + put + tem +} bind def +/Mshowa1 { + 2 copy + add + 4 1 roll + sub + mul + sub + -2 div +} bind def +/MathScale { + Mwidth + Mheight + Mlp + translate + scale + /yscale exch def + /ybias exch def + /xscale exch def + /xbias exch def + /Momatrix + xscale yscale matrix scale + xbias ybias matrix translate + matrix concatmatrix def + /Mgmatrix + matrix currentmatrix + def +} bind def +/Mlp { + 3 copy + Mlpfirst + { + Mnodistort + { + Mmin + dup + } if + 4 index + 2 index + 2 index + Mlprun + 11 index + 11 -1 roll + 10 -4 roll + Mlp1 + 8 index + 9 -5 roll + Mlp1 + 4 -1 roll + and + { exit } if + 3 -1 roll + pop pop + } loop + exch + 3 1 roll + 7 -3 roll + 
pop pop pop +} bind def +/Mlpfirst { + 3 -1 roll + dup length + 2 copy + -2 add + get + aload + pop pop pop + 4 -2 roll + -1 add + get + aload + pop pop pop + 6 -1 roll + 3 -1 roll + 5 -1 roll + sub + dup /MsaveAx exch def + div + 4 1 roll + exch sub + dup /MsaveAy exch def + div +} bind def +/Mlprun { + 2 copy + 4 index + 0 get + dup + 4 1 roll + Mlprun1 + 3 copy + 8 -2 roll + 9 -1 roll + { + 3 copy + Mlprun1 + 3 copy + 11 -3 roll + /gt Mlpminmax + 8 3 roll + 11 -3 roll + /lt Mlpminmax + 8 3 roll + } forall + pop pop pop pop + 3 1 roll + pop pop + aload pop + 5 -1 roll + aload pop + exch + 6 -1 roll + Mlprun2 + 8 2 roll + 4 -1 roll + Mlprun2 + 6 2 roll + 3 -1 roll + Mlprun2 + 4 2 roll + exch + Mlprun2 + 6 2 roll +} bind def +/Mlprun1 { + aload pop + exch + 6 -1 roll + 5 -1 roll + mul add + 4 -2 roll + mul + 3 -1 roll + add +} bind def +/Mlprun2 { + 2 copy + add 2 div + 3 1 roll + exch sub +} bind def +/Mlpminmax { + cvx + 2 index + 6 index + 2 index + exec + { + 7 -3 roll + 4 -1 roll + } if + 1 index + 5 index + 3 -1 roll + exec + { + 4 1 roll + pop + 5 -1 roll + aload + pop pop + 4 -1 roll + aload pop + [ + 8 -2 roll + pop + 5 -2 roll + pop + 6 -2 roll + pop + 5 -1 roll + ] + 4 1 roll + pop + } + { + pop pop pop + } ifelse +} bind def +/Mlp1 { + 5 index + 3 index sub + 5 index + 2 index mul + 1 index + le + 1 index + 0 le + or + dup + not + { + 1 index + 3 index div + .99999 mul + 8 -1 roll + pop + 7 1 roll + } + if + 8 -1 roll + 2 div + 7 -2 roll + pop sub + 5 index + 6 -3 roll + pop pop + mul sub + exch +} bind def +/intop 0 def +/inrht 0 def +/inflag 0 def +/outflag 0 def +/xadrht 0 def +/xadlft 0 def +/yadtop 0 def +/yadbot 0 def +/Minner { + outflag + 1 + eq + { + /outflag 0 def + /intop 0 def + /inrht 0 def + } if + 5 index + gsave + Mtmatrix setmatrix + Mvboxa pop + grestore + 3 -1 roll + pop + dup + intop + gt + { + /intop + exch def + } + { pop } + ifelse + dup + inrht + gt + { + /inrht + exch def + } + { pop } + ifelse + pop + /inflag + 1 def +} bind 
def +/Mouter { + /xadrht 0 def + /xadlft 0 def + /yadtop 0 def + /yadbot 0 def + inflag + 1 eq + { + dup + 0 lt + { + dup + intop + mul + neg + /yadtop + exch def + } if + dup + 0 gt + { + dup + intop + mul + /yadbot + exch def + } + if + pop + dup + 0 lt + { + dup + inrht + mul + neg + /xadrht + exch def + } if + dup + 0 gt + { + dup + inrht + mul + /xadlft + exch def + } if + pop + /outflag 1 def + } + { pop pop} + ifelse + /inflag 0 def + /inrht 0 def + /intop 0 def +} bind def +/Mboxout { + outflag + 1 + eq + { + 4 -1 + roll + xadlft + leadjust + add + sub + 4 1 roll + 3 -1 + roll + yadbot + leadjust + add + sub + 3 1 + roll + exch + xadrht + leadjust + add + add + exch + yadtop + leadjust + add + add + /outflag 0 def + /xadlft 0 def + /yadbot 0 def + /xadrht 0 def + /yadtop 0 def + } if +} bind def +/leadjust { + (m) stringwidth pop + .5 mul +} bind def +/Mrotcheck { + dup + 90 + eq + { + yadbot + /yadbot + xadrht + def + /xadrht + yadtop + def + /yadtop + xadlft + def + /xadlft + exch + def + } + if + dup + cos + 1 index + sin + Checkaux + dup + cos + 1 index + sin neg + exch + Checkaux + 3 1 roll + pop pop +} bind def +/Checkaux { + 4 index + exch + 4 index + mul + 3 1 roll + mul add + 4 1 roll +} bind def +/Mboxrot { + Mrot + 90 eq + { + brotaux + 4 2 + roll + } + if + Mrot + 180 eq + { + 4 2 + roll + brotaux + 4 2 + roll + brotaux + } + if + Mrot + 270 eq + { + 4 2 + roll + brotaux + } + if +} bind def +/brotaux { + neg + exch + neg +} bind def +/Mabsproc { + 0 + matrix defaultmatrix + dtransform idtransform + dup mul exch + dup mul + add sqrt +} bind def +/Mabswid { + Mabsproc + setlinewidth +} bind def +/Mabsdash { + exch + [ + exch + { + Mabsproc + } + forall + ] + exch + setdash +} bind def +/MBeginOrig { Momatrix concat} bind def +/MEndOrig { Mgmatrix setmatrix} bind def +/sampledsound where +{ pop} +{ /sampledsound { +exch +pop +exch +5 1 roll +mul +4 idiv +mul +2 idiv +exch pop +exch +/Mtempproc exch def +{ Mtempproc pop } +repeat +} bind def +} 
ifelse +% Here are the short operators +/g { setgray} bind def +/k { setcmykcolor} bind def +/m { moveto} bind def +/p { gsave} bind def +/r { setrgbcolor} bind def +/w { setlinewidth} bind def +/C { curveto} bind def +/F { fill} bind def +/L { lineto} bind def +/P { grestore} bind def +/s { stroke} bind def + +/MFill { + 0 0 moveto + Mwidth 0 lineto + Mwidth Mheight lineto + 0 Mheight lineto + fill +} bind def + +/MPlotRegion { + 3 index + Mwidth mul + 2 index + Mheight mul + translate + exch sub + Mheight mul + /Mheight + exch def + exch sub + Mwidth mul + /Mwidth + exch def +} bind def + +/Mcharproc +{ + currentfile + (x) + readhexstring + pop + 0 get + exch + div +} bind def + +/Mshadeproc +{ + dup + 3 1 + roll + { + dup + Mcharproc + 3 1 + roll + } repeat + 1 eq + { + setgray + } + { + 3 eq + { + setrgbcolor + } + { + setcmykcolor + } ifelse + } ifelse +} bind def + +/Mrectproc +{ + 3 index + 2 index + moveto + 2 index + 3 -1 + roll + lineto + dup + 3 1 + roll + lineto + lineto + fill +} bind def + +/_Mcolorimage +{ + 7 1 + roll + pop + pop + matrix + invertmatrix + concat + 2 exch exp + 1 sub + 3 1 roll + 1 1 + 2 index + { + 1 1 + 4 index + { + dup + 1 sub + exch + 2 index + dup + 1 sub + exch + 7 index + 9 index + Mshadeproc + Mrectproc + } for + pop + } for + pop pop pop pop +} bind def + +/_Mimage +{ + pop + matrix + invertmatrix + concat + 2 exch exp + 1 sub + 3 1 roll + 1 1 + 2 index + { + 1 1 + 4 index + { + dup + 1 sub + exch + 2 index + dup + 1 sub + exch + 7 index + Mcharproc + setgray + Mrectproc + } for + pop + } for + pop pop pop +} bind def + +/Mimage { + 4 index + 4 index + mul 1600 gt + { image } + { _Mimage } + ifelse +} def + +/Mcolorimage { + 6 index + 6 index + mul 1600 gt + { colorimage } + { _Mcolorimage } + ifelse +} def +/Mnodistort true def +1.000000 1.000000 scale +91.562500 190.000000 translate +1.000000 -1.000000 scale +0.000000 0.000000 translate +/Mleft 0.000000 def +/Mbottom 0.000000 def +/Mwidth 230.375000 def +/Mheight 
186.812500 def +0 setgray +0 setlinewidth +/Courier findfont 12 scalefont setfont +/Mfontsize 12 def +/Plain /Courier findfont def + +%! +%%Creator: Mathematica +%%AspectRatio: .81114 +MathPictureStart +/Mabs { +Mgmatrix idtransform +Mtmatrix dtransform +} bind def +/Mabsadd { Mabs +3 -1 roll add +3 1 roll add +exch } bind def +%% SurfaceGraphics +%%IncludeResource: font Courier +%%IncludeFont: Courier +/Courier findfont 10 scalefont setfont +% Scaling calculations +5.55112e-017 1.04977 -0.0679587 1.04977 [ +[.02757 .24618 -6 -8.70276 ] +[.02757 .24618 0 .29724 ] +[.14598 .19958 -17.886 -9 ] +[.14598 .19958 .11404 0 ] +[.27013 .15073 -17.1354 -9 ] +[.27013 .15073 .86461 0 ] +[.40046 .09948 -16.3848 -9 ] +[.40046 .09948 1.61518 0 ] +[.53743 .04564 -15.6343 -9 ] +[.53743 .04564 2.36575 0 ] +[.68156 -0.01098 -4.96123 -9 ] +[.68156 -0.01098 1.03877 0 ] +[.29165 .07573 -9.31117 -12.5625 ] +[.29165 .07573 .68883 0 ] +[.70096 -0.00478 0 -6.26206 ] +[.70096 -0.00478 6 2.73794 ] +[.76745 .09703 0 -6.16187 ] +[.76745 .09703 18 2.83813 ] +[.82812 .18993 0 -6.07246 ] +[.82812 .18993 18 2.92754 ] +[.88369 .27502 0 -5.99218 ] +[.88369 .27502 18 3.00782 ] +[.93478 .35327 0 -5.9197 ] +[.93478 .35327 18 3.0803 ] +[.98191 .42546 0 -5.85393 ] +[.98191 .42546 6 3.14607 ] +[.91861 .21225 0 -8.41865 ] +[.91861 .21225 10 4.14385 ] +[.02267 .27415 -18 -2.74995 ] +[.02267 .27415 0 6.25005 ] +[.01456 .32496 -24 -2.79293 ] +[.01456 .32496 0 6.20707 ] +[.00619 .37747 -18 -2.8375 ] +[.00619 .37747 0 6.1625 ] +[-0.00248 .43177 -24 -2.88374 ] +[-0.00248 .43177 0 6.11626 ] +[-0.01144 .48795 -18 -2.93176 ] +[-0.01144 .48795 0 6.06824 ] +[ 0 0 0 0 ] +[ 1 .81114 0 0 ] +] MathScale +% Start of Graphics +1 setlinecap +1 setlinejoin +newpath +0 g +.25 Mabswid +[ ] 0 setdash +.03716 .25514 m +.68874 0 L +s +.03716 .25514 m +.04196 .25962 L +s +[(0)] .02757 .24618 1 .93395 Mshowa +.1552 .20892 m +.15981 .21359 L +s +[(0.2)] .14598 .19958 .98733 1 Mshowa +.27893 .16047 m +.28333 .16533 L +s +[(0.4)] 
.27013 .15073 .90393 1 Mshowa +.40878 .10962 m +.41294 .11469 L +s +[(0.6)] .40046 .09948 .82054 1 Mshowa +.54521 .0562 m +.54911 .06148 L +s +[(0.8)] .53743 .04564 .73714 1 Mshowa +.68874 0 m +.69233 .00549 L +s +[(1)] .68156 -0.01098 .65374 1 Mshowa +.125 Mabswid +.06616 .24378 m +.06901 .2465 L +s +.09549 .2323 m +.09832 .23504 L +s +.12517 .22067 m +.12797 .22345 L +s +.18558 .19702 m +.18832 .19985 L +s +.21633 .18498 m +.21903 .18784 L +s +.24744 .1728 m +.25012 .17569 L +s +.3108 .14799 m +.31341 .15094 L +s +.34306 .13535 m +.34564 .13834 L +s +.37572 .12257 m +.37826 .12558 L +s +.44225 .09652 m +.44471 .09959 L +s +.47614 .08324 m +.47856 .08635 L +s +.51046 .06981 m +.51284 .07294 L +s +.58041 .04242 m +.5827 .04562 L +s +.61605 .02846 m +.6183 .03169 L +s +.65216 .01432 m +.65436 .01759 L +s +gsave +.29165 .07573 -70.3112 -16.5625 Mabsadd m +1 1 Mabs scale +currentpoint translate +/MISOfy +{ + /newfontname exch def + /oldfontname exch def + + oldfontname findfont + dup length dict begin + {1 index /FID ne {def} {pop pop} ifelse} forall + /Encoding ISOLatin1Encoding def + currentdict + end + + newfontname exch definefont pop +} def + +0 20.5625 translate 1 -1 scale +63.000 12.813 moveto +%%IncludeResource: font Courier +%%IncludeFont: Courier +/Courier findfont 10.000 scalefont +[1 0 0 -1 0 0 ] makefont setfont +0.000 0.000 0.000 setrgbcolor +(t) show +1.000 setlinewidth +grestore +.25 Mabswid +.68874 0 m +.96935 .42924 L +s +.68874 0 m +.68263 .00239 L +s +[(0)] .70096 -0.00478 -1 .39157 Mshowa +.75514 .10158 m +.74899 .10385 L +s +[(0.2)] .76745 .09703 -1 .3693 Mshowa +.81573 .19425 m +.80953 .19642 L +s +[(0.4)] .82812 .18993 -1 .34944 Mshowa +.87123 .27915 m +.865 .28122 L +s +[(0.6)] .88369 .27502 -1 .3316 Mshowa +.92227 .35722 m +.91601 .35919 L +s +[(0.8)] .93478 .35327 -1 .31549 Mshowa +.96935 .42924 m +.96306 .43113 L +s +[(1)] .98191 .42546 -1 .30087 Mshowa +.125 Mabswid +.70593 .0263 m +.70226 .02771 L +s +.72272 .05198 m +.71904 .05338 L +s 
+.73912 .07706 m +.73543 .07845 L +s +.7708 .12553 m +.7671 .12688 L +s +.78611 .14895 m +.78241 .15028 L +s +.80108 .17185 m +.79737 .17317 L +s +.83006 .21617 m +.82633 .21745 L +s +.84407 .23761 m +.84035 .23888 L +s +.8578 .2586 m +.85407 .25986 L +s +.88439 .29928 m +.88065 .3005 L +s +.89727 .31899 m +.89353 .3202 L +s +.9099 .3383 m +.90615 .3395 L +s +.93439 .37576 m +.93063 .37693 L +s +.94627 .39394 m +.94251 .3951 L +s +.95792 .41176 m +.95416 .41291 L +s +gsave +.91861 .21225 -61 -12.4187 Mabsadd m +1 1 Mabs scale +currentpoint translate +/MISOfy +{ + /newfontname exch def + /oldfontname exch def + + oldfontname findfont + dup length dict begin + {1 index /FID ne {def} {pop pop} ifelse} forall + /Encoding ISOLatin1Encoding def + currentdict + end + + newfontname exch definefont pop +} def + +0 20.5625 translate 1 -1 scale +63.000 12.813 moveto +%%IncludeResource: font Courier +%%IncludeFont: Courier +/Courier findfont 10.000 scalefont +[1 0 0 -1 0 0 ] makefont setfont +0.000 0.000 0.000 setrgbcolor +(x) show +1.000 setlinewidth +grestore +.25 Mabswid +.03716 .25514 m +0 .48963 L +s +.0349 .26939 m +.04102 .26701 L +s +[(0.1)] .02267 .27415 1 -0.3889 Mshowa +.02683 .3203 m +.03297 .31798 L +s +[(0.15)] .01456 .32496 1 -0.37935 Mshowa +.01849 .37292 m +.02465 .37065 L +s +[(0.2)] .00619 .37747 1 -0.36944 Mshowa +.00987 .42733 m +.01605 .42512 L +s +[(0.25)] -0.00248 .43177 1 -0.35917 Mshowa +.00095 .48363 m +.00715 .48147 L +s +[(0.3)] -0.01144 .48795 1 -0.3485 Mshowa +.125 Mabswid +.03331 .27944 m +.03698 .27802 L +s +.03171 .28956 m +.03538 .28814 L +s +.03009 .29974 m +.03377 .29833 L +s +.02847 .30999 m +.03215 .30858 L +s +.02519 .33069 m +.02887 .3293 L +s +.02353 .34114 m +.02722 .33976 L +s +.02186 .35167 m +.02555 .35029 L +s +.02018 .36226 m +.02387 .36089 L +s +.01679 .38366 m +.02049 .3823 L +s +.01508 .39447 m +.01878 .39312 L +s +.01336 .40535 m +.01706 .404 L +s +.01162 .4163 m +.01532 .41497 L +s +.00811 .43844 m +.01182 .43712 L +s 
+.00634 .44962 m +.01005 .4483 L +s +.00456 .46088 m +.00827 .45957 L +s +.00276 .47222 m +.00647 .47091 L +s +.03648 .2594 m +.04015 .25797 L +s +.25 Mabswid +.03716 .25514 m +0 .48963 L +s +0 .48963 m +.39787 .81114 L +s +.39787 .81114 m +.40529 .59895 L +s +.40529 .59895 m +.03716 .25514 L +s +.68874 0 m +.96935 .42924 L +s +.96935 .42924 m +1 .6535 L +s +1 .6535 m +.70298 .24544 L +s +.70298 .24544 m +.68874 0 L +s +.03716 .25514 m +0 .48963 L +s +0 .48963 m +.70298 .24544 L +s +.70298 .24544 m +.68874 0 L +s +.68874 0 m +.03716 .25514 L +s +.40529 .59895 m +.96935 .42924 L +s +.96935 .42924 m +1 .6535 L +s +1 .6535 m +.39787 .81114 L +s +.39787 .81114 m +.40529 .59895 L +s +0 0 m +1 0 L +1 .81114 L +0 .81114 L +closepath +clip +newpath +.5 Mabswid +.737 .721 .837 r +.37496 .77944 .39806 .80578 .43728 .78226 .41454 .75583 Metetra +.734 .723 .841 r +.41454 .75583 .43728 .78226 .47677 .75934 .45439 .7328 Metetra +.731 .724 .844 r +.45439 .7328 .47677 .75934 .51656 .73694 .49455 .71025 Metetra +.728 .724 .847 r +.49455 .71025 .51656 .73694 .55667 .71498 .53504 .68813 Metetra +.725 .725 .85 r +.53504 .68813 .55667 .71498 .59713 .69341 .57589 .66636 Metetra +.723 .726 .852 r +.57589 .66636 .59713 .69341 .63795 .67216 .61711 .64491 Metetra +.72 .726 .855 r +.61711 .64491 .63795 .67216 .67917 .65119 .65874 .62371 Metetra +.718 .727 .857 r +.65874 .62371 .67917 .65119 .7208 .63046 .70078 .60272 Metetra +.716 .727 .859 r +.70078 .60272 .7208 .63046 .76286 .60991 .74327 .5819 Metetra +.714 .727 .86 r +.74327 .5819 .76286 .60991 .80537 .58952 .78621 .56122 Metetra +.713 .727 .862 r +.78621 .56122 .80537 .58952 .84834 .56924 .82963 .54064 Metetra +.711 .727 .863 r +.82963 .54064 .84834 .56924 .8918 .54905 .87354 .52013 Metetra +.709 .727 .864 r +.87354 .52013 .8918 .54905 .93575 .52891 .91796 .49965 Metetra +.708 .727 .865 r +.91796 .49965 .93575 .52891 .98022 .50881 .96291 .47919 Metetra +.736 .721 .838 r +.35133 .75249 .37496 .77944 .41454 .75583 .39127 .72878 Metetra 
+.733 .722 .841 r +.39127 .72878 .41454 .75583 .45439 .7328 .43149 .70562 Metetra +.73 .723 .845 r +.43149 .70562 .45439 .7328 .49455 .71025 .47202 .68292 Metetra +.727 .724 .848 r +.47202 .68292 .49455 .71025 .53504 .68813 .5129 .66061 Metetra +.724 .725 .85 r +.5129 .66061 .53504 .68813 .57589 .66636 .55414 .63865 Metetra +.722 .725 .853 r +.55414 .63865 .57589 .66636 .61711 .64491 .59576 .61696 Metetra +.72 .726 .855 r +.59576 .61696 .61711 .64491 .65874 .62371 .6378 .59552 Metetra +.717 .726 .857 r +.6378 .59552 .65874 .62371 .70078 .60272 .68027 .57426 Metetra +.715 .726 .859 r +.68027 .57426 .70078 .60272 .74327 .5819 .72318 .55316 Metetra +.714 .727 .86 r +.72318 .55316 .74327 .5819 .78621 .56122 .76657 .53217 Metetra +.712 .727 .862 r +.76657 .53217 .78621 .56122 .82963 .54064 .81044 .51127 Metetra +.71 .727 .863 r +.81044 .51127 .82963 .54064 .87354 .52013 .85481 .49041 Metetra +.709 .727 .864 r +.85481 .49041 .87354 .52013 .91796 .49965 .89971 .46958 Metetra +.708 .727 .865 r +.89971 .46958 .91796 .49965 .96291 .47919 .94514 .44874 Metetra +.734 .721 .839 r +.32714 .72491 .35133 .75249 .39127 .72878 .36745 .70109 Metetra +.731 .722 .842 r +.36745 .70109 .39127 .72878 .43149 .70562 .40804 .67778 Metetra +.728 .723 .845 r +.40804 .67778 .43149 .70562 .47202 .68292 .44895 .65491 Metetra +.726 .724 .848 r +.44895 .65491 .47202 .68292 .5129 .66061 .49022 .63241 Metetra +.723 .724 .851 r +.49022 .63241 .5129 .66061 .55414 .63865 .53185 .61022 Metetra +.721 .725 .853 r +.53185 .61022 .55414 .63865 .59576 .61696 .57389 .5883 Metetra +.719 .725 .855 r +.57389 .5883 .59576 .61696 .6378 .59552 .61634 .56659 Metetra +.717 .726 .857 r +.61634 .56659 .6378 .59552 .68027 .57426 .65923 .54505 Metetra +.715 .726 .859 r +.65923 .54505 .68027 .57426 .72318 .55316 .70259 .52364 Metetra +.713 .726 .86 r +.70259 .52364 .72318 .55316 .76657 .53217 .74642 .50233 Metetra +.711 .726 .862 r +.74642 .50233 .76657 .53217 .81044 .51127 .79075 .48109 Metetra +.71 .726 .863 r +.79075 
.48109 .81044 .51127 .85481 .49041 .83559 .45987 Metetra +.708 .726 .864 r +.83559 .45987 .85481 .49041 .89971 .46958 .88097 .43867 Metetra +.707 .726 .865 r +.88097 .43867 .89971 .46958 .94514 .44874 .92689 .41743 Metetra +.733 .721 .839 r +.30239 .69668 .32714 .72491 .36745 .70109 .34306 .67273 Metetra +.73 .722 .843 r +.34306 .67273 .36745 .70109 .40804 .67778 .38403 .64927 Metetra +.727 .723 .846 r +.38403 .64927 .40804 .67778 .44895 .65491 .42532 .62621 Metetra +.725 .723 .849 r +.42532 .62621 .44895 .65491 .49022 .63241 .46698 .60349 Metetra +.722 .724 .851 r +.46698 .60349 .49022 .63241 .53185 .61022 .50902 .58107 Metetra +.72 .724 .853 r +.50902 .58107 .53185 .61022 .57389 .5883 .55146 .55889 Metetra +.718 .725 .856 r +.55146 .55889 .57389 .5883 .61634 .56659 .59434 .5369 Metetra +.716 .725 .857 r +.59434 .5369 .61634 .56659 .65923 .54505 .63766 .51506 Metetra +.714 .725 .859 r +.63766 .51506 .65923 .54505 .70259 .52364 .68146 .49333 Metetra +.712 .726 .861 r +.68146 .49333 .70259 .52364 .74642 .50233 .72575 .47168 Metetra +.711 .726 .862 r +.72575 .47168 .74642 .50233 .79075 .48109 .77054 .45008 Metetra +.709 .726 .863 r +.77054 .45008 .79075 .48109 .83559 .45987 .81586 .42849 Metetra +.708 .726 .864 r +.81586 .42849 .83559 .45987 .88097 .43867 .86173 .40688 Metetra +.707 .726 .865 r +.86173 .40688 .88097 .43867 .92689 .41743 .90816 .38523 Metetra +.732 .72 .84 r +.27705 .66778 .30239 .69668 .34306 .67273 .31808 .64369 Metetra +.729 .721 .844 r +.31808 .64369 .34306 .67273 .38403 .64927 .35943 .62004 Metetra +.726 .722 .847 r +.35943 .62004 .38403 .64927 .42532 .62621 .40111 .59678 Metetra +.724 .723 .849 r +.40111 .59678 .42532 .62621 .46698 .60349 .44317 .57384 Metetra +.721 .723 .852 r +.44317 .57384 .46698 .60349 .50902 .58107 .48561 .55116 Metetra +.719 .724 .854 r +.48561 .55116 .50902 .58107 .55146 .55889 .52848 .5287 Metetra +.717 .724 .856 r +.52848 .5287 .55146 .55889 .59434 .5369 .57178 .50641 Metetra +.715 .725 .858 r +.57178 .50641 .59434 
.5369 .63766 .51506 .61554 .48425 Metetra +.713 .725 .859 r +.61554 .48425 .63766 .51506 .68146 .49333 .65979 .46219 Metetra +.712 .725 .861 r +.65979 .46219 .68146 .49333 .72575 .47168 .70453 .44018 Metetra +.71 .725 .862 r +.70453 .44018 .72575 .47168 .77054 .45008 .7498 .4182 Metetra +.709 .725 .863 r +.7498 .4182 .77054 .45008 .81586 .42849 .79561 .39621 Metetra +.707 .725 .864 r +.79561 .39621 .81586 .42849 .86173 .40688 .84197 .37419 Metetra +.706 .725 .865 r +.84197 .37419 .86173 .40688 .90816 .38523 .88891 .35211 Metetra +.73 .72 .841 r +.2511 .63818 .27705 .66778 .31808 .64369 .2925 .61392 Metetra +.728 .721 .844 r +.2925 .61392 .31808 .64369 .35943 .62004 .33423 .59009 Metetra +.725 .722 .847 r +.33423 .59009 .35943 .62004 .40111 .59678 .3763 .5666 Metetra +.723 .723 .85 r +.3763 .5666 .40111 .59678 .44317 .57384 .41876 .54341 Metetra +.72 .723 .852 r +.41876 .54341 .44317 .57384 .48561 .55116 .46161 .52047 Metetra +.718 .724 .854 r +.46161 .52047 .48561 .55116 .52848 .5287 .5049 .49771 Metetra +.716 .724 .856 r +.5049 .49771 .52848 .5287 .57178 .50641 .54863 .47511 Metetra +.714 .724 .858 r +.54863 .47511 .57178 .50641 .61554 .48425 .59284 .45261 Metetra +.713 .724 .859 r +.59284 .45261 .61554 .48425 .65979 .46219 .63754 .43019 Metetra +.711 .724 .861 r +.63754 .43019 .65979 .46219 .70453 .44018 .68276 .4078 Metetra +.709 .725 .862 r +.68276 .4078 .70453 .44018 .7498 .4182 .72851 .38542 Metetra +.708 .725 .863 r +.72851 .38542 .7498 .4182 .79561 .39621 .77481 .36301 Metetra +.707 .725 .864 r +.77481 .36301 .79561 .39621 .84197 .37419 .82168 .34056 Metetra +.706 .725 .865 r +.82168 .34056 .84197 .37419 .88891 .35211 .86913 .31802 Metetra +.729 .72 .842 r +.22451 .60786 .2511 .63818 .2925 .61392 .26629 .58342 Metetra +.726 .721 .845 r +.26629 .58342 .2925 .61392 .33423 .59009 .3084 .55937 Metetra +.724 .722 .848 r +.3084 .55937 .33423 .59009 .3763 .5666 .35087 .53565 Metetra +.721 .722 .85 r +.35087 .53565 .3763 .5666 .41876 .54341 .39373 .5122 Metetra 
+.719 .723 .852 r +.39373 .5122 .41876 .54341 .46161 .52047 .437 .48896 Metetra +.717 .723 .854 r +.437 .48896 .46161 .52047 .5049 .49771 .48071 .46589 Metetra +.715 .723 .856 r +.48071 .46589 .5049 .49771 .54863 .47511 .52489 .44295 Metetra +.713 .724 .858 r +.52489 .44295 .54863 .47511 .59284 .45261 .56955 .4201 Metetra +.712 .724 .859 r +.56955 .4201 .59284 .45261 .63754 .43019 .61471 .39729 Metetra +.71 .724 .861 r +.61471 .39729 .63754 .43019 .68276 .4078 .6604 .37451 Metetra +.709 .724 .862 r +.6604 .37451 .68276 .4078 .72851 .38542 .70663 .35171 Metetra +.708 .724 .863 r +.70663 .35171 .72851 .38542 .77481 .36301 .75343 .32886 Metetra +.707 .724 .864 r +.75343 .32886 .77481 .36301 .82168 .34056 .80082 .30594 Metetra +.705 .724 .864 r +.80082 .30594 .82168 .34056 .86913 .31802 .8488 .28292 Metetra +.728 .72 .843 r +.19726 .57679 .22451 .60786 .26629 .58342 .23942 .55215 Metetra +.725 .72 .846 r +.23942 .55215 .26629 .58342 .3084 .55937 .28192 .52787 Metetra +.723 .721 .848 r +.28192 .52787 .3084 .55937 .35087 .53565 .32479 .50389 Metetra +.72 .722 .851 r +.32479 .50389 .35087 .53565 .39373 .5122 .36806 .48015 Metetra +.718 .722 .853 r +.36806 .48015 .39373 .5122 .437 .48896 .41175 .4566 Metetra +.716 .723 .855 r +.41175 .4566 .437 .48896 .48071 .46589 .45589 .4332 Metetra +.714 .723 .857 r +.45589 .4332 .48071 .46589 .52489 .44295 .50051 .40991 Metetra +.713 .723 .858 r +.50051 .40991 .52489 .44295 .56955 .4201 .54563 .38667 Metetra +.711 .723 .859 r +.54563 .38667 .56955 .4201 .61471 .39729 .59126 .36347 Metetra +.71 .723 .861 r +.59126 .36347 .61471 .39729 .6604 .37451 .63743 .34026 Metetra +.708 .724 .862 r +.63743 .34026 .6604 .37451 .70663 .35171 .68416 .31702 Metetra +.707 .724 .863 r +.68416 .31702 .70663 .35171 .75343 .32886 .73147 .29371 Metetra +.706 .724 .863 r +.73147 .29371 .75343 .32886 .80082 .30594 .77938 .27031 Metetra +.705 .723 .864 r +.77938 .27031 .80082 .30594 .8488 .28292 .8279 .24678 Metetra +.727 .719 .843 r +.16933 .54494 .19726 
.57679 .23942 .55215 .21187 .52008 Metetra +.724 .72 .846 r +.21187 .52008 .23942 .55215 .28192 .52787 .25476 .49555 Metetra +.722 .721 .849 r +.25476 .49555 .28192 .52787 .32479 .50389 .29803 .47129 Metetra +.719 .721 .851 r +.29803 .47129 .32479 .50389 .36806 .48015 .34172 .44725 Metetra +.717 .722 .853 r +.34172 .44725 .36806 .48015 .41175 .4566 .38584 .42337 Metetra +.715 .722 .855 r +.38584 .42337 .41175 .4566 .45589 .4332 .43042 .39961 Metetra +.714 .723 .857 r +.43042 .39961 .45589 .4332 .50051 .40991 .47549 .37594 Metetra +.712 .723 .858 r +.47549 .37594 .50051 .40991 .54563 .38667 .52107 .35231 Metetra +.711 .723 .86 r +.52107 .35231 .54563 .38667 .59126 .36347 .56717 .32868 Metetra +.709 .723 .861 r +.56717 .32868 .59126 .36347 .63743 .34026 .61384 .30503 Metetra +.708 .723 .862 r +.61384 .30503 .63743 .34026 .68416 .31702 .66107 .28132 Metetra +.707 .723 .863 r +.66107 .28132 .68416 .31702 .73147 .29371 .70889 .25752 Metetra +.706 .723 .863 r +.70889 .25752 .73147 .29371 .77938 .27031 .75733 .23361 Metetra +.705 .723 .864 r +.75733 .23361 .77938 .27031 .8279 .24678 .8064 .20955 Metetra +.725 .719 .844 r +.14069 .51228 .16933 .54494 .21187 .52008 .18362 .48718 Metetra +.723 .72 .847 r +.18362 .48718 .21187 .52008 .25476 .49555 .2269 .46238 Metetra +.721 .721 .849 r +.2269 .46238 .25476 .49555 .29803 .47129 .27058 .43782 Metetra +.718 .721 .852 r +.27058 .43782 .29803 .47129 .34172 .44725 .31468 .41345 Metetra +.716 .721 .854 r +.31468 .41345 .34172 .44725 .38584 .42337 .35923 .38922 Metetra +.715 .722 .855 r +.35923 .38922 .38584 .42337 .43042 .39961 .40426 .36509 Metetra +.713 .722 .857 r +.40426 .36509 .43042 .39961 .47549 .37594 .44978 .34102 Metetra +.711 .722 .858 r +.44978 .34102 .47549 .37594 .52107 .35231 .49583 .31696 Metetra +.71 .722 .86 r +.49583 .31696 .52107 .35231 .56717 .32868 .54242 .29289 Metetra +.709 .723 .861 r +.54242 .29289 .56717 .32868 .61384 .30503 .58958 .26876 Metetra +.707 .723 .862 r +.58958 .26876 .61384 .30503 .66107 .28132 
.63733 .24456 Metetra +.706 .723 .862 r +.63733 .24456 .66107 .28132 .70889 .25752 .68568 .22025 Metetra +.705 .722 .863 r +.68568 .22025 .70889 .25752 .75733 .23361 .73465 .1958 Metetra +.705 .722 .864 r +.73465 .1958 .75733 .23361 .8064 .20955 .78428 .17119 Metetra +.724 .719 .845 r +.11132 .47878 .14069 .51228 .18362 .48718 .15463 .45342 Metetra +.722 .72 .848 r +.15463 .45342 .18362 .48718 .2269 .46238 .19832 .42833 Metetra +.72 .72 .85 r +.19832 .42833 .2269 .46238 .27058 .43782 .24241 .40345 Metetra +.718 .721 .852 r +.24241 .40345 .27058 .43782 .31468 .41345 .28693 .37873 Metetra +.716 .721 .854 r +.28693 .37873 .31468 .41345 .35923 .38922 .33192 .35413 Metetra +.714 .721 .856 r +.33192 .35413 .35923 .38922 .40426 .36509 .37739 .32959 Metetra +.712 .722 .857 r +.37739 .32959 .40426 .36509 .44978 .34102 .42338 .30509 Metetra +.711 .722 .858 r +.42338 .30509 .44978 .34102 .49583 .31696 .4699 .28059 Metetra +.709 .722 .86 r +.4699 .28059 .49583 .31696 .54242 .29289 .51698 .25604 Metetra +.708 .722 .861 r +.51698 .25604 .54242 .29289 .58958 .26876 .56465 .23143 Metetra +.707 .722 .861 r +.56465 .23143 .58958 .26876 .63733 .24456 .61291 .20671 Metetra +.706 .722 .862 r +.61291 .20671 .63733 .24456 .68568 .22025 .66179 .18186 Metetra +.705 .722 .863 r +.66179 .18186 .68568 .22025 .73465 .1958 .71132 .15684 Metetra +.704 .722 .863 r +.71132 .15684 .73465 .1958 .78428 .17119 .76151 .13164 Metetra +.723 .719 .846 r +.08118 .44441 .11132 .47878 .15463 .45342 .12488 .41877 Metetra +.721 .719 .848 r +.12488 .41877 .15463 .45342 .19832 .42833 .16897 .39336 Metetra +.719 .72 .85 r +.16897 .39336 .19832 .42833 .24241 .40345 .21348 .36813 Metetra +.717 .72 .852 r +.21348 .36813 .24241 .40345 .28693 .37873 .25843 .34304 Metetra +.715 .721 .854 r +.25843 .34304 .28693 .37873 .33192 .35413 .30386 .31804 Metetra +.713 .721 .856 r +.30386 .31804 .33192 .35413 .37739 .32959 .34979 .29308 Metetra +.712 .721 .857 r +.34979 .29308 .37739 .32959 .42338 .30509 .39624 .26814 Metetra 
+.71 .721 .859 r +.39624 .26814 .42338 .30509 .4699 .28059 .44325 .24316 Metetra +.709 .722 .86 r +.44325 .24316 .4699 .28059 .51698 .25604 .49083 .21811 Metetra +.708 .722 .861 r +.49083 .21811 .51698 .25604 .56465 .23143 .539 .19298 Metetra +.707 .722 .861 r +.539 .19298 .56465 .23143 .61291 .20671 .58779 .16771 Metetra +.706 .722 .862 r +.58779 .16771 .61291 .20671 .66179 .18186 .63722 .14229 Metetra +.705 .721 .863 r +.63722 .14229 .66179 .18186 .71132 .15684 .6873 .11668 Metetra +.704 .721 .863 r +.6873 .11668 .71132 .15684 .76151 .13164 .73807 .09086 Metetra +.722 .718 .846 r +.05025 .40914 .08118 .44441 .12488 .41877 .09435 .38318 Metetra +.72 .719 .849 r +.09435 .38318 .12488 .41877 .16897 .39336 .13884 .35743 Metetra +.718 .72 .851 r +.13884 .35743 .16897 .39336 .21348 .36813 .18376 .33184 Metetra +.716 .72 .853 r +.18376 .33184 .21348 .36813 .25843 .34304 .22915 .30635 Metetra +.714 .72 .855 r +.22915 .30635 .25843 .34304 .30386 .31804 .27502 .28093 Metetra +.712 .721 .856 r +.27502 .28093 .30386 .31804 .34979 .29308 .32141 .25552 Metetra +.711 .721 .857 r +.32141 .25552 .34979 .29308 .39624 .26814 .36834 .2301 Metetra +.71 .721 .859 r +.36834 .2301 .39624 .26814 .44325 .24316 .41584 .20462 Metetra +.708 .721 .86 r +.41584 .20462 .44325 .24316 .49083 .21811 .46392 .17905 Metetra +.707 .721 .861 r +.46392 .17905 .49083 .21811 .539 .19298 .51262 .15336 Metetra +.706 .721 .861 r +.51262 .15336 .539 .19298 .58779 .16771 .56194 .12752 Metetra +.705 .721 .862 r +.56194 .12752 .58779 .16771 .63722 .14229 .61192 .1015 Metetra +.704 .721 .862 r +.61192 .1015 .63722 .14229 .6873 .11668 .66258 .07526 Metetra +.704 .721 .863 r +.66258 .07526 .6873 .11668 .73807 .09086 .71392 .0488 Metetra +.721 .718 .847 r +.01849 .37292 .05025 .40914 .09435 .38318 .06298 .34663 Metetra +.719 .719 .849 r +.06298 .34663 .09435 .38318 .13884 .35743 .10789 .32052 Metetra +.717 .719 .851 r +.10789 .32052 .13884 .35743 .18376 .33184 .15323 .29453 Metetra +.715 .72 .853 r +.15323 .29453 
.18376 .33184 .22915 .30635 .19905 .26862 Metetra +.713 .72 .855 r +.19905 .26862 .22915 .30635 .27502 .28093 .24538 .24274 Metetra +.712 .72 .856 r +.24538 .24274 .27502 .28093 .32141 .25552 .29224 .21686 Metetra +.71 .72 .858 r +.29224 .21686 .32141 .25552 .36834 .2301 .33965 .19093 Metetra +.709 .721 .859 r +.33965 .19093 .36834 .2301 .41584 .20462 .38764 .16492 Metetra +.708 .721 .86 r +.38764 .16492 .41584 .20462 .46392 .17905 .43624 .1388 Metetra +.707 .721 .86 r +.43624 .1388 .46392 .17905 .51262 .15336 .48546 .11253 Metetra +.706 .721 .861 r +.48546 .11253 .51262 .15336 .56194 .12752 .53533 .08608 Metetra +.705 .721 .862 r +.53533 .08608 .56194 .12752 .61192 .1015 .58587 .05943 Metetra +.704 .721 .862 r +.58587 .05943 .61192 .1015 .66258 .07526 .63711 .03254 Metetra +.704 .72 .863 r +.63711 .03254 .66258 .07526 .71392 .0488 .68905 .00539 Metetra +0 g +.25 Mabswid +.68874 0 m +.96935 .42924 L +s +.96935 .42924 m +1 .6535 L +s +1 .6535 m +.70298 .24544 L +s +.70298 .24544 m +.68874 0 L +s +.03716 .25514 m +0 .48963 L +s +0 .48963 m +.70298 .24544 L +s +.70298 .24544 m +.68874 0 L +s +.68874 0 m +.03716 .25514 L +s +.03716 .25514 m +.68874 0 L +s +.03716 .25514 m +.04196 .25962 L +s +[(0)] .02757 .24618 1 .93395 Mshowa +.1552 .20892 m +.15981 .21359 L +s +[(0.2)] .14598 .19958 .98733 1 Mshowa +.27893 .16047 m +.28333 .16533 L +s +[(0.4)] .27013 .15073 .90393 1 Mshowa +.40878 .10962 m +.41294 .11469 L +s +[(0.6)] .40046 .09948 .82054 1 Mshowa +.54521 .0562 m +.54911 .06148 L +s +[(0.8)] .53743 .04564 .73714 1 Mshowa +.68874 0 m +.69233 .00549 L +s +[(1)] .68156 -0.01098 .65374 1 Mshowa +.125 Mabswid +.06616 .24378 m +.06901 .2465 L +s +.09549 .2323 m +.09832 .23504 L +s +.12517 .22067 m +.12797 .22345 L +s +.18558 .19702 m +.18832 .19985 L +s +.21633 .18498 m +.21903 .18784 L +s +.24744 .1728 m +.25012 .17569 L +s +.3108 .14799 m +.31341 .15094 L +s +.34306 .13535 m +.34564 .13834 L +s +.37572 .12257 m +.37826 .12558 L +s +.44225 .09652 m +.44471 .09959 L +s 
+.47614 .08324 m +.47856 .08635 L +s +.51046 .06981 m +.51284 .07294 L +s +.58041 .04242 m +.5827 .04562 L +s +.61605 .02846 m +.6183 .03169 L +s +.65216 .01432 m +.65436 .01759 L +s +gsave +.29165 .07573 -70.3112 -16.5625 Mabsadd m +1 1 Mabs scale +currentpoint translate +/MISOfy +{ + /newfontname exch def + /oldfontname exch def + + oldfontname findfont + dup length dict begin + {1 index /FID ne {def} {pop pop} ifelse} forall + /Encoding ISOLatin1Encoding def + currentdict + end + + newfontname exch definefont pop +} def + +0 20.5625 translate 1 -1 scale +63.000 12.813 moveto +%%IncludeResource: font Courier +%%IncludeFont: Courier +/Courier findfont 10.000 scalefont +[1 0 0 -1 0 0 ] makefont setfont +0.000 0.000 0.000 setrgbcolor +(t) show +1.000 setlinewidth +grestore +% End of Graphics +MathPictureEnd + +%%PSTrailer +end +grestore + diff --git a/graph2.eps b/graph2.eps new file mode 100644 index 0000000..dfc0fdd --- /dev/null +++ b/graph2.eps @@ -0,0 +1,2092 @@ +%!PS-Adobe-2.0 EPSF-2.0 +%%BoundingBox: 91.5625 3.1875 321.938 190. 
+/MISOfy +{ + /newfontname exch def + /oldfontname exch def + + oldfontname findfont + dup length dict begin + {1 index /FID ne {def} {pop pop} ifelse} forall + /Encoding ISOLatin1Encoding def + currentdict + end + + newfontname exch definefont pop +} def + +0 193.188 translate 1 -1 scale +gsave +150 dict begin +/Mfixwid true def +/Mrot 0 def +/Mpstart { + MathPictureStart +} bind def +/Mpend { + MathPictureEnd +} bind def +/Mscale { + 0 1 0 1 + 5 -1 roll + MathScale +} bind def +/Plain /Courier findfont def +/Bold /Courier-Bold findfont def +/Italic /Courier-Oblique findfont def +/MathPictureStart { + /Mimatrix + matrix currentmatrix + def + gsave + newpath + Mleft + Mbottom + translate + /Mtmatrix + matrix currentmatrix + def + Plain + Mfontsize scalefont + setfont + 0 setgray + 0 setlinewidth +} bind def +/MathPictureEnd { + grestore +} bind def +/MathSubStart { + Momatrix + Mgmatrix Mtmatrix + Mleft Mbottom + Mwidth Mheight + 9 -2 roll + moveto + Mtmatrix setmatrix + currentpoint + Mgmatrix setmatrix + 11 -2 roll + moveto + Mtmatrix setmatrix + currentpoint + 2 copy translate + /Mtmatrix matrix currentmatrix def + /Mleft 0 def + /Mbottom 0 def + 3 -1 roll + exch sub + /Mheight exch def + sub + /Mwidth exch def +} bind def +/MathSubEnd { + /Mheight exch def + /Mwidth exch def + /Mbottom exch def + /Mleft exch def + /Mtmatrix exch def + dup setmatrix + /Mgmatrix exch def + /Momatrix exch def +} bind def +/Mdot { + moveto + 0 0 rlineto + stroke +} bind def +/Mtetra { + moveto + lineto + lineto + lineto + fill +} bind def +/Metetra { + moveto + lineto + lineto + lineto + closepath + gsave + fill + grestore + 0 setgray + stroke +} bind def +/Mistroke { + flattenpath + 0 0 0 + { + 4 2 roll + pop pop + } + { + 4 -1 roll + 2 index + sub dup mul + 4 -1 roll + 2 index + sub dup mul + add sqrt + 4 -1 roll + add + 3 1 roll + } + { + stop + } + { + stop + } + pathforall + pop pop + currentpoint + stroke + moveto + currentdash + 3 -1 roll + add + setdash +} bind def 
+/Mfstroke { + stroke + currentdash + pop 0 + setdash +} bind def +/Mrotsboxa { + gsave + dup + /Mrot + exch def + Mrotcheck + Mtmatrix + dup + setmatrix + 7 1 roll + 4 index + 4 index + translate + rotate + 3 index + -1 mul + 3 index + -1 mul + translate + /Mtmatrix + matrix + currentmatrix + def + grestore + Msboxa + 3 -1 roll + /Mtmatrix + exch def + /Mrot + 0 def +} bind def +/Msboxa { + newpath + 5 -1 roll + Mvboxa + pop + Mboxout + 6 -1 roll + 5 -1 roll + 4 -1 roll + Msboxa1 + 5 -3 roll + Msboxa1 + Mboxrot + [ + 7 -2 roll + 2 copy + [ + 3 1 roll + 10 -1 roll + 9 -1 roll + ] + 6 1 roll + 5 -2 roll + ] +} bind def +/Msboxa1 { + sub + 2 div + dup + 2 index + 1 add + mul + 3 -1 roll + -1 add + 3 -1 roll + mul +} bind def +/Mvboxa { + Mfixwid + { + Mvboxa1 + } + { + dup + Mwidthcal + 0 exch + { + add + } + forall + exch + Mvboxa1 + 4 index + 7 -1 roll + add + 4 -1 roll + pop + 3 1 roll + } + ifelse +} bind def +/Mvboxa1 { + gsave + newpath + [ true + 3 -1 roll + { + Mbbox + 5 -1 roll + { + 0 + 5 1 roll + } + { + 7 -1 roll + exch sub + (m) stringwidth pop + .3 mul + sub + 7 1 roll + 6 -1 roll + 4 -1 roll + Mmin + 3 -1 roll + 5 index + add + 5 -1 roll + 4 -1 roll + Mmax + 4 -1 roll + } + ifelse + false + } + forall + { stop } if + counttomark + 1 add + 4 roll + ] + grestore +} bind def +/Mbbox { + 0 0 moveto + false charpath + flattenpath + pathbbox + newpath +} bind def +/Mmin { + 2 copy + gt + { exch } if + pop +} bind def +/Mmax { + 2 copy + lt + { exch } if + pop +} bind def +/Mrotshowa { + dup + /Mrot + exch def + Mrotcheck + Mtmatrix + dup + setmatrix + 7 1 roll + 4 index + 4 index + translate + rotate + 3 index + -1 mul + 3 index + -1 mul + translate + /Mtmatrix + matrix + currentmatrix + def + Mgmatrix setmatrix + Mshowa + /Mtmatrix + exch def + /Mrot 0 def +} bind def +/Mshowa { + 4 -2 roll + moveto + 2 index + Mtmatrix setmatrix + Mvboxa + 7 1 roll + Mboxout + 6 -1 roll + 5 -1 roll + 4 -1 roll + Mshowa1 + 4 1 roll + Mshowa1 + rmoveto + currentpoint + 
Mfixwid + { + Mshowax + } + { + Mshoway + } + ifelse + pop pop pop pop + Mgmatrix setmatrix +} bind def +/Mshowax { + 0 1 + 4 index length + -1 add + { + 2 index + 4 index + 2 index + get + 3 index + add + moveto + 4 index + exch get + show + } for +} bind def +/Mshoway { + 3 index + Mwidthcal + 5 1 roll + 0 1 + 4 index length + -1 add + { + 2 index + 4 index + 2 index + get + 3 index + add + moveto + 4 index + exch get + [ + 6 index + aload + length + 2 add + -1 roll + { + pop + Strform + stringwidth + pop + neg + exch + add + 0 rmoveto + } + exch + kshow + cleartomark + } for + pop +} bind def +/Mwidthcal { + [ + exch + { + Mwidthcal1 + } + forall + ] + [ + exch + dup + Maxlen + -1 add + 0 1 + 3 -1 roll + { + [ + exch + 2 index + { + 1 index + Mget + exch + } + forall + pop + Maxget + exch + } + for + pop + ] + Mreva +} bind def +/Mreva { + [ + exch + aload + length + -1 1 + {1 roll} + for + ] +} bind def +/Mget { + 1 index + length + -1 add + 1 index + ge + { + get + } + { + pop pop + 0 + } + ifelse +} bind def +/Maxlen { + [ + exch + { + length + } + forall + Maxget +} bind def +/Maxget { + counttomark + -1 add + 1 1 + 3 -1 roll + { + pop + Mmax + } + for + exch + pop +} bind def +/Mwidthcal1 { + [ + exch + { + Strform + stringwidth + pop + } + forall + ] +} bind def +/Strform { + /tem (x) def + tem 0 + 3 -1 roll + put + tem +} bind def +/Mshowa1 { + 2 copy + add + 4 1 roll + sub + mul + sub + -2 div +} bind def +/MathScale { + Mwidth + Mheight + Mlp + translate + scale + /yscale exch def + /ybias exch def + /xscale exch def + /xbias exch def + /Momatrix + xscale yscale matrix scale + xbias ybias matrix translate + matrix concatmatrix def + /Mgmatrix + matrix currentmatrix + def +} bind def +/Mlp { + 3 copy + Mlpfirst + { + Mnodistort + { + Mmin + dup + } if + 4 index + 2 index + 2 index + Mlprun + 11 index + 11 -1 roll + 10 -4 roll + Mlp1 + 8 index + 9 -5 roll + Mlp1 + 4 -1 roll + and + { exit } if + 3 -1 roll + pop pop + } loop + exch + 3 1 roll + 7 -3 roll + 
pop pop pop +} bind def +/Mlpfirst { + 3 -1 roll + dup length + 2 copy + -2 add + get + aload + pop pop pop + 4 -2 roll + -1 add + get + aload + pop pop pop + 6 -1 roll + 3 -1 roll + 5 -1 roll + sub + dup /MsaveAx exch def + div + 4 1 roll + exch sub + dup /MsaveAy exch def + div +} bind def +/Mlprun { + 2 copy + 4 index + 0 get + dup + 4 1 roll + Mlprun1 + 3 copy + 8 -2 roll + 9 -1 roll + { + 3 copy + Mlprun1 + 3 copy + 11 -3 roll + /gt Mlpminmax + 8 3 roll + 11 -3 roll + /lt Mlpminmax + 8 3 roll + } forall + pop pop pop pop + 3 1 roll + pop pop + aload pop + 5 -1 roll + aload pop + exch + 6 -1 roll + Mlprun2 + 8 2 roll + 4 -1 roll + Mlprun2 + 6 2 roll + 3 -1 roll + Mlprun2 + 4 2 roll + exch + Mlprun2 + 6 2 roll +} bind def +/Mlprun1 { + aload pop + exch + 6 -1 roll + 5 -1 roll + mul add + 4 -2 roll + mul + 3 -1 roll + add +} bind def +/Mlprun2 { + 2 copy + add 2 div + 3 1 roll + exch sub +} bind def +/Mlpminmax { + cvx + 2 index + 6 index + 2 index + exec + { + 7 -3 roll + 4 -1 roll + } if + 1 index + 5 index + 3 -1 roll + exec + { + 4 1 roll + pop + 5 -1 roll + aload + pop pop + 4 -1 roll + aload pop + [ + 8 -2 roll + pop + 5 -2 roll + pop + 6 -2 roll + pop + 5 -1 roll + ] + 4 1 roll + pop + } + { + pop pop pop + } ifelse +} bind def +/Mlp1 { + 5 index + 3 index sub + 5 index + 2 index mul + 1 index + le + 1 index + 0 le + or + dup + not + { + 1 index + 3 index div + .99999 mul + 8 -1 roll + pop + 7 1 roll + } + if + 8 -1 roll + 2 div + 7 -2 roll + pop sub + 5 index + 6 -3 roll + pop pop + mul sub + exch +} bind def +/intop 0 def +/inrht 0 def +/inflag 0 def +/outflag 0 def +/xadrht 0 def +/xadlft 0 def +/yadtop 0 def +/yadbot 0 def +/Minner { + outflag + 1 + eq + { + /outflag 0 def + /intop 0 def + /inrht 0 def + } if + 5 index + gsave + Mtmatrix setmatrix + Mvboxa pop + grestore + 3 -1 roll + pop + dup + intop + gt + { + /intop + exch def + } + { pop } + ifelse + dup + inrht + gt + { + /inrht + exch def + } + { pop } + ifelse + pop + /inflag + 1 def +} bind 
def +/Mouter { + /xadrht 0 def + /xadlft 0 def + /yadtop 0 def + /yadbot 0 def + inflag + 1 eq + { + dup + 0 lt + { + dup + intop + mul + neg + /yadtop + exch def + } if + dup + 0 gt + { + dup + intop + mul + /yadbot + exch def + } + if + pop + dup + 0 lt + { + dup + inrht + mul + neg + /xadrht + exch def + } if + dup + 0 gt + { + dup + inrht + mul + /xadlft + exch def + } if + pop + /outflag 1 def + } + { pop pop} + ifelse + /inflag 0 def + /inrht 0 def + /intop 0 def +} bind def +/Mboxout { + outflag + 1 + eq + { + 4 -1 + roll + xadlft + leadjust + add + sub + 4 1 roll + 3 -1 + roll + yadbot + leadjust + add + sub + 3 1 + roll + exch + xadrht + leadjust + add + add + exch + yadtop + leadjust + add + add + /outflag 0 def + /xadlft 0 def + /yadbot 0 def + /xadrht 0 def + /yadtop 0 def + } if +} bind def +/leadjust { + (m) stringwidth pop + .5 mul +} bind def +/Mrotcheck { + dup + 90 + eq + { + yadbot + /yadbot + xadrht + def + /xadrht + yadtop + def + /yadtop + xadlft + def + /xadlft + exch + def + } + if + dup + cos + 1 index + sin + Checkaux + dup + cos + 1 index + sin neg + exch + Checkaux + 3 1 roll + pop pop +} bind def +/Checkaux { + 4 index + exch + 4 index + mul + 3 1 roll + mul add + 4 1 roll +} bind def +/Mboxrot { + Mrot + 90 eq + { + brotaux + 4 2 + roll + } + if + Mrot + 180 eq + { + 4 2 + roll + brotaux + 4 2 + roll + brotaux + } + if + Mrot + 270 eq + { + 4 2 + roll + brotaux + } + if +} bind def +/brotaux { + neg + exch + neg +} bind def +/Mabsproc { + 0 + matrix defaultmatrix + dtransform idtransform + dup mul exch + dup mul + add sqrt +} bind def +/Mabswid { + Mabsproc + setlinewidth +} bind def +/Mabsdash { + exch + [ + exch + { + Mabsproc + } + forall + ] + exch + setdash +} bind def +/MBeginOrig { Momatrix concat} bind def +/MEndOrig { Mgmatrix setmatrix} bind def +/sampledsound where +{ pop} +{ /sampledsound { +exch +pop +exch +5 1 roll +mul +4 idiv +mul +2 idiv +exch pop +exch +/Mtempproc exch def +{ Mtempproc pop } +repeat +} bind def +} 
ifelse +% Here are the short operators +/g { setgray} bind def +/k { setcmykcolor} bind def +/m { moveto} bind def +/p { gsave} bind def +/r { setrgbcolor} bind def +/w { setlinewidth} bind def +/C { curveto} bind def +/F { fill} bind def +/L { lineto} bind def +/P { grestore} bind def +/s { stroke} bind def + +/MFill { + 0 0 moveto + Mwidth 0 lineto + Mwidth Mheight lineto + 0 Mheight lineto + fill +} bind def + +/MPlotRegion { + 3 index + Mwidth mul + 2 index + Mheight mul + translate + exch sub + Mheight mul + /Mheight + exch def + exch sub + Mwidth mul + /Mwidth + exch def +} bind def + +/Mcharproc +{ + currentfile + (x) + readhexstring + pop + 0 get + exch + div +} bind def + +/Mshadeproc +{ + dup + 3 1 + roll + { + dup + Mcharproc + 3 1 + roll + } repeat + 1 eq + { + setgray + } + { + 3 eq + { + setrgbcolor + } + { + setcmykcolor + } ifelse + } ifelse +} bind def + +/Mrectproc +{ + 3 index + 2 index + moveto + 2 index + 3 -1 + roll + lineto + dup + 3 1 + roll + lineto + lineto + fill +} bind def + +/_Mcolorimage +{ + 7 1 + roll + pop + pop + matrix + invertmatrix + concat + 2 exch exp + 1 sub + 3 1 roll + 1 1 + 2 index + { + 1 1 + 4 index + { + dup + 1 sub + exch + 2 index + dup + 1 sub + exch + 7 index + 9 index + Mshadeproc + Mrectproc + } for + pop + } for + pop pop pop pop +} bind def + +/_Mimage +{ + pop + matrix + invertmatrix + concat + 2 exch exp + 1 sub + 3 1 roll + 1 1 + 2 index + { + 1 1 + 4 index + { + dup + 1 sub + exch + 2 index + dup + 1 sub + exch + 7 index + Mcharproc + setgray + Mrectproc + } for + pop + } for + pop pop pop +} bind def + +/Mimage { + 4 index + 4 index + mul 1600 gt + { image } + { _Mimage } + ifelse +} def + +/Mcolorimage { + 6 index + 6 index + mul 1600 gt + { colorimage } + { _Mcolorimage } + ifelse +} def +/Mnodistort true def +1.000000 1.000000 scale +91.562500 190.000000 translate +1.000000 -1.000000 scale +0.000000 0.000000 translate +/Mleft 0.000000 def +/Mbottom 0.000000 def +/Mwidth 230.375000 def +/Mheight 
186.812500 def +0 setgray +0 setlinewidth +/Courier findfont 12 scalefont setfont +/Mfontsize 12 def +/Plain /Courier findfont def + +%! +%%Creator: Mathematica +%%AspectRatio: .81114 +MathPictureStart +/Mabs { +Mgmatrix idtransform +Mtmatrix dtransform +} bind def +/Mabsadd { Mabs +3 -1 roll add +3 1 roll add +exch } bind def +%% SurfaceGraphics +%%IncludeResource: font Courier +%%IncludeFont: Courier +/Courier findfont 10 scalefont setfont +% Scaling calculations +5.55112e-017 1.04977 -0.0679587 1.04977 [ +[.02757 .24618 -6 -8.70276 ] +[.02757 .24618 0 .29724 ] +[.14598 .19958 -17.886 -9 ] +[.14598 .19958 .11404 0 ] +[.27013 .15073 -17.1354 -9 ] +[.27013 .15073 .86461 0 ] +[.40046 .09948 -16.3848 -9 ] +[.40046 .09948 1.61518 0 ] +[.53743 .04564 -15.6343 -9 ] +[.53743 .04564 2.36575 0 ] +[.68156 -0.01098 -4.96123 -9 ] +[.68156 -0.01098 1.03877 0 ] +[.29165 .07573 -9.31117 -12.5625 ] +[.29165 .07573 .68883 0 ] +[.70096 -0.00478 0 -6.26206 ] +[.70096 -0.00478 6 2.73794 ] +[.76745 .09703 0 -6.16187 ] +[.76745 .09703 18 2.83813 ] +[.82812 .18993 0 -6.07246 ] +[.82812 .18993 18 2.92754 ] +[.88369 .27502 0 -5.99218 ] +[.88369 .27502 18 3.00782 ] +[.93478 .35327 0 -5.9197 ] +[.93478 .35327 18 3.0803 ] +[.98191 .42546 0 -5.85393 ] +[.98191 .42546 6 3.14607 ] +[.91861 .21225 0 -8.41865 ] +[.91861 .21225 10 4.14385 ] +[.02102 .28449 -30 -2.75869 ] +[.02102 .28449 0 6.24131 ] +[.01238 .33864 -30 -2.80453 ] +[.01238 .33864 0 6.19547 ] +[.00343 .39471 -30 -2.85216 ] +[.00343 .39471 0 6.14784 ] +[-0.00583 .45281 -30 -2.90171 ] +[-0.00583 .45281 0 6.09829 ] +[ 0 0 0 0 ] +[ 1 .81114 0 0 ] +] MathScale +% Start of Graphics +1 setlinecap +1 setlinejoin +newpath +0 g +.25 Mabswid +[ ] 0 setdash +.03716 .25514 m +.68874 0 L +s +.03716 .25514 m +.04196 .25962 L +s +[(0)] .02757 .24618 1 .93395 Mshowa +.1552 .20892 m +.15981 .21359 L +s +[(0.2)] .14598 .19958 .98733 1 Mshowa +.27893 .16047 m +.28333 .16533 L +s +[(0.4)] .27013 .15073 .90393 1 Mshowa +.40878 .10962 m +.41294 .11469 L +s 
+[(0.6)] .40046 .09948 .82054 1 Mshowa +.54521 .0562 m +.54911 .06148 L +s +[(0.8)] .53743 .04564 .73714 1 Mshowa +.68874 0 m +.69233 .00549 L +s +[(1)] .68156 -0.01098 .65374 1 Mshowa +.125 Mabswid +.06616 .24378 m +.06901 .2465 L +s +.09549 .2323 m +.09832 .23504 L +s +.12517 .22067 m +.12797 .22345 L +s +.18558 .19702 m +.18832 .19985 L +s +.21633 .18498 m +.21903 .18784 L +s +.24744 .1728 m +.25012 .17569 L +s +.3108 .14799 m +.31341 .15094 L +s +.34306 .13535 m +.34564 .13834 L +s +.37572 .12257 m +.37826 .12558 L +s +.44225 .09652 m +.44471 .09959 L +s +.47614 .08324 m +.47856 .08635 L +s +.51046 .06981 m +.51284 .07294 L +s +.58041 .04242 m +.5827 .04562 L +s +.61605 .02846 m +.6183 .03169 L +s +.65216 .01432 m +.65436 .01759 L +s +gsave +.29165 .07573 -70.3112 -16.5625 Mabsadd m +1 1 Mabs scale +currentpoint translate +/MISOfy +{ + /newfontname exch def + /oldfontname exch def + + oldfontname findfont + dup length dict begin + {1 index /FID ne {def} {pop pop} ifelse} forall + /Encoding ISOLatin1Encoding def + currentdict + end + + newfontname exch definefont pop +} def + +0 20.5625 translate 1 -1 scale +63.000 12.813 moveto +%%IncludeResource: font Courier +%%IncludeFont: Courier +/Courier findfont 10.000 scalefont +[1 0 0 -1 0 0 ] makefont setfont +0.000 0.000 0.000 setrgbcolor +(t) show +1.000 setlinewidth +grestore +.25 Mabswid +.68874 0 m +.96935 .42924 L +s +.68874 0 m +.68263 .00239 L +s +[(0)] .70096 -0.00478 -1 .39157 Mshowa +.75514 .10158 m +.74899 .10385 L +s +[(0.2)] .76745 .09703 -1 .3693 Mshowa +.81573 .19425 m +.80953 .19642 L +s +[(0.4)] .82812 .18993 -1 .34944 Mshowa +.87123 .27915 m +.865 .28122 L +s +[(0.6)] .88369 .27502 -1 .3316 Mshowa +.92227 .35722 m +.91601 .35919 L +s +[(0.8)] .93478 .35327 -1 .31549 Mshowa +.96935 .42924 m +.96306 .43113 L +s +[(1)] .98191 .42546 -1 .30087 Mshowa +.125 Mabswid +.70593 .0263 m +.70226 .02771 L +s +.72272 .05198 m +.71904 .05338 L +s +.73912 .07706 m +.73543 .07845 L +s +.7708 .12553 m +.7671 .12688 L 
+s +.78611 .14895 m +.78241 .15028 L +s +.80108 .17185 m +.79737 .17317 L +s +.83006 .21617 m +.82633 .21745 L +s +.84407 .23761 m +.84035 .23888 L +s +.8578 .2586 m +.85407 .25986 L +s +.88439 .29928 m +.88065 .3005 L +s +.89727 .31899 m +.89353 .3202 L +s +.9099 .3383 m +.90615 .3395 L +s +.93439 .37576 m +.93063 .37693 L +s +.94627 .39394 m +.94251 .3951 L +s +.95792 .41176 m +.95416 .41291 L +s +gsave +.91861 .21225 -61 -12.4187 Mabsadd m +1 1 Mabs scale +currentpoint translate +/MISOfy +{ + /newfontname exch def + /oldfontname exch def + + oldfontname findfont + dup length dict begin + {1 index /FID ne {def} {pop pop} ifelse} forall + /Encoding ISOLatin1Encoding def + currentdict + end + + newfontname exch definefont pop +} def + +0 20.5625 translate 1 -1 scale +63.000 12.813 moveto +%%IncludeResource: font Courier +%%IncludeFont: Courier +/Courier findfont 10.000 scalefont +[1 0 0 -1 0 0 ] makefont setfont +0.000 0.000 0.000 setrgbcolor +(x) show +1.000 setlinewidth +grestore +.25 Mabswid +.03716 .25514 m +0 .48963 L +s +.03326 .27976 m +.03938 .27739 L +s +[(-0.08)] .02102 .28449 1 -0.38696 Mshowa +.02466 .33401 m +.0308 .3317 L +s +[(-0.06)] .01238 .33864 1 -0.37677 Mshowa +.01576 .3902 m +.02192 .38794 L +s +[(-0.04)] .00343 .39471 1 -0.36619 Mshowa +.00653 .44842 m +.01271 .44622 L +s +[(-0.02)] -0.00583 .45281 1 -0.35518 Mshowa +.125 Mabswid +.03114 .29315 m +.03481 .29173 L +s +.029 .30665 m +.03267 .30525 L +s +.02684 .32027 m +.03052 .31888 L +s +.02246 .34787 m +.02615 .34649 L +s +.02025 .36186 m +.02394 .36049 L +s +.01801 .37597 m +.02171 .3746 L +s +.01348 .40456 m +.01718 .40321 L +s +.01119 .41905 m +.01489 .41771 L +s +.00887 .43367 m +.01257 .43234 L +s +.03536 .26649 m +.03903 .26506 L +s +.00417 .46331 m +.00788 .462 L +s +.00179 .47833 m +.00551 .47703 L +s +.25 Mabswid +.03716 .25514 m +0 .48963 L +s +0 .48963 m +.39787 .81114 L +s +.39787 .81114 m +.40529 .59895 L +s +.40529 .59895 m +.03716 .25514 L +s +.68874 0 m +.96935 .42924 L +s 
+.96935 .42924 m +1 .6535 L +s +1 .6535 m +.70298 .24544 L +s +.70298 .24544 m +.68874 0 L +s +.03716 .25514 m +0 .48963 L +s +0 .48963 m +.70298 .24544 L +s +.70298 .24544 m +.68874 0 L +s +.68874 0 m +.03716 .25514 L +s +.40529 .59895 m +.96935 .42924 L +s +.96935 .42924 m +1 .6535 L +s +1 .6535 m +.39787 .81114 L +s +.39787 .81114 m +.40529 .59895 L +s +0 0 m +1 0 L +1 .81114 L +0 .81114 L +closepath +clip +newpath +.5 Mabswid +.166 .632 .932 r +.38272 .59295 .40512 .60371 .44146 .61018 .41938 .59865 Metetra +.248 .673 .962 r +.41938 .59865 .44146 .61018 .47871 .61513 .45697 .60287 Metetra +.313 .702 .979 r +.45697 .60287 .47871 .61513 .51683 .61871 .49545 .60575 Metetra +.365 .721 .988 r +.49545 .60575 .51683 .61871 .55582 .62102 .53481 .60739 Metetra +.406 .735 .992 r +.53481 .60739 .55582 .62102 .59566 .62218 .57503 .60791 Metetra +.44 .745 .992 r +.57503 .60791 .59566 .62218 .63634 .62227 .61611 .60738 Metetra +.467 .752 .991 r +.61611 .60738 .63634 .62227 .67785 .62138 .65805 .6059 Metetra +.489 .757 .99 r +.65805 .6059 .67785 .62138 .72019 .61959 .70082 .60352 Metetra +.507 .76 .987 r +.70082 .60352 .72019 .61959 .76335 .61696 .74445 .60031 Metetra +.522 .763 .984 r +.74445 .60031 .76335 .61696 .80734 .61354 .78892 .59633 Metetra +.536 .765 .982 r +.78892 .59633 .80734 .61354 .85217 .60938 .83425 .59163 Metetra +.547 .766 .979 r +.83425 .59163 .85217 .60938 .89784 .60454 .88044 .58624 Metetra +.556 .767 .976 r +.88044 .58624 .89784 .60454 .94435 .59904 .92749 .5802 Metetra +.565 .768 .974 r +.92749 .5802 .94435 .59904 .99172 .59293 .97543 .57355 Metetra +.201 .651 .946 r +.35962 .58164 .38272 .59295 .41938 .59865 .39662 .58656 Metetra +.276 .687 .97 r +.39662 .58656 .41938 .59865 .45697 .60287 .43455 .59005 Metetra +.336 .712 .984 r +.43455 .59005 .45697 .60287 .49545 .60575 .47339 .59222 Metetra +.383 .729 .99 r +.47339 .59222 .49545 .60575 .53481 .60739 .51313 .59318 Metetra +.421 .74 .992 r +.51313 .59318 .53481 .60739 .57503 .60791 .55375 .59305 
Metetra +.452 .749 .992 r +.55375 .59305 .57503 .60791 .61611 .60738 .59525 .59189 Metetra +.477 .755 .991 r +.59525 .59189 .61611 .60738 .65805 .6059 .63761 .58979 Metetra +.497 .759 .989 r +.63761 .58979 .65805 .6059 .70082 .60352 .68084 .58682 Metetra +.514 .762 .986 r +.68084 .58682 .70082 .60352 .74445 .60031 .72494 .58303 Metetra +.529 .764 .983 r +.72494 .58303 .74445 .60031 .78892 .59633 .7699 .57848 Metetra +.541 .766 .981 r +.7699 .57848 .78892 .59633 .83425 .59163 .81574 .5732 Metetra +.551 .767 .978 r +.81574 .5732 .83425 .59163 .88044 .58624 .86246 .56725 Metetra +.56 .768 .975 r +.86246 .56725 .88044 .58624 .92749 .5802 .91008 .56066 Metetra +.568 .769 .973 r +.91008 .56066 .92749 .5802 .97543 .57355 .95859 .55346 Metetra +.234 .668 .957 r +.33579 .56975 .35962 .58164 .39662 .58656 .37313 .57389 Metetra +.303 .699 .977 r +.37313 .57389 .39662 .58656 .43455 .59005 .41142 .57663 Metetra +.357 .72 .987 r +.41142 .57663 .43455 .59005 .47339 .59222 .45063 .57808 Metetra +.401 .735 .992 r +.45063 .57808 .47339 .59222 .51313 .59318 .49076 .57837 Metetra +.435 .745 .993 r +.49076 .57837 .51313 .59318 .55375 .59305 .53179 .57757 Metetra +.464 .752 .992 r +.53179 .57757 .55375 .59305 .59525 .59189 .57371 .57577 Metetra +.487 .757 .99 r +.57371 .57577 .59525 .59189 .63761 .58979 .61651 .57305 Metetra +.506 .761 .988 r +.61651 .57305 .63761 .58979 .68084 .58682 .66021 .56946 Metetra +.521 .764 .985 r +.66021 .56946 .68084 .58682 .72494 .58303 .70479 .56507 Metetra +.535 .766 .982 r +.70479 .56507 .72494 .58303 .7699 .57848 .75026 .55993 Metetra +.546 .767 .98 r +.75026 .55993 .7699 .57848 .81574 .5732 .79662 .55407 Metetra +.556 .768 .977 r +.79662 .55407 .81574 .5732 .86246 .56725 .84389 .54755 Metetra +.565 .769 .974 r +.84389 .54755 .86246 .56725 .91008 .56066 .89207 .54038 Metetra +.572 .77 .972 r +.89207 .54038 .91008 .56066 .95859 .55346 .94118 .53261 Metetra +.265 .683 .967 r +.31121 .55726 .33579 .56975 .37313 .57389 .34889 .56061 Metetra +.328 .71 .982 r 
+.34889 .56061 .37313 .57389 .41142 .57663 .38754 .56259 Metetra +.377 .728 .99 r +.38754 .56259 .41142 .57663 .45063 .57808 .42714 .56332 Metetra +.417 .741 .993 r +.42714 .56332 .45063 .57808 .49076 .57837 .46767 .56291 Metetra +.449 .749 .993 r +.46767 .56291 .49076 .57837 .53179 .57757 .50911 .56143 Metetra +.475 .756 .992 r +.50911 .56143 .53179 .57757 .57371 .57577 .55147 .55898 Metetra +.496 .76 .989 r +.55147 .55898 .57371 .57577 .61651 .57305 .59473 .55562 Metetra +.513 .763 .987 r +.59473 .55562 .61651 .57305 .66021 .56946 .63889 .55141 Metetra +.528 .765 .984 r +.63889 .55141 .66021 .56946 .70479 .56507 .68397 .54641 Metetra +.541 .767 .981 r +.68397 .54641 .70479 .56507 .75026 .55993 .72995 .54066 Metetra +.551 .768 .978 r +.72995 .54066 .75026 .55993 .79662 .55407 .77686 .5342 Metetra +.561 .769 .976 r +.77686 .5342 .79662 .55407 .84389 .54755 .82469 .52708 Metetra +.569 .77 .973 r +.82469 .52708 .84389 .54755 .89207 .54038 .87346 .51932 Metetra +.575 .77 .971 r +.87346 .51932 .89207 .54038 .94118 .53261 .92318 .51095 Metetra +.294 .697 .974 r +.28583 .54413 .31121 .55726 .34889 .56061 .32387 .54668 Metetra +.351 .72 .986 r +.32387 .54668 .34889 .56061 .38754 .56259 .3629 .5479 Metetra +.396 .735 .991 r +.3629 .5479 .38754 .56259 .42714 .56332 .40288 .5479 Metetra +.432 .746 .993 r +.40288 .5479 .42714 .56332 .46767 .56291 .44382 .54677 Metetra +.461 .753 .993 r +.44382 .54677 .46767 .56291 .50911 .56143 .48569 .54462 Metetra +.485 .758 .991 r +.48569 .54462 .50911 .56143 .55147 .55898 .52849 .54149 Metetra +.505 .762 .988 r +.52849 .54149 .55147 .55898 .59473 .55562 .57222 .53748 Metetra +.521 .765 .986 r +.57222 .53748 .59473 .55562 .63889 .55141 .61687 .53263 Metetra +.535 .767 .983 r +.61687 .53263 .63889 .55141 .68397 .54641 .66245 .527 Metetra +.546 .768 .98 r +.66245 .527 .68397 .54641 .72995 .54066 .70896 .52062 Metetra +.556 .769 .977 r +.70896 .52062 .72995 .54066 .77686 .5342 .75642 .51355 Metetra +.565 .77 .975 r +.75642 .51355 .77686 .5342 
.82469 .52708 .80483 .50581 Metetra +.572 .77 .972 r +.80483 .50581 .82469 .52708 .87346 .51932 .8542 .49744 Metetra +.579 .771 .97 r +.8542 .49744 .87346 .51932 .92318 .51095 .90454 .48845 Metetra +.321 .709 .981 r +.25963 .53034 .28583 .54413 .32387 .54668 .29804 .53208 Metetra +.372 .728 .989 r +.29804 .53208 .32387 .54668 .3629 .5479 .33744 .53253 Metetra +.414 .741 .993 r +.33744 .53253 .3629 .5479 .40288 .5479 .37783 .53178 Metetra +.447 .75 .993 r +.37783 .53178 .40288 .5479 .44382 .54677 .41918 .52994 Metetra +.473 .756 .992 r +.41918 .52994 .44382 .54677 .48569 .54462 .46149 .52708 Metetra +.495 .761 .99 r +.46149 .52708 .48569 .54462 .52849 .54149 .50474 .52327 Metetra +.513 .764 .987 r +.50474 .52327 .52849 .54149 .57222 .53748 .54895 .51859 Metetra +.528 .766 .984 r +.54895 .51859 .57222 .53748 .61687 .53263 .5941 .51308 Metetra +.541 .768 .982 r +.5941 .51308 .61687 .53263 .66245 .527 .6402 .5068 Metetra +.552 .769 .979 r +.6402 .5068 .66245 .527 .70896 .52062 .68725 .49979 Metetra +.561 .77 .976 r +.68725 .49979 .70896 .52062 .75642 .51355 .73528 .49207 Metetra +.569 .77 .974 r +.73528 .49207 .75642 .51355 .80483 .50581 .78427 .4837 Metetra +.576 .771 .971 r +.78427 .4837 .80483 .50581 .8542 .49744 .83426 .47469 Metetra +.582 .771 .969 r +.83426 .47469 .8542 .49744 .90454 .48845 .88525 .46507 Metetra +.346 .719 .985 r +.23256 .51585 .25963 .53034 .29804 .53208 .27135 .51677 Metetra +.393 .735 .991 r +.27135 .51677 .29804 .53208 .33744 .53253 .31115 .51643 Metetra +.43 .747 .993 r +.31115 .51643 .33744 .53253 .37783 .53178 .35194 .51493 Metetra +.46 .754 .993 r +.35194 .51493 .37783 .53178 .41918 .52994 .39372 .51235 Metetra +.484 .759 .991 r +.39372 .51235 .41918 .52994 .46149 .52708 .43648 .50878 Metetra +.504 .763 .989 r +.43648 .50878 .46149 .52708 .50474 .52327 .4802 .50428 Metetra +.521 .766 .986 r +.4802 .50428 .50474 .52327 .54895 .51859 .52489 .49891 Metetra +.535 .768 .983 r +.52489 .49891 .54895 .51859 .5941 .51308 .57055 .49273 Metetra 
+.547 .769 .98 r +.57055 .49273 .5941 .51308 .6402 .5068 .61718 .48578 Metetra +.557 .77 .977 r +.61718 .48578 .6402 .5068 .68725 .49979 .66479 .4781 Metetra +.566 .771 .975 r +.66479 .4781 .68725 .49979 .73528 .49207 .71339 .46973 Metetra +.573 .771 .972 r +.71339 .46973 .73528 .49207 .78427 .4837 .763 .4607 Metetra +.58 .771 .97 r +.763 .4607 .78427 .4837 .83426 .47469 .81361 .45102 Metetra +.586 .771 .968 r +.81361 .45102 .83426 .47469 .88525 .46507 .86526 .44074 Metetra +.369 .728 .989 r +.2046 .50062 .23256 .51585 .27135 .51677 .24377 .50072 Metetra +.411 .742 .993 r +.24377 .50072 .27135 .51677 .31115 .51643 .28397 .49959 Metetra +.445 .751 .994 r +.28397 .49959 .31115 .51643 .35194 .51493 .32518 .49732 Metetra +.473 .758 .992 r +.32518 .49732 .35194 .51493 .39372 .51235 .3674 .49399 Metetra +.495 .762 .99 r +.3674 .49399 .39372 .51235 .43648 .50878 .41061 .48969 Metetra +.513 .765 .988 r +.41061 .48969 .43648 .50878 .4802 .50428 .45481 .48447 Metetra +.529 .767 .985 r +.45481 .48447 .4802 .50428 .52489 .49891 .5 .4784 Metetra +.542 .769 .982 r +.5 .4784 .52489 .49891 .57055 .49273 .54618 .47152 Metetra +.553 .77 .979 r +.54618 .47152 .57055 .49273 .61718 .48578 .59335 .46389 Metetra +.562 .771 .976 r +.59335 .46389 .61718 .48578 .66479 .4781 .64153 .45552 Metetra +.57 .771 .974 r +.64153 .45552 .66479 .4781 .71339 .46973 .69073 .44647 Metetra +.577 .772 .971 r +.69073 .44647 .71339 .46973 .763 .4607 .74095 .43675 Metetra +.583 .772 .969 r +.74095 .43675 .763 .4607 .81361 .45102 .79222 .42639 Metetra +.589 .772 .967 r +.79222 .42639 .81361 .45102 .86526 .44074 .84454 .41542 Metetra +.391 .736 .991 r +.1757 .48463 .2046 .50062 .24377 .50072 .21526 .4839 Metetra +.429 .748 .994 r +.21526 .4839 .24377 .50072 .28397 .49959 .25587 .48195 Metetra +.46 .755 .993 r +.25587 .48195 .28397 .49959 .32518 .49732 .29751 .4789 Metetra +.484 .761 .992 r +.29751 .4789 .32518 .49732 .3674 .49399 .34017 .47481 Metetra +.505 .764 .989 r +.34017 .47481 .3674 .49399 .41061 .48969 
.38385 .46976 Metetra +.522 .767 .986 r +.38385 .46976 .41061 .48969 .45481 .48447 .42854 .46381 Metetra +.536 .769 .983 r +.42854 .46381 .45481 .48447 .5 .4784 .47423 .45701 Metetra +.548 .77 .98 r +.47423 .45701 .5 .4784 .54618 .47152 .52095 .44942 Metetra +.558 .771 .977 r +.52095 .44942 .54618 .47152 .59335 .46389 .56868 .44107 Metetra +.567 .771 .975 r +.56868 .44107 .59335 .46389 .64153 .45552 .61745 .432 Metetra +.574 .772 .972 r +.61745 .432 .64153 .45552 .69073 .44647 .66725 .42224 Metetra +.581 .772 .97 r +.66725 .42224 .69073 .44647 .74095 .43675 .71811 .41181 Metetra +.587 .772 .968 r +.71811 .41181 .74095 .43675 .79222 .42639 .77004 .40074 Metetra +.592 .772 .966 r +.77004 .40074 .79222 .42639 .84454 .41542 .82306 .38904 Metetra +.411 .743 .993 r +.14581 .46784 .1757 .48463 .21526 .4839 .18578 .46625 Metetra +.445 .752 .994 r +.18578 .46625 .21526 .4839 .25587 .48195 .2268 .46348 Metetra +.473 .759 .993 r +.2268 .46348 .25587 .48195 .29751 .4789 .26888 .45963 Metetra +.496 .763 .991 r +.26888 .45963 .29751 .4789 .34017 .47481 .312 .45476 Metetra +.514 .766 .988 r +.312 .45476 .34017 .47481 .38385 .46976 .35615 .44894 Metetra +.53 .768 .985 r +.35615 .44894 .38385 .46976 .42854 .46381 .40134 .44224 Metetra +.543 .77 .982 r +.40134 .44224 .42854 .46381 .47423 .45701 .44756 .4347 Metetra +.554 .771 .979 r +.44756 .4347 .47423 .45701 .52095 .44942 .49482 .42637 Metetra +.563 .771 .976 r +.49482 .42637 .52095 .44942 .56868 .44107 .54312 .41729 Metetra +.571 .772 .973 r +.54312 .41729 .56868 .44107 .61745 .432 .59249 .40749 Metetra +.579 .772 .971 r +.59249 .40749 .61745 .432 .66725 .42224 .64291 .39699 Metetra +.585 .772 .969 r +.64291 .39699 .66725 .42224 .71811 .41181 .69443 .38582 Metetra +.59 .772 .966 r +.69443 .38582 .71811 .41181 .77004 .40074 .74704 .37401 Metetra +.595 .772 .964 r +.74704 .37401 .77004 .40074 .82306 .38904 .80077 .36156 Metetra +.429 .749 .994 r +.1149 .45019 .14581 .46784 .18578 .46625 .15527 .44774 Metetra +.46 .757 .994 r 
+.15527 .44774 .18578 .46625 .2268 .46348 .19673 .44413 Metetra +.485 .762 .992 r +.19673 .44413 .2268 .46348 .26888 .45963 .23925 .43946 Metetra +.506 .766 .989 r +.23925 .43946 .26888 .45963 .312 .45476 .28284 .4338 Metetra +.523 .768 .986 r +.28284 .4338 .312 .45476 .35615 .44894 .32748 .4272 Metetra +.537 .77 .983 r +.32748 .4272 .35615 .44894 .40134 .44224 .37317 .41972 Metetra +.549 .771 .98 r +.37317 .41972 .40134 .44224 .44756 .4347 .41993 .41142 Metetra +.56 .772 .977 r +.41993 .41142 .44756 .4347 .49482 .42637 .46775 .40233 Metetra +.568 .772 .975 r +.46775 .40233 .49482 .42637 .54312 .41729 .51663 .39248 Metetra +.576 .772 .972 r +.51663 .39248 .54312 .41729 .59249 .40749 .56661 .38192 Metetra +.583 .772 .97 r +.56661 .38192 .59249 .40749 .64291 .39699 .61767 .37066 Metetra +.588 .772 .967 r +.61767 .37066 .64291 .39699 .69443 .38582 .66985 .35872 Metetra +.593 .772 .965 r +.66985 .35872 .69443 .38582 .74704 .37401 .72317 .34613 Metetra +.598 .772 .963 r +.72317 .34613 .74704 .37401 .80077 .36156 .77763 .33289 Metetra +.446 .754 .994 r +.08292 .43165 .1149 .45019 .15527 .44774 .1237 .42832 Metetra +.474 .76 .993 r +.1237 .42832 .15527 .44774 .19673 .44413 .16559 .42386 Metetra +.497 .765 .991 r +.16559 .42386 .19673 .44413 .23925 .43946 .20857 .41836 Metetra +.516 .767 .988 r +.20857 .41836 .23925 .43946 .28284 .4338 .25263 .41187 Metetra +.531 .769 .985 r +.25263 .41187 .28284 .4338 .32748 .4272 .29777 .40447 Metetra +.544 .771 .982 r +.29777 .40447 .32748 .4272 .37317 .41972 .34399 .3962 Metetra +.556 .772 .979 r +.34399 .3962 .37317 .41972 .41993 .41142 .39129 .3871 Metetra +.565 .772 .976 r +.39129 .3871 .41993 .41142 .46775 .40233 .43968 .37722 Metetra +.573 .772 .973 r +.43968 .37722 .46775 .40233 .51663 .39248 .48916 .36659 Metetra +.58 .773 .971 r +.48916 .36659 .51663 .39248 .56661 .38192 .53976 .35524 Metetra +.586 .773 .968 r +.53976 .35524 .56661 .38192 .61767 .37066 .59148 .34318 Metetra +.592 .773 .966 r +.59148 .34318 .61767 .37066 .66985 
.35872 .64435 .33044 Metetra +.597 .773 .964 r +.64435 .33044 .66985 .35872 .72317 .34613 .69838 .31704 Metetra +.601 .772 .962 r +.69838 .31704 .72317 .34613 .77763 .33289 .75359 .30298 Metetra +.462 .758 .994 r +.04981 .41218 .08292 .43165 .1237 .42832 .09102 .40795 Metetra +.487 .763 .992 r +.09102 .40795 .1237 .42832 .16559 .42386 .13335 .40262 Metetra +.508 .767 .989 r +.13335 .40262 .16559 .42386 .20857 .41836 .17679 .39627 Metetra +.525 .769 .986 r +.17679 .39627 .20857 .41836 .25263 .41187 .22134 .38894 Metetra +.539 .771 .983 r +.22134 .38894 .25263 .41187 .29777 .40447 .26699 .38071 Metetra +.551 .772 .98 r +.26699 .38071 .29777 .40447 .34399 .3962 .31374 .37162 Metetra +.561 .772 .977 r +.31374 .37162 .34399 .3962 .39129 .3871 .36159 .3617 Metetra +.57 .773 .974 r +.36159 .3617 .39129 .3871 .43968 .37722 .41056 .35101 Metetra +.578 .773 .972 r +.41056 .35101 .43968 .37722 .48916 .36659 .46066 .33956 Metetra +.584 .773 .969 r +.46066 .33956 .48916 .36659 .53976 .35524 .5119 .32738 Metetra +.59 .773 .967 r +.5119 .32738 .53976 .35524 .59148 .34318 .56429 .31449 Metetra +.595 .773 .965 r +.56429 .31449 .59148 .34318 .64435 .33044 .61786 .30092 Metetra +.6 .773 .963 r +.61786 .30092 .64435 .33044 .69838 .31704 .67262 .28667 Metetra +.604 .772 .961 r +.67262 .28667 .69838 .31704 .75359 .30298 .7286 .27174 Metetra +.477 .762 .993 r +.01552 .39172 .04981 .41218 .09102 .40795 .05716 .38658 Metetra +.499 .766 .991 r +.05716 .38658 .09102 .40795 .13335 .40262 .09995 .38036 Metetra +.518 .769 .988 r +.09995 .38036 .13335 .40262 .17679 .39627 .14386 .37313 Metetra +.534 .77 .985 r +.14386 .37313 .17679 .39627 .22134 .38894 .1889 .36494 Metetra +.547 .772 .981 r +.1889 .36494 .22134 .38894 .26699 .38071 .23507 .35585 Metetra +.558 .772 .978 r +.23507 .35585 .26699 .38071 .31374 .37162 .28236 .34591 Metetra +.567 .773 .975 r +.28236 .34591 .31374 .37162 .36159 .3617 .33078 .33515 Metetra +.575 .773 .973 r +.33078 .33515 .36159 .3617 .41056 .35101 .38035 .32361 Metetra 
+.582 .773 .97 r +.38035 .32361 .41056 .35101 .46066 .33956 .43107 .31131 Metetra +.588 .773 .968 r +.43107 .31131 .46066 .33956 .5119 .32738 .48296 .29828 Metetra +.594 .773 .965 r +.48296 .29828 .5119 .32738 .56429 .31449 .53604 .28453 Metetra +.599 .773 .963 r +.53604 .28453 .56429 .31449 .61786 .30092 .59033 .27007 Metetra +.603 .773 .961 r +.59033 .27007 .61786 .30092 .67262 .28667 .64585 .25493 Metetra +.607 .772 .959 r +.64585 .25493 .67262 .28667 .7286 .27174 .70262 .2391 Metetra +0 g +.25 Mabswid +.68874 0 m +.96935 .42924 L +s +.96935 .42924 m +1 .6535 L +s +1 .6535 m +.70298 .24544 L +s +.70298 .24544 m +.68874 0 L +s +.03716 .25514 m +0 .48963 L +s +0 .48963 m +.70298 .24544 L +s +.70298 .24544 m +.68874 0 L +s +.68874 0 m +.03716 .25514 L +s +.03716 .25514 m +.68874 0 L +s +.03716 .25514 m +.04196 .25962 L +s +[(0)] .02757 .24618 1 .93395 Mshowa +.1552 .20892 m +.15981 .21359 L +s +[(0.2)] .14598 .19958 .98733 1 Mshowa +.27893 .16047 m +.28333 .16533 L +s +[(0.4)] .27013 .15073 .90393 1 Mshowa +.40878 .10962 m +.41294 .11469 L +s +[(0.6)] .40046 .09948 .82054 1 Mshowa +.54521 .0562 m +.54911 .06148 L +s +[(0.8)] .53743 .04564 .73714 1 Mshowa +.68874 0 m +.69233 .00549 L +s +[(1)] .68156 -0.01098 .65374 1 Mshowa +.125 Mabswid +.06616 .24378 m +.06901 .2465 L +s +.09549 .2323 m +.09832 .23504 L +s +.12517 .22067 m +.12797 .22345 L +s +.18558 .19702 m +.18832 .19985 L +s +.21633 .18498 m +.21903 .18784 L +s +.24744 .1728 m +.25012 .17569 L +s +.3108 .14799 m +.31341 .15094 L +s +.34306 .13535 m +.34564 .13834 L +s +.37572 .12257 m +.37826 .12558 L +s +.44225 .09652 m +.44471 .09959 L +s +.47614 .08324 m +.47856 .08635 L +s +.51046 .06981 m +.51284 .07294 L +s +.58041 .04242 m +.5827 .04562 L +s +.61605 .02846 m +.6183 .03169 L +s +.65216 .01432 m +.65436 .01759 L +s +gsave +.29165 .07573 -70.3112 -16.5625 Mabsadd m +1 1 Mabs scale +currentpoint translate +/MISOfy +{ + /newfontname exch def + /oldfontname exch def + + oldfontname findfont + dup length dict 
begin + {1 index /FID ne {def} {pop pop} ifelse} forall + /Encoding ISOLatin1Encoding def + currentdict + end + + newfontname exch definefont pop +} def + +0 20.5625 translate 1 -1 scale +63.000 12.813 moveto +%%IncludeResource: font Courier +%%IncludeFont: Courier +/Courier findfont 10.000 scalefont +[1 0 0 -1 0 0 ] makefont setfont +0.000 0.000 0.000 setrgbcolor +(t) show +1.000 setlinewidth +grestore +% End of Graphics +MathPictureEnd + +%%PSTrailer +end +grestore + diff --git a/interact.cls b/interact.cls new file mode 100644 index 0000000..89755b2 --- /dev/null +++ b/interact.cls @@ -0,0 +1,734 @@ +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +%% +%% This is file 'interact.cls' +%% +%% This file is part of a Taylor & Francis 'Interact' LaTeX bundle. +%% +%% v1.01 - 2016/03/11 +%% +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +% +\NeedsTeXFormat{LaTeX2e} +\ProvidesClass{interact}[2016/03/11 v1.01 Interact LaTeX2e document class] +% +\newif\iflargeformat +\newif\ifsuppldata +% +\DeclareOption{largeformat}{\largeformattrue} +\DeclareOption{suppldata}{\suppldatatrue} +\DeclareOption*{\PassOptionsToClass{\CurrentOption}{article}} +\ExecuteOptions{a4paper,oneside,onecolumn,final} +\ProcessOptions +% +\LoadClass[11pt,a4paper]{article} +% +\RequirePackage{amsmath,amssymb,amsfonts,amsbsy,amsthm,booktabs,epsfig,graphicx,rotating} +% +\iflargeformat +\RequirePackage[left=1in,right=1in,top=1in,bottom=1in]{geometry} +\else +\RequirePackage[textwidth=34pc,textheight=650pt]{geometry} +\setlength\parindent{12pt} +\fi +% +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% Fonts %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +% +\def\abstractfont{\fontsize{9}{10}\selectfont\leftskip1pc\rightskip5pc}% +\def\affilfont{\fontsize{10}{12}\selectfont\raggedright}% +\def\articletypefont{\fontsize{12}{14}\selectfont\MakeUppercase}% +\def\authorfont{\fontsize{11}{13}\selectfont\raggedright}% 
+\def\extractfont{\fontsize{10}{12}\selectfont\leftskip12pt\rightskip12pt}% +\def\figcaptionfont{\fontsize{8}{10}\selectfont}% +\def\fignumfont{\fontsize{8}{10}\selectfont\bfseries}% +\def\historyfont{\fontsize{9}{10}\selectfont\leftskip1pc\rightskip5pc plus1fill}% +\def\keywordfont{\fontsize{9}{10}\selectfont\leftskip1pc\rightskip5pc plus1fill}% +\def\receivedfont{\fontsize{9}{12}\selectfont\leftskip1pc\rightskip5pc}% +\def\sectionfont{\fontsize{11}{13}\selectfont\bfseries\raggedright\boldmath}% +\def\subsectionfont{\fontsize{11}{13}\selectfont\bfseries\itshape\raggedright\boldmath}% +\def\subsubsectionfont{\fontsize{11}{13}\selectfont\itshape\raggedright}% +\def\paragraphfont{\fontsize{11}{13}\selectfont\bfseries\boldmath}% +\def\subparagraphfont{\fontsize{11}{13}\selectfont\itshape}% +\def\tablecaptionfont{\fontsize{8}{10}\selectfont\leftskip\tabledim\rightskip\tabledim}% +\def\tablefont{\fontsize{8}{9}\selectfont}% +\def\tablenumfont{\fontsize{8}{10}\selectfont\bfseries}% +\def\tabnotefont{\fontsize{8}{9}\selectfont}% +\def\thanksfont{\fontsize{8}{10}\selectfont}% +\def\titlefont{\fontsize{13}{16}\selectfont\bfseries\raggedright\boldmath}% +% +\def\@xpt{10} +\def\@xipt{11} +\def\@xiiipt{13} +\def\@xivpt{14} +\def\@xvipt{16} +\def\@xviiipt{18} +% +\renewcommand\normalsize{% + \@setfontsize\normalsize\@xipt\@xiiipt + \abovedisplayskip 13\p@ \@plus2\p@ minus.5pt + \abovedisplayshortskip \abovedisplayskip + \belowdisplayskip 13\p@ \@plus2\p@ minus.5pt + \belowdisplayshortskip\belowdisplayskip + \let\@listi\@listI} +% +\renewcommand\small{% + \@setfontsize\small\@xpt{11}% + \abovedisplayskip 8.5\p@ \@plus3\p@ + \abovedisplayshortskip \z@ \@plus2\p@ + \belowdisplayshortskip 4\p@ \@plus2\p@ + \def\@list1{\leftmargin\leftmargin1 + \topsep 6\p@ \@plus2\p@ + \parsep 2\p@ \@plus\p@ + \itemsep \parsep}% + \belowdisplayskip \abovedisplayskip\setSmallDelims} +% +\def\setSmallDelims{% +\def\big##1{{\hbox{$\left##1\vbox to7.5\p@{}\right.\n@space$}}}% 
+\def\Big##1{{\hbox{$\left##1\vbox to10.5\p@{}\right.\n@space$}}}% +\def\bigg##1{{\hbox{$\left##1\vbox to13.5\p@{}\right.\n@space$}}}% +\def\Bigg##1{{\hbox{$\left##1\vbox to16.5\p@{}\right.\n@space$}}}% +\def\biggg##1{{\hbox{$\left##1\vbox to19.5\p@{}\right.\n@space$}}}% +\def\Biggg##1{{\hbox{$\left##1\vbox to22.5\p@{}\right.\n@space$}}}% +} +% +\renewcommand\footnotesize{% + \@setfontsize\footnotesize\@viiipt{10}% + \abovedisplayskip 6\p@ \@plus2\p@ + \abovedisplayshortskip \z@ \@plus\p@ + \belowdisplayshortskip 3\p@ \@plus\p@ + \def\@listi{\leftmargin\leftmargini + \topsep 6\p@ \@plus\p@ + \parsep 2\p@ \@plus\p@ + \itemsep \parsep}% + \belowdisplayskip \abovedisplayskip\setFootnotesizeDelims + } +% +\def\setFootnotesizeDelims{% +\def\big##1{{\hbox{$\left##1\vbox to6.5\p@{}\right.\n@space$}}}% +\def\Big##1{{\hbox{$\left##1\vbox to9.5\p@{}\right.\n@space$}}}% +\def\bigg##1{{\hbox{$\left##1\vbox to12.5\p@{}\right.\n@space$}}}% +\def\Bigg##1{{\hbox{$\left##1\vbox to15.5\p@{}\right.\n@space$}}}% +\def\biggg##1{{\hbox{$\left##1\vbox to18.5\p@{}\right.\n@space$}}}% +\def\Biggg##1{{\hbox{$\left##1\vbox to21.5\p@{}\right.\n@space$}}}% +} +% +\def\capsdefault{caps}% +\DeclareRobustCommand\capsshape + {\not@math@alphabet\capsshape\mathrm + \fontshape\capsdefault\selectfont} +% +\DeclareOldFontCommand{\bi}{\bfseries\itshape}{\bfseries\itshape} +\renewcommand{\cal}{\protect\pcal}% +\newcommand{\pcal}{\@fontswitch{\relax}{\mathcal}} +\renewcommand{\mit}{\protect\pmit}% +\newcommand{\pmit}{\@fontswitch{\relax}{\mathnormal}} +% +\renewcommand\rmdefault{cmr} +\newcommand\rmmathdefault{cmr} +% +\DeclareFontFamily{OT1}{Clearface}{} +\DeclareFontShape{OT1}{Clearface}{m}{n}{ <-> Clearface-Regular }{} +\DeclareFontShape{OT1}{Clearface}{m}{it}{ <-> Clearface-RegularItalic }{} +\def\encodingdefault{OT1}% +\fontencoding{OT1}% +% +\def\boldmath{\mathversion{bold}} +\def\bm#1{\mathchoice + {\mbox{\boldmath$\displaystyle#1$}}% + {\mbox{\boldmath$#1$}}% + {\mbox{\boldmath$\scriptstyle#1$}}% 
+ {\mbox{\boldmath$\scriptscriptstyle#1$}}} +% +\providecommand{\mathch}[2]{% + \begingroup + \let\@nomath\@gobble \mathversion{#1}% + \math@atom{#2}{% + \mathchoice% + {\hbox{$\m@th\displaystyle#2$}}% + {\hbox{$\m@th\textstyle#2$}}% + {\hbox{$\m@th\scriptstyle#2$}}% + {\hbox{$\m@th\scriptscriptstyle#2$}}}% + \endgroup} +% +\DeclareFontFamily{OML}{eur}{\skewchar\font'177} +\DeclareFontShape{OML}{eur}{m}{n}{ + <5> <6> <7> <8> <9> gen * eurm + <10> <10.95> <12> <14.4> <17.28> <20.74> <24.88> eurm10 + }{} +\DeclareFontShape{OML}{eur}{b}{n}{ + <5> <6> <7> <8> <9> gen * eurb + <10> <10.95> <12> <14.4> <17.28> <20.74> <24.88> eurb10 + }{} +% +\DeclareMathVersion{upright} +\DeclareMathVersion{boldupright} +\SetSymbolFont{letters}{upright} {OML}{eur}{m}{n} +\SetSymbolFont{letters}{boldupright}{OML}{eur}{b}{n} +\DeclareRobustCommand{\mathup}[1]{\mathch{upright}{#1}} +\DeclareRobustCommand{\mathbup}[1]{\mathch{boldupright}{#1}} +% +\newcommand\ualpha{\mathup{\alpha}} +\newcommand\ubeta{\mathup{\beta}} +\newcommand\ugamma{\mathup{\gamma}} +\newcommand\udelta{\mathup{\delta}} +\newcommand\uepsilon{\mathup{\epsilon}} +\newcommand\uzeta{\mathup{\zeta}} +\newcommand\ueta{\mathup{\eta}} +\newcommand\utheta{\mathup{\theta}} +\newcommand\uiota{\mathup{\iota}} +\newcommand\ukappa{\mathup{\kappa}} +\newcommand\ulambda{\mathup{\lambda}} +\newcommand\umu{\mathup{\mu}} +\newcommand\unu{\mathup{\nu}} +\newcommand\uxi{\mathup{\xi}} +\newcommand\upi{\mathup{\pi}} +\newcommand\urho{\mathup{\rho}} +\newcommand\usigma{\mathup{\sigma}} +\newcommand\utau{\mathup{\tau}} +\newcommand\uupsilon{\mathup{\upsilon}} +\newcommand\uphi{\mathup{\phi}} +\newcommand\uchi{\mathup{\chi}} +\newcommand\upsi{\mathup{\psi}} +\newcommand\uomega{\mathup{\omega}} +\newcommand\uvarepsilon{\mathup{\varepsilon}} +\newcommand\uvartheta{\mathup{\vartheta}} +\newcommand\uvarpi{\mathup{\varpi}} +\let\uvarrho\varrho +\let\uvarsigma\varsigma +\newcommand\uvarphi{\mathup{\varphi}} +\newcommand\ubalpha{\mathbup{\alpha}} 
+\newcommand\ubbeta{\mathbup{\beta}} +\newcommand\ubgamma{\mathbup{\gamma}} +\newcommand\ubdelta{\mathbup{\delta}} +\newcommand\ubepsilon{\mathbup{\epsilon}} +\newcommand\ubzeta{\mathbup{\zeta}} +\newcommand\uboldeta{\mathbup{\eta}} +\newcommand\ubtheta{\mathbup{\theta}} +\newcommand\ubiota{\mathbup{\iota}} +\newcommand\ubkappa{\mathbup{\kappa}} +\newcommand\ublambda{\mathbup{\lambda}} +\newcommand\ubmu{\mathbup{\mu}} +\newcommand\ubnu{\mathbup{\nu}} +\newcommand\ubxi{\mathbup{\xi}} +\newcommand\ubpi{\mathbup{\pi}} +\newcommand\ubrho{\mathbup{\rho}} +\newcommand\ubsigma{\mathbup{\sigma}} +\newcommand\ubtau{\mathbup{\tau}} +\newcommand\ubupsilon{\mathbup{\upsilon}} +\newcommand\ubphi{\mathbup{\phi}} +\newcommand\ubchi{\mathbup{\chi}} +\newcommand\ubpsi{\mathbup{\psi}} +\newcommand\ubomega{\mathbup{\omega}} +\newcommand\ubvarepsilon{\mathbup{\varepsilon}} +\newcommand\ubvartheta{\mathbup{\vartheta}} +\newcommand\ubvarpi{\mathbup{\varpi}} +\newcommand\ubvarrho{\boldsymbol{\varrho}} +\newcommand\ubvarsigma{\boldsymbol{\varsigma}} +\newcommand\ubvarphi{\mathbup{\varphi}} +\newcommand\upartial {\mathup{\partial}} +\newcommand\ubpartial{\mathbup{\partial}} +% +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% End Fonts %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% Title commands %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +% +\def\articletype#1{\gdef\@articletype{{#1}}\MakeUppercase} +\def\articletype#1{\gdef\@articletype{#1}} +\gdef\@articletype{\ } +\def\title#1{\gdef\@title{{#1}}} +\def\author#1{\def\and{and }\gdef\@author{#1}} +\def\received#1{\gdef\@received{#1}} +\def\history#1{\gdef\@received{#1}} +\gdef\@history{\bfseries{ARTICLE HISTORY\\}\par} +\gdef\@received{Compiled \today} +% +\long\def\name#1{#1\\\vspace{6pt}}% +\long\def\affil#1{\affilfont{#1}\\} +\long\def\email#1{#1\\} +% +\def\thanks#1{\begingroup +\def\protect{\noexpand\protect\noexpand}\xdef\@thanks{\@thanks% + \protect\footnotetext[\the\c@footnote]{\thanksfont#1}}\endgroup} +% 
+\renewcommand\maketitle{\par% + \renewcommand\thefootnote{}% + \begingroup + \@maketitle% + \thispagestyle{title} + \endgroup + \@thanks + \let\@maketitle\relax + \gdef\@thanks{}\gdef\@author{}\gdef\@title{}\gdef\@articletype{}% + \renewcommand\thefootnote{\arabic{footnote}}% + \@afterheading} +% +\def\@maketitle{\thispagestyle{plain} + \clearpage + \null + \bgroup + \parindent0pt + \vspace*{36pt} + {\articletypefont{\@articletype}\par}% + \vskip13pt + {\titlefont{\@title}\par}% + \vskip13pt + {\authorfont\@author\par}% + \ifsuppldata\else + \vskip17pt + {\receivedfont{\bfseries ARTICLE HISTORY\\}\@received\par}% + \fi + \vskip13pt + \egroup} +% +\renewenvironment{abstract}{% + \par\addvspace{0pt plus2pt minus1pt} + \abstractfont\noindent{\bfseries \abstractname\\}\ignorespaces% +}{% + \par\addvspace{13pt plus2pt minus1pt} + \@endparenv} +% +\newenvironment{abbreviations}{% + \par\addvspace{13pt plus2pt minus1pt} + \abstractfont\noindent{\bfseries \abbreviationsname: }\ignorespaces% +}{% + \par\addvspace{13pt plus2pt minus1pt} + \@endparenv} +% +\newenvironment{keywords}{% + \par\addvspace{13pt plus2pt minus1pt} + \keywordfont\noindent{\bfseries \keywordsname\\}\ignorespaces% +}{% + \par\addvspace{13pt plus2pt minus1pt} + \@endparenv} +% +\newenvironment{amscode}{% + \par\addvspace{13pt plus2pt minus1pt} + \keywordfont\noindent{\bfseries{AMS CLASSIFICATION}\\}\ignorespaces% +}{% + \par\addvspace{13pt plus2pt minus1pt} + \@endparenv} +% +\newenvironment{jelcode}{% + \par\addvspace{13pt plus2pt minus1pt} + \keywordfont\noindent{\bfseries{JEL CLASSIFICATION}\\}\ignorespaces% +}{% + \par\addvspace{13pt plus2pt minus1pt} + \@endparenv} +% +\newenvironment{pacscode}{% + \par\addvspace{13pt plus2pt minus1pt} + \keywordfont\noindent{\bfseries{PACS CLASSIFICATION}\\}\ignorespaces% +}{% + \par\addvspace{13pt plus2pt minus1pt} + \@endparenv} +% +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% End Title commands %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%% Sectioning 
commands %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +% +\setcounter{secnumdepth}{5} +%\newcounter {part} +%\newcounter {section} +%\newcounter {subsection}[section] +%\newcounter {subsubsection}[subsection] +%\newcounter {paragraph}[subsubsection] +%\newcounter {subparagraph}[paragraph] +\renewcommand\thepart {\arabic{part}} +\renewcommand\thesection {\arabic{section}} +\renewcommand\thesubsection {\thesection.\arabic{subsection}} +\renewcommand\thesubsubsection {\thesubsection.\arabic{subsubsection}} +\renewcommand\theparagraph {\thesubsubsection.\arabic{paragraph}} +\renewcommand\thesubparagraph {\theparagraph.\arabic{subparagraph}} +% +\renewcommand\section{\@startsection {section}{1}{\z@}% + {-26pt \@plus-4pt \@minus-2pt}% + {13pt}% + {\sectionfont}}% +\renewcommand\subsection{\@startsection{subsection}{2}{\z@}% + {-24pt \@plus-3pt \@minus-2pt}% + {7pt}% + {\subsectionfont}}% +\renewcommand\subsubsection{\@startsection{subsubsection}{3}{\z@}% + {18pt \@plus2pt \@minus2pt}% + {6pt}% + {\subsubsectionfont}}% +\renewcommand\paragraph{\@startsection{paragraph}{4}{\z@}% + {18pt \@plus1pt \@minus1pt}% + {-6pt}% + {\paragraphfont}}% +\renewcommand\subparagraph{\@startsection{subparagraph}{5}{\z@}% + {3.25ex \@plus1ex}% + {-1em}% + {\reset@font\normalsize}}% +% +\def\@startsection#1#2#3#4#5#6{% + \if@noskipsec \leavevmode \fi + \par + \@tempskipa #4\relax + \ifdim \@tempskipa <\z@ + \@tempskipa -\@tempskipa \@afterindentfalse + \fi + \if@nobreak + \ifnum#2=3 + \vskip4pt + \fi + \everypar{}% + \else + \addpenalty\@secpenalty\addvspace\@tempskipa + \fi + \@ifstar + {\@ssect{#3}{#4}{#5}{#6}}% + {\@dblarg{\@sect{#1}{#2}{#3}{#4}{#5}{#6}}}} +% +\def\@sseccntformat#1{\csname the#1\endcsname.\quad} +\def\@appseccntformat#1{\appendixname\ \csname the#1\endcsname.\ } +\def\@seccntformat#1{\csname the#1\endcsname.\quad} +\def\@sect#1#2#3#4#5#6[#7]#8{\ifnum #2>\c@secnumdepth + \let\@svsec\@empty\else + \refstepcounter{#1}% + \let\@@protect\protect + \def\protect{\noexpand\protect\noexpand}% 
+ \ifnum#2>\@ne\edef\@svsec{\@sseccntformat{#1}}\else\edef\@svsec{\@seccntformat{#1}}\fi% + \let\protect\@@protect\fi + \@tempskipa #5\relax + \ifdim \@tempskipa>\z@ + \begingroup #6\relax + \ifnum#2=1 + \@hangfrom{\hskip #3\relax\@svsec}% + {\interlinepenalty \@M {#8}\par}% + \else + \ifnum#2=2 + \@hangfrom{\hskip #3\relax{\em\@svsec}}% + {\interlinepenalty \@M #8\par}% + \else + \@hangfrom{\hskip #3\relax\@svsec}% + {\interlinepenalty \@M #8\par}% + \fi + \fi + \endgroup + \csname #1mark\endcsname{#7} + \addcontentsline + {toc}{#1}{\ifnum #2>\c@secnumdepth \else + \protect\numberline{\csname the#1\endcsname}\fi + #7}% + \else% + \def\@svsechd{#6\hskip #3\relax + \em\@svsec #8.\csname #1mark\endcsname + {#7}\addcontentsline + {toc}{#1}{\ifnum #2>\c@secnumdepth \else + \protect\numberline{\csname the#1\endcsname}% + \fi + #7}}\fi + \@xsect{#5}} +% +%%%%%%%%%%%%%%%%%%%%%%%%%%% End Sectioning commands %%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% Captions %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +% +\newbox\tempbox +% +\setlength\abovecaptionskip{7\p@} +\setlength\belowcaptionskip{\z@} +% +\def\FigName{figure} +% +\long\def\@caption#1[#2]#3{\par\begingroup + \@parboxrestore + \normalsize + \@makecaption{\csname fnum@#1\endcsname}{\ignorespaces #3}\par + \endgroup} +% +\long\def\@makecaption#1#2{% + \ifx\FigName\@captype + \vskip5pt + \setbox\tempbox\hbox{\figcaptionfont{\fignumfont#1}\hskip4pt#2}% + \ifdim\wd\tempbox>\hsize + {\figcaptionfont\noindent{\fignumfont#1}\hskip7pt\ignorespaces#2\par} + \else + {\box\tempbox}% + \fi + \else + {\tablecaptionfont + {\tablenumfont#1}\hskip7pt\ignorespaces{#2}\par}% + \vskip\belowcaptionskip + \fi} +% +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% End Captions %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% Figures %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +% +\renewcommand\thefigure{\@arabic\c@figure} +\def\fps@figure{tbp} +\def\ftype@figure{1} +\def\ext@figure{lof} 
+\def\fnum@figure{\figurename\nobreakspace\thefigure.} +\renewenvironment{figure}% + {\figcaptionfont\@float{figure}} + {\end@float} +\renewenvironment{figure*}% + {\figcaptionfont\@dblfloat{figure}} + {\end@dblfloat} + +\def\ArtDir{art/}% +\def\ArtPiece{\@ifnextchar[{\@ArtPiece}{\@ArtPiece[]}}% +\def\@ArtPiece[#1]#2{\def\@tempa{#1}% + \hbox{\ifx\@tempa\@empty\else\epsfscale#1\fi + \noindent{\epsfbox{\ArtDir#2}}}}% +% +\newdimen\figheight +\newdimen\figwidth +% +\let\figformat\centerline +% +\def\figurebox#1#2#3#4{% + \global\figheight#1\global\figwidth#2 + \def\@tempa{#4}% + \leavevmode + \ifx\@tempa\empty + \figformat{\figbox}% + \else + \figformat{\ArtPiece[#3]{#4}}% + \fi\par} +% +\def\figbox{\hbox{\vbox{\hsize\figwidth + \hrule + \hbox to\figwidth{\vrule\hss + \vbox to \figheight{\vfill}% + \vrule}\hrule}}}% +% +\def\figformat#1{\footnotesize#1}% +% +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% End Figures %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% Tables %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +% +\renewcommand\thetable{\@arabic\c@table} +\def\fps@table{tbp} +\def\ftype@table{2} +\def\ext@table{lot} +\def\fnum@table{\tablename\nobreakspace\thetable.} +\renewenvironment{table}% + {\@float{table}} + {\vskip5pt\end@float} +\renewenvironment{table*}% + {\@dblfloat{table}} + {\end@dblfloat} +% +\newdimen\tabledim +% +\long\def\tbl#1#2{% + \setbox\tempbox\hbox{\tablefont #2}% + \tabledim\hsize\advance\tabledim by -\wd\tempbox + \global\divide\tabledim\tw@ + \caption{#1} + \centerline{\box\tempbox} + }% +% +\newenvironment{tabnote}{% +\par\vskip5pt\tabnotefont +\@ifnextchar[{\@tabnote}{\@tabnote[]}}{% +\par\vskip-5pt} +\def\@tabnote[#1]{\def\@Tempa{#1}\leftskip\tabledim\rightskip\leftskip%\hspace*{9pt}% +\ifx\@Tempa\@empty\else{\itshape #1:}\ \fi\ignorespaces} +% +\def\x{@{\extracolsep{\fill}}} +\renewcommand\toprule{\\[-5.5pt]\hline\\[-5pt]} +\renewcommand\midrule{\\[-7.5pt]\hline\\[-5pt]} +\renewcommand\bottomrule{\\[-7.5pt]\hline} +% 
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% End Tables %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% Lists %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +% +\newdimen\LabelSep +\LabelSep.5em +\newskip\TopSep +\TopSep 6\p@ %\@plus2\p@% \@minus1\p@ +% +\def\@listI{\leftmargin\leftmargini + \listparindent\parindent + \parsep \z@\labelsep\LabelSep + \topsep\TopSep + \itemsep0\p@} +% +\let\@listi\@listI +\@listi +% +\def\@listii {\leftmargin\leftmarginii + \labelwidth\leftmarginii + \listparindent\parindent + \parsep \z@\labelsep\LabelSep + \topsep 0pt%6\p@ \@plus2\p@ \@minus1\p@ + \parsep\z@\itemsep\z@} +\def\@listiii{\leftmargin\leftmarginiii + \listparindent\parindent + \labelwidth\leftmarginiii + \topsep 0pt + \parsep \z@ + \partopsep0pt + \itemsep0pt} +\def\@listiv {\leftmargin\leftmarginiv + \labelwidth\leftmarginiv + \advance\labelwidth-\labelsep} +\def\@listv {\leftmargin\leftmarginv + \labelwidth\leftmarginv + \advance\labelwidth-\labelsep} +\def\@listvi {\leftmargin\leftmarginvi + \labelwidth\leftmarginvi + \advance\labelwidth-\labelsep} +% +\setlength\leftmargini {2.5em} +\leftmargin \leftmargini +\setlength\leftmarginii {2.2em} +\setlength\leftmarginiii {1.87em} +\setlength\leftmarginiv {1.7em} +\setlength\leftmarginv {1em} +\setlength\leftmarginvi {1em} +\setlength \labelsep {.5em} +\setlength \labelwidth{\leftmargini} +\addtolength\labelwidth{-\labelsep} +\@beginparpenalty -\@lowpenalty +\@endparpenalty -\@lowpenalty +\@itempenalty -\@lowpenalty +\renewcommand\theenumi{\@arabic\c@enumi} +\renewcommand\theenumii{\@alph\c@enumii} +\renewcommand\theenumiii{\@roman\c@enumiii} +\renewcommand\theenumiv{\@Alph\c@enumiv} +\renewcommand\labelenumi{(\theenumi)} +\renewcommand\labelenumii{(\theenumii)} +\renewcommand\labelenumiii{(\theenumiii)} +\renewcommand\labelenumiv{(\theenumiv)} +\renewcommand\p@enumii{\theenumi} +\renewcommand\p@enumiii{\theenumi(\theenumii)} +\renewcommand\p@enumiv{\p@enumiii\theenumiii} 
+\renewcommand\labelitemi{$\m@th\bullet$} +\renewcommand\labelitemii{$\m@th\circ$} +\renewcommand\labelitemiii{\normalfont\textendash} +\renewcommand\labelitemiv{$\m@th\ast$} +% +\renewenvironment{description}% + {\list{}{\labelwidth\z@ \itemindent-\leftmargin + \let\makelabel\descriptionlabel}} + {\endlist} +\renewcommand*\descriptionlabel[1]{\hspace\labelsep% + \normalfont\bfseries #1} +% +\renewenvironment{quote}{% + \par\addvspace{13pt plus2pt minus1pt} + \extractfont\noindent\ignorespaces +}{% + \par\addvspace{13pt plus2pt minus1pt} + \@endparenv} +% +\renewenvironment{quote}{% + \par\addvspace{6pt}\let\itemize\Itemize\let\enditemize\endItemize + \extractfont\noindent\ignorespaces +}{% + \par\addvspace{6pt} + \@endparenv} +% +\renewenvironment{quotation}{% + \par\addvspace{13pt plus2pt minus1pt} + \extractfont\ignorespaces +}{% + \par\addvspace{13pt plus2pt minus1pt} + \@endparenv} +% +\renewenvironment{quotation}{% + \par\addvspace{6pt}\let\itemize\Itemize\let\enditemize\endItemize + \extractfont\ignorespaces +}{% + \par\addvspace{6pt} + \@endparenv} +% +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% End Lists %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +% +\renewcommand{\appendix}{% + \let\@seccntformat\@appseccntformat + \setcounter{equation}{0}\renewcommand\theequation{\thesection\arabic{equation}}% + \setcounter{section}{0}\renewcommand\thesection {\Alph{section}}% + \setcounter{subsection}{0}\renewcommand\thesubsection {\thesection.\arabic{subsection}}% + \setcounter{table}{0}\renewcommand\thetable{\thesection\@arabic\c@table}% + \setcounter{figure}{0}\renewcommand\thefigure{\thesection\@arabic\c@figure}% + \@addtoreset{equation}{section} + \@addtoreset{table}{section} + \@addtoreset{figure}{section} +} +% +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% Footnotes %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +% +\renewcommand\footnoterule{% + \kern2pt + \hrule width\textwidth height.25pt + \kern4pt} +\renewcommand\@makefntext[1]{% + \parindent 0.5em% + \noindent + 
\hb@xt@1em{\hss\@makefnmark}#1} +% +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% End Footnotes %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% Page styles %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +% +\def\endpage#1{\gdef\@endpage{#1}} +\endpage{}% +\def\jname#1{\gdef\@jname{#1}} +\gdef\@jname{} +\def\jvol#1{\gdef\@jvol{#1}} +\gdef\@jvol{00} +\def\jnum#1{\gdef\@jnum{#1}} +\gdef\@jnum{00} +\def\jmonth#1{\gdef\@jmonth{#1}} +\gdef\@jmonth{Month} +\def\jyear#1{\gdef\@jyear{#1}} +\gdef\@jyear{20XX} +\def\doi#1{\gdef\@doi{#1}} +\gdef\@doi{} +% +\def\ps@title{% + \let\@oddfoot\@empty + \let\@evenfoot\@empty + \def\@evenhead{}% + \def\@oddhead{}% + \let\@mkboth\@gobbletwo + \let\sectionmark\@gobble + \let\subsectionmark\@gobble + } +% +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% End Page styles %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +%%%%%%%%%%%%%%%%%%%%%%%%%%%% Theorem-like structures %%%%%%%%%%%%%%%%%%%%%%%%%%%% +% +\renewenvironment{proof}[1][\proofname]{\par + \pushQED{\qed}% + \normalfont \topsep6\p@\@plus6\p@\relax + \trivlist + \item[\hskip\labelsep + \bfseries\itshape + #1\@addpunct{.}]\ignorespaces +}{\popQED\endtrivlist\@endpefalse} +% +\newtheoremstyle{plain} + {9pt}{9pt}{\itshape}{}{\bfseries}{.}{0.5em}{} +% +\newtheoremstyle{definition} + {9pt}{9pt}{}{}{\bfseries}{.}{0.5em}{} +% +\newtheoremstyle{remark} + {9pt}{9pt}{}{}{\bfseries}{.}{0.5em}{} +% +%%%%%%%%%%%%%%%%%%%%%%%%%% End Theorem-like structures %%%%%%%%%%%%%%%%%%%%%%%%%% +% +\newcommand\abbreviationsname{Abbreviations} +\renewcommand\abstractname{ABSTRACT} +\newcommand\keywordsname{KEYWORDS} +% +\setlength\parskip{0\p@} +\setlength\columnsep{12\p@} +\setlength\columnseprule{0\p@} +\pagestyle{plain} +\pagenumbering{arabic} +\frenchspacing +\sloppy +\onecolumn +% +\endinput diff --git a/iv_perspective_example.RDS b/iv_perspective_example.RDS new file mode 100644 index 0000000..3bec9c5 Binary files /dev/null and b/iv_perspective_example.RDS differ diff --git a/measurement_flow.pdf b/measurement_flow.pdf new file 
mode 100644 index 0000000..1c5ec95 Binary files /dev/null and b/measurement_flow.pdf differ diff --git a/parrot.pdf b/parrot.pdf new file mode 100644 index 0000000..36dd711 Binary files /dev/null and b/parrot.pdf differ diff --git a/remember_grid_sweep.RDS b/remember_grid_sweep.RDS new file mode 100644 index 0000000..2440523 Binary files /dev/null and b/remember_grid_sweep.RDS differ diff --git a/remember_irr.RDS b/remember_irr.RDS new file mode 100644 index 0000000..a44bc42 Binary files /dev/null and b/remember_irr.RDS differ diff --git a/remember_robustness_mispec.RDS b/remember_robustness_mispec.RDS new file mode 100644 index 0000000..07d2a51 Binary files /dev/null and b/remember_robustness_mispec.RDS differ diff --git a/remembr.RDS b/remembr.RDS new file mode 100644 index 0000000..448d5cc Binary files /dev/null and b/remembr.RDS differ diff --git a/resources/#robustness_check_plots.R# b/resources/#robustness_check_plots.R# new file mode 100644 index 0000000..3c21cd1 --- /dev/null +++ b/resources/#robustness_check_plots.R# @@ -0,0 +1,266 @@ +library(data.table) +library(ggplot2) +source('resources/functions.R') + +plot.robustness.1 <- function(iv='x'){ + +## robustness check 1 test g + r <- readRDS('robustness_1.RDS') + baseline_df <- readRDS('remembr.RDS')[['plot.df.example.2']] + robust_df <- data.table(r$robustness_1) + + ## just compare the mle methods in the two examples + robust_df <- robust_df[Bzy!=0] + robust_df <- robust_df[Bzx!=0] + baseline_df[method=='true', method:='True'] + robust_df[method=='true', method:='True'] + + baseline_df <- baseline_df[(method=='mle') | (method=='True') | (method=='naive')] + robust_df <- robust_df[(method=='mle') | (method=='True')] + + baseline_df[method=='mle',method:='MLE Reported'] + robust_df[method=='mle',method:='No Z in Error Model'] + + df <- rbind(baseline_df, robust_df, fill=TRUE) + df[method=='naive', method:='Naive'] + df <- df[(N %in% c(1000,5000)) & (m %in% c(200,100))] + p <- 
plot.simulation(df,iv=iv,levels=c('MLE Reported','No Z in Error Model', 'Naive', 'True')) + grid.draw(p) +} + +plot.robustness.1.checkassumption <- function(iv='x'){ + +## robustness check 1 test g + r <- readRDS('robustness_1.RDS') + baseline_df <- readRDS('remembr.RDS')[['plot.df.example.2']] + robust_df <- data.table(r$robustness_1) + + ## just compare the mle methods in the two examples + robust_df <- robust_df[Bzy==0] + robust_df <- robust_df[Bzx!=0] + baseline_df[method=='true', method:='True'] + robust_df[method=='true', method:='True'] + + baseline_df <- baseline_df[(method=='mle') | (method=='naive')] + robust_df <- robust_df[(method=='mle') | (method=='True')] + + baseline_df[method=='mle',method:='MLE Reported'] + robust_df[method=='mle',method:='No Z in Error Model'] + + df <- rbind(baseline_df, robust_df, fill=TRUE) + df[method=='naive', method:='Naive'] + df <- df[(N %in% c(1000,5000)) & (m %in% c(200,100))] + p <- plot.simulation(df,iv=iv,levels=c('MLE Reported','No Z in Error Model', 'Naive', 'True')) + grid.draw(p) +} + +plot.robustness.1.dv <- function(iv='z'){ + + ## robustness check 1 test g + r <- readRDS('robustness_1_dv.RDS') + baseline_df <- readRDS('remembr.RDS')[['plot.df.example.4']] + robust_df <- data.table(r$robustness_1_dv) + + ## just compare the mle methods in the two examples + + baseline_df[method=='true', method:='True'] + robust_df[method=='true', method:='True'] + + robust_df <- robust_df[Bxy!=0] + robust_df <- robust_df[Bzy!=0] + # robust_df <- robust_df[Bzx==-0.1] + + baseline_df <- baseline_df[(method=='mle') | (method=='True') | (method=='naive')] + robust_df <- robust_df[(method=='mle') | (method=='True')] + + baseline_df[method=='mle',method:='MLE Reported'] + robust_df[method=='mle',method:='No Z in Error Model'] + + df <- rbind(baseline_df, robust_df, fill=TRUE) + df <- df[(N %in% c(1000,5000)) & (m %in% c(200,100))] + df[method=='naive', method:='Naive'] + + p <- plot.simulation(df,iv=iv,levels=c('MLE Reported','No Z 
in Error Model','Naive', 'True')) + grid.draw(p) +} + +plot.robustness.2.iv <- function(iv, n.annotations=100, n.classifications=5000){ + + r <- readRDS("robustness_2.RDS") + robust_df <- data.table(r[['robustness_2']]) + + robust_df <- robust_df[(m==n.annotations) & (N==n.classifications)] + + new.levels <- c("true"="True","naive"="Naïve","amelia.full"="MI", "mecor"="mecor","gmm"="GMM", "mle"="MLE", "zhang"="PL","feasible"="Feasible") + + robust_df <- robust_df[,method := new.levels[method]] + robust_df <- robust_df[method != "Feasible"] + p <- .plot.simulation(robust_df, iv=iv, levels=c("True","Naïve","MI", "GMM", "MLE", "PL", "Feasible")) + + p <- p + facet_wrap(prediction_accuracy~., ncol=4,as.table=F) + p <- p + scale_x_discrete(labels=label_wrap_gen(14)) + ylab("Estimate") + xlab("Method") + coord_flip() + + + p <- arrangeGrob(p, + top=grid.text("AC Accuracy",x=0.32,just='right')) + + grid.draw(p) +} + +robust2 <- readRDS("robustness_2_dv.RDS") +robust_2_df <- data.table(robust2[['robustness_2_dv']]) +robust_2_min_acc <- min(robust_2_df[,prediction_accuracy]) +robust_2_max_acc <- max(robust_2_df[,prediction_accuracy]) + +plot.robustness.2.dv <- function(iv, n.annotations=100, n.classifications=5000){ + + r <- readRDS("robustness_2_dv.RDS") + robust_df <- data.table(r[['robustness_2_dv']]) + + + #temporary work around a bug in the makefile + ## if('Px' %in% names(robust_df)) + ## robust_df <- robust_df[is.na(Px)] + robust_df <- robust_df[(m==n.annotations) & (N==n.classifications)] + + new.levels <- c("true"="True","naive"="Naïve","amelia.full"="MI", "mecor"="mecor","gmm"="GMM", "mle"="MLE", "zhang"="PL","feasible"="Feasible") + + robust_df <- robust_df[,method := new.levels[method]] + robust_df <- robust_df[method != "Feasible"] + p <- .plot.simulation(robust_df, iv=iv, levels=c("True","Naïve","MI", "GMM", "MLE", "PL", "Feasible")) + + p <- p + facet_wrap(prediction_accuracy~., ncol=4,as.table=F) + p <- p + scale_x_discrete(labels=label_wrap_gen(14)) + 
ylab("Estimate") + xlab("Method") + coord_flip() + + p <- arrangeGrob(p, + top=grid.text("AC Accuracy",x=0.32,just='right')) + + grid.draw(p) +} + + +plot.robustness.3.iv <- function(iv, n.annotations=100, n.classifications=5000){ + r <- readRDS('robustness_3.RDS') + robust_df <- data.table(r[['robustness_3']]) + r2 <- readRDS('robustness_3_proflik.RDS') + robust_df_proflik <- data.table(r2[['robustness_3_proflik']]) + + new.levels <- c("true"="True","naive"="Naïve","amelia.full"="MI", "mecor"="mecor","gmm"="GMM", "mle"="MLE", "zhang"="PL","feasible"="Feasible") + + robust_df <- robust_df[,method := new.levels[method]] + robust_df <- robust_df[method != "Feasible"] + robust_df <- robust_df[method=='MLE',method:='Fischer approximation'] + + robust_df_proflik <- robust_df_proflik[(m==n.annotations) & (N==n.classifications)] + robust_df_proflik <- robust_df_proflik[,method := new.levels[method]] + robust_df_proflik <- robust_df_proflik[method=='MLE',method:='Profile likelihood'] + robust_df_proflik <- robust_df_proflik[method != "Feasible"] + + df <- df[(m==n.annotations) & (N==n.classifications)] + + df <- rbind(robust_df, robust_df_proflik) + + p <- .plot.simulation(df, iv=iv, levels=c("True","Naïve","MI", "GMM", "Profile likelihood","Fischer approximation", "PL", "Feasible")) + + p <- p + facet_wrap(Px~., ncol=3,as.table=F) + p <- p + scale_x_discrete(labels=label_wrap_gen(14)) + ylab("Estimate") + xlab("Method") + coord_flip() + + p <- arrangeGrob(p, + top=grid.text("P(X)",x=0.32,just='right')) + + grid.draw(p) +} + +plot.robustness.3.dv <- function(iv, n.annotations=100, n.classifications=1000){ + r <- readRDS('robustness_3_dv.RDS') + robust_df <- data.table(r[['robustness_3_dv']]) + + new.levels <- c("true"="True","naive"="Naïve","amelia.full"="MI", "mecor"="mecor","mle"="MLE", "zhang"="PL","feasible"="Feasible") + + robust_df <- robust_df[(m==n.annotations) & (N==n.classifications)] + + robust_df <- robust_df[,method := new.levels[method]] + robust_df <- 
robust_df[method != "Feasible"] + + p <- .plot.simulation(robust_df, iv=iv, levels=c("True","Naïve","MI", "GMM", "MLE", "PL", "Feasible")) + + p <- p + facet_wrap(B0~., ncol=3,as.table=F) + p <- p + scale_x_discrete(labels=label_wrap_gen(14)) + ylab("Estimate") + xlab("Method") + coord_flip() + + p <- arrangeGrob(p, + top=grid.text("P(Y)",x=0.32,just='right')) + + grid.draw(p) +} + +plot.robustness.4.iv <- function(iv, n.annotations=100, n.classifications=1000){ + r <- readRDS('robustness_4.RDS') + robust_df <- data.table(r[['robustness_4']]) + + new.levels <- c("true"="True","naive"="Naïve","amelia.full"="MI", "mecor"="mecor","gmm"="GMM", "mle"="MLE", "zhang"="PL","feasible"="Feasible") + + robust_df <- robust_df[(m==n.annotations) & (N==n.classifications)] + + robust_df <- robust_df[,method := new.levels[method]] + robust_df <- robust_df[method != "Feasible"] + + robust_df <- robust_df[,y_bias=factor(robust_df$y_bias,levels=sort(unique(robust_df$y_bias),decreasing=TRUE))] + p <- .plot.simulation(robust_df, iv=iv, levels=c("True","Naïve","MI", "GMM", "MLE", "PL", "Feasible")) + + p <- p + facet_wrap(y_bias~., ncol=3,as.table=T) + p <- p + scale_x_discrete(labels=label_wrap_gen(14)) + ylab("Estimate") + xlab("Method") + coord_flip() + + p <- arrangeGrob(p, + top=grid.text("Coefficient of Y for W",x=0.32,just='right')) + + grid.draw(p) +} + + + +plot.robustness.4.iv <- function(iv, n.annotations=100, n.classifications=1000){ + r <- readRDS('robustness_4.RDS') + robust_df <- data.table(r[['robustness_4']]) + + + new.levels <- c("true"="True","naive"="Naïve","amelia.full"="MI", "mecor"="mecor","gmm"="GMM", "mle"="MLE", "zhang"="PL","feasible"="Feasible") + + robust_df <- robust_df[(m==n.annotations) & (N==n.classifications)] + + robust_df <- robust_df[,method := new.levels[method]] + robust_df <- robust_df[method != "Feasible"] + + robust_df <- robust_df[,y_bias=factor(robust_df$y_bias,levels=sort(unique(robust_df$y_bias),decreasing=TRUE))] + p <- 
.plot.simulation(robust_df, iv=iv, levels=c("True","Naïve","MI", "GMM", "MLE", "PL", "Feasible")) + + p <- p + facet_wrap(y_bias~., ncol=3,as.table=T) + p <- p + scale_x_discrete(labels=label_wrap_gen(14)) + ylab("Estimate") + xlab("Method") + coord_flip() + + p <- arrangeGrob(p, + top=grid.text("Coefficient of Y for W",x=0.32,just='right')) + + grid.draw(p) +} + +plot.robustness.4.dv <- function(iv, n.annotations=100, n.classifications=1000){ + r <- readRDS('robustness_4_dv.RDS') + robust_df <- data.table(r[['robustness_4']]) + + new.levels <- c("true"="True","naive"="Naïve","amelia.full"="MI", "mecor"="mecor","mle"="MLE", "zhang"="PL","feasible"="Feasible") + + robust_df <- robust_df[(m==n.annotations) & (N==n.classifications)] + + robust_df <- robust_df[,method := new.levels[method]] + robust_df <- robust_df[method != "Feasible"] + + robust_df <- robust_df[,z_bias=factor(z_bias, levels=sort(unique(z_bias),descending=TRUE))] + + p <- .plot.simulation(robust_df, iv=iv, levels=c("True","Naïve","MI", "GMM", "MLE", "PL", "Feasible")) + p <- p + facet_wrap(z_bias~., ncol=3,as.table=F) + p <- p + scale_x_discrete(labels=label_wrap_gen(14)) + ylab("Estimate") + xlab("Method") + coord_flip() + + p <- arrangeGrob(p, + top=grid.text("Coefficient of Z on W",x=0.32,just='right')) + + grid.draw(p) +} diff --git a/resources/#variables.R# b/resources/#variables.R# new file mode 100644 index 0000000..9c0b856 --- /dev/null +++ b/resources/#variables.R# @@ -0,0 +1,55 @@ +library(knitr) +library(ggplot2) +library(data.table) +knitr::opts_chunk$set(fig.show='hold') +f <- function (x) {formatC(x, format="d", big.mark=',')} +format.percent <- function(x) {x<-as.numeric(x);paste(f(x*100),"\\%",sep='')} + +theme_set(theme_bw()) +r <- readRDS('remembr.RDS') +attach(r) +r2 <- readRDS('remember_irr.RDS') +attach(r2) +r3 <- readRDS('remember_grid_sweep.RDS') +attach(r3) + + +## simulation.summary.df <- data.table(sample.4 +## simulation.summary.df <- 
kable(simulation.summary.df,format='latex',row.names=T, column.names=c("Factors", "Input Parameters") + + +sim1a.cor.xz <- as.numeric(unlist(example.1['med.cor.xz'])) +sim1a.acc <- unlist(example.1['med.accuracy']) +sim1b.acc <- unlist(example.2['med.accuracy']) +sim1b.acc.y1 <- unlist(example.2['med.accuracy.y1']) +sim1b.acc.y0 <- example.2['med.accuracy.y0'] +(sim1b.fnr <- example.2['med.fnr']) +(sim1b.fnr.y0 <- example.2['med.fnr.y0']) +(sim1b.fnr.y1 <- example.2['med.fnr.y1']) +sim1b.fpr <- example.2['med.fpr'] +sim1b.fpr.y0 <- example.2['med.fpr.y0'] +sim1b.fpr.y1 <- example.2['med.fpr.y1'] +sim1b.cor.resid.w_pred <- as.numeric(unlist(example.2['cor.resid.w_pred'])) +(sim1b.cor.xz <- example.2['med.cor.xz']) + +sim2a.AC.acc <- example.3['med.accuracy'] +sim2a.lik.ratio <- example.3['med.lik.ratio'] +sim2a.cor.xz <- as.numeric(example.3['med.cor.xz']) + + +sim2b.AC.acc <- example.4['med.accuracy'] +sim2b.lik.ratio <- example.4['med.lik.ratio'] +(sim2b.error.cor.x <- as.numeric(unlist(example.4['med.error.cor.x']))) +(sim2b.error.cor.z <- as.numeric(unlist(example.4['med.error.cor.z']))) + +n.simulations <- max(unlist(example_1_jobs$seed)) +sim1a.cor.xz <- as.numeric(unlist(example.3['med.cor.xz'])) +sim1.R2 <- unlist(example_1_jobs$y_explained_variance) +N.sizes <- unlist(example_1_jobs$N) +N.sizes <- N.sizes[N.sizes!=800] +m.sizes <- unlist(example_1_jobs$m) +sim2.Bx <- as.numeric(example_4_jobs$Bxy) +sim2.Bz <- as.numeric(example_4_jobs$Bzy) +sim1.z.sd <- 0.5 +irr.coder.accuracy <- unlist(example_5_jobs$coder_accuracy) +med.loco.accuracy <- unlist(example.5$med.loco.acc) diff --git a/resources/functions.R b/resources/functions.R new file mode 100644 index 0000000..01a17a4 --- /dev/null +++ b/resources/functions.R @@ -0,0 +1,82 @@ +library(grid) +library(gridExtra) +library(gtable) +format.percent <- function(x,digits=1){paste(round(x*100,digits),"\\%",sep='')} + +f <- function (x) {formatC(x, format="d", big.mark=',')} + + +plot.simulation <- 
function(plot.df, iv='x', levels=c("true","naive", "amelia.full","mecor","gmm","mle", "zhang","feasible"),facet_lhs='m',facet_rhs='N'){ + + p <- .plot.simulation(plot.df, iv, levels) + + p <- p + facet_grid(as.formula(paste(facet_lhs,'~',facet_rhs)),as.table=F) + + p <- p + scale_x_discrete(labels=label_wrap_gen(14)) + ylab("Estimate") + xlab("Method") + coord_flip() + + p <- arrangeGrob(p, + top=grid.text("No. classifications",x=0.32,just='right'), + right=grid.text("No. annotations",y=0.345,just='right',rot=270)) + + return(p) +} + +.plot.simulation <- function(plot.df, iv='x', levels=c("true","naive", "amelia.full","mecor","gmm","mle", "zhang","feasible")){ + + plot.df <- copy(plot.df) + + plot.df <- plot.df[,':='(method=factor(method,levels=levels,ordered=T), + N=factor(N), + m=factor(m))] + + plot.df <- plot.df[,method.old:=method] + + true.est <- mean(plot.df[(method=='True') & (variable==iv)]$mean.est) + + plot.df <- plot.df[(variable==iv)&(method !="True")] + + p <- ggplot(plot.df, aes(y=mean.est, ymax=est.upper.95, ymin=est.lower.95, x=method)) + p <- p + geom_hline(aes(yintercept=true.est),linetype=2) + + p <- p + geom_pointrange(shape=1,size=0.5) + p <- p + geom_linerange(aes(ymax=mean.ci.upper, ymin=mean.ci.lower),position=position_nudge(x=0.4), color='grey40') + + return(p) +} + + +plot.simulation.iv <- function(plot.df, iv='x'){ + + plot.df <- plot.df[(N!=8000) & (m!=800) & (m!=200)] + new.levels <- c("true"="True","naive"="Naïve","amelia.full"="MI", "mecor"="mecor","gmm"="GMM", "mle"="MLE", "zhang"="PL","feasible"="Feasible") + plot.df[,method := new.levels[method]] + + return(plot.simulation(plot.df, iv, levels=c("True","Naïve","MI", "GMM", "MLE", "PL", "Feasible"))) +} + + +plot.simulation.dv <- function(plot.df, iv='x'){ + plot.df <- copy(plot.df) + plot.df <- plot.df[(N!=8000) & (m!=800) & (m!=200)] + + new.levels <- c("true"="True","naive"="Naïve","amelia.full"="MI", "mecor"="mecor","gmm"="GMM", "mle"="MLE", "zhang"="PL","feasible"="Feasible") + 
+ plot.df[,method:=new.levels[method]] + return(plot.simulation(plot.df, iv, levels=c("True","Naïve", "MI","MLE","PL","Feasible"))) +} + +plot.simulation.irr <- function(plot.df,iv='x'){ + plot.df <- copy(plot.df) + new.levels <- c('true'="True","loa0.feasible"="1 coder", "loco.feasible"="2 coders", "loa0.mle"="1 coder MLE", "loco.mle"="2 coders MLE", "amelia.full"="2 coders MI", "zhang"="2 coders PL", "gmm"="2 coders GMM") + plot.df <- plot.df[,method:=new.levels[method]] + return(plot.simulation(plot.df, iv, levels=c("True","1 coder", "2 coders", "1 coder MLE","2 coders MLE","2 coders MI","2 coders PL","2 coders GMM"))) + +} + +plot.simulation.irr.dv <- function(plot.df,iv='x'){ + plot.df <- copy(plot.df) + new.levels <- c('true'="True","naive"="Naïve","loa0.feasible"="1 coder", "loco.feasible"="Feasible", "loa0.mle"="1 coder MLE", "loco.mle"="MLE", "amelia.full"="MI", "zhang"="PL", "gmm"="GMM") + + plot.df <- plot.df[,method:=new.levels[method]] + return(plot.simulation(plot.df, iv, levels=c("True","Naïve", "Feasible", "MLE","MI","PL"))) +} diff --git a/resources/real_data_example.R b/resources/real_data_example.R new file mode 100644 index 0000000..f9ebe72 --- /dev/null +++ b/resources/real_data_example.R @@ -0,0 +1,111 @@ +library(scales) +library(data.table) +library(ggplot2) +iv.example <- readRDS("iv_perspective_example.RDS") + +dv.example <- readRDS("dv_perspective_example.RDS") + +iv.sample.prop <- iv.example$cc_ex_tox.likes.race_disclosed.medsampsample.prop +dv.sample.prop <- dv.example$cc_ex_tox.likes.race_disclosed.largesampsample.prop + +iv.sample.count <- iv.example$cc_ex_tox.likes.race_disclosed.medsampsample.count +dv.sample.count <- dv.example$cc_ex_tox.likes.race_disclosed.largesampsample.count + + +plot.cc.example <- function(datalist, name, varnames=NULL, varorder=NULL, include.models=c("Automatic Classification", "All Annotations")){ + + model.names <- c("Automatic Classification", "All Annotations", "Annotation Sample", "Error Correction") + 
+ glm.par.names <- paste0(name,"coef_",c("pred", "coder", "sample"), "_model") + + measerr.par.name <- paste0(name,"measerr_model_par") + glm.pars <- datalist[glm.par.names] + + n.pars <- length(glm.pars[[1]]) + + all.pars <- append(glm.pars, list("corrected"=datalist[[measerr.par.name]][1:n.pars])) + names(all.pars) <- model.names + df.pars <- as.data.table(data.frame(all.pars),keep.rownames=TRUE) + + if(!is.null(varnames)){ + df.pars[, rn := varnames] + } + + setnames(df.pars, old="rn", new="variable") + + glm.stderr.names <- paste0(name,"se_",c("pred", "coder", "sample"), "_model") + glm.stderr <- datalist[glm.stderr.names] + measerr.stderr.name <- paste0(name,"measerr_model_stderr") + all.stderr <- append(glm.stderr, list("corrected"=datalist[[measerr.stderr.name]][1:n.pars])) + names(all.stderr) <- model.names + df.stderr <- as.data.table(data.frame(all.stderr), keep.rownames=TRUE) + + if(!is.null(varnames)){ + df.stderr[, rn := varnames] + } + + setnames(df.stderr, old="rn", new="variable") + + df.pars <- melt(df.pars, id.vars = "variable", variable.name = "Model", value.name = "Estimate") + + df.stderr <- melt(df.stderr, id.vars = "variable",variable.name = "Model", value.name = "StdErr") + + df <- df.pars[df.stderr, on = c("variable", "Model")] + + df[,":="(UpperCI = Estimate + 1.96*sqrt(StdErr), + LowerCI = Estimate - 1.96*sqrt(StdErr))] + + if(!is.null(varorder)){ + df[,variable:=factor(variable,levels=varorder)] + } + + df[,Model:= factor(gsub('\\.',' ', Model), levels=rev(model.names))] + + df <- df[Model %in% include.models] + + p <- ggplot(df[variable != "Intercept"], aes(y = Estimate, x=Model, ymax=LowerCI, ymin=UpperCI, group=variable)) + p <- p + geom_pointrange(shape=1) + facet_wrap('variable',scales='free_x',nrow=1,as.table=F) + geom_hline(aes(yintercept=0),linetype='dashed',color='gray40') + coord_flip() + xlab("") + p <- p + scale_y_continuous(breaks=breaks_extended(4)) + return(p) +} + + +plot.civilcomments.dv.example <- 
function(include.models=c("Automatic Classification", "All Annotations")){ + return(plot.cc.example(dv.example, "cc_ex_tox.likes.race_disclosed.medsamp", varnames=c("Intercept", "Likes", "Identity Disclosure", "Likes:Identity Disclosure"),varorder=c("Intercept", "Likes", "Identity Disclosure", "Likes:Identity Disclosure"), include.models=include.models) + ylab("Coefficients and 95% Confidence Intervals") + ggtitle("Logistic Regression Predicting Toxicity")) +} + + +plot.civilcomments.iv.example <- function(include.models=c("Automatic Classification", "All Annotations")){ + plot.cc.example(iv.example, "cc_ex_tox.likes.race_disclosed.medsamp", varnames=c("Intercept", "Likes", "Likes:Toxicity", "Toxicity"),varorder=c("Intercept", "Likes", "Toxicity", "Likes:Toxicity"), include.models=include.models) + ylab("Coefficients and 95% Confidence Intervals") + ggtitle("Logistic Regression Predicting Racial/Ethnic Identity Disclosure") +} + + +plot.civilcomments.iv.example.2 <- function(){ + attach(iv.example) + df.pars <- rbind(cc_ex_tox.likes.race_disclosedcoef_pred_model, + cc_ex_tox.likes.race_disclosedcoef_coder_model, + cc_ex_tox.likes.race_disclosedcoef_sample_model, + cc_ex_tox.likes.race_disclosedmeaserr_model_par[1:3] + ) + + rownames(df.pars) <- c('predictions', 'coders', 'sample', 'corrected') + + df.stderr <- rbind(cc_ex_tox.likes.race_disclosedse_pred_model, + cc_ex_tox.likes.race_disclosedse_coder_model, + cc_ex_tox.likes.race_disclosedse_sample_model, + cc_ex_tox.likes.race_disclosedmeaserr_model_stderr[1:3] + ) + + rownames(df.pars) <- c('predictions', 'coders', 'sample', 'corrected') + + ci.upper <- df.pars + 1.96 * sqrt(df.stderr) + ci.lower <- df.pars - 1.96 * sqrt(df.stderr) + return(plot.cc.example(df.pars, ci.lower, ci.upper)) +} + + + + + + + diff --git a/resources/robustness_check_plots.R b/resources/robustness_check_plots.R new file mode 100644 index 0000000..c271dd5 --- /dev/null +++ b/resources/robustness_check_plots.R @@ -0,0 +1,268 @@ 
+library(data.table) +library(ggplot2) +source('resources/functions.R') + +plot.robustness.1 <- function(iv='x'){ + +## robustness check 1 test g + r <- readRDS('robustness_1.RDS') + baseline_df <- readRDS('remembr.RDS')[['plot.df.example.2']] + robust_df <- data.table(r$robustness_1) + + ## just compare the mle methods in the two examples + robust_df <- robust_df[Bzy!=0] + robust_df <- robust_df[Bzx!=0] + baseline_df[method=='true', method:='True'] + robust_df[method=='true', method:='True'] + + baseline_df <- baseline_df[(method=='mle') | (method=='True') | (method=='naive')] + robust_df <- robust_df[(method=='mle') | (method=='True')] + + baseline_df[method=='mle',method:='MLE Reported'] + robust_df[method=='mle',method:='No Z in Error Model'] + + df <- rbind(baseline_df, robust_df, fill=TRUE) + df[method=='naive', method:='Naive'] + df <- df[(N %in% c(1000,5000)) & (m %in% c(200,100))] + p <- plot.simulation(df,iv=iv,levels=c('MLE Reported','No Z in Error Model', 'Naive', 'True')) + grid.draw(p) +} + +plot.robustness.1.checkassumption <- function(iv='x'){ + +## robustness check 1 test g + r <- readRDS('robustness_1.RDS') + baseline_df <- readRDS('remembr.RDS')[['plot.df.example.2']] + robust_df <- data.table(r$robustness_1) + + ## just compare the mle methods in the two examples + robust_df <- robust_df[Bzy==0] + robust_df <- robust_df[Bzx!=0] + baseline_df[method=='true', method:='True'] + robust_df[method=='true', method:='True'] + + baseline_df <- baseline_df[(method=='mle') | (method=='naive')] + robust_df <- robust_df[(method=='mle') | (method=='True')] + + baseline_df[method=='mle',method:='MLE Reported'] + robust_df[method=='mle',method:='No Z in Error Model'] + + df <- rbind(baseline_df, robust_df, fill=TRUE) + df[method=='naive', method:='Naive'] + df <- df[(N %in% c(1000,5000)) & (m %in% c(200,100))] + p <- plot.simulation(df,iv=iv,levels=c('MLE Reported','No Z in Error Model', 'Naive', 'True')) + grid.draw(p) +} + +plot.robustness.1.dv <- 
function(iv='z'){ + + ## robustness check 1 test g + r <- readRDS('robustness_1_dv.RDS') + baseline_df <- readRDS('remembr.RDS')[['plot.df.example.4']] + robust_df <- data.table(r$robustness_1_dv) + + ## just compare the mle methods in the two examples + + baseline_df[method=='true', method:='True'] + robust_df[method=='true', method:='True'] + + robust_df <- robust_df[Bxy!=0] + robust_df <- robust_df[Bzy!=0] + # robust_df <- robust_df[Bzx==-0.1] + + baseline_df <- baseline_df[(method=='mle') | (method=='True') | (method=='naive')] + robust_df <- robust_df[(method=='mle') | (method=='True')] + + baseline_df[method=='mle',method:='MLE Reported'] + robust_df[method=='mle',method:='No Z in Error Model'] + + df <- rbind(baseline_df, robust_df, fill=TRUE) + df <- df[(N %in% c(1000,5000)) & (m %in% c(200,100))] + df[method=='naive', method:='Naive'] + + p <- plot.simulation(df,iv=iv,levels=c('MLE Reported','No Z in Error Model','Naive', 'True')) + grid.draw(p) +} + +plot.robustness.2.iv <- function(iv, n.annotations=100, n.classifications=5000){ + + r <- readRDS("robustness_2.RDS") + robust_df <- data.table(r[['robustness_2']]) + + robust_df <- robust_df[(m==n.annotations) & (N==n.classifications)] + + new.levels <- c("true"="True","naive"="Naïve","amelia.full"="MI", "mecor"="mecor","gmm"="GMM", "mle"="MLE", "zhang"="PL","feasible"="Feasible") + + robust_df <- robust_df[,method := new.levels[method]] + robust_df <- robust_df[method != "Feasible"] + p <- .plot.simulation(robust_df, iv=iv, levels=c("True","Naïve","MI", "GMM", "MLE", "PL", "Feasible")) + + p <- p + facet_wrap(prediction_accuracy~., ncol=4,as.table=F) + p <- p + scale_x_discrete(labels=label_wrap_gen(14)) + ylab("Estimate") + xlab("Method") + coord_flip() + + + p <- arrangeGrob(p, + top=grid.text("AC Accuracy",x=0.32,just='right')) + + grid.draw(p) +} + +robust2 <- readRDS("robustness_2_dv.RDS") +robust_2_df <- data.table(robust2[['robustness_2_dv']]) +robust_2_min_acc <- 
min(robust_2_df[,prediction_accuracy]) +robust_2_max_acc <- max(robust_2_df[,prediction_accuracy]) + +plot.robustness.2.dv <- function(iv, n.annotations=100, n.classifications=5000){ + + r <- readRDS("robustness_2_dv.RDS") + robust_df <- data.table(r[['robustness_2_dv']]) + + + #temporary work around a bug in the makefile + ## if('Px' %in% names(robust_df)) + ## robust_df <- robust_df[is.na(Px)] + robust_df <- robust_df[(m==n.annotations) & (N==n.classifications)] + + new.levels <- c("true"="True","naive"="Naïve","amelia.full"="MI", "mecor"="mecor","gmm"="GMM", "mle"="MLE", "zhang"="PL","feasible"="Feasible") + + robust_df <- robust_df[,method := new.levels[method]] + robust_df <- robust_df[method != "Feasible"] + p <- .plot.simulation(robust_df, iv=iv, levels=c("True","Naïve","MI", "GMM", "MLE", "PL", "Feasible")) + + p <- p + facet_wrap(prediction_accuracy~., ncol=4,as.table=F) + p <- p + scale_x_discrete(labels=label_wrap_gen(14)) + ylab("Estimate") + xlab("Method") + coord_flip() + + p <- arrangeGrob(p, + top=grid.text("AC Accuracy",x=0.32,just='right')) + + grid.draw(p) +} + + +plot.robustness.3.iv <- function(iv, n.annotations=100, n.classifications=5000){ + r <- readRDS('robustness_3.RDS') + robust_df <- data.table(r[['robustness_3']]) + r2 <- readRDS('robustness_3_proflik.RDS') + robust_df_proflik <- data.table(r2[['robustness_3_proflik']]) + + new.levels <- c("true"="True","naive"="Naïve","amelia.full"="MI", "mecor"="mecor","gmm"="GMM", "mle"="MLE", "zhang"="PL","feasible"="Feasible") + + robust_df <- robust_df[(m==n.annotations) & (N==n.classifications)] + robust_df <- robust_df[,method := new.levels[method]] + robust_df <- robust_df[method != "Feasible"] + robust_df <- robust_df[method=='MLE',method:='Fischer likelihood'] + + robust_df_proflik <- robust_df_proflik[(m==n.annotations) & (N==n.classifications)] + robust_df_proflik <- robust_df_proflik[method=='MLE',method:='Profile likelihood'] + + + + robust_df_proflik <- robust_df_proflik[,method := 
new.levels[method]] + robust_df_proflik <- robust_df_proflik[method != "Feasible"] + + df <- rbind(robust_df, robust_df_proflik) + + p <- .plot.simulation(df, iv=iv, levels=c("True","Naïve","MI", "GMM", "MLE", "PL", "Feasible")) + + p <- p + facet_wrap(Px~., ncol=3,as.table=F) + p <- p + scale_x_discrete(labels=label_wrap_gen(14)) + ylab("Estimate") + xlab("Method") + coord_flip() + + p <- arrangeGrob(p, + top=grid.text("P(X)",x=0.32,just='right')) + + grid.draw(p) +} + +plot.robustness.3.dv <- function(iv, n.annotations=100, n.classifications=1000){ + r <- readRDS('robustness_3_dv.RDS') + robust_df <- data.table(r[['robustness_3_dv']]) + + new.levels <- c("true"="True","naive"="Naïve","amelia.full"="MI", "mecor"="mecor","mle"="MLE", "zhang"="PL","feasible"="Feasible") + + robust_df <- robust_df[(m==n.annotations) & (N==n.classifications)] + + robust_df <- robust_df[,method := new.levels[method]] + robust_df <- robust_df[method != "Feasible"] + + p <- .plot.simulation(robust_df, iv=iv, levels=c("True","Naïve","MI", "GMM", "MLE", "PL", "Feasible")) + + p <- p + facet_wrap(B0~., ncol=3,as.table=F) + p <- p + scale_x_discrete(labels=label_wrap_gen(14)) + ylab("Estimate") + xlab("Method") + coord_flip() + + p <- arrangeGrob(p, + top=grid.text("P(Y)",x=0.32,just='right')) + + grid.draw(p) +} + +plot.robustness.4.iv <- function(iv, n.annotations=100, n.classifications=1000){ + r <- readRDS('robustness_4.RDS') + robust_df <- data.table(r[['robustness_4']]) + + new.levels <- c("true"="True","naive"="Naïve","amelia.full"="MI", "mecor"="mecor","gmm"="GMM", "mle"="MLE", "zhang"="PL","feasible"="Feasible") + + robust_df <- robust_df[(m==n.annotations) & (N==n.classifications)] + + robust_df <- robust_df[,method := new.levels[method]] + robust_df <- robust_df[method != "Feasible"] + + robust_df <- robust_df[,y_bias=factor(robust_df$y_bias,levels=sort(unique(robust_df$y_bias),decreasing=TRUE))] + p <- .plot.simulation(robust_df, iv=iv, levels=c("True","Naïve","MI", "GMM", "MLE", 
"PL", "Feasible")) + + p <- p + facet_wrap(y_bias~., ncol=3,as.table=T) + p <- p + scale_x_discrete(labels=label_wrap_gen(14)) + ylab("Estimate") + xlab("Method") + coord_flip() + + p <- arrangeGrob(p, + top=grid.text("Coefficient of Y for W",x=0.32,just='right')) + + grid.draw(p) +} + + + +plot.robustness.4.iv <- function(iv, n.annotations=100, n.classifications=1000){ + r <- readRDS('robustness_4.RDS') + robust_df <- data.table(r[['robustness_4']]) + + + new.levels <- c("true"="True","naive"="Naïve","amelia.full"="MI", "mecor"="mecor","gmm"="GMM", "mle"="MLE", "zhang"="PL","feasible"="Feasible") + + robust_df <- robust_df[(m==n.annotations) & (N==n.classifications)] + + robust_df <- robust_df[,method := new.levels[method]] + robust_df <- robust_df[method != "Feasible"] + + robust_df <- robust_df[,y_bias=factor(robust_df$y_bias,levels=sort(unique(robust_df$y_bias),decreasing=TRUE))] + p <- .plot.simulation(robust_df, iv=iv, levels=c("True","Naïve","MI", "GMM", "MLE", "PL", "Feasible")) + + p <- p + facet_wrap(y_bias~., ncol=3,as.table=T) + p <- p + scale_x_discrete(labels=label_wrap_gen(14)) + ylab("Estimate") + xlab("Method") + coord_flip() + + p <- arrangeGrob(p, + top=grid.text("Coefficient of Y for W",x=0.32,just='right')) + + grid.draw(p) +} + +plot.robustness.4.dv <- function(iv, n.annotations=100, n.classifications=1000){ + r <- readRDS('robustness_4_dv.RDS') + robust_df <- data.table(r[['robustness_4']]) + + new.levels <- c("true"="True","naive"="Naïve","amelia.full"="MI", "mecor"="mecor","mle"="MLE", "zhang"="PL","feasible"="Feasible") + + robust_df <- robust_df[(m==n.annotations) & (N==n.classifications)] + + robust_df <- robust_df[,method := new.levels[method]] + robust_df <- robust_df[method != "Feasible"] + + robust_df <- robust_df[,z_bias=factor(z_bias, levels=sort(unique(z_bias),descending=TRUE))] + + p <- .plot.simulation(robust_df, iv=iv, levels=c("True","Naïve","MI", "GMM", "MLE", "PL", "Feasible")) + p <- p + facet_wrap(z_bias~., 
ncol=3,as.table=F) + p <- p + scale_x_discrete(labels=label_wrap_gen(14)) + ylab("Estimate") + xlab("Method") + coord_flip() + + p <- arrangeGrob(p, + top=grid.text("Coefficient of Z on W",x=0.32,just='right')) + + grid.draw(p) +} diff --git a/resources/variables.R b/resources/variables.R new file mode 100644 index 0000000..9c0b856 --- /dev/null +++ b/resources/variables.R @@ -0,0 +1,55 @@ +library(knitr) +library(ggplot2) +library(data.table) +knitr::opts_chunk$set(fig.show='hold') +f <- function (x) {formatC(x, format="d", big.mark=',')} +format.percent <- function(x) {x<-as.numeric(x);paste(f(x*100),"\\%",sep='')} + +theme_set(theme_bw()) +r <- readRDS('remembr.RDS') +attach(r) +r2 <- readRDS('remember_irr.RDS') +attach(r2) +r3 <- readRDS('remember_grid_sweep.RDS') +attach(r3) + + +## simulation.summary.df <- data.table(sample.4 +## simulation.summary.df <- kable(simulation.summary.df,format='latex',row.names=T, column.names=c("Factors", "Input Parameters") + + +sim1a.cor.xz <- as.numeric(unlist(example.1['med.cor.xz'])) +sim1a.acc <- unlist(example.1['med.accuracy']) +sim1b.acc <- unlist(example.2['med.accuracy']) +sim1b.acc.y1 <- unlist(example.2['med.accuracy.y1']) +sim1b.acc.y0 <- example.2['med.accuracy.y0'] +(sim1b.fnr <- example.2['med.fnr']) +(sim1b.fnr.y0 <- example.2['med.fnr.y0']) +(sim1b.fnr.y1 <- example.2['med.fnr.y1']) +sim1b.fpr <- example.2['med.fpr'] +sim1b.fpr.y0 <- example.2['med.fpr.y0'] +sim1b.fpr.y1 <- example.2['med.fpr.y1'] +sim1b.cor.resid.w_pred <- as.numeric(unlist(example.2['cor.resid.w_pred'])) +(sim1b.cor.xz <- example.2['med.cor.xz']) + +sim2a.AC.acc <- example.3['med.accuracy'] +sim2a.lik.ratio <- example.3['med.lik.ratio'] +sim2a.cor.xz <- as.numeric(example.3['med.cor.xz']) + + +sim2b.AC.acc <- example.4['med.accuracy'] +sim2b.lik.ratio <- example.4['med.lik.ratio'] +(sim2b.error.cor.x <- as.numeric(unlist(example.4['med.error.cor.x']))) +(sim2b.error.cor.z <- as.numeric(unlist(example.4['med.error.cor.z']))) + +n.simulations 
<- max(unlist(example_1_jobs$seed)) +sim1a.cor.xz <- as.numeric(unlist(example.3['med.cor.xz'])) +sim1.R2 <- unlist(example_1_jobs$y_explained_variance) +N.sizes <- unlist(example_1_jobs$N) +N.sizes <- N.sizes[N.sizes!=800] +m.sizes <- unlist(example_1_jobs$m) +sim2.Bx <- as.numeric(example_4_jobs$Bxy) +sim2.Bz <- as.numeric(example_4_jobs$Bzy) +sim1.z.sd <- 0.5 +irr.coder.accuracy <- unlist(example_5_jobs$coder_accuracy) +med.loco.accuracy <- unlist(example.5$med.loco.acc) diff --git a/robustness_1.RDS b/robustness_1.RDS new file mode 100644 index 0000000..9c3ec35 Binary files /dev/null and b/robustness_1.RDS differ diff --git a/robustness_1_dv.RDS b/robustness_1_dv.RDS new file mode 100644 index 0000000..44112e9 Binary files /dev/null and b/robustness_1_dv.RDS differ diff --git a/robustness_2.RDS b/robustness_2.RDS new file mode 100644 index 0000000..e0f9f8d Binary files /dev/null and b/robustness_2.RDS differ diff --git a/robustness_2_dv.RDS b/robustness_2_dv.RDS new file mode 100644 index 0000000..f46db9f Binary files /dev/null and b/robustness_2_dv.RDS differ diff --git a/robustness_3.RDS b/robustness_3.RDS new file mode 100644 index 0000000..a1c1f1a Binary files /dev/null and b/robustness_3.RDS differ diff --git a/robustness_3_dv.RDS b/robustness_3_dv.RDS new file mode 100644 index 0000000..982b04b Binary files /dev/null and b/robustness_3_dv.RDS differ diff --git a/robustness_3_dv_proflik.RDS b/robustness_3_dv_proflik.RDS new file mode 100644 index 0000000..1084588 Binary files /dev/null and b/robustness_3_dv_proflik.RDS differ diff --git a/robustness_3_proflik.RDS b/robustness_3_proflik.RDS new file mode 100644 index 0000000..c7c775c Binary files /dev/null and b/robustness_3_proflik.RDS differ diff --git a/robustness_4.RDS b/robustness_4.RDS new file mode 100644 index 0000000..3acd176 Binary files /dev/null and b/robustness_4.RDS differ diff --git a/robustness_4_dv.RDS b/robustness_4_dv.RDS new file mode 100644 index 0000000..f3e8498 Binary files /dev/null 
and b/robustness_4_dv.RDS differ diff --git a/ugmm8a.pfb b/ugmm8a.pfb new file mode 100644 index 0000000..79a60e6 Binary files /dev/null and b/ugmm8a.pfb differ diff --git a/ugmmi8a.pfb b/ugmmi8a.pfb new file mode 100644 index 0000000..19bb435 Binary files /dev/null and b/ugmmi8a.pfb differ diff --git a/ugmr8a.pfb b/ugmr8a.pfb new file mode 100644 index 0000000..0017029 Binary files /dev/null and b/ugmr8a.pfb differ diff --git a/ugmri8a.pfb b/ugmri8a.pfb new file mode 100644 index 0000000..9b90d31 Binary files /dev/null and b/ugmri8a.pfb differ diff --git a/versions/article.pdf b/versions/article.pdf new file mode 100644 index 0000000..0da6b24 Binary files /dev/null and b/versions/article.pdf differ diff --git a/versions/automated_content_misclassfication_beta_20230126.pdf b/versions/automated_content_misclassfication_beta_20230126.pdf new file mode 100644 index 0000000..eb38a1e Binary files /dev/null and b/versions/automated_content_misclassfication_beta_20230126.pdf differ diff --git a/versions/automated_content_misclassfication_beta_20230214.pdf b/versions/automated_content_misclassfication_beta_20230214.pdf new file mode 100644 index 0000000..8a89ba5 Binary files /dev/null and b/versions/automated_content_misclassfication_beta_20230214.pdf differ diff --git a/versions/we_can_fix_it-ica23-submission_appendix.pdf b/versions/we_can_fix_it-ica23-submission_appendix.pdf new file mode 100644 index 0000000..c6a7f6d Binary files /dev/null and b/versions/we_can_fix_it-ica23-submission_appendix.pdf differ diff --git a/versions/we_can_fix_it-ica23-submission_maintext.pdf b/versions/we_can_fix_it-ica23-submission_maintext.pdf new file mode 100644 index 0000000..3c362af Binary files /dev/null and b/versions/we_can_fix_it-ica23-submission_maintext.pdf differ