% ml_measurement_error_overleaf/bayesnets.tex
% Node styles shared by all Bayes-network diagrams in this figure.
\tikzset{
% Fully observed variable: plain circle.
observed/.style={circle, draw},
% Half-filled circle for a partly observed variable.
% #1 = fill of the lower-left triangle, #2 = fill of the remainder.
partly observed/.style 2 args={draw, fill=#2, path picture={
\fill[#1, sharp corners] (path picture bounding box.south west) -|
(path picture bounding box.north east) -- cycle;},
circle},
% Latent / automatically classified variable: gray circle.
unobserved/.style={draw, circle, fill=gray!40},
% Residual term: square node (not used in this figure but kept for reuse).
residual/.style={draw, rectangle}
}
\begin{figure}[htbp!]
\centering
\begin{subfigure}[t]{0.48\textwidth}
\centering
\begin{tikzpicture}
% Simulation 1a: W depends on X only, so measurement error is
% non-differential (no direct W--Y edge).
\node[observed] (y) {$Y$};
\node[unobserved, above=of y] (x) {$X$};
\node[observed, left=of x] (w) {$W$};
\node[observed, right=of x] (z) {$Z$};
% Use `--' consistently for all undirected edges.
\draw[-] (z) -- (y);
\draw[-] (z) -- (x);
\draw[-] (x) -- (y);
\draw[-] (x) -- (w);
\end{tikzpicture}
\caption{In \emph{Simulation 1a}, classifications $W$ are conditionally independent of $Y$ so a model using $W$ as a proxy for $X$ has non-differential error. \label{fig:simulation.1a}}
\end{subfigure}
\hfill
\begin{subfigure}[t]{0.48\textwidth}
\centering
\begin{tikzpicture}
% Simulation 1b: identical to 1a plus a direct W--Y edge,
% i.e. differential measurement error.
\node[observed] (y) {$Y$};
\node[unobserved, above=of y] (x) {$X$};
\node[observed, left=of x] (w) {$W$};
\node[observed, right=of x] (z) {$Z$};
% Use `--' consistently; the original drew (x)--(y) twice
% (once with `--', once with `to') -- the duplicate is removed.
\draw[-] (z) -- (y);
\draw[-] (z) -- (x);
\draw[-] (x) -- (y);
\draw[-] (x) -- (w);
\draw[-] (w) -- (y);
\end{tikzpicture}
\caption{In \emph{Simulation 1b}, the edge from $W$ to $Y$ signifies that the automatic classifications $W$ are not conditionally independent of $Y$ given $X$, indicating differential error.
\label{fig:simulation.1b}
}
\end{subfigure}
\\
\hfill
\begin{subfigure}[t]{0.48\textwidth}
\centering
\begin{tikzpicture}
% Simulation 2a: the outcome Y is unobserved and measured by an
% unbiased classifier W (W depends on Y only).
\node[unobserved] (y) {$Y$};
\node[observed, above=of y] (x) {$X$};
\node[observed, right=of y] (w) {$W$};
\node[observed, right=of x] (z) {$Z$};
% Use `--' consistently for all undirected edges.
\draw[-] (z) -- (y);
\draw[-] (x) -- (y);
\draw[-] (y) -- (w);
\draw[-] (x) -- (z);
\end{tikzpicture}
\caption{In \emph{Simulation 2a}, an unbiased classifier measures the outcome. \label{fig:simulation.2a}}
\end{subfigure} \hfill
\begin{subfigure}[t]{0.48\textwidth}
\centering
\begin{tikzpicture}
% Simulation 2b: like 2a plus a W--Z edge (systematic misclassification).
\node[unobserved] (y) {$Y$};
% FIX: `observed' takes no arguments; the two-argument half-filled
% style defined in the \tikzset preamble is `partly observed'.
\node[partly observed={white}{gray!40}, above=of y] (x) {$X$};
\node[observed, right=of y] (w) {$W$};
\node[observed, right=of x] (z) {$Z$};
\draw[-] (x) -- (y);
\draw[-] (z) -- (w);
\draw[-] (y) -- (w);
\draw[-] (x) -- (z);
\draw[-] (z) -- (y);
\end{tikzpicture}
\caption{In \emph{Simulation 2b}, the edge connecting $W$ and $Z$ signifies that the predictions $W$ are not conditionally independent of $Z$ given $Y$, indicating systematic misclassification. \label{fig:simulation.2b}}
\end{subfigure}
\vspace{1em}
\begin{subfigure}[t]{0.2\textwidth}
\centering
\begin{tikzpicture}
% Legend mapping each node style to its meaning. Includes the
% `partly observed' style used for X in Simulation 2b, which the
% original legend omitted.
\matrix [draw, below, font=\small, align=center, column sep=2\pgflinewidth, inner sep=0.4em, outer sep=0em, nodes={align=center, anchor=center}] at (current bounding box.south){
\node[observed,label=right:observed] {}; \\
\node[partly observed={white}{gray!40},label=right:partly observed] {}; \\
\node[unobserved,label=right:automatically classified]{}; \\
};
\end{tikzpicture}
\end{subfigure}
\caption{
Bayesian networks representing the conditional independence structure of our simulations. \label{bayesnets}
}
\end{figure}