-\documentclass[a4paper]{article}
-\usepackage[a4paper, margin=2cm]{geometry}
-\usepackage{array}
-\usepackage{amsmath}
-\usepackage{amssymb}
-\usepackage{tcolorbox}
-\usepackage{fancyhdr}
-\usepackage{pgfplots}
-\usepackage{tabularx}
-\usepackage{keystroke}
-\usepackage{listings}
-\usepackage{xcolor} % used only to show the phantomed stuff
-\definecolor{cas}{HTML}{e6f0fe}
-\usepackage{mathtools}
-\pgfplotsset{compat=1.16}
-
-\pagestyle{fancy}
-\fancyhead[LO,LE]{Unit 4 Specialist --- Statistics}
-\fancyhead[CO,CE]{Andrew Lorimer}
-
-\setlength\parindent{0pt}
-
+\documentclass[spec-collated.tex]{subfiles}
\begin{document}
- \title{Statistics}
- \author{}
- \date{}
- \maketitle
-
- \section{Linear combinations of random variables}
+ \section{Statistics}
\subsection*{Continuous random variables}
+	\begin{enumerate}
	\item \(\int^\infty_{-\infty} f(x) \> dx = 1\)
\end{enumerate}
+ \begin{align*}
+		E(X) &= \int^\infty_{-\infty} x \cdot f(x) \> dx \\
+ \operatorname{Var}(X) &= E\left[(X-\mu)^2\right]
+ \end{align*}
+
\[ \Pr(X \le c) = \int^c_{-\infty} f(x) \> dx \]
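+
+	e.g. for an illustrative density \(f(x) = 2x\) on \(0 \le x \le 1\) (zero elsewhere), using \(\operatorname{Var}(X) = E(X^2) - [E(X)]^2\):
+	\begin{align*}
+		E(X) &= \int_0^1 x \cdot 2x \> dx = \frac{2}{3} \\
+		\operatorname{Var}(X) &= \int_0^1 x^2 \cdot 2x \> dx - \left(\frac{2}{3}\right)^2 = \frac{1}{2} - \frac{4}{9} = \frac{1}{18} \\
+		\Pr(X \le \tfrac{1}{2}) &= \int_0^{\frac{1}{2}} 2x \> dx = \frac{1}{4}
+	\end{align*}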
+
+
+ \subsection*{Two random variables \(X, Y\)}
+
+	The expectation rule below holds for any \(X\) and \(Y\); the variance rule requires \(X\) and \(Y\) to be independent:
+ \begin{align*}
+ \operatorname{E}(aX+bY) & = a\operatorname{E}(X)+b\operatorname{E}(Y) \\
+ \operatorname{Var}(aX \pm bY \pm c) &= a^2 \operatorname{Var}(X) + b^2 \operatorname{Var}(Y)
+ \end{align*}
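+
+	e.g. for independent \(X, Y\) with illustrative values \(\operatorname{Var}(X) = 4\), \(\operatorname{Var}(Y) = 9\):
+	\[ \operatorname{Var}(2X - 3Y + 1) = 2^2(4) + 3^2(9) = 97 \]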
- \subsubsection*{Linear functions \(X \rightarrow aX+b\)}
+ \subsection*{Linear functions \(X \rightarrow aX+b\)}
\begin{align*}
\Pr(Y \le y) &= \Pr(aX+b \le y) \\
&= \Pr\left(X \le \dfrac{y-b}{a}\right) \\
- &= \int^{\dfrac{y-b}{a}}_{-\infty} f(x) \> dx
+			&= \int^{\frac{y-b}{a}}_{-\infty} f(x) \> dx \tag{for \(a > 0\)}
\end{align*}
\begin{align*}
\textbf{Variance:} && \operatorname{Var}(aX+b) &= a^2 \operatorname{Var}(X) \\
\end{align*}
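+
+	e.g. for \(Y = 2X + 1\) (illustrative): \(\Pr(Y \le 5) = \Pr(X \le 2)\) and \(\operatorname{Var}(Y) = 2^2 \operatorname{Var}(X) = 4\operatorname{Var}(X)\); the \(+1\) shifts the distribution but does not change its spread.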
- \subsection*{Linear combination of two random variables}
+ \subsection*{Expectation theorems}
+
+	For a non-linear function \(g\), in general the expected value \(E(g(X))\) is not equal to \(g(E(X))\).
\begin{align*}
- \textbf{Mean:} && \operatorname{E}(aX+bY) & = a\operatorname{E}(X)+b\operatorname{E}(Y) \\
- \textbf{Variance:} && \operatorname{Var}(aX+bY) &= a^2 \operatorname{Var}(X) + b^2 \operatorname{Var}(Y) \tag{if \(X\) and \(Y\) are independent}\\
+		E(X^2) &= \operatorname{Var}(X) + \left[E(X)\right]^2 \\
+ E(X^n) &= \Sigma x^n \cdot p(x) \tag{non-linear} \\
+ &\ne [E(X)]^n \\
+ E(aX \pm b) &= aE(X) \pm b \tag{linear} \\
+ E(b) &= b \tag{\(\forall b \in \mathbb{R}\)}\\
+ E(X+Y) &= E(X) + E(Y) \tag{two variables}
\end{align*}
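+
+	e.g. a discrete check with illustrative probabilities \(p(1) = p(2) = 0.5\):
+	\begin{align*}
+		E(X) &= 1(0.5) + 2(0.5) = 1.5 \\
+		E(X^2) &= 1^2(0.5) + 2^2(0.5) = 2.5 \ne [E(X)]^2 = 2.25 \\
+		\operatorname{Var}(X) &= E(X^2) - [E(X)]^2 = 0.25
+	\end{align*}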
- \section{Sample mean}
+ \subsection*{Sample mean}
Approximation of the \textbf{population mean} determined experimentally.
\[ \overline{x} = \dfrac{\Sigma x}{n} \]
- where \(n\) is the size of the sample (number of sample points) and \(x\) is the value of a sample point
-
- \subsubsection*{\colorbox{cas}{On CAS:}}
+ where
+ \begin{description}[nosep, labelindent=0.5cm]
+ \item \(n\) is the size of the sample (number of sample points)
+ \item \(x\) is the value of a sample point
+ \end{description}
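+
+	e.g. for the illustrative sample \(\{2, 4, 9\}\): \(\overline{x} = \dfrac{2+4+9}{3} = 5\)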
- \begin{enumerate}
+\begin{cas}
+ \begin{enumerate}[leftmargin=3mm]
\item Spreadsheet
- \item In cell A1: \verb;mean(randNorm(sd, mean, sample size));
+ \item In cell A1:\\ \path{mean(randNorm(sd, mean, sample size))}
\item Edit \(\rightarrow\) Fill \(\rightarrow\) Fill Range
\item Input range as A1:An where \(n\) is the number of samples
\item Graph \(\rightarrow\) Histogram
\end{enumerate}
+ \end{cas}
\subsubsection*{Sample size of \(n\)}
	Sample mean \(\overline{X}\) is distributed with mean \(\mu\) and sd \(\frac{\sigma}{\sqrt{n}}\) (the distribution approaches a normal distribution as sample size \(n\) increases).
- \colorbox{cas}{On CAS:} Spreadsheet \(\rightarrow\) Catalog \(\rightarrow\) \verb;randNorm(sd, mean, n); where \verb;n; is the number of samples. Show histogram with Histogram key in top left \\
- To calculate parameters of a dataset: Calc \(\rightarrow\) One-variable
+	For the distribution \(X^\prime\) of the mean of \(n\) trials, \(\operatorname{E}(X^\prime) = \operatorname{E}(X), \quad \operatorname{sd}(X^\prime) = \dfrac{\operatorname{sd}(X)}{\sqrt{n}}\)
+
+ \begin{cas}
- \section{Normal distributions}
+ \begin{itemize}
+		\item Spreadsheet \(\rightarrow\) Catalog \(\rightarrow\) \path{randNorm(sd, mean, n)} where \(n\) is the number of samples. Show histogram with Histogram key in top left
+ \item To calculate parameters of a dataset: Calc \(\rightarrow\) One-variable
+ \end{itemize}
+
+ \end{cas}
+
+ \subsection*{Normal distributions}
- mean = mode = median
\[ Z = \frac{X - \mu}{\sigma} \]
- Normal distributions must have are (total prob.) of 1 \(\implies \int^\infty_{-\infty} f(x) \> dx = 1\)
-\pgfmathdeclarefunction{gauss}{2}{%
- \pgfmathparse{1/(#2*sqrt(2*pi))*exp(-((x-#1)^2)/(2*#2^2))}%
-}
-
-\begin{tikzpicture}
-\begin{axis}[every axis plot post/.append style={
- mark=none,domain=-3:3,samples=50,smooth}, % All plots: from -2:2, 50 samples, smooth, no marks
- axis x line*=bottom, % no box around the plot, only x and y axis
- axis y line*=left, % the * suppresses the arrow tips
- enlargelimits=upper,
- ytick={0.5},
- yticklabels={\(\frac{1}{\sigma \sqrt{2\pi}}\)},
- xtick={-2,-1,0,1,2},
- xticklabels={\(\mu-2\sigma\), \(\mu-\sigma\), \(\mu\), \(\mu+\sigma\), \(\mu+2\sigma\)},
- xlabel={\(x\)},
- every axis x label/.style={at={(current axis.right of origin)},anchor=north west},
- ylabel={\(\Pr(X=x)\)}]
- \addplot {gauss(0,0.75)};
-\end{axis}
-\end{tikzpicture}
-
- \section{Central limit theorem}
+ Normal distributions must have area (total prob.) of 1 \(\implies \int^\infty_{-\infty} f(x) \> dx = 1\) \\
+ \(\text{mean} = \text{mode} = \text{median}\)
+
+ \begin{warning}
+ Always express \(z\) as +ve. Express confidence \textit{interval} as ordered pair.
+ \end{warning}
+
+ \begin{figure*}[hb]
+ \centering
+		\input{normal-dist-graph}
+ \end{figure*}
+
+ \subsection*{Central limit theorem}
	If \(X\) is a random variable with mean \(\mu\) and sd \(\sigma\), then for a sufficiently large sample size \(n\), the distribution of the sample mean \(\overline{X}\) is approximately normal with \(\operatorname{E}(\overline{X}) = \mu\) and \(\operatorname{sd}(\overline{X}) = \frac{\sigma}{\sqrt{n}}\).
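+
+	e.g. with illustrative values \(\mu = 50\), \(\sigma = 12\), \(n = 36\): \(\overline{X}\) is approximately normal with \(\operatorname{E}(\overline{X}) = 50\) and \(\operatorname{sd}(\overline{X}) = \frac{12}{\sqrt{36}} = 2\).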
+ \subsection*{Confidence intervals}
+
+ \begin{itemize}
+ \item \textbf{Point estimate:} single-valued estimate of the population mean from the value of the sample mean \(\overline{x}\)
+ \item \textbf{Interval estimate:} confidence interval for population mean \(\mu\)
+		\item \(C\)\% confidence interval \(\implies\) \(C\)\% of samples will give an interval containing the population mean \(\mu\)
+ \end{itemize}
+
+ \subsubsection*{95\% confidence interval}
+
+ For 95\% c.i. of population mean \(\mu\):
+
+	\[ \mu \in \left(\overline{x} \pm 1.96 \dfrac{\sigma}{\sqrt{n}} \right)\]
+
+ where:
+ \begin{description}[nosep, labelindent=0.5cm]
+ \item \(\overline{x}\) is the sample mean
+ \item \(\sigma\) is the population sd
+ \item \(n\) is the sample size from which \(\overline{x}\) was calculated
+ \end{description}
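+
+	e.g. with illustrative values \(\overline{x} = 30\), \(\sigma = 6\), \(n = 36\):
+	\[ \mu \in \left( 30 \pm 1.96 \times \frac{6}{\sqrt{36}} \right) = (28.04,\ 31.96) \]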
+
+ \begin{cas}
+ Menu \(\rightarrow\) Stats \(\rightarrow\) Calc \(\rightarrow\) Interval \\
+ Set \textit{Type = One-Sample Z Int} \\ \-\hspace{1em} and select \textit{Variable}
+ \end{cas}
+
+ \subsection*{Margin of error}
+
+ For 95\% confidence interval of \(\mu\):
+ \begin{align*}
+ M &= 1.96 \times \dfrac{\sigma}{\sqrt{n}} \\
+ \implies n &= \left( \dfrac{1.96 \sigma}{M} \right)^2
+ \end{align*}
+
+ Always round \(n\) up to a whole number of samples.
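+
+	e.g. to achieve \(M = 2\) with an illustrative \(\sigma = 15\):
+	\[ n = \left( \frac{1.96 \times 15}{2} \right)^2 = 14.7^2 = 216.09 \implies n = 217 \]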
+
+ \subsection*{General case}
+
+ For \(C\)\% c.i. of population mean \(\mu\):
+
+	\[ \mu \in \left( \overline{x} \pm k \dfrac{\sigma}{\sqrt{n}} \right) \]
+ \hfill where \(k\) is such that \(\Pr(-k < Z < k) = \frac{C}{100}\)
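+
+	e.g. \(C = 90 \implies k \approx 1.645\), and \(C = 99 \implies k \approx 2.576\) (from the inverse normal).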
+
+ \subsection*{Confidence interval for multiple trials}
+
+ For a set of \(n\) confidence intervals (samples), there is \(0.95^n\) chance that all \(n\) intervals contain the population mean \(\mu\).
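+
+	e.g. for \(n = 10\) independent 95\% confidence intervals: \(\Pr(\text{all 10 contain } \mu) = 0.95^{10} \approx 0.60\)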
+
+ \section{Hypothesis testing}
+
+ \begin{warning}
+ Note hypotheses are always expressed in terms of population parameters
+ \end{warning}
+
+ \subsection*{Null hypothesis \(H_0\)}
+
+	Sample drawn from population has same mean as control population, and any difference can be explained by sampling variation.
+
+ \subsection*{Alternative hypothesis \(H_1\)}
+
+	Amount of variation from control is significant, despite expected sampling variation.
+
+ \subsection*{\(p\)-value}
+
+
+	Probability of observing a value of the sample statistic at least as extreme as the one observed, assuming the null hypothesis is true.
+
+	\begin{align*}
+		p &= \Pr(\overline{X} \lessgtr \overline{x}_0 \mid \mu = \mu_0) \tag{one tail} \\
+		p &= 2\Pr(\overline{X} \lessgtr \overline{x}_0 \mid \mu = \mu_0) \tag{two tail}
+	\end{align*}
+
+	where \(\overline{x}_0\) is the observed sample mean and \(\mu_0\) is the population mean under \(H_0\).
+
+ \vspace{0.5em}
+ \begin{tabularx}{23em}{|l|X|}
+ \hline
+ \rowcolor{cas}
+ \(\boldsymbol{p}\) & \textbf{Conclusion} \\
+ \hline
+ \(> 0.05\) & insufficient evidence against \(H_0\) \\
+ \(< 0.05\) (5\%) & good evidence against \(H_0\) \\
+ \(< 0.01\) (1\%) & strong evidence against \(H_0\) \\
+ \(< 0.001\) (0.1\%) & very strong evidence against \(H_0\) \\
+ \hline
+ \end{tabularx}
+
+ \subsection*{Statistical significance}
+
+ Significance level is denoted by \(\alpha\).
+
+ \-\hspace{1em} If \(p<\alpha\), null hypothesis is \textbf{rejected} \\
+	\-\hspace{1em} If \(p>\alpha\), null hypothesis is \textbf{not rejected}
+
+ \subsection*{\(z\)-test}
+
+ Hypothesis test for a mean of a sample drawn from a normally distributed population with a known standard deviation.
+
+ \begin{cas}
+ Menu \(\rightarrow\) Statistics \(\rightarrow\) Calc \(\rightarrow\) Test. \\
+ Select \textit{One-Sample Z-Test} and \textit{Variable}, then input:
+ \begin{description}[nosep, style=multiline, labelindent=0.5cm, leftmargin=2cm, font=\normalfont]
+ \item[\(\mu\) cond:] same operator as \(H_1\)
+ \item[\(\mu_0\):] expected sample mean (null hypothesis)
+ \item[\(\sigma\):] standard deviation (null hypothesis)
+ \item[\(\overline{x}\):] sample mean
+ \item[\(n\):] sample size
+ \end{description}
+ \end{cas}
+
+ \subsection*{One-tail and two-tail tests}
+
+ \subsubsection*{One tail}
+
+ \begin{itemize}
+ \item \(\mu\) has changed in one direction
+ \item State ``\(H_1: \mu \lessgtr \) known population mean''
+ \end{itemize}
+
+ \subsubsection*{Two tail}
+
+ \begin{itemize}
+ \item Direction of \(\Delta \mu\) is ambiguous
+ \item State ``\(H_1: \mu \ne\) known population mean''
+ \end{itemize}
+
+ For two tail tests:
+ \begin{align*}
+ p\text{-value} &= \Pr(|\overline{X} - \mu| \ge |\overline{x}_0 - \mu|) \\
+		&= \Pr\left( |Z| \ge \left|\dfrac{\overline{x}_0 - \mu}{\sigma / \sqrt{n}} \right| \right)
+ \end{align*}
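+
+	A worked example with illustrative values \(H_0: \mu = 50\), \(H_1: \mu \ne 50\), \(\sigma = 10\), \(\overline{x}_0 = 53\), \(n = 25\):
+	\begin{align*}
+		z &= \dfrac{53 - 50}{10 / \sqrt{25}} = 1.5 \\
+		p &= 2\Pr(Z \ge 1.5) \approx 0.134 > 0.05
+	\end{align*}
+	so there is insufficient evidence against \(H_0\).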
+
+ \subsection*{Modulus notation for two tail}
+
+	\(\Pr(|\overline{X} - \mu| \ge a) \implies\) ``the probability that the distance between \(\overline{X}\) and \(\mu\) is \(\ge a\)''
+
+ \subsection*{Inverse normal}
+
+ \begin{cas}
+		\verb;invNormCdf("L", ;\(\alpha\)\verb;, ;\(\dfrac{\sigma}{\sqrt{n}}\)\verb;, ;\(\mu\)\verb;);
+ \end{cas}
+
+ \subsection*{Errors}
+
+ \begin{description}[labelwidth=2.5cm, labelindent=0.5cm]
+ \item [Type I error] \(H_0\) is rejected when it is \textbf{true}
+ \item [Type II error] \(H_0\) is \textbf{not} rejected when it is \textbf{false}
+ \end{description}
+
+% \subsection*{Using c.i. to find \(p\)}
+% need more here
+
\end{document}