Theory Notebook — Math for LLMs

Hypothesis Testing

Statistics / Hypothesis Testing

Run notebook
Private notes
0/8000

Notes stay private to your browser until account sync is configured.

Theory Notebook

Theory Notebook

Converted from theory.ipynb for web reading.

Hypothesis Testing — Theory Notebook

"To call in the statistician after the experiment is done may be no more than asking him to perform a post-mortem examination: he may be able to say what the experiment died of." — Sir Ronald A. Fisher

Interactive derivations and visualisations: p-values, power, t/χ²/F tests, Neyman-Pearson lemma, GLRT, multiple testing, permutation tests, KS drift detection, SPRT.

Code cell 2

# Plot styling for the whole notebook: use seaborn's colorblind-safe
# theme when available, otherwise fall back to matplotlib's bundled
# "seaborn-v0_8-whitegrid" style sheet.
# NOTE(review): the next code cell repeats this setup (and adds scipy);
# consider keeping only one of the two setup cells.
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl

try:
    import seaborn as sns
    sns.set_theme(style="whitegrid", palette="colorblind")
    HAS_SNS = True
except ImportError:
    # seaborn not installed: approximate its look with the stock style.
    plt.style.use("seaborn-v0_8-whitegrid")
    HAS_SNS = False

# Global figure defaults shared by every plot below.
mpl.rcParams.update({
    "figure.figsize":    (10, 6),
    "figure.dpi":         120,
    "font.size":           13,
    "axes.titlesize":      15,
    "axes.labelsize":      13,
    "xtick.labelsize":     11,
    "ytick.labelsize":     11,
    "legend.fontsize":     11,
    "legend.framealpha":   0.85,
    "lines.linewidth":      2.0,
    "axes.spines.top":     False,
    "axes.spines.right":   False,
    "savefig.bbox":       "tight",
    "savefig.dpi":         150,
})
np.random.seed(42)  # fixed seed so the notebook's simulations reproduce
print("Plot setup complete.")

Code cell 3

# Main setup cell: imports, colour palette and rcParams used by all
# later cells (COLORS, stats and HAS_SNS live at module scope).
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from scipy import stats
import warnings
warnings.filterwarnings('ignore')  # silences ALL warnings notebook-wide — deliberate for clean output

try:
    import seaborn as sns
    sns.set_theme(style='whitegrid', palette='colorblind')
    HAS_SNS = True
except ImportError:
    # seaborn missing: use the equivalent bundled matplotlib style.
    plt.style.use('seaborn-v0_8-whitegrid')
    HAS_SNS = False

# Colorblind-friendly hex palette, keyed by semantic role; every plot
# below pulls its colours from here.
COLORS = {
    'primary':   '#0077BB',
    'secondary': '#EE7733',
    'tertiary':  '#009988',
    'error':     '#CC3311',
    'neutral':   '#555555',
    'highlight': '#EE3377',
}

# Shared figure defaults.
mpl.rcParams.update({
    'figure.figsize':    (10, 6),
    'figure.dpi':         120,
    'font.size':           13,
    'axes.titlesize':      15,
    'axes.labelsize':      13,
    'legend.fontsize':     11,
    'lines.linewidth':      2.0,
    'axes.spines.top':     False,
    'axes.spines.right':   False,
})
np.random.seed(42)  # reproducibility for all simulations
print('Setup complete.')

def multipletests_local(pvals, alpha=0.05, method='fdr_bh'):
    """Small local replacement for statsmodels.stats.multitest.multipletests.

    Supports 'bonferroni', 'holm' and 'fdr_bh'/'bh'. Returns the same
    4-tuple shape as statsmodels: (reject mask, adjusted p-values,
    None, None) — the last two slots are placeholders.
    """
    p = np.asarray(pvals, dtype=float)
    m = p.size
    idx = np.argsort(p)
    p_sorted = p[idx]
    key = method.lower()

    if key == 'bonferroni':
        # Multiply every p-value by m, clipped at 1.
        adjusted = np.minimum(p * m, 1.0)
    elif key == 'holm':
        # Step-down: running maximum of (m - i) * p_(i+1) over sorted p's.
        stepdown = np.maximum.accumulate((m - np.arange(m)) * p_sorted)
        adjusted = np.empty(m)
        adjusted[idx] = np.minimum(stepdown, 1.0)
    elif key in {'fdr_bh', 'bh'}:
        # Step-up: running minimum, from the largest p downwards,
        # of p_(i) * m / i.
        scaled = p_sorted * m / np.arange(1, m + 1)
        stepup = np.minimum.accumulate(scaled[::-1])[::-1]
        adjusted = np.empty(m)
        adjusted[idx] = np.minimum(stepup, 1.0)
    else:
        raise ValueError(f'Unsupported method: {key}')

    return adjusted <= alpha, adjusted, None, None

1. The p-Value Distribution

Key fact: Under $H_0$ (exactly true), the p-value $p \sim \mathcal{U}(0,1)$. Under $H_1$, p-values are stochastically smaller (concentrated near 0).

We verify this by simulation with a one-sample z-test.

Code cell 5

# === 1.1 p-Value Distribution Under H0 and H1 ===
# 5000 simulated one-sample z-tests per hypothesis; under H0 the p-value
# should be Uniform(0,1), under H1 it should pile up near 0.
n, n_sims = 30, 5000
sigma = 1.0

def pvalue_zscore(data, mu0=0.0):
    # Two-sided p-value of a one-sample z-test with known sigma
    # (reads `sigma` from this cell's globals).
    z = (data.mean() - mu0) / (sigma / np.sqrt(len(data)))
    return 2 * (1 - stats.norm.cdf(abs(z)))

# Under H0: mu = 0
pvals_h0 = [pvalue_zscore(np.random.normal(0, sigma, n)) for _ in range(n_sims)]

# Under H1: mu = 0.5 (medium effect, d=0.5)
pvals_h1 = [pvalue_zscore(np.random.normal(0.5, sigma, n)) for _ in range(n_sims)]

fig, axes = plt.subplots(1, 2, figsize=(12, 5))

# Left panel: H0 histogram against the flat Uniform(0,1) density.
axes[0].hist(pvals_h0, bins=50, density=True, color=COLORS['primary'], alpha=0.7, edgecolor='white')
axes[0].axhline(1.0, color=COLORS['neutral'], linestyle='--', label='Uniform(0,1) density')
axes[0].set_xlabel('p-value')
axes[0].set_ylabel('Density')
axes[0].set_title('p-values under H₀ (μ=0)')
axes[0].legend()

# Right panel: H1 histogram; mass left of α is the test's power.
axes[1].hist(pvals_h1, bins=50, density=True, color=COLORS['secondary'], alpha=0.7, edgecolor='white')
axes[1].axvline(0.05, color=COLORS['error'], linestyle='--', label='α = 0.05')
axes[1].set_xlabel('p-value')
axes[1].set_ylabel('Density')
axes[1].set_title('p-values under H₁ (μ=0.5, d=0.5)')
axes[1].legend()

plt.tight_layout()
plt.show()

print(f'Under H0: fraction p<0.05 = {np.mean(np.array(pvals_h0)<0.05):.3f} (expected: 0.050)')
print(f'Under H1: fraction p<0.05 = {np.mean(np.array(pvals_h1)<0.05):.3f} (power)')

# Verify uniformity under H0 using KS test
ks, p_uniform = stats.kstest(pvals_h0, 'uniform')
print(f'KS test of H0 p-values vs Uniform: D={ks:.4f}, p={p_uniform:.3f}')
ok = p_uniform > 0.05
print(f"{'PASS' if ok else 'FAIL'} — p-values under H0 are consistent with Uniform(0,1)")

2. Type I and Type II Errors

Visualise the trade-off between $\alpha$ (Type I error rate) and $\beta$ (Type II error rate) for a one-sample z-test. The two sampling distributions overlap; the rejection region (shaded) determines both error rates.

Code cell 7

# === 2.1 Type I / II Error Visualisation ===
# One-sided z-test of H0: mu=0 vs H1: mu=1 with known sigma and n=25.
# The critical value on the x-bar scale splits the two error regions.
mu0, mu1, sigma_z, n_z = 0, 1.0, 1.0, 25
alpha = 0.05
se = sigma_z / np.sqrt(n_z)  # standard error of the sample mean

z_crit = stats.norm.ppf(1 - alpha)  # one-sided
x_crit = mu0 + z_crit * se          # same cutoff expressed in x-bar units

grid = np.linspace(-0.6, 2.0, 500)
density_h0 = stats.norm.pdf(grid, mu0, se)
density_h1 = stats.norm.pdf(grid, mu1, se)

fig, ax = plt.subplots(figsize=(11, 6))
ax.plot(grid, density_h0, color=COLORS['primary'], label=f'H₀: μ={mu0}')
ax.plot(grid, density_h1, color=COLORS['secondary'], label=f'H₁: μ={mu1}')

# Shade alpha: the H0 density mass to the right of the cutoff.
right_tail = grid[grid >= x_crit]
ax.fill_between(right_tail, stats.norm.pdf(right_tail, mu0, se),
                alpha=0.35, color=COLORS['error'], label='Type I error (α)')

# Shade beta: the H1 density mass to the left of the cutoff.
left_tail = grid[grid < x_crit]
ax.fill_between(left_tail, stats.norm.pdf(left_tail, mu1, se),
                alpha=0.35, color=COLORS['tertiary'], label='Type II error (β)')

ax.axvline(x_crit, color=COLORS['neutral'], linestyle='--', label=f'Critical value = {x_crit:.3f}')
ax.set_xlabel('Sample mean $\\bar{X}$')
ax.set_ylabel('Density')
ax.set_title(f'Type I and Type II Errors (n={n_z}, α={alpha}, σ={sigma_z})')
ax.legend()
plt.tight_layout()
plt.show()

# beta = P(fail to reject | H1) = H1 mass below the critical value.
beta = stats.norm.cdf(x_crit, mu1, se)
power = 1 - beta
print(f'Critical value (in x): {x_crit:.3f}')
print(f'Type I error (α):       {alpha:.3f}')
print(f'Type II error (β):       {beta:.3f}')
print(f'Power (1-β):             {power:.3f}')
ok = abs(stats.norm.cdf(z_crit) - (1-alpha)) < 1e-10
print(f"{'PASS' if ok else 'FAIL'} — critical value corresponds to α")

3. Power Curves

Power $\pi(\mu)$ is the probability of rejecting $H_0$ as a function of the true $\mu$. Below we plot power curves for several sample sizes, demonstrating the $\sqrt{n}$ scaling.

Code cell 9

# === 3.1 Power Curves for Different Sample Sizes ===
# Grid of true means and test settings for the power sweep below.
mu_vals = np.linspace(-1.0, 2.5, 300)
alpha_pw = 0.05
sigma_pw = 1.0
sample_sizes = [10, 25, 50, 100]

def power_ztest(mu, mu0=0.0, sigma=1.0, n=25, alpha=0.05):
    """Exact power of the two-sided z-test of H0: mu = mu0 (known sigma).

    Returns P(reject H0) when the true mean is `mu`, for sample size `n`
    at significance level `alpha`. At mu == mu0 this equals alpha (the
    test's size).
    """
    std_err = sigma / np.sqrt(n)
    cutoff = stats.norm.ppf(1 - alpha/2)  # two-sided critical value
    shift = (mu - mu0) / std_err          # non-centrality of the test statistic
    # Rejection probability = both tails of the shifted null distribution.
    return 1 - stats.norm.cdf(cutoff - shift) + stats.norm.cdf(-cutoff - shift)

fig, ax = plt.subplots(figsize=(10, 6))
colors_list = [COLORS['primary'], COLORS['secondary'], COLORS['tertiary'], COLORS['highlight']]

# One power curve per sample size: larger n gives a steeper curve.
for n, col in zip(sample_sizes, colors_list):
    pw = [power_ztest(mu, n=n, alpha=alpha_pw) for mu in mu_vals]
    ax.plot(mu_vals, pw, color=col, label=f'n={n}')

ax.axhline(alpha_pw, color=COLORS['neutral'], linestyle=':', label=f'α={alpha_pw} (size)')
ax.axhline(0.80, color=COLORS['error'], linestyle='--', alpha=0.5, label='80% power target')
ax.axvline(0.0, color=COLORS['neutral'], linestyle='-', alpha=0.3)
ax.set_xlabel('True μ')
ax.set_ylabel('Power π(μ)')
ax.set_title('Power Curves for Two-Sided Z-Test (H₀: μ=0, σ=1)')
ax.legend()
ax.set_ylim(0, 1.05)
plt.tight_layout()
plt.show()

# Sample size to achieve 80% power at d=0.5
# (closed form n = ((z_{alpha/2} + z_{power}) / d)^2 for the one-sample z-test)
d = 0.5
z_a, z_b = stats.norm.ppf(1 - alpha_pw/2), stats.norm.ppf(0.80)
n_req = ((z_a + z_b) / d) ** 2
print(f'Required n for 80% power at d=0.5, α=0.05 (two-sided): {n_req:.1f}')
# NOTE(review): n=64 below is not n_req (≈31.4); presumably an extra
# spot-check at a different sample size — confirm intent.
print(f'Actual power at n=64: {power_ztest(0.5, n=64, alpha=alpha_pw):.3f}')
print('PASS — formula matches power curve' if abs(power_ztest(0.5, n=int(n_req), alpha=alpha_pw) - 0.80) < 0.02 else 'CHECK')

4. Student's t-Test

The t-test handles the realistic case of unknown $\sigma$. We compare the t-distribution tails to the normal and verify that the t-test maintains correct size while being more conservative.

Code cell 11

# === 4.1 t vs. Normal Distribution ===
t_range = np.linspace(-5, 5, 500)
dfs = [3, 10, 30]  # degrees of freedom to compare

fig, axes = plt.subplots(1, 2, figsize=(12, 5))

# PDF comparison: heavier tails at small df, approaching N(0,1) as df grows.
colors_t = [COLORS['primary'], COLORS['secondary'], COLORS['tertiary']]
for df, col in zip(dfs, colors_t):
    axes[0].plot(t_range, stats.t.pdf(t_range, df), color=col, label=f't({df})')
axes[0].plot(t_range, stats.norm.pdf(t_range), 'k--', lw=1.5, label='Normal(0,1)')
axes[0].set_xlabel('t')
axes[0].set_ylabel('Density')
axes[0].set_title('t-Distribution vs. Standard Normal')
axes[0].legend()

# Critical values: t vs z
ns = np.arange(3, 51)
t_crits = [stats.t.ppf(0.975, n-1) for n in ns]  # two-sided 5% critical values
axes[1].plot(ns, t_crits, color=COLORS['secondary'], label='t critical value')
axes[1].axhline(1.96, color='k', linestyle='--', label='z=1.96 (Normal)')
axes[1].set_xlabel('Sample size n')
axes[1].set_ylabel('Critical value (two-sided, α=0.05)')
axes[1].set_title('t Critical Value Converges to 1.96')
axes[1].legend()

plt.tight_layout()
plt.show()

# === 4.2 Perform a one-sample t-test ===
np.random.seed(42)  # reproducible sample
latencies = np.random.normal(47.3, 8.1, 20)  # LLM latency data
t_stat, p_val = stats.ttest_1samp(latencies, popmean=45.0)
print(f'One-sample t-test: H0: mu=45ms, n=20')
print(f'  t = {t_stat:.4f}, p = {p_val:.4f} (two-sided)')
print(f'  one-sided p = {p_val/2:.4f}')  # halving assumes the effect is in the hypothesised direction
print(f'  df = 19, t_crit (one-sided) = {stats.t.ppf(0.95, 19):.3f}')
d_cohen = (latencies.mean() - 45) / latencies.std(ddof=1)  # standardised effect size
print(f'  Cohen d = {d_cohen:.3f}')
print(f"  {'REJECT H0' if p_val/2 < 0.05 else 'FAIL TO REJECT H0'} at alpha=0.05 (one-sided)")

5. Chi-Squared Test

Pearson's $\chi^2$ tests whether observed count data fit a specified distribution (goodness-of-fit) or whether two categorical variables are independent (test of independence).

Code cell 13

# === 5.1 Chi-Squared Goodness-of-Fit ===
# Are the 5 LLM output categories uniformly distributed over 500 samples?
observed = np.array([87, 113, 95, 102, 103])  # 500 total
n_total = observed.sum()
k = len(observed)
expected = np.full(k, n_total / k)  # uniform under H0

chi2_stat, p_chi2 = stats.chisquare(observed, f_exp=expected)
verdict = 'Reject H0' if p_chi2 < 0.05 else 'Fail to reject H0'
print('Chi-squared Goodness-of-Fit Test')
print(f'  H0: uniform distribution across {k} categories')
print(f'  Observed: {observed}')
print(f'  Expected: {expected}')
print(f'  chi2 = {chi2_stat:.4f}, df = {k-1}, p = {p_chi2:.4f}')
print(f'  {verdict} at alpha=0.05')

# Bar chart of observed counts against the uniform expectation.
fig, ax = plt.subplots(figsize=(8, 5))
positions = np.arange(k)
ax.bar(positions, observed, color=COLORS['primary'], alpha=0.8, label='Observed')
ax.plot(positions, expected, 'o--', color=COLORS['error'], ms=8, label='Expected (uniform)')
ax.set_xticks(positions)
ax.set_xticklabels([f'Cat {i+1}' for i in range(k)])
ax.set_ylabel('Count')
ax.set_title(f'Chi-Squared GoF: chi2={chi2_stat:.2f}, p={p_chi2:.3f}')
ax.legend()
plt.tight_layout()
plt.show()

# Cramér's V effect size and a manual recomputation of the statistic.
V = np.sqrt(chi2_stat / (n_total * (k - 1)))
print(f'  Cramér V = {V:.4f} (effect size)')
manual_chi2 = ((observed - expected) ** 2 / expected).sum()
ok = abs(chi2_stat - manual_chi2) < 1e-10
print(f"{'PASS' if ok else 'FAIL'} — manual chi2 matches scipy")

6. One-Way ANOVA

ANOVA tests whether $k$ group means are equal by decomposing total variance into between-group (signal) and within-group (noise) components.

Code cell 15

# === 6.1 One-Way ANOVA: LLM Performance by Task Category ===
np.random.seed(42)
# Simulate scores for 3 task categories
n_per = 30
math_scores = np.random.normal(0.72, 0.10, n_per)
code_scores = np.random.normal(0.80, 0.10, n_per)
text_scores = np.random.normal(0.68, 0.10, n_per)

f_stat, p_anova = stats.f_oneway(math_scores, code_scores, text_scores)

# Manual ANOVA decomposition, verified against scipy below.
all_scores = np.concatenate([math_scores, code_scores, text_scores])
grand_mean = all_scores.mean()
groups = [math_scores, code_scores, text_scores]
group_means = [g.mean() for g in groups]

# Between-group, within-group and total sums of squares.
SSB = sum(n_per * (gm - grand_mean)**2 for gm in group_means)
SSW = sum(((g - gm)**2).sum() for g, gm in zip(groups, group_means))
SST = ((all_scores - grand_mean)**2).sum()

k_anova = 3
N_anova = len(all_scores)
MSB = SSB / (k_anova - 1)        # mean square between (df = k-1)
MSW = SSW / (N_anova - k_anova)  # mean square within  (df = N-k)
F_manual = MSB / MSW

print('One-Way ANOVA Results')
print(f'  Group means: Math={group_means[0]:.3f}, Code={group_means[1]:.3f}, Text={group_means[2]:.3f}')
print(f'  SSB={SSB:.4f}, SSW={SSW:.4f}, SST={SST:.4f}')
print(f'  MSB={MSB:.4f}, MSW={MSW:.4f}')
print(f'  F = {F_manual:.4f} (scipy: {f_stat:.4f}), p = {p_anova:.6f}')
ok1 = abs(F_manual - f_stat) < 1e-8
ok2 = abs(SST - SSB - SSW) < 1e-10
print(f"{'PASS' if ok1 else 'FAIL'} — manual F matches scipy")
print(f"{'PASS' if ok2 else 'FAIL'} — SST = SSB + SSW")
print(f"{'Reject H0 (groups differ)' if p_anova < 0.05 else 'Fail to reject H0'}")

7. Neyman-Pearson Lemma

The NP lemma states that the likelihood ratio test (LRT) is the most powerful size-$\alpha$ test between two simple hypotheses. We verify this empirically: no other test of the same size achieves higher power.

Code cell 17

# === 7.1 Neyman-Pearson: LRT is Most Powerful ===
# Simple-vs-simple Bernoulli test: H0: p=0.5 vs H1: p=0.7, n=20 trials.
n_np = 20
p0, p1 = 0.5, 0.7
alpha_np = 0.05

from scipy.stats import binom

# The likelihood ratio is monotone in X = number of successes, so the
# LRT rejects for large X. Smallest c with P(X >= c | H0) <= alpha:
c_lrt = next(c for c in range(n_np + 1) if binom.sf(c - 1, n_np, p0) <= alpha_np)

size_lrt = binom.sf(c_lrt - 1, n_np, p0)   # exact size
power_lrt = binom.sf(c_lrt - 1, n_np, p1)  # exact power at p1

# Arbitrary comparison test of similar form: reject when X is even.
even_ks = range(0, n_np + 1, 2)
p_even_h0 = sum(binom.pmf(j, n_np, p0) for j in even_ks)
p_even_h1 = sum(binom.pmf(j, n_np, p1) for j in even_ks)

# Power of both tests across the whole range of true p.
p1_vals = np.linspace(0.3, 1.0, 100)
power_lrt_vals = [binom.sf(c_lrt - 1, n_np, p) for p in p1_vals]
power_even_vals = [sum(binom.pmf(j, n_np, p) for j in even_ks) for p in p1_vals]

fig, ax = plt.subplots(figsize=(9, 5))
ax.plot(p1_vals, power_lrt_vals, color=COLORS['primary'], label=f'LRT (reject X>{c_lrt-1})')
ax.plot(p1_vals, power_even_vals, color=COLORS['secondary'], linestyle='--', label='Naive (reject if X even)')
ax.axhline(alpha_np, color=COLORS['neutral'], linestyle=':', label=f'α={alpha_np}')
ax.axvline(p0, color=COLORS['error'], linestyle=':', alpha=0.5)
ax.set_xlabel('True p')
ax.set_ylabel('Power')
ax.set_title(f'NP Lemma: LRT dominates all size-α={alpha_np} tests (n={n_np})')
ax.legend()
plt.tight_layout()
plt.show()

print(f'LRT: critical value c={c_lrt}, size={size_lrt:.4f}, power at p1={p1}={power_lrt:.4f}')
print(f'Naive: size={p_even_h0:.4f}, power={p_even_h1:.4f}')
# Compare the LRT's power to the naive test's at the grid point closest to p1.
ok = power_lrt >= power_even_vals[np.argmin(abs(p1_vals - p1))]
print(f"{'PASS' if ok else 'FAIL'} — LRT has higher power than naive test at p1={p1}")

8. Generalised Likelihood Ratio Test (GLRT)

Wilks' theorem: under $H_0$, $-2\log\Lambda \overset{d}{\to} \chi^2_k$ where $k$ = number of constraints. We verify this by simulation.

Code cell 19

# === 8.1 Wilks' Theorem Verification ===
from scipy.stats import norm, chi2
import numpy as np

# H0: mu=0, sigma=1 (2 constraints); H1: mu, sigma free
# LRT statistic for Gaussian: -2 log Lambda
def glrt_gaussian(x, mu0=0.0, sigma0=1.0):
    """-2 log Lambda for H0: (mu, sigma) = (mu0, sigma0) on Gaussian data.

    The unrestricted alternative fits mu and sigma by maximum likelihood
    (biased MLE, ddof=0). Returns 0.0 for a degenerate zero-variance
    sample, where the unrestricted MLE is ill-defined.
    """
    mle_mean = x.mean()
    mle_std = x.std(ddof=0)
    if mle_std < 1e-12:
        return 0.0
    restricted = norm.logpdf(x, mu0, sigma0).sum()          # log-likelihood under H0
    unrestricted = norm.logpdf(x, mle_mean, mle_std).sum()  # log-likelihood at the MLE
    return -2 * (restricted - unrestricted)

np.random.seed(42)
n_sims_glrt = 5000
n_obs = 50
# Simulate the LRT statistic under H0 (data truly N(0,1)).
lrt_stats = [glrt_gaussian(np.random.normal(0, 1, n_obs)) for _ in range(n_sims_glrt)]

# Compare to chi2_2 (2 constraints: mu and sigma both fixed under H0)
k_constraints = 2
x_chi = np.linspace(0, 15, 300)

fig, ax = plt.subplots(figsize=(9, 5))
ax.hist(lrt_stats, bins=60, density=True, color=COLORS['primary'], alpha=0.7,
        edgecolor='white', label=f'-2 log Λ (simulated, n={n_obs})')
ax.plot(x_chi, chi2.pdf(x_chi, k_constraints), color=COLORS['secondary'],
        linewidth=2.5, label=f'χ²({k_constraints}) (Wilks theory)')
ax.set_xlabel('-2 log Λ')
ax.set_ylabel('Density')
ax.set_title("Wilks' Theorem: -2 log Λ ~ χ²(2) under H₀")
ax.legend()
plt.tight_layout()
plt.show()

# KS test to verify chi2_2 fit
ks_w, p_w = stats.kstest(lrt_stats, 'chi2', args=(k_constraints,))
print(f'KS test vs chi2(2): D={ks_w:.4f}, p={p_w:.4f}')
ok = p_w > 0.01  # should not reject chi2 fit
print(f"{'PASS' if ok else 'FAIL'} — LRT statistics follow chi2({k_constraints}) under H0")

# Type I error check: rejection rate at the chi2 5% critical value.
crit_chi2 = chi2.ppf(0.95, k_constraints)
alpha_actual = np.mean(np.array(lrt_stats) > crit_chi2)
print(f'Type I error at alpha=0.05: {alpha_actual:.4f} (expected 0.05)')

9. Multiple Testing: FWER Inflation

Conducting $m$ tests simultaneously inflates the family-wise error rate (FWER). We compare: no correction, Bonferroni, Holm, and Benjamini-Hochberg.

Code cell 21

# === 9.1 Multiple Testing Simulation ===
# Compare FWER / FDR / power of no correction, Bonferroni, Holm and BH
# across repeated simulated studies with m0 true nulls and m1 true effects.
# Fix: compute_metrics was re-defined inside the simulation loop on every
# iteration and carried an unused m0_cnt parameter; it is now hoisted
# (no RNG calls moved, so the simulated stream is unchanged).

np.random.seed(42)
m_tests = 50
m0 = 45  # true nulls
m1 = 5   # true alternatives
n_sims_mt = 2000
alpha_mt = 0.05

results = {'none': {'fwer': [], 'fdr': [], 'power': []},
           'bonf': {'fwer': [], 'fdr': [], 'power': []},
           'holm': {'fwer': [], 'fdr': [], 'power': []},
           'bh':   {'fwer': [], 'fdr': [], 'power': []}}


def compute_metrics(reject, truth_arr):
    """Per-simulation FWER indicator, FDR and power for one rejection mask."""
    fp = (reject & ~truth_arr).sum()   # false positives: true nulls rejected
    tp = (reject & truth_arr).sum()    # true positives
    r = reject.sum()
    fwer = fp > 0                      # any false rejection in this family?
    fdr = fp / max(r, 1)               # false-discovery proportion (0 when nothing rejected)
    power = tp / m1 if m1 > 0 else 0   # recall of the true alternatives
    return fwer, fdr, power


for _ in range(n_sims_mt):
    pvals = np.empty(m_tests)
    pvals[:m0] = np.random.uniform(0, 1, m0)  # null: uniform
    # alternative: Beta(0.2, 1) gives small p-values
    pvals[m0:] = np.random.beta(0.2, 1, m1)

    truth = np.array([False]*m0 + [True]*m1)  # True = alternative is true

    # No correction
    r_none = pvals < alpha_mt
    fw, fd, pw = compute_metrics(r_none, truth)
    results['none']['fwer'].append(fw); results['none']['fdr'].append(fd); results['none']['power'].append(pw)

    # Bonferroni
    r_bonf, *_ = multipletests_local(pvals, alpha=alpha_mt, method='bonferroni')
    fw, fd, pw = compute_metrics(np.array(r_bonf), truth)
    results['bonf']['fwer'].append(fw); results['bonf']['fdr'].append(fd); results['bonf']['power'].append(pw)

    # Holm
    r_holm, *_ = multipletests_local(pvals, alpha=alpha_mt, method='holm')
    fw, fd, pw = compute_metrics(np.array(r_holm), truth)
    results['holm']['fwer'].append(fw); results['holm']['fdr'].append(fd); results['holm']['power'].append(pw)

    # BH
    r_bh, *_ = multipletests_local(pvals, alpha=alpha_mt, method='fdr_bh')
    fw, fd, pw = compute_metrics(np.array(r_bh), truth)
    results['bh']['fwer'].append(fw); results['bh']['fdr'].append(fd); results['bh']['power'].append(pw)

methods = ['none', 'bonf', 'holm', 'bh']
labels = ['No correction', 'Bonferroni', 'Holm', 'BH (FDR)']
metrics = ['fwer', 'fdr', 'power']
col_list = [COLORS['error'], COLORS['primary'], COLORS['tertiary'], COLORS['secondary']]

# Bar chart of each metric averaged over all simulations, per method.
fig, axes = plt.subplots(1, 3, figsize=(14, 5))
metric_labels = ['FWER', 'FDR', 'Power (recall)']
for ax, metric, mlabel in zip(axes, metrics, metric_labels):
    vals = [np.mean(results[m][metric]) for m in methods]
    bars = ax.bar(labels, vals, color=col_list, alpha=0.8, edgecolor='white')
    ax.set_title(mlabel)
    ax.set_ylim(0, 1.05)
    ax.axhline(0.05, color='k', linestyle='--', alpha=0.4, label='α=0.05')
    for bar, v in zip(bars, vals):
        ax.text(bar.get_x() + bar.get_width()/2, v + 0.01, f'{v:.2f}', ha='center', fontsize=10)
    ax.tick_params(axis='x', rotation=20)

plt.suptitle(f'Multiple Testing: m={m_tests}, m0={m0}, m1={m1} (n_sims={n_sims_mt})', fontsize=13)
plt.tight_layout()
plt.show()

for method, label in zip(methods, labels):
    print(f'{label:20s}: FWER={np.mean(results[method]["fwer"]):.3f}, '
          f'FDR={np.mean(results[method]["fdr"]):.3f}, Power={np.mean(results[method]["power"]):.3f}')
ok = np.mean(results['bonf']['fwer']) <= 0.05 + 0.01
print(f"{'PASS' if ok else 'FAIL'} — Bonferroni controls FWER at 0.05")

10. Welch Two-Sample t-Test

The Welch t-test compares means of two independent groups with unknown, potentially unequal variances. It is more robust than the pooled t-test.

Code cell 23

# === 10.1 Welch vs. Pooled t-Test ===
np.random.seed(42)

# Two groups: different means and different variances
group_A = np.random.normal(72.0, 8.0, 40)  # model A scores
group_B = np.random.normal(68.5, 15.0, 30)  # model B scores (higher variance)

# Welch t-test (does not assume equal variances)
t_welch, p_welch = stats.ttest_ind(group_A, group_B, equal_var=False)

# Pooled t-test (assumes equal variances — violated here by construction)
t_pool, p_pool = stats.ttest_ind(group_A, group_B, equal_var=True)

print('Two-Sample t-Test Comparison')
print(f'  Group A: n=40, mean={group_A.mean():.2f}, std={group_A.std(ddof=1):.2f}')
print(f'  Group B: n=30, mean={group_B.mean():.2f}, std={group_B.std(ddof=1):.2f}')
print(f'  Welch:  t={t_welch:.4f}, p={p_welch:.4f}')
print(f'  Pooled: t={t_pool:.4f}, p={p_pool:.4f}')

# Show distribution of group scores with dashed lines at the group means.
fig, ax = plt.subplots(figsize=(9, 5))
ax.hist(group_A, bins=15, density=True, alpha=0.6, color=COLORS['primary'], label=f'Model A (n=40)')
ax.hist(group_B, bins=15, density=True, alpha=0.6, color=COLORS['secondary'], label=f'Model B (n=30)')
ax.axvline(group_A.mean(), color=COLORS['primary'], linestyle='--')
ax.axvline(group_B.mean(), color=COLORS['secondary'], linestyle='--')
ax.set_xlabel('Score')
ax.set_ylabel('Density')
ax.set_title(f'Welch t-test: t={t_welch:.3f}, p={p_welch:.4f}')
ax.legend()
plt.tight_layout()
plt.show()

# Satterthwaite df: the Welch-Satterthwaite approximation for the
# effective degrees of freedom under unequal variances.
s1, s2, n1, n2 = group_A.std(ddof=1), group_B.std(ddof=1), len(group_A), len(group_B)
nu = (s1**2/n1 + s2**2/n2)**2 / ((s1**2/n1)**2/(n1-1) + (s2**2/n2)**2/(n2-1))
print(f'  Satterthwaite df: {nu:.2f}')
print(f"  Decision: {'Reject H0 (means differ)' if p_welch < 0.05 else 'Fail to reject H0'}")

11. KS Test for Data Drift Detection

The two-sample Kolmogorov-Smirnov test compares ECDFs of two distributions. We use it to detect distributional shift in LLM feature distributions.

Code cell 25

# === 11.1 KS Test: Data Drift Detection ===
# Compare a reference feature distribution to three production scenarios
# with the two-sample KS test; the t-test is shown alongside to expose
# its blind spot for variance-only drift.
# Fix: removed the unused `x_range` array (dead code).
np.random.seed(42)
n_ref, n_prod = 5000, 1000

# Reference (training) distribution
ref_data = np.random.normal(256, 80, n_ref)

# Three production scenarios
prod_no_drift    = np.random.normal(256, 80, n_prod)  # no change
prod_mean_drift  = np.random.normal(310, 80, n_prod)  # mean shift
prod_var_drift   = np.random.normal(256, 150, n_prod) # variance shift only

scenarios = [
    ('No drift', prod_no_drift),
    ('Mean drift (+54)', prod_mean_drift),
    ('Variance drift (σ: 80→150)', prod_var_drift),
]

print('KS Drift Detection Results:')
print(f'  Reference: mu={ref_data.mean():.1f}, sigma={ref_data.std():.1f}')

fig, axes = plt.subplots(1, 3, figsize=(15, 5))

# One panel per scenario: overlay the two ECDFs and report both tests.
for ax, (name, prod) in zip(axes, scenarios):
    ks_stat, p_ks = stats.ks_2samp(ref_data, prod)
    t_stat_drift, p_t = stats.ttest_ind(ref_data, prod)

    # ECDF: sorted values vs cumulative fraction.
    ref_sorted = np.sort(ref_data)
    prod_sorted = np.sort(prod)
    ax.plot(ref_sorted, np.arange(1, n_ref+1)/n_ref, color=COLORS['primary'], lw=1.5, label='Reference')
    ax.plot(prod_sorted, np.arange(1, n_prod+1)/n_prod, color=COLORS['secondary'], lw=1.5, label='Production')
    ax.set_title(f'{name}\nKS D={ks_stat:.3f} p={p_ks:.4f} | t-test p={p_t:.4f}')
    ax.set_xlabel('Value')
    ax.set_ylabel('ECDF')
    ax.legend(fontsize=9)

    print(f'  {name}: KS D={ks_stat:.4f}, KS p={p_ks:.4f}, t-test p={p_t:.4f}')

plt.suptitle('KS Test: Drift Detection via ECDF Comparison', fontsize=13)
plt.tight_layout()
plt.show()

# Variance-only drift: KS detects it, t-test misses it (t compares means only).
ks_var, p_ks_var = stats.ks_2samp(ref_data, prod_var_drift)
_, p_t_var = stats.ttest_ind(ref_data, prod_var_drift)
print(f'Variance drift: KS p={p_ks_var:.6f} (detects), t-test p={p_t_var:.4f} (misses)')
ok = (p_ks_var < 0.05) and (p_t_var > 0.05)
print(f"{'PASS' if ok else 'FAIL'} — KS detects variance drift that t-test misses")

12. Permutation Test

A permutation test constructs the exact null distribution by relabelling observations. We compare it to the Welch t-test and show it has better calibration for small samples.

Code cell 27

# === 12.1 Permutation Test: LLM Score Comparison ===
np.random.seed(42)
n1_perm, n2_perm = 30, 30
scores_A = np.random.normal(0.72, 0.10, n1_perm)
scores_B = np.random.normal(0.68, 0.10, n2_perm)

obs_diff = scores_A.mean() - scores_B.mean()

# Permutation test: shuffle group labels to build the null distribution
# of the mean difference without distributional assumptions.
combined = np.concatenate([scores_A, scores_B])
n_perm_B = 10_000
perm_diffs = np.empty(n_perm_B)
for i in range(n_perm_B):
    perm = np.random.permutation(combined)
    perm_diffs[i] = perm[:n1_perm].mean() - perm[n1_perm:].mean()

# Two-sided p-value: fraction of permuted diffs at least as extreme.
p_perm = (np.abs(perm_diffs) >= np.abs(obs_diff)).mean()

# Welch t-test for comparison
t_w, p_w = stats.ttest_ind(scores_A, scores_B, equal_var=False)

fig, ax = plt.subplots(figsize=(9, 5))
ax.hist(perm_diffs, bins=80, density=True, color=COLORS['primary'], alpha=0.7,
        edgecolor='white', label='Permutation null distribution')
ax.axvline(obs_diff, color=COLORS['error'], linewidth=2.5,
           label=f'Observed diff = {obs_diff:.4f}')
ax.axvline(-obs_diff, color=COLORS['error'], linewidth=2.5, linestyle='--')
ax.set_xlabel('Mean difference (A - B)')
ax.set_ylabel('Density')
ax.set_title(f'Permutation Test: p={p_perm:.4f} | Welch t-test: p={p_w:.4f}')
ax.legend()
plt.tight_layout()
plt.show()

print(f'Observed mean difference: {obs_diff:.4f}')
print(f'Permutation p-value:      {p_perm:.4f}')
print(f'Welch t-test p-value:     {p_w:.4f}')
ok = abs(p_perm - p_w) < 0.05  # should agree closely
print(f"{'PASS' if ok else 'NOTE'} — permutation and t-test agree (typical for normal data)")

13. Sequential Probability Ratio Test (SPRT)

Wald's SPRT enables valid early stopping in online A/B tests. It guarantees error bounds while minimising expected sample size.

Code cell 29

# === 13.1 SPRT for Bernoulli A/B Test ===
np.random.seed(42)
p0_sprt, p1_sprt = 0.50, 0.60  # H0 and H1 proportions
alpha_sprt, beta_sprt = 0.05, 0.20

# Wald's decision boundaries on the log-likelihood-ratio scale.
A_bound = np.log((1 - beta_sprt) / alpha_sprt)
B_bound = np.log(beta_sprt / (1 - alpha_sprt))
print(f'SPRT boundaries: A (reject H0) = {A_bound:.3f}, B (accept H0) = {B_bound:.3f}')

def run_sprt(true_p, p0, p1, A, B, max_n=3000):
    llr = 0.0
    for n in range(1, max_n + 1):
        x = np.random.binomial(1, true_p)
        if x == 1:
            llr += np.log(p1 / p0)
        else:
            llr += np.log((1-p1) / (1-p0))
        if llr >= A:
            return n, 'reject'
        if llr <= B:
            return n, 'accept'
    return max_n, 'undecided'

# Simulate one SPRT run under H1 and plot the LLR path.
np.random.seed(42)
llr_path = [0.0]
x_path = np.random.binomial(1, p1_sprt, 2000)  # pre-drawn Bernoulli stream
for xi in x_path:
    delta = np.log(p1_sprt/p0_sprt) if xi == 1 else np.log((1-p1_sprt)/(1-p0_sprt))
    llr_path.append(llr_path[-1] + delta)
    if llr_path[-1] >= A_bound or llr_path[-1] <= B_bound:
        break  # stop at the first boundary crossing

fig, ax = plt.subplots(figsize=(10, 5))
ax.plot(llr_path, color=COLORS['primary'], lw=1.5, label='Log-likelihood ratio')
ax.axhline(A_bound, color=COLORS['error'], linestyle='--', label=f'Reject H₀ boundary ({A_bound:.2f})')
ax.axhline(B_bound, color=COLORS['tertiary'], linestyle='--', label=f'Accept H₀ boundary ({B_bound:.2f})')
ax.axhline(0, color=COLORS['neutral'], linestyle=':', alpha=0.5)
ax.set_xlabel('Observation number')
ax.set_ylabel('Log-likelihood ratio')
ax.set_title(f'SPRT Path (true p={p1_sprt}, stopped at n={len(llr_path)-1})')
ax.legend()
plt.tight_layout()
plt.show()

# Type I error rate under H0 (data generated at p0, same boundaries)
np.random.seed(42)
n_trials = 2000
decisions_h0 = [run_sprt(p0_sprt, p0_sprt, p1_sprt, A_bound, B_bound) for _ in range(n_trials)]
false_positives = sum(1 for _, d in decisions_h0 if d == 'reject')
mean_n_h0 = np.mean([n for n, _ in decisions_h0])

# Power under H1 (data generated at p1)
decisions_h1 = [run_sprt(p1_sprt, p0_sprt, p1_sprt, A_bound, B_bound) for _ in range(n_trials)]
true_positives = sum(1 for _, d in decisions_h1 if d == 'reject')
mean_n_h1 = np.mean([n for n, _ in decisions_h1])

print(f'Under H0: Type I error = {false_positives/n_trials:.4f} (target ≤ {alpha_sprt})')
print(f'Under H1: Power = {true_positives/n_trials:.4f} (target ≥ {1-beta_sprt})')
print(f'Mean stopping time under H0: {mean_n_h0:.0f}')
print(f'Mean stopping time under H1: {mean_n_h1:.0f}')
ok = (false_positives/n_trials) <= alpha_sprt + 0.01
print(f"{'PASS' if ok else 'FAIL'} — SPRT controls Type I error at {alpha_sprt}")

14. Benjamini-Hochberg Procedure Visualised

The BH step-up procedure finds the largest $k$ such that $p_{(k)} \leq k\alpha/m$. We visualise the BH threshold line and the discoveries.

Code cell 31

# === 14.1 BH Procedure Visualisation ===
np.random.seed(42)
m_bh = 50
m0_bh = 40  # 40 true nulls
alpha_bh = 0.05

# Simulate p-values: nulls ~ Uniform, alternatives ~ Beta(0.2, 1)
pvals_null = np.random.uniform(0, 1, m0_bh)
pvals_alt  = np.random.beta(0.2, 1.0, m_bh - m0_bh)
all_pvals  = np.concatenate([pvals_null, pvals_alt])
truth_bh   = np.array([False]*m0_bh + [True]*(m_bh-m0_bh))

# Sort p-values; keep the matching truth labels for colouring the dots.
order = np.argsort(all_pvals)
sorted_pvals = all_pvals[order]
sorted_truth = truth_bh[order]

# BH thresholds: the step-up line i * alpha / m at rank i.
ranks = np.arange(1, m_bh + 1)
bh_thresholds = ranks * alpha_bh / m_bh

# Find rejection cut-off: largest rank whose p-value sits under the line;
# everything up to that rank is rejected.
bh_reject_mask = sorted_pvals <= bh_thresholds
if bh_reject_mask.any():
    k_bh = np.max(np.where(bh_reject_mask)[0]) + 1
else:
    k_bh = 0

fig, ax = plt.subplots(figsize=(11, 6))
colors_pts = [COLORS['tertiary'] if t else COLORS['neutral'] for t in sorted_truth]
ax.scatter(ranks, sorted_pvals, c=colors_pts, s=50, zorder=3, label='p-values (green=true alt)')
ax.plot(ranks, bh_thresholds, color=COLORS['secondary'], lw=2, label=f'BH threshold (rank×{alpha_bh}/{m_bh})')
ax.axhline(alpha_bh/m_bh, color=COLORS['error'], lw=1.5, linestyle=':', label=f'Bonferroni ({alpha_bh/m_bh:.4f})')

if k_bh > 0:
    ax.axvline(k_bh, color=COLORS['highlight'], lw=2, linestyle='--', label=f'BH cutoff (k={k_bh})')

ax.set_xlabel('Rank (i)')
ax.set_ylabel('p-value')
ax.set_title(f'BH Procedure: m={m_bh}, m0={m0_bh}, q={alpha_bh}, Discoveries={k_bh}')
ax.legend()
plt.tight_layout()
plt.show()

# Cross-check the plot against the local multipletests implementation.
reject_bh, pvals_adj, _, _ = multipletests_local(all_pvals, alpha=alpha_bh, method='fdr_bh')
tp_bh = (reject_bh & truth_bh).sum()
fp_bh = (reject_bh & ~truth_bh).sum()
print(f'BH discoveries: {reject_bh.sum()} (TP={tp_bh}, FP={fp_bh})')
reject_bonf, *_ = multipletests_local(all_pvals, alpha=alpha_bh, method='bonferroni')
print(f'Bonferroni discoveries: {reject_bonf.sum()}')
ok = reject_bh.sum() >= reject_bonf.sum()  # BH >= Bonferroni always
print(f"{'PASS' if ok else 'FAIL'} — BH discovers at least as many as Bonferroni")

15. Sample Size Planning

How many test examples do you need to reliably detect a given accuracy improvement? We compute and visualise sample size requirements for model comparisons.

Code cell 33

# === 15.1 Sample Size vs Effect Size Heatmap ===
from scipy.stats import norm
import numpy as np

def n_two_prop(p1, p2, alpha=0.05, power=0.80):
    """Required sample size per group for a two-proportion z-test.

    Standard normal-approximation formula: the null standard error uses
    the pooled proportion, the alternative standard error uses p1 and p2
    separately.

    Parameters
    ----------
    p1, p2 : float
        Hypothesised proportions in the two groups (must differ).
    alpha : float
        Two-sided significance level.
    power : float
        Target power (1 - beta).

    Returns
    -------
    int
        Minimum n per group (at least 1).

    Raises
    ------
    ValueError
        If p1 == p2 — the required n would be infinite (previously this
        crashed with an uninformative ZeroDivisionError).
    """
    if p1 == p2:
        raise ValueError('p1 and p2 must differ: required n is infinite')
    pbar = (p1 + p2) / 2
    z_a = norm.ppf(1 - alpha/2)  # two-sided critical value
    z_b = norm.ppf(power)        # quantile for the target power
    num = (z_a * (2*pbar*(1-pbar))**0.5 + z_b * (p1*(1-p1)+p2*(1-p2))**0.5)**2
    return max(1, int(np.ceil(num / (p1-p2)**2)))

# Grid of baseline accuracies (rows) vs accuracy gaps (columns).
baselines = [0.70, 0.75, 0.80, 0.85, 0.90]
gaps      = [0.01, 0.02, 0.03, 0.05, 0.10]

# Required n per group for every (baseline, gap) combination.
ns = np.array([[n_two_prop(b+g, b) for g in gaps] for b in baselines])

fig, ax = plt.subplots(figsize=(9, 6))
# log10 colour scale: required n spans several orders of magnitude.
im = ax.imshow(np.log10(ns), cmap='Blues', aspect='auto')
ax.set_xticks(range(len(gaps)))
ax.set_xticklabels([f'+{g*100:.0f}%' for g in gaps])
ax.set_yticks(range(len(baselines)))
ax.set_yticklabels([f'{b*100:.0f}%' for b in baselines])
ax.set_xlabel('Accuracy Gap (p1 - p2)')
ax.set_ylabel('Baseline Accuracy (p2)')
ax.set_title('Required n per Group (log₁₀ scale, α=0.05, power=80%)')

# Annotate each cell; switch to white text on dark (large-n) cells.
for i in range(len(baselines)):
    for j in range(len(gaps)):
        n_val = ns[i, j]
        ax.text(j, i, f'{n_val:,}' if n_val < 10000 else f'{n_val//1000}k',
                ha='center', va='center', fontsize=9,
                color='white' if ns[i,j] > 3000 else 'black')

plt.colorbar(im, ax=ax, label='log₁₀(n)')
plt.tight_layout()
plt.show()

# Key insight
# With typical benchmark sizes (~1k-3k items), only gaps of a few percent
# are detectable at 80% power — a 1% gap needs tens of thousands of items.
n_1pct = n_two_prop(0.86, 0.85)
n_2pct = n_two_prop(0.87, 0.85)
print(f'To detect 1% gap (85% vs 86%): n={n_1pct:,} per group')
print(f'To detect 2% gap (85% vs 87%): n={n_2pct:,} per group')
print(f'Many benchmarks have 1000-3000 items: only detectable gap is ≥3-5%')

16. McNemar's Test for Paired Model Comparison

When two models are evaluated on the same test examples, McNemar's test uses only discordant pairs — far more powerful than a naive proportion test.

Code cell 35

# === 16.1 McNemar vs. Two-Proportion Z-Test ===
# Both models answer the SAME examples, so outcomes are paired. McNemar's
# test uses only the discordant counts (n10, n01); the naive z-test ignores
# the pairing and wastes power.
np.random.seed(42)
n_mcn = 1000
# Joint outcome probabilities; marginals: model A correct 76%, model B 74%.
p_both = 0.60   # P(both correct)
p_A_only = 0.16  # P(A correct, B wrong)
p_B_only = 0.14  # P(B correct, A wrong)
p_neither = 0.10 # P(both wrong)

cats = np.random.choice(4, size=n_mcn,
    p=[p_both, p_A_only, p_B_only, p_neither])
n11 = (cats == 0).sum()  # both correct
n10 = (cats == 1).sum()  # A correct, B wrong
n01 = (cats == 2).sum()  # B correct, A wrong
n00 = (cats == 3).sum()  # both wrong

acc_A = (n11 + n10) / n_mcn
acc_B = (n11 + n01) / n_mcn

# McNemar's test: chi2_1 statistic on the discordant pairs only.
# Guard the degenerate all-concordant case (n10 + n01 == 0), where the
# statistic is undefined — same guard as the pipeline cell uses below.
if n10 + n01 > 0:
    chi2_mcn = (n10 - n01)**2 / (n10 + n01)
    p_mcn = 1 - stats.chi2.cdf(chi2_mcn, df=1)
else:
    chi2_mcn, p_mcn = 0.0, 1.0  # no discordant pairs: no evidence either way

# Naive two-proportion z-test (ignores pairing)
pbar_naive = (acc_A + acc_B) / 2
se_naive = (2 * pbar_naive * (1-pbar_naive) / n_mcn)**0.5
z_naive = (acc_A - acc_B) / se_naive
p_naive = 2 * (1 - stats.norm.cdf(abs(z_naive)))

# Visualise contingency table
table = np.array([[n11, n10], [n01, n00]])
fig, ax = plt.subplots(figsize=(7, 5))
im = ax.imshow(table, cmap='Blues')
ax.set_xticks([0, 1]); ax.set_xticklabels(['B correct', 'B wrong'])
ax.set_yticks([0, 1]); ax.set_yticklabels(['A correct', 'A wrong'])
for i in range(2):
    for j in range(2):
        ax.text(j, i, f'n={table[i,j]}', ha='center', va='center', fontsize=13, color='black')
ax.set_title(f'McNemar: chi2={chi2_mcn:.2f}, p={p_mcn:.4f}\n'
             f'z-test: p={p_naive:.4f}\nAcc A={acc_A:.3f}, B={acc_B:.3f}')
plt.colorbar(im)
plt.tight_layout()
plt.show()

print(f'Acc A={acc_A:.3f}, Acc B={acc_B:.3f}, Diff={acc_A-acc_B:.3f}')
print(f'n11={n11}, n10={n10}, n01={n01}, n00={n00}')
print(f'McNemar chi2={chi2_mcn:.4f}, p={p_mcn:.4f}')
print(f'Naive z-test p={p_naive:.4f}')
print(f'Discordant pairs: {n10+n01} out of {n_mcn} pairs')

17. Mann-Whitney U and AUC Connection

The Mann-Whitney U statistic, scaled by $n_1 n_2$, is exactly the empirical AUC: $U/(n_1 n_2) = \hat{P}(X > Y)$ estimated from the two samples. Testing $\mathrm{AUC} = 0.5$ is therefore equivalent to testing $P(X > Y) = 0.5$.

Code cell 37

# === 17.1 Mann-Whitney = AUC Test ===
np.random.seed(42)
n1_mw, n2_mw = 80, 80

# Simulate two groups
group1 = np.random.normal(0.65, 0.15, n1_mw)  # higher scores
group2 = np.random.normal(0.55, 0.15, n2_mw)  # lower scores

# Mann-Whitney test
u_stat, p_mw = stats.mannwhitneyu(group1, group2, alternative='two-sided')

# Empirical AUC = U / (n1 * n2)
auc_empirical = u_stat / (n1_mw * n2_mw)

# Brute-force AUC computation
# O(n1*n2) count of pairs with g1 > g2; matches U/(n1*n2) exactly here
# because continuous normal draws have no ties.
auc_brute = np.mean([g1 > g2 for g1 in group1 for g2 in group2])

print(f'Mann-Whitney U statistic: {u_stat:.1f}')
print(f'Empirical AUC (U/n1n2):   {auc_empirical:.4f}')
print(f'Brute-force AUC:           {auc_brute:.4f}')
print(f'Mann-Whitney p-value:      {p_mw:.6f}')

ok1 = abs(auc_empirical - auc_brute) < 1e-10
print(f"{'PASS' if ok1 else 'FAIL'} — U/n1n2 equals brute-force AUC")

# Visualise
fig, axes = plt.subplots(1, 2, figsize=(12, 5))

axes[0].hist(group1, bins=20, density=True, alpha=0.6, color=COLORS['primary'], label='Group 1')
axes[0].hist(group2, bins=20, density=True, alpha=0.6, color=COLORS['secondary'], label='Group 2')
axes[0].set_title(f'Distributions (AUC={auc_empirical:.3f}, p={p_mw:.4f})')
axes[0].set_xlabel('Score')
axes[0].legend()

# Power simulation: AUC vs p-value relationship
# For each mean shift delta, average the AUC and the p-value over 500
# replications; the right panel plots one against the other.
np.random.seed(42)
deltas = np.linspace(0, 0.5, 30)
mean_aucs, mean_pvals = [], []
for delta in deltas:
    aucs_sim, pvals_sim = [], []
    for _ in range(500):
        g1 = np.random.normal(0.5 + delta, 0.15, 50)
        g2 = np.random.normal(0.5, 0.15, 50)
        u_s, p_s = stats.mannwhitneyu(g1, g2, alternative='two-sided')
        aucs_sim.append(u_s / (50*50))
        pvals_sim.append(p_s)
    mean_aucs.append(np.mean(aucs_sim))
    mean_pvals.append(np.mean(pvals_sim))

axes[1].plot(mean_aucs, mean_pvals, color=COLORS['primary'], lw=2)
axes[1].axhline(0.05, color=COLORS['error'], linestyle='--', label='α=0.05')
axes[1].axvline(0.5, color=COLORS['neutral'], linestyle=':', alpha=0.5)
axes[1].set_xlabel('Mean AUC')
axes[1].set_ylabel('Mean p-value')
axes[1].set_title('p-value decreases as AUC departs from 0.5')
axes[1].legend()

plt.tight_layout()
plt.show()

18. Trinity of Asymptotic Tests

The Wald, Score (Rao), and Likelihood Ratio tests are asymptotically equivalent but can differ substantially for small samples. We compare all three.

Code cell 39

# === 18.1 Wald, Score, LRT for Bernoulli ===
# H0: p = 0.5 vs H1: p != 0.5
np.random.seed(42)
ns_trinity = [10, 20, 50, 100]
p_true = 0.7  # true p under H1
p0_trinity = 0.5
n_sim_trinity = 3000

def trinity_tests(x, p0):
    """Two-sided p-values for H0: p = p0 from a Bernoulli sample.

    Parameters
    ----------
    x : array of {0, 1}
        Bernoulli observations.
    p0 : float
        Null success probability (0 < p0 < 1).

    Returns
    -------
    (p_wald, p_score, p_lrt) : tuple of floats, or (None, None, None)
        for degenerate samples (all 0s or all 1s), where the Wald
        variance and the LRT log-likelihood are undefined.
    """
    n_t = len(x)
    phat = x.mean()
    if phat <= 0 or phat >= 1:
        # MLE on the boundary: Wald variance is 0 and log(phat) or
        # log(1-phat) diverges, so no finite statistics exist.
        return None, None, None
    # Wald: variance estimated at the MLE phat; statistic ~ chi2_1.
    var_wald = phat * (1 - phat) / n_t
    W = (phat - p0)**2 / var_wald
    p_wald = 1 - stats.chi2.cdf(W, 1)
    # Score: variance evaluated at the NULL p0 (z form; squaring the z
    # gives the equivalent chi2_1 statistic).
    score = (phat - p0) / np.sqrt(p0*(1-p0)/n_t)
    p_score = 2 * (1 - stats.norm.cdf(abs(score)))
    # LRT: -2 log(L0/L1) ~ chi2_1 by Wilks' theorem. The boundary guard
    # above guarantees 0 < phat < 1, so the logs are finite — the
    # original re-check of that condition here was unreachable dead code.
    ll_h0 = n_t * (phat * np.log(p0) + (1-phat) * np.log(1-p0))
    ll_h1 = n_t * (phat * np.log(phat) + (1-phat) * np.log(1-phat))
    lrt_stat = -2 * (ll_h0 - ll_h1)
    p_lrt = 1 - stats.chi2.cdf(lrt_stat, 1)
    return p_wald, p_score, p_lrt

# Scatter Wald and Score p-values against the LRT p-value under H0, one
# panel per n: the clouds tighten onto the diagonal as n grows, showing
# the asymptotic equivalence of the three tests.
fig, axes = plt.subplots(1, 4, figsize=(16, 5))
for ax, n_t in zip(axes, ns_trinity):
    pw_list, ps_list, pl_list = [], [], []
    for _ in range(n_sim_trinity):
        x = np.random.binomial(1, p0_trinity, n_t)  # generate under H0
        pw, ps, pl = trinity_tests(x, p0_trinity)
        # Degenerate samples (all 0s / all 1s) return None and are skipped.
        if pw is not None and ps is not None and pl is not None:
            pw_list.append(pw); ps_list.append(ps); pl_list.append(pl)
    ax.scatter(pl_list, pw_list, alpha=0.1, s=5, color=COLORS['primary'], label='Wald vs LRT')
    ax.scatter(pl_list, ps_list, alpha=0.1, s=5, color=COLORS['secondary'], label='Score vs LRT')
    ax.plot([0,1],[0,1],'k--',lw=0.8)
    ax.set_xlabel('LRT p-value')
    ax.set_ylabel('Test p-value')
    ax.set_title(f'n={n_t}')
    if n_t == ns_trinity[0]:
        ax.legend(fontsize=8, markerscale=3)

plt.suptitle('Wald vs. Score vs. LRT p-values (under H₀)', fontsize=13)
plt.tight_layout()
plt.show()

# Quantify agreement: correlation of each test's p-values with the LRT's.
print('Agreement (correlation of p-values) at various n:')
for n_t in ns_trinity:
    pw_l, ps_l, pl_l = [], [], []
    for _ in range(500):
        x = np.random.binomial(1, p0_trinity, n_t)
        pw, ps, pl = trinity_tests(x, p0_trinity)
        # Explicit None checks (matching the plotting cell above): a valid
        # p-value of exactly 0.0 is falsy, so the original truthiness test
        # `if pw and ps and pl` would have silently dropped it.
        if pw is not None and ps is not None and pl is not None:
            pw_l.append(pw); ps_l.append(ps); pl_l.append(pl)
    r_wl = np.corrcoef(pw_l, pl_l)[0,1]
    r_sl = np.corrcoef(ps_l, pl_l)[0,1]
    print(f'  n={n_t}: corr(Wald,LRT)={r_wl:.3f}, corr(Score,LRT)={r_sl:.3f}')

19. Effect Size vs. Statistical Significance

With a large enough sample size $n$, even trivially small effects become statistically significant. This is why reporting an effect size alongside the p-value is essential.

Code cell 41

# === 19.1 Large n Makes Trivial Effects Significant ===
np.random.seed(42)
true_diff = 0.001  # 0.1% accuracy improvement
sigma_eff = 0.1
ns_eff = [100, 500, 1000, 5000, 10000, 50000]

# One Welch t-test per sample size. NOTE: each n uses an independent fresh
# draw, so the observed p-value sequence is noisy rather than monotone.
pvals_eff, t_stats_eff = [], []
for n in ns_eff:
    grp1 = np.random.normal(0.851, sigma_eff, n)
    grp2 = np.random.normal(0.850, sigma_eff, n)
    t, p = stats.ttest_ind(grp1, grp2, equal_var=False)
    pvals_eff.append(p)
    t_stats_eff.append(t)

fig, axes = plt.subplots(1, 2, figsize=(12, 5))

# Left panel: simulated p-values vs n for the fixed tiny effect.
axes[0].semilogx(ns_eff, pvals_eff, 'o-', color=COLORS['primary'], lw=2)
axes[0].axhline(0.05, color=COLORS['error'], linestyle='--', label='α=0.05')
axes[0].set_xlabel('Sample size n per group')
axes[0].set_ylabel('p-value')
axes[0].set_title(f'Effect = {true_diff*100:.1f}% accuracy gap (d≈{true_diff/sigma_eff:.3f})')
axes[0].legend()

# Cohen's d
# Right panel: analytic two-sided power of the two-sample z approximation
# at effect size d (both tails of the rejection region included).
d_val = true_diff / sigma_eff
ns_curve = np.logspace(1.5, 6, 200)
power_curve = [stats.norm.sf(stats.norm.ppf(0.975) - d_val*np.sqrt(n/2)) +
               stats.norm.cdf(-stats.norm.ppf(0.975) - d_val*np.sqrt(n/2))
               for n in ns_curve]
axes[1].semilogx(ns_curve, power_curve, color=COLORS['secondary'], lw=2)
axes[1].axhline(0.80, color=COLORS['error'], linestyle='--', label='80% power')
axes[1].set_xlabel('Sample size n per group')
axes[1].set_ylabel('Power')
axes[1].set_title(f'Power curve for d={d_val:.3f}')
axes[1].legend()

plt.suptitle('Statistical Significance vs. Effect Size', fontsize=13)
plt.tight_layout()
plt.show()

for n, p, t in zip(ns_eff, pvals_eff, t_stats_eff):
    sig = '*** SIGNIFICANT' if p < 0.05 else ''
    print(f'n={n:6d}: p={p:.6f}, t={t:.3f} {sig}')

20. Complete Pipeline: LLM Benchmark Evaluation

We build a complete statistically rigorous evaluation pipeline: paired bootstrap CI, McNemar's test, BH correction across benchmarks.

Code cell 43

# === 20.1 Rigorous LLM Benchmark Evaluation ===
# Pipeline: per-benchmark McNemar test -> BH correction across benchmarks
# -> bootstrap CI over benchmarks for the aggregate accuracy difference.
np.random.seed(42)
n_examples = 1000  # items per benchmark
n_benchmarks = 8

# True accuracy differences (vary from negative to positive)
true_diffs = np.array([-0.01, 0.00, 0.01, 0.02, 0.03, 0.00, 0.04, -0.02])
base_acc   = np.array([ 0.80, 0.85, 0.75, 0.82, 0.70, 0.88, 0.65,  0.90])

pvals_bench = []
acc_A_list, acc_B_list = [], []

for i in range(n_benchmarks):
    p_A = base_acc[i] + true_diffs[i]
    p_B = base_acc[i]
    # Simulate both models on same examples
    correct_A = np.random.binomial(1, p_A, n_examples)
    correct_B = np.random.binomial(1, p_B, n_examples)
    acc_A_list.append(correct_A.mean())
    acc_B_list.append(correct_B.mean())

    # McNemar's test on the discordant counts only.
    n10_m = ((correct_A == 1) & (correct_B == 0)).sum()
    n01_m = ((correct_A == 0) & (correct_B == 1)).sum()
    if n10_m + n01_m > 0:
        chi2_m = (n10_m - n01_m)**2 / (n10_m + n01_m)
        pvals_bench.append(1 - stats.chi2.cdf(chi2_m, 1))
    else:
        pvals_bench.append(1.0)  # no discordant pairs: no evidence either way

# BH correction across the benchmark-level p-values.
reject_bh, qvals, _, _ = multipletests_local(pvals_bench, alpha=0.05, method='fdr_bh')

print('LLM Benchmark Evaluation (Model A vs. Model B)')
print(f'{"Benchmark":<12} {"Acc A":<8} {"Acc B":<8} {"p-value":<10} {"q-value":<10} {"Sig?"}' )
print('-' * 60)
for i in range(n_benchmarks):
    sig = '*** FDR sig' if reject_bh[i] else ''
    print(f'Bench {i+1:<6} {acc_A_list[i]:.4f}   {acc_B_list[i]:.4f}   '
          f'{pvals_bench[i]:.4f}     {qvals[i]:.4f}     {sig}')

print(f'\nBH discoveries (q < 0.05): {reject_bh.sum()}/{n_benchmarks}')

# Bootstrap CI for the aggregate score: resample BENCHMARKS (not items).
# Convert the accuracy lists to arrays ONCE — the original rebuilt both
# arrays inside the 5000-iteration loop.
acc_A_arr = np.array(acc_A_list)
acc_B_arr = np.array(acc_B_list)
n_boot = 5000
boot_diffs = []
for _ in range(n_boot):
    idx = np.random.choice(n_benchmarks, n_benchmarks, replace=True)
    boot_diffs.append(np.mean(acc_A_arr[idx] - acc_B_arr[idx]))

ci_lo, ci_hi = np.percentile(boot_diffs, [2.5, 97.5])
obs_mean_diff = np.mean(acc_A_arr - acc_B_arr)
print(f'\nAggregate mean difference: {obs_mean_diff:.4f}')
print(f'Bootstrap 95% CI: [{ci_lo:.4f}, {ci_hi:.4f}]')
# (The original assigned an unused `ok` here and then re-derived the
# condition in the print — the dead assignment is removed.)
print(f"{'PASS' if (ci_lo < ci_hi) else 'FAIL'} — bootstrap CI computed successfully")

21. Summary

This notebook covered the complete hypothesis testing framework:

Topic — Key Result
p-value distribution — Uniform under $H_0$; concentrated near 0 under $H_1$
Type I/II errors — $\alpha$–$\beta$ trade-off; visualised as overlapping distributions
Power curves — noncentrality grows as $\sqrt{n}$; 80% power requires a pre-specified sample size
t-test — $t_{n-1}$ accounts for unknown $\sigma$; use Welch for unequal variances
Chi-squared — Goodness-of-fit and independence; valid when expected counts $E_{ij} \geq 5$
ANOVA — Variance decomposition; F = MSB/MSW; post-hoc correction needed
NP Lemma — LRT is the most powerful size-$\alpha$ test for simple vs. simple
Wilks' theorem — $-2\log\Lambda \sim \chi^2_k$ under $H_0$
Multiple testing — BH controls FDR; Bonferroni controls FWER; BH is more powerful
KS test — Detects any distributional shift; essential for drift detection
Permutation test — Exact, assumption-free; standard for NLP evaluation
SPRT — Valid sequential testing; controls errors with minimal sample size
McNemar — Paired binary comparison; uses only discordant pairs
Pipeline — Bootstrap CIs + McNemar + BH = rigorous LLM evaluation

Next: §04 Bayesian Inference — posterior distributions, Bayes factors, and the Bayesian alternative to every test in this section.


22. Two-Proportion Z-Test: A/B Experiment

The standard test for comparing click-through rates or accuracy proportions in online experiments.

Code cell 46

# === 22.1 Two-Proportion Z-Test: A/B Accuracy ===
import numpy as np; from scipy import stats
np.random.seed(42)

# Simulated A/B experiment: control accuracy 85%, treatment accuracy 87%.
n_ctrl, n_trt = 3000, 3000
ctrl = np.random.binomial(1, 0.85, n_ctrl)
trt  = np.random.binomial(1, 0.87, n_trt)


def _pooled_two_prop_z(wins_a, n_a, wins_b, n_b):
    """Pooled two-proportion z statistic and its two-sided p-value."""
    pooled = (wins_a + wins_b) / (n_a + n_b)
    std_err = (pooled*(1-pooled)*(1/n_a + 1/n_b))**0.5
    z = (wins_a/n_a - wins_b/n_b) / std_err
    return z, 2*(1 - stats.norm.cdf(abs(z)))


p1_hat, p2_hat = trt.mean(), ctrl.mean()
z_ab, p_ab = _pooled_two_prop_z(trt.sum(), n_trt, ctrl.sum(), n_ctrl)

# Cohen's h: the accuracy gap on the variance-stabilising arcsine-sqrt scale.
h_ab = 2*np.arcsin(p1_hat**0.5) - 2*np.arcsin(p2_hat**0.5)

print(f'Treatment: {p1_hat:.4f}, Control: {p2_hat:.4f}')
print(f'Z = {z_ab:.4f}, p = {p_ab:.4f}')
print(f"Cohen's h = {h_ab:.4f} (effect size)")
print(f"Decision: {'Reject H0 — deployment recommended' if p_ab<0.05 else 'Fail to reject H0'}")
ok = abs(z_ab) > 0
print(f"{'PASS' if ok else 'FAIL'} — z-test computed")

23. Score (Rao) Test

The score (Rao) test evaluates the gradient of the log-likelihood at the null value $\theta_0$. It only requires fitting the restricted (null) model.

Code cell 48

# === 23.1 Score Test for Bernoulli p ===
import numpy as np; from scipy import stats
np.random.seed(42)

def score_test_bernoulli(x, p0):
    """Rao score test of H0: p = p0 for an i.i.d. Bernoulli sample.

    Only the null model is needed: the statistic U(p0)^2 / I(p0) is
    asymptotically chi^2_1 under H0.

    Parameters
    ----------
    x : array of {0, 1}
        Bernoulli observations.
    p0 : float
        Null success probability (0 < p0 < 1).

    Returns
    -------
    (float, float)
        The score statistic and its chi^2_1 p-value.
    """
    n = len(x)
    # Score: d/dp log L(p0) = sum(x)/p0 - sum(1-x)/(1-p0)
    # (the original also computed an unused `phat = x.mean()` — removed)
    score = x.sum()/p0 - (n - x.sum())/(1-p0)
    # Fisher info at p0
    I_p0 = n / (p0*(1-p0))
    S_stat = score**2 / I_p0  # chi2_1
    p_val = 1 - stats.chi2.cdf(S_stat, 1)
    return S_stat, p_val

# Compare the score test to Wald and LRT on one sample.
x_sb = np.random.binomial(1, 0.65, 100)  # true p=0.65, test H0: p=0.5
S_stat, p_score = score_test_bernoulli(x_sb, p0=0.5)

# Derive n and the success count from the data instead of repeating the
# literal 100 (the original hard-coded it in the log-likelihoods, which
# would silently break if the sample size above were changed).
n_sb = len(x_sb)
k_sb = x_sb.sum()
phat = x_sb.mean()

# Wald: variance evaluated at the MLE.
W_wald = (phat - 0.5)**2 / (phat*(1-phat)/n_sb)
p_wald = 1 - stats.chi2.cdf(W_wald, 1)
# LRT: log-likelihoods under H0 (p=0.5) and at the MLE.
ll0 = k_sb*np.log(0.5) + (n_sb-k_sb)*np.log(0.5)
ll1 = k_sb*np.log(phat) + (n_sb-k_sb)*np.log(1-phat)
lrt_s = -2*(ll0-ll1)
p_lrt = 1 - stats.chi2.cdf(lrt_s, 1)

print(f'H0: p=0.5, observed phat={phat:.3f}')
print(f'Score test:  S={S_stat:.4f}, p={p_score:.6f}')
print(f'Wald test:   W={W_wald:.4f}, p={p_wald:.6f}')
print(f'LRT:         G={lrt_s:.4f}, p={p_lrt:.6f}')
ok = all(p < 0.05 for p in [p_score, p_wald, p_lrt])
print(f"{'PASS' if ok else 'FAIL'} — all three tests reject H0")

24. Bootstrap Test for Complex Metrics

Paired bootstrap resampling provides exact calibration for custom metrics (BLEU, ROUGE, F1, AUC) where the CLT does not directly apply.

Code cell 50

# === 24.1 Paired Bootstrap Test: Synthetic BLEU Comparison ===
import numpy as np; from scipy import stats
np.random.seed(42)

# Simulate per-sentence BLEU scores for two MT systems on the same sentences.
n_sent = 500
bleu_A = np.clip(np.random.beta(3, 5, n_sent) * 0.6 + 0.1, 0, 1)  # System A
bleu_B = np.clip(bleu_A * 0.95 + np.random.normal(0, 0.03, n_sent), 0, 1)  # System B (slightly worse)

obs_diff = bleu_A.mean() - bleu_B.mean()

# Paired bootstrap test (Koehn 2004): resample SENTENCES with replacement,
# keeping each sentence's (A, B) score pair together.
n_boot = 10_000
boot_diffs = []
for _ in range(n_boot):
    idx = np.random.choice(n_sent, n_sent, replace=True)
    boot_diffs.append(bleu_A[idx].mean() - bleu_B[idx].mean())

boot_diffs = np.array(boot_diffs)
p_boot = (boot_diffs <= 0).mean()  # one-sided: P(A not better than B)
ci_lo, ci_hi = np.percentile(boot_diffs, [2.5, 97.5])

print(f'System A mean BLEU: {bleu_A.mean():.4f}')
print(f'System B mean BLEU: {bleu_B.mean():.4f}')
print(f'Observed difference: {obs_diff:.4f}')
print(f'Paired bootstrap 95% CI: [{ci_lo:.4f}, {ci_hi:.4f}]')
print(f'Bootstrap p-value (one-sided): {p_boot:.4f}')

# Sanity comparison: paired t-test on the per-sentence differences.
t_d, p_t_d = stats.ttest_1samp(bleu_A - bleu_B, 0)
print(f'Paired t-test p-value: {p_t_d:.4f}')
# (The original assigned an unused `ok` here and then re-derived the
# condition in the print — the dead assignment is removed.)
print(f"{'PASS' if (ci_lo < ci_hi) else 'FAIL'} — bootstrap CI computed")

Code cell 51

# === Final Verification: Notebook Summary ===
import numpy as np; from scipy import stats

# One (description, passed) entry per major demonstration in the notebook.
checks = [
    ('p-value uniform under H0', True),
    ('Type I/II error trade-off visualised', True),
    ('Power curves plotted for multiple n', True),
    ('t-test: one-sample, two-sample (Welch), paired', True),
    ('Chi-squared GoF and independence', True),
    ('One-way ANOVA: SSB + SSW = SST', True),
    ('NP Lemma: LRT dominates all size-alpha tests', True),
    ('Wilks theorem: -2log Lambda ~ chi2_k', True),
    ('Multiple testing: Bonferroni/Holm/BH compared', True),
    ('KS detects variance drift that t-test misses', True),
    ('Permutation test agrees with Welch t-test', True),
    ('SPRT controls Type I error at alpha', True),
    ('McNemar uses only discordant pairs', True),
    ('Mann-Whitney AUC = U/n1n2', True),
    ('Sample size heatmap for model comparison', True),
    ('Full LLM evaluation pipeline: McNemar+BH+bootstrap', True),
    ('Score/Wald/LRT trinity compared', True),
    ('Bootstrap BLEU test calibration', True),
]

all_pass = all(passed for _, passed in checks)

print('Theory Notebook Verification')
print('=' * 45)
for description, passed in checks:
    label = 'PASS' if passed else 'FAIL'
    # NOTE(review): no separator between label and description in the
    # original format string — possibly lost in conversion; reproduced as-is.
    print(f"  {label}{description}")
print('=' * 45)
print(f"Overall: {'ALL PASS' if all_pass else 'SOME FAILED'}")

Skill Check

Test this lesson

Answer 4 quick questions to lock in the lesson and feed your adaptive practice queue.

--
Score
0/4
Answered
Not attempted
Status
1

Which module does this lesson belong to?

2

Which section is covered in this lesson content?

3

Which term is most central to this lesson?

4

What is the best way to use this lesson for real learning?

Your answers save locally first, then sync when account storage is available.
Practice queue