Exercises Notebook
Converted from exercises.ipynb for web reading.
Matrix Norms - Exercises
This notebook contains 10 progressive exercises for 06-Matrix-Norms. Each exercise has a learner workspace followed by a complete reference solution. Use the solution cells after making a serious attempt.
Difficulty grows from direct computation to AI-facing interpretation. Formulas use LaTeX-in-Markdown with $...$ delimiters.
Code cell 2
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
try:
    import seaborn as sns
    sns.set_theme(style="whitegrid", palette="colorblind")
    HAS_SNS = True
except ImportError:
    plt.style.use("seaborn-v0_8-whitegrid")
    HAS_SNS = False
mpl.rcParams.update({
    "figure.figsize": (10, 6),
    "figure.dpi": 120,
    "font.size": 13,
    "axes.titlesize": 15,
    "axes.labelsize": 13,
    "xtick.labelsize": 11,
    "ytick.labelsize": 11,
    "legend.fontsize": 11,
    "legend.framealpha": 0.85,
    "lines.linewidth": 2.0,
    "axes.spines.top": False,
    "axes.spines.right": False,
    "savefig.bbox": "tight",
    "savefig.dpi": 150,
})
np.random.seed(42)
print("Plot setup complete.")
Code cell 3
import numpy as np
import numpy.linalg as la
import scipy.linalg as sla
from scipy import stats
np.set_printoptions(precision=8, suppress=True)
np.random.seed(42)
COLORS = {
    "primary": "#0077BB",
    "secondary": "#EE7733",
    "tertiary": "#009988",
    "error": "#CC3311",
    "neutral": "#555555",
    "highlight": "#EE3377",
}

def header(title):
    print("\n" + "=" * len(title))
    print(title)
    print("=" * len(title))

def check_true(name, cond):
    ok = bool(cond)
    print(f"{'PASS' if ok else 'FAIL'} - {name}")
    return ok

def check_close(name, got, expected, tol=1e-8):
    ok = np.allclose(got, expected, atol=tol, rtol=tol)
    print(f"{'PASS' if ok else 'FAIL'} - {name}")
    if not ok:
        print(" got =", got)
        print(" expected=", expected)
    return ok

def softmax(z, axis=-1):
    z = np.asarray(z, dtype=float)
    z = z - np.max(z, axis=axis, keepdims=True)
    e = np.exp(z)
    return e / np.sum(e, axis=axis, keepdims=True)

def gram_schmidt_columns(A, tol=1e-12):
    A = np.asarray(A, dtype=float)
    Q = []
    for j in range(A.shape[1]):
        v = A[:, j].copy()
        for q in Q:
            v -= (q @ v) * q
        n = la.norm(v)
        if n > tol:
            Q.append(v / n)
    return np.column_stack(Q) if Q else np.empty((A.shape[0], 0))

def projection_matrix(A):
    Q = gram_schmidt_columns(A)
    return Q @ Q.T

def numerical_rank(A, tol=1e-10):
    return int(np.sum(la.svd(np.asarray(A, dtype=float), compute_uv=False) > tol))

def stable_rank(A):
    s = la.svd(np.asarray(A, dtype=float), compute_uv=False)
    return float(np.sum(s**2) / (s[0]**2 + 1e-15))

def make_spd(n, seed=0, ridge=0.5):
    rng = np.random.default_rng(seed)
    A = rng.normal(size=(n, n))
    return A.T @ A + ridge * np.eye(n)
print("Chapter 03 helper setup complete.")
Exercise 1: Compute Common Norms
Compute Frobenius, spectral, nuclear, 1, and infinity norms.
Code cell 5
# Your Solution
# Exercise 1 - learner workspace
# Write your solution here, then run the reference solution below to compare.
print("Learner workspace ready for Exercise 1.")
Code cell 6
# Solution
# Exercise 1 - Compute Common Norms
header("Exercise 1: common norms")
A = np.array([[1.0, -2.0], [3.0, 4.0]])
s = la.svd(A, compute_uv=False)
print("fro", la.norm(A,'fro'), "spectral", s[0], "nuclear", s.sum())
check_close("spectral from SVD", la.norm(A,2), s[0])
check_close("nuclear", la.norm(A, ord='nuc'), s.sum())
Exercise 2: Induced Norm Geometry
Estimate the spectral norm $\|A\|_2$ by maximizing $\|Ax\|_2$ over random unit vectors $x$.
Code cell 8
# Your Solution
# Exercise 2 - learner workspace
# Write your solution here, then run the reference solution below to compare.
print("Learner workspace ready for Exercise 2.")
Code cell 9
# Solution
# Exercise 2 - Induced Norm Geometry
header("Exercise 2: induced norm")
rng=np.random.default_rng(0); A=np.array([[2.0,1.0],[0.0,1.0]])
X=rng.normal(size=(2000,2)); X/=la.norm(X,axis=1,keepdims=True)
stretches=la.norm(X@A.T,axis=1)
print("random max", stretches.max(), "spectral", la.norm(A,2))
check_true("random lower bound", stretches.max() <= la.norm(A,2)+1e-12)
check_true("close with many samples", la.norm(A,2)-stretches.max() < 0.02)
Exercise 3: Submultiplicativity
Verify the submultiplicative inequality $\|AB\| \le \|A\|\,\|B\|$ for the spectral and Frobenius norms.
Code cell 11
# Your Solution
# Exercise 3 - learner workspace
# Write your solution here, then run the reference solution below to compare.
print("Learner workspace ready for Exercise 3.")
Code cell 12
# Solution
# Exercise 3 - Submultiplicativity
header("Exercise 3: submultiplicativity")
A=np.array([[1.,2.],[-1.,3.]]); B=np.array([[0.5,1.],[2.,-1.]])
check_true("spectral submultiplicative", la.norm(A@B,2) <= la.norm(A,2)*la.norm(B,2)+1e-12)
check_true("Frobenius submultiplicative", la.norm(A@B,'fro') <= la.norm(A,'fro')*la.norm(B,'fro')+1e-12)
Exercise 4: Condition Number
Compute the condition number $\kappa(A)$ and connect it to the singular values.
Code cell 14
# Your Solution
# Exercise 4 - learner workspace
# Write your solution here, then run the reference solution below to compare.
print("Learner workspace ready for Exercise 4.")
Code cell 15
# Solution
# Exercise 4 - Condition Number
header("Exercise 4: condition number")
A=np.array([[1.,0.999],[0.999,0.998]])
s=la.svd(A,compute_uv=False)
k=s[0]/s[-1]
print("singular values", s, "kappa", k)
check_close("numpy cond", k, la.cond(A))
check_true("ill-conditioned", k>1e5)
Exercise 5: Perturbation Bound
Compare the relative solution change with $\kappa(A)$ times the relative right-hand-side change.
Code cell 17
# Your Solution
# Exercise 5 - learner workspace
# Write your solution here, then run the reference solution below to compare.
print("Learner workspace ready for Exercise 5.")
Code cell 18
# Solution
# Exercise 5 - Perturbation Bound
header("Exercise 5: perturbation bound")
A=np.array([[2.,0.],[0.,0.1]]); b=np.array([2.,0.1]); db=np.array([0.,1e-3])
x=la.solve(A,b); xp=la.solve(A,b+db)
relx=la.norm(xp-x)/la.norm(x); relb=la.norm(db)/la.norm(b)
print("relx", relx, "k relb", la.cond(A)*relb)
check_true("bound holds", relx <= la.cond(A)*relb + 1e-12)
Exercise 6: Spectral Normalization
Rescale a weight matrix by its spectral norm.
Code cell 20
# Your Solution
# Exercise 6 - learner workspace
# Write your solution here, then run the reference solution below to compare.
print("Learner workspace ready for Exercise 6.")
Code cell 21
# Solution
# Exercise 6 - Spectral Normalization
header("Exercise 6: spectral normalization")
rng=np.random.default_rng(1); W=rng.normal(size=(5,4))
sigma=la.svd(W,compute_uv=False)[0]
Wn=W/sigma
check_close("normalized spectral norm", la.norm(Wn,2), 1.0)
print("original sigma", sigma)
Exercise 7: Nuclear Norm Promotes Low Rank
Compare two matrices with equal Frobenius norm but different nuclear norm.
Code cell 23
# Your Solution
# Exercise 7 - learner workspace
# Write your solution here, then run the reference solution below to compare.
print("Learner workspace ready for Exercise 7.")
Code cell 24
# Solution
# Exercise 7 - Nuclear Norm Promotes Low Rank
header("Exercise 7: nuclear norm")
A=np.diag([3.,0.,0.]); B=np.diag([np.sqrt(3),np.sqrt(3),np.sqrt(3)])
print("fro A/B", la.norm(A,'fro'), la.norm(B,'fro'))
print("nuclear A/B", la.norm(A,'nuc'), la.norm(B,'nuc'))
check_true("spread spectrum has larger nuclear norm", la.norm(B,'nuc') > la.norm(A,'nuc'))
Exercise 8: Stable Rank
Compute stable rank and compare to exact rank.
Code cell 26
# Your Solution
# Exercise 8 - learner workspace
# Write your solution here, then run the reference solution below to compare.
print("Learner workspace ready for Exercise 8.")
Code cell 27
# Solution
# Exercise 8 - Stable Rank
header("Exercise 8: stable rank")
A=np.diag([5.,1.,0.2,0.05])
print("stable rank", stable_rank(A), "exact", la.matrix_rank(A))
check_true("stable <= exact", stable_rank(A) <= la.matrix_rank(A))
check_true("dominant spectrum lowers stable rank", stable_rank(A) < 2)
Exercise 9: Network Lipschitz Bound
Bound a linear network's Lipschitz constant by the product of the layer spectral norms.
Code cell 29
# Your Solution
# Exercise 9 - learner workspace
# Write your solution here, then run the reference solution below to compare.
print("Learner workspace ready for Exercise 9.")
Code cell 30
# Solution
# Exercise 9 - Network Lipschitz Bound
header("Exercise 9: Lipschitz product")
rng=np.random.default_rng(2); W1=rng.normal(size=(4,3)); W2=rng.normal(size=(2,4))
bound=la.norm(W2,2)*la.norm(W1,2); actual=la.norm(W2@W1,2)
print("actual", actual, "bound", bound)
check_true("product bound", actual <= bound + 1e-12)
Exercise 10: Gradient Clipping
Clip a gradient matrix to a Frobenius-norm budget.
Code cell 32
# Your Solution
# Exercise 10 - learner workspace
# Write your solution here, then run the reference solution below to compare.
print("Learner workspace ready for Exercise 10.")
Code cell 33
# Solution
# Exercise 10 - Gradient Clipping
header("Exercise 10: gradient clipping")
G=np.array([[3.,4.],[0.,12.]])
max_norm=5.0
scale=min(1.0, max_norm/la.norm(G,'fro'))
Gc=scale*G
print("before", la.norm(G,'fro'), "after", la.norm(Gc,'fro'))
check_true("within budget", la.norm(Gc,'fro') <= max_norm + 1e-12)
check_true("direction preserved", np.allclose(Gc/la.norm(Gc), G/la.norm(G)))