import numpy as np

# Create a matrix
A = np.array([[4, 0], [0, 3], [0, 0]])
U, s, Vt = np.linalg.svd(A)

print(f"Singular values: {s}")
print(f"Matrix rank: {np.linalg.matrix_rank(A)}")
print(f"Number of non-zero singular values: {np.sum(s > 1e-10)}")
print(f"Spectral norm: {np.linalg.norm(A, 2)}")
print(f"Frobenius norm: {np.linalg.norm(A, 'fro')}")
print(f"Sum of squared singular values: {np.sum(s**2)}")
Singular values: [4. 3.]
Matrix rank: 2
Number of non-zero singular values: 2
Spectral norm: 4.0
Frobenius norm: 5.0
Sum of squared singular values: 25.0
1.3.2 Relationship with Eigenvalues
Several important relationships exist between singular values and eigenvalues:
For symmetric matrices:
If \(\lambda\) is an eigenvalue of \(A\), then \(|\lambda|\) is a singular value of \(A\)
The singular vectors are eigenvectors of \(A\), up to sign
For general matrices:
The singular values of \(A\) are the square roots of the eigenvalues of \(A^TA\) or \(AA^T\)
The right singular vectors are eigenvectors of \(A^TA\)
The left singular vectors are eigenvectors of \(AA^T\)
Code
def compare_svd_eigen(A):
    # SVD
    U, s, Vt = np.linalg.svd(A)

    # Eigenvalues of A^T A
    eigenvals_ATA = np.linalg.eigvals(A.T @ A)
    eigenvals_ATA = np.sort(eigenvals_ATA)[::-1]  # sort in descending order

    print("Singular values:", s)
    print("Square root of eigenvalues of A^T A:", np.sqrt(np.abs(eigenvals_ATA)))

# Test with symmetric matrix
A_sym = np.array([[4, 1], [1, 4]])
print("Symmetric matrix:")
print(f"A = {A_sym}")
compare_svd_eigen(A_sym)

# Test with general matrix
A_gen = np.array([[2, 1], [1, 3], [0, 2]])
print("\nGeneral matrix:")
print(f"A = {A_gen}")
compare_svd_eigen(A_gen)
Symmetric matrix:
A = [[4 1]
[1 4]]
Singular values: [5. 3.]
Square root of eigenvalues of A^T A: [5. 3.]
General matrix:
A = [[2 1]
[1 3]
[0 2]]
Singular values: [4.02825173 1.66528916]
Square root of eigenvalues of A^T A: [4.02825173 1.66528916]
1.3.3 Important Theorems
Theorem 1.2 (Best Low-Rank Approximation) For any matrix \(A\) of rank \(r\), and any \(k < r\), the best rank-\(k\) approximation to \(A\) in both Frobenius and spectral norms is:
\[A_k = \sum_{i=1}^k \sigma_i u_i v_i^T\]
This is known as the Eckart-Young-Mirsky theorem.
Code
def demonstrate_low_rank_approx(A, k):
    U, s, Vt = np.linalg.svd(A)

    # Best rank-k approximation (truncated SVD)
    Ak = U[:, :k] @ np.diag(s[:k]) @ Vt[:k, :]

    # Random rank-k matrix for comparison
    B = np.random.randn(A.shape[0], k) @ np.random.randn(k, A.shape[1])

    print(f"Error with best rank-{k} approximation: {np.linalg.norm(A - Ak, 'fro')}")
    print(f"Error with random rank-{k} matrix: {np.linalg.norm(A - B, 'fro')}")

# Create a test matrix
A = np.random.randn(5, 4)
demonstrate_low_rank_approx(A, 2)
Error with best rank-2 approximation: 1.682214710482391
Error with random rank-2 matrix: 7.867547407071715
Theorem 1.3 (Pseudo-inverse) The Moore-Penrose pseudo-inverse of \(A\) is given by:
\[A^+ = V\Sigma^+ U^T\]
where \(\Sigma^+\) is obtained by taking the reciprocal of each non-zero singular value and transposing the resulting matrix.
Code
def compare_pseudoinverse(A):
    # SVD-based pseudo-inverse (thin SVD avoids padding with zero blocks)
    U, s, Vt = np.linalg.svd(A, full_matrices=False)
    s_plus = np.zeros_like(s)
    s_plus[s > 1e-10] = 1 / s[s > 1e-10]

    # Numerical rank = number of non-zero singular values
    r = int(np.sum(s > 1e-10))

    A_plus = Vt.T @ np.diag(s_plus) @ U.T

    # NumPy's pseudo-inverse
    A_pinv = np.linalg.pinv(A)

    print(f"rank(A) = {r}")
    print("Max difference between methods:", np.max(np.abs(A_plus - A_pinv)))

# Test with a rectangular matrix
A = np.random.randn(4, 3)
compare_pseudoinverse(A)
rank(A) = 3
Max difference between methods: 2.220446049250313e-16
Theorem 1.4 (SVD of Matrix Product) For matrices \(A\) and \(B\) of compatible dimensions:
\[\sigma_i(AB) \leq \sigma_1(A)\sigma_i(B)\]
This is known as the multiplicative singular value inequality.
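This bound is easy to check numerically. Below is a minimal sketch, using random test matrices whose dimensions are illustrative assumptions:
Code
import numpy as np

A = np.random.randn(4, 3)
B = np.random.randn(3, 3)

# Singular values only (compute_uv=False skips forming U and V)
s_A = np.linalg.svd(A, compute_uv=False)
s_B = np.linalg.svd(B, compute_uv=False)
s_AB = np.linalg.svd(A @ B, compute_uv=False)

# Verify sigma_i(AB) <= sigma_1(A) * sigma_i(B) for each i
for i in range(len(s_AB)):
    print(f"sigma_{i+1}(AB) = {s_AB[i]:.4f} <= "
          f"sigma_1(A) * sigma_{i+1}(B) = {s_A[0] * s_B[i]:.4f}")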
Computational Notes
When computing the SVD in practice, keep the following in mind (a short demonstration follows below):
The singular values can be computed without computing U and V
For large sparse matrices, iterative methods are preferred
The condition number \(\kappa(A) = \sigma_1/\sigma_r\) affects numerical stability
In practice, most software computes the SVD without explicitly forming \(AA^T\) or \(A^TA\): forming these products squares the condition number and can destroy precision in the small singular values.
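A minimal sketch of these notes, assuming an arbitrary random test matrix:
Code
import numpy as np

A = np.random.randn(200, 50)

# Singular values can be computed without forming U and V
s = np.linalg.svd(A, compute_uv=False)

# Condition number kappa(A) = sigma_1 / sigma_r
r = int(np.sum(s > 1e-10))  # numerical rank
kappa = s[0] / s[r - 1]
print(f"kappa(A) = {kappa:.4f}")
print(f"np.linalg.cond(A) = {np.linalg.cond(A):.4f}")  # agrees for this full-rank A
For large sparse matrices, iterative routines such as scipy.sparse.linalg.svds compute only the leading singular triplets without densifying the matrix.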
1.4 Connection with Eigendecomposition
For symmetric matrices, SVD is closely related to eigendecomposition:
If \(A\) is symmetric positive semidefinite, then \(U = V\); if \(A\) has negative eigenvalues, the corresponding columns of \(U\) and \(V\) differ by a sign
The singular values are the absolute values of the eigenvalues
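A small check of these statements, using an illustrative symmetric indefinite matrix:
Code
import numpy as np

# Symmetric matrix with eigenvalues 3 and -1 (chosen for illustration)
A = np.array([[1.0, 2.0],
              [2.0, 1.0]])

eigvals, eigvecs = np.linalg.eigh(A)
U, s, Vt = np.linalg.svd(A)

print("Eigenvalues:", eigvals)
print("Singular values:", s)
print("Sorted |eigenvalues|:", np.sort(np.abs(eigvals))[::-1])

# Columns of U and V agree up to sign; the sign flips where the eigenvalue is negative
print("U =\n", U)
print("V =\n", Vt.T)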