def lstsq(A, b):
    """
    Solves the linear system of equations Ax = b by computing a vector x
    that minimizes the Euclidean norm ||b - Ax||^2.

    Concretely, uses the QR decomposition of A to deal with an over or
    well determined system.

    Params
    ------
    - A: a numpy array of shape (M, N).
    - b: a numpy array of shape (M,).

    Returns
    -------
    - x: a numpy array of shape (N,).
    """
    num_rows, num_cols = A.shape

    # A square system is solved more cheaply via PLU; anything
    # over-determined falls back to QR for the least-squares solution.
    if num_rows == num_cols:
        factorization = LU(A, pivoting='partial')
    else:
        factorization = QR(A)

    return factorization.solve(b)
def solve(A, b, pivoting='partial'):
    """
    Solves the linear system of equations Ax = b where A is
    well-determined.

    Concretely, uses PLU decomposition of A followed by forward and
    back substitution to solve for x.

    Params
    ------
    - A: a numpy array of shape (N, N).
    - b: a numpy array of shape (N,).
    - pivoting: 'partial' or 'full' pivoting.

    Returns
    -------
    - x: a numpy array of shape (N,).
    """
    M, N = A.shape

    # Guard against a non-square system or a mismatched right-hand side.
    assert M == N, "[!] A must be square."
    assert len(b) == N, "[!] b must be {}D".format(M)

    factorization = LU(A, pivoting=pivoting)
    return factorization.solve(b)
def test_no_pivoting(self):
    """L @ U from an unpivoted LU factorization must reproduce the input."""
    mat = np.random.randn(50, 50)
    lower, upper = LU(mat).decompose()
    reconstructed = lower.dot(upper)
    self.assertTrue(np.allclose(reconstructed, mat))
def test_solve_multi_full_pivoting(self):
    """Fully-pivoted LU solve with a multi-column RHS matches numpy."""
    mat = np.random.randn(50, 50)
    rhs = np.random.randn(50, 5)
    got = LU(mat, pivoting='full').solve(rhs)
    want = np.linalg.solve(mat, rhs)
    self.assertTrue(np.allclose(got, want))
def test_solve_single_no_pivoting(self):
    """Unpivoted LU solve with a single RHS vector matches numpy."""
    mat = np.random.randn(50, 50)
    rhs = np.random.randn(50)
    got = LU(mat).solve(rhs)
    want = np.linalg.solve(mat, rhs)
    self.assertTrue(np.allclose(got, want))
def test_partial_pivoting(self):
    """Partially-pivoted factors agree with the scipy reference, factor by factor."""
    mat = np.random.randn(50, 50)
    ours = LU(mat, pivoting='partial').decompose()
    ref = LA.lu(mat)
    factors_match = all(
        np.allclose(got, want) for got, want in zip(ours, ref)
    )
    self.assertTrue(factors_match)
def det(X, log=False):
    """
    Computes the determinant of a square matrix A.

    Concretely, first factorizes A into PLU and then computes the
    product of the determinant of P and U. In case where the
    determinant is a very small or very big number, the implementation
    may underflow or overflow. To combat this, we compute the log of
    the determinant and return the sign in which case:

    `det = sign * np.exp(logdet)`

    Args
    ----
    - A: a numpy array of shape (N, N).
    - log: set to True to return the log of the determinant
      and the sign.

    Returns
    -------
    If log = False, returns:
    - det: a scalar, the determinant of A.

    Else, returns a tuple:
    - sign: 1 or -1.
    - logdet: a float representing the log of the determinant.
    """
    A = np.array(X)

    # PLU factorization: det(A) = det(P) * det(L) * det(U), and
    # det(L) = 1 since L is unit lower-triangular.
    t, U = LU(A, pivoting='partial').decompose(det=True)

    # det(P) is +1 or -1 depending on the parity of the number
    # of row interchanges t.
    if t % 2 == 0:
        sign = 1.
    else:
        sign = -1.

    # det(U) is the product of its diagonal entries.
    diagonal = diag(U)

    if log:
        # Diagonal entries of U may be negative, so fold their signs
        # into `sign` and accumulate log(|d|); taking np.log of a raw
        # negative entry would produce NaN.
        logdet = 0.
        for d in diagonal:
            if d < 0:
                sign = -sign
            logdet += np.log(np.abs(d))
        return sign, logdet

    det_U = multi_dot(diagonal)
    det_A = sign * det_U
    return det_A
def inverse(A):
    """
    Computes the inverse of a square matrix A.

    Concretely, solves the linear system Ax = I where x is a square
    matrix rather than a vector. The system is solved using LU
    decomposition with partial pivoting.

    Params
    ------
    - A: a numpy array of shape (N, N).

    Returns
    -------
    - a numpy array of shape (N, N).
    """
    N = A.shape[0]

    P, L, U = LU(A, pivoting='partial').decompose()

    # LU returns A = PLU, so transpose P to move it to the other side.
    P = P.T

    # Forward substitution: solve L y = P^T, one entry at a time,
    # using compensated (Kahan) summation for the dot products.
    y = np.zeros_like(L)
    for row in range(N):
        for col in range(N):
            acc = KahanSum()
            for k in range(row):
                acc.add(L[row, k] * y[k, col])
            y[row, col] = (P[row, col] - acc.cur_sum()) / L[row, row]

    # Back substitution: solve U x = y, walking rows bottom-up.
    x = np.zeros_like(U)
    for row in range(N - 1, -1, -1):
        for col in range(N):
            acc = KahanSum()
            for k in range(N - 1, row, -1):
                acc.add(U[row, k] * x[k, col])
            x[row, col] = (y[row, col] - acc.cur_sum()) / U[row, row]

    return x
def test_full_pivoting(self):
    """Multiplying out all fully-pivoted factors must reproduce the input."""
    mat = np.random.randn(50, 50)
    factors = list(LU(mat, pivoting='full').decompose())
    self.assertTrue(np.allclose(multi_dot(factors), mat))