def test_update_upper(self):
    n = 5
    A = random_psd(n, n)
    B = random_psd(n, n)
    C = -random_psd(n, n)
    M = spa.bmat([[A, B.T], [B, C]], format='csc')
    b = np.random.randn(n + n)

    F = qdldl.Solver(M)
    F_upper = qdldl.Solver(spa.triu(M, format='csc'), upper=True)
    x_first_qdldl = F.solve(b)
    x_first_qdldl_upper = F_upper.solve(b)

    # Update
    M.data = M.data + 0.1 * np.random.randn(M.nnz)
    # Symmetrize matrix
    M = .5 * (M + M.T)

    F.update(M)
    F_upper.update(spa.triu(M, format='csc'), upper=True)
    x_second_qdldl = F.solve(b)
    x_second_qdldl_upper = F_upper.solve(b)

    nptest.assert_allclose(x_second_qdldl, x_second_qdldl_upper,
                           rtol=1e-05, atol=1e-05)
def test_update(self):
    n = 5
    A = random_psd(n, n)
    B = random_psd(n, n)
    C = -random_psd(n, n)
    M = spa.bmat([[A, B.T], [B, C]], format='csc')
    b = np.random.randn(n + n)

    F = qdldl.Solver(M)
    x_first_scipy = sla.spsolve(M, b)
    x_first_qdldl = F.solve(b)

    # Update
    M.data = M.data + 0.1 * np.random.randn(M.nnz)
    # Symmetrize matrix
    M = .5 * (M + M.T)

    x_second_scipy = sla.spsolve(M, b)
    x_second_qdldl_scratch = qdldl.Solver(M).solve(b)

    # M_triu = spa.triu(M, format='csc')
    # M_triu.sort_indices()
    # F.update(M_triu.data)
    F.update(M)
    x_second_qdldl = F.solve(b)

    nptest.assert_allclose(x_second_scipy, x_second_qdldl,
                           rtol=1e-05, atol=1e-05)
def test_wrong_size_A(self):
    np.random.seed(2)
    A = spa.random(10, 12)
    with self.assertRaises(ValueError):
        F = qdldl.Solver(A)
def test_upper(self):
    np.random.seed(2)
    n = 5
    A = random_psd(n, n)
    B = random_psd(n, n)
    C = -random_psd(n, n)
    M = spa.bmat([[A, B.T], [B, C]], format='csc')
    b = np.random.randn(n + n)

    m = qdldl.Solver(M)
    x_qdldl = m.solve(b)

    M_triu = spa.triu(M, format='csc')
    m_triu = qdldl.Solver(M_triu, upper=True)
    x_qdldl_triu = m_triu.solve(b)

    nptest.assert_allclose(x_qdldl, x_qdldl_triu, rtol=1e-05, atol=1e-05)
def test_scalar_ls(self):
    M = spa.csc_matrix(np.random.randn(1, 1))
    b = np.random.randn(1)

    F = qdldl.Solver(M)
    x_qdldl = F.solve(b)
    x_scipy = sla.spsolve(M, b)

    # Assert close
    nptest.assert_array_almost_equal(x_qdldl, x_scipy)
def test_wrong_size_b(self):
    np.random.seed(2)
    A = spa.eye(10)
    b = np.random.randn(8)
    F = qdldl.Solver(A)
    with self.assertRaises(ValueError):
        x_qdldl = F.solve(b)
def test_basic_ls(self):
    np.random.seed(2)
    n = 5
    A = random_psd(n, n)
    B = random_psd(n, n)
    C = -random_psd(n, n)
    M = spa.bmat([[A, B.T], [B, C]], format='csc')
    b = np.random.randn(n + n)

    m = qdldl.Solver(M)
    x_qdldl = m.solve(b)
    x_scipy = sla.spsolve(M, b)

    # Assert close
    nptest.assert_array_almost_equal(x_qdldl, x_scipy)
def solve_qdldl(M, b):
    return qdldl.Solver(M).solve(b)
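# A minimal usage sketch for the helper above (the 2x2 quasidefinite system
# below is made up purely for illustration and is not part of the original
# code):
#
#   K = spa.csc_matrix(np.array([[4.0, 1.0], [1.0, -2.0]]))
#   x = solve_qdldl(K, np.array([1.0, 0.0]))  # same solution as scipy's spsolve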
def admm(F, losses, reg, lam, rho=50, maxiter=5000, eps=1e-6, warm_start={},
         verbose=False, eps_abs=1e-5, eps_rel=1e-5):
    """Solve min_w sum_i loss_i((F @ w)_i) + lam * reg(w), with w constrained
    to the probability simplex, using ADMM with a single QDLDL factorization."""
    m, n = F.shape
    ms = [l.m for l in losses]

    # Warm start primal and dual variables if provided, otherwise use defaults.
    f = warm_start.get("f", np.array(F.mean(axis=1)).flatten())
    w = warm_start.get("w", np.ones(n) / n)
    w_bar = warm_start.get("w_bar", np.ones(n) / n)
    w_tilde = warm_start.get("w_tilde", np.ones(n) / n)
    y = warm_start.get("y", np.zeros(m))
    z = warm_start.get("z", np.zeros(n))
    u = warm_start.get("u", np.zeros(n))

    # Quasidefinite KKT matrix of the w-update; factorize once, reuse each iteration.
    Q = sparse.bmat([[2 * sparse.eye(n), F.T], [F, -sparse.eye(m)]])
    factor = qdldl.Solver(Q)

    if verbose:
        print(u'Iteration | ||r||/\u03B5_pri | ||s||/\u03B5_dual')

    w_best = None
    best_objective_value = float("inf")

    for k in range(maxiter):
        # f-update: proximal step of each loss on its block of F @ w.
        ct_cum = 0
        for l in losses:
            f[ct_cum:ct_cum + l.m] = l.prox(
                F[ct_cum:ct_cum + l.m] @ w - y[ct_cum:ct_cum + l.m], 1 / rho)
            ct_cum += l.m

        # w_tilde-update: proximal step of the regularizer.
        w_tilde = reg.prox(w - z, lam / rho)

        # w_bar-update: projection onto the probability simplex.
        w_bar = _projection_simplex(w - u)

        # w-update: solve (2 I + F.T F) w = F.T (f + y) + w_tilde + z + w_bar + u
        # through the factorized KKT system.
        rhs = np.append(F.T @ (f + y) + w_tilde + z + w_bar + u, np.zeros(m))
        w_new = factor.solve(rhs)[:n]

        # Dual residual.
        s = rho * np.concatenate([F @ w_new - f, w_new - w, w_new - w])
        w = w_new

        # Scaled dual variable updates.
        y = y + f - F @ w
        z = z + w_tilde - w
        u = u + w_bar - w

        # Primal residual and standard ADMM stopping tolerances.
        r = np.concatenate([f - F @ w, w_tilde - w, w_bar - w])
        p = m + 2 * n
        Ax_k_norm = np.linalg.norm(np.concatenate([f, w_tilde, w_bar]))
        Bz_k_norm = np.linalg.norm(np.concatenate([w, w, w]))
        # Unscaled dual variables are rho times the scaled ones (y, z, u).
        ATy_k_norm = np.linalg.norm(rho * np.concatenate([y, z, u]))
        eps_pri = np.sqrt(p) * eps_abs + eps_rel * max(Ax_k_norm, Bz_k_norm)
        eps_dual = np.sqrt(p) * eps_abs + eps_rel * ATy_k_norm
        s_norm = np.linalg.norm(s)
        r_norm = np.linalg.norm(r)

        if verbose and k % 50 == 0:
            print('It %03d / %03d | %8.5e | %8.5e'
                  % (k, maxiter, r_norm / eps_pri, s_norm / eps_dual))

        # With a (nonconvex) Boolean regularizer ADMM is a heuristic, so keep
        # the best w_tilde found so far.
        if isinstance(reg, BooleanRegularizer):
            ct_cum = 0
            objective = 0.
            for l in losses:
                objective += l.evaluate(F[ct_cum:ct_cum + l.m] @ w_tilde)
                ct_cum += l.m
            if objective < best_objective_value:
                if verbose:
                    print("Found better objective value: %3.5f -> %3.5f"
                          % (best_objective_value, objective))
                best_objective_value = objective
                w_best = w_tilde

        if r_norm <= eps_pri and s_norm <= eps_dual:
            break

    if not isinstance(reg, BooleanRegularizer):
        w_best = w_bar

    return {
        "f": f,
        "w": w,
        "w_bar": w_bar,
        "w_tilde": w_tilde,
        "y": y,
        "z": z,
        "u": u,
        "w_best": w_best
    }
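# --- Usage sketch (illustrative, not part of the original module) ---
# `admm` relies only on duck typing: each loss needs an integer `m`, a
# `prox(v, t)` method and (for the BooleanRegularizer branch) an `evaluate(v)`
# method, while the regularizer needs `prox(w, t)`. The two classes below are
# hypothetical stand-ins satisfying that interface; the module's real loss and
# regularizer classes are not shown in this excerpt.
if __name__ == "__main__":

    class SquareLossSketch:
        """Hypothetical loss 0.5 * ||v - target||^2 with closed-form prox."""

        def __init__(self, target):
            self.target = target
            self.m = target.size

        def prox(self, v, t):
            # argmin_x 0.5 * ||x - target||^2 + (1 / (2 t)) * ||x - v||^2
            return (v + t * self.target) / (1.0 + t)

        def evaluate(self, v):
            return 0.5 * np.sum((v - self.target) ** 2)

    class ZeroRegularizerSketch:
        """Hypothetical regularizer that contributes nothing (identity prox)."""

        def prox(self, w, t):
            return w

    np.random.seed(0)
    m_demo, n_demo = 30, 4
    F_demo = np.random.randn(m_demo, n_demo)
    losses_demo = [SquareLossSketch(np.random.randn(m_demo))]

    sol = admm(F_demo, losses_demo, ZeroRegularizerSketch(), lam=0.1)
    print("simplex weights:", sol["w_best"], "sum =", sol["w_best"].sum())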