def test_eigval_atoms(self):
    """Test eigenvalue atoms.
    """
    P = np.arange(9) - 2j*np.arange(9)
    P = np.reshape(P, (3, 3))
    P1 = np.conj(P.T).dot(P)/10 + np.eye(3)*.1
    P2 = np.array([[10, 1j, 0], [-1j, 10, 0], [0, 0, 1]])
    for P in [P1, P2]:
        value = cvx.lambda_max(P).value
        X = Variable(P.shape, complex=True)
        prob = Problem(cvx.Minimize(cvx.lambda_max(X)), [X == P])
        result = prob.solve(solver=cvx.SCS, eps=1e-6)
        self.assertAlmostEqual(result, value, places=2)

        eigs = np.linalg.eigvals(P).real
        value = cvx.sum_largest(eigs, 2).value
        X = Variable(P.shape, complex=True)
        prob = Problem(cvx.Minimize(cvx.lambda_sum_largest(X, 2)), [X == P])
        result = prob.solve(solver=cvx.SCS, eps=1e-8)
        self.assertAlmostEqual(result, value, places=3)
        self.assertItemsAlmostEqual(X.value, P, places=3)

        value = cvx.sum_smallest(eigs, 2).value
        X = Variable(P.shape, complex=True)
        prob = Problem(cvx.Maximize(cvx.lambda_sum_smallest(X, 2)), [X == P])
        result = prob.solve(solver=cvx.SCS, eps=1e-6)
        self.assertAlmostEqual(result, value, places=3)
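# Hedged companion check (NumPy only, assumed standalone example): for the
# Hermitian P2 used in the test above, the targets that lambda_sum_largest(X, 2)
# and lambda_sum_smallest(X, 2) should recover can be computed directly with
# eigvalsh, which returns real eigenvalues in ascending order.
import numpy as np

P2 = np.array([[10, 1j, 0], [-1j, 10, 0], [0, 0, 1]])
eigs = np.linalg.eigvalsh(P2)
print(eigs[-2:].sum())  # sum of the two largest eigenvalues (= 20)
print(eigs[:2].sum())   # sum of the two smallest eigenvalues (= 10)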
def _solve_direct_cvxpy(self, r, N, x_expanded, y, Pmatrix, coef_prev):
    xi = cp.Variable(N * r)
    cost = cp.sum_squares(x_expanded @ xi - y.flatten()) + self.threshold * cp.norm1(xi)
    cost = cost + cp.lambda_max(cp.reshape(Pmatrix @ xi, (r, r))) / self.eta
    if self.use_constraints:
        if self.inequality_constraints:
            prob = cp.Problem(
                cp.Minimize(cost),
                [self.constraint_lhs @ xi <= self.constraint_rhs],
            )
        else:
            prob = cp.Problem(
                cp.Minimize(cost),
                [self.constraint_lhs @ xi == self.constraint_rhs],
            )
    else:
        prob = cp.Problem(cp.Minimize(cost))

    # default solver is SCS here I think
    prob.solve(eps=self.eps_solver)

    if xi.value is None:
        print("Infeasible solve, increase/decrease eta")
        return None, None
    coef_sparse = (xi.value).reshape(coef_prev.shape)

    if np.all(self.PL == 0.0) and np.all(self.PQ == 0.0):
        return np.zeros(r), coef_sparse  # no optimization over m
    else:
        m_cp = cp.Variable(r)
        L = np.tensordot(self.PL, coef_sparse, axes=([3, 2], [0, 1]))
        Q = np.reshape(
            np.tensordot(self.PQ, coef_sparse, axes=([4, 3], [0, 1])), (r, r * r))
        Ls = 0.5 * (L + L.T).flatten()
        cost_m = cp.lambda_max(cp.reshape(Ls - m_cp @ Q, (r, r)))
        prob_m = cp.Problem(cp.Minimize(cost_m))

        # default solver is SCS here
        prob_m.solve(eps=self.eps_solver)

        m = m_cp.value
        if m is None:
            print("Infeasible solve over m, increase/decrease eta")
            return None, coef_sparse
        return m, coef_sparse
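# Hedged illustration of the lambda_max penalty pattern used above, with
# hypothetical small sizes and random data (not the SINDy quantities): the
# regularizer bounds the largest eigenvalue of a matrix that depends linearly
# on the coefficient vector. The explicit symmetrization is added only so that
# the random stand-in data yields a valid symmetric argument for lambda_max.
import cvxpy as cp
import numpy as np

r = 3
rng = np.random.default_rng(0)
Pmat = rng.standard_normal((r * r, r))   # stand-in for Pmatrix
xi = cp.Variable(r)
A = cp.reshape(Pmat @ xi, (r, r))        # matrix-valued affine expression
cost = cp.sum_squares(xi - 1.0) + cp.lambda_max((A + A.T) / 2)
cp.Problem(cp.Minimize(cost)).solve(solver=cp.SCS)
print(xi.value)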
def partlysmooth(n=50, m=25, seed=0, **kwargs):
    torch.random.manual_seed(seed)
    tmp = torch.randn(n + 1, m, m, dtype=torch.double)
    A = stack([tmp[i, :, :].T + tmp[i, :, :] for i in range(n + 1)])

    # Get true values
    l = cp.Variable(n)
    obj = A[0, :, :].data.numpy()
    for i in range(n):
        obj += A[i + 1, :, :] * l[i]
    prob = cp.Problem(cp.Minimize(cp.lambda_max(obj)))
    prob.solve(solver='MOSEK')
    true_val = prob.value
    true_spec = np.linalg.eigvalsh(
        A[0, :, :] + np.einsum('i,ijk->jk', l.value, A[1:, :, :]))
    true_mult = np.sum(np.isclose(true_spec, np.max(true_spec)))

    def ps_function(x):
        if type(x) != Tensor:  # if a non-tensor is passed in, no gradient will be used
            x = tensor(x, dtype=torch.double, requires_grad=False)
        assert len(x) == n
        mat = A[0, :, :] + einsum('i,ijk->jk', x, A[1:, :, :])
        return symeig(mat, eigenvectors=True)[0][-1]  # eigenvalues in ascending order

    return Objective(ps_function, **kwargs), true_val, true_mult
def lovasz_lambda(self, n, edge_locations, solver=cvx.CVXOPT, kktsolver='robust'):
    '''
    Computes the Lovasz number via theta(G) = min lambda_max(A),
    where A satisfies a_ij = 1 when i = j, and a_ij = 1 when {i, j} is not an edge.
    '''
    X = cvx.Variable(n, n)
    objective = cvx.Minimize(cvx.lambda_max(X))

    # non-edge locations in the adjacency matrix
    complement_graph_constraints = [X[i, j] == 1
                                    for i, j in ComplementGraph.iterator(n, edge_locations)]
    # A_ij = 1 when i = j
    diagonal_constraints = [X[i, i] == 1 for i in range(n)]

    constraints = diagonal_constraints + complement_graph_constraints
    problem = cvx.Problem(objective, constraints)

    # robust kktsolver only works with CVXOPT
    if solver == cvx.CVXOPT:
        problem.solve(solver=solver, kktsolver=kktsolver)
    else:
        problem.solve(solver=solver)
    return problem
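# Hedged standalone sketch (hypothetical example, current CVXPY API, not part
# of the class above): the same lambda_max formulation of the Lovasz number,
# applied to the 5-cycle C5. Its non-edges are the chords {i, i+2 mod 5}, and
# the known value is theta(C5) = sqrt(5) ~= 2.236.
import cvxpy as cvx
import numpy as np

n = 5
edges = {(i, (i + 1) % n) for i in range(n)}
X = cvx.Variable((n, n), symmetric=True)
constraints = [X[i, i] == 1 for i in range(n)]
constraints += [X[i, j] == 1 for i in range(n) for j in range(i + 1, n)
                if (i, j) not in edges and (j, i) not in edges]
prob = cvx.Problem(cvx.Minimize(cvx.lambda_max(X)), constraints)
prob.solve(solver=cvx.SCS)
print(prob.value)  # approximately 2.236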
def test_lambda_max(self) -> None:
    """Test gradient for lambda_max
    """
    expr = cp.lambda_max(self.A)
    self.A.value = [[2, 0], [0, 1]]
    self.assertItemsAlmostEqual(expr.grad[self.A].toarray(), [1, 0, 0, 0])

    self.A.value = [[1, 0], [0, 2]]
    self.assertItemsAlmostEqual(expr.grad[self.A].toarray(), [0, 0, 0, 1])

    self.A.value = [[1, 0], [0, 1]]
    self.assertItemsAlmostEqual(expr.grad[self.A].toarray(), [0, 0, 0, 1])
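# Hedged companion sketch (plain NumPy, outside the test): for a symmetric
# matrix with a simple top eigenvalue, the gradient of lambda_max is the outer
# product v v^T of the unit top eigenvector, which is what the flattened
# expected values in the assertions above encode.
import numpy as np

A = np.array([[2.0, 0.0], [0.0, 1.0]])
w, V = np.linalg.eigh(A)   # eigenvalues in ascending order
v = V[:, -1]               # eigenvector of the largest eigenvalue
print(np.outer(v, v).flatten())  # ~ [1, 0, 0, 0]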
def smallestCircumScribedEllip(ptS, P=None, obj=None, maxChange=[None, None], Pold=None, **kwargs):
    # ptS holds the points columnwise
    # minCondition: upper bound on the condition number lambda_max/lambda_min
    opts = {'minCondition': 1.e3}
    opts.update(kwargs)

    eigMax = None
    if Pold is not None:
        # symmetrize the previous shape matrix (only when one is given)
        Pold = (Pold.T + Pold) / 2.
        eigMax = np.max(np.linalg.eigh(Pold)[0])

    dim = ptS.shape[0]
    if P is None:
        P = cvxpy.Semidef(dim, "P")

    cstr = [cvxpy.quad_form(aPt, P) <= 1. for aPt in ptS.T]

    # restrict the change relative to the previous solution
    zeta = cvxpy.Variable(1)
    if not (Pold is None) and not (maxChange[0] is None):
        cstr.append(eigMax * maxChange[0] * np.identity(dim) < P - zeta * Pold)
    if not (Pold is None) and not (maxChange[1] is None):
        cstr.append(P - zeta * Pold < eigMax * maxChange[1] * np.identity(dim))
    if not (opts['minCondition'] is None):
        cstr.append(
            cvxpy.lambda_max(P) < opts['minCondition'] * cvxpy.lambda_min(P))

    if obj is None:
        obj = cvxpy.Maximize(cvxpy.log_det(P))

    prob = cvxpy.Problem(obj, cstr)
    # prob.solve()
    prob.solve(solver='CVXOPT', verbose=False, kktsolver=cvxpy.ROBUST_KKTSOLVER)
    assert prob.status == 'optimal', "Failed to solve circumscribed ellip prob"

    Pval = np.array(P.value)
    return Pval
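# Hedged side note (hypothetical standalone example): the condition-number
# constraint used above, lambda_max(P) <= kappa * lambda_min(P), is DCP-valid
# because lambda_max is convex and lambda_min is concave. A minimal version
# with the current API (non-strict inequality, PSD variable) might look like:
import cvxpy
import numpy as np

dim, kappa = 2, 10.0
pts = np.array([[1.0, 0.0], [0.0, 3.0], [0.5, -2.0]]).T  # points, columnwise
P = cvxpy.Variable((dim, dim), PSD=True)
cstr = [cvxpy.quad_form(p, P) <= 1.0 for p in pts.T]
cstr.append(cvxpy.lambda_max(P) <= kappa * cvxpy.lambda_min(P))
cvxpy.Problem(cvxpy.Maximize(cvxpy.log_det(P)), cstr).solve(solver=cvxpy.SCS)
print(np.linalg.cond(P.value))  # should be at most about kappa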
def test_basic_lmi(self) -> None:
    np.random.seed(4)
    n = 3
    A = np.random.randn(n, n)
    A = A.T @ A
    X = cp.Variable(shape=(n, n))  # will fail if you try PSD=True or symmetric=True
    sigma = cp.suppfunc(X, [0 << X, cp.lambda_max(X) <= 1])
    Y = cp.Variable(shape=(n, n))
    cons = [Y == A]
    expr = sigma(Y)
    prob = cp.Problem(cp.Minimize(expr), cons)
    # opt value of support func would be at X=I.
    prob.solve(solver='SCS', eps=1e-8)
    actual1 = prob.value  # computed with epigraph
    actual2 = expr.value  # computed by evaluating support function, as a maximization problem.
    self.assertLessEqual(abs(actual1 - actual2), 1e-6)
    expect = np.trace(A)
    self.assertLessEqual(abs(actual1 - expect), 1e-4)
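# Hedged sanity check (plain NumPy, outside the test): for PSD A, the support
# function of {X : 0 << X, lambda_max(X) <= 1} at A is sup_X trace(A X) = trace(A),
# attained at X = I, which is the value the assertions above compare against.
import numpy as np

np.random.seed(4)
A = np.random.randn(3, 3)
A = A.T @ A
print(np.trace(A))  # expected optimal value of the support-function problem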
def test_lambda_max(self):
    with self.assertRaises(ValueError) as cm:
        cp.lambda_max([[1, 2], [3, 4]]).value
    self.assertEqual(str(cm.exception), "Input matrix was not Hermitian/symmetric.")
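# Hedged usage note: lambda_max is only defined for symmetric/Hermitian input,
# which is why the constant above is rejected. A common workaround (assumption:
# the symmetric part is what the caller actually wants) is to symmetrize first.
import cvxpy as cp
import numpy as np

M = np.array([[1.0, 2.0], [3.0, 4.0]])
print(cp.lambda_max((M + M.T) / 2).value)  # largest eigenvalue of the symmetric part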
# Z = cvx.Variable(3,3)
# objective = cvx.Minimize( sum(cvx.square(P - Z)) )
# constr = [cvx.constraints.semi_definite.SDP(P)]
# prob = cvx.Problem(objective, constr)
# prob.solve()

import cvxpy as cp
import numpy as np
import cvxopt

# create data P
P = cp.Parameter(3, 3)
Z = cp.semidefinite(3)
objective = cp.Minimize(cp.lambda_max(P) - cp.lambda_min(P - Z))
prob = cp.Problem(objective, [Z >= 0])
P.value = cvxopt.matrix(np.matrix('4 1 3; 1 3.5 0.8; 3 0.8 1'))
prob.solve()
print("optimal value =", prob.value)

# [ 4,     1+2*j,     3-j       ; ...
#   1-2*j, 3.5,       0.8+2.3*j ; ...
#   3+j,   0.8-2.3*j, 4         ];
#
# % Construct and solve the model
# n = size( P, 1 );
# cvx_begin sdp
#     variable Z(n,n) hermitian toeplitz
#     dual variable Q
bar = rec_rate / m
ba = 2.5 * bar  # Most networks are slightly viral, not too much
bs = 20 * ba / 100
epsbar = 1
Delta = max(epsbar, max_rec_rate)
Budget = 1.5

beta = cp.Variable(N, pos=True)
delta = cp.Variable(N, pos=True)
B = np.ones(N) - beta
D = np.zeros(N) + delta
# v = cp.Variable(pos=True)
# lamb = cp.Variable(pos=True)

obj = cp.Minimize(cp.lambda_max(cp.diag(B) * G - cp.diag(D)))

constraints = []
constraints.append(cp.sum(B) + cp.sum(D) <= Budget)
constraints.append(beta <= 1)
constraints.append(beta >= 0)
constraints.append(delta <= 1)

prob = cp.Problem(obj, constraints)
result = prob.solve()
print(result)
print(nx.degree_centrality(raw_G))
print(beta.value)
print(delta.value)
    minimize || Z - P ||_F
    subject to Z >= 0

Adapted from an example provided in the SeDuMi documentation and CVX examples.
Unlike those examples, the data is real (not complex) and the result is only
required to be PSD (instead of also Toeplitz).
"""
import cvxpy as cp
import numpy as np
import cvxopt

# create data P
P = cp.Parameter(3, 3)
Z = cp.SDPVar(3, 3)
objective = cp.Minimize(cp.lambda_max(P) - cp.lambda_min(P - Z))
prob = cp.Problem(objective)
P.value = cvxopt.matrix(np.matrix('4 1 3; 1 3.5 0.8; 3 0.8 1'))
prob.solve()

# [ 4,     1+2*j,     3-j       ; ...
#   1-2*j, 3.5,       0.8+2.3*j ; ...
#   3+j,   0.8-2.3*j, 4         ];
#
# % Construct and solve the model
# n = size( P, 1 );
# cvx_begin sdp
#     variable Z(n,n) hermitian toeplitz
#     dual variable Q
#     minimize( norm( Z - P, 'fro' ) )
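# Hedged sketch with the current CVXPY API (the two snippets above use the
# pre-1.0 Parameter/semidefinite/SDPVar interface): the problem stated in the
# docstring, minimize ||Z - P||_F subject to Z being PSD, for the same data.
import cvxpy as cp
import numpy as np

P = np.array([[4, 1, 3], [1, 3.5, 0.8], [3, 0.8, 1]])
Z = cp.Variable((3, 3), PSD=True)
prob = cp.Problem(cp.Minimize(cp.norm(Z - P, 'fro')))
prob.solve(solver=cp.SCS)
print(prob.value)
print(np.linalg.eigvalsh(Z.value))  # all eigenvalues should be (numerically) nonnegative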
def C_soc_scaled():
    return [cp.norm2(randn()*x) <= randn()*t]

def C_soc_translated():
    return [cp.norm2(x + randn()) <= t + randn()]

def C_soc_scaled_translated():
    return [cp.norm2(randn()*x + randn()) <= randn()*t + randn()]

# Proximal operators
PROX_TESTS = [
    #prox("MATRIX_FRAC", lambda: cp.matrix_frac(p, X)),
    #prox("SIGMA_MAX", lambda: cp.sigma_max(X)),
    prox("AFFINE", lambda: randn(n).T*x),
    prox("CONSTANT", lambda: 0),
    prox("LAMBDA_MAX", lambda: cp.lambda_max(X)),
    prox("LOG_SUM_EXP", lambda: cp.log_sum_exp(x)),
    prox("MAX", lambda: cp.max_entries(x)),
    prox("NEG_LOG_DET", lambda: -cp.log_det(X)),
    prox("NON_NEGATIVE", None, C_non_negative_scaled),
    prox("NON_NEGATIVE", None, C_non_negative_scaled_elemwise),
    prox("NON_NEGATIVE", None, lambda: [x >= 0]),
    prox("NORM_1", f_norm1_weighted),
    prox("NORM_1", lambda: cp.norm1(x)),
    prox("NORM_2", lambda: cp.norm(X, "fro")),
    prox("NORM_2", lambda: cp.norm2(x)),
    prox("NORM_NUCLEAR", lambda: cp.norm(X, "nuc")),
    #prox("QUAD_OVER_LIN", lambda: cp.quad_over_lin(p, q1)),
    prox("SECOND_ORDER_CONE", None, C_soc_scaled),
    prox("SECOND_ORDER_CONE", None, C_soc_scaled_translated),
    prox("SECOND_ORDER_CONE", None, C_soc_translated),
def C_soc_scaled():
    return [cp.norm2(randn()*x) <= randn()*t]

def C_soc_translated():
    return [cp.norm2(x + randn()) <= t + randn()]

def C_soc_scaled_translated():
    return [cp.norm2(randn()*x + randn()) <= randn()*t + randn()]

# Proximal operators
PROX_TESTS = [
    #prox("MATRIX_FRAC", lambda: cp.matrix_frac(p, X)),
    #prox("SIGMA_MAX", lambda: cp.sigma_max(X)),
    prox("AFFINE", lambda: randn(n).T*x),
    prox("CONSTANT", lambda: 0),
    prox("LAMBDA_MAX", lambda: cp.lambda_max(X)),
    prox("LOG_SUM_EXP", lambda: cp.log_sum_exp(x)),
    prox("MAX", lambda: cp.max_entries(x)),
    prox("NEG_LOG_DET", lambda: -cp.log_det(X)),
    prox("NON_NEGATIVE", None, C_non_negative_scaled),
    prox("NON_NEGATIVE", None, C_non_negative_scaled_elemwise),
    prox("NON_NEGATIVE", None, lambda: [x >= 0]),
    prox("NORM_1", f_norm1_weighted),
    prox("NORM_1", lambda: cp.norm1(x)),
    prox("NORM_2", lambda: cp.norm(X, "fro")),
    prox("NORM_2", lambda: cp.norm2(x)),
    prox("NORM_NUCLEAR", lambda: cp.norm(X, "nuc")),
    prox("SECOND_ORDER_CONE", None, C_soc_scaled),
    prox("SECOND_ORDER_CONE", None, C_soc_scaled_translated),
    prox("SECOND_ORDER_CONE", None, C_soc_translated),
    prox("SECOND_ORDER_CONE", None, lambda: [cp.norm(X, "fro") <= t]),