def sub_graph_finder(A_tau, A0):
    m, n = A0.shape
    A = cvx.Symmetric(n)
    # objective function
    objective = cvx.Maximize(cvx.trace(A * A0))
    # A_i,j = 0 if A0_i,j = 0 (and i != j)
    edge_constraints = EdgeLocationConstraint(n, A0, A, 0)
    # γ = eigenvalue corresponding to the largest eigenspace of the
    # corresponding graph (eigenvalue with highest multiplicity)
    gamma = most_common_eigenvalue(A_tau)
    clique_size, same_clique_size = A_tau.shape
    # TODO SCHUR-HORN CONSTRAINT
    schur_horn_constraints = CliqueSchurHornOrbitopeConstraint(
        transform_matrix(n, A_tau, gamma), A, clique_size)
    # currently will only work for finding cliques
    constraints = (edge_constraints.constraint_list +
                   schur_horn_constraints.constraint_list)
    problem = cvx.Problem(objective, constraints)
    problem.solve(kktsolver='robust')
    if problem.status == 'optimal':
        return problem.status, problem.value, A.value
    else:
        return problem.status, np.nan, np.nan
def _restrict(self, matrix):
    # eigendecomposition of the reference matrix
    w, V = np.linalg.eigh(matrix)
    w_sorted_idxs = np.argsort(-w)
    # eigenvalues and eigenvectors of the k largest eigenvalues
    pos_w = w[w_sorted_idxs[:self.k]]
    pos_V = V[:, w_sorted_idxs[:self.k]]
    # constrain self to symmetric matrices whose range lies in the span of
    # those top-k eigenvectors
    Sigma = cvx.Symmetric(self.k, self.k)
    return [self == pos_V * Sigma * pos_V.T]
def ntf_fir_from_digested(Qs, A, C, H_inf, **opts):
    """
    Synthesize FIR NTF from predigested specification

    Version for the cvxpy modeler.
    """
    verbose = opts['show_progress']
    if opts['cvxpy_opts']['solver'] == 'cvxopt':
        opts['cvxpy_opts']['solver'] = cvxpy.CVXOPT
    elif opts['cvxpy_opts']['solver'] == 'scs':
        opts['cvxpy_opts']['solver'] = cvxpy.SCS
    order = np.size(Qs, 0) - 1
    br = cvxpy.Variable(order, 1, name='br')
    b = cvxpy.vstack(1, br)
    X = cvxpy.Symmetric(order, name='X')
    target = cvxpy.Minimize(cvxpy.norm2(Qs * b))
    B = np.vstack((np.zeros((order - 1, 1)), 1.))
    C = C + br[::-1].T
    D = np.matrix(1.)
    M1 = A.T * X
    M2 = M1 * B
    M = cvxpy.bmat([[M1 * A - X, M2, C.T],
                    [M2.T, B.T * X * B - H_inf**2, D],
                    [C, D, np.matrix(-1.)]])
    constraints = [M << 0, X >> 0]
    p = cvxpy.Problem(target, constraints)
    p.solve(verbose=verbose, **opts['cvxpy_opts'])
    return np.hstack((1, np.asarray(br.value.T)[0]))
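A note on the LMI above (this reading is not stated in the source, but the block matrix has the standard bounded-real form): with X positive semidefinite, a Schur complement with respect to the last diagonal entry of M shows that the constraint M << 0 is equivalent to

\[
\begin{bmatrix}
A^{T}XA - X + C^{T}C & A^{T}XB + C^{T}D \\
B^{T}XA + D^{T}C & B^{T}XB + D^{T}D - H_{\infty}^{2}
\end{bmatrix} \preceq 0,
\]

i.e. the discrete-time bounded-real (KYP) condition certifying that the transfer function realized by (A, B, C, D), here the NTF, satisfies the Lee constraint \|NTF\|_\infty <= H_inf.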
def deconvolve(self, A, epsilon_vector):
    '''
    Recover the precise labelling of A1 and A2, where A = A1 + A2.

    Args:
        A (matrix): Composition of A1 and A2

    Returns:
        problem.status (str): Solver status ('optimal' on success)
        problem.value (float): Value of the objective function
            (Euclidean norm of (A - A1* - A2*))
        A1* (matrix): Precise labelling of A1
        A2* (matrix): Precise labelling of A2
    '''
    # Realisations of the precise labelling of A1 and A2
    A1_labelled = cvx.Symmetric(self.n)
    A2_labelled = cvx.Symmetric(self.n)
    # objective function
    objective = cvx.Minimize(cvx.pnorm((A - A1_labelled - A2_labelled), 2))
    # Convex hull constraints
    A1_hull = SpectralHullConstraint(self.A1, A1_labelled, epsilon_vector)
    A2_hull = SpectralHullConstraint(self.A2, A2_labelled, epsilon_vector)
    # all values between 0 and 1
    A1_limits = NodeLimitConstraint(A1_labelled, lower_limit=0, upper_limit=1)
    A2_limits = NodeLimitConstraint(A2_labelled, lower_limit=0, upper_limit=1)
    constraints = (A1_hull.constraint_list + A2_hull.constraint_list +
                   A1_limits.constraint_list + A2_limits.constraint_list)
    problem = cvx.Problem(objective, constraints)
    problem.solve(solver=cvx.MOSEK)
    if problem.status == 'optimal':
        return problem.status, problem.value, A1_labelled.value, A2_labelled.value
    else:
        return problem.status, np.nan, np.nan, np.nan
def denoise(self, A_noisy, epsilon_vector):
    A_recovered = cvx.Symmetric(self.n)
    # Convex hull constraint around the spectrum of the reference graph
    spectral_hull = SpectralHullConstraint(self.A, A_recovered, epsilon_vector)
    # objective: minimise the distance to the noisy observation
    # (assumed intent; a constraint list is not a valid objective and
    # A_noisy is otherwise unused)
    objective = cvx.Minimize(cvx.pnorm(A_noisy - A_recovered, 2))
    # all values between 0 and 1
    node_limits = NodeLimitConstraint(A_recovered, lower_limit=0, upper_limit=1)
    constraints = spectral_hull.constraint_list + node_limits.constraint_list
    problem = cvx.Problem(objective, constraints)
    problem.solve(solver=cvx.MOSEK)
    if problem.status == 'optimal':
        return problem.status, problem.value, A_recovered.value
    else:
        return problem.status, np.nan, np.nan
def ntf_fir_from_digested(order, osrs, H_inf, f0s, zf, **opts):
    """
    Synthesize FIR NTF with minmax approach from predigested specification

    Version for the cvxpy_tinoco modeler.
    """
    verbose = opts['show_progress']
    if opts['cvxpy_opts']['solver'] == 'cvxopt':
        opts['cvxpy_opts']['solver'] = cvxpy.CVXOPT
    elif opts['cvxpy_opts']['solver'] == 'scs':
        opts['cvxpy_opts']['solver'] = cvxpy.SCS

    # State space representation of NTF
    A = np.matrix(np.eye(order, order, 1))
    B = np.matrix(np.vstack((np.zeros((order-1, 1)), 1.)))
    # C contains the NTF coefficients
    D = np.matrix(1)

    # Set up the problem
    bands = len(f0s)
    c = cvxpy.Variable(1, order)
    F = []
    gg = cvxpy.Variable(bands, 1)

    for idx in range(bands):
        f0 = f0s[idx]
        osr = osrs[idx]
        omega0 = 2*f0*np.pi
        Omega = 1./osr*np.pi
        P = cvxpy.Symmetric(order)
        Q = cvxpy.Semidef(order)
        if f0 == 0:
            # Lowpass modulator
            M1 = A.T*P*A + Q*A + A.T*Q - P - 2*Q*np.cos(Omega)
            M2 = A.T*P*B + Q*B
            M3 = B.T*P*B - gg[idx, 0]
            M = cvxpy.bmat([[M1, M2, c.T],
                            [M2.T, M3, D],
                            [c, D, -1]])
            F += [M << 0]
            if zf:
                # Force a zero at DC
                F += [cvxpy.sum_entries(c) == -1]
        else:
            # Bandpass modulator
            M1r = (A.T*P*A + Q*A*np.cos(omega0) + A.T*Q*np.cos(omega0) -
                   P - 2*Q*np.cos(Omega))
            M2r = A.T*P*B + Q*B*np.cos(omega0)
            M3r = B.T*P*B - gg[idx, 0]
            M1i = A.T*Q*np.sin(omega0) - Q*A*np.sin(omega0)
            M21i = -Q*B*np.sin(omega0)
            M22i = B.T*Q*np.sin(omega0)
            Mr = cvxpy.bmat([[M1r, M2r, c.T],
                             [M2r.T, M3r, D],
                             [c, D, -1]])
            Mi = cvxpy.bmat([[M1i, M21i, np.zeros((order, 1))],
                             [M22i, 0, 0],
                             [np.zeros((1, order)), 0, 0]])
            M = cvxpy.bmat([[Mr, Mi],
                            [-Mi, Mr]])
            F += [M << 0]
            if zf:
                # Force a zero at z=np.exp(1j*omega0)
                nn = np.arange(order).reshape((order, 1))
                vr = np.matrix(np.cos(omega0*nn))
                vi = np.matrix(np.sin(omega0*nn))
                vn = np.matrix([-np.cos(omega0*order), -np.sin(omega0*order)])
                F += [c*cvxpy.hstack(vr, vi) == vn]
    if H_inf < np.inf:
        # Enforce the Lee constraint
        R = cvxpy.Semidef(order)
        MM = cvxpy.bmat([[A.T*R*A - R, A.T*R*B, c.T],
                         [B.T*R*A, -H_inf**2 + B.T*R*B, D],
                         [c, D, -1]])
        F += [MM << 0]
    target = cvxpy.Minimize(cvxpy.max_entries(gg))
    p = cvxpy.Problem(target, F)
    p.solve(verbose=verbose, **opts['cvxpy_opts'])
    return np.hstack((1, np.asarray(c.value)[0, ::-1]))
def latent_variable_gmm_cvx(X_o, alpha=1, lambda_s=1, S_init=None,
                            return_costs=False, verbose=False):
    '''
    A cvx implementation of the Latent Variable Gaussian Graphical Model

    See the review of "Venkat Chandrasekaran, Pablo A. Parrilo, and Alan S.
    Willsky. Latent variable graphical model selection via convex
    optimization. The Annals of Statistics, 40(4):1935–1967, 2012."

        min_{S, L} -log det(S-L) + trace(emp_Cov*(S-L))
                   + alpha*lambda_s*\|S\|_{1} + alpha*\|L\|_{*}

        s.t. S-L \succeq 0
             L \succeq 0

    return S, L
    '''
    n, m = X_o.shape
    emp_cov = np.cov(X_o)
    if alpha == 0:
        # No regularisation: fall back to the empirical estimates
        # (mirrors scikit-learn's graph lasso shortcut; log_likelihood is the
        # helper from sklearn.covariance)
        if return_costs:
            precision = np.linalg.pinv(emp_cov)
            cost = -2. * log_likelihood(emp_cov, precision)
            cost += n * np.log(2 * np.pi)
            d_gap = np.sum(emp_cov * precision) - n
            return emp_cov, precision, (cost, d_gap)
        else:
            return emp_cov, np.linalg.pinv(emp_cov)

    if S_init is None:
        covariance_o = emp_cov.copy()
    else:
        covariance_o = S_init.copy()
    # As a trivial regularization (Tikhonov like), we scale down the
    # off-diagonal coefficients of our starting point: This is needed, as
    # in the cross-validation the cov_init can easily be ill-conditioned,
    # and the CV loop blows up. Besides, this takes a conservative
    # standpoint on the initial conditions, and it tends to make the
    # convergence go faster.
    covariance_o *= 0.95
    diagonal_o = emp_cov.flat[::n + 1]
    covariance_o.flat[::n + 1] = diagonal_o

    # define the low-rank term L and sparse term S
    L = cvx.Semidef(n)
    S = cvx.Symmetric(n)
    # define the SDP problem
    objective = cvx.Minimize(-cvx.log_det(S - L) +
                             cvx.trace(covariance_o * (S - L)) +
                             alpha * lambda_s * cvx.norm(S, 1) +
                             alpha * cvx.norm(L, "nuc"))
    constraints = [S - L >> 0]
    # solve the problem
    problem = cvx.Problem(objective, constraints)
    problem.solve(verbose=verbose)
    return (S.value, L.value)
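A hypothetical usage sketch, not part of the original source: it generates synthetic Gaussian data with a tridiagonal precision matrix and calls the routine above, assuming the old cvxpy 0.4 API (cvx.Semidef / cvx.Symmetric) that the function itself relies on.

if __name__ == '__main__':
    np.random.seed(0)
    n, m = 10, 200  # features, samples
    # tridiagonal, diagonally dominant precision matrix (hence positive definite)
    true_prec = np.eye(n) + 0.3 * np.eye(n, k=1) + 0.3 * np.eye(n, k=-1)
    true_cov = np.linalg.inv(true_prec)
    # X_o is features-by-samples, matching np.cov's default rowvar=True
    X_o = np.random.multivariate_normal(np.zeros(n), true_cov, size=m).T
    S_hat, L_hat = latent_variable_gmm_cvx(X_o, alpha=0.1, lambda_s=1.0)
    print("nonzeros in S:", int(np.sum(np.abs(S_hat) > 1e-3)))
    print("numerical rank of L:", np.linalg.matrix_rank(np.asarray(L_hat), tol=1e-3))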
# sparse well-connected graphs on 16 nodes
def generate_sparse_well_connected_constraints(n, M):
    # all values between 0 and 1
    limit_constraints = NodeLimitConstraint(M, lower_limit=0, upper_limit=1)
    # diag(M) == 0
    diagonal_constraints = DiagonalConstraint(n, M, 0)
    # (A*ones)_i <= 2.5
    degree_constraints = MaxWeightedDegreeConstraint(n, M, 2.5)
    # 2nd smallest eigenvalue of the Laplacian >= 1.1
    laplacian_constraints = LaplacianLambdaSecondMinConstraint(M, 1.1)
    return (limit_constraints.constraint_list +
            diagonal_constraints.constraint_list +
            degree_constraints.constraint_list +
            laplacian_constraints.constraint_list)


if __name__ == '__main__':
    # a 16 node path (cycle with a vertex removed)
    n = 16
    A = ((0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7), (7, 8),
         (8, 9), (9, 10), (10, 11), (11, 12), (12, 13), (13, 14), (14, 15))
    A_matrix = Graph.create_adjacency_matrix(n, A)
    M = cvx.Symmetric(n)
    family_1_constraints = generate_cycle_family_constraints(n, M)
    family_2_constraints = generate_sparse_well_connected_constraints(n, M)
    test_families(A_matrix, M, family_1_constraints, family_2_constraints)
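For illustration only (the Graph.create_adjacency_matrix helper is defined elsewhere in the repository and not shown here), a plain-numpy sketch of the symmetric 0/1 adjacency matrix the script presumably builds from the edge list above:

import numpy as np

def path_adjacency(n, edges):
    # symmetric 0/1 adjacency with no self-loops (assumed behaviour of the
    # Graph.create_adjacency_matrix helper used in the script above)
    adj = np.zeros((n, n))
    for i, j in edges:
        adj[i, j] = adj[j, i] = 1
    return adj

# e.g. path_adjacency(16, A) for the 16-node path defined above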
import numpy as np
import cvxpy as cp
import scipy as sp
import scipy.sparse as sps
import scipy.linalg as la

np.random.seed(1)
n = 100
A = np.diag(-np.logspace(-0.5, 1, n))
U = la.orth(np.random.randn(n, n))
A = U.T.dot(A.dot(U))
P = cp.Symmetric(n, n)

f = cp.trace(P)
C = [A.T * P + P * A << np.eye(n),
     P >> np.eye(n)]
prob = cp.Problem(cp.Minimize(f), C)

problemDict = {"problemID": "trace",
               "problem": prob,
               "opt_val": None}
problems = [problemDict]

# For debugging individual problems:
if __name__ == "__main__":
    def printResults(problemID="", problem=None, opt_val=None):
        print(problemID)
        problem.solve()
def sls_common_lyapunov(A, B, Q, R, eps_A, eps_B, tau, logger=None):
    """
    Solves the common Lyapunov relaxation to the robust synthesis problem.

    Taken from
    lstd-lqr/blob/master/code/policy_iteration.ipynb
    learning-lqr/experiments/matlab/sls_synth_yalmip/common_lyap_synth_var2_alpha.m
    """
    if logger is None:
        logger = logging.getLogger(__name__)

    d, p = B.shape
    X = cvx.Symmetric(d)  # inverse Lyapunov function
    Z = cvx.Variable(p, d)  # -K*X
    W_11 = cvx.Symmetric(d)
    W_12 = cvx.Variable(d, p)
    W_22 = cvx.Symmetric(p)
    alph = cvx.Variable()  # scalar for tuning the H_inf constraint

    constraints = []

    # H2 cost: trace(W) = H2 cost
    mat1 = cvx.bmat([[X, X, Z.T],
                     [X, W_11, W_12],
                     [Z, W_12.T, W_22]])
    constraints.append(mat1 == cvx.Semidef(2 * d + p))

    # H_infinity constraint
    mat2 = cvx.bmat(
        [[X - np.eye(d), (A * X + B * Z), np.zeros((d, d)), np.zeros((d, p))],
         [(X * A.T + Z.T * B.T), X, eps_A * X, eps_B * Z.T],
         [np.zeros((d, d)), eps_A * X, alph * (tau**2) * np.eye(d),
          np.zeros((d, p))],
         [np.zeros((p, d)), eps_B * Z, np.zeros((p, d)),
          (1 - alph) * (tau**2) * np.eye(p)]])
    constraints.append(mat2 == cvx.Semidef(3 * d + p))

    # constrain alpha to be in [0,1]:
    constraints.append(alph >= 0)
    constraints.append(alph <= 1)

    # Solve!
    objective = cvx.Minimize(cvx.trace(Q * W_11) + cvx.trace(R * W_22))
    prob = cvx.Problem(objective, constraints)
    try:
        obj = prob.solve(solver=cvx.MOSEK)
    except cvx.SolverError:
        logger.warning("SolverError encountered")
        return (False, None, None, None)

    if prob.status == cvx.OPTIMAL:
        logger.debug("common_lyapunov: found optimal solution")
        X_value = np.array(X.value)
        P_value = scipy.linalg.solve(X_value, np.eye(d), sym_pos=True)
        # NOTE: the K returned here is meant to be used
        # as A + BK **NOT** A - BK
        K_value = np.array(Z.value).dot(P_value)
        return (True, obj, P_value, K_value)
    else:
        logger.debug("common_lyapunov: could not solve (status={})".format(
            prob.status))
        return (False, None, None, None)
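A hypothetical smoke test, not from the original repository: the system matrices, uncertainty levels, and tau below are made up, and MOSEK plus the old cvxpy API used above are assumed to be installed.

if __name__ == '__main__':
    np.random.seed(0)
    d, p = 3, 2
    A = 0.9 * np.eye(d)          # a stable nominal system (example data)
    B = np.random.randn(d, p)
    Q = np.eye(d)
    R = np.eye(p)
    feasible, obj, P, K = sls_common_lyapunov(
        A, B, Q, R, eps_A=0.05, eps_B=0.05, tau=0.999)
    if feasible:
        print("H2 cost upper bound:", obj)
        # per the note in the function, the controller is applied as A + B*K
        print("closed-loop eigenvalues:", np.linalg.eigvals(A + B.dot(K)))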
def __init__(self, n):
    self.n = n
    self.A = cvx.Symmetric(n)
    np.random.seed(1)  # move out of class?
    self.M = np.random.normal(size=(n, n))