def trainC(K, y, D, L=10**-6, L2=10**-6):
    # make sure K is non negative
    n = K.shape[0]
    DK = D.dot(K)
    # fix y dim for cvxpy
    y = y[:, 0]
    alpha = cp.Variable(n)
    # loss = cp.sum(cp.huber(cp.matmul(K, alpha) - y, 1))
    loss = cp.pnorm(cp.matmul(K, alpha) - y, p=2)**2
    # pnorm already returns a scalar, so no outer cp.sum is needed
    reg1 = L * cp.pnorm(cp.matmul(DK, alpha), p=1)
    reg2 = L2 * cp.quad_form(alpha, K)
    obj = loss + reg1 + reg2
    problem = cp.Problem(cp.Minimize(obj))
    try:
        problem.solve()
    except cp.SolverError:
        problem.solve(solver=cp.SCS)
    a = alpha.value
    if a is None:
        return np.zeros((n, 1))
    return a.reshape((n, 1))

def synth(X1, X0):
    """
    Automates the synthetic control method for a given country.
    The data must be preprocessed first! See prep_donnee().
    """
    V_opt = cvx.Parameter((X0.shape[0], X0.shape[0]), PSD=True)  # parameter matrix V
    x = cvx.Variable((X0.shape[1], 1), nonneg=True)  # vector of cvxpy variables
    cost = cvx.pnorm(V_opt @ (X1 - X0 @ x))  # cost function: norm of the residuals
    constraints = [cvx.sum(x) == 1]  # the constraint
    prob = cvx.Problem(cvx.Minimize(cost), constraints)  # define the problem
    # tps1 = clock()
    contrainte = LinearConstraint(np.ones((1, X0.shape[0])), 1, 1)
    bounds = [(0, 1) for i in range(X0.shape[0])]
    result = differential_evolution(loss_V, bounds, maxiter=1,
                                    constraints=contrainte, polish=False)
    # tps2 = clock()
    # print((tps2 - tps1)/60)
    V_opt.value = np.diag(result.x)
    cost = cvx.pnorm(V_opt.value @ (X1 - X0 @ x))
    prob = cvx.Problem(cvx.Minimize(cost), constraints)
    prob.solve()
    W = x.value
    RMSPE_train = np.linalg.norm(X1 - X0 @ W)  # residual norm on the training fit
    return W, V_opt.value, RMSPE_train

def test_pnorm(self) -> None:
    """Test gradient for pnorm."""
    expr = cp.pnorm(self.x, 1)
    self.x.value = [-1, 0]
    self.assertItemsAlmostEqual(expr.grad[self.x].toarray(), [-1, 0])

    self.x.value = [0, 10]
    self.assertItemsAlmostEqual(expr.grad[self.x].toarray(), [0, 1])

    expr = cp.pnorm(self.x, 2)
    self.x.value = [-3, 4]
    self.assertItemsAlmostEqual(expr.grad[self.x].toarray(),
                                np.array([[-3.0 / 5], [4.0 / 5]]))

    expr = cp.pnorm(self.x, 0.5)
    self.x.value = [-1, 2]
    self.assertAlmostEqual(expr.grad[self.x], None)

    expr = cp.pnorm(self.x, 0.5)
    self.x.value = [0, 0]
    self.assertAlmostEqual(expr.grad[self.x], None)

    expr = cp.pnorm(self.x, 2)
    self.x.value = [0, 0]
    self.assertItemsAlmostEqual(expr.grad[self.x].toarray(), [0, 0])

    expr = cp.pnorm(self.x[:, None], 2, axis=1)
    self.x.value = [1, 2]
    val = np.eye(2)
    self.assertItemsAlmostEqual(expr.grad[self.x].toarray(), val)

    expr = cp.pnorm(self.A, 2)
    self.A.value = np.array([[2, -2], [2, 2]])
    self.assertItemsAlmostEqual(expr.grad[self.A].toarray(),
                                [0.5, 0.5, -0.5, 0.5])

    expr = cp.pnorm(self.A, 2, axis=0)
    self.A.value = np.array([[3, -3], [4, 4]])
    self.assertItemsAlmostEqual(
        expr.grad[self.A].toarray(),
        np.array([[0.6, 0], [0.8, 0], [0, -0.6], [0, 0.8]]))

    expr = cp.pnorm(self.A, 2, axis=1)
    self.A.value = np.array([[3, -4], [4, 3]])
    self.assertItemsAlmostEqual(
        expr.grad[self.A].toarray(),
        np.array([[0.6, 0], [0, 0.8], [-0.8, 0], [0, 0.6]]))

    expr = cp.pnorm(self.A, 0.5)
    self.A.value = np.array([[3, -4], [4, 3]])
    self.assertAlmostEqual(expr.grad[self.A], None)

def compute_w_opt(C_yy, mu_y, mu_train_y, rho):
    n = C_yy.shape[0]
    theta = cp.Variable(n)
    b = mu_y - mu_train_y
    objective = cp.Minimize(cp.pnorm(C_yy @ theta - b) + rho * cp.pnorm(theta))
    constraints = [-1 <= theta]
    prob = cp.Problem(objective, constraints)
    result = prob.solve()
    w = 1 + theta.value
    print('Estimated w is', w)
    return w

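# Hedged usage sketch for compute_w_opt (not from the original source):
# exercises it on a synthetic, well-conditioned confusion matrix C_yy and
# toy label-distribution estimates; all names and values are illustrative.
import numpy as np
import cvxpy as cp

C_yy_demo = 0.8 * np.eye(3) + 0.1 * np.ones((3, 3))  # confusion matrix estimate
mu_y_demo = np.array([0.5, 0.3, 0.2])                # test label distribution
mu_train_demo = np.array([1.0, 1.0, 1.0]) / 3.0      # train label distribution
w_demo = compute_w_opt(C_yy_demo, mu_y_demo, mu_train_demo, rho=1e-3)
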
def test_projection(self):
    """Test projection, query methods of the Zeros oracle."""
    x = cvxpy.Variable(1000)
    zeros = Zeros(x)
    constr = zeros._constr

    # Test idempotency
    x_0 = np.zeros(1000)
    x_star = zeros.project(x_0)
    self.assertTrue(zeros.contains(x_star))
    self.assertTrue(np.array_equal(x_0, x_star), "projection not idempotent")
    p = cvxpy.Problem(cvxpy.Minimize(cvxpy.pnorm(x - x_0, 2)), constr)
    p.solve()
    self.assertTrue(
        np.isclose(np.array(x.value).flatten(), x_star, atol=1e-3).all())
    utils.query_helper(self, x_0, x_star, zeros, idempotent=True)

    # Test the case in which x_0 >= 0
    x_0 = np.abs(np.random.randn(1000))
    x_star = zeros.project(x_0)
    self.assertTrue(np.array_equal(x_star, np.zeros(1000)),
                    "projection not zero")
    p = cvxpy.Problem(cvxpy.Minimize(cvxpy.pnorm(x - x_0, 2)), constr)
    p.solve()
    self.assertTrue(
        np.isclose(np.array(x.value).flatten(), x_star, atol=1e-3).all())
    utils.query_helper(self, x_0, x_star, zeros, idempotent=False)

    # Test the case in which x_0 <= 0
    x_0 = -1 * np.abs(np.random.randn(1000))
    x_star = zeros.project(x_0)
    self.assertTrue(np.array_equal(x_star, np.zeros(1000)),
                    "projection not zero")
    p = cvxpy.Problem(cvxpy.Minimize(cvxpy.pnorm(x - x_0, 2)), constr)
    p.solve()
    self.assertTrue(
        np.isclose(np.array(x.value).flatten(), x_star, atol=1e-3).all())
    utils.query_helper(self, x_0, x_star, zeros, idempotent=False)

    # Test random projection
    x_0 = np.random.randn(1000)
    x_star = zeros.project(x_0)
    self.assertTrue(np.array_equal(x_star, np.zeros(1000)),
                    "projection not zero")
    p = cvxpy.Problem(cvxpy.Minimize(cvxpy.pnorm(x - x_0, 2)), constr)
    p.solve()
    self.assertTrue(
        np.isclose(np.array(x.value).flatten(), x_star, atol=1e-3).all())

def compute_w_feature(C, feature_test, feature_train, rho, labda=1):
    n = C.shape[1]
    theta = cp.Variable(n)
    print("theta: ", theta)
    b = feature_test - feature_train
    objective = cp.Minimize(cp.pnorm(C @ theta - b) + rho * cp.pnorm(theta))
    constraints = [-1 <= theta]
    prob = cp.Problem(objective, constraints)
    result = prob.solve()
    w = 1 + theta.value * labda
    print('Estimated w_feature is', w)
    return w

def test_projection(self):
    """Test projection, query methods of the SOC oracle."""
    x = cvxpy.Variable(1000)
    x_0 = np.ones(1000)
    soc = SOC(x)
    constr = soc._constr

    # Test idempotency
    norm_z = np.linalg.norm(x_0[:-1], 2)
    x_0[-1] = norm_z + 10
    x_star = soc.project(x_0)
    self.assertTrue(soc.contains(x_star))
    self.assertTrue(np.array_equal(x_0, x_star), "projection not idempotent")
    p = cvxpy.Problem(cvxpy.Minimize(cvxpy.pnorm(x - x_0, 2)), constr)
    p.solve()
    self.assertTrue(
        np.isclose(np.array(x.value).flatten(), x_star, atol=1e-3).all())
    utils.query_helper(self, x_0, x_star, soc, idempotent=True)

    # Test the case in which norm_z <= -t
    x_0[-1] = -1 * norm_z
    x_star = soc.project(x_0)
    self.assertTrue(np.array_equal(x_star, np.zeros(1000)),
                    "projection not zero")
    p = cvxpy.Problem(cvxpy.Minimize(cvxpy.pnorm(x - x_0, 2)), constr)
    p.solve()
    self.assertTrue(
        np.isclose(np.array(x.value).flatten(), x_star, atol=1e-3).all())
    utils.query_helper(self, x_0, x_star, soc, idempotent=False)

    # Test the case in which norm_z > abs(t)
    x_0[-1] = norm_z - 10
    x_star = soc.project(x_0)
    p = cvxpy.Problem(cvxpy.Minimize(cvxpy.pnorm(x - x_0, 2)), constr)
    p.solve()
    self.assertTrue(
        np.isclose(np.array(x.value).flatten(), x_star, atol=1e-3).all())
    utils.query_helper(self, x_0, x_star, soc, idempotent=False)

    # Test random projection
    x_0 = np.random.randn(1000)
    x_star = soc.project(x_0)
    p = cvxpy.Problem(cvxpy.Minimize(cvxpy.pnorm(x - x_0, 2)), constr)
    p.solve()
    self.assertTrue(
        np.isclose(np.array(x.value).flatten(), x_star, atol=1e-3).all())

def test_pnorm(self):
    """Test domain for pnorm."""
    dom = cp.pnorm(self.a, -0.5).domain
    prob = Problem(Minimize(self.a), dom)
    prob.solve()
    self.assertAlmostEqual(prob.value, 0)

def cvxpy_logistic_loss_l2(w, X, y, lam=None, num_points=None):
    """CVXPY implementation of L2 regularized logistic loss.

    Args:
        w (np.ndarray): 1D, the weight matrix with shape (n_features,).
        X (np.ndarray): 2D, the features with shape (n_samples, n_features).
        y (np.ndarray): 1D, the true labels with shape (n_samples,).
        lam (float): regularization parameter.
        num_points (int): number of points in X (corresponds to the first
            dimension of X "n", but some methods pass a different value
            for scaling).

    Returns:
        (float): the loss.
    """
    if lam is None:
        lam = 1.0
    if num_points is None:
        num_points = X.shape[0]
    yz = cvxpy.multiply(-y, X @ w)
    logistic_loss = cvxpy.sum(cvxpy.logistic(yz))
    l2_reg = (float(lam) / 2.0) * cvxpy.pnorm(w, p=2)**2
    out = logistic_loss + l2_reg
    return out / num_points

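# Hedged usage sketch (not part of the original source): despite the
# docstring's np.ndarray annotation, w must be a cvxpy Variable so the
# returned expression can be minimized. Data below is synthetic.
import numpy as np
import cvxpy

X_demo = np.random.randn(50, 5)
y_demo = np.sign(np.random.randn(50))  # labels in {-1, +1}
w_var = cvxpy.Variable(5)
loss_expr = cvxpy_logistic_loss_l2(w_var, X_demo, y_demo, lam=0.1)
cvxpy.Problem(cvxpy.Minimize(loss_expr)).solve()
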
def plane_search(iterates, num_iterates, cvxpy_set, cvxpy_var):
    """
    Plane search on previous iterates when performing the projection.

    This plane search does not preserve Fejer monotonicity!

    Parameters
    ----------
    iterates : list-like (list-like (float))
        List of the iterates belonging to a convex set, where iterates[i]
        is the i-th iterate.
    num_iterates : int
        Use the num_iterates most recent iterates in the plane search.
    cvxpy_set : list
        List of cvxpy constraints defining the convex set on which to project.
    cvxpy_var : cvxpy.Variable
        cvxpy variable used in specifying cvxpy_set.
    """
    num_iterates = min(len(iterates), num_iterates)
    iterates = iterates[-num_iterates:]
    theta = cvxpy.Variable(num_iterates)
    # in the plane search, we seek a convex combination of the iterates
    # that is close to a point in the other convex set
    obj = cvxpy.Minimize(
        cvxpy.pnorm(
            sum(theta[i] * iterates[i] for i in range(num_iterates)) -
            cvxpy_var, 2))
    constrs = (cvxpy_set + [cvxpy.sum(theta) == 1] + [theta >= 0] +
               [theta <= 1])
    prob = cvxpy.Problem(obj, constrs)
    prob.solve(solver=cvxpy.SCS, use_indirect=True)
    opt_point = sum(theta[i].value * iterates[i] for i in range(num_iterates))
    np_dist = np.linalg.norm(opt_point - cvxpy_var.value, 2)
    return opt_point, np_dist

def nuclear_norm_minimization(X, y, alpha=0.1):
    '''
    Nuclear norm minimization to recover the tensor.
    :param X: Input data, of dimension n*l*d_x
    :param y: Output data, of dimension n*d_y
    :param alpha: Parameter to adjust the bias-variance trade-off
        (noisy data tends to call for a lower alpha)
    :return: Recovered tensor
    '''
    if y.ndim == 1:
        y = y.reshape((y.shape[0], 1))
    dim = X.shape[1]
    N = X.shape[0]
    l = X.ndim - 1
    p = y.shape[1]
    l1 = l // 2
    l2 = l - l1
    T = cvxpy.Variable((dim**l, p))
    if p == 1:
        T = cvxpy.reshape(T, (dim**l, 1))
    X = X.reshape([N, dim**l])
    if alpha != 0.:
        alpha = ((len(X) * y.shape[1])**0.5) / alpha
        obj = cvxpy.Minimize(
            cvxpy.norm(cvxpy.reshape(T, (dim**l1, dim**l2 * p)), 'nuc') +
            cvxpy.pnorm(X @ T - y, p=2) / alpha)
        prob = cvxpy.Problem(obj)
    else:
        constraint = [X @ T == y]
        obj = cvxpy.Minimize(
            cvxpy.norm(cvxpy.reshape(T, (dim**l1, dim**l2 * p)), 'nuc'))
        prob = cvxpy.Problem(obj, constraint)
    prob.solve(solver=cvxpy.SCS)
    if T.value is None:
        return None
    return np.array(T.value).reshape([dim] * l + [p])

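# Illustrative call (not from the original source): with a 2-D X the
# function reduces to an l = 1 problem whose "nuclear norm" is just a
# vector norm, which keeps the demo small. Data is random and purely
# for shape-checking.
import numpy as np

X_nuc = np.random.randn(50, 4)       # n x d_x, so l = 1
y_nuc = X_nuc @ np.random.randn(4)   # consistent linear outputs
T_hat = nuclear_norm_minimization(X_nuc, y_nuc, alpha=0.1)  # shape (4, 1)
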
def test_pnorm(self) -> None:
    """Test domain for pnorm."""
    dom = cp.pnorm(self.a, -0.5).domain
    prob = Problem(Minimize(self.a), dom)
    prob.solve(solver=cp.SCS, eps=1e-6)
    self.assertAlmostEqual(prob.value, 0)

def compute_w_opt(C_yy, mu_y, mu_train_y, rho):
    n = C_yy.shape[0]
    theta = cp.Variable(n)
    b = mu_y - mu_train_y
    objective = cp.Minimize(cp.pnorm(C_yy @ theta - b) + rho * cp.pnorm(theta))
    constraints = [-1 <= theta]
    prob = cp.Problem(objective, constraints)
    # The optimal objective value is returned by `prob.solve()`.
    result = prob.solve()
    # The optimal value for theta is stored in `theta.value`.
    w = 1 + theta.value
    print('Estimated w is', w)
    # print(constraints[0].dual_value)
    return w

def getAccuracyBetweenDigits(digit1, digit2, imageTrain, labelTrain,
                             imageTest, labelTest):
    numOnenumTwoTrainLabel, numOnenumTwoTestLabel, \
        numOnenumTwo_TrainImage, numOnenumTwo_TestImage = getTwoDigitsTrainTest(
            digit1, digit2, imageTrain, labelTrain, imageTest, labelTest)
    n = numOnenumTwo_TrainImage.shape[1]
    W = cp.Variable(n)
    b = cp.Variable()
    ones = np.ones(numOnenumTwo_TrainImage.shape[0])
    Y = np.diag(numOnenumTwoTrainLabel)
    # hard-margin SVM: minimize ||W||^2 / 2 s.t. y_i (x_i . W - b) >= 1
    obj = cp.Minimize((cp.pnorm(W, p=2)**2) / 2)
    constraints = [ones - Y @ (numOnenumTwo_TrainImage @ W - b * ones) <= 0]
    prob = cp.Problem(obj, constraints)
    prob.solve()
    W_final = W.value
    b_final = b.value
    ones_test = np.ones(numOnenumTwo_TestImage.shape[0])
    pred = np.sign(numOnenumTwo_TestImage @ W_final + b_final * ones_test)
    inAcc = (pred != numOnenumTwoTestLabel).sum() / \
        numOnenumTwo_TestImage.shape[0]
    acc = 1 - inAcc
    return acc

def regularizer(beta, penalty, L):
    """
    Get regularization term for the objective function.

    Parameters:
    -----------
    beta : CVXPY-variable
        beta vector
    penalty : string
        Either 'Ridge' or 'Lasso'
    L : array-like, shape (n_features, n_features)
        Laplacian matrix

    Returns:
    --------
    * : Regularization term for optimization
    """
    if penalty == 'Ridge':
        if L is not None:
            # quad_form is already a scalar; no outer cp.sum is needed
            return cp.quad_form(beta, L)
        else:
            return cp.pnorm(beta, p=2)**2
    if penalty == 'Lasso':
        if L is not None:
            return cp.norm1(L @ beta)
        else:
            return cp.norm1(beta)

def trainD(K, y, D, L=10**-6, L2=10**-6):
    # make sure K is non negative
    n = K.shape[0]
    DK = D.dot(K)
    # fix y dim for cvxpy
    y = y[:, 0]
    alpha = cp.Variable(n)
    loss = cp.pnorm(cp.matmul(K, alpha) - y, p=2)**2
    reg = L * cp.quad_form(alpha, K)
    cons = [cp.matmul(DK, alpha) >= 0]
    obj = loss + reg
    problem = cp.Problem(cp.Minimize(obj), cons)
    try:
        problem.solve()
    except cp.SolverError:
        problem.solve(solver=cp.SCS)
    a = alpha.value
    if a is None:
        return np.zeros((n, 1))
    return a.reshape((n, 1))

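# Minimal usage sketch for trainC/trainD above (all data illustrative):
# K is an RBF Gram matrix, D a first-difference operator, so the trainD
# constraint DK @ alpha >= 0 enforces a monotone fit. The small jitter on
# K is an assumption added for numerical PSD-ness in cp.quad_form.
import numpy as np

n = 20
X_grid = np.linspace(0, 1, n)[:, None]
K_demo = np.exp(-(X_grid - X_grid.T)**2 / 0.1) + 1e-8 * np.eye(n)
D_demo = np.diff(np.eye(n), axis=0)  # (n-1, n) first differences
y_demo = X_grid**2                   # monotone target, shape (n, 1)
alpha_hat = trainD(K_demo, y_demo, D_demo)
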
def controlProblem(problemOptions, solverOptions):
    m = problemOptions['m']  # number of inputs
    n = problemOptions['n']  # number of states
    T = problemOptions['T']  # number of time steps
    alpha = problemOptions['alpha']
    beta = problemOptions['beta']
    A = np.eye(n) + alpha * np.random.randn(n, n)
    B = np.random.randn(n, m)
    x_0 = beta * np.random.randn(n)  # (n,) so it matches x[:, 0] below

    # Form and solve control problem.
    x = cp.Variable((n, T + 1))
    u = cp.Variable((m, T))
    states = []
    for t in range(T):
        cost = cp.pnorm(u[:, t], 1)
        constr = [x[:, t + 1] == A @ x[:, t] + B @ u[:, t],
                  cp.norm(u[:, t], 'inf') <= 1]
        states.append(cp.Problem(cp.Minimize(cost), constr))
    # sums problem objectives and concatenates constraints,
    # then appends the boundary conditions
    prob = sum(states)
    prob = cp.Problem(prob.objective,
                      prob.constraints + [x[:, T] == 0, x[:, 0] == x_0])
    prob.solve(**solverOptions)
    return {'Problem': prob, 'name': 'controlProblem'}

def compute_min_norm_solution(x, y, norm_type):
    """Compute the min-norm solution using a convex-program solver."""
    w = cp.Variable((x.shape[0], 1))
    if norm_type == 'linf':
        # compute minimal L_infinity solution
        constraints = [cp.multiply(y, (w.T @ x)) >= 1]
        prob = cp.Problem(cp.Minimize(cp.norm_inf(w)), constraints)
    elif norm_type == 'l2':
        # compute minimal L_2 solution
        constraints = [cp.multiply(y, (w.T @ x)) >= 1]
        prob = cp.Problem(cp.Minimize(cp.norm2(w)), constraints)
    elif norm_type == 'l1':
        # compute minimal L_1 solution
        constraints = [cp.multiply(y, (w.T @ x)) >= 1]
        prob = cp.Problem(cp.Minimize(cp.norm1(w)), constraints)
    elif norm_type[0] == 'l':
        # compute minimal Lp solution
        p = float(norm_type[1:])
        constraints = [cp.multiply(y, (w.T @ x)) >= 1]
        prob = cp.Problem(cp.Minimize(cp.pnorm(w, p)), constraints)
    elif norm_type == 'dft1':
        w = cp.Variable((x.shape[0], 1), complex=True)
        # compute minimal Fourier L1 norm (||F(w)||_1) solution
        dft = np.matrix(scipy.linalg.dft(x.shape[0], scale='sqrtn'))
        constraints = [cp.multiply(y, (cp.real(w).T @ x)) >= 1]
        prob = cp.Problem(cp.Minimize(cp.norm1(dft @ w)), constraints)
    prob.solve()
    logging.info('Min %s-norm solution found (norm=%.4f)', norm_type,
                 float(norm_f(w.value, norm_type)))
    return cp.real(w).value

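# Hedged example call (assumes the module-level imports of scipy and
# logging, and the norm_f helper used above, are in place). The data is
# synthetic and linearly separable so the margin constraints are feasible.
import numpy as np

w_true = np.random.randn(5, 1)
x_sep = np.random.randn(5, 40)     # features x samples, matching w.T @ x
y_sep = np.sign(w_true.T @ x_sep)  # separable +/-1 labels
w_min = compute_min_norm_solution(x_sep, y_sep, 'l2')
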
def compute_weights(L, d, T, N):
    # Correct T
    T = 1.0 * T / T[0]

    # Create optimization variables.
    cvx_eps = cvx.Variable()
    cvx_w = cvx.Variable(L)

    # Create constraints (cvxpy only supports non-strict inequalities):
    constraints = [cvx.sum(cvx_w) == 1,
                   cvx.pnorm(cvx_w, 2) - cvx_eps / 2 <= 0]
    for i in range(1, L):
        Tp = (1.0 * T / N)**(1.0 * i / (2 * d))
        cvx_mult = cvx_w @ Tp
        constraints.append(cvx_mult - cvx_eps * 2 <= 0)

    # Form objective.
    obj = cvx.Minimize(cvx_eps)

    # Form and solve problem.
    prob = cvx.Problem(obj, constraints)
    prob.solve()  # Returns the optimal value.
    sol = np.array(cvx_w.value)
    return sol.T

def pk_constrained(self, epsilon=0.1):
    '''
    Method to estimate the wave number spectrum based on a constrained
    optimization matrix inversion technique.
    Inputs:
        epsilon - upper bound of noise floor vector
    '''
    # loop over frequencies
    bar = ChargingBar('Calculating constrained optimization...',
                      max=len(self.controls.k0), suffix='%(percent)d%%')
    self.pk = np.zeros((self.n_waves, len(self.controls.k0)),
                       dtype=np.csingle)
    for jf, k0 in enumerate(self.controls.k0):
        k_vec = k0 * self.dir
        # Form H matrix
        h_mtx = np.exp(1j * self.receivers.coord @ k_vec.T)
        H = h_mtx.astype(complex)  # cvxpy does not accept floats, apparently
        # measured data
        pm = self.pres_s[:, jf].astype(complex)
        # Constrained minimum-norm inversion with cvxpy
        x = cp.Variable(h_mtx.shape[1], complex=True)
        problem = cp.Problem(
            cp.Minimize(cp.norm2(x)**2),
            [cp.pnorm(cp.matmul(H, x) - pm, p=2) <= epsilon])
        problem.solve(solver=cp.SCS, verbose=False)
        self.pk[:, jf] = x.value
        bar.next()
    bar.finish()
    return self.pk

def pk_cs(self, lambd_value=[], method='scipy'):
    '''
    Method to estimate the wave number spectrum based on the l1 inversion
    technique. This is supposed to give a sparse solution for the sound
    field decomposition.
    Inputs:
        method: string defining the method to be used to find the correct
        P(k). It can be:
            (1) - 'scipy': using scipy.sparse.linalg.lsqr
            (2) - 'direct': via x = (Hm^H) @ ((Hm @ Hm^H + lambd_value * I)^-1) @ pm
            (3) - else: via cvxpy
    '''
    # loop over frequencies
    bar = ChargingBar('Calculating CS inversion...',
                      max=len(self.controls.k0), suffix='%(percent)d%%')
    self.pk = np.zeros((self.n_waves, len(self.controls.k0)),
                       dtype=np.csingle)
    for jf, k0 in enumerate(self.controls.k0):
        # wave numbers
        k_vec = k0 * self.dir
        # Form H matrix
        h_mtx = np.exp(1j * self.receivers.coord @ k_vec.T)
        # measured data
        pm = self.pres_s[:, jf].astype(complex)
        # Choosing the method to find P(k)
        if method == 'scipy':
            # from scipy.sparse.linalg import lsqr, lsmr
            # x = lsqr(h_mtx, self.pres_s[:, jf], damp=np.sqrt(lambd_value))
            # self.pk[:, jf] = x[0]
            pass
        elif method == 'direct':
            # Hm = np.matrix(h_mtx)
            # self.pk[:, jf] = Hm.getH() @ np.linalg.inv(
            #     Hm @ Hm.getH() + lambd_value * np.identity(len(pm))) @ pm
            pass
        # l1-minimization with cvxpy
        else:
            H = h_mtx.astype(complex)
            x = cp.Variable(h_mtx.shape[1], complex=True)
            objective = cp.Minimize(cp.pnorm(x, p=1))
            constraints = [H @ x == pm]
            # Create the problem and solve
            problem = cp.Problem(objective, constraints)
            # Solver notes from development:
            # problem.solve()                      # default
            # problem.solve(verbose=False)         # fast but gives some warnings
            problem.solve(solver=cp.SCS, verbose=True)  # fast but gives some warnings
            # problem.solve(solver=cp.ECOS, abstol=1e-3)  # slow
            # problem.solve(solver=cp.ECOS_BB)            # slow
            # problem.solve(solver=cp.NAG)     # not installed
            # problem.solve(solver=cp.CPLEX)   # not installed
            # problem.solve(solver=cp.CBC)     # not installed
            # problem.solve(solver=cp.CVXOPT)  # not installed
            # problem.solve(solver=cp.MOSEK)   # not installed
            # problem.solve(solver=cp.OSQP)    # did not work
            self.pk[:, jf] = x.value
        bar.next()
    bar.finish()
    return self.pk

def delta_estimate_params4(V, U, delta):
    if len(V) != len(U):
        raise ValueError("Arrays must have the same size")
    T = len(V)
    gammas = np.ones((T, 4))
    iters = 100
    for i in range(iters):
        grad = np.zeros((T, 4))
        for t in range(T - 1):
            grad[t, 0] += V[t] * (V[t] * gammas[t, 0] - V[t+1] + gammas[t, 1] * U[t])
            grad[t, 1] += U[t] * (U[t] * gammas[t, 1] - V[t+1] + gammas[t, 0] * V[t])
            grad[t, 2] += V[t] * (V[t] * gammas[t, 2] - U[t+1] + gammas[t, 3] * U[t])
            grad[t, 3] += U[t] * (U[t] * gammas[t, 3] - U[t+1] + gammas[t, 2] * V[t])
        gammas = gammas - grad * (0.5 / math.sqrt(i + 1)) / np.linalg.norm(grad)
        # project onto the convex set defined by delta
        x = cp.Variable((T, 4))
        expression = cp.sum_squares(x - gammas)
        constraints = [x >= 0]
        for t in range(T - 1):
            # bound the change between consecutive rows (the original
            # x[t] - x[t-1] wrapped around at t = 0)
            constraints.append(cp.pnorm(x[t + 1] - x[t], 'inf') <= delta)
        prob = cp.Problem(cp.Minimize(expression), constraints)
        prob.solve()
        gammas = x.value
    print("done")
    return gammas

def loss_fn(X, Y, beta, penalty):
    """
    Get loss function for objective function

    Parameters:
    -----------
    X : CVXPY-variable
        covariate matrix
    Y : CVXPY-variable
        responses to samples
    beta : CVXPY-variable
        beta vector
    penalty : string
        Either 'Ridge' or 'Lasso'

    Returns:
    --------
    * : Loss function for optimization
    """
    if penalty == 'Ridge':
        return cp.pnorm(cp.matmul(X, beta) - Y, p=2)**2
    if penalty == 'Lasso':
        return cp.norm2(cp.matmul(X, beta) - Y)**2

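# Minimal sketch (assumption: loss_fn and regularizer above live in the
# same module) showing how the two pieces combine into a penalized
# regression. The lam weight and toy data are illustrative only.
import numpy as np
import cvxpy as cp

X_toy = np.random.randn(30, 4)
Y_toy = np.random.randn(30)
beta = cp.Variable(4)
lam = 0.5
objective = cp.Minimize(loss_fn(X_toy, Y_toy, beta, 'Ridge') +
                        lam * regularizer(beta, 'Ridge', None))
cp.Problem(objective).solve()
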
def test_docstring_example(self):
    np.random.seed(0)
    tf.random.set_seed(0)

    n, m = 2, 3
    x = cp.Variable(n)
    A = cp.Parameter((m, n))
    b = cp.Parameter(m)
    constraints = [x >= 0]
    objective = cp.Minimize(0.5 * cp.pnorm(A @ x - b, p=1))
    problem = cp.Problem(objective, constraints)
    assert problem.is_dpp()

    cvxpylayer = CvxpyLayer(problem, parameters=[A, b], variables=[x])
    A_tf = tf.Variable(tf.random.normal((m, n)))
    b_tf = tf.Variable(tf.random.normal((m,)))

    with tf.GradientTape() as tape:
        # solve the problem, setting the values of A and b to A_tf and b_tf
        solution, = cvxpylayer(A_tf, b_tf)
        summed_solution = tf.math.reduce_sum(solution)
    gradA, gradb = tape.gradient(summed_solution, [A_tf, b_tf])

    def f():
        problem.solve(solver=cp.SCS, eps=1e-10)
        return np.sum(x.value)

    numgradA, numgradb = numerical_grad(f, [A, b], [A_tf, b_tf])
    np.testing.assert_almost_equal(gradA, numgradA, decimal=4)
    np.testing.assert_almost_equal(gradb, numgradb, decimal=4)

def _minimize_loss(self, fvs, labels):
    """
    Use CVXPY to minimize the loss function.

    :param fvs: the feature vectors (np.ndarray)
    :param labels: the list of labels (np.ndarray or List[int])
    :return: w (np.ndarray) and b (float)
    """
    # Setup variables and constants
    w = cvx.Variable(fvs.shape[1])
    b = cvx.Variable()

    # Setup CVX problem: mean squared error plus L2 regularization
    f_vector = [arr @ w + b for arr in fvs]
    loss = sum((f - y)**2 for f, y in zip(f_vector, labels))
    loss /= fvs.shape[0]
    loss += 0.5 * self.lda * (cvx.pnorm(w, 2)**2)

    # Solve problem
    prob = cvx.Problem(cvx.Minimize(loss), [])
    prob.solve(solver=cvx.SCS, verbose=self.verbose)

    return np.array(w.value).flatten(), b.value

def minDistance(self, z, norm="inf", verbose=False):
    """
    Compute the minimum distance between a point z and the polytope,
    using the provided norm.

    Parameters
    ----------
    z : array-like
        The query point.
    norm : {1, 2, "inf"}
        The norm to minimize.
    verbose : bool, optional
        If ``True``, solver prints its output.

    Returns
    -------
    d : float
        The minimum distance between z and this polytope.
    """
    x = cvx.Variable(self.n)
    cost = cvx.Minimize(cvx.pnorm(z - x, norm))
    constraints = [self.P @ x <= self.p]
    problem = cvx.Problem(cost, constraints)
    min_distance = problem.solve(verbose=verbose)
    if min_distance == np.inf:
        self.raiseError("Infeasible problem.")
    return min_distance

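# Hedged usage sketch: the Polytope constructor shown here is hypothetical
# (only n, P, p, and minDistance are known from the method above). The
# polytope is the unit box |x_i| <= 1, so the distance from (3, 0) is 2.
import numpy as np

box = Polytope(P=np.vstack([np.eye(2), -np.eye(2)]), p=np.ones(4))  # hypothetical API
d = box.minDistance(np.array([3.0, 0.0]), norm=2)  # expected: 2.0
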
def test_example(self):
    key = random.PRNGKey(0)

    n, m = 2, 3
    x = cp.Variable(n)
    A = cp.Parameter((m, n))
    b = cp.Parameter(m)
    constraints = [x >= 0]
    objective = cp.Minimize(0.5 * cp.pnorm(A @ x - b, p=1))
    problem = cp.Problem(objective, constraints)
    assert problem.is_dpp()

    cvxpylayer = CvxpyLayer(problem, parameters=[A, b], variables=[x])
    key, k1, k2 = random.split(key, num=3)
    A_jax = random.normal(k1, shape=(m, n))
    b_jax = random.normal(k2, shape=(m,))

    # solve the problem
    solution, = cvxpylayer(A_jax, b_jax)

    # compute the gradient of the sum of the solution with respect to A, b
    def sum_sol(A_jax, b_jax):
        solution, = cvxpylayer(A_jax, b_jax)
        return solution.sum()

    dsum_sol = jax.grad(sum_sol)
    dsum_sol(A_jax, b_jax)

def learn_structure(L):
    N = float(np.shape(L)[0])
    M = int(np.shape(L)[1])
    # empirical covariance of the label matrix
    sigma_O = np.dot(L.T, L) / (N - 1) - \
        np.outer(np.mean(L, axis=0), np.mean(L, axis=0))

    O = 1 / 2 * (sigma_O + sigma_O.T)
    O_root = np.real(sp.linalg.sqrtm(O))

    # low-rank matrix
    L_cvx = cp.Variable((M, M), PSD=True)
    # sparse matrix
    S = cp.Variable((M, M), PSD=True)
    # S-L matrix
    R = cp.Variable((M, M), PSD=True)

    # regularization parameters
    lam = 1 / np.sqrt(M)
    gamma = 1e-8

    objective = cp.Minimize(0.5 * (cp.norm(R @ O_root, 'fro')**2) -
                            cp.trace(R) +
                            lam * (gamma * cp.pnorm(S, 1) +
                                   cp.norm(L_cvx, "nuc")))
    constraints = [R == S - L_cvx, L_cvx >> 0]
    prob = cp.Problem(objective, constraints)
    result = prob.solve(verbose=False)
    opt_error = prob.value

    # extract dependencies
    J_hat = S.value
    return J_hat

def dccp_ini(self, times=3, random=0, solver=None, **kwargs):
    """
    set initial values
    :param times: number of random projections for each variable
    :param random: mandatory random initial values
    """
    # domain of the objective function
    dom_constr = self.objective.args[0].domain
    for arg in self.constraints:
        for l in range(2):
            for dom in arg.expr.args[l].domain:
                dom_constr.append(dom)  # domain on each side of constraints
    var_store = []  # store initial values for each variable
    init_flag = []  # indicate if any variable is initialized by the user
    for var in self.variables():
        var_store.append(np.zeros(var.shape))  # to be averaged
        init_flag.append(var.value is None)
    # setup the problem
    ini_cost = 0
    var_ind = 0
    value_para = []
    for var in self.variables():
        # if the variable is not initialized by the user, or random
        # initialization is mandatory
        if init_flag[var_ind] or random:
            value_para.append(cvx.Parameter(var.shape))
            ini_cost += cvx.pnorm(var - value_para[-1], 2)
        var_ind += 1
    ini_obj = cvx.Minimize(ini_cost)
    ini_prob = cvx.Problem(ini_obj, dom_constr)
    # solve it several times with random points
    for t in range(times):  # for each random projection
        count_para = 0
        var_ind = 0
        for var in self.variables():
            # if the variable is not initialized by the user, or random
            # initialization is mandatory
            if init_flag[var_ind] or random:
                # set a random point
                value_para[count_para].value = np.random.randn(*var.shape) * 10
                count_para += 1
            var_ind += 1
        if solver is None:
            ini_prob.solve(**kwargs)
        else:
            ini_prob.solve(solver=solver, **kwargs)
        var_ind = 0
        for var in self.variables():
            # average over the projections
            var_store[var_ind] = var_store[var_ind] + var.value / float(times)
            var_ind += 1
    # set initial values
    var_ind = 0
    for var in self.variables():
        if init_flag[var_ind] or random:
            var.value = var_store[var_ind]
        var_ind += 1

def create(**kwargs):
    m = kwargs["m"]
    n = kwargs["n"]
    k = 10
    A = [problem_util.normalized_data_matrix(m, n, 1) for i in range(k)]
    B = problem_util.normalized_data_matrix(k, n, 1)
    c = np.random.rand(k)

    x = cp.Variable(n)
    t = cp.Variable(k)
    f = cp.max(t + cp.abs(B @ x - c))
    C = []
    for i in range(k):
        C.append(cp.pnorm(A[i] @ x, 2) <= t[i])
    t_eval = lambda: np.array([cp.pnorm(A[i] @ x, 2).value for i in range(k)])
    f_eval = lambda: cp.max(t_eval() + cp.abs(B @ x - c)).value
    return cp.Problem(cp.Minimize(f), C), f_eval

def project(x, k):
    y = cp.Variable(k)
    objective = cp.Minimize(cp.pnorm(x - y, 2))
    constraints = [cp.sum(y) == 1, y >= 0]
    problem = cp.Problem(objective, constraints)
    try:
        problem.solve(verbose=False)
    except Exception:
        print(x)
    return normalize_p(y.value)

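# Usage sketch (illustrative): project finds the closest point on the
# probability simplex; normalize_p is assumed to clean up solver round-off.
import numpy as np

p_demo = project(np.array([0.9, 0.4, -0.1]), 3)  # sums to 1, entries >= 0
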
def dccp_ini(self, times=3, random=0):
    """
    set initial values
    :param times: number of random projections for each variable
    :param random: mandatory random initial values

    Legacy variant written against the old cvxpy interface, where
    var.size is a (rows, cols) tuple.
    """
    # domain of the objective function
    dom_constr = self.objective.args[0].domain
    for arg in self.constraints:
        for l in range(2):
            for dom in arg.args[l].domain:
                dom_constr.append(dom)  # domain on each side of constraints
    var_store = []  # store initial values for each variable
    init_flag = []  # indicate if any variable is initialized by the user
    for var in self.variables():
        var_store.append(np.zeros(var.size))  # to be averaged
        init_flag.append(var.value is None)
    # setup the problem
    ini_cost = 0
    var_ind = 0
    value_para = []
    for var in self.variables():
        # if the variable is not initialized by the user, or random
        # initialization is mandatory
        if init_flag[var_ind] or random:
            value_para.append(cvx.Parameter(*var.size))
            ini_cost += cvx.pnorm(var - value_para[-1], 2)
        var_ind += 1
    ini_obj = cvx.Minimize(ini_cost)
    ini_prob = cvx.Problem(ini_obj, dom_constr)
    # solve it several times with random points
    for t in range(times):  # for each random projection
        count_para = 0
        var_ind = 0
        for var in self.variables():
            # if the variable is not initialized by the user, or random
            # initialization is mandatory
            if init_flag[var_ind] or random:
                # set a random point
                value_para[count_para].value = np.random.randn(*var.size) * 10
                count_para += 1
            var_ind += 1
        ini_prob.solve()
        var_ind = 0
        for var in self.variables():
            # average over the projections
            var_store[var_ind] = var_store[var_ind] + var.value / float(times)
            var_ind += 1
    # set initial values
    var_ind = 0
    for var in self.variables():
        if init_flag[var_ind] or random:
            var.value = var_store[var_ind]
        var_ind += 1
