def test_duplicate_constraints(self):
    """Verify that duplicate and internally generated constraints collapse.

    Registers a custom solve method that canonicalizes the problem and
    reports how many EQ/LEQ constraints remain after deduplication.
    """
    eq_con = (self.x == 2)
    le_con = (self.x <= 2)

    def count_constraints(self):
        # Canonicalize, build the solver's symbolic data, and report the
        # number of surviving equality and inequality constraints.
        objective, constraints = self.canonicalize()
        sym_data = SymData(objective, constraints, SOLVERS[s.ECOS])
        return (len(sym_data.constr_map[s.EQ]),
                len(sym_data.constr_map[s.LEQ]))

    Problem.register_solve("test", count_constraints)

    # Explicitly repeated constraints are collapsed to one of each kind.
    prob = Problem(Minimize(0), [eq_con, eq_con, le_con, le_con])
    self.assertEqual(prob.solve(method="test"), (1, 1))

    # Internal constraints.
    X = Semidef(2)
    prob = Problem(Minimize(sum_entries(X + X)))
    self.assertEqual(prob.solve(method="test"), (0, 1))

    # Duplicates from non-linear constraints.
    expr = norm(self.x, 2)
    prob = Problem(Minimize(0), [expr <= 1, expr <= 2])
    self.assertEqual(prob.solve(method="test"), (0, 4))
def lambda_sum_largest(X, k):
    """Sum of the largest k eigenvalues.

    Raises ValueError if X is not square or k is not a positive integer.
    """
    X = Expression.cast_to_const(X)
    rows, cols = X.size
    if rows != cols:
        raise ValueError("First argument must be a square matrix.")
    elif int(k) != k or k <= 0:
        raise ValueError("Second argument must be a positive integer.")
    # Let S_k(X) denote lambda_sum_largest(X, k).  The epigraph
    #     t >= k*s + trace(Z),  with Z PSD and s*I >= X - Z (PSD sense)
    # implies
    #     t >= k*s + trace(Z) >= S_k(s*I + Z) >= S_k(X).
    # This uses the fact that
    #     S_k(X) = sup over sets of k orthonormal vectors u_i of
    #              sum_i u_i^T X u_i,
    # and that Z >= X in the PSD sense gives
    #     sum_i u_i^T Z u_i >= sum_i u_i^T X u_i.
    # Equality holds when s = lambda_k and Z is diagonal with
    # Z_ii = (lambda_i - lambda_k)_+.
    Z = Semidef(rows)
    return k*lambda_max(X - Z) + trace(Z)
def test_var_copy(self):
    """Test the copy function for variable types.

    Copies must preserve both the size and the name of the variable.
    """
    x = Variable(3, 4, name="x")
    y = x.copy()
    # assertEqual replaces the deprecated assertEquals alias
    # (removed in Python 3.12); matches the style of sibling tests.
    self.assertEqual(y.size, (3, 4))
    self.assertEqual(y.name(), "x")

    # Semidef variables are square, so a single dimension suffices.
    x = Semidef(5, name="x")
    y = x.copy()
    self.assertEqual(y.size, (5, 5))
def test_diag_prob(self):
    """Test a problem with diag.
    """
    C = Variable(3, 3)
    objective = Maximize(C[0, 2])
    constraints = [
        diag(C) == 1,
        C[0, 1] == 0.6,
        C[1, 2] == -0.3,
        # Equating C with a fresh Semidef variable forces C to be PSD.
        C == Semidef(3),
    ]
    result = Problem(objective, constraints).solve()
    self.assertAlmostEqual(result, 0.583151)
def test_sdp_problem(self):
    """Exercise SDP variables in the objective, in constraints, and via indexing."""
    # SDP in objective.
    obj = Minimize(sum_entries(square(self.X - self.F)))
    result = Problem(obj, []).solve()
    self.assertAlmostEqual(result, 1, places=4)
    self.assertAlmostEqual(self.X.value[0, 0], 1, places=3)
    self.assertAlmostEqual(self.X.value[0, 1], 0)
    self.assertAlmostEqual(self.X.value[1, 0], 0)
    self.assertAlmostEqual(self.X.value[1, 1], 0)

    # SDP in constraint.
    # ECHU: note to self, apparently this is a source of redundancy
    obj = Minimize(sum_entries(square(self.Y - self.F)))
    result = Problem(obj, [self.Y == Semidef(2)]).solve()
    self.assertAlmostEqual(result, 1, places=2)
    self.assertAlmostEqual(self.Y.value[0, 0], 1, places=3)
    self.assertAlmostEqual(self.Y.value[0, 1], 0)
    self.assertAlmostEqual(self.Y.value[1, 0], 0)
    self.assertAlmostEqual(self.Y.value[1, 1], 0, places=3)

    # Index into semidef.
    obj = Minimize(square(self.X[0, 0] - 1) +
                   square(self.X[1, 0] - 2) +
                   #square(self.X[0,1] - 3) +
                   square(self.X[1, 1] - 4))
    result = Problem(obj, []).solve()
    print(self.X.value)
    self.assertAlmostEqual(result, 0)
    self.assertAlmostEqual(self.X.value[0, 0], 1, places=2)
    self.assertAlmostEqual(self.X.value[0, 1], 2, places=2)
    self.assertAlmostEqual(self.X.value[1, 0], 2, places=2)
    self.assertAlmostEqual(self.X.value[1, 1], 4, places=3)
def setUp(self):
    """Build the fixtures shared by the SDP tests.

    X is a 2x2 PSD variable, Y an unconstrained 2x2 variable, and F a
    constant indefinite target matrix.
    """
    self.X = Semidef(2)
    self.Y = Variable(2, 2)
    self.F = np.matrix([[1, 0], [0, -1]])