Example #1
 def test_large_sdp(self):
     """Test for bug where large PSD caused integer overflow in cvxcore.
     """
     SHAPE = (256, 256)
     rows = SHAPE[0]
     cols = SHAPE[1]
     X = Variable(SHAPE)
     Z = Variable((rows+cols, rows+cols))
     prob = Problem(Minimize(0.5*at.trace(Z)),
                    [X[0, 0] >= 1, Z[0:rows, rows:rows+cols] == X, Z >> 0, Z == Z.T])
     prob.solve(solver="SCS")
     self.assertAlmostEqual(prob.value, 1.0)
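
A minimal standalone sketch of the same check, assuming a current CVXPY release (imported as cvxpy) and the SCS solver; the 32x32 shape is an arbitrary small size chosen so the sketch solves quickly, not the 256x256 size that triggered the original overflow.

import cvxpy as cp

rows, cols = 32, 32
X = cp.Variable((rows, cols))
# Declaring Z symmetric replaces the explicit Z == Z.T constraint.
Z = cp.Variable((rows + cols, rows + cols), symmetric=True)
prob = cp.Problem(
    cp.Minimize(0.5 * cp.trace(Z)),
    [X[0, 0] >= 1, Z[0:rows, rows:rows + cols] == X, Z >> 0],
)
prob.solve(solver=cp.SCS)
print(prob.value)  # expected to be close to 1.0, as in the assertion above
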
Example #2
 def test_large_sdp(self):
     """Test for bug where large SDP caused integer overflow in CVXcanon.
     """
     SHAPE = (256, 256)
     rows = SHAPE[0]
     cols = SHAPE[1]
     X = Variable(*SHAPE)
     Z = Variable(rows+cols, rows+cols)
     prob = Problem(Minimize(0.5*at.trace(Z)),
         [X[0,0] >= 1, Z[0:rows,rows:rows+cols] == X, Z >> 0, Z == Z.T])
     prob.solve(solver="SCS")
     self.assertAlmostEqual(prob.value, 1.0)
Example #3
    def grad(self):
        """Gives the (sub/super)gradient of the expression w.r.t. each variable.

        Matrix expressions are vectorized, so the gradient is a matrix.
        None indicates variable values unknown or outside domain.

        Returns:
            A map of variable to SciPy CSC sparse matrix or None.
        """
        # Subgrad of g(y) = min f_0(x,y)
        #                   s.t. f_i(x,y) <= 0, i = 1,...,p
        #                        h_i(x,y) == 0, i = 1,...,q
        # Given by Df_0(x^*,y) + \sum_i Df_i(x^*,y) \lambda^*_i
        #          + \sum_i Dh_i(x^*,y) \nu^*_i
        # where x^*, \lambda^*_i, \nu^*_i are optimal primal/dual variables.
        # Add PSD constraints in same way.

        # Short circuit for constant.
        if self.is_constant():
            return u.grad.constant_grad(self)

        old_vals = {var.id: var.value for var in self.variables()}
        fix_vars = []
        for var in self.dont_opt_vars:
            if var.value is None:
                return u.grad.error_grad(self)
            else:
                fix_vars += [var == var.value]
        prob = Problem(self.args[0].objective,
                       fix_vars + self.args[0].constraints)
        prob.solve(verbose=True)
        # Compute gradient.
        if prob.status in s.SOLUTION_PRESENT:
            sign = self.is_convex() - self.is_concave()
            # Form Lagrangian.
            lagr = self.args[0].objective.args[0]
            for constr in self.args[0].constraints:
                # TODO: better way to get constraint expressions.
                lagr_multiplier = self.cast_to_const(sign*constr.dual_value)
                prod = lagr_multiplier.T*constr.expr
                if prod.is_scalar():
                    lagr += sum(prod)
                else:
                    lagr += trace(prod)
            grad_map = lagr.grad
            result = {var: grad_map[var] for var in self.dont_opt_vars}
        else:  # Unbounded, infeasible, or solver error.
            result = u.grad.error_grad(self)
        # Restore the original values to the variables.
        for var in self.variables():
            var.value = old_vals[var.id]
        return result
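
This grad implementation appears to belong to CVXPY's partial_optimize transform (it references dont_opt_vars). Assuming that, the sketch below applies the transform to a made-up problem f(x, y) = (x - y)^2 + x, for which g(y) = min_x f(x, y) = y - 1/4 and hence dg/dy = 1; it also assumes the interface of this example, where only the non-optimized variable needs a value before .grad is read.

import cvxpy as cp
from cvxpy.transforms import partial_optimize

x = cp.Variable()
y = cp.Variable()
# Inner problem: minimize over x, with y left as a non-optimized variable.
inner = cp.Problem(cp.Minimize(cp.square(x - y) + x))
g = partial_optimize(inner, opt_vars=[x], dont_opt_vars=[y])

y.value = 1.0
print(g.grad[y])  # expected to be (approximately) 1 for any value of y
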
Example #4
    def grad(self):
        """Gives the (sub/super)gradient of the expression w.r.t. each variable.

        Matrix expressions are vectorized, so the gradient is a matrix.
        None indicates variable values unknown or outside domain.

        Returns:
            A map of variable to SciPy CSC sparse matrix or None.
        """
        # Subgrad of g(y) = min f_0(x,y)
        #                   s.t. f_i(x,y) <= 0, i = 1,...,p
        #                        h_i(x,y) == 0, i = 1,...,q
        # Given by Df_0(x^*,y) + \sum_i Df_i(x^*,y) \lambda^*_i
        #          + \sum_i Dh_i(x^*,y) \nu^*_i
        # where x^*, \lambda^*_i, \nu^*_i are optimal primal/dual variables.
        # Add PSD constraints in same way.

        # Short circuit for constant.
        if self.is_constant():
            return u.grad.constant_grad(self)

        old_vals = {var.id: var.value for var in self.variables()}
        fix_vars = []
        for var in self.variables():
            if var.value is None:
                return u.grad.error_grad(self)
            else:
                fix_vars += [var == var.value]
        obj_arg = self._prob.objective.args[0]
        prob = Problem(self.args[0].objective.copy(obj_arg),
                       fix_vars + self._prob.constraints)
        prob.solve()
        # Compute gradient.
        if prob.status in s.SOLUTION_PRESENT:
            sign = self.is_convex() - self.is_concave()
            # Form Lagrangian.
            lagr = self._prob.objective.args[0]
            for constr in self._prob.constraints:
                # TODO: better way to get constraint expressions.
                lagr_multiplier = self.cast_to_const(sign*constr.dual_value)
                lagr += trace(lagr_multiplier.T*constr._expr)
            grad_map = lagr.grad
            result = {var: grad_map[var] for var in self.variables()}
        else:  # Unbounded, infeasible, or solver error.
            result = u.grad.error_grad(self)
        # Restore the original values to the variables.
        for var in self.variables():
            var.value = old_vals[var.id]
        return result
Example #5
def matrix_frac_canon(expr, args):
    X = args[0]  # n by m matrix.
    P = args[1]  # n by n matrix.

    if len(X.shape) == 1:
        X = reshape(X, (X.shape[0], 1))
    n, m = X.shape
    T = Variable((m, m), symmetric=True)
    M = bmat([[P, X], [X.T, T]])
    # ^ a matrix with Schur complement T - X.T*P^-1*X.
    constraints = [PSD(M)]
    if not P.is_symmetric():
        ut = upper_tri(P)
        lt = upper_tri(P.T)
        constraints.append(ut == lt)
    return trace(T), constraints
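
A small numeric sanity check of the Schur-complement epigraph used here, assuming P is a fixed positive definite matrix (the data below is made up): with M = [[P, X], [X.T, T]] PSD, minimizing trace(T) recovers matrix_frac(X, P) = trace(X.T P^{-1} X), so the public atom's optimal value should match the closed form.

import numpy as np
import cvxpy as cp

np.random.seed(0)
n, m = 4, 2
A = np.random.randn(n, n)
P = A @ A.T + np.eye(n)   # a fixed positive definite matrix
X = cp.Variable((n, m))

prob = cp.Problem(cp.Minimize(cp.matrix_frac(X, P)), [X[0, 0] == 1])
prob.solve(solver=cp.SCS)

# Compare against the closed form trace(X.T P^{-1} X) at the solution.
direct = np.trace(X.value.T @ np.linalg.inv(P) @ X.value)
print(prob.value, direct)  # the two values should roughly agree
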
Example #6
def matrix_frac_canon(expr, args):
    X = args[0]  # n by m matrix.
    P = args[1]  # n by n matrix.

    if len(X.shape) == 1:
        X = reshape(X, (X.shape[0], 1))
    n, m = X.shape

    # Create a matrix with Schur complement T - X.T*P^-1*X.
    M = Variable((n + m, n + m), PSD=True)
    T = Variable((m, m))
    constraints = []
    # Fix M using the fact that P must be affine by the DCP rules.
    # M[0:n, 0:n] == P.
    constraints.append(M[0:n, 0:n] == P)
    # M[0:n, n:n+m] == X
    constraints.append(M[0:n, n:n + m] == X)
    # M[n:n+m, n:n+m] == T
    constraints.append(M[n:n + m, n:n + m] == T)
    return trace(T), constraints
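
The same lifted formulation can also be written out by hand in user code. The sketch below fixes a small diagonal P (made up for illustration) and checks that minimizing trace(T) over the pinned PSD variable M reproduces the matrix fractional value trace(X.T P^{-1} X) under a simple constraint on X.

import numpy as np
import cvxpy as cp

n, m = 3, 1
P = np.diag([1.0, 2.0, 4.0])    # a fixed positive definite matrix
X = cp.Variable((n, m))
M = cp.Variable((n + m, n + m), PSD=True)
T = cp.Variable((m, m))
constraints = [
    M[0:n, 0:n] == P,           # upper-left block pinned to P
    M[0:n, n:n + m] == X,       # off-diagonal block pinned to X
    M[n:n + m, n:n + m] == T,   # lower-right block is the epigraph variable
    X[0, 0] == 1,
]
prob = cp.Problem(cp.Minimize(cp.trace(T)), constraints)
prob.solve(solver=cp.SCS)
print(prob.value)  # expected to be about 1.0 = min of x.T P^{-1} x with x[0] == 1
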
Example #7
def lambda_sum_largest_canon(expr, args):
    """
    S_k(X) denotes lambda_sum_largest(X, k)
    t >= k S_k(X - Z) + trace(Z), Z is PSD
    implies
    t >= ks + trace(Z)
    Z is PSD
    sI >= X - Z (PSD sense)
    which implies
    t >= ks + trace(Z) >= S_k(sI + Z) >= S_k(X)
    We use the fact that
    S_k(X) = sup_{sets of k orthonormal vectors u_i}sum_{i}u_i^T X u_i
    and if Z >= X in PSD sense then
    sum_{i}u_i^T Z u_i >= sum_{i}u_i^T X u_i

    We have equality when s = lambda_k and Z diagonal
    with Z_{ii} = (lambda_i - lambda_k)_+
    """
    X = expr.args[0]
    k = expr.k
    Z = Variable((X.shape[0], X.shape[0]), PSD=True)
    obj, constr = lambda_max_canon(expr, [X - Z])
    obj = k * obj + trace(Z)
    return obj, constr
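
The monotonicity fact the docstring relies on (S_k is nondecreasing in the PSD order) can be checked numerically with the public lambda_sum_largest atom; the matrix A below is made up, and minimizing S_k(X) over X >> A should recover the sum of the k largest eigenvalues of A.

import numpy as np
import cvxpy as cp

np.random.seed(0)
n, k = 5, 2
B = np.random.randn(n, n)
A = (B + B.T) / 2   # a fixed symmetric matrix
X = cp.Variable((n, n), symmetric=True)

prob = cp.Problem(cp.Minimize(cp.lambda_sum_largest(X, k)), [X >> A])
prob.solve(solver=cp.SCS)

top_k = np.sort(np.linalg.eigvalsh(A))[::-1][:k].sum()
print(prob.value, top_k)  # both equal S_k(A) up to solver tolerance
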