Example #1
    def maximize_problem(self, solver):
        """Maximize -sum(x) subject to x >= 0 and A*x <= b with A, b >= 0.

        Since -sum(x) <= 0 whenever x >= 0, and x = 0 is feasible, the
        optimum is x = 0.
        """
        A = np.random.randn(5, 2)
        A = np.maximum(A, 0)
        b = np.random.randn(5)
        b = np.maximum(b, 0)
        p = Problem(Maximize(-sum(self.x)), [self.x >= 0, A * self.x <= b])
        self.solve_QP(p, solver)
        for var in p.variables():
            self.assertItemsAlmostEqual([0., 0.], var.value, places=3)
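For readers without the surrounding test class, a minimal standalone version of the same problem (a sketch assuming a recent CVXPY, where @ is matrix multiplication; self.x and solve_QP belong to the test harness):

import numpy as np
import cvxpy as cp

np.random.seed(0)
A = np.maximum(np.random.randn(5, 2), 0)  # nonnegative coefficients
b = np.maximum(np.random.randn(5), 0)     # nonnegative right-hand side

x = cp.Variable(2)
p = cp.Problem(cp.Maximize(-cp.sum(x)), [x >= 0, A @ x <= b])
p.solve()
print(x.value)  # approximately [0, 0]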
Example #2
    def affine_problem(self, solver):
        """Minimize sum(x) subject to x >= 0 and A*x <= b with A, b >= 0;
        the optimum is x = 0."""
        A = numpy.random.randn(5, 2)
        A = numpy.maximum(A, 0)
        b = numpy.random.randn(5, 1)
        b = numpy.maximum(b, 0)
        p = Problem(Minimize(sum(self.x)), [self.x >= 0, A * self.x <= b])
        s = self.solve_QP(p, solver)
        for var in p.variables():
            self.assertItemsAlmostEqual([0., 0.], s.primal_vars[var.id])
Example #3
    def huber_small(self, solver):
        # Set up a small Huber penalty minimization
        x = Variable(3)
        objective = sum(huber(x))

        # Solve as a QP; the constraint forces x[2] to 3
        p = Problem(Minimize(objective), [x[2] >= 3])
        self.solve_QP(p, solver)
        self.assertAlmostEqual(3, x.value[2], places=4)
        self.assertAlmostEqual(5, objective.value, places=4)
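The expected objective value follows from the definition of the Huber function with its default threshold M = 1: huber(v) = v**2 for |v| <= M and 2*M*|v| - M**2 otherwise. At the optimum x = (0, 0, 3), only the third entry is in the linear regime, giving 2*1*3 - 1 = 5. A quick standalone check with plain NumPy (my own sketch, no solver required):

import numpy as np

def huber_value(v, M=1.0):
    # Huber penalty: quadratic inside [-M, M], linear outside.
    v = np.abs(v)
    return np.where(v <= M, v**2, 2 * M * v - M**2)

x_opt = np.array([0.0, 0.0, 3.0])
print(huber_value(x_opt).sum())  # 5.0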
Example #4
    def test_large_sum(self):
        """Test summing a large number of variables."""
        for n in [10, 20, 30, 40, 50]:
            A = np.arange(n * n)
            A = np.reshape(A, (n, n))
            x = Variable((n, n))
            p = Problem(Minimize(at.sum(x)), [x >= A])
            result = p.solve()
            # Optimal x equals A entrywise, so the value is
            # sum(range(n*n)) = n*n*(n*n - 1)/2.
            answer = n * n * (n * n + 1) / 2 - n * n
            print(result - answer)
            self.assertAlmostEqual(result, answer)
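The closed form used for answer is the triangular-number identity: with k = n*n, sum(range(k)) = k*(k - 1)/2 = k*(k + 1)/2 - k, which is exactly the sum of the entries of A. A one-line check:

import numpy as np

n = 10
assert np.arange(n * n).sum() == n * n * (n * n + 1) / 2 - n * n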
Example #5
    def grad(self):
        """Gives the (sub/super)gradient of the expression w.r.t. each variable.

        Matrix expressions are vectorized, so the gradient is a matrix.
        None indicates variable values unknown or outside domain.

        Returns:
            A map of variable to SciPy CSC sparse matrix or None.
        """
        # Subgrad of g(y) = min f_0(x,y)
        #                   s.t. f_i(x,y) <= 0, i = 1,..,p
        #                        h_i(x,y) == 0, i = 1,...,q
        # Given by Df_0(x^*,y) + \sum_i Df_i(x^*,y) \lambda^*_i
        #          + \sum_i Dh_i(x^*,y) \nu^*_i
        # where x^*, \lambda^*_i, \nu^*_i are optimal primal/dual variables.
        # Add PSD constraints in same way.

        # Short circuit for constant.
        if self.is_constant():
            return u.grad.constant_grad(self)

        old_vals = {var.id: var.value for var in self.variables()}
        fix_vars = []
        for var in self.dont_opt_vars:
            if var.value is None:
                return u.grad.error_grad(self)
            else:
                fix_vars += [var == var.value]
        prob = Problem(self.args[0].objective,
                       fix_vars + self.args[0].constraints)
        prob.solve()
        # Compute gradient.
        if prob.status in s.SOLUTION_PRESENT:
            sign = self.is_convex() - self.is_concave()
            # Form Lagrangian.
            lagr = self.args[0].objective.args[0]
            for constr in self.args[0].constraints:
                # TODO: better way to get constraint expressions.
                lagr_multiplier = self.cast_to_const(sign*constr.dual_value)
                prod = lagr_multiplier.T*constr.expr
                if prod.is_scalar():
                    lagr += sum(prod)
                else:
                    lagr += trace(prod)
            grad_map = lagr.grad
            result = {var: grad_map[var] for var in self.dont_opt_vars}
        else:  # Unbounded, infeasible, or solver error.
            result = u.grad.error_grad(self)
        # Restore the original values to the variables.
        for var in self.variables():
            var.value = old_vals[var.id]
        return result
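Written out, the sensitivity result that the comment block compresses (restated from the comments above, not new material): for

    g(y) = min_x f_0(x, y)  s.t.  f_i(x, y) <= 0 (i = 1,...,p),  h_i(x, y) == 0 (i = 1,...,q),

a (sub/super)gradient with respect to y is

    Dg(y) = Df_0(x*, y) + sum_i lambda*_i Df_i(x*, y) + sum_i nu*_i Dh_i(x*, y),

where x*, lambda*_i, nu*_i are optimal primal and dual points. The code forms exactly this expression as the Lagrangian of the inner problem, with the sign factor handling concave (partially maximized) expressions.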
Example #6
    def equivalent_forms_1(self, solver):
        m = 100
        n = 80
        r = 70
        np.random.seed(1)
        A = np.random.randn(m, n)
        b = np.random.randn(m)
        G = np.random.randn(r, n)
        h = np.random.randn(r)

        obj1 = .1 * sum((A * self.xef - b)**2)
        cons = [G * self.xef == h]

        p1 = Problem(Minimize(obj1), cons)
        self.solve_QP(p1, solver)
        self.assertAlmostEqual(p1.value, 68.1119420108, places=4)
Example #7
    def equivalent_forms_1(self, solver):
        m = 100
        n = 80
        r = 70
        numpy.random.seed(1)
        A = numpy.random.randn(m, n)
        b = numpy.random.randn(m, 1)
        G = numpy.random.randn(r, n)
        h = numpy.random.randn(r, 1)

        obj1 = sum((A * self.xef - b)**2)
        cons = [G * self.xef == h]

        p1 = Problem(Minimize(obj1), cons)
        s = self.solve_QP(p1, solver)
        self.assertAlmostEqual(s.opt_val, 681.119420108)
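This variant drops the 0.1 scaling used in the previous example; a positive scaling of the objective cannot change the minimizer, so the optimal value is exactly 10x larger. A standalone sketch of the relationship (assuming a recent CVXPY with the @ operator; xef is the test-class variable):

import numpy as np
import cvxpy as cp

np.random.seed(1)
m, n, r = 100, 80, 70
A = np.random.randn(m, n)
b = np.random.randn(m)
G = np.random.randn(r, n)
h = np.random.randn(r)

x = cp.Variable(n)
cons = [G @ x == h]
v1 = cp.Problem(cp.Minimize(0.1 * cp.sum_squares(A @ x - b)), cons).solve()
v2 = cp.Problem(cp.Minimize(cp.sum_squares(A @ x - b)), cons).solve()
assert np.isclose(10 * v1, v2, rtol=1e-4)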
Example #8
    def huber(self, solver):
        # Generate problem data
        np.random.seed(2)
        n = 3
        m = 5
        A = sp.random(m, n, density=0.8, format='csc')
        x_true = np.random.randn(n) / np.sqrt(n)
        ind95 = (np.random.rand(m) < 0.95).astype(float)
        b = A.dot(x_true) + np.multiply(0.5*np.random.randn(m), ind95) \
            + np.multiply(10.*np.random.rand(m), 1. - ind95)

        # Set up the Huber regression problem
        x = Variable(n)
        objective = sum(huber(A * x - b))

        # Solve as a QP
        p = Problem(Minimize(objective))
        self.solve_QP(p, solver)
        self.assertAlmostEqual(1.327429461061672, objective.value, places=3)
        self.assertItemsAlmostEqual(x.value,
                                    [-1.03751745, 0.86657204, -0.9649172],
                                    places=3)
Example #9
def log_sum_exp_canon(expr, args):
    """Canonicalize log_sum_exp by introducing the epigraph variable t."""
    x = args[0]
    shape = expr.shape
    axis = expr.axis
    t = Variable(shape)

    # log(sum(exp(x))) <= t <=> sum(exp(x-t)) <= 1
    if axis is None:  # shape = (1, 1)
        promoted_t = promote(t, x.shape)
    elif axis == 0:  # shape = (1, n)
        promoted_t = Constant(np.ones(
            (x.shape[0], 1))) * reshape(t, (1, ) + x.shape[1:])
    else:  # shape = (m, 1)
        promoted_t = reshape(t, x.shape[:-1] +
                             (1, )) * Constant(np.ones((1, x.shape[1])))

    exp_expr = exp(x - promoted_t)
    obj, constraints = exp_canon(exp_expr, exp_expr.args)
    obj = sum(obj, axis=axis)
    ones = Constant(np.ones(shape))
    constraints.append(obj <= ones)
    return t, constraints
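The equivalence in the comment follows from monotonicity of exp: log(sum(exp(x))) <= t iff sum(exp(x)) <= exp(t) iff sum(exp(x - t)) <= 1. A quick NumPy sanity check (my own sketch, separate from the canonicalization code):

import numpy as np

x = np.random.randn(4)
t = np.log(np.sum(np.exp(x)))   # the tight epigraph value
assert np.isclose(np.sum(np.exp(x - t)), 1.0)
assert np.sum(np.exp(x - (t + 0.5))) <= 1.0   # any larger t stays feasible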
Example #10
    def power_matrix(self, solver):
        # Minimizing sum((A - 3)**2) drives every entry of A to 3.
        p = Problem(Minimize(sum(power(self.A - 3., 2))), [])
        self.solve_QP(p, solver)
        for var in p.variables():
            self.assertItemsAlmostEqual([3., 3., 3., 3.], var.value, places=4)
Example #11
    def power(self, solver):
        # The unconstrained minimum of sum(x**2) is x = 0.
        p = Problem(Minimize(sum(power(self.x, 2))), [])
        s = self.solve_QP(p, solver)
        for var in p.variables():
            self.assertItemsAlmostEqual([0., 0.], var.value, places=4)
Example #12
    def power_matrix(self, solver):
        p = Problem(Minimize(sum(power(self.A - 3., 2))), [])
        s = self.solve_QP(p, solver)
        for var in p.variables():
            self.assertItemsAlmostEqual([3., 3., 3., 3.],
                                        s.primal_vars[var.id])
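As a standalone illustration of these power examples (a sketch assuming a recent CVXPY; cp.square(e) is equivalent to power(e, 2)):

import cvxpy as cp

A = cp.Variable((2, 2))
p = cp.Problem(cp.Minimize(cp.sum(cp.square(A - 3.0))))
p.solve()
print(A.value)  # every entry is approximately 3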