Example #1
def sig_dual(f, ell=0, X=None, modulator_support=None):
    f = f.without_zeros()
    # Signomial definitions (for the objective).
    lagrangian = f - cl.Variable(name='gamma')
    if modulator_support is None:
        modulator_support = lagrangian.alpha
    t_mul = Signomial(modulator_support,
                      np.ones(modulator_support.shape[0]))**ell
    metadata = {'f': f, 'lagrangian': lagrangian, 'modulator': t_mul, 'X': X}
    lagrangian = lagrangian * t_mul
    f_mod = f * t_mul
    # C_SAGE^STAR (v must belong to the set defined by these constraints).
    v = cl.Variable(shape=(lagrangian.m, 1), name='v')
    con = relative_dual_sage_cone(lagrangian,
                                  v,
                                  name='Lagrangian SAGE dual constraint',
                                  X=X)
    constraints = [con]
    # Equality constraint (for the Lagrangian to be bounded).
    a = sym_corr.relative_coeff_vector(t_mul, lagrangian.alpha)
    a = a.reshape(a.size, 1)
    constraints.append(a.T @ v == 1)
    # Objective definition and problem creation.
    obj_vec = sym_corr.relative_coeff_vector(f_mod, lagrangian.alpha)
    obj = obj_vec.T @ v
    # Create coniclifts Problem
    prob = cl.Problem(cl.MIN, obj, constraints)
    prob.metadata = metadata
    cl.clear_variable_indices()
    return prob
Example #2
def poly_dual(f, poly_ell=0, sigrep_ell=0, X=None):
    if poly_ell == 0:
        sr, _ = f.sig_rep
        prob = sage_sigs.sig_dual(sr, sigrep_ell, X=X)
        if AUTO_CLEAR_INDICES:  # pragma:no cover
            cl.clear_variable_indices()
        return prob
    elif sigrep_ell == 0:
        modulator = f.standard_multiplier()**poly_ell
        gamma = cl.Variable()
        lagrangian = (f - gamma) * modulator
        v = cl.Variable(shape=(lagrangian.m, 1), name='v')
        con_base_name = v.name + ' domain'
        constraints = relative_dual_sage_poly_cone(lagrangian,
                                                   v,
                                                   con_base_name,
                                                   log_AbK=X)
        a = sym_corr.relative_coeff_vector(modulator, lagrangian.alpha)
        constraints.append(a.T @ v == 1)
        f_mod = Polynomial(f.alpha, f.c) * modulator
        obj_vec = sym_corr.relative_coeff_vector(f_mod, lagrangian.alpha)
        obj = obj_vec.T @ v
        prob = cl.Problem(cl.MIN, obj, constraints)
        if AUTO_CLEAR_INDICES:  # pragma:no cover
            cl.clear_variable_indices()
        return prob
    else:  # pragma: no cover
        raise NotImplementedError()
Example #3
def poly_constrained_dual(f, gts, eqs, p=0, q=1, ell=0, X=None, slacks=False):
    """
    Construct the dual SAGE-(p, q, ell) relaxation for the polynomial optimization problem

        inf{ f(x) : g(x) >= 0 for g in gts,
                    g(x) == 0 for g in eqs,
                    and x in X }

    where :math:`X = R^{\\texttt{f.n}}` by default.
    """
    lagrangian, ineq_lag_mults, eq_lag_mults, _ = make_poly_lagrangian(f, gts, eqs, p=p, q=q)
    metadata = {'lagrangian': lagrangian, 'f': f, 'gts': gts, 'eqs': eqs, 'X': X}
    if ell > 0:
        alpha_E_1 = hierarchy_e_k([f, f.upcast_to_polynomial(1)] + gts + eqs, k=1)
        modulator = Polynomial(2 * alpha_E_1, np.ones(alpha_E_1.shape[0])) ** ell
        lagrangian = lagrangian * modulator
        f = f * modulator
    else:
        modulator = f.upcast_to_polynomial(1)
    metadata['modulator'] = modulator
    # In primal form, the Lagrangian is constrained to be a SAGE polynomial.
    # Introduce a dual variable "v" for this constraint.
    v = cl.Variable(shape=(lagrangian.m, 1), name='v')
    metadata['v_poly'] = v
    constraints = relative_dual_sage_poly_cone(lagrangian, v, 'Lagrangian', log_AbK=X)
    for s_g, g in ineq_lag_mults:
        # These generalized Lagrange multipliers "s_g" are SAGE polynomials.
        # For each such multiplier, introduce an appropriate dual variable "v_g", along
        # with constraints over that dual variable.
        g_m = g * modulator
        c_g = sym_corr.moment_reduction_array(s_g, g_m, lagrangian)
        name_base = 'v_' + str(g)
        if slacks:
            v_g = cl.Variable(name=name_base, shape=(s_g.m, 1))
            con = c_g @ v == v_g
            con.name += str(g) + ' >= 0'
            constraints.append(con)
        else:
            v_g = c_g @ v
        constraints += relative_dual_sage_poly_cone(s_g, v_g,
                                                    name_base=(name_base + ' domain'), log_AbK=X)
    for z_g, g in eq_lag_mults:
        # These generalized Lagrange multipliers "z_g" are arbitrary polynomials.
        # They dualize to homogeneous equality constraints.
        g_m = g * modulator
        c_g = sym_corr.moment_reduction_array(z_g, g_m, lagrangian)
        con = c_g @ v == 0
        con.name += str(g) + ' == 0'
        constraints.append(con)
    # Equality constraint (for the Lagrangian to be bounded).
    a = sym_corr.relative_coeff_vector(modulator, lagrangian.alpha)
    constraints.append(a.T @ v == 1)
    # Define the dual objective function.
    obj_vec = sym_corr.relative_coeff_vector(f, lagrangian.alpha)
    obj = obj_vec.T @ v
    # Return the coniclifts Problem.
    prob = cl.Problem(cl.MIN, obj, constraints)
    prob.metadata = metadata
    cl.clear_variable_indices()
    return prob
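
A minimal sketch of how this constructor is meant to be called, assuming `Polynomial` and `poly_constrained_dual` are importable as in the module above; the toy data is purely illustrative.

import numpy as np

# f(x) = x1^4 + x2^4 - 3*x1*x2, constrained by g(x) = 1 - x1^2 - x2^2 >= 0.
f = Polynomial(np.array([[4, 0], [0, 4], [1, 1]]), np.array([1.0, 1.0, -3.0]))
g = Polynomial(np.array([[0, 0], [2, 0], [0, 2]]), np.array([1.0, -1.0, -1.0]))

prob = poly_constrained_dual(f, gts=[g], eqs=[], p=0, q=1, ell=0)
prob.solve(solver='ECOS', verbose=False)
# When the solve succeeds, prob.value is a lower bound on the constrained minimum of f.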
Example #4
 def test_pcp_2(self):
     # TODO: reformulate with SolverTestHelper
     """
     Reformulate
         max  (x**0.2)*(y**0.8) + z**0.4 - x
         s.t. x + y + z/2 == 2
              x, y, z >= 0
     Into
         max  x3 + x4 - x0
         s.t. x0 + x1 + x2 / 2 == 2,
              (x0, x1, x3) in Pow3D(0.2)
              (x2, 1.0, x4) in Pow3D(0.4)
     """
     x = cl.Variable(shape=(3,))
     hypos = cl.Variable(shape=(2,))
     objective = -cl.sum(hypos) + x[0]
     con1_expr = cl.hstack((x[0], x[1], hypos[0]))
     con1_weights = np.array([0.2, 0.8, -1.0])
     con2_expr = cl.hstack((x[2], 1.0, hypos[1]))
     con2_weights = np.array([0.4, 0.6, -1.0])
     constraints = [
         x[0] + x[1] + 0.5 * x[2] == 2,
         PowCone(con1_expr, con1_weights),
         PowCone(con2_expr, con2_weights)
     ]
     opt_objective = -1.8073406786220672
     opt_x = np.array([0.06393515, 0.78320961, 2.30571048])
     prob = cl.Problem(cl.MIN, objective, constraints)
     prob.solve(solver='CP')
     self.assertAlmostEqual(prob.value, opt_objective)
     assert np.allclose(x.value, opt_x, atol=1e-3)
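
A quick plain-numpy check (not part of the test) that the reported optimum is consistent with the Pow3D memberships described in the docstring, i.e. x0**0.2 * x1**0.8 >= |x3| and x2**0.4 >= |x4|, both tight at optimality.

import numpy as np

x0, x1, x2 = 0.06393515, 0.78320961, 2.30571048
hypo0 = x0 ** 0.2 * x1 ** 0.8   # value of x3 when the first power cone is tight
hypo1 = x2 ** 0.4               # value of x4 when the second power cone is tight
assert np.isclose(-(hypo0 + hypo1) + x0, -1.8073406786220672, atol=1e-4)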
Example #5
 def test_pcp_1(self):
     # TODO: reformulate with SolverTestHelper
     """
     Use a 3D power cone formulation for
     min 3 * x[0] + 2 * x[1] + x[2]
     s.t. norm(x,2) <= y
          x[0] + x[1] + 3*x[2] >= 1.0
          y <= 5
     """
     x = cl.Variable(shape=(3,))
     y_square = cl.Variable()
     epis = cl.Variable(shape=(3,))
     constraints = [PowCone(cl.hstack((1.0, x[0], epis[0])), np.array([0.5, -1, 0.5])),
                    PowCone(cl.hstack((1.0, x[1], epis[1])), np.array([0.5, -1, 0.5])),
                    PowCone(cl.hstack((x[2], 1.0, epis[2])), np.array([-1, 0.5, 0.5])),
                     # Could have done PowCone(cl.hstack((1.0, x[2], epis[2])), np.array([0.5, -1, 0.5])).
                    cl.sum(epis) <= y_square,
                    x[0] + x[1] + 3 * x[2] >= 1.0,
                    y_square <= 25]
     objective = 3 * x[0] + 2 * x[1] + x[2]
     expect_x = np.array([-3.874621860638774, -2.129788233677883, 2.33480343377204])
     expect_epis = expect_x ** 2
     expect_x = np.round(expect_x, decimals=5)
     expect_epis = np.round(expect_epis, decimals=5)
     expect_y_square = 25
     expect_objective = -13.548638904065102
     prob = cl.Problem(cl.MIN, objective, constraints)
     prob.solve(solver='CP')
     self.assertAlmostEqual(prob.value, expect_objective, delta=1e-4)
     self.assertAlmostEqual(y_square.value, expect_y_square, delta=1e-4)
     concat = cl.hstack((x.value, epis.value))
     expect_concat = cl.hstack((expect_x, expect_epis))
     for i in range(5):
         self.assertAlmostEqual(concat[i], expect_concat[i], delta=1e-2)
     pass
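
The test above encodes norm(x, 2) <= y through squared terms: each PowCone((1.0, x_i, e_i), (0.5, -1, 0.5)) enforces sqrt(1.0 * e_i) >= |x_i|, i.e. e_i >= x_i**2, and sum(epis) <= y_square <= 25 then bounds the norm by 5. A plain-numpy sanity check against the expected solution quoted above:

import numpy as np

x = np.array([-3.874621860638774, -2.129788233677883, 2.33480343377204])
assert np.isclose(np.sum(x ** 2), 25.0, atol=1e-5)                             # norm bound is tight
assert np.isclose(3 * x[0] + 2 * x[1] + x[2], -13.548638904065102, atol=1e-5)  # objective value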
Example #6
def _least_squares_magnitude_recovery(con, alpha_reduced, v_reduced, zero_tol):
    v_abs = np.abs(v_reduced).ravel()
    if con.X is not None:
        n = con.X.A.shape[1]
    else:
        n = con.alpha.shape[1]
    if n > con.alpha.shape[1]:
        padding = np.zeros(shape=(alpha_reduced.shape[0],
                                  n - con.alpha.shape[1]))
        alpha_reduced = np.hstack((alpha_reduced, padding))
    y = cl.Variable(shape=(n, ), name='abs moment mag recovery')
    are_nonzero = v_abs > np.sqrt(zero_tol)
    t = cl.Variable(shape=(1, ), name='t')
    residual = alpha_reduced[are_nonzero, :] @ y - np.log(v_abs[are_nonzero])
    constraints = [cl.vector2norm(residual) <= t]
    if np.any(~are_nonzero):
        tempcon = alpha_reduced[~are_nonzero, :] @ y <= np.log(zero_tol)
        constraints.append(tempcon)
    if con.X is not None:
        A, b, K = con.X.A, con.X.b, con.X.K
        tempcon = cl.PrimalProductCone(A @ y + b, K)
        constraints.append(tempcon)
    prob = cl.Problem(cl.MIN, t, constraints)
    prob.solve(verbose=False)
    cl.clear_variable_indices()
    if prob.status in {cl.SOLVED, cl.INACCURATE} and prob.value < np.inf:
        mag = np.exp(y.value.astype(np.longdouble))
        return mag
    else:
        return None
Example #7
def sig_constrained_dual(f, gts, eqs, p=0, q=1, ell=0, X=None, slacks=False):
    """
    Construct the SAGE-(p, q, ell) dual problem for the signomial program

        min{ f(x) : g(x) >= 0 for g in gts,
                    g(x) == 0 for g in eqs,
                    and x in X }

    where :math:`X = R^{\\texttt{f.n}}` by default.
    """
    lagrangian, ineq_lag_mults, eq_lag_mults, _ = make_sig_lagrangian(f, gts, eqs, p=p, q=q)
    metadata = {'lagrangian': lagrangian, 'f': f, 'gts': gts, 'eqs': eqs, 'level': (p, q, ell), 'X': X}
    if ell > 0:
        alpha_E_1 = hierarchy_e_k([f, f.upcast_to_signomial(1)] + list(gts) + list(eqs), k=1)
        modulator = Signomial(alpha_E_1, np.ones(alpha_E_1.shape[0])) ** ell
        lagrangian = lagrangian * modulator
        f = f * modulator
    else:
        modulator = f.upcast_to_signomial(1)
    metadata['modulator'] = modulator
    # In primal form, the Lagrangian is constrained to be a SAGE signomial.
    # Introduce a dual variable "v" for this constraint.
    v = cl.Variable(shape=(lagrangian.m, 1), name='v')
    con = relative_dual_sage_cone(lagrangian, v, name='Lagrangian SAGE dual constraint', X=X)
    constraints = [con]
    expcovers = None
    for i, (s_h, h) in enumerate(ineq_lag_mults):
        # These generalized Lagrange multipliers "s_h" are SAGE signomials.
        # For each such multiplier, introduce an appropriate dual variable "v_h", along
        # with constraints over that dual variable.
        h_m = h * modulator
        c_h = sym_corr.moment_reduction_array(s_h, h_m, lagrangian)
        if slacks:
            v_h = cl.Variable(name='v_' + str(h), shape=(s_h.m, 1))
            constraints.append(c_h @ v == v_h)
        else:
            v_h = c_h @ v
        con_name = 'SAGE dual for signomial inequality # ' + str(i)
        con = relative_dual_sage_cone(s_h, v_h, name=con_name, X=X, expcovers=expcovers)
        expcovers = con.ech.expcovers  # only *really* needed in the first iteration, but keeps the code flat.
        constraints.append(con)
    for s_h, h in eq_lag_mults:
        # These generalized Lagrange multipliers "s_h" are arbitrary signomials.
        # They dualize to homogeneous equality constraints.
        h = h * modulator
        c_h = sym_corr.moment_reduction_array(s_h, h, lagrangian)
        constraints.append(c_h @ v == 0)
    # Equality constraint (for the Lagrangian to be bounded).
    a = sym_corr.relative_coeff_vector(modulator, lagrangian.alpha)
    constraints.append(a.T @ v == 1)
    # Define the dual objective function.
    obj_vec = sym_corr.relative_coeff_vector(f, lagrangian.alpha)
    obj = obj_vec.T @ v
    # Return the coniclifts Problem.
    prob = cl.Problem(cl.MIN, obj, constraints)
    prob.metadata = metadata
    cl.clear_variable_indices()
    return prob
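
A minimal call-pattern sketch, assuming `Signomial` and `sig_constrained_dual` are importable as in the module above; the toy data is illustrative only. Note that the returned problem stores `prob.metadata` (the Lagrangian, modulator, and level) for later use.

import numpy as np

# f(x) = exp(x) + exp(-x) - 2.2*exp(x/2), constrained by g(x) = 1 - exp(x) >= 0 (i.e. x <= 0).
f = Signomial(np.array([[1.0], [-1.0], [0.5]]), np.array([1.0, 1.0, -2.2]))
g = Signomial(np.array([[0.0], [1.0]]), np.array([1.0, -1.0]))

prob = sig_constrained_dual(f, gts=[g], eqs=[], p=0, q=1, ell=0)
prob.solve(solver='ECOS', verbose=False)
# When the solve succeeds, prob.value lower-bounds the constrained infimum of f.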
Example #8
def _make_dummy_lagrangian(f, gts, eqs):
    dummy_gamma = cl.Variable(shape=())
    if len(gts) > 0:
        dummy_slacks = cl.Variable(shape=(len(gts), ))
        ineq_term = sum([gts[i] * dummy_slacks[i] for i in range(len(gts))])
    else:
        ineq_term = 0
    if len(eqs) > 0:
        dummy_multipliers = cl.Variable(shape=(len(eqs), ))
        eq_term = sum([eqs[i] * dummy_multipliers[i] for i in range(len(eqs))])
    else:
        eq_term = 0
    dummy_L = f - dummy_gamma - ineq_term - eq_term
    return dummy_L
Example #9
def _constrained_least_squares(con, alpha, log_v):
    A, b, K = con.X.A, con.X.b, con.X.K
    lifted_n = A.shape[1]
    n = con.alpha.shape[1]
    x = cl.Variable(shape=(lifted_n,))
    t = cl.Variable(shape=(1,))
    cons = [cl.vector2norm(log_v - alpha @ x[:n]) <= t,
            cl.PrimalProductCone(A @ x + b, K)]
    prob = cl.Problem(cl.MIN, t, cons)
    cl.clear_variable_indices()
    res = prob.solve(verbose=False)
    if res[0] in {cl.SOLVED, cl.INACCURATE}:
        mu_ls = x.value[:n]
        return mu_ls
    else:
        return None
Example #10
    def test_simple_sage_1(self):
        """
        Solve a simple SAGE relaxation for a signomial minimization problem.

        Do this without resorting to "Signomial" objects.
        """
        alpha = np.array([[0, 0],
                          [1, 0],
                          [0, 1],
                          [1, 1],
                          [0.5, 0],
                          [0, 0.5]])
        gamma = cl.Variable(shape=(), name='gamma')
        c = cl.Expression([0 - gamma, 3, 2, 1, -4, -2])
        expected_val = -1.8333331773244161

        # with presolve
        cl.presolve_trivial_age_cones(True)
        con = cl.PrimalSageCone(c, alpha, None, 'test_con_name')
        obj = gamma
        prob = Problem(cl.MAX, obj, [con])
        status, val = prob.solve(solver='ECOS', verbose=False)
        assert abs(val - expected_val) < 1e-6
        v = con.violation()
        assert v < 1e-6

        # without presolve
        cl.presolve_trivial_age_cones(False)
        con = cl.PrimalSageCone(c, alpha, None, 'test_con_name')
        obj = gamma
        prob = Problem(cl.MAX, obj, [con])
        status, val = prob.solve(solver='ECOS', verbose=False)
        assert abs(val - expected_val) < 1e-6
        v = con.violation()
        assert v < 1e-6
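
What the test certifies, written out in plain numpy: the optimal gamma is a lower bound on the signomial f(x) = 3*exp(x1) + 2*exp(x2) + exp(x1+x2) - 4*exp(x1/2) - 2*exp(x2/2). The coarse grid below is only a sanity check, not a proof, and the bound need not be tight.

import numpy as np

alpha = np.array([[0, 0], [1, 0], [0, 1], [1, 1], [0.5, 0], [0, 0.5]])
c = np.array([0, 3, 2, 1, -4, -2])

def f(x):
    return c @ np.exp(alpha @ x)

grid = np.linspace(-4, 2, 61)
assert all(f(np.array([u, w])) >= -1.8333331773244161 - 1e-8 for u in grid for w in grid)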
Example #11
 def _compute_sig_rep(self):
     self._sig_rep = None
     self._sig_rep_constrs = []
     sigrep_c = np.zeros(shape=(self.m, ), dtype=object)
     need_vars = []
     for i, row in enumerate(self.alpha):
         if np.any(row % 2 != 0):
             if isinstance(self.c[i], __NUMERIC_TYPES__):
                 sigrep_c[i] = -abs(self.c[i])
             elif self.c[i].is_constant():
                 sigrep_c[i] = -abs(self.c[i].value)
             else:
                 need_vars.append(i)
         else:
             if isinstance(self.c[i], np.ndarray):
                 sigrep_c[i] = self.c[i][()]
             else:
                 sigrep_c[i] = self.c[i]
     if len(need_vars) > 0:
         var_name = str(self) + ' variable sigrep coefficients'
         c_hat = cl.Variable(shape=(len(need_vars), ), name=var_name)
         sigrep_c[need_vars] = c_hat
         self._sig_rep_constrs.append(c_hat <= self.c[need_vars])
         self._sig_rep_constrs.append(c_hat <= -self.c[need_vars])
     if sigrep_c.dtype == object:
         sigrep_c = cl.Expression(sigrep_c)
     self._sig_rep = Signomial(self.alpha, sigrep_c)
     pass
Example #12
 def test_simple_MILP(self):
     # Include continuous variables
     x = cl.Variable()
     y = cl.Variable((2,))
     obj_expr = y[0]  # minimize me
     cont_cons = [cl.sum(y) == x, -1.5 <= x, x <= 2.5, 0 <= y[1], y[1] <= 4.7]
     prob = cl.Problem(cl.MIN, obj_expr, cont_cons, integer_variables=[x])
     prob.solve(solver='MOSEK')
     # To push y[0] negative, we need to push x to its integer lower bound (x = -1)
     # and y[1] to its upper bound (y[1] = 4.7).
     expect_y = np.array([-5.7, 4.7])
     expect_x = -1.0
     self.assertAlmostEqual(y[0].value, expect_y[0], places=5)
     self.assertAlmostEqual(y[1].value, expect_y[1], places=5)
     self.assertAlmostEqual(x.value, expect_x, places=5)
     pass
Example #13
    def _geometric_program_1(self, solver, **kwargs):
        """
        Solve a GP with a linear objective and single posynomial constraint.

        The reference solution was computed by Wolfram Alpha.
        """
        alpha = np.array([[1, 0],
                          [0, 1],
                          [1, 1],
                          [0.5, 0],
                          [0, 0.5]])
        c = np.array([3, 2, 1, 4, 2])
        x = cl.Variable(shape=(2,), name='x')
        y = alpha @ x
        expr = cl.weighted_sum_exp(c, y)
        cons = [expr <= 1]
        obj = - x[0] - 2 * x[1]
        prob = Problem(cl.MIN, obj, cons)
        status, val = prob.solve(solver=solver, **kwargs)
        assert status == 'solved'
        assert abs(val - 10.4075826) < 1e-6
        x_star = x.value
        expect = np.array([-4.93083, -2.73838])
        assert np.allclose(x_star, expect, atol=1e-4)
        return prob
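
A plain-numpy cross-check of the reference solution quoted above: at x_star the posynomial constraint is active (value approximately 1) and the objective matches the Wolfram Alpha value.

import numpy as np

alpha = np.array([[1, 0], [0, 1], [1, 1], [0.5, 0], [0, 0.5]])
c = np.array([3, 2, 1, 4, 2])
x_star = np.array([-4.93083, -2.73838])
assert abs(c @ np.exp(alpha @ x_star) - 1.0) < 1e-4           # constraint is active
assert abs((-x_star[0] - 2 * x_star[1]) - 10.4075826) < 1e-4  # objective value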
Example #14
def poly_primal(f, poly_ell=0, sigrep_ell=0, X=None):
    if poly_ell == 0:
        sr, _ = f.sig_rep
        prob = sage_sigs.sig_primal(sr, sigrep_ell, X=X)
        if AUTO_CLEAR_INDICES:  # pragma:no cover
            cl.clear_variable_indices()
        return prob
    else:
        poly_modulator = f.standard_multiplier()**poly_ell
        gamma = cl.Variable(shape=(), name='gamma')
        lagrangian = (f - gamma) * poly_modulator
        if sigrep_ell > 0:
            sr, cons = lagrangian.sig_rep
            sig_modulator = Signomial(sr.alpha,
                                      np.ones(shape=(sr.m, )))**sigrep_ell
            sig_under_test = sr * sig_modulator
            con_name = 'Lagrangian modulated sigrep sage'
            con = sage_sigs.primal_sage_cone(sig_under_test, con_name, X=X)
            constraints = [con] + cons
        else:
            con_name = 'Lagrangian sage poly'
            constraints = primal_sage_poly_cone(lagrangian,
                                                con_name,
                                                log_AbK=X)
        obj = gamma
        prob = cl.Problem(cl.MAX, obj, constraints)
        if AUTO_CLEAR_INDICES:  # pragma:no cover
            cl.clear_variable_indices()
        return prob
Example #15
 def test_trivial_01LP(self):
     x = cl.Variable()
     obj_expr = x
     cont_cons = [0 <= x, x <= 1.5]
     prob = cl.Problem(cl.MAX, obj_expr, cont_cons,
                       integer_variables=[x])
     prob.solve(solver='MOSEK')
     self.assertAlmostEqual(x.value, 1.0, places=5)
     pass
Example #16
 def test_redundant_components(self):
     # create problems where some (but not all) components of a vector variable
     # participate in the final conic formulation.
     x = cl.Variable(shape=(4,))
     cons = [0 <= x[1:], cl.sum(x[1:]) <= 1]
     objective = x[1] + 0.5 * x[2] + 0.25 * x[3]
     prob = cl.Problem(cl.MAX, objective, cons)
     prob.solve(solver='ECOS', verbose=False)
     assert np.allclose(x.value, np.array([0, 1, 0, 0]))
     pass
Example #17
    def test_simple_MINLP(self):
        x = cl.Variable(shape=(3,))
        y = cl.Variable(shape=(2,))
        constraints = [cl.vector2norm(x) <= y[0],
                       cl.vector2norm(x) <= y[1],
                       x[0] + x[1] + 3 * x[2] >= 0.1,
                       y <= 5]
        obj_expr = 3 * x[0] + 2 * x[1] + x[2] + y[0] + 2 * y[1]
        prob = cl.Problem(cl.MIN, obj_expr, constraints, integer_variables=[y])
        prob.solve(solver='MOSEK')

        expect_obj = 0.21363997604807272
        self.assertAlmostEqual(prob.value, expect_obj, places=4)
        expect_x = np.array([-0.78510265, -0.43565177, 0.44025147])
        for i in [0, 1, 2]:
            self.assertAlmostEqual(x[i].value, expect_x[i], places=4)
        expect_y = np.array([1.0, 1.0])
        for i in [0, 1]:
            self.assertAlmostEqual(y[i].value, expect_y[i], places=4)
        pass
Example #18
 def pcp_4(ceei: bool = True):
     """
     A power cone formulation of a Fisher market equilibrium pricing model.
     ceei = Competitive Equilibrium from Equal Incomes
     """
     # Generate test data
     np.random.seed(0)
     n_buyer = 4
     n_items = 6
     V = np.random.rand(n_buyer, n_items)
     X = cl.Variable(shape=(n_buyer, n_items))
     u = cl.sum(V * X, axis=1)
     z = cl.Variable()
     if ceei:
         b = np.ones(n_buyer) / n_buyer
         expect_X = np.array([[9.16311051e-01, 2.71146000e-09, 6.44984275e-10, 0.00000000e+00,
                               1.85098676e-09, 6.66541059e-01],
                              [0.00000000e+00, 0.00000000e+00, 5.30793141e-01, 0.00000000e+00,
                               9.99999995e-01, 1.35828851e-09],
                              [9.78080132e-10, 9.99999998e-01, 0.00000000e+00, 0.00000000e+00,
                               1.16278780e-09, 3.33458937e-01],
                              [8.36889514e-02, 0.00000000e+00, 4.69206858e-01, 1.00000001e+00,
                               7.80694090e-10, 8.26483799e-10]])
         pow_objective = (-z, -1.179743761485325)
     else:
         b = np.array([0.3, 0.15, 0.2, 0.35])
         expect_X = np.array([[9.08798195e-01, 0.00000000e+00, 0.00000000e+00, 2.67738456e-10,
                               3.44073780e-09, 9.58119833e-01],
                              [0.00000000e+00, 1.92431554e-10, 3.91981663e-09, 0.00000000e+00,
                               9.99999991e-01, 0.00000000e+00],
                              [0.00000000e+00, 9.99999993e-01, 0.00000000e+00, 0.00000000e+00,
                               0.00000000e+00, 4.18801652e-02],
                              [9.12018094e-02, 1.09687013e-08, 1.00000000e+00, 1.00000001e+00,
                               5.94724468e-09, 6.99603695e-09]])
         pow_objective = (-z, -1.2279371987281384)
     pow_cons = [(cl.sum(X, axis=0) <= 1, None),
                 (PowCone(cl.hstack((u, z)), np.hstack((b, -1))), None),
                 (X >= 0, None)]
     pow_vars = [(X, expect_X)]
     sth = SolverTestHelper(pow_objective, pow_vars, pow_cons)
     return sth
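
The single PowCone constraint above is the whole trick: with weights (b, -1), membership of (u, z) means prod_i u_i**b_i >= |z|, so maximizing z maximizes the b-weighted geometric mean of buyer utilities. Below is a stripped-down sketch of the same encoding, using only the coniclifts calls that appear in these examples; solver='CP' mirrors the power-cone tests above, and any installed solver with power-cone support would do.

import numpy as np

u = cl.Variable(shape=(2,))
z = cl.Variable()
cons = [PowCone(cl.hstack((u, z)), np.array([0.5, 0.5, -1.0])),  # sqrt(u0 * u1) >= |z|
        u[0] + u[1] <= 2,
        u >= 0]
prob = cl.Problem(cl.MAX, z, cons)
prob.solve(solver='CP')
# Expected optimum: z = 1 at u = (1, 1), the largest geometric mean subject to u >= 0, u0 + u1 <= 2.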
Example #19
 def test_sigrep_2(self):
     c33 = cl.Variable(shape=(), name='c33')
     alpha = np.array([[0, 0], [1, 1], [3, 3]])
     c = cl.Expression([0, -1, c33])
     p = Polynomial(alpha, c)
     sr, sr_cons = p.sig_rep
     assert len(sr_cons) == 2
     var_names = set(v.name for v in sr_cons[0].variables())
     var_names = var_names.union(set(v.name for v in sr_cons[1].variables()))
     for v in var_names:
         assert v == 'c33' or v == str(p) + ' variable sigrep coefficients'
     assert sr.alpha_c[(1, 1)] == -1
Example #20
 def test_variables(self):
     # random problem data
     G = np.random.randn(3, 6)
     h = G @ np.random.rand(6)
     c = np.random.rand(6)
     # input to coniclifts' Problem constructor
     x = cl.Variable(shape=(6,))
     constrs = [0 <= x, G @ x == h]
     objective_expression = c @ x
     prob = cl.Problem(cl.MIN, objective_expression, constrs)
     x = Variable(shape=(3,), name='my_name')
     shallow_copy = [v for v in prob.all_variables]
     assert Expression.are_equivalent(shallow_copy, prob.variables())
Example #21
 def case_1():
     alpha = np.array([[1, 0], [0, 1], [1, 1], [0.5, 0], [0, 0.5]])
     c = np.array([3, 2, 1, 4, 2])
     x = cl.Variable(shape=(2, ), name='x')
     y = alpha @ x
     expr = cl.weighted_sum_exp(c, y)
     cons = [expr <= 1]
     obj = -x[0] - 2 * x[1]
     prob = Problem(cl.MIN, obj, cons)
     status = 'solved'
     value = 10.4075826  # up to 1e-6
     x_star = np.array([-4.93083, -2.73838])  # up to 1e-4
     return prob, status, value, x_star
Example #22
def sig_primal(f, ell=0, X=None, modulator_support=None):
    f = f.without_zeros()
    gamma = cl.Variable(name='gamma')
    lagrangian = f - gamma
    if modulator_support is None:
        modulator_support = lagrangian.alpha
    t = Signomial(modulator_support, np.ones(modulator_support.shape[0]))
    s_mod = lagrangian * (t ** ell)
    con = primal_sage_cone(s_mod, name=str(s_mod), X=X)
    constraints = [con]
    obj = gamma.as_expr()
    prob = cl.Problem(cl.MAX, obj, constraints)
    cl.clear_variable_indices()
    return prob
Example #23
def sage_multiplier_search(f, level=1, X=None):
    """
    Constructs a coniclifts maximization Problem which is feasible if ``f`` can be certified as nonnegative
    over ``X``, by using an appropriate X-SAGE modulating function.

    Parameters
    ----------
    f : Polynomial
        We want to test if ``f`` is nonnegative over ``X``.
    level : int
        Controls the complexity of the X-SAGE modulating function. Must be a positive integer.
    X : PolyDomain or None
        If ``X`` is None, then we test nonnegativity of ``f`` over :math:`R^{\\texttt{f.n}}`.

    Returns
    -------
    prob : sageopt.coniclifts.Problem

    Notes
    -----
    This function provides an alternative to moving up the reference SAGE hierarchy, for the
    goal of certifying nonnegativity of a polynomial ``f`` over some set ``X`` where ``|X|``
    is log-convex. In general, the approach is to introduce a polynomial

        ``mult = Polynomial(alpha_hat, c_tilde)``

    where the rows of ``alpha_hat`` are all ``level``-wise sums of rows from ``f.alpha``, and ``c_tilde``
    is a coniclifts Variable defining a nonzero SAGE polynomial. Then we can check if
    ``f_mod = f * mult`` is SAGE for any choice of ``c_tilde``.
    """
    constraints = []
    # Make the multiplier polynomial (and require that it be SAGE)
    mult_alpha = hierarchy_e_k([f], k=level)
    c_tilde = cl.Variable(shape=(mult_alpha.shape[0], ), name='c_tilde')
    mult = Polynomial(mult_alpha, c_tilde)
    temp_cons = primal_sage_poly_cone(mult,
                                      name=(c_tilde.name + ' domain'),
                                      log_AbK=X)
    constraints += temp_cons
    constraints.append(cl.sum(c_tilde) >= 1)
    # Make "f_mod := f * mult", and require that it be SAGE.
    f_mod = mult * f
    temp_cons = primal_sage_poly_cone(f_mod, name='f_mod sage poly', log_AbK=X)
    constraints += temp_cons
    # noinspection PyTypeChecker
    prob = cl.Problem(cl.MAX, 0, constraints)
    if AUTO_CLEAR_INDICES:  # pragma:no cover
        cl.clear_variable_indices()
    return prob
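
An illustrative call pattern, assuming `Polynomial` and `sage_multiplier_search` are importable as in the module above. The Motzkin form below is nonnegative on R^2 (by AM-GM); solving the feasibility problem asks whether a level-1 SAGE multiplier certifies that.

import numpy as np

# Motzkin form: x1^4*x2^2 + x1^2*x2^4 + 1 - 3*x1^2*x2^2.
f = Polynomial(np.array([[4, 2], [2, 4], [0, 0], [2, 2]]),
               np.array([1.0, 1.0, 1.0, -3.0]))
prob = sage_multiplier_search(f, level=1)
prob.solve(solver='ECOS', verbose=False)
print(prob.status)  # a 'solved' status certifies nonnegativity; infeasibility suggests raising `level`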
Example #24
def sage_multiplier_search(f, level=1, X=None):
    """
    Constructs a coniclifts maximization Problem which is feasible if ``f`` can be certified as nonnegative
    over ``X``, by using an appropriate X-SAGE modulating function.

    Parameters
    ----------
    f : Signomial
        We want to test if ``f`` is nonnegative over ``X``.
    level : int
        Controls the complexity of the X-SAGE modulating function. Must be a positive integer.
    X : SigDomain
        If ``X`` is None, then we test nonnegativity of ``f`` over :math:`R^{\\texttt{f.n}}`.


    Returns
    -------
    prob : sageopt.coniclifts.Problem

    Notes
    -----
    This function provides an alternative to moving up the reference SAGE hierarchy, for the goal of certifying
    nonnegativity of a signomial ``f`` over some convex set ``X``.  In general, the approach is to introduce
    a signomial

        ``mult = Signomial(alpha_hat, c_tilde)``

    where the rows of ``alpha_hat`` are all ``level``-wise sums of rows from ``f.alpha``, and ``c_tilde``
    is a coniclifts Variable defining a nonzero X-SAGE function. Then we check if ``f_mod = f * mult``
    is X-SAGE for any choice of ``c_tilde``.
    """
    f = f.without_zeros()
    constraints = []
    mult_alpha = hierarchy_e_k([f, f.upcast_to_signomial(1)], k=level)
    c_tilde = cl.Variable(mult_alpha.shape[0], name='c_tilde')
    mult = Signomial(mult_alpha, c_tilde)
    constraints.append(cl.sum(c_tilde) >= 1)
    sig_under_test = mult * f
    con1 = primal_sage_cone(mult, name=str(mult), X=X)
    con2 = primal_sage_cone(sig_under_test, name=str(sig_under_test), X=X)
    constraints.append(con1)
    constraints.append(con2)
    prob = cl.Problem(cl.MAX, cl.Expression([0]), constraints)
    if AUTO_CLEAR_INDICES:  # pragma:no cover
        cl.clear_variable_indices()
    return prob
Example #25
 def test_signomial_multiplication(self):
     # data for tests
     s0 = Signomial.from_dict({(0, ): 1, (1, ): 2, (2, ): 3})
     t0 = Signomial.from_dict({(-1, ): 1})
     q0 = Signomial.from_dict({(5, ): 0})
     # tests
     s = s0 * t0
     s = s.without_zeros()
     assert s.alpha_c == {(-1, ): 1, (0, ): 2, (1, ): 3}
     s = t0 * s0
     s = s.without_zeros()
     assert s.alpha_c == {(-1, ): 1, (0, ): 2, (1, ): 3}
     s = s0 * q0
     s = s.without_zeros()
     assert s.alpha_c == {(0, ): 0}
     z = cl.Variable()
     a = z * s0
     assert True
Example #26
 def test_infeasible_sig_domain(self):
     x = cl.Variable()
     cons = [x <= -1, x >= 1]
     try:
         dom = SigDomain(1, coniclifts_cons=cons)
         assert False
     except RuntimeError as err:
         err_str = str(err)
         assert 'seem to be infeasible' in err_str
     A = np.ones(shape=(2, 2))
     b = np.array([0, 1])
     K = [cl.Cone('0', 2)]
     try:
         dom = SigDomain(2, AbK=(A, b, K))
         assert False
     except RuntimeError as err:
         err_str = str(err)
         assert 'seem to be infeasible' in err_str
     pass
Example #27
    def test_infeasible_poly_domain(self):
        x = cl.Variable()
        cons = [x <= -1, x >= 1]
        try:
            dom = PolyDomain(1, logspace_cons=cons)
            assert False
        except RuntimeError as err:
            err_str = str(err)
            assert 'seem to be infeasible' in err_str

        A = np.ones(shape=(2, 2))
        b = np.array([0, 1])
        K = [cl.Cone('0', 2)]
        try:
            dom = PolyDomain(2, log_AbK=(A, b, K))
            assert False
        except RuntimeError as err:
            err_str = str(err)
            assert 'seem to be infeasible' in err_str
        self.assertRaises(RuntimeError, PolyDomain, 1, logspace_cons=cons, log_AbK=(A, b, K))
Example #28
 def _check_feasibility(self):
     A, b, K = self.A, self.b, self.K
     y = cl.Variable(shape=(A.shape[1], ), name='y')
     cons = [cl.PrimalProductCone(A @ y + b, K)]
     prob = cl.Problem(cl.MIN, cl.Expression([0]), cons)
     prob.solve(verbose=False, solver='ECOS')
     if not prob.value < 1e-7:
         if np.isnan(prob.value):  # pragma: no cover
             msg = 'PolyDomain constraints could not be verified as feasible.'
             msg += '\n Proceed with caution!'
             warnings.warn(msg)
         else:
             msg1 = 'PolyDomain constraints seem to be infeasible.\n'
             msg2 = 'Feasibility problem\'s status: ' + prob.status + '\n'
             msg3 = 'Feasibility problem\'s  value: ' + str(
                 prob.value) + '\n'
             msg4 = 'The objective was "minimize 0"; we expect problem value < 1e-7. \n'
             msg = msg1 + msg2 + msg3 + msg4
             raise RuntimeError(msg)
     pass
Example #29
    def suppfunc(self, y):
        """
        The support function of the convex set :math:`X` associated with this SigDomain,
        evaluated at :math:`y`:

        .. math::

            \\sigma_X(y) \\doteq \\max\\{ y^\\intercal x \\,:\\, x \\in X \\}.
        """
        if isinstance(y, cl.Expression):
            y = y.value
        if self._lift_x is None:
            self._lift_x = cl.Variable(self.A.shape[1])
        objective = y @ self._lift_x
        cons = [cl.PrimalProductCone(self.A @ self._lift_x + self.b, self.K)]
        prob = cl.Problem(cl.MAX, objective, cons)
        prob.solve(solver='ECOS', verbose=False)
        if prob.status == cl.FAILED:
            return np.inf
        else:
            return prob.value
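
A tiny standalone illustration of the same support-function computation, for the box X = [0, 1]^2 written with plain coniclifts constraints instead of PrimalProductCone; here sigma_X(y) is the sum of the positive parts of y.

import numpy as np

y = np.array([2.0, -3.0])
x = cl.Variable(shape=(2,))
prob = cl.Problem(cl.MAX, y @ x, [0 <= x, x <= 1])
prob.solve(solver='ECOS', verbose=False)
assert abs(prob.value - 2.0) < 1e-6  # only the positive component y[0] = 2 contributes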
Example #30
def relative_dual_sage_poly_cone(primal_poly, dual_var, name_base, log_AbK):
    """
    :param log_AbK:
    :param primal_poly: a Polynomial
    :param dual_var: a coniclifts Variable with ``dual_var.shape == (primal_poly.m, 1)``.
    :param name_base:

    :return: coniclifts Constraints over ``dual_var`` (and additional auxiliary variables, as
    necessary) so that ``dual_var`` defines a dual variable to the constraint that
    "``primal_poly`` is a SAGE polynomial."
    """
    sr, sr_cons = primal_poly.sig_rep
    evens = [i for i, row in enumerate(sr.alpha) if np.all(row % 2 == 0)]
    if len(evens) < sr.m:
        is_even = np.zeros(shape=(sr.m,), dtype=bool)
        is_even[evens] = True
        aux_v = cl.Variable(shape=(sr.m, 1), name='aux_v_{' + name_base + ' sage poly dual}')
        constrs = [sage_sigs.relative_dual_sage_cone(sr, aux_v, name_base + ' sigrep sage dual', log_AbK),
                   aux_v[is_even] == dual_var[is_even],
                   -aux_v[~is_even] <= dual_var[~is_even], dual_var[~is_even] <= aux_v[~is_even]]
    else:
        constrs = [sage_sigs.relative_dual_sage_cone(sr, dual_var, name_base + ' sigrep sage dual', log_AbK)]
    return constrs