Example 1
 def test_unconstrained_1(self):
     # Background
     #
     #       This example was constructed solely as a test case for sageopt.
     #
     #       We consider two polynomial optimization problems that are related
     #       to one another by a change of sign in one of the variables.
     #
     # Tests
     #
     #       (1) Verify primal / dual consistency for the two problems, at level (0, 0).
     #
     #       (2) Verify that the SAGE bound is the same for the two formulations.
     #
     alpha = np.array([[0, 0], [1, 1], [2, 2], [0, 2], [2, 0]])
     # First formulation
     p = Polynomial(alpha, np.array([1, -3, 1, 4, 4]))
     res0 = primal_dual_unconstrained(p, 0, sigrep_ell=0)
     assert abs(res0[0] - res0[1]) <= 1e-6
     # Second formulation
     p = Polynomial(alpha, np.array([1, 3, 1, 4, 4]))
     res1 = primal_dual_unconstrained(p, 0, sigrep_ell=0)
     assert abs(res1[0] - res1[1]) <= 1e-6
     # Check for same results between the two formulations
     expected = 1
     assert abs(res0[0] - expected) <= 1e-5
     assert abs(res1[0] - expected) <= 1e-5
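These tests call a helper ``primal_dual_unconstrained`` that is not shown in this listing. A minimal sketch of what such a helper presumably does, assuming sageopt-style ``poly_primal`` / ``poly_dual`` constructors and the coniclifts ``solve`` interface (the exact body is an assumption, not the project's code):

    def primal_dual_unconstrained(p, poly_ell, sigrep_ell=0, solver='ECOS'):
        # hypothetical reconstruction: build and solve both SAGE relaxations,
        # then return (primal bound, dual bound) for consistency checks
        prim = poly_primal(p, poly_ell=poly_ell, sigrep_ell=sigrep_ell)
        prim.solve(solver=solver, verbose=False)
        dual = poly_dual(p, poly_ell=poly_ell, sigrep_ell=sigrep_ell)
        dual.solve(solver=solver, verbose=False)
        return prim.value, dual.value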
Example 2
 def test_standard_monomials(self):
     x = standard_poly_monomials(3)
     y_actual = np.prod(x)
     y_expect = Polynomial.from_dict({(1, 1, 1): 1})
     assert TestPolynomials.are_equal(y_actual, y_expect)
     x = standard_poly_monomials(2)
     y_actual = np.sum(x)**2
     y_expect = Polynomial.from_dict({(2, 0): 1, (1, 1): 2, (0, 2): 1})
     assert TestPolynomials.are_equal(y_actual, y_expect)
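A quick illustration of what ``standard_poly_monomials`` returns, as a sketch: element ``x[i]`` is the Polynomial that picks out the i-th coordinate of its input.

    x = standard_poly_monomials(3)
    point = np.array([2.0, 3.0, 5.0])
    assert x[0](point) == 2.0            # x[0] is the coordinate function x_0
    assert np.prod(x)(point) == 30.0     # the monomial x_0 * x_1 * x_2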
Example 3
 def test_polynomial_exponentiation(self):
     p = Polynomial.from_dict({(0,): -1, (1,): 1})
     # square of (x-1)
     res = p ** 2
     expect = Polynomial.from_dict({(0,): 1, (1,): -2, (2,): 1})
     assert res == expect
     # cube of (2x+5)
     p = Polynomial.from_dict({(0,): 5, (1,): 2})
     expect = Polynomial.from_dict({(0,): 125, (1,): 150, (2,): 60, (3,): 8})
     res = p ** 3
     assert res == expect
     self.assertRaises(RuntimeError, Polynomial.__pow__, p, p)
Example 4
 def test_addition_and_subtraction(self):
     # data for tests
     s0 = Polynomial(np.array([[0], [1], [2]]), np.array([1, 2, 3]))
     t0 = Polynomial(np.array([[4]]), np.array([5]))
     # tests
     s = s0 - s0
     s = s.without_zeros()
     assert s.m == 1 and set(s.c) == {0}
     s = -s0 + s0
     s = s.without_zeros()
     assert s.m == 1 and set(s.c) == {0}
     s = s0 + t0
     assert s.alpha_c == {(0, ): 1, (1, ): 2, (2, ): 3, (4, ): 5}
Example 5
 def test_composition(self):
     p = Polynomial.from_dict({(2,): 1})  # represents lambda x: x ** 2
     z = Polynomial.from_dict({(1,): 2, (0,): -1})  # represents lambda x: 2*x - 1
     w = p(z)  # represents lambda x: (2*x - 1) ** 2
     assert w(0.5) == 0
     assert w(1) == 1
     assert w(0) == 1
     x = standard_poly_monomials(3)
     p = np.prod(x)
     y = standard_poly_monomials(2)
     expr = np.array([y[0], y[0]-y[1], y[1]])
     w = p(expr)
     assert w.n == 2
     assert w(np.array([1, 1])) == 0
     assert w(np.array([1, -2])) == -6
Example 6
 def test_polynomial_multiplication(self):
     # data for tests
     s0 = Polynomial(np.array([[0], [1], [2]]), np.array([1, 2, 3]))
     t0 = Polynomial(np.array([[1]]), np.array([1]))
     q0 = Polynomial(np.array([[5]]), np.array([0]))
     # tests
     s = s0 * t0
     s = s.without_zeros()
     assert s.alpha_c == {(1, ): 1, (2, ): 2, (3, ): 3}
     s = t0 * s0
     s = s.without_zeros()
     assert s.alpha_c == {(1, ): 1, (2, ): 2, (3, ): 3}
     s = s0 * q0
     s = s.without_zeros()
     assert s.alpha_c == {(0, ): 0}
Example 7
    def test_constant_location(self):
        s0 = Polynomial(np.array([[0], [1], [2]]), np.array([1, 2, 3]))
        t0 = Polynomial(np.array([[1]]), np.array([1]))
        q0 = Polynomial(np.array([[5]]), np.array([0]))

        # tests
        loc = s0.constant_location()
        assert (s0.alpha[loc, :] == 0).all()
        loc = t0.constant_location()
        assert loc is None
        loc = q0.constant_location()
        assert loc is None
Example 8
    def test_polynomial_division(self):
        s0 = Polynomial(np.array([[0], [1], [2]]), np.array([1, 2, 3]))
        t0 = Polynomial(np.array([[1]]), np.array([1]))
        q0 = Polynomial(np.array([[5]]), np.array([0]))

        # tests
        self.assertRaises(ValueError, Polynomial.__truediv__, s0, t0)
        s = s0 / 2
        s = s.without_zeros()
        assert s.alpha_c == {(0, ): 0.5, (1, ): 1, (2, ): 1.5}
        s = t0 / 4
        s = s.without_zeros()
        assert s.alpha_c == {(1, ): 0.25}
        s = q0 / 4
        s = s.without_zeros()
        assert s.alpha_c == {(5, ): 0}
Example 9
 def test_equal(self):
     s0 = Polynomial(np.array([[0], [1], [2]]), np.array([1, 2, 3]))
     assert s0 == s0
     alpha = np.array([[1, 0], [0, 1], [1, 1]])
     c = np.array([1, 2, 3])
     f = Signomial(alpha, c)
     assert f != s0
Example 10
def poly_constrained_primal(f, gts, eqs, p=0, q=1, ell=0, X=None):
    """
    Construct the primal SAGE-(p, q, ell) relaxation for the polynomial optimization problem

        inf{ f(x) : g(x) >= 0 for g in gts,
                    g(x) == 0 for g in eqs,
                    and x in X }

    where :math:`X = R^{\\texttt{f.n}}` by default.
    """
    lagrangian, ineq_lag_mults, _, gamma = make_poly_lagrangian(f, gts, eqs, p=p, q=q)
    metadata = {'lagrangian': lagrangian}
    if ell > 0:
        alpha_E_q = hierarchy_e_k([f] + list(gts) + list(eqs), k=1)
        modulator = Polynomial(2 * alpha_E_q, np.ones(alpha_E_q.shape[0])) ** ell
        lagrangian = lagrangian * modulator
        metadata['modulator'] = modulator
    # The Lagrangian (after possible multiplication, as above) must be a SAGE polynomial.
    con_name = 'Lagrangian sage poly'
    constrs = primal_sage_poly_cone(lagrangian, con_name, log_AbK=X)
    # Lagrange multipliers (for inequality constraints) must be SAGE polynomials.
    for s_h, _ in ineq_lag_mults:
        con_name = str(s_h) + ' domain'
        cons = primal_sage_poly_cone(s_h, con_name, log_AbK=X)
        constrs += cons
    # Construct the coniclifts problem.
    prob = cl.Problem(cl.MAX, gamma, constrs)
    prob.metadata = metadata
    cl.clear_variable_indices()
    return prob
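A hedged usage sketch for this constructor; the objective and disk constraint below are illustrative, and assume scalar arithmetic on Polynomials as used elsewhere in these examples.

    x = standard_poly_monomials(2)
    f = x[0] ** 4 + x[1] ** 4 - 3 * x[0] * x[1]
    gts = [1 - x[0] ** 2 - x[1] ** 2]    # feasible set: the unit disk
    prob = poly_constrained_primal(f, gts, [], p=0, q=1, ell=0)
    prob.solve(verbose=False)
    # prob.value lower-bounds the minimum of f over the disk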
Example 11
def poly_constrained_dual(f, gts, eqs, p=0, q=1, ell=0, X=None, slacks=False):
    """
    Construct the dual SAGE-(p, q, ell) relaxation for the polynomial optimization problem

        inf{ f(x) : g(x) >= 0 for g in gts,
                    g(x) == 0 for g in eqs,
                    and x in X }

    where :math:`X = R^{\\texttt{f.n}}` by default.
    """
    lagrangian, ineq_lag_mults, eq_lag_mults, _ = make_poly_lagrangian(f, gts, eqs, p=p, q=q)
    metadata = {'lagrangian': lagrangian, 'f': f, 'gts': gts, 'eqs': eqs, 'X': X}
    if ell > 0:
        alpha_E_1 = hierarchy_e_k([f, f.upcast_to_polynomial(1)] + gts + eqs, k=1)
        modulator = Polynomial(2 * alpha_E_1, np.ones(alpha_E_1.shape[0])) ** ell
        lagrangian = lagrangian * modulator
        f = f * modulator
    else:
        modulator = f.upcast_to_polynomial(1)
    metadata['modulator'] = modulator
    # In primal form, the Lagrangian is constrained to be a SAGE polynomial.
    # Introduce a dual variable "v" for this constraint.
    v = cl.Variable(shape=(lagrangian.m, 1), name='v')
    metadata['v_poly'] = v
    constraints = relative_dual_sage_poly_cone(lagrangian, v, 'Lagrangian', log_AbK=X)
    for s_g, g in ineq_lag_mults:
        # These generalized Lagrange multipliers "s_g" are SAGE polynomials.
        # For each such multiplier, introduce an appropriate dual variable "v_g", along
        # with constraints over that dual variable.
        g_m = g * modulator
        c_g = sym_corr.moment_reduction_array(s_g, g_m, lagrangian)
        name_base = 'v_' + str(g)
        if slacks:
            v_g = cl.Variable(name=name_base, shape=(s_g.m, 1))
            con = c_g @ v == v_g
            con.name += str(g) + ' >= 0'
            constraints.append(con)
        else:
            v_g = c_g @ v
        constraints += relative_dual_sage_poly_cone(s_g, v_g,
                                                    name_base=(name_base + ' domain'), log_AbK=X)
    for z_g, g in eq_lag_mults:
        # These generalized Lagrange multipliers "z_g" are arbitrary polynomials.
        # They dualize to homogeneous equality constraints.
        g_m = g * modulator
        c_g = sym_corr.moment_reduction_array(z_g, g_m, lagrangian)
        con = c_g @ v == 0
        con.name += str(g) + ' == 0'
        constraints.append(con)
    # Equality constraint (for the Lagrangian to be bounded).
    a = sym_corr.relative_coeff_vector(modulator, lagrangian.alpha)
    constraints.append(a.T @ v == 1)
    # Define the dual objective function.
    obj_vec = sym_corr.relative_coeff_vector(f, lagrangian.alpha)
    obj = obj_vec.T @ v
    # Return the coniclifts Problem.
    prob = cl.Problem(cl.MIN, obj, constraints)
    prob.metadata = metadata
    cl.clear_variable_indices()
    return prob
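A hedged sketch pairing this with the primal constructor above; with the same illustrative data, the two bounds should typically agree, mirroring the primal / dual consistency checks in the tests.

    x = standard_poly_monomials(2)
    f = x[0] ** 4 + x[1] ** 4 - 3 * x[0] * x[1]
    gts = [1 - x[0] ** 2 - x[1] ** 2]
    d_prob = poly_constrained_dual(f, gts, [], p=0, q=1, ell=0)
    d_prob.solve(verbose=False)
    p_prob = poly_constrained_primal(f, gts, [], p=0, q=1, ell=0)
    p_prob.solve(verbose=False)
    # expect d_prob.value and p_prob.value to match up to solver tolerance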
Example 12
def poly_dual(f, poly_ell=0, sigrep_ell=0, X=None):
    if poly_ell == 0:
        sr, _ = f.sig_rep
        prob = sage_sigs.sig_dual(sr, sigrep_ell, X=X)
        if AUTO_CLEAR_INDICES:  # pragma:no cover
            cl.clear_variable_indices()
        return prob
    elif sigrep_ell == 0:
        modulator = f.standard_multiplier()**poly_ell
        gamma = cl.Variable()
        lagrangian = (f - gamma) * modulator
        v = cl.Variable(shape=(lagrangian.m, 1), name='v')
        con_base_name = v.name + ' domain'
        constraints = relative_dual_sage_poly_cone(lagrangian,
                                                   v,
                                                   con_base_name,
                                                   log_AbK=X)
        a = sym_corr.relative_coeff_vector(modulator, lagrangian.alpha)
        constraints.append(a.T @ v == 1)
        f_mod = Polynomial(f.alpha, f.c) * modulator
        obj_vec = sym_corr.relative_coeff_vector(f_mod, lagrangian.alpha)
        obj = obj_vec.T @ v
        prob = cl.Problem(cl.MIN, obj, constraints)
        if AUTO_CLEAR_INDICES:  # pragma:no cover
            cl.clear_variable_indices()
        return prob
    else:  # pragma: no cover
        raise NotImplementedError()
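A minimal usage sketch for this dual constructor, on a polynomial whose minimum is known; the data is illustrative.

    p = Polynomial.from_dict({(0,): 1, (1,): -2, (2,): 1})   # (x - 1) ** 2
    prob = poly_dual(p, poly_ell=0, sigrep_ell=0)
    prob.solve(verbose=False)
    # prob.value lower-bounds inf p(x) = 0; here the SAGE bound should be near 0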
Example 13
 def test_unconstrained_3(self):
     # Minimization of the six-hump camel back function.
     p = Polynomial.from_dict({(0, 0): 0,
                               (2, 0): 4,
                               (1, 1): 1,
                               (0, 2): -4,
                               (4, 0): -2.1,
                               (0, 4): 4,
                               (6, 0): 1.0 / 3.0})
     # sigrep_ell=0 has a decent bound, and sigrep_ell=1 is nearly optimal.
     # ECOS is unable to solve sigrep_ell=2 due to conditioning problems.
     # MOSEK easily solves sigrep_ell=2, and the resulting bound is globally optimal.
     res00 = primal_dual_unconstrained(p, poly_ell=0, sigrep_ell=0)
     expect00 = -1.18865
     assert abs(res00[0] - res00[1]) <= 1e-6
     assert abs(res00[0] - expect00) <= 1e-3
     res10 = primal_dual_unconstrained(p, poly_ell=1, sigrep_ell=0)
     expect10 = -1.03416
     assert abs(res10[0] - res10[1]) < 1e-6
     assert abs(res10[0] - expect10) <= 1e-3
     if cl.Mosek.is_installed():
         res01 = primal_dual_unconstrained(p, poly_ell=0, sigrep_ell=1, solver='MOSEK')
         expect01 = -1.03221
         assert abs(res01[0] - res01[1]) <= 1e-6
         assert abs(res01[0] - expect01) <= 1e-3
         res02 = primal_dual_unconstrained(p, poly_ell=0, sigrep_ell=2, solver='MOSEK')
         expect02 = -1.0316
         assert abs(res02[0] - res02[1]) < 1e-6
         assert abs(res02[0] - expect02) <= 1e-3
Example 14
 def test_addition_and_subtraction(self):
     # data for tests
     s0 = Polynomial(np.array([[0], [1], [2]]), np.array([1, 2, 3]))
     t0 = Polynomial(np.array([[4]]), np.array([5]))
     # tests
     s = s0 - s0
     s = s.without_zeros()
     assert s.m == 1 and set(s.c) == {0}
     s = -s0 + s0
     s = s.without_zeros()
     assert s.m == 1 and set(s.c) == {0}
     s = s0 + t0
     assert s.alpha_c == {(0, ): 1, (1, ): 2, (2, ): 3, (4, ): 5}
     alpha = np.array([[1, 0], [0, 1], [1, 1]])
     c = np.array([1, 2, 3])
     f = Signomial(alpha, c)
     self.assertRaises(ValueError, Polynomial.__add__, s0, f)
Example 15
def hierarchy_e_k(polys, k):
    alphas = [s.alpha for s in polys]
    alpha = np.vstack(alphas)
    alpha = np.unique(alpha, axis=0)
    c = np.ones(shape=(alpha.shape[0],))
    s = Polynomial(alpha, c)
    s = s ** k
    return s.alpha
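A small worked example of this helper: for p = 1 + x the exponent set is {0, 1}, and squaring gives exponents {0, 1, 2}.

    p = Polynomial(np.array([[0], [1]]), np.array([1.0, 1.0]))   # p(x) = 1 + x
    print(hierarchy_e_k([p], k=2))   # rows [0], [1], [2]: the exponents of (1 + x) ** 2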
Example 16
 def test_sigrep_3(self):
     alpha = np.random.randint(low=1, high=10, size=(10, 3))
     alpha *= 2
     c = np.random.randn(10)
     p = Polynomial(alpha, c)
     # The signomial representative has the same exponents and coeffs.
     sr, sr_cons = p.sig_rep
     assert len(sr_cons) == 0
     assert p.alpha_c == sr.alpha_c
Example 17
    def as_polynomial(self):
        """
        This function is only applicable if ``alpha`` is a matrix of nonnegative integers.

        Returns
        -------
        f : Polynomial
            For every vector ``x``, we have ``self(x) == f(np.exp(x))``.
        """
        from sageopt.symbolic.polynomials import Polynomial
        f = Polynomial(self.alpha, self.c)
        return f
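A quick check of the documented identity ``self(x) == f(np.exp(x))``, as a sketch:

    s = Signomial.from_dict({(1,): 2.0})    # s(x) = 2 * exp(x)
    f = s.as_polynomial()                   # f(y) = 2 * y
    x = np.array([1.0])
    assert abs(s(x) - f(np.exp(x))) < 1e-12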
Example 18
 def test_sigrep_2(self):
     c33 = cl.Variable(shape=(), name='c33')
     alpha = np.array([[0, 0], [1, 1], [3, 3]])
     c = cl.Expression([0, -1, c33])
     p = Polynomial(alpha, c)
     sr, sr_cons = p.sig_rep
     assert len(sr_cons) == 2
     var_names = set(v.name for v in sr_cons[0].variables())
     var_names |= set(v.name for v in sr_cons[1].variables())
     for v in var_names:
         assert v == 'c33' or v == str(p) + ' variable sigrep coefficients'
     assert sr.alpha_c[(1, 1)] == -1
Example 19
    def test_polynomial_multiplication(self):
        # data for tests
        s0 = Polynomial(np.array([[0], [1], [2]]), np.array([1, 2, 3]))
        t0 = Polynomial(np.array([[1]]), np.array([1]))
        q0 = Polynomial(np.array([[5]]), np.array([0]))
        alpha = np.array([[1, 0], [0, 1], [1, 1]])
        c = np.array([1, 2, 3])
        f = Signomial(alpha, c)

        # tests
        s = s0 * t0
        s = s.without_zeros()
        assert s.alpha_c == {(1, ): 1, (2, ): 2, (3, ): 3}
        s = t0 * s0
        s = s.without_zeros()
        assert s.alpha_c == {(1, ): 1, (2, ): 2, (3, ): 3}
        s = s0 * q0
        s = s.without_zeros()
        assert s.alpha_c == {(0, ): 0}
        self.assertRaises(ValueError, Polynomial.__sub__, s0, f)
        self.assertRaises(ValueError, Polynomial.__mul__, s0, f)
Example 20
def sage_multiplier_search(f, level=1, X=None):
    """
    Constructs a coniclifts maximization Problem which is feasible if ``f`` can be certified as nonnegative
    over ``X``, by using an appropriate X-SAGE modulating function.

    Parameters
    ----------
    f : Polynomial
        We want to test if ``f`` is nonnegative over ``X``.
    level : int
        Controls the complexity of the X-SAGE modulating function. Must be a positive integer.
    X : PolyDomain or None
        If ``X`` is None, then we test nonnegativity of ``f`` over :math:`R^{\\texttt{f.n}}`.

    Returns
    -------
    prob : sageopt.coniclifts.Problem

    Notes
    -----
    This function provides an alternative to moving up the reference SAGE hierarchy, for the
    goal of certifying nonnegativity of a polynomial ``f`` over some set ``X`` where ``|X|``
    is log-convex. In general, the approach is to introduce a polynomial

        ``mult = Polynomial(alpha_hat, c_tilde)``

    where the rows of alpha_hat are all "level"-wise sums of rows from ``f.alpha``, and ``c_tilde``
    is a coniclifts Variable defining a nonzero SAGE polynomial. Then we can check if
    ``f_mod = f * mult`` is SAGE for any choice of ``c_tilde``.
    """
    constraints = []
    # Make the multiplier polynomial (and require that it be SAGE)
    mult_alpha = hierarchy_e_k([f], k=level)
    c_tilde = cl.Variable(shape=(mult_alpha.shape[0], ), name='c_tilde')
    mult = Polynomial(mult_alpha, c_tilde)
    temp_cons = primal_sage_poly_cone(mult,
                                      name=(c_tilde.name + ' domain'),
                                      log_AbK=X)
    constraints += temp_cons
    constraints.append(cl.sum(c_tilde) >= 1)
    # Make "f_mod := f * mult", and require that it be SAGE.
    f_mod = mult * f
    temp_cons = primal_sage_poly_cone(f_mod, name='f_mod sage poly', log_AbK=X)
    constraints += temp_cons
    # noinspection PyTypeChecker
    prob = cl.Problem(cl.MAX, 0, constraints)
    if AUTO_CLEAR_INDICES:  # pragma:no cover
        cl.clear_variable_indices()
    return prob
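A hedged usage sketch, using the Motzkin polynomial (nonnegative on R^2, with an AM/GM-style certificate); how best to read off feasibility from the solved problem is an assumption here, not something the source specifies.

    x = standard_poly_monomials(2)
    f = x[0]**4 * x[1]**2 + x[0]**2 * x[1]**4 + 1 - 3 * x[0]**2 * x[1]**2
    prob = sage_multiplier_search(f, level=1)
    prob.solve(verbose=False)
    # feasibility of this problem certifies f >= 0 over R^2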
Example 21
 def test_unconstrained_2(self):
     # Background
     #
     #       Unconstrained minimization of a polynomial in 2 variables.
     #       This is Example 4.1 from a 2018 paper by Seidler and de Wolff
     #       (https://arxiv.org/abs/1808.08431).
     #
     # Tests
     #
     #       (1) primal / dual consistency for (poly_ell, sigrep_ell) \in {(0, 0), (1, 0), (0, 1)}.
     #
     #       (2) Show that the bound with (poly_ell=0, sigrep_ell=1) is stronger than
     #           the bound with (poly_ell=1, sigrep_ell=0).
     #
     # Notes
     #
     #       The global minimum of this polynomial (as verified by gloptipoly3) is 0.85018.
     #
     #       The furthest we could progress up the hierarchy before encountering a solver failure
     #       was (poly_ell=0, sigrep_ell=5). In this case the SAGE bound was 0.8336.
     #
     p = Polynomial.from_dict({
         (0, 0): 1,
         (2, 6): 3,
         (6, 2): 2,
         (2, 2): 6,
         (1, 2): -1,
         (2, 1): 2,
         (3, 3): -3
     })
     res00 = primal_dual_unconstrained(p, poly_ell=0, sigrep_ell=0)
     expect00 = 0.6932
     assert abs(res00[0] - res00[1]) <= 1e-6
     assert abs(res00[0] - expect00) <= 1e-3
     res10 = primal_dual_unconstrained(p, poly_ell=1, sigrep_ell=0)
     expect10 = 0.7587
     assert abs(res10[0] - res10[1]) <= 1e-5
     assert abs(res10[0] - expect10) <= 1e-3
     if cl.Mosek.is_installed():
         # ECOS fails
         res01 = primal_dual_unconstrained(p,
                                           poly_ell=0,
                                           sigrep_ell=1,
                                           solver='MOSEK')
         expect01 = 0.7876
         assert abs(res01[0] - res01[1]) <= 1e-5
         assert abs(res01[0] - expect01) <= 1e-3
Example 22
 def test_scalar_multiplication(self):
     # data for tests
     alpha0 = np.array([[0], [1], [2]])
     c0 = np.array([1, 2, 3])
     s0 = Polynomial(alpha0, c0)
     # Tests
     s = 2 * s0
     # noinspection PyTypeChecker
     assert set(s.c) == set(2 * s0.c)
     s = s0 * 2
     # noinspection PyTypeChecker
     assert set(s.c) == set(2 * s0.c)
     s = 1 * s0
     assert s.alpha_c == s0.alpha_c
     s = 0 * s0
     s = s.without_zeros()
     assert s.m == 1 and set(s.c) == {0}
Example 23
 def test_composition_sigs(self):
     p = Polynomial.from_dict({
         (1, ): 2,
         (0, ): -1
     })  # represents lambda x: 2*x - 1
     s = Signomial.from_dict({(2, ): -1, (0, ): 1})
     f = p(s)  # represents lambda x: -2*exp(2*x) + 1
     self.assertAlmostEqual(f(0.5), -2 * np.exp(1.0) + 1, places=4)
     self.assertAlmostEqual(f(1), -2 * np.exp(2.0) + 1, places=4)
     p = np.prod(standard_poly_monomials(3))
     exp_x = standard_sig_monomials(2)
     sig_vec = np.array([exp_x[0], exp_x[0] - exp_x[1], 1.0 / exp_x[1]])
     f = p(sig_vec)
     self.assertEqual(f.n, 2)
     self.assertEqual(f(np.array([1, 1])), 0)
     x_test = np.array([-3, 3])
     self.assertAlmostEqual(f(x_test),
                            np.exp(-6) * (np.exp(-3) - np.exp(3)),
                            places=4)
Example 24
 def test_sigrep_1(self):
     p = Polynomial.from_dict({(0, 0): -1, (1, 2): 1, (2, 2): 10})
     gamma = cl.Variable(shape=(), name='gamma')
     p -= gamma
     sr, sr_cons = p.sig_rep
     # Even though there is a Variable in p.c, no auxiliary
     # variables should have been introduced by defining this
     # signomial representative.
     assert len(sr_cons) == 0
     count_nonconstants = 0
     for i, ci in enumerate(sr.c):
         if isinstance(ci, cl.base.ScalarExpression):
             if not ci.is_constant():
                 assert len(ci.variables()) == 1
                 count_nonconstants += 1
                 assert ci.variables()[0].name == 'gamma'
         elif sr.alpha[i, 0] == 1 and sr.alpha[i, 1] == 2:
             assert ci == -1
         elif sr.alpha[i, 0] == 2 and sr.alpha[i, 1] == 2:
             assert ci == 10
         else:
             assert False
     assert count_nonconstants == 1
Example 25
def poly_solrec(prob, ineq_tol=1e-8, eq_tol=1e-6, skip_ls=False, **kwargs):
    """
    Recover a list of candidate solutions from a dual SAGE relaxation. Solutions are
    guaranteed to be feasible up to specified tolerances, but not necessarily optimal.

    Parameters
    ----------
    prob : coniclifts.Problem
        A dual-form SAGE relaxation, from ``poly_constrained_relaxation``.

    ineq_tol : float
        The amount by which recovered solutions can violate inequality constraints.

    eq_tol : float
        The amount by which recovered solutions can violate equality constraints.

    skip_ls : bool
        Whether or not to skip least-squares solution recovery.

    Returns
    -------
    sols : list of ndarrays
        A list of feasible solutions, sorted in increasing order of objective function value.
        It is possible that this list is empty, in which case no feasible solutions were recovered.

    Notes
    -----
    This function accepts the following keyword arguments:

    zero_tol : float
        Used in magnitude recovery. If a component of the Lagrangian's moment vector is smaller
        than this (in absolute value), pretend it's zero in the least-squares step. Defaults to 1e-20.

    heuristic_signs : bool
        Used in sign recovery. If True, then attempts to infer variable signs from the Lagrangian's
        moment vector even when a completely consistent set of signs does not exist. Defaults to True.

    all_signs : bool
        Used in sign recovery. If True, then consider returning solutions which differ only by sign.
        Defaults to True.

    This function is implemented only for poly_constrained_relaxation (not poly_relaxation).
    """
    zero_tol = kwargs.get('zero_tol', 1e-20)
    heuristic = kwargs.get('heuristic_signs', True)
    all_signs = kwargs.get('all_signs', True)
    metadata = prob.metadata
    f = metadata['f']
    lag_gts = metadata['gts']
    lag_eqs = metadata['eqs']
    lagrangian = _make_dummy_lagrangian(f, lag_gts, lag_eqs)
    con = prob.constraints[0]
    alpha = con.alpha
    dummy_modulated_lagrangian = Polynomial(
        alpha, np.ones(shape=(alpha.shape[0], )))  # coefficients don't matter
    modulator = metadata['modulator']
    v = metadata['v_poly'].value  # possible that v_sig and v are the same
    if np.any(np.isnan(v)):
        return []
    M = moment_reduction_array(lagrangian, modulator,
                               dummy_modulated_lagrangian)
    v_reduced = M @ v
    alpha_reduced = lagrangian.alpha
    mags = variable_magnitudes(con, alpha_reduced, v_reduced, zero_tol,
                               skip_ls)
    signs = variable_sign_patterns(alpha_reduced, v_reduced, heuristic,
                                   all_signs)
    # Now we need to build the candidate solutions, and check them for feasibility.
    if con.X is not None:
        gts = lag_gts + [g for g in con.X.gts]
        eqs = lag_eqs + [g for g in con.X.eqs]
    else:
        gts = lag_gts
        eqs = lag_eqs
    solutions = []
    for mag in mags:
        for sign in signs:
            x = mag * sign  # elementwise
            if is_feasible(x, gts, eqs, ineq_tol, eq_tol):
                solutions.append(x)
    solutions.sort(key=lambda xi: f(xi))
    return solutions
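A hedged end-to-end sketch of solution recovery; the problem data is illustrative, and recovery can legitimately return an empty list.

    x = standard_poly_monomials(2)
    f = (x[0] - 1) ** 2 + (x[1] + 2) ** 2    # minimized at (1, -2)
    gts = [x[0]]                             # constraint: x_0 >= 0
    prob = poly_constrained_dual(f, gts, [], p=0, q=1)
    prob.solve(verbose=False)
    sols = poly_solrec(prob)                 # feasible candidates, best objective first
    if sols:
        print(sols[0], f(sols[0]))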
Example 26
def make_poly_lagrangian(f, gts, eqs, p, q):
    """
    Given a problem

    .. math::

        \\begin{align*}
          \\min\\{ f(x) :~& g(x) \\geq 0 \\text{ for } g \\in \\text{gts}, \\\\
                       & g(x) = 0  \\text{ for } g \\in \\text{eqs}, \\\\
                       & \\text{and } x \\in X \}
        \\end{align*}

    construct the q-fold constraints ``q-gts`` and ``q-eqs``, by taking all products
    of ``<= q`` elements from ``gts`` and ``eqs`` respectively. Then form the Lagrangian

    .. math::

        L = f - \\gamma
            - \\sum_{g \\, \\in \\, \\text{q-gts}} s_g \\cdot g
            - \\sum_{g \\, \\in \\, \\text{q-eqs}} z_g \\cdot g

    where :math:`\\gamma` is a coniclifts Variable of dimension 1, and the coefficients
    on Polynomials  :math:`s_g` and :math:`z_g` are coniclifts Variables of a dimension
    determined by ``p``.

    Parameters
    ----------
    f : Polynomial
        The objective in a desired minimization problem.
    gts : list of Polynomials
        For every ``g in gts``, there is a desired constraint that variables ``x`` satisfy ``g(x) >= 0``.
    eqs : list of Polynomials
        For every ``g in eqs``, there is a desired constraint that variables ``x`` satisfy ``g(x) == 0``.
    p : int
        Controls the complexity of ``s_g`` and ``z_g``.
    q : int
        The number of folds of constraints ``gts`` and ``eqs``.

    Returns
    -------

    L : Polynomial
        ``L.c`` is an affine expression of coniclifts Variables.

    ineq_dual_polys : a list of pairs of Polynomials.
        If the pair ``(s_g, g)`` is in this list, then ``s_g`` is a generalized Lagrange multiplier
        to the constraint ``g(x) >= 0``.

    eq_dual_polys : a list of pairs of Polynomials.
        If the pair ``(z_g, g)`` is in this list, then ``z_g`` is a generalized Lagrange multiplier to the
        constraint ``g(x) == 0``.

    gamma : coniclifts.Variable.
        In primal-form SAGE relaxations, we want to maximize ``gamma``. In dual form SAGE relaxations,
        ``gamma`` induces a normalizing equality constraint.

    Notes
    -----
    The Lagrange multipliers ``s_g`` and ``z_g`` share a common matrix of exponent vectors,
    which we call ``alpha_hat``.

    When ``p = 0``, ``alpha_hat`` consists of a single row, of all zeros. In this case,
    ``s_g`` and ``z_g`` are constant Polynomials, and the coefficient vectors ``s_g.c``
    and ``z_g.c`` are effectively scalars. When ``p > 0``, the rows of ``alpha_hat`` are
    *initially* set set to all ``p``-wise sums  of exponent vectors appearing in either ``f``,
    or some ``g in gts``,  or some ``g in eqs``. Then we replace ::

        alpha_hat = np.vstack([2 * alpha_hat, alpha_hat])
        alpha_multiplier = np.unique(alpha_hat, axis=0)

    This has the effect of improving performance for problems where ``alpha_hat`` would otherwise
    contain very few rows in the even integer lattice.
    """
    folded_gt = con_gen.up_to_q_fold_cons(gts, q)
    gamma = cl.Variable(name='gamma')
    L = f - gamma
    alpha_E_p = hierarchy_e_k([f, f.upcast_to_polynomial(1)] + gts + eqs, k=p)
    alpha_multiplier = np.vstack([2 * alpha_E_p, alpha_E_p])
    alpha_multiplier = np.unique(alpha_multiplier, axis=0)
    ineq_dual_polys = []
    for g in folded_gt:
        s_g_coeff = cl.Variable(name='s_' + str(g), shape=(alpha_multiplier.shape[0],))
        s_g = Polynomial(alpha_multiplier, s_g_coeff)
        L -= s_g * g
        ineq_dual_polys.append((s_g, g))
    eq_dual_polys = []
    folded_eq = con_gen.up_to_q_fold_cons(eqs, q)
    for g in folded_eq:
        z_g_coeff = cl.Variable(name='z_' + str(g), shape=(alpha_multiplier.shape[0],))
        z_g = Polynomial(alpha_multiplier, z_g_coeff)
        L -= z_g * g
        eq_dual_polys.append((z_g, g))
    return L, ineq_dual_polys, eq_dual_polys, gamma
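A minimal sketch of calling this constructor directly (illustrative data; it is normally invoked for you by ``poly_constrained_primal`` / ``poly_constrained_dual``):

    x = standard_poly_monomials(1)
    f = x[0] ** 2
    gts = [x[0] - 1]                  # desired constraint: x >= 1
    L, ineq_duals, eq_duals, gamma = make_poly_lagrangian(f, gts, [], p=0, q=1)
    s_g, g = ineq_duals[0]
    # with p=0, s_g is a constant polynomial and L == f - gamma - s_g * g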
Example 27
 def test_polynomial_hess_val(self):
     f = Polynomial.from_dict({(3, ): 1, (0, ): -1})
     actual = f.hess_val(np.array([0.1234]))
     expect = 3 * 2 * 0.1234
     assert abs(actual[0] - expect) < 1e-8
Example 28
 def test_sigrep_1(self):
     p = Polynomial.from_dict({(0, 0): -1, (1, 2): 1, (2, 2): 10})
     # One non-even lattice point (the only one) changes sign.
     sr, sr_cons = p.sig_rep
     assert len(sr_cons) == 0
     assert sr.alpha_c == {(0, 0): -1, (1, 2): -1, (2, 2): 10}
Example 29
 def test_sigrep_2(self):
     p = Polynomial.from_dict({(0, 0): 0, (1, 1): -1, (3, 3): 5})
     # One non-even lattice point changes sign, another stays the same
     sr, sr_cons = p.sig_rep
     assert len(sr_cons) == 0
     assert sr.alpha_c == {(0, 0): 0, (1, 1): -1, (3, 3): -5}
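The rule at work in the last two sig_rep examples, restated as a sketch inferred from the tests (the helper below is hypothetical, not sageopt API): a row of ``p.alpha`` with constant coefficient c keeps c when every exponent is even, and maps to -|c| otherwise.

    def sig_rep_coeff(alpha_row, c):
        # hypothetical restatement of the rule exercised by the tests above
        return c if all(a % 2 == 0 for a in alpha_row) else -abs(c)

    assert sig_rep_coeff((1, 2), 1) == -1     # matches test_sigrep_1
    assert sig_rep_coeff((3, 3), 5) == -5     # matches test_sigrep_2
    assert sig_rep_coeff((2, 2), 10) == 10    # even rows are unchanged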
Example 30
 def test_as_signomial(self):
     s0 = Polynomial(np.array([[0], [1], [2]]), np.array([1, 2, 3]))
     f = s0.as_signomial()
     assert s0.alpha_c == f.alpha_c