def test_standard_monomials(self):
    x = standard_poly_monomials(3)
    y_actual = np.prod(x)
    y_expect = Polynomial.from_dict({(1, 1, 1): 1})
    assert TestPolynomials.are_equal(y_actual, y_expect)
    x = standard_poly_monomials(2)
    y_actual = np.sum(x) ** 2
    y_expect = Polynomial.from_dict({(2, 0): 1, (1, 1): 2, (0, 2): 1})
    assert TestPolynomials.are_equal(y_actual, y_expect)

def test_composition(self):
    p = Polynomial.from_dict({(2,): 1})  # represents lambda x: x ** 2
    z = Polynomial.from_dict({(1,): 2, (0,): -1})  # represents lambda x: 2*x - 1
    w = p(z)  # represents lambda x: (2*x - 1) ** 2
    assert w(0.5) == 0
    assert w(1) == 1
    assert w(0) == 1
    x = standard_poly_monomials(3)
    p = np.prod(x)
    y = standard_poly_monomials(2)
    expr = np.array([y[0], y[0] - y[1], y[1]])
    w = p(expr)
    assert w.n == 2
    assert w(np.array([1, 1])) == 0
    assert w(np.array([1, -2])) == -6

def test_multiplier_search(self):
    # Background
    #
    # This example comes from Proposition 14 of a 2017 paper by Ahmadi and Majumdar.
    # It concerns nonnegativity of the polynomial
    #   p(x1, x2, x3) = (x1 + x2 + x3)**2 + a*(x1**2 + x2**2 + x3**2)
    # for values of "a" in (0, 1).
    #
    # Tests
    #
    # (1) Find a SAGE polynomial "f1" (over the same exponents as p) so that
    #     when a = 0.35, the product f1 * p is a SAGE polynomial.
    #
    # (2) Find a SAGE polynomial "f2" (over the same exponents as p**2) so that
    #     when a = 0.15, the product f2 * p is a SAGE polynomial.
    #
    # Notes
    #
    # In a previous version of sageopt, ECOS could also be run on these tests, but
    # we needed larger values of "a" to avoid solver failures. Now, ECOS cannot
    # solve a level 2 relaxation for any interesting value of "a". It is not known
    # at what point sageopt's problem compilation started generating problems that
    # ECOS could not solve.
    #
    x = standard_poly_monomials(3)
    p = np.sum(x) ** 2 + 0.35 * (x[0] ** 2 + x[1] ** 2 + x[2] ** 2)
    res1 = sage_multiplier_search(p, level=1).solve(verbose=False)
    assert abs(res1[1]) < 1e-8
    if cl.Mosek.is_installed():
        p -= 0.2 * (x[0] ** 2 + x[1] ** 2 + x[2] ** 2)
        res2 = sage_multiplier_search(p, level=2).solve(verbose=False)
        assert abs(res2[1]) < 1e-8

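# For a > 0, the polynomial p above is trivially nonnegative (it is a sum of a
# square and a positively weighted sum of squares); the substance of the test is
# whether that nonnegativity can be *certified* by a SAGE multiplier. The
# hypothetical helper below (numpy only, not part of the test suite) is a
# minimal sketch of the nonnegativity claim itself.
def _check_prop14_nonnegativity(a=0.35, num_samples=1000):
    rng = np.random.default_rng(0)
    pts = rng.uniform(-10.0, 10.0, size=(num_samples, 3))
    vals = np.sum(pts, axis=1) ** 2 + a * np.sum(pts ** 2, axis=1)
    assert np.all(vals >= 0)
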
def test_ordinary_constrained_1(self):
    # Background
    #
    # This polynomial is "wrig_5" from a 2008 paper by Ray and Nataraj.
    # We minimize and maximize this polynomial over the box [-5, 5]^5 \subset R^5.
    # The minimum and maximum are reported as -30.25 and 40, respectively.
    # The reported bounds can be certified by SAGE relaxations.
    #
    # Tests
    #
    # (1) minimization: similar primal / dual objectives for (p, q, ell) = (0, 1, 0).
    #
    # (2) maximization: similar primal / dual objectives for (p, q, ell) = (0, 2, 0).
    #
    n = 5
    x = standard_poly_monomials(n)
    f = x[4] ** 2 + x[0] + x[1] + x[2] + x[3] - x[4] - 10
    lower_gs = [x[i] - (-5) for i in range(n)]
    upper_gs = [5 - x[i] for i in range(n)]
    gts = lower_gs + upper_gs
    claimed_min = -30.25
    claimed_max = 40
    res_min, _ = primal_dual_constrained(f, gts, [], 0, 1, 0, None)
    assert abs(res_min[0] - claimed_min) < 1e-5
    assert abs(res_min[1] - claimed_min) < 1e-5
    res_max, _ = primal_dual_constrained(-f, gts, [], 0, 2, 0, None)
    res_max = [-res_max[0], -res_max[1]]
    assert abs(res_max[0] - claimed_max) < 1e-5
    assert abs(res_max[1] - claimed_max) < 1e-5

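# Because f is separable, the claimed extrema can be verified directly: the
# linear terms contribute -5 or +5 per coordinate, and x4**2 - x4 ranges over
# [-0.25, 30] on [-5, 5]. The hypothetical helper below (numpy only, not part
# of the test suite) is a minimal sketch of that arithmetic.
def _check_wrig5_bounds():
    t = np.linspace(-5, 5, 100001)        # grid containing the critical point t = 0.5
    quad = t ** 2 - t                     # the x4 contribution to f
    f_min = 4 * (-5) + np.min(quad) - 10  # linear terms at -5; min(quad) = -0.25
    f_max = 4 * 5 + np.max(quad) - 10     # linear terms at +5; max(quad) = 30, at t = -5
    assert abs(f_min - (-30.25)) < 1e-9
    assert abs(f_max - 40.0) < 1e-9
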
def test_infer_box_polydomain(self):
    bounds = [(-0.1, 0.4), (0.4, 1), (-0.7, -0.4), (-0.7, 0.4),
              (0.1, 0.2), (-0.1, 0.2), (-0.3, 1.1), (-1.1, -0.3)]
    x = standard_poly_monomials(8)
    gp_gs = [0.4 ** 2 - x[0] ** 2,
             1 - x[1] ** 2, x[1] ** 2 - 0.4 ** 2,
             0.7 ** 2 - x[2] ** 2, x[2] ** 2 - 0.4 ** 2,
             0.7 ** 2 - x[3] ** 2,
             0.2 ** 2 - x[4] ** 2, x[4] ** 2 - 0.1 ** 2,
             0.2 ** 2 - x[5] ** 2,
             1.1 ** 2 - x[6] ** 2,
             1.1 ** 2 - x[7] ** 2, x[7] ** 2 - 0.3 ** 2]
    lower_gs = [x[i] - lb for i, (lb, ub) in enumerate(bounds)]
    upper_gs = [ub - x[i] for i, (lb, ub) in enumerate(bounds)]
    gts = lower_gs + upper_gs + gp_gs
    dummy_f = x[0]
    dom = infer_domain(dummy_f, gts, [])
    assert dom.A.shape == (12, 8)
    assert len(dom.gts) == 12
    assert len(dom.eqs) == 0
    x0 = np.array([-0.1, 1, -0.6, 0, 0.2, 0.2, -0.3, -1.05])
    is_in = dom.check_membership(x0, tol=1e-10)
    assert is_in
    x1 = x0.copy()
    x1[7] = -1.11
    is_in = dom.check_membership(x1, tol=1e-5)
    assert not is_in
    x2 = x0.copy()
    x2[7] = 11.11
    is_in = dom.check_membership(x2, tol=1e-5)
    assert not is_in

def test_infer_expcone_polydomain(self):
    x = standard_poly_monomials(4)
    g = 1 - np.sum(np.power(x, 2))
    dummy_f = x[0] * 0
    dom = infer_domain(dummy_f, [g], [])
    assert len(dom.K) == 5
    assert dom.A.shape == (13, 8)
    assert dom.K[0].type == '+'
    assert dom.K[0].len == 1
    for i in [1, 2, 3, 4]:
        assert dom.K[i].type == 'e'
    assert dom.b[0] == 1

def test_ordinary_constrained_2(self):
    x = standard_poly_monomials(1)[0]
    f = -x ** 2
    gts = [1 - x, x - (-1)]
    eqs = []
    res, dual = primal_dual_constrained(f, gts, eqs, 0, 2, 0, None)
    expect = -1
    assert abs(res[0] - expect) < 1e-6
    assert abs(res[1] - expect) < 1e-6
    sols = poly_solution_recovery.poly_solrec(dual, ineq_tol=0, eq_tol=0)
    assert len(sols) > 0
    x0 = sols[0]
    assert f(x0) >= expect

def test_conditional_sage_1(self):
    x = standard_poly_monomials(1)[0]
    f = -x ** 2
    gts = [1 - x ** 2]
    opt = -1
    X = infer_domain(f, gts, [])
    res_uncon00 = primal_dual_unconstrained(f, 0, 0, X)
    assert abs(res_uncon00[0] - opt) < 1e-6
    assert abs(res_uncon00[1] - opt) < 1e-6
    res_con010, dual = primal_dual_constrained(f, [], [], 0, 1, 0, X)
    assert abs(res_con010[0] - opt) < 1e-6
    assert abs(res_con010[1] - opt) < 1e-6
    solns = poly_solution_recovery.poly_solrec(dual)
    x_star = solns[0]
    gap = abs(f(x_star) - opt)
    assert gap < 1e-6

def test_conditional_sage_4(self):
    n = 4
    x = standard_poly_monomials(n)
    f0 = -x[0] * x[2] ** 3 + 4 * x[1] * x[2] ** 2 * x[3] + 4 * x[0] * x[2] * x[3] ** 2
    f1 = (2 * x[1] * x[3] ** 3 + 4 * x[0] * x[2] + 4 * x[2] ** 2
          - 10 * x[1] * x[3] - 10 * x[3] ** 2 + 2)
    f = f0 + f1
    sign_sym = [0.25 - x[i] ** 2 for i in range(n)]
    X = infer_domain(f, sign_sym, [])
    gts = [x[i] + 0.5 for i in range(n)] + [0.5 - x[i] for i in range(n)]
    dual = poly_constrained_relaxation(f, gts, [], X, p=1, q=2)
    dual.solve()
    expect = -3.180096
    self.assertAlmostEqual(dual.value, expect, places=5)
    solns = poly_solution_recovery.poly_solrec(dual)
    self.assertGreaterEqual(len(solns), 1)
    x_star = solns[0]
    gap = f(x_star) - dual.value
    self.assertLessEqual(gap, 1e-5)

def test_composition_sigs(self):
    p = Polynomial.from_dict({(1,): 2, (0,): -1})  # represents lambda x: 2*x - 1
    s = Signomial.from_dict({(2,): -1, (0,): 1})  # represents lambda x: 1 - exp(2*x)
    f = p(s)  # represents lambda x: -2*exp(2*x) + 1
    self.assertAlmostEqual(f(0.5), -2 * np.exp(1.0) + 1, places=4)
    self.assertAlmostEqual(f(1), -2 * np.exp(2.0) + 1, places=4)
    p = np.prod(standard_poly_monomials(3))
    exp_x = standard_sig_monomials(2)
    sig_vec = np.array([exp_x[0], exp_x[0] - exp_x[1], 1.0 / exp_x[1]])
    f = p(sig_vec)
    self.assertEqual(f.n, 2)
    self.assertEqual(f(np.array([1, 1])), 0)
    x_test = np.array([-3, 3])
    self.assertAlmostEqual(f(x_test), np.exp(-6) * (np.exp(-3) - np.exp(3)), places=4)

def test_conditional_sage_3(self):
    n = 5
    x = standard_poly_monomials(n)
    f = 0
    for i in range(n):
        sel = np.ones(n, dtype=bool)
        sel[i] = False
        f += 2 ** (n - 1) * np.prod(x[sel])
    gts = [0.25 - x[i] ** 2 for i in range(n)]  # -0.5 <= x[i] <= 0.5 for all i.
    opt = -3     # the true minimum of f over the box
    expect = -5  # the bound produced by this SAGE relaxation
    X = infer_domain(f, gts, [])
    res_con010, dual = primal_dual_constrained(f, [], [], 0, 1, 0, X)
    assert abs(res_con010[0] - expect) < 1e-4
    assert abs(res_con010[1] - expect) < 1e-4
    solns = poly_solution_recovery.poly_solrec(dual)
    assert len(solns) > 0
    x_star = solns[0]
    gap = abs(f(x_star) - opt)
    assert gap < 1e-4

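# Sanity check for opt == -3: each term of f is multilinear, so the minimum over
# the box is attained at a vertex, where |x_i| = 0.5 and every term evaluates to
# +1 or -1. The hypothetical helper below (numpy and itertools only, not part of
# the test suite) enumerates all 2**5 sign patterns.
def _check_conditional_sage_3_optimum():
    import itertools

    def f_val(x):
        return sum(2 ** 4 * np.prod(np.delete(x, i)) for i in range(5))

    corners = [0.5 * np.array(s) for s in itertools.product([-1, 1], repeat=5)]
    assert min(f_val(c) for c in corners) == -3
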
def local_refine_polys_from_sigs(f, gts, eqs, x0, **kwargs):
    """
    This is a helper function which ...
        (1) accepts signomial problem data (representative of a desired polynomial
            optimization problem),
        (2) transforms the signomial data into equivalent polynomial data, and
        (3) performs local refinement on the polynomial data, via the COBYLA solver.

    Parameters
    ----------
    f : Signomial
        Defines the objective function to be minimized. From ``f`` we will construct
        a polynomial ``p`` where ``p(y) = f(np.log(y))`` for all positive vectors y.
    gts : list of Signomial
        Each defining an inequality constraint ``g(x) >= 0``. From this list, we will
        construct a list of polynomials ``gts_poly``, so that every ``g0 in gts`` has
        a polynomial representative ``g1 in gts_poly``, satisfying
        ``g1(y) = g0(np.log(y))`` for all positive vectors y.
    eqs : list of Signomial
        Each defining an equality constraint ``g(x) == 0``. From this list, we will
        construct a list of polynomials ``eqs_poly``, so that every ``g0 in eqs`` has
        a polynomial representative ``g1 in eqs_poly``, satisfying
        ``g1(y) = g0(np.log(y))`` for all positive vectors y.
    x0 : ndarray
        An initial condition for the *signomial* optimization problem
        ``min{ f(x) | g(x) >= 0 for g in gts, g(x) == 0 for g in eqs }``.

    Other Parameters
    ----------------
    rhobeg : float
        Controls the size of COBYLA's initial search space around ``y0 = exp(x0)``.
    rhoend : float
        Termination criterion, controlling the size of COBYLA's smallest search space.
    maxfun : int
        Termination criterion, bounding the number of COBYLA's function evaluations.

    Returns
    -------
    y : ndarray
        The output of COBYLA for the polynomial optimization problem
        ``min{ p(y) | g(y) >= 0 for g in gts_poly, g(y) == 0 for g in eqs_poly, y >= 0 }``
        with initial condition ``y0 = exp(x0)``.
    """
    rhobeg = kwargs.get('rhobeg', 1.0)
    rhoend = kwargs.get('rhoend', 1e-7)
    maxfun = int(kwargs.get('maxfun', 10000))
    y0 = np.exp(x0)
    gts = [g.as_polynomial() for g in gts]
    x = standard_poly_monomials(y0.size)
    gts += [x[i] for i in range(y0.size)]  # Decision variables must be nonnegative.
    eqs = [g.as_polynomial() for g in eqs]
    f = f.as_polynomial()
    # COBYLA only handles inequality constraints; each equality g == 0 is passed
    # as the pair of inequalities g >= 0 and -g >= 0.
    y = fmin_cobyla(f, y0, gts + eqs + [-g for g in eqs],
                    rhobeg=rhobeg, rhoend=rhoend, maxfun=maxfun)
    return y

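# A minimal usage sketch for local_refine_polys_from_sigs. The problem instance
# and helper name are hypothetical: minimize f(x) = exp(2*x) - 3*exp(x) subject
# to exp(x) <= 5, starting from x0 = 0. In the polynomial variables y = exp(x),
# this is min{ y**2 - 3*y : 0 <= y <= 5 }, whose minimizer is y = 1.5.
def _example_local_refine():
    f = Signomial.from_dict({(2,): 1, (1,): -3})
    gts = [Signomial.from_dict({(0,): 5, (1,): -1})]  # 5 - exp(x) >= 0
    y = local_refine_polys_from_sigs(f, gts, [], x0=np.array([0.0]))
    x_refined = np.log(y)  # map back to the signomial variables
    return x_refined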