def test_partial_optimize_numeric_fn(self):
    """Numeric evaluation of a partial_optimize expression, with and
    without variables being optimized over, including dual values.
    """
    x, y = Variable(1), Variable(1)
    xval = 4

    # Reference: solve the two-stage problem as a single LP by
    # substituting the fixed value of x directly.
    p1 = Problem(Minimize(y), [xval + y >= 3])
    p1.solve()

    # Same problem via partial_optimize, minimizing over y with x fixed.
    constr = [y >= -100]
    p2 = Problem(Minimize(y), [x + y >= 3] + constr)
    g = cvxpy.partial_optimize(p2, [y], [x])
    x.value = xval
    y.value = 42
    constr[0].dual_variable.value = 42
    result = g.value
    self.assertAlmostEqual(result, p1.value)
    # Evaluating g must not clobber the externally-set primal/dual values.
    self.assertAlmostEqual(y.value, 42)
    self.assertAlmostEqual(constr[0].dual_value, 42)

    # Degenerate case: no variables optimized over, so g.value is
    # just the objective evaluated at the given point.
    p2 = Problem(Minimize(y), [x + y >= 3])
    g = cvxpy.partial_optimize(p2, [], [x, y])
    x.value = xval
    y.value = 42
    p2.constraints[0].dual_variable.value = 42
    result = g.value
    self.assertAlmostEqual(result, y.value)
    self.assertAlmostEqual(y.value, 42)
    self.assertAlmostEqual(p2.constraints[0].dual_value, 42)
def test_partial_optimize_numeric_fn(self):
    """Check numeric evaluation of partial_optimize expressions.

    Covers the case of optimizing over a variable and the degenerate
    case of optimizing over nothing; also verifies that evaluation
    leaves externally assigned primal and dual values untouched.
    """
    x, y = Variable(1), Variable(1)
    xval = 4

    # Baseline: collapse the two stages into one linear program.
    p1 = Problem(Minimize(y), [xval + y >= 3])
    p1.solve()

    # Two-stage formulation via partial_optimize (minimize over y).
    constr = [y >= -100]
    p2 = Problem(Minimize(y), [x + y >= 3] + constr)
    g = cvxpy.partial_optimize(p2, [y], [x])
    x.value = xval
    y.value = 42
    constr[0].dual_variable.value = 42
    result = g.value
    self.assertAlmostEqual(result, p1.value)
    # y and the dual variable were set by hand; g.value must not reset them.
    self.assertAlmostEqual(y.value, 42)
    self.assertAlmostEqual(constr[0].dual_value, 42)

    # With no optimization variables, g evaluates the objective as-is.
    p2 = Problem(Minimize(y), [x + y >= 3])
    g = cvxpy.partial_optimize(p2, [], [x, y])
    x.value = xval
    y.value = 42
    p2.constraints[0].dual_variable.value = 42
    result = g.value
    self.assertAlmostEqual(result, y.value)
    self.assertAlmostEqual(y.value, 42)
    self.assertAlmostEqual(p2.constraints[0].dual_value, 42)
def test_partial_problem(self):
    """Test domain for partial minimization/maximization problems."""
    for obj in [Minimize((self.a)**-1), Maximize(log(self.a))]:
        prob = Problem(obj, [self.x + self.a >= [5, 8]])

        # Optimize over nothing: the domain is the full implicit
        # domain of the objective plus the constraint.
        expr = cvxpy.partial_optimize(prob, dont_opt_vars=[self.x, self.a])
        dom = expr.domain
        constr = [self.a >= -100, self.x >= 0]
        prob = Problem(Minimize(sum_entries(self.x + self.a)), dom + constr)
        prob.solve()
        self.assertAlmostEqual(prob.value, 13)
        assert self.a.value >= 0
        assert np.all((self.x + self.a - [5, 8]).value >= -1e-3)

        # Optimize over x.
        # NOTE(review): `prob` was reassigned above, so this partializes
        # the domain-test problem, not the original one — confirm intended.
        expr = cvxpy.partial_optimize(prob, opt_vars=[self.x])
        dom = expr.domain
        constr = [self.a >= -100, self.x >= 0]
        prob = Problem(Minimize(sum_entries(self.x + self.a)), dom + constr)
        prob.solve()
        self.assertAlmostEqual(prob.value, 0)
        assert self.a.value >= 0
        self.assertItemsAlmostEqual(self.x.value, [0, 0])

        # Optimize over x and a.
        expr = cvxpy.partial_optimize(prob, opt_vars=[self.x, self.a])
        dom = expr.domain
        constr = [self.a >= -100, self.x >= 0]
        prob = Problem(Minimize(sum_entries(self.x + self.a)), dom + constr)
        prob.solve()
        self.assertAlmostEqual(self.a.value, -100)
        self.assertItemsAlmostEqual(self.x.value, [0, 0])
def test_partial_optimize_dcp(self):
    """Test DCP properties of partial optimize.

    Partially minimizing a convex problem yields a convex expression;
    partially maximizing a concave problem yields a concave expression.
    """
    # Evaluate the 1-norm in the usual way (i.e., in epigraph form).
    dims = 3
    x, t = Variable(dims), Variable(dims)
    # NOTE: the original also built an unused xval = [-5]*dims; removed.
    p2 = Problem(cvxpy.Minimize(sum_entries(t)), [-t <= x, x <= t])
    g = cvxpy.partial_optimize(p2, [t], [x])
    # assertEquals is a deprecated alias; use assertEqual.
    self.assertEqual(g.curvature, s.CONVEX)

    p2 = Problem(cvxpy.Maximize(sum_entries(t)), [-t <= x, x <= t])
    g = cvxpy.partial_optimize(p2, [t], [x])
    self.assertEqual(g.curvature, s.CONCAVE)
def test_partial_optimize_stacked(self):
    """Nested partial_optimize: partially optimizing an expression that
    is itself a partial_optimize must agree with solving directly.
    """
    # Minimize the 1-norm in the usual epigraph form.
    dims = 3
    x, t = Variable(dims), Variable(dims)
    p1 = Problem(Minimize(sum_entries(t)), [-t <= x, x <= t])

    # Partially optimize over t, then over x on top of that.
    g = cvxpy.partial_optimize(p1, [t], [x])
    g2 = cvxpy.partial_optimize(Problem(Minimize(g)), [x])
    p2 = Problem(Minimize(g2))
    p2.solve()
    p1.solve()
    self.assertAlmostEqual(p1.value, p2.value)
def test_partial_optimize_dcp(self):
    """Test DCP properties of partial optimize.

    Partial minimization of a convex problem is convex; partial
    maximization of a concave problem is concave.
    """
    # Evaluate the 1-norm in the usual way (i.e., in epigraph form).
    dims = 3
    x, t = Variable(dims), Variable(dims)
    # NOTE: the original also built an unused xval = [-5]*dims; removed.
    p2 = Problem(cvxpy.Minimize(sum_entries(t)), [-t <= x, x <= t])
    g = cvxpy.partial_optimize(p2, [t], [x])
    # assertEquals is a deprecated alias; use assertEqual.
    self.assertEqual(g.curvature, u.Curvature.CONVEX_KEY)

    p2 = Problem(cvxpy.Maximize(sum_entries(t)), [-t <= x, x <= t])
    g = cvxpy.partial_optimize(p2, [t], [x])
    self.assertEqual(g.curvature, u.Curvature.CONCAVE_KEY)
def test_partial_problem(self):
    """Test grad for partial minimization/maximization problems."""
    for obj in [Minimize((self.a)**-1), Maximize(entr(self.a))]:
        prob = Problem(obj, [self.x + self.a >= [5, 8]])

        # Optimize over nothing: grad is None when values are unset.
        expr = cvxpy.partial_optimize(prob, dont_opt_vars=[self.x, self.a])
        self.a.value = None
        self.x.value = None
        grad = expr.grad
        self.assertAlmostEqual(grad[self.a], None)
        self.assertAlmostEqual(grad[self.x], None)

        # Outside domain: grad is also None.
        self.a.value = 1.0
        self.x.value = [5, 5]
        grad = expr.grad
        self.assertAlmostEqual(grad[self.a], None)
        self.assertAlmostEqual(grad[self.x], None)

        # Feasible point: grad matches the objective's own gradient,
        # with zero sensitivity to the inactive constraint via x.
        self.a.value = 1
        self.x.value = [10, 10]
        grad = expr.grad
        self.assertAlmostEqual(grad[self.a], obj.args[0].grad[self.a])
        self.assertItemsAlmostEqual(grad[self.x].todense(), [0, 0, 0, 0])

        # Optimize over x.
        expr = cvxpy.partial_optimize(prob, opt_vars=[self.x])
        self.a.value = 1
        grad = expr.grad
        self.assertAlmostEqual(grad[self.a], obj.args[0].grad[self.a] + 0)

        # Optimize over a: gradient w.r.t. x equals the dual values of
        # the coupling constraint in the x-fixed problem.
        fix_prob = Problem(obj, [self.x + self.a >= [5, 8], self.x == 0])
        fix_prob.solve()
        dual_val = fix_prob.constraints[0].dual_variable.value
        expr = cvxpy.partial_optimize(prob, opt_vars=[self.a])
        self.x.value = [0, 0]
        grad = expr.grad
        self.assertItemsAlmostEqual(grad[self.x].todense(), dual_val)

        # Optimize over x and a: no free variables remain, grad is empty.
        expr = cvxpy.partial_optimize(prob, opt_vars=[self.x, self.a])
        grad = expr.grad
        self.assertAlmostEqual(grad, {})
def test_partial_optimize_eval_1norm(self):
    """Evaluate the 1-norm via partial_optimize, exercising all the
    ways of specifying which variables to optimize over, plus the
    error paths for invalid argument combinations.
    """
    # Evaluate the 1-norm in the usual way (i.e., in epigraph form).
    dims = 3
    x, t = Variable(dims), Variable(dims)
    xval = [-5] * dims
    p1 = Problem(Minimize(sum_entries(t)), [-t <= xval, xval <= t])
    p1.solve()

    # Minimize the 1-norm via partial_optimize (both arg lists given).
    p2 = Problem(Minimize(sum_entries(t)), [-t <= x, x <= t])
    g = cvxpy.partial_optimize(p2, [t], [x])
    p3 = Problem(Minimize(g), [x == xval])
    p3.solve()
    self.assertAlmostEqual(p1.value, p3.value)

    # Same, giving only opt_vars.
    g = cvxpy.partial_optimize(p2, opt_vars=[t])
    p3 = Problem(Minimize(g), [x == xval])
    p3.solve()
    self.assertAlmostEqual(p1.value, p3.value)

    # Same, giving only dont_opt_vars.
    g = cvxpy.partial_optimize(p2, dont_opt_vars=[x])
    p3 = Problem(Minimize(g), [x == xval])
    p3.solve()
    self.assertAlmostEqual(p1.value, p3.value)

    # Neither argument given: must raise.
    with self.assertRaises(Exception) as cm:
        g = cvxpy.partial_optimize(p2)
    self.assertEqual(
        str(cm.exception),
        "partial_optimize called with neither opt_vars nor dont_opt_vars.")

    # Both given but not covering all variables: must raise.
    with self.assertRaises(Exception) as cm:
        g = cvxpy.partial_optimize(p2, [], [x])
    self.assertEqual(
        str(cm.exception),
        ("If opt_vars and new_opt_vars are both specified, "
         "they must contain all variables in the problem."))
def test_partial_optimize_special_constr(self):
    """partial_optimize with a nonlinear (exp) objective in the inner
    problem must agree with solving the combined problem directly.
    """
    x, y = Variable(1), Variable(1)

    # Reference: combine both stages into one program.
    p1 = Problem(Minimize(x + exp(y)), [x + y >= 3, y >= 4, x >= 5])
    p1.solve()

    # Two-stage version: inner problem minimizes exp(y) over y.
    p2 = Problem(Minimize(exp(y)), [x + y >= 3, y >= 4])
    g = cvxpy.partial_optimize(p2, [y], [x])
    p3 = Problem(Minimize(x + g), [x >= 5])
    p3.solve()
    self.assertAlmostEqual(p1.value, p3.value)
def test_partial_optimize_special_constr(self):
    """Two-stage problem with an exponential-cone constraint: the
    partial_optimize formulation matches the single combined program.
    """
    x, y = Variable(1), Variable(1)

    # Combined single-stage baseline.
    p1 = Problem(Minimize(x + exp(y)), [x + y >= 3, y >= 4, x >= 5])
    p1.solve()

    # Stage two folded into g via partial_optimize over y.
    p2 = Problem(Minimize(exp(y)), [x + y >= 3, y >= 4])
    g = cvxpy.partial_optimize(p2, [y], [x])
    p3 = Problem(Minimize(x + g), [x >= 5])
    p3.solve()
    self.assertAlmostEqual(p1.value, p3.value)
def test_partial_optimize_numeric_fn(self):
    """Numeric evaluation of partial_optimize: g.value at a fixed x
    equals the optimal value of the substituted problem.
    """
    x, y = Variable(1), Variable(1)
    xval = 4

    # Baseline: substitute x's value and solve one LP.
    p1 = Problem(Minimize(y), [xval + y >= 3])
    p1.solve()

    # Two-stage version evaluated numerically at x = xval.
    p2 = Problem(Minimize(y), [x + y >= 3])
    g = cvxpy.partial_optimize(p2, [y], [x])
    x.value = xval
    result = g.value
    self.assertAlmostEqual(result, p1.value)
def test_partial_optimize_params(self):
    """Test partial optimize with parameters."""
    x, y = Variable(1), Variable(1)
    gamma = Parameter()

    # Baseline: solve the two stages as one LP with gamma set.
    p1 = Problem(Minimize(x + y), [x + y >= gamma, y >= 4, x >= 5])
    gamma.value = 3
    p1.solve()

    # Two-stage version: the inner problem carries the parameter.
    p2 = Problem(Minimize(y), [x + y >= gamma, y >= 4])
    g = cvxpy.partial_optimize(p2, [y], [x])
    p3 = Problem(Minimize(x + g), [x >= 5])
    p3.solve()
    self.assertAlmostEqual(p1.value, p3.value)