def test_indexed_two_uncparams(self):
    """LDR transformation with two UncParams (one indexed, one scalar).

    Checks that the linear decision rule substitution produces the
    expected bilinear (coefficient * uncertain parameter) terms in both
    the transformed constraint and the transformed objective.
    """
    m = pe.ConcreteModel()
    m.w = pe.ConcreteModel() if False else m.w if False else None  # placeholder removed
def test_disc_second_order_legendre(self):
    """Second-order derivative discretization with LAGRANGE-LEGENDRE.

    Applies collocation (nfe=2, ncp=2) to a model with a second
    derivative and checks the generated components, counts, and the
    rounded standard-repn coefficients of two discretization equations.
    """
    m = self.m.clone()
    m.dv1dt2 = DerivativeVar(m.v1, wrt=(m.t, m.t))
    disc = TransformationFactory('dae.collocation')
    disc.apply_to(m, nfe=2, ncp=2, scheme='LAGRANGE-LEGENDRE')
    # Discretization equations plus the Legendre continuity equations
    # must both be created.
    self.assertTrue(hasattr(m, 'dv1dt2_disc_eq'))
    self.assertTrue(hasattr(m, 'v1_t_cont_eq'))
    self.assertTrue(len(m.dv1dt2_disc_eq) == 4)
    self.assertTrue(len(m.v1_t_cont_eq) == 2)
    self.assertTrue(len(m.v1) == 7)
    # Both derivative variables should have been reclassified.
    self.assertTrue(hasattr(m, '_pyomo_dae_reclassified_derivativevars'))
    self.assertTrue(m.dv1 in m._pyomo_dae_reclassified_derivativevars)
    self.assertTrue(m.dv1dt2 in m._pyomo_dae_reclassified_derivativevars)
    # Expected (rounded) coefficients of the first finite element's
    # interior collocation equation.
    repn_baseline = {id(m.dv1dt2[1.056624]): 1,
                     id(m.v1[0]): -0.48,
                     id(m.v1[1.056624]): 0.65569,
                     id(m.v1[3.943376]): -0.17569}
    repn = generate_standard_repn(m.dv1dt2_disc_eq[1.056624].body)
    repn_gen = repn_to_rounded_dict(repn, 5)
    self.assertEqual(repn_baseline, repn_gen)
    # Same stencil shifted into the second finite element.
    repn_baseline = {id(m.dv1dt2[6.056624]): 1,
                     id(m.v1[5.0]): -0.48,
                     id(m.v1[6.056624]): 0.65569,
                     id(m.v1[8.943376]): -0.17569}
    repn = generate_standard_repn(m.dv1dt2_disc_eq[6.056624].body)
    repn_gen = repn_to_rounded_dict(repn, 5)
    self.assertEqual(repn_baseline, repn_gen)
def test_disc_second_order_radau(self):
    """Second-order derivative discretization with the default Radau scheme.

    Applies collocation (nfe=2, ncp=2) and checks component counts,
    derivative reclassification, and the rounded standard-repn
    coefficients of two discretization equations.
    """
    m = self.m.clone()
    m.dv1dt2 = DerivativeVar(m.v1, wrt=(m.t, m.t))
    disc = TransformationFactory('dae.collocation')
    disc.apply_to(m, nfe=2, ncp=2)
    self.assertTrue(hasattr(m, 'dv1dt2_disc_eq'))
    self.assertTrue(len(m.dv1dt2_disc_eq) == 4)
    self.assertTrue(len(m.v1) == 5)
    self.assertTrue(hasattr(m, '_pyomo_dae_reclassified_derivativevars'))
    self.assertTrue(m.dv1 in m._pyomo_dae_reclassified_derivativevars)
    self.assertTrue(m.dv1dt2 in m._pyomo_dae_reclassified_derivativevars)
    # Expected (rounded) coefficients for the equation at t=5.0.
    repn_baseline = {id(m.dv1dt2[5.0]): 1,
                     id(m.v1[0]): -0.24,
                     id(m.v1[1.666667]): 0.36,
                     id(m.v1[5.0]): -0.12}
    repn = generate_standard_repn(m.dv1dt2_disc_eq[5.0].body)
    repn_gen = repn_to_rounded_dict(repn, 5)
    self.assertEqual(repn_baseline, repn_gen)
    # Same stencil shifted into the second finite element (t=10).
    repn_baseline = {id(m.dv1dt2[10]): 1,
                     id(m.v1[5.0]): -0.24,
                     id(m.v1[6.666667]): 0.36,
                     id(m.v1[10]): -0.12}
    repn = generate_standard_repn(m.dv1dt2_disc_eq[10.0].body)
    repn_gen = repn_to_rounded_dict(repn, 5)
    self.assertEqual(repn_baseline, repn_gen)
def test_simple_adjustable(self):
    """Nominal transformation of a scalar adjustable variable.

    Builds a tiny model with one scalar AdjustableVar, applies the
    nominal transformation, and verifies the substituted constraint and
    objective via their standard representations.
    """
    model = pe.ConcreteModel()
    model.w = ro.UncParam([0, 1, 2])
    model.y = ro.AdjustableVar(uncparams=[model.w])
    model.c = pe.Constraint(expr=model.w[0] + model.y <= 1)
    model.o = pe.Objective(expr=model.y + 3, sense=pe.maximize)
    xfrm = NominalAdjustableTransformation()
    xfrm.apply_to(model)
    # The adjustable variable is replaced by a plain Var.
    self.assertTrue(hasattr(model, 'y_nominal'))
    self.assertIs(model.y_nominal.ctype, pe.Var)
    # Transformed constraint: w[0] + y_nominal <= 1 with no constant.
    self.assertTrue(hasattr(model, 'c_nominal'))
    repn = generate_standard_repn(model.c_nominal.body)
    expected_ids = {id(model.w[0]), id(model.y_nominal)}
    for var in repn.linear_vars:
        self.assertIn(id(var), expected_ids)
    self.assertEqual(repn.linear_coefs, (1, 1))
    self.assertEqual(repn.constant, 0)
    self.assertEqual(len(repn.quadratic_vars), 0)
    # Transformed objective: y_nominal + 3, purely linear.
    self.assertTrue(hasattr(model, 'o_nominal'))
    repn = generate_standard_repn(model.o_nominal.expr)
    self.assertEqual(len(repn.linear_vars), 1)
    self.assertEqual(id(repn.linear_vars[0]), id(model.y_nominal))
    self.assertEqual(len(repn.quadratic_vars), 0)
    self.assertEqual(len(repn.nonlinear_vars), 0)
    self.assertEqual(repn.constant, 3)
def test_transformed_constraints_linear(self):
    """Check the linear constraints produced by the gdp.chull relaxation.

    Verifies, for disjunct d[1], that: a <=-constraint becomes a single
    'lb' relaxed constraint, an equality is preserved as 'eq', and a
    bounded (two-sided) inequality is split into 'lb' and 'ub' parts —
    each with the expected coefficients on the disaggregated variable
    and the indicator variable, and the expected variable bounds.
    """
    m = models.makeTwoTermDisj_Nonlinear()
    TransformationFactory('gdp.chull').apply_to(m)
    disjBlock = m._pyomo_gdp_chull_relaxation.relaxedDisjuncts

    # the only constraint on the first block is the non-linear one
    c1 = disjBlock[1].component("d[1].c1")
    # has only lb
    self.assertEqual(len(c1), 1)
    cons = c1['lb']
    self.assertIsNone(cons.lower)
    self.assertEqual(cons.upper, 0)
    repn = generate_standard_repn(cons.body)
    self.assertTrue(repn.is_linear())
    self.assertEqual(len(repn.linear_vars), 2)
    check_linear_coef(self, repn, disjBlock[1].x, -1)
    check_linear_coef(self, repn, m.d[1].indicator_var, 2)
    self.assertEqual(repn.constant, 0)
    # Disaggregated variable keeps the original bounds.
    self.assertEqual(disjBlock[1].x.lb, 0)
    self.assertEqual(disjBlock[1].x.ub, 8)

    c2 = disjBlock[1].component("d[1].c2")
    # 'eq' is preserved
    self.assertEqual(len(c2), 1)
    cons = c2['eq']
    self.assertEqual(cons.lower, 0)
    self.assertEqual(cons.upper, 0)
    repn = generate_standard_repn(cons.body)
    self.assertTrue(repn.is_linear())
    self.assertEqual(len(repn.linear_vars), 2)
    check_linear_coef(self, repn, disjBlock[1].w, 1)
    check_linear_coef(self, repn, m.d[1].indicator_var, -3)
    self.assertEqual(repn.constant, 0)
    self.assertEqual(disjBlock[1].w.lb, 0)
    self.assertEqual(disjBlock[1].w.ub, 7)

    c3 = disjBlock[1].component("d[1].c3")
    # bounded inequality is split
    self.assertEqual(len(c3), 2)
    cons = c3['lb']
    self.assertIsNone(cons.lower)
    self.assertEqual(cons.upper, 0)
    repn = generate_standard_repn(cons.body)
    self.assertTrue(repn.is_linear())
    self.assertEqual(len(repn.linear_vars), 2)
    check_linear_coef(self, repn, disjBlock[1].x, -1)
    check_linear_coef(self, repn, m.d[1].indicator_var, 1)
    self.assertEqual(repn.constant, 0)

    cons = c3['ub']
    self.assertIsNone(cons.lower)
    self.assertEqual(cons.upper, 0)
    repn = generate_standard_repn(cons.body)
    self.assertTrue(repn.is_linear())
    self.assertEqual(len(repn.linear_vars), 2)
    check_linear_coef(self, repn, disjBlock[1].x, 1)
    check_linear_coef(self, repn, m.d[1].indicator_var, -3)
    self.assertEqual(repn.constant, 0)
def linear(flag):
    """Build the same linear expression via different construction idioms.

    Performance-benchmark driver: each *flag* value selects a different
    way of building sum(p[i] * x[i]) (or a close variant) so that the
    expression-generation strategies can be timed against each other.
    The bare ``except`` clauses are intentional: a timed-out build is
    replaced with a trivially wrong stand-in (marked BOGUS) so the
    benchmark can proceed.
    """
    if flag == 1:
        expr = summation(model.p, model.x)
    elif flag == 6:
        expr = Sum(model.p[i] * model.x[i] for i in model.A)
    elif flag == 2:
        expr = sum(model.p[i] * model.x[i] for i in model.A)
    elif flag == 3:
        expr = 0
        for i in model.A:
            expr += model.p[i] * model.x[i]
    elif flag == 4:
        try:
            with timeout(10):
                expr = 0
                for i in model.A:
                    expr = expr + model.p[i] * model.x[i]
        except:
            expr = model.x[1]  # BOGUS
    elif flag == 5:
        try:
            with timeout(10):
                expr = 0
                for i in model.A:
                    expr = model.p[i] * model.x[i] + expr
        except:
            expr = model.x[1]  # BOGUS
    elif flag == 12:
        with EXPR.linear_expression as expr:
            expr = sum((model.p[i] * model.x[i] for i in model.A), expr)
    elif flag == 13:
        with EXPR.linear_expression as expr:
            for i in model.A:
                expr += model.p[i] * model.x[i]
    elif flag == 14:
        with EXPR.linear_expression as expr:
            for i in model.A:
                expr = expr + model.p[i] * model.x[i]
    elif flag == 15:
        with EXPR.linear_expression as expr:
            for i in model.A:
                expr = model.p[i] * model.x[i] + expr
    elif flag == 7:
        expr = 0
        for i in model.A:
            expr += model.p[i] * (1 + model.x[i])
    elif flag == 17:
        with EXPR.linear_expression as expr:
            for i in model.A:
                expr += model.p[i] * (1 + model.x[i])
    # Time the repn generation appropriate to the active expression system.
    if coopr3 or pyomo4:
        generate_ampl_repn(expr)
    else:
        generate_standard_repn(EXPR.compress_expression(expr),
                               quadratic=False)
def test_disc_single_index_legendre(self):
    """First-derivative discretization with LAGRANGE-LEGENDRE (nfe=5, ncp=3).

    Checks generated components and counts, tau/discretization point
    values, derivative reclassification, and the rounded standard-repn
    coefficients of two discretization equations.
    """
    m = self.m.clone()
    disc = TransformationFactory('dae.collocation')
    disc.apply_to(m, nfe=5, ncp=3, scheme='LAGRANGE-LEGENDRE')
    self.assertTrue(hasattr(m, 'dv1_disc_eq'))
    self.assertTrue(hasattr(m, 'v1_t_cont_eq'))
    self.assertTrue(len(m.dv1_disc_eq) == 15)
    self.assertTrue(len(m.v1_t_cont_eq) == 5)
    self.assertTrue(len(m.v1) == 21)
    expected_tau_points = [
        0.0, 0.11270166537925834, 0.49999999999999989, 0.88729833462074226
    ]
    expected_disc_points = [
        0, 0.225403, 1.0, 1.774597, 2.0, 2.225403, 3.0, 3.774597, 4.0,
        4.225403, 5.0, 5.774597, 6.0, 6.225403, 7.0, 7.774597, 8.0,
        8.225403, 9.0, 9.774597, 10
    ]
    disc_info = m.t.get_discretization_info()
    self.assertTrue(disc_info['scheme'] == 'LAGRANGE-LEGENDRE')
    for idx, val in enumerate(disc_info['tau_points']):
        self.assertAlmostEqual(val, expected_tau_points[idx])
    for idx, val in enumerate(list(m.t)):
        self.assertAlmostEqual(val, expected_disc_points[idx])
        # discretization points must be plain numbers
        self.assertTrue(type(val) in [float, int])
    self.assertTrue(hasattr(m, '_pyomo_dae_reclassified_derivativevars'))
    self.assertTrue(m.dv1 in m._pyomo_dae_reclassified_derivativevars)
    # Expected (rounded) coefficients of selected discretization equations.
    repn_baseline = {
        id(m.dv1[3.0]): 1,
        id(m.v1[2.0]): -1.5,
        id(m.v1[2.225403]): 2.86374,
        id(m.v1[3.0]): -1.0,
        id(m.v1[3.774597]): -0.36374
    }
    repn = generate_standard_repn(m.dv1_disc_eq[3.0].body)
    repn_gen = repn_to_rounded_dict(repn, 5)
    self.assertEqual(repn_baseline, repn_gen)
    repn_baseline = {
        id(m.dv1[5.0]): 1,
        id(m.v1[4.0]): -1.5,
        id(m.v1[4.225403]): 2.86374,
        id(m.v1[5.0]): -1.0,
        id(m.v1[5.774597]): -0.36374
    }
    repn = generate_standard_repn(m.dv1_disc_eq[5.0].body)
    repn_gen = repn_to_rounded_dict(repn, 5)
    self.assertEqual(repn_baseline, repn_gen)
def trial(func, repn=False):
    """Run *func* under a 10-second timeout, optionally generating a repn.

    Parameters
    ----------
    func : callable
        Zero-argument callable that builds and returns an expression.
    repn : bool, optional
        If True, also generate the (non-quadratic) standard repn of the
        returned expression, so that repn generation time is included
        in the timed trial.

    A TimeoutError is an acceptable outcome (the trial simply records
    nothing); any other exception propagates to the caller.
    """
    try:
        with timeout(10):
            expr = func()
            if repn:
                generate_standard_repn(expr, quadratic=False)
    except TimeoutError:
        pass
    # NOTE: the original code ended with a bare ``except: raise`` clause;
    # re-raising everything is identical to normal propagation, so the
    # dead clause has been removed.
def _constrs_contained_within(test_case, test_constr_tuples, constraint_list): """Checks to see if constraints defined by test_constr_tuples are in the constraint list. Parameters ---------- constraint_list : Constraint test_constr_tuples : list of tuple test_case : unittest.TestCase """ # Move const term from body def _move_const_from_body(lower, repn, upper): if repn.constant is not None and not repn.constant == 0: if lower is not None: lower -= repn.constant if upper is not None: upper -= repn.constant return value(lower), repn, value(upper) def _repns_match(repn, test_repn): if not len(repn.linear_vars) == len(test_repn.linear_vars): return False coef_map = ComponentMap( (var, coef) for var, coef in zip(repn.linear_vars, repn.linear_coefs)) for var, coef in zip(test_repn.linear_vars, test_repn.linear_coefs): if not coef_map.get(var, 0) == coef: return False return True constr_list_tuples = [ _move_const_from_body(constr.lower, generate_standard_repn(constr.body), constr.upper) for constr in constraint_list.values() ] for test_lower, test_body, test_upper in test_constr_tuples: test_repn = generate_standard_repn(test_body) test_lower, test_repn, test_upper = _move_const_from_body( test_lower, test_repn, test_upper) found_match = False # Make sure one of the list tuples matches for lower, repn, upper in constr_list_tuples: if lower == test_lower and upper == test_upper and \ _repns_match(repn, test_repn): found_match = True break test_case.assertTrue( found_match, "{} <= {} <= {} was not found in constraint list.".format( test_lower, test_body, test_upper))
def test_disc_single_index_radau(self):
    """First-derivative discretization with the default Radau scheme.

    Applies collocation (nfe=5, ncp=3) and checks component counts,
    tau/discretization point values, derivative reclassification, and
    the rounded standard-repn coefficients of two discretization
    equations.
    """
    m = self.m.clone()
    disc = TransformationFactory('dae.collocation')
    disc.apply_to(m, nfe=5, ncp=3)
    self.assertTrue(hasattr(m, 'dv1_disc_eq'))
    self.assertTrue(len(m.dv1_disc_eq) == 15)
    self.assertTrue(len(m.v1) == 16)
    expected_tau_points = [
        0.0, 0.1550510257216822, 0.64494897427831788, 1.0
    ]
    expected_disc_points = [
        0, 0.310102, 1.289898, 2.0, 2.310102, 3.289898, 4.0, 4.310102,
        5.289898, 6.0, 6.310102, 7.289898, 8.0, 8.310102, 9.289898, 10
    ]
    disc_info = m.t.get_discretization_info()
    self.assertTrue(disc_info['scheme'] == 'LAGRANGE-RADAU')
    for idx, val in enumerate(disc_info['tau_points']):
        self.assertAlmostEqual(val, expected_tau_points[idx])
    for idx, val in enumerate(list(m.t)):
        self.assertAlmostEqual(val, expected_disc_points[idx])
        # discretization points must be plain numbers
        self.assertTrue(type(val) in [float, int])
    self.assertTrue(hasattr(m, '_pyomo_dae_reclassified_derivativevars'))
    self.assertTrue(m._pyomo_dae_reclassified_derivativevars[0] is m.dv1)
    # Expected (rounded) coefficients of selected discretization equations.
    repn_baseline = {
        id(m.dv1[2.0]): 1.0,
        id(m.v1[0]): 1.5,
        id(m.v1[0.310102]): -2.76599,
        id(m.v1[1.289898]): 3.76599,
        id(m.v1[2.0]): -2.5
    }
    repn = generate_standard_repn(m.dv1_disc_eq[2.0].body)
    repn_gen = repn_to_rounded_dict(repn, 5)
    self.assertEqual(repn_baseline, repn_gen)
    repn_baseline = {
        id(m.dv1[4.0]): 1.0,
        id(m.v1[2.0]): 1.5,
        id(m.v1[2.310102]): -2.76599,
        id(m.v1[3.289898]): 3.76599,
        id(m.v1[4.0]): -2.5
    }
    repn = generate_standard_repn(m.dv1_disc_eq[4.0].body)
    repn_gen = repn_to_rounded_dict(repn, 5)
    self.assertEqual(repn_baseline, repn_gen)
def _get_expr_from_pyomo_expr(self, expr, max_degree=2):
    """Convert a Pyomo expression to a Gurobi expression via its repn.

    Generates the standard representation (quadratic only when
    max_degree == 2) and delegates to ``_get_expr_from_pyomo_repn``;
    a DegreeError is re-raised with the offending expression appended
    to its message.
    """
    use_quadratic = (max_degree == 2)
    repn = generate_standard_repn(expr, quadratic=use_quadratic)

    try:
        gurobi_expr, referenced_vars = self._get_expr_from_pyomo_repn(
            repn, max_degree)
    except DegreeError as e:
        msg = e.args[0] + '\nexpr: {0}'.format(expr)
        raise DegreeError(msg)

    return gurobi_expr, referenced_vars
def _get_expr_from_pyomo_expr(self, expr, max_degree=2):
    """Convert a Pyomo expression to a CPLEX expression via its repn.

    Generates the standard representation (quadratic only when
    max_degree == 2) and delegates to ``_get_expr_from_pyomo_repn``;
    a DegreeError is re-raised with the offending expression appended
    to its message.
    """
    use_quadratic = (max_degree == 2)
    repn = generate_standard_repn(expr, quadratic=use_quadratic)

    try:
        cplex_expr, referenced_vars = self._get_expr_from_pyomo_repn(
            repn, max_degree)
    except DegreeError as e:
        msg = e.args[0] + '\nexpr: {0}'.format(expr)
        raise DegreeError(msg)

    return cplex_expr, referenced_vars
def test_disc_single_index_legendre(self):
    """First-derivative discretization with LAGRANGE-LEGENDRE (nfe=5, ncp=3).

    Checks generated components and counts, tau/discretization point
    values, derivative reclassification, and the rounded standard-repn
    coefficients of two discretization equations.
    """
    m = self.m.clone()
    disc = TransformationFactory('dae.collocation')
    disc.apply_to(m, nfe=5, ncp=3, scheme='LAGRANGE-LEGENDRE')
    self.assertTrue(hasattr(m, 'dv1_disc_eq'))
    self.assertTrue(hasattr(m, 'v1_t_cont_eq'))
    self.assertTrue(len(m.dv1_disc_eq) == 15)
    self.assertTrue(len(m.v1_t_cont_eq) == 5)
    self.assertTrue(len(m.v1) == 21)
    expected_tau_points = [0.0, 0.11270166537925834, 0.49999999999999989,
                           0.88729833462074226]
    expected_disc_points = [0, 0.225403, 1.0, 1.774597, 2.0, 2.225403,
                            3.0, 3.774597, 4.0, 4.225403, 5.0, 5.774597,
                            6.0, 6.225403, 7.0, 7.774597, 8.0, 8.225403,
                            9.0, 9.774597, 10]
    disc_info = m.t.get_discretization_info()
    self.assertTrue(disc_info['scheme'] == 'LAGRANGE-LEGENDRE')
    for idx, val in enumerate(disc_info['tau_points']):
        self.assertAlmostEqual(val, expected_tau_points[idx])
    for idx, val in enumerate(list(m.t)):
        self.assertAlmostEqual(val, expected_disc_points[idx])
    self.assertTrue(hasattr(m, '_pyomo_dae_reclassified_derivativevars'))
    self.assertTrue(m.dv1 in m._pyomo_dae_reclassified_derivativevars)
    # Expected (rounded) coefficients of selected discretization equations.
    repn_baseline = {id(m.dv1[3.0]): 1,
                     id(m.v1[2.0]): -1.5,
                     id(m.v1[2.225403]): 2.86374,
                     id(m.v1[3.0]): -1.0,
                     id(m.v1[3.774597]): -0.36374}
    repn = generate_standard_repn(m.dv1_disc_eq[3.0].body)
    repn_gen = repn_to_rounded_dict(repn, 5)
    self.assertEqual(repn_baseline, repn_gen)
    repn_baseline = {id(m.dv1[5.0]): 1,
                     id(m.v1[4.0]): -1.5,
                     id(m.v1[4.225403]): 2.86374,
                     id(m.v1[5.0]): -1.0,
                     id(m.v1[5.774597]): -0.36374}
    repn = generate_standard_repn(m.dv1_disc_eq[5.0].body)
    repn_gen = repn_to_rounded_dict(repn, 5)
    self.assertEqual(repn_baseline, repn_gen)
def test_disaggregation_constraints(self):
    """Check the chull disaggregation constraints for an indexed disjunction.

    Each original variable must equal the sum of its two disaggregated
    copies: x[i] - x_disj1[i] - x_disj2[i] == 0.
    """
    m = models.makeTwoTermIndexedDisjunction()
    TransformationFactory('gdp.chull').apply_to(m)
    disaggregationCons = m._gdp_chull_relaxation_disjunction_disaggregation
    relaxedDisjuncts = m._pyomo_gdp_chull_relaxation.relaxedDisjuncts
    self.assertIsInstance(disaggregationCons, Constraint)
    self.assertEqual(len(disaggregationCons), 3)

    # Map each disaggregation-constraint index to the pair of
    # disaggregated copies of the corresponding original variable.
    disaggregatedVars = {
        (1, 0): [relaxedDisjuncts[0].component('x[1]'),
                 relaxedDisjuncts[1].component('x[1]')],
        (2, 0): [relaxedDisjuncts[2].component('x[2]'),
                 relaxedDisjuncts[3].component('x[2]')],
        (3, 0): [relaxedDisjuncts[4].component('x[3]'),
                 relaxedDisjuncts[5].component('x[3]')],
    }

    for i, disVars in iteritems(disaggregatedVars):
        cons = disaggregationCons[i]
        # equality constraint with zero bounds and no constant
        self.assertEqual(cons.lower, 0)
        self.assertEqual(cons.upper, 0)
        repn = generate_standard_repn(cons.body)
        self.assertTrue(repn.is_linear())
        self.assertEqual(repn.constant, 0)
        self.assertEqual(len(repn.linear_vars), 3)
        check_linear_coef(self, repn, m.x[i[0]], 1)
        check_linear_coef(self, repn, disVars[0], -1)
        check_linear_coef(self, repn, disVars[1], -1)
def test_transformed_constraint_nonlinear(self):
    """Check the chull relaxation of the nonlinear constraint on d[0].

    Verifies the relaxed constraint's bounds and, via a string
    comparison of the body, that the perspective-function substitution
    (EPS-scaled indicator variable) was applied correctly.
    """
    m = models.makeTwoTermDisj_Nonlinear()
    TransformationFactory('gdp.chull').apply_to(m)
    disjBlock = m._pyomo_gdp_chull_relaxation.relaxedDisjuncts

    # the only constraint on the first block is the non-linear one
    disj1c = disjBlock[0].component("d[0].c")
    self.assertIsInstance(disj1c, Constraint)
    # we only have an upper bound
    self.assertEqual(len(disj1c), 1)
    cons = disj1c['ub']
    self.assertIsNone(cons.lower)
    self.assertEqual(cons.upper, 0)
    repn = generate_standard_repn(cons.body)
    self.assertFalse(repn.is_linear())
    self.assertEqual(len(repn.linear_vars), 1)
    # This is a weak test, but as good as any to ensure that the
    # substitution was done correctly
    EPS_1 = 1-EPS
    self.assertEqual(
        str(cons.body),
        "(%s*d[0].indicator_var + %s)*("
        "_pyomo_gdp_chull_relaxation.relaxedDisjuncts[0].x*"
        "(1/(%s*d[0].indicator_var + %s)) + "
        "(_pyomo_gdp_chull_relaxation.relaxedDisjuncts[0].y*"
        "(1/(%s*d[0].indicator_var + %s)))**2) - "
        "%s*(0.0 + 0.0**2)*(1 - d[0].indicator_var) "
        "- 14.0*d[0].indicator_var"
        % (EPS_1, EPS, EPS_1, EPS, EPS_1, EPS, EPS))
def _bilinear_expressions(model):
    """Collect the bilinear terms appearing in the model's constraints.

    Returns a ComponentMap mapping each variable x to a ComponentMap of
    its bilinear partners, each of which maps to the ComponentSet of
    constraints containing that x*y term:
    x --> (y --> [constr1, constr2, ...], z --> [constr2, constr3])
    """
    # TODO for now, we look for only expressions where the bilinearities are
    # exposed on the root level SumExpression, and thus accessible via
    # generate_standard_repn. This will not detect exp(x*y). We require a
    # factorization transformation to be applied beforehand in order to pick
    # these constraints up.
    bilinear_map = ComponentMap()
    for constr in model.component_data_objects(
            Constraint, active=True, descend_into=(Block, Disjunct)):
        if constr.body.polynomial_degree() in (1, 0):
            continue  # Skip trivial and linear constraints
        repn = generate_standard_repn(constr.body)
        for pair in repn.quadratic_vars:
            v1, v2 = pair
            v1_pairs = bilinear_map.get(v1, ComponentMap())
            if v2 in v1_pairs:
                # bilinear term has been found before. Simply add constraint to
                # the set associated with the bilinear term.
                v1_pairs[v2].add(constr)
            else:
                # We encounter the bilinear term for the first time.
                # Register the (possibly new) inner maps for both variables
                # and share one constraint set between the two directions.
                bilinear_map[v1] = v1_pairs
                bilinear_map[v2] = bilinear_map.get(v2, ComponentMap())
                constraints_with_bilinear_pair = ComponentSet([constr])
                bilinear_map[v1][v2] = constraints_with_bilinear_pair
                bilinear_map[v2][v1] = constraints_with_bilinear_pair
    return bilinear_map
    # NOTE: a stray no-op ``pass`` statement after the TODO comment was
    # removed; it had no effect on behavior.
def _get_equality_linked_variables(constraint):
    """Return the two variables linked by an equality constraint x == y.

    If the constraint does not match this form, skip it.
    """
    if value(constraint.lower) != 0 or value(constraint.upper) != 0:
        # LB and UB on constraint must be zero; otherwise, return empty tuple.
        return ()
    if constraint.body.polynomial_degree() != 1:
        # must be a linear constraint; otherwise, return empty tuple.
        return ()
    # Generate the standard linear representation
    repn = generate_standard_repn(constraint.body)
    nonzero_coef_vars = tuple(v for i, v in enumerate(repn.linear_vars)
                              # if coefficient on variable is nonzero
                              if repn.linear_coefs[i] != 0)
    if len(nonzero_coef_vars) != 2:
        # Expect two variables with nonzero coefficient in constraint;
        # otherwise, return empty tuple.
        return ()
    if sorted(coef for coef in repn.linear_coefs if coef != 0) != [-1, 1]:
        # Expect a constraint of form x == y --> 0 == -1 * x + 1 * y;
        # otherwise, return empty tuple.
        return ()
    # Above checks are satisfied. Return the variables.
    return nonzero_coef_vars
def add_lazy_no_good_cuts(self, var_values, solve_data, config, opt,
                          feasible=False):
    """Adds no-good cuts.

    Add the no-good cuts through Cplex inherent function self.add().

    Args:
        var_values (list): values of the current variables, used to generate
            the cut.
        solve_data (MindtPySolveData): data container that holds
            solve-instance data.
        config (ConfigBlock): the specific configurations for MindtPy.
        opt (SolverFactory): cplex_persistent.
        feasible (bool, optional): whether the integer combination yields a
            feasible or infeasible NLP. Defaults to False.

    Raises:
        ValueError: binary variable is not 0 or 1
    """
    if not config.add_no_good_cuts:
        return
    config.logger.info('Adding no-good cuts')
    with time_code(solve_data.timing, 'No-good cut generation'):
        m = solve_data.mip
        MindtPy = m.MindtPy_utils
        int_tol = config.integer_tolerance
        binary_vars = [v for v in MindtPy.variable_list if v.is_binary()]
        # copy variable values over
        for var, val in zip(MindtPy.variable_list, var_values):
            if not var.is_binary():
                continue
            var.set_value(val, skip_validation=True)
        # check to make sure that binary variables are all 0 or 1
        for v in binary_vars:
            if value(abs(v - 1)) > int_tol and value(abs(v)) > int_tol:
                raise ValueError('Binary {} = {} is not 0 or 1'.format(
                    v.name, value(v)))
        if not binary_vars:  # if no binary variables, skip
            return
        # No-good cut excluding the current binary assignment:
        # flip every variable at 1 via (1 - v) and keep every variable
        # at 0 as v, then require the sum to be at least 1.
        pyomo_no_good_cut = sum(
            1 - v for v in binary_vars
            if value(abs(v - 1)) <= int_tol) + sum(
            v for v in binary_vars if value(abs(v)) <= int_tol)
        # Constant part of the cut moves to the right-hand side.
        cplex_no_good_rhs = generate_standard_repn(
            pyomo_no_good_cut).constant
        cplex_no_good_cut, _ = opt._get_expr_from_pyomo_expr(
            pyomo_no_good_cut)
        self.add(constraint=cplex.SparsePair(
            ind=cplex_no_good_cut.variables,
            val=cplex_no_good_cut.coefficients),
            sense='G',
            rhs=1 - cplex_no_good_rhs)
def test_equality_one_uncparam(self):
    """LDR transformation of an equality constraint with one UncParam.

    After the transformation, each generated constraint should have a
    constant of 1 and coefficients -2 on a pair of decision-rule
    coefficient variables (order-independent, hence the sorted-id pair
    baseline).
    """
    m = pe.ConcreteModel()
    m.w = ro.UncParam([0, 1, 2])
    m.y = ro.AdjustableVar([0, 1], uncparams=[m.w])
    m.cons = pe.Constraint(expr=(sum(m.w[i] for i in m.w)
                                 == sum(2 * m.y[i] for i in m.y)))
    t = LDRAdjustableTransformation()
    t.apply_to(m)
    self.assertTrue(hasattr(m, 'cons_ldr'))
    # Build the baseline as order-normalized id pairs of the two
    # decision-rule coefficient variables for each uncertain parameter.
    baseline = set()
    for i in m.w:
        id1 = id(m.y_w_coef[0, i])
        id2 = id(m.y_w_coef[1, i])
        if id1 < id2:
            baseline.add((id1, id2))
        else:
            baseline.add((id2, id1))
    for c in m.cons_ldr.values():
        repn = generate_standard_repn(c.body)
        self.assertEqual(repn.constant, 1)
        self.assertEqual(len(repn.linear_vars), 2)
        self.assertEqual(repn.linear_coefs, (-2, -2))
        self.assertEqual(len(repn.quadratic_vars), 0)
        id1 = id(repn.linear_vars[0])
        id2 = id(repn.linear_vars[1])
        if id1 < id2:
            self.assertIn((id1, id2), baseline)
        else:
            self.assertIn((id2, id1), baseline)
def pass_single_value(self, port, name, member, val, fixed):
    """
    Fix the value of the port member and add it to the fixed set.
    If the member is an expression, appropriately fix the value of
    its free variable. Error if the member is already fixed but
    different from val, or if the member has more than one free
    variable.
    """
    eq_tol = self.options["almost_equal_tol"]
    if member.is_fixed():
        # Already fixed: only accept if it agrees with val within tolerance.
        if abs(value(member) - val) > eq_tol:
            raise RuntimeError(
                "Member '%s' of port '%s' is already fixed but has a "
                "different value (by > %s) than what is being passed to it"
                % (name, port.name, eq_tol))
    elif member.is_expression_type():
        # Solve member == val for the expression's single free variable.
        repn = generate_standard_repn(member - val)
        if repn.is_linear() and len(repn.linear_vars) == 1:
            # fix the value of the single variable
            fval = (0 - repn.constant) / repn.linear_coefs[0]
            var = repn.linear_vars[0]
            fixed.add(var)
            var.fix(fval)
        else:
            raise RuntimeError(
                "Member '%s' of port '%s' had more than "
                "one free variable when trying to pass a value "
                "to it. Please fix more variables before passing "
                "to this port." % (name, port.name))
    else:
        fixed.add(member)
        member.fix(val)
def constraint_generator():
    """Yield (constraint_data, repn) pairs for active binding constraints.

    Iterates the enclosing scope's ``all_blocks``; caches generated
    repns on each block in a ``_repn`` ComponentMap so they can be
    reused when repn generation is disabled (``_gen_con_repn`` False).
    """
    for block in all_blocks:
        gen_con_repn = getattr(block, "_gen_con_repn", True)
        # Get/Create the ComponentMap for the repn
        if not hasattr(block, '_repn'):
            block._repn = ComponentMap()
        block_repn = block._repn
        for constraint_data in block.component_data_objects(
                Constraint, active=True, sort=sortOrder,
                descend_into=False):
            if (not constraint_data.has_lb()) and \
                    (not constraint_data.has_ub()):
                assert not constraint_data.equality
                continue  # non-binding, so skip
            if constraint_data._linear_canonical_form:
                repn = constraint_data.canonical_form()
            elif gen_con_repn:
                repn = generate_standard_repn(constraint_data.body)
                # cache so a later pass with _gen_con_repn False can reuse it
                block_repn[constraint_data] = repn
            else:
                repn = block_repn[constraint_data]
            yield constraint_data, repn
def generate_linear_repn(expr, evaluate=False):
    """Given an expression containing UncParam return its linear
    representation.

    :param expr: A pyomo expression
    :type expr: class:`pyomo.core.expr.numeric_expr.ExpressionBase`
    :param evaluate: If true, evaluate fixed expressions
    :type evaluate: bool, optional

    :return: Standard representation containing a constant term (wrt
        UncParam), linear coefficients, and the corresponding UncParam
        objects.
    :rtype: pyomo.repn.StandardRepn

    .. note:: ``evaluate`` is currently not consulted —
        ``compute_values=False`` is always passed; kept for interface
        compatibility (TODO confirm intended semantics with callers).
    """
    # Temporarily fix every regular Var so that only UncParam objects
    # are treated as variables by generate_standard_repn.
    _fixed = []
    for v in identify_variables(expr):
        if not isinstance(v.parent_component(), UncParam) and not v.fixed:
            v.fix()
            _fixed.append(v)
    repn = generate_standard_repn(expr, compute_values=False,
                                  quadratic=False)
    # Restore the original fixed/unfixed state.
    for v in _fixed:
        v.unfix()
    return repn
def test_disaggregation_constraints(self):
    """Check the chull disaggregation constraints for an indexed disjunction.

    Each original variable must equal the sum of its two disaggregated
    copies: x[i] - x_disj1[i] - x_disj2[i] == 0.
    """
    m = models.makeTwoTermIndexedDisjunction()
    TransformationFactory('gdp.chull').apply_to(m)
    disaggregationCons = m._gdp_chull_relaxation_disjunction_disaggregation
    relaxedDisjuncts = m._pyomo_gdp_chull_relaxation.relaxedDisjuncts
    self.assertIsInstance(disaggregationCons, Constraint)
    self.assertEqual(len(disaggregationCons), 3)

    # Map each disaggregation-constraint index to the pair of
    # disaggregated copies of the corresponding original variable.
    disaggregatedVars = {
        (1, 0): [
            relaxedDisjuncts[0].component('x[1]'),
            relaxedDisjuncts[1].component('x[1]')
        ],
        (2, 0): [
            relaxedDisjuncts[2].component('x[2]'),
            relaxedDisjuncts[3].component('x[2]')
        ],
        (3, 0): [
            relaxedDisjuncts[4].component('x[3]'),
            relaxedDisjuncts[5].component('x[3]')
        ],
    }

    for i, disVars in iteritems(disaggregatedVars):
        cons = disaggregationCons[i]
        # equality constraint with zero bounds and no constant
        self.assertEqual(cons.lower, 0)
        self.assertEqual(cons.upper, 0)
        repn = generate_standard_repn(cons.body)
        self.assertTrue(repn.is_linear())
        self.assertEqual(repn.constant, 0)
        self.assertEqual(len(repn.linear_vars), 3)
        check_linear_coef(self, repn, m.x[i[0]], 1)
        check_linear_coef(self, repn, disVars[0], -1)
        check_linear_coef(self, repn, disVars[1], -1)
def test_build_linking_constraints(self):
    """Check _build_linking_constraints for each kind of linked object.

    Links a constant, a data expression, a variable, and a general
    expression to auxiliary variables, and verifies the type and form
    (rhs and terms) of each generated constraint.
    """
    # Empty input yields an empty constraint tuple.
    c = _build_linking_constraints([], [])
    self.assertIs(type(c), constraint_tuple)
    self.assertEqual(len(c), 0)
    v = [1, data_expression(), variable(), expression(expr=1.0)]
    vaux = [variable(), variable(), variable(), variable()]
    c = _build_linking_constraints(v, vaux)
    self.assertIs(type(c), constraint_tuple)
    self.assertEqual(len(c), 4)
    # Constant link: vaux[0] == 1 as a linear constraint.
    self.assertIs(type(c[0]), linear_constraint)
    self.assertEqual(c[0].rhs, 1)
    self.assertEqual(len(list(c[0].terms)), 1)
    self.assertIs(list(c[0].terms)[0][0], vaux[0])
    self.assertEqual(list(c[0].terms)[0][1], 1)
    # Data-expression link: vaux[1] == v[1], rhs holds the expression.
    self.assertIs(type(c[1]), linear_constraint)
    self.assertIs(c[1].rhs, v[1])
    self.assertEqual(len(list(c[1].terms)), 1)
    self.assertIs(list(c[1].terms)[0][0], vaux[1])
    self.assertEqual(list(c[1].terms)[0][1], 1)
    # Variable link: vaux[2] - v[2] == 0.
    self.assertIs(type(c[2]), linear_constraint)
    self.assertEqual(c[2].rhs, 0)
    self.assertEqual(len(list(c[2].terms)), 2)
    self.assertIs(list(c[2].terms)[0][0], vaux[2])
    self.assertEqual(list(c[2].terms)[0][1], 1)
    self.assertIs(list(c[2].terms)[1][0], v[2])
    self.assertEqual(list(c[2].terms)[1][1], -1)
    # General-expression link: a (nonlinear-capable) constraint whose
    # body standard repn is vaux[3] - 1 == 0.
    self.assertIs(type(c[3]), constraint)
    self.assertEqual(c[3].rhs, 0)
    from pyomo.repn import generate_standard_repn
    repn = generate_standard_repn(c[3].body)
    self.assertEqual(len(repn.linear_vars), 1)
    self.assertIs(repn.linear_vars[0], vaux[3])
    self.assertEqual(repn.linear_coefs[0], 1)
    self.assertEqual(repn.constant, -1)
def test_transformed_constraint_nonlinear(self):
    """Check the chull relaxation of the nonlinear constraint on d[0].

    Verifies the relaxed constraint's bounds and, via a string
    comparison of the body, that the perspective-function substitution
    (EPS-scaled indicator variable) was applied correctly.
    """
    m = models.makeTwoTermDisj_Nonlinear()
    TransformationFactory('gdp.chull').apply_to(m)
    disjBlock = m._pyomo_gdp_chull_relaxation.relaxedDisjuncts

    # the only constraint on the first block is the non-linear one
    disj1c = disjBlock[0].component("d[0].c")
    self.assertIsInstance(disj1c, Constraint)
    # we only have an upper bound
    self.assertEqual(len(disj1c), 1)
    cons = disj1c['ub']
    self.assertIsNone(cons.lower)
    self.assertEqual(cons.upper, 0)
    repn = generate_standard_repn(cons.body)
    self.assertFalse(repn.is_linear())
    self.assertEqual(len(repn.linear_vars), 1)
    # This is a weak test, but as good as any to ensure that the
    # substitution was done correctly
    EPS_1 = 1 - EPS
    self.assertEqual(
        str(cons.body),
        "(%s*d[0].indicator_var + %s)*("
        "_pyomo_gdp_chull_relaxation.relaxedDisjuncts[0].x*"
        "(1/(%s*d[0].indicator_var + %s)) + "
        "(_pyomo_gdp_chull_relaxation.relaxedDisjuncts[0].y*"
        "(1/(%s*d[0].indicator_var + %s)))**2) - "
        "%s*(0 + 0**2)*(1 - d[0].indicator_var) - 14.0*d[0].indicator_var"
        % (EPS_1, EPS, EPS_1, EPS, EPS_1, EPS, EPS))
def _apply_to(self, model):
    """Apply the transformation.

    Rebuilds every active linear constraint in *model*, dropping terms
    whose coefficient is exactly zero while preserving the constraint's
    sense (equality, one-sided, or bounded inequality).
    """
    m = model
    for constr in m.component_data_objects(
            ctype=Constraint, active=True, descend_into=True):
        if not constr.body.polynomial_degree() == 1:
            continue  # we currently only process linear constraints
        repn = generate_standard_repn(constr.body)

        # get the index of all nonzero coefficient variables
        nonzero_vars_indx = [
            i for i, _ in enumerate(repn.linear_vars)
            if not repn.linear_coefs[i] == 0
        ]
        const = repn.constant

        # reconstitute the constraint, including only variable terms with
        # nonzero coefficients
        constr_body = quicksum(repn.linear_coefs[i] * repn.linear_vars[i]
                               for i in nonzero_vars_indx) + const
        if constr.equality:
            constr.set_value(constr_body == constr.upper)
        elif constr.has_lb() and not constr.has_ub():
            constr.set_value(constr_body >= constr.lower)
        elif constr.has_ub() and not constr.has_lb():
            constr.set_value(constr_body <= constr.upper)
        else:
            # constraint is a bounded inequality of form a <= x <= b.
            constr.set_value(EXPR.inequality(
                constr.lower, constr_body, constr.upper))
def test_initialize_param_from_ndarray(self):
    """Params initialized from numpy values must produce a clean repn.

    Regression test for issue #611: initializing Params via rules that
    return numpy arrays/scalars should still allow a quadratic standard
    repn with sensible sign structure (positive quadratic coefficients,
    negative linear coefficients, positive constant) for the
    least-squares objective.
    """
    # Test issue #611
    samples = 10
    c1 = .5
    c2 = .5

    model = ConcreteModel()
    model.i = RangeSet(samples)

    def init_x(model, i):
        # returns a 1-element numpy array, exercising the ndarray path
        return np.random.rand(1)

    def init_y(model, i):
        return c1 * (model.x[i]**2) + c2 * model.x[i]

    model.x = Param(model.i, initialize=init_x)
    model.y = Param(model.i, initialize=init_y, domain=Reals)
    model.c_1 = Var(initialize=1)
    model.c_2 = Var(initialize=1)
    model.error = Objective(
        # Sum squared error of quadratic fit
        expr=sum((model.c_1 * model.x[i]**2 + model.c_2 * model.x[i]
                  - model.y[i])**2 for i in model.i))
    #model.pprint()

    repn = generate_standard_repn(model.error.expr, compute_values=True)
    self.assertIsNone(repn.nonlinear_expr)
    self.assertEqual(len(repn.quadratic_vars), 3)
    for i in range(3):
        self.assertGreater(repn.quadratic_coefs[i], 0)
    self.assertEqual(len(repn.linear_vars), 2)
    for i in range(2):
        self.assertLess(repn.linear_coefs[i], 0)
    self.assertGreater(repn.constant, 0)
def _bilinear_expressions(model):
    """Collect the bilinear terms appearing in the model's constraints.

    Returns a ComponentMap mapping each variable x to a ComponentMap of
    its bilinear partners, each of which maps to the ComponentSet of
    constraints containing that x*y term:
    x --> (y --> [constr1, constr2, ...], z --> [constr2, constr3])
    """
    # TODO for now, we look for only expressions where the bilinearities are
    # exposed on the root level SumExpression, and thus accessible via
    # generate_standard_repn. This will not detect exp(x*y). We require a
    # factorization transformation to be applied beforehand in order to pick
    # these constraints up.
    bilinear_map = ComponentMap()
    for constr in model.component_data_objects(Constraint,
                                               active=True,
                                               descend_into=(Block,
                                                             Disjunct)):
        if constr.body.polynomial_degree() in (1, 0):
            continue  # Skip trivial and linear constraints
        repn = generate_standard_repn(constr.body)
        for pair in repn.quadratic_vars:
            v1, v2 = pair
            v1_pairs = bilinear_map.get(v1, ComponentMap())
            if v2 in v1_pairs:
                # bilinear term has been found before. Simply add constraint to
                # the set associated with the bilinear term.
                v1_pairs[v2].add(constr)
            else:
                # We encounter the bilinear term for the first time.
                # Register the (possibly new) inner maps for both variables
                # and share one constraint set between the two directions.
                bilinear_map[v1] = v1_pairs
                bilinear_map[v2] = bilinear_map.get(v2, ComponentMap())
                constraints_with_bilinear_pair = ComponentSet([constr])
                bilinear_map[v1][v2] = constraints_with_bilinear_pair
                bilinear_map[v2][v1] = constraints_with_bilinear_pair
    return bilinear_map
    # NOTE: a stray no-op ``pass`` statement after the TODO comment was
    # removed; it had no effect on behavior.
def _estimate_M(self, expr, name):
    """Calculate a best guess at the big-M values (M_lo, M_hi) for *expr*.

    Parameters
    ----------
    expr : Pyomo expression
        The (linear) constraint body to bound.
    name : str
        Constraint name, used only in error messages.

    Returns
    -------
    tuple
        (lower, upper) interval bounds on the expression value.

    Raises
    ------
    GDP_Error
        If the expression is nonlinear or contains an unbounded variable.
    """
    repn = generate_standard_repn(expr)
    M = [0, 0]
    if not repn.is_nonlinear():
        if repn.constant is not None:
            for i in (0, 1):
                # NOTE: M entries start at 0 and are never set to None in
                # this routine, so this guard never fires; kept for safety.
                if M[i] is not None:
                    M[i] += repn.constant
        # Use zip instead of enumerate so the bound-direction loop below
        # cannot shadow the term index (the original reused ``i`` for both).
        for coef, var in zip(repn.linear_coefs or [], repn.linear_vars):
            bounds = (value(var.lb), value(var.ub))
            for k in (0, 1):
                # reverse the bounds if the coefficient is negative
                if coef > 0:
                    j = k
                else:
                    j = 1 - k

                if bounds[k] is not None:
                    M[j] += value(bounds[k]) * coef
                else:
                    raise GDP_Error(
                        "Cannot estimate M for "
                        "expressions with unbounded variables."
                        "\n\t(found unbounded var %s while processing "
                        "constraint %s)" % (var.name, name))
    else:
        raise GDP_Error("Cannot estimate M for nonlinear "
                        "expressions.\n\t(found while processing "
                        "constraint %s)" % name)
    return tuple(M)
def constraint_generator():
    """Yield (constraint_data, repn) for every binding active constraint.

    Walks the closed-over ``all_blocks`` in ``sortOrder``, caching each
    generated standard repn on the owning block's ``_repn`` ComponentMap
    (unless the block opts out via ``_gen_con_repn``, in which case the
    cached repn is reused).
    """
    for block in all_blocks:
        gen_con_repn = getattr(block, "_gen_con_repn", True)
        # Make sure the per-block repn cache exists.
        if not hasattr(block, '_repn'):
            block._repn = ComponentMap()
        block_repn = block._repn
        for con in block.component_data_objects(Constraint,
                                                active=True,
                                                sort=sortOrder,
                                                descend_into=False):
            if not (con.has_lb() or con.has_ub()):
                # Non-binding constraint (an equality would have bounds).
                assert not con.equality
                continue
            if con._linear_canonical_form:
                repn = con.canonical_form()
            elif gen_con_repn:
                repn = generate_standard_repn(con.body)
                block_repn[con] = repn
            else:
                repn = block_repn[con]
            yield con, repn
def is_polyhedral(self):
    """Return True if every active constraint on this set is linear.

    On success, stores the constraint system in A*w <= b form as
    ``self.mat`` (one {index: coef} dict per row over the UncParam's
    index set) and ``self.rhs``.  Returns False as soon as a nonlinear
    constraint is found (without touching ``self.mat``/``self.rhs``).
    """
    rows = []
    rhs = []
    param = None
    for con in self.component_data_objects(Constraint, active=True):
        # All constraints must reference one and the same UncParam.
        for p in identify_parent_components(con.body, [UncParam]):
            if param is None:
                param = p
            else:
                assert param is p, ("Uncertainty set {} should "
                                    "only contain one UncParam "
                                    "component.".format(self.name))
        con_repn = generate_standard_repn(con.body)
        # A single nonlinear constraint rules out polyhedrality.
        if not con_repn.is_linear():
            return False
        coefs = {id(v): c for v, c in zip(con_repn.linear_vars,
                                          con_repn.linear_coefs)}
        if con.has_ub():
            rows.append({i: coefs.get(id(param[i]), 0) for i in param})
            rhs.append(con.upper - con_repn.constant)
        elif con.has_lb():
            # Negate so the row still reads  a^T w <= b.
            rows.append({i: -coefs.get(id(param[i]), 0) for i in param})
            rhs.append(con_repn.constant - con.lower)
    self.mat = rows
    self.rhs = rhs
    return True
def test_induced_linearity_case2(self):
    """Induced-linearity reformulation of x[0] pinned by binaries and z."""
    m = ConcreteModel()
    m.x = Var([0], bounds=(-3, 8))
    m.y = Var(RangeSet(4), domain=Binary)
    m.z = Var(domain=Integers, bounds=(-1, 2))
    m.constr = Constraint(
        expr=m.x[0] == m.y[1] + 2 * m.y[2] + m.y[3] + 2 * m.y[4] + m.z)
    m.logical = ConstraintList()
    m.logical.add(expr=m.y[1] + m.y[2] == 1)
    m.logical.add(expr=m.y[3] + m.y[4] == 1)
    m.logical.add(expr=m.y[2] + m.y[4] <= 1)
    m.b = Var(bounds=(-2, 7))
    m.c = Var()
    m.bilinear = Constraint(
        expr=(m.x[0] - 3) * (m.b + 2) - (m.c + 4) * m.b
        + exp(m.b ** 2) * m.x[0] <= m.c)
    TransformationFactory('contrib.induced_linearity').apply_to(m)
    blk = m._induced_linearity_info.x0_b_bilinear
    # With the logical constraints above, x[0] only takes these values.
    self.assertSetEqual(set(blk.valid_values), {1, 2, 3, 4, 5})
    # The select-one constraint must range over exactly the indicator
    # variables for those valid values.
    repn = generate_standard_repn(blk.select_one_value.body)
    self.assertEqual(
        ComponentSet(repn.linear_vars),
        ComponentSet(blk.x_active[v] for v in blk.valid_values))
def is_ellipsoidal(self):
    """Return True if this uncertainty set is ellipsoidal.

    Checks that the single active constraint has a quadratic standard
    repn and that its (symmetrized) quadratic coefficient matrix is
    definite in the direction consistent with the constraint's bound
    (positive definite for an upper bound, negative definite for a
    lower bound).

    Side effects on success path: populates ``self.mean`` and
    ``self.cov`` keyed by the UncParam's indices.
    """
    # TODO: assumes there is only one constraint on UncSet
    for c in self.component_data_objects(Constraint, active=True):
        repn = generate_standard_repn(c.body)
        if not repn.is_quadratic():
            return False
        # TODO: assumes implicitly that there is one UncParam per UncSet
        param = repn.quadratic_vars[0][0].parent_component()
        # Collect covariance matrix and mean
        # NOTE: the dict value variable ``c`` shadows the loop's
        # constraint ``c``; safe only because ``c`` is not reused as a
        # constraint until the final return expression's closure re-reads
        # it -- and the comprehension variable does not leak in Py3.
        quadratic_coefs = {(id(x[0]), id(x[1])): c
                           for x, c in zip(repn.quadratic_vars,
                                           repn.quadratic_coefs)}
        cov = [[quadratic_coefs.get((id(param[i]), id(param[j])), 0)
                for i in param] for j in param]
        cov = np.array(cov)
        # Symmetrize: the repn stores each off-diagonal product once.
        cov = 1/2*(cov + cov.T)
        eig, _ = np.linalg.eig(cov)
        invcov = np.linalg.inv(cov)
        # Center of the ellipsoid: mean = -1/2 * cov^-1 * linear_coefs.
        # NOTE(review): assumes repn.linear_coefs is ordered consistently
        # with iteration over ``param`` -- confirm.
        mean = -1/2*np.matmul(invcov, np.array(repn.linear_coefs))
        self.mean = {x: mean[i] for i, x in enumerate(param)}
        self.cov = {(x, y): cov[i, j] for i, x in enumerate(param)
                    for j, y in enumerate(param)}
        # TODO: need to check repn.constant == mean^T * cov * mean?
        return ((c.has_ub() and np.all(eig > 0))
                or (c.has_lb() and np.all(eig < 0)))
def test_equality_constraints_on_disjuncts_with_fme(self):
    """FME cut generation must also project equality constraints."""
    m = models.oneVarDisj_2pts()
    m.obj.expr = m.x + m.disj1.indicator_var
    m.obj.sense = maximize
    TransformationFactory('gdp.cuttingplane').apply_to(
        m, create_cuts=create_cuts_fme, post_process_cut=None, verbose=True,
        solver='gurobi',
        # don't actually need this, but taking the excuse to set solver
        # options
        solver_options={'FeasibilityTol': 1e-8},
        cuts_name="cuts", bigM=5)
    # rBigM's first iteration solve gives (x = 3, Y = 0.6).  Unless
    # equality constraints are caught, no cut is produced; we need
    # x + Y <= 1 (where Y is the indicator that x = 0).
    self.assertEqual(len(m.cuts), 1)
    cut = m.cuts[0]
    self.assertEqual(cut.lower, 0)
    self.assertIsNone(cut.upper)
    repn = generate_standard_repn(cut.body)
    self.assertTrue(repn.is_linear())
    # Cut body is  indicator_var - x  (>= 0), in that term order.
    expected_terms = [(m.disj1.indicator_var, 1), (m.x, -1)]
    self.assertEqual(len(repn.linear_vars), len(expected_terms))
    for pos, (var, coef) in enumerate(expected_terms):
        self.assertIs(repn.linear_vars[pos], var)
        self.assertEqual(repn.linear_coefs[pos], coef)
def pass_single_value(self, port, name, member, val, fixed):
    """
    Fix the value of the port member and add it to the fixed set.
    If the member is an expression, appropriately fix the value of
    its free variable. Error if the member is already fixed but
    different from val, or if the member has more than one free
    variable.
    """
    eq_tol = self.options["almost_equal_tol"]
    if member.is_fixed():
        # Already fixed: only verify agreement with the incoming value.
        if abs(value(member) - val) > eq_tol:
            raise RuntimeError(
                "Member '%s' of port '%s' is already fixed but has a "
                "different value (by > %s) than what is being passed to it"
                % (name, port.name, eq_tol))
    elif member.is_expression_type():
        # member == val must pin down exactly one free variable:
        # solve  linear_coef * var + constant == 0  for var.
        repn = generate_standard_repn(member - val)
        if repn.is_linear() and len(repn.linear_vars) == 1:
            # fix the value of the single variable
            fval = (0 - repn.constant) / repn.linear_coefs[0]
            var = repn.linear_vars[0]
            fixed.add(var)
            # val are numpy.float64; coerce val back to float
            var.fix(float(fval))
        else:
            raise RuntimeError(
                "Member '%s' of port '%s' had more than "
                "one free variable when trying to pass a value "
                "to it. Please fix more variables before passing "
                "to this port." % (name, port.name))
    else:
        # Plain variable member: record and fix it directly.
        fixed.add(member)
        # val are numpy.float64; coerce val back to float
        member.fix(float(val))
def test_cut_constraint(self):
    """Verify the first cut's bound, coefficients, and constant term."""
    m = self.makeModel()
    TransformationFactory('gdp.cuttingplane').apply_to(m)
    cut = m._pyomo_gdp_cuttingplane_relaxation.cuts[0]
    self.assertEqual(cut.lower, 0)
    self.assertIsNone(cut.upper)
    # (variable, expected coefficient, xhat) triples for the cut:
    expected_cut = [
        (m.x, 0.45, 2.7),
        (m.y, 0.55, 1.3),
        (m.d[0].indicator_var, 0.1, 0.85),
        (m.d[1].indicator_var, -0.1, 0.15),
    ]
    # test body
    repn = generate_standard_repn(cut.body)
    self.assertTrue(repn.is_linear())
    self.assertEqual(len(repn.linear_vars), len(expected_cut))
    for var, coef, _ in expected_cut:
        check_linear_coef(self, repn, var, coef)
    # Constant equals -sum(coef*xhat), so the cut passes through xhat.
    self.assertAlmostEqual(
        repn.constant, -sum(c * x for _, c, x in expected_cut), 5)
def _apply_to(self, model, **kwds):
    """Tighten variable bounds from single-variable linear constraints.

    For every active constraint whose body is linear in exactly one
    variable (k * x + c1), the constraint's lower/upper values imply new
    bounds on x.  Bounds of discrete variables are then rounded to
    integers, the variable's value is nudged back into its bounds if
    needed, and (optionally, ``config.detect_fixed``) variables whose
    bounds coincide within ``config.tolerance`` are fixed.  Processed
    constraints are deactivated.

    NOTE(review): this block appears to duplicate another ``_apply_to``
    elsewhere in this file -- consider consolidating.
    """
    config = self.CONFIG(kwds)
    for constr in model.component_data_objects(ctype=Constraint,
                                               active=True,
                                               descend_into=True):
        # Check if the constraint is k * x + c1 <= c2 or c2 <= k * x + c1
        repn = generate_standard_repn(constr.body)
        if not repn.is_linear() or len(repn.linear_vars) != 1:
            # Skip nonlinear constraints, trivial constraints, and those
            # that involve more than one variable.
            continue
        else:
            var = repn.linear_vars[0]
            const = repn.constant
            coef = float(repn.linear_coefs[0])
        if coef == 0:
            # Skip trivial constraints
            continue
        elif coef > 0:
            # Dividing by a positive coefficient preserves the
            # inequality direction: ub stays an ub, lb stays an lb.
            if constr.has_ub():
                new_ub = (value(constr.upper) - const) / coef
                var_ub = float('inf') if var.ub is None else var.ub
                var.setub(min(var_ub, new_ub))
            if constr.has_lb():
                new_lb = (value(constr.lower) - const) / coef
                var_lb = float('-inf') if var.lb is None else var.lb
                var.setlb(max(var_lb, new_lb))
        elif coef < 0:
            # Dividing by a negative coefficient flips the inequality:
            # the constraint's ub implies a lb on x, and vice versa.
            if constr.has_ub():
                new_lb = (value(constr.upper) - const) / coef
                var_lb = float('-inf') if var.lb is None else var.lb
                var.setlb(max(var_lb, new_lb))
            if constr.has_lb():
                new_ub = (value(constr.lower) - const) / coef
                var_ub = float('inf') if var.ub is None else var.ub
                var.setub(min(var_ub, new_ub))
        if var.is_integer() or var.is_binary():
            # Make sure that the lb and ub are integral. Use safe
            # construction if near to integer: allow an lb that is within
            # tolerance of an integer to round *down* to it.
            if var.has_lb():
                var.setlb(
                    int(
                        min(math.ceil(var.lb - config.tolerance),
                            math.ceil(var.lb))))
            if var.has_ub():
                var.setub(
                    int(
                        max(math.floor(var.ub + config.tolerance),
                            math.floor(var.ub))))
        if var is not None and var.value is not None:
            # Nudge the current value back inside the tightened bounds.
            _adjust_var_value_if_not_feasible(var)
        if (config.detect_fixed and var.has_lb() and var.has_ub()
                and fabs(value(var.lb) - value(var.ub)) <= config.tolerance):
            var.fix(var.lb)
        # The constraint's information now lives in the variable bounds.
        constr.deactivate()
def test_disc_single_index_radau(self):
    """Default (LAGRANGE-RADAU) collocation on a single-index derivative.

    nfe=5, ncp=3 yields 15 discretization equations and 16 time points.
    The baseline coefficients below are Radau differentiation-matrix
    values, compared after rounding to 5 decimal places.
    """
    m = self.m.clone()
    disc = TransformationFactory('dae.collocation')
    disc.apply_to(m, nfe=5, ncp=3)
    self.assertTrue(hasattr(m, 'dv1_disc_eq'))
    self.assertTrue(len(m.dv1_disc_eq) == 15)
    self.assertTrue(len(m.v1) == 16)
    # Radau collocation points on [0, 1] for ncp=3 ...
    expected_tau_points = [0.0, 0.1550510257216822, 0.64494897427831788,
                           1.0]
    # ... mapped onto the 5 finite elements of t in [0, 10].
    expected_disc_points = [0, 0.310102, 1.289898, 2.0, 2.310102,
                            3.289898, 4.0, 4.310102, 5.289898, 6.0,
                            6.310102, 7.289898, 8.0, 8.310102, 9.289898,
                            10]
    disc_info = m.t.get_discretization_info()
    self.assertTrue(disc_info['scheme'] == 'LAGRANGE-RADAU')
    for idx, val in enumerate(disc_info['tau_points']):
        self.assertAlmostEqual(val, expected_tau_points[idx])
    for idx, val in enumerate(list(m.t)):
        self.assertAlmostEqual(val, expected_disc_points[idx])
    self.assertTrue(hasattr(m, '_pyomo_dae_reclassified_derivativevars'))
    self.assertTrue(m._pyomo_dae_reclassified_derivativevars[0] is m.dv1)
    # Spot-check the coefficients of two discretization equations (at the
    # ends of the first and second finite elements).
    repn_baseline = {id(m.dv1[2.0]): 1.0,
                     id(m.v1[0]): 1.5,
                     id(m.v1[0.310102]): -2.76599,
                     id(m.v1[1.289898]): 3.76599,
                     id(m.v1[2.0]): -2.5}
    repn = generate_standard_repn(m.dv1_disc_eq[2.0].body)
    repn_gen = repn_to_rounded_dict(repn, 5)
    self.assertEqual(repn_baseline, repn_gen)
    repn_baseline = {id(m.dv1[4.0]): 1.0,
                     id(m.v1[2.0]): 1.5,
                     id(m.v1[2.310102]): -2.76599,
                     id(m.v1[3.289898]): 3.76599,
                     id(m.v1[4.0]): -2.5}
    repn = generate_standard_repn(m.dv1_disc_eq[4.0].body)
    repn_gen = repn_to_rounded_dict(repn, 5)
    self.assertEqual(repn_baseline, repn_gen)
def check_disaggregation_constraint(self, cons, var, disvar1, disvar2):
    """Assert ``cons`` is the equality  var - disvar1 - disvar2 == 0."""
    self.assertEqual(cons.lower, 0)
    self.assertEqual(cons.upper, 0)
    repn = generate_standard_repn(cons.body)
    self.assertEqual(len(repn.linear_vars), 3)
    # Body is var - disvar1 - disvar2.
    for v, coef in ((var, 1), (disvar1, -1), (disvar2, -1)):
        check_linear_coef(self, repn, v, coef)
def _fix_equality_fixed_variables(model, scaling_tolerance=1E-10):
    """Detects variables fixed by a constraint: ax=b.

    Fixes the variable to the constant value (b/a) and deactivates the
    relevant constraint. This sub-transformation is different than
    contrib.detect_fixed_vars because it looks for x = const rather than
    x.lb = x.ub.

    Args:
        model: block on which to look for variable-fixing constraints.
        scaling_tolerance: coefficients with magnitude at or below this
            value are considered too badly scaled to divide by; the
            constraint is skipped with a warning.
    """
    for constraint in model.component_data_objects(
            ctype=Constraint, active=True, descend_into=True):
        if not (constraint.has_lb() and constraint.has_ub()):
            # Constraint is not an equality. Skip.
            continue
        if value(constraint.lower) != value(constraint.upper):
            # Constraint is not an equality. Skip.
            continue
        if constraint.body.polynomial_degree() != 1:
            # Constraint is not linear. Skip.
            continue
        # Generate the standard linear representation
        repn = generate_standard_repn(constraint.body)
        # Generator of (coefficient, variable) tuples for nonzero
        # coefficients.
        nonzero_coef_vars = (
            (repn.linear_coefs[i], v)
            for i, v in enumerate(repn.linear_vars)
            # if coefficient on variable is nonzero
            if repn.linear_coefs[i] != 0)
        # BUGFIX: use a default so a body with no nonzero coefficients
        # does not raise StopIteration out of this function.
        coef, var = next(nonzero_coef_vars, (None, None))
        if var is None:
            # No variable with a nonzero coefficient. Skip.
            continue
        if next(nonzero_coef_vars, None) is not None:
            # Expect one variable with nonzero coefficient in constraint;
            # otherwise, skip.
            continue
        # Constant term on the constraint body
        const = repn.constant if repn.constant is not None else 0
        if abs(coef) <= scaling_tolerance:
            # logger.warn is deprecated; logger.warning is the modern API.
            logger.warning(
                "Skipping fixed variable processing for constraint %s: "
                "%s * %s + %s = %s because coefficient %s is below "
                "tolerance of %s. Check your problem scaling." % (
                    constraint.name, coef, var.name, const,
                    value(constraint.lower), coef, scaling_tolerance))
            continue
        # Constraint has form lower <= coef * var + const <= upper. We
        # know that lower = upper, so coef * var + const = lower.
        var_value = (value(constraint.lower) - const) / coef
        var.fix(var_value)
        constraint.deactivate()
def _apply_to(self, model, **kwds):
    """Tighten variable bounds from single-variable linear constraints.

    For every active constraint whose body is linear in exactly one
    variable (k * x + c1), the constraint's lower/upper values imply new
    bounds on x.  Bounds of discrete variables are then rounded to
    integers, the variable's value is nudged back into its bounds if
    needed, and (optionally, ``config.detect_fixed``) variables whose
    bounds coincide within ``config.tolerance`` are fixed.  Processed
    constraints are deactivated.

    NOTE(review): this block appears to duplicate another ``_apply_to``
    elsewhere in this file -- consider consolidating.
    """
    config = self.CONFIG(kwds)
    for constr in model.component_data_objects(
            ctype=Constraint, active=True, descend_into=True):
        # Check if the constraint is k * x + c1 <= c2 or c2 <= k * x + c1
        repn = generate_standard_repn(constr.body)
        if not repn.is_linear() or len(repn.linear_vars) != 1:
            # Skip nonlinear constraints, trivial constraints, and those
            # that involve more than one variable.
            continue
        else:
            var = repn.linear_vars[0]
            const = repn.constant
            coef = float(repn.linear_coefs[0])
        if coef == 0:
            # Skip trivial constraints
            continue
        elif coef > 0:
            # Positive coefficient: inequality direction is preserved
            # when dividing through by coef.
            if constr.has_ub():
                new_ub = (value(constr.upper) - const) / coef
                var_ub = float('inf') if var.ub is None else var.ub
                var.setub(min(var_ub, new_ub))
            if constr.has_lb():
                new_lb = (value(constr.lower) - const) / coef
                var_lb = float('-inf') if var.lb is None else var.lb
                var.setlb(max(var_lb, new_lb))
        elif coef < 0:
            # Negative coefficient: dividing flips the inequality, so the
            # constraint's ub yields a lower bound on x and vice versa.
            if constr.has_ub():
                new_lb = (value(constr.upper) - const) / coef
                var_lb = float('-inf') if var.lb is None else var.lb
                var.setlb(max(var_lb, new_lb))
            if constr.has_lb():
                new_ub = (value(constr.lower) - const) / coef
                var_ub = float('inf') if var.ub is None else var.ub
                var.setub(min(var_ub, new_ub))
        if var.is_integer() or var.is_binary():
            # Make sure that the lb and ub are integral. Use safe
            # construction if near to integer.
            if var.has_lb():
                var.setlb(int(min(math.ceil(var.lb - config.tolerance),
                                  math.ceil(var.lb))))
            if var.has_ub():
                var.setub(int(max(math.floor(var.ub + config.tolerance),
                                  math.floor(var.ub))))
        if var is not None and var.value is not None:
            # Nudge the current value back inside the tightened bounds.
            _adjust_var_value_if_not_feasible(var)
        if (config.detect_fixed and var.has_lb() and var.has_ub()
                and fabs(value(var.lb) - value(var.ub)) <= config.tolerance):
            var.fix(var.lb)
        # The constraint's information now lives in the variable bounds.
        constr.deactivate()
def _xfrm_bilinearities(self, dual):
    """
    Replace bilinear terms in constraints with disjunctions.

    Walks all active constraints on ``dual``; rejects models of degree
    greater than two and (currently) only prints the quadratic terms of
    degree-2 constraint bodies.
    """
    for (name, data) in dual.component_map(Constraint, active=True).items():
        for ndx in data:
            con = data[ndx]
            degree = con.body.polynomial_degree()
            if degree > 2:
                # BUGFIX: previously this raised a *string*
                # ("raise "RuntimeError: ...""), which is a TypeError in
                # Python 3.  Raise a real exception instead.
                raise RuntimeError(
                    "Cannot transform a model with polynomial degree %d"
                    % degree)
            if degree == 2:
                terms = generate_standard_repn(con.body)
                for i, var in enumerate(terms.quadratic_vars):
                    print("%s %s %s" % (
                        i, str(var), str(terms.quadratic_coefs[i])))
def check_bound_constraints(self, cons, disvar, indvar, lb, ub):
    """Verify the pair of hull bound constraints on a disaggregated var.

    Expects cons['lb']:  lb*indvar - disvar <= 0
            cons['ub']:  disvar - ub*indvar <= 0
    """
    self.assertIsInstance(cons, Constraint)
    self.assertEqual(len(cons), 2)  # both lb and ub
    expected = (('lb', ((indvar, lb), (disvar, -1))),
                ('ub', ((indvar, -ub), (disvar, 1))))
    for key, terms in expected:
        con = cons[key]
        self.assertIsNone(con.lower)
        self.assertEqual(con.upper, 0)
        repn = generate_standard_repn(con.body)
        self.assertTrue(repn.is_linear())
        self.assertEqual(repn.constant, 0)
        self.assertEqual(len(repn.linear_vars), 2)
        for v, coef in terms:
            check_linear_coef(self, repn, v, coef)
def test_xor_constraint(self):
    """chull adds an XOR constraint over the disjunct indicator vars."""
    m = models.makeTwoTermDisj_Nonlinear()
    TransformationFactory('gdp.chull').apply_to(m)
    xor_con = m._gdp_chull_relaxation_disjunction_xor
    self.assertIsInstance(xor_con, Constraint)
    self.assertEqual(len(xor_con), 1)
    repn = generate_standard_repn(xor_con.body)
    self.assertTrue(repn.is_linear())
    self.assertEqual(repn.constant, 0)
    self.assertEqual(len(repn.linear_vars), 2)
    # Body is indicator_var[0] + indicator_var[1].
    for disjunct in (m.d[0], m.d[1]):
        check_linear_coef(self, repn, disjunct.indicator_var, 1)
def determine_valid_values(block, discr_var_to_constrs_map, config):
    """Calculate valid values for each effectively discrete variable.

    We need the set of possible values for the effectively discrete
    variable in order to do the reformulations. Right now, we select a
    naive approach where we look for variables in the
    discreteness-inducing constraints. We then adjust their values and
    see if things are still feasible. Based on their coefficient values,
    we can infer a set of allowable values for the effectively discrete
    variable.

    Args:
        block: The model or a disjunct on the model.
        discr_var_to_constrs_map: map of effectively discrete variable
            --> list of inducing (equality) constraints.
        config: transformation config, forwarded to
            ``prune_possible_values``.

    Returns:
        ComponentMap of effectively discrete variable --> set of values.
    """
    possible_values = ComponentMap()
    for eff_discr_var, constrs in discr_var_to_constrs_map.items():
        # get the superset of possible values by looking through the
        # constraints
        for constr in constrs:
            repn = generate_standard_repn(constr.body)
            # Total coefficient on the effectively discrete variable
            # (summed in case it appears in multiple linear terms).
            var_coef = sum(coef for i, coef in enumerate(repn.linear_coefs)
                           if repn.linear_vars[i] is eff_discr_var)
            # Solve the equality for eff_discr_var when all other
            # variables are zero.  NOTE(review): uses constr.upper
            # directly (not value()) -- assumes a fixed numeric bound.
            const = -(repn.constant - constr.upper) / var_coef
            possible_vals = set((const,))
            for i, var in enumerate(repn.linear_vars):
                if var is eff_discr_var:
                    continue
                # Contribution per unit of this variable, moved to the
                # eff_discr_var side of the equality (hence the sign flip).
                coef = -repn.linear_coefs[i] / var_coef
                if var.is_binary():
                    var_values = (0, coef)
                elif var.is_integer():
                    var_values = [v * coef
                                  for v in range(var.lb, var.ub + 1)]
                else:
                    raise ValueError(
                        '%s has unacceptable variable domain: %s' %
                        (var.name, var.domain))
                # Minkowski sum: every current value plus every value this
                # variable can contribute.
                possible_vals = set(
                    (v1 + v2 for v1 in possible_vals
                     for v2 in var_values))
            old_possible_vals = possible_values.get(eff_discr_var, None)
            if old_possible_vals is not None:
                # Intersect with values permitted by earlier constraints.
                possible_values[eff_discr_var] = (
                    old_possible_vals & possible_vals)
            else:
                possible_values[eff_discr_var] = possible_vals
    possible_values = prune_possible_values(block, possible_values, config)
    return possible_values
def _reformulate_case_2(blk, v1, v2, bilinear_constr):
    """Rebuild ``bilinear_constr`` with the v1*v2 product replaced.

    The v1*v2 term is substituted by its coefficient times the
    disaggregated sum ``sum(val * blk.v_increment[val])`` over
    ``blk.valid_values``; all other linear, quadratic, constant, and
    nonlinear pieces of the body are carried over unchanged.
    """
    repn = generate_standard_repn(bilinear_constr.body)
    # Locate the (v1, v2) product among the quadratic terms, in either
    # order; raises StopIteration if the pair is not present.
    replace_index = next(
        i for i, var_tup in enumerate(repn.quadratic_vars)
        if (var_tup[0] is v1 and var_tup[1] is v2)
        or (var_tup[0] is v2 and var_tup[1] is v1))
    bilinear_constr.set_value((
        bilinear_constr.lower,
        # Linear terms, unchanged.
        sum(coef * repn.linear_vars[i]
            for i, coef in enumerate(repn.linear_coefs)) +
        # Replaced product: coefficient times the disaggregated sum.
        repn.quadratic_coefs[replace_index] * sum(
            val * blk.v_increment[val] for val in blk.valid_values) +
        # All remaining quadratic terms, unchanged.
        sum(repn.quadratic_coefs[i] * var_tup[0] * var_tup[1]
            for i, var_tup in enumerate(repn.quadratic_vars)
            if not i == replace_index) +
        repn.constant + zero_if_None(repn.nonlinear_expr),
        bilinear_constr.upper
    ))
def test_build_linking_constraints(self):
    """_build_linking_constraints turns (value, aux var) pairs into
    constraints.

    Expected behavior per input type:
      * None              -> no constraint generated
      * numeric constant  -> linear_constraint:  vaux == const
      * data_expression   -> linear_constraint:  vaux == expr (rhs is
                             the expression object itself)
      * variable          -> linear_constraint:  vaux - v == 0
      * expression        -> general constraint: vaux - expr == 0
    """
    # Empty input produces an empty constraint_tuple.
    c = _build_linking_constraints([],[])
    self.assertIs(type(c), constraint_tuple)
    self.assertEqual(len(c), 0)
    # None entries are skipped entirely.
    c = _build_linking_constraints([None],[variable()])
    self.assertIs(type(c), constraint_tuple)
    self.assertEqual(len(c), 0)
    v = [1,
         data_expression(),
         variable(),
         expression(expr=1.0)]
    vaux = [variable(),
            variable(),
            variable(),
            variable()]
    c = _build_linking_constraints(v, vaux)
    self.assertIs(type(c), constraint_tuple)
    self.assertEqual(len(c), 4)
    # Case 0 -- constant:  vaux[0] == 1.
    self.assertIs(type(c[0]), linear_constraint)
    self.assertEqual(c[0].rhs, 1)
    self.assertEqual(len(list(c[0].terms)), 1)
    self.assertIs(list(c[0].terms)[0][0], vaux[0])
    self.assertEqual(list(c[0].terms)[0][1], 1)
    # Case 1 -- data expression: rhs is the expression object itself.
    self.assertIs(type(c[1]), linear_constraint)
    self.assertIs(c[1].rhs, v[1])
    self.assertEqual(len(list(c[1].terms)), 1)
    self.assertIs(list(c[1].terms)[0][0], vaux[1])
    self.assertEqual(list(c[1].terms)[0][1], 1)
    # Case 2 -- variable:  vaux[2] - v[2] == 0.
    self.assertIs(type(c[2]), linear_constraint)
    self.assertEqual(c[2].rhs, 0)
    self.assertEqual(len(list(c[2].terms)), 2)
    self.assertIs(list(c[2].terms)[0][0], vaux[2])
    self.assertEqual(list(c[2].terms)[0][1], 1)
    self.assertIs(list(c[2].terms)[1][0], v[2])
    self.assertEqual(list(c[2].terms)[1][1], -1)
    # Case 3 -- general expression:  vaux[3] - 1.0 == 0, checked through
    # the standard repn of the constraint body.
    self.assertIs(type(c[3]), constraint)
    self.assertEqual(c[3].rhs, 0)
    from pyomo.repn import generate_standard_repn
    repn = generate_standard_repn(c[3].body)
    self.assertEqual(len(repn.linear_vars), 1)
    self.assertIs(repn.linear_vars[0], vaux[3])
    self.assertEqual(repn.linear_coefs[0], 1)
    self.assertEqual(repn.constant, -1)
def detect_effectively_discrete_vars(block, equality_tolerance):
    """Detect effectively discrete variables.

    A continuous variable is "effectively discrete" when a linear
    equality pins it to a sum of discrete variables.  Returns a
    ComponentMap of effectively discrete variable --> list of inducing
    constraints.
    """
    effectively_discrete = ComponentMap()
    for constr in block.component_data_objects(Constraint, active=True):
        if constr.lower is None or constr.upper is None:
            continue  # skip inequality constraints
        if fabs(value(constr.lower)
                - value(constr.upper)) > equality_tolerance:
            continue  # not an equality constraint; skip
        if constr.body.polynomial_degree() not in (1, 0):
            continue  # skip nonlinear expressions
        repn = generate_standard_repn(constr.body)
        if len(repn.linear_vars) < 2:
            # TODO should this be < 2 or < 1?
            # TODO we should make sure that trivial equality relations are
            # preprocessed before this, or we will end up reformulating
            # expressions that we do not need to here.
            continue
        continuous_vars = [v for v in repn.linear_vars
                           if v.is_continuous()]
        if len(continuous_vars) == 1:
            # Exactly one continuous variable among discrete ones: it is
            # effectively discrete.  Record the inducing constraint.
            var = continuous_vars[0]
            inducing = effectively_discrete.get(var, [])
            inducing.append(constr)
            effectively_discrete[var] = inducing
    # TODO we should eventually also look at cases where all other
    # non_discrete_vars are effectively_discrete_vars
    return effectively_discrete
def pass_values(self, arc, fixed_inputs):
    """
    Pass the values from one unit to the next, recording only those that
    were not already fixed in the provided dict that maps blocks to sets.
    """
    eblock = arc.expanded_block
    src, dest = arc.src, arc.dest
    dest_unit = dest.parent_block()
    eq_tol = self.options["almost_equal_tol"]
    if dest_unit not in fixed_inputs:
        fixed_inputs[dest_unit] = ComponentSet()
    sf = eblock.component("splitfrac")
    if sf is not None and not sf.is_fixed():
        # fix the splitfrac if it has a current value or else error
        if sf.value is not None:
            fixed_inputs[dest_unit].add(sf)
            sf.fix()
        else:
            raise RuntimeError(
                "Found free splitfrac for arc '%s' with no current value. "
                "Please use the set_split_fraction method on its source "
                "port to set this value before expansion, or set its value "
                "manually if expansion has already occured." % arc.name)
    elif sf is None:
        # if there is no splitfrac, but we have extensive members, then we
        # need to manually set the evar values because there will be no
        # *_split constraints on the eblock, so it is up to us to set it
        # TODO: what if there is no splitfrac, but it's missing because
        # there's only 1 variable per port so it was simplified out?
        # How do we specify the downstream evars? If we assume that the
        # user's function will satisfy the *_outsum constraint before
        # returning, then the evars would at least be specified such that
        # they satisfy the total sum constraint. But I would think we don't
        # want to rely on the user calling solve on their unit before
        # returning, especially since the outsum constraint was auto
        # generated and not one they made themselves.
        # Potential Solution: allow the user to specify a splitfrac
        # (via set_split_fraction or something else) that will be used here
        # and is only relevant to this SM, and if they didn't specify
        # anything, throw an error.
        for name, mem in iteritems(src.vars):
            if not src.is_extensive(name):
                continue
            evar = eblock.component(name)
            if evar is None:
                continue
            if len(src.dests()) > 1:
                raise Exception(
                    "This still needs to be figured out (arc '%s')" %
                    arc.name)
            # TODO: for now we know it's obvious what to do if there is
            # only 1 destination
            # Normalize to a list of (data object, index) pairs so
            # indexed and scalar members are handled uniformly.
            if mem.is_indexed():
                evars = [(evar[i], i) for i in evar]
            else:
                evars = [(evar, None)]
            for evar, idx in evars:
                fixed_inputs[dest_unit].add(evar)
                evar.fix(value(mem[idx] if mem.is_indexed() else mem))
    for con in eblock.component_data_objects(Constraint, active=True):
        # we expect to find equality constraints with one linear variable
        if not con.equality:
            # We assume every constraint here is an equality.
            # This will only be False if the transformation changes
            # or if the user puts something unexpected on the eblock.
            raise RuntimeError(
                "Found inequality constraint '%s'. Please do not modify "
                "the expanded block." % con.name)
        repn = generate_standard_repn(con.body)
        if repn.is_fixed():
            # the port member's peer was already fixed
            if abs(value(con.lower) - repn.constant) > eq_tol:
                raise RuntimeError(
                    "Found connected ports '%s' and '%s' both with fixed "
                    "but different values (by > %s) for constraint '%s'"
                    % (src, dest, eq_tol, con.name))
            continue
        if not (repn.is_linear() and len(repn.linear_vars) == 1):
            raise RuntimeError(
                "Constraint '%s' had more than one free variable when "
                "trying to pass a value to its destination. Please fix "
                "more variables before passing across this arc." %
                con.name)
        # fix the value of the single variable to satisfy the constraint
        # con.lower is usually a NumericConstant but call value on it
        # just in case it is something else
        val = (value(con.lower) - repn.constant) / repn.linear_coefs[0]
        var = repn.linear_vars[0]
        fixed_inputs[dest_unit].add(var)
        var.fix(val)
def _print_model_LP(self, model, output_file, solver_capability, labeler, output_fixed_variable_bounds=False, file_determinism=1, row_order=None, column_order=None, skip_trivial_constraints=False, force_objective_constant=False, include_all_variable_bounds=False): eq_string_template = self.eq_string_template leq_string_template = self.leq_string_template geq_string_template = self.geq_string_template ub_string_template = self.ub_string_template lb_string_template = self.lb_string_template symbol_map = SymbolMap() variable_symbol_map = SymbolMap() # NOTE: we use createSymbol instead of getSymbol because we # know whether or not the symbol exists, and don't want # to the overhead of error/duplicate checking. # cache frequently called functions create_symbol_func = SymbolMap.createSymbol create_symbols_func = SymbolMap.createSymbols alias_symbol_func = SymbolMap.alias variable_label_pairs = [] # populate the symbol map in a single pass. #objective_list, constraint_list, sosconstraint_list, variable_list \ # = self._populate_symbol_map(model, # symbol_map, # labeler, # variable_symbol_map, # file_determinism=file_determinism) sortOrder = SortComponents.unsorted if file_determinism >= 1: sortOrder = sortOrder | SortComponents.indices if file_determinism >= 2: sortOrder = sortOrder | SortComponents.alphabetical # # Create variable symbols (and cache the block list) # all_blocks = [] variable_list = [] # # WEH - TODO: See if this is faster # NOTE: This loop doesn't find all of the variables. 
:( # #for block in model.block_data_objects(active=True, # sort=sortOrder): # # all_blocks.append(block) # # for vardata in block.component_data_objects( # Var, # active=True, # sort=sortOrder, # descend_into=False): # # variable_list.append(vardata) # variable_label_pairs.append( # (vardata,create_symbol_func(symbol_map, # vardata, # labeler))) all_blocks = list( model.block_data_objects( active=True, sort=sortOrder) ) variable_list = list( model.component_data_objects( Var, sort=sortOrder) ) variable_label_pairs = list( (vardata, create_symbol_func(symbol_map, vardata, labeler)) for vardata in variable_list ) variable_symbol_map.addSymbols(variable_label_pairs) # and extract the information we'll need for rapid labeling. object_symbol_dictionary = symbol_map.byObject variable_symbol_dictionary = variable_symbol_map.byObject # cache - these are called all the time. print_expr_canonical = self._print_expr_canonical # print the model name and the source, so we know roughly where # it came from. # # NOTE: this *must* use the "\* ... *\" comment format: the GLPK # LP parser does not correctly handle other formats (notably, "%"). 
output = [] output.append( "\\* Source Pyomo model name=%s *\\\n\n" % (model.name,) ) # # Objective # supports_quadratic_objective = solver_capability('quadratic_objective') numObj = 0 onames = [] for block in all_blocks: gen_obj_repn = getattr(block, "_gen_obj_repn", True) # Get/Create the ComponentMap for the repn if not hasattr(block,'_repn'): block._repn = ComponentMap() block_repn = block._repn for objective_data in block.component_data_objects( Objective, active=True, sort=sortOrder, descend_into=False): numObj += 1 onames.append(objective_data.name) if numObj > 1: raise ValueError( "More than one active objective defined for input " "model '%s'; Cannot write legal LP file\n" "Objectives: %s" % (model.name, ' '.join(onames))) create_symbol_func(symbol_map, objective_data, labeler) symbol_map.alias(objective_data, '__default_objective__') if objective_data.is_minimizing(): output.append("min \n") else: output.append("max \n") if gen_obj_repn: repn = generate_standard_repn(objective_data.expr) block_repn[objective_data] = repn else: repn = block_repn[objective_data] degree = repn.polynomial_degree() if degree == 0: logger.warning("Constant objective detected, replacing " "with a placeholder to prevent solver failure.") force_objective_constant = True elif degree == 2: if not supports_quadratic_objective: raise RuntimeError( "Selected solver is unable to handle " "objective functions with quadratic terms. " "Objective at issue: %s." % objective_data.name) elif degree is None: raise RuntimeError( "Cannot write legal LP file. Objective '%s' " "has nonlinear terms that are not quadratic." % objective_data.name) output.append( object_symbol_dictionary[id(objective_data)]+':\n') offset = print_expr_canonical( repn, output, object_symbol_dictionary, variable_symbol_dictionary, True, column_order, force_objective_constant=force_objective_constant) if numObj == 0: raise ValueError( "ERROR: No objectives defined for input model. 
" "Cannot write legal LP file.") # Constraints # # If there are no non-trivial constraints, you'll end up with an empty # constraint block. CPLEX is OK with this, but GLPK isn't. And # eliminating the constraint block (i.e., the "s.t." line) causes GLPK # to whine elsewhere. Output a warning if the constraint block is empty, # so users can quickly determine the cause of the solve failure. output.append("\n") output.append("s.t.\n") output.append("\n") have_nontrivial = False supports_quadratic_constraint = solver_capability('quadratic_constraint') def constraint_generator(): for block in all_blocks: gen_con_repn = getattr(block, "_gen_con_repn", True) # Get/Create the ComponentMap for the repn if not hasattr(block,'_repn'): block._repn = ComponentMap() block_repn = block._repn for constraint_data in block.component_data_objects( Constraint, active=True, sort=sortOrder, descend_into=False): if (not constraint_data.has_lb()) and \ (not constraint_data.has_ub()): assert not constraint_data.equality continue # non-binding, so skip if constraint_data._linear_canonical_form: repn = constraint_data.canonical_form() elif gen_con_repn: repn = generate_standard_repn(constraint_data.body) block_repn[constraint_data] = repn else: repn = block_repn[constraint_data] yield constraint_data, repn if row_order is not None: sorted_constraint_list = list(constraint_generator()) sorted_constraint_list.sort(key=lambda x: row_order[x[0]]) def yield_all_constraints(): for data, repn in sorted_constraint_list: yield data, repn else: yield_all_constraints = constraint_generator # FIXME: This is a hack to get nested blocks working... for constraint_data, repn in yield_all_constraints(): have_nontrivial = True degree = repn.polynomial_degree() # # Write constraint # # There are conditions, e.g., when fixing variables, under which # a constraint block might be empty. Ignore these, for both # practical reasons and the fact that the CPLEX LP format # requires a variable in the constraint body. 
It is also # possible that the body of the constraint consists of only a # constant, in which case the "variable" of if degree == 0: if skip_trivial_constraints: continue elif degree == 2: if not supports_quadratic_constraint: raise ValueError( "Solver unable to handle quadratic expressions. Constraint" " at issue: '%s'" % (constraint_data.name)) elif degree is None: raise ValueError( "Cannot write legal LP file. Constraint '%s' has a body " "with nonlinear terms." % (constraint_data.name)) # Create symbol con_symbol = create_symbol_func(symbol_map, constraint_data, labeler) if constraint_data.equality: assert value(constraint_data.lower) == \ value(constraint_data.upper) label = 'c_e_%s_' % con_symbol alias_symbol_func(symbol_map, constraint_data, label) output.append(label) output.append(':\n') offset = print_expr_canonical(repn, output, object_symbol_dictionary, variable_symbol_dictionary, False, column_order) bound = constraint_data.lower bound = _get_bound(bound) - offset output.append(eq_string_template % (_no_negative_zero(bound))) output.append("\n") else: if constraint_data.has_lb(): if constraint_data.has_ub(): label = 'r_l_%s_' % con_symbol else: label = 'c_l_%s_' % con_symbol alias_symbol_func(symbol_map, constraint_data, label) output.append(label) output.append(':\n') offset = print_expr_canonical(repn, output, object_symbol_dictionary, variable_symbol_dictionary, False, column_order) bound = constraint_data.lower bound = _get_bound(bound) - offset output.append(geq_string_template % (_no_negative_zero(bound))) else: assert constraint_data.has_ub() if constraint_data.has_ub(): if constraint_data.has_lb(): label = 'r_u_%s_' % con_symbol else: label = 'c_u_%s_' % con_symbol alias_symbol_func(symbol_map, constraint_data, label) output.append(label) output.append(':\n') offset = print_expr_canonical(repn, output, object_symbol_dictionary, variable_symbol_dictionary, False, column_order) bound = constraint_data.upper bound = _get_bound(bound) - offset 
output.append(leq_string_template % (_no_negative_zero(bound))) else: assert constraint_data.has_lb() # A simple hack to avoid caching super large files if len(output) > 1024: output_file.write( "".join(output) ) output = [] if not have_nontrivial: logger.warning('Empty constraint block written in LP format ' \ '- solver may error') # the CPLEX LP format doesn't allow constants in the objective (or # constraint body), which is a bit silly. To avoid painful # book-keeping, we introduce the following "variable", constrained # to the value 1. This is used when quadratic terms are present. # worst-case, if not used, is that CPLEX easily pre-processes it out. prefix = "" output.append('%sc_e_ONE_VAR_CONSTANT: \n' % prefix) output.append('%sONE_VAR_CONSTANT = 1.0\n' % prefix) output.append("\n") # SOS constraints # # For now, we write out SOS1 and SOS2 constraints in the cplex format # # All Component objects are stored in model._component, which is a # dictionary of {class: {objName: object}}. # # Consider the variable X, # # model.X = Var(...) # # We print X to CPLEX format as X(i,j,k,...) where i, j, k, ... are the # indices of X. 
# SOSlines = [] sos1 = solver_capability("sos1") sos2 = solver_capability("sos2") writtenSOS = False for block in all_blocks: for soscondata in block.component_data_objects( SOSConstraint, active=True, sort=sortOrder, descend_into=False): create_symbol_func(symbol_map, soscondata, labeler) level = soscondata.level if (level == 1 and not sos1) or \ (level == 2 and not sos2) or \ (level > 2): raise ValueError( "Solver does not support SOS level %s constraints" % (level)) if writtenSOS == False: SOSlines.append("SOS\n") writtenSOS = True # This updates the referenced_variable_ids, just in case # there is a variable that only appears in an # SOSConstraint, in which case this needs to be known # before we write the "bounds" section (Cplex does not # handle this correctly, Gurobi does) self.printSOS(symbol_map, labeler, variable_symbol_map, soscondata, SOSlines) # # Bounds # output.append("bounds\n") # Scan all variables even if we're only writing a subset of them. # required because we don't store maps by variable type currently. # Track the number of integer and binary variables, so you can # output their status later. integer_vars = [] binary_vars = [] for vardata in variable_list: # TODO: We could just loop over the set of items in # self._referenced_variable_ids, except this is # a dictionary that is hashed by id(vardata) # which would make the bounds section # nondeterministic (bad for unit testing) if (not include_all_variable_bounds) and \ (id(vardata) not in self._referenced_variable_ids): continue name_to_output = variable_symbol_dictionary[id(vardata)] if name_to_output == "e": raise ValueError( "Attempting to write variable with name 'e' in a CPLEX LP " "formatted file will cause a parse failure due to confusion with " "numeric values expressed in scientific notation") # track the number of integer and binary variables, so we know whether # to output the general / binary sections below. 
if vardata.is_binary(): binary_vars.append(name_to_output) elif vardata.is_integer(): integer_vars.append(name_to_output) elif not vardata.is_continuous(): raise TypeError("Invalid domain type for variable with name '%s'. " "Variable is not continuous, integer, or binary." % (vardata.name)) if vardata.fixed: if not output_fixed_variable_bounds: raise ValueError( "Encountered a fixed variable (%s) inside an active " "objective or constraint expression on model %s, which is " "usually indicative of a preprocessing error. Use the " "IO-option 'output_fixed_variable_bounds=True' to suppress " "this error and fix the variable by overwriting its bounds " "in the LP file." % (vardata.name, model.name)) if vardata.value is None: raise ValueError("Variable cannot be fixed to a value of None.") vardata_lb = value(vardata.value) vardata_ub = value(vardata.value) output.append(" ") output.append(lb_string_template % (_no_negative_zero(vardata_lb))) output.append(name_to_output) output.append(ub_string_template % (_no_negative_zero(vardata_ub))) else: vardata_lb = _get_bound(vardata.lb) vardata_ub = _get_bound(vardata.ub) # Pyomo assumes that the default variable bounds are -inf and +inf output.append(" ") if vardata.has_lb(): output.append(lb_string_template % (_no_negative_zero(vardata_lb))) else: output.append(" -inf <= ") output.append(name_to_output) if vardata.has_ub(): output.append(ub_string_template % (_no_negative_zero(vardata_ub))) else: output.append(" <= +inf\n") if len(integer_vars) > 0: output.append("general\n") for var_name in integer_vars: output.append(' %s\n' % var_name) if len(binary_vars) > 0: output.append("binary\n") for var_name in binary_vars: output.append(' %s\n' % var_name) # Write the SOS section output.append( "".join(SOSlines) ) # # wrap-up # output.append("end\n") output_file.write( "".join(output) ) # Clean up the symbol map to only contain variables referenced # in the active constraints **Note**: warm start method may # rely on this for 
choosing the set of potential warm start # variables vars_to_delete = set(variable_symbol_map.byObject.keys()) - \ set(self._referenced_variable_ids.keys()) sm_byObject = symbol_map.byObject sm_bySymbol = symbol_map.bySymbol var_sm_byObject = variable_symbol_map.byObject for varid in vars_to_delete: symbol = var_sm_byObject[varid] del sm_byObject[varid] del sm_bySymbol[symbol] del variable_symbol_map return symbol_map
from pyomo.environ import *
from pyomo.repn import generate_standard_repn
import time

# @runtime
# Benchmark: compare expression-construction and standard-repn generation
# time for Python's builtin sum() versus Pyomo's quicksum() on a large
# nonlinear expression.
M = ConcreteModel()
M.A = RangeSet(100000)
M.p = Param(M.A, mutable=True, initialize=1)
M.x = Var(M.A)

# Build the expression with builtin sum() and time both construction
# and repn generation.
t0 = time.time()
expr = sum((M.x[i] - 1) ** M.p[i] for i in M.A)
print("sum: %f" % (time.time() - t0))

t0 = time.time()
generate_standard_repn(expr)
print("repn: %f" % (time.time() - t0))

# Repeat the same measurement using quicksum(), which builds the sum
# expression in place.
t0 = time.time()
expr = quicksum((M.x[i] - 1) ** M.p[i] for i in M.A)
print("quicksum: %f" % (time.time() - t0))

t0 = time.time()
generate_standard_repn(expr)
print("repn: %f" % (time.time() - t0))
# @runtime
def _add_optimality_conditions(self, instance, submodel):
    """
    Add optimality conditions for the submodel

    This assumes that the original model has the form:

        min c1*x + d1*y
            A3*x <= b3
            A1*x + B1*y <= b1
            min c2*x + d2*y + x'*Q*y
                A2*x + B2*y + x'*E2*y <= b2
                y >= 0

    NOTE THE VARIABLE BOUNDS!

    Builds and returns a new Block containing the KKT conditions of the
    lower-level problem: dual variables (u for constraints, v for
    variable bounds), complementarity slackness conditions (c2, c3),
    and stationarity equations (c1).

    instance: the upper-level model whose Var components supply the
        lower-level variable bounds.
    submodel: the lower-level model whose Objective and Constraint
        components are processed.
    """
    #
    # Populate the block with the linear constraints.
    # Note that we don't simply clone the current block.
    # We need to collect a single set of equations that
    # can be easily expressed.
    #
    # d2: var id -> (signed) objective coefficient of that lower-level var
    # B2: var id -> {constraint id -> coefficient} (constraint matrix cols)
    # vtmp: var id -> (lower-bound dual, upper-bound dual)
    # utmp: constraint id -> (geq dual, leq dual)
    # sids_set / sids_list: ids of lower-level vars seen so far; the list
    #   preserves insertion order so stationarity rows come out
    #   deterministically.
    d2 = {}
    B2 = {}
    vtmp = {}
    utmp = {}
    sids_set = set()
    sids_list = []
    #
    block = Block(concrete=True)
    block.u = VarList()
    block.v = VarList()
    block.c1 = ConstraintList()
    block.c2 = ComplementarityList()
    block.c3 = ComplementarityList()
    #
    # Collect submodel objective terms
    #
    # TODO: detect fixed variables
    #
    for odata in submodel.component_data_objects(Objective, active=True):
        # Negate coefficients for a maximization objective so the KKT
        # system is always written for a minimization.
        if odata.sense == maximize:
            d_sense = -1
        else:
            d_sense = 1
        #
        # Iterate through the variables in the representation
        #
        o_terms = generate_standard_repn(odata.expr, compute_values=False)
        #
        # Linear terms
        #
        for i, var in enumerate(o_terms.linear_vars):
            if var.parent_component().local_name in self._fixed_upper_vars:
                #
                # Skip fixed upper variables
                #
                continue
            #
            # Store the coefficient for the variable.  The coefficient is
            # negated if the objective is maximized.
            #
            id_ = id(var)
            d2[id_] = d_sense * o_terms.linear_coefs[i]
            if not id_ in sids_set:
                sids_set.add(id_)
                sids_list.append(id_)
        #
        # Quadratic terms
        #
        # A bilinear term x*y is only supported when exactly one of the
        # two variables is a fixed upper-level variable; it is then
        # treated as a linear term in the other (lower-level) variable
        # with the upper-level variable folded into the coefficient.
        for i, var in enumerate(o_terms.quadratic_vars):
            if var[0].parent_component().local_name in self._fixed_upper_vars:
                if var[1].parent_component().local_name in self._fixed_upper_vars:
                    #
                    # Skip fixed upper variables
                    #
                    continue
                #
                # Add the linear term
                #
                id_ = id(var[1])
                d2[id_] = d2.get(id_,0) + d_sense * o_terms.quadratic_coefs[i] * var[0]
                if not id_ in sids_set:
                    sids_set.add(id_)
                    sids_list.append(id_)
            elif var[1].parent_component().local_name in self._fixed_upper_vars:
                #
                # Add the linear term
                #
                id_ = id(var[0])
                d2[id_] = d2.get(id_,0) + d_sense * o_terms.quadratic_coefs[i] * var[1]
                if not id_ in sids_set:
                    sids_set.add(id_)
                    sids_list.append(id_)
            else:
                raise RuntimeError("Cannot apply this transformation to a problem with quadratic terms where both variables are in the lower level.")
        #
        # Stop after the first objective
        #
        break
    #
    # Iterate through all lower level variables, adding dual variables
    # and complementarity slackness conditions for y bound constraints
    #
    for vcomponent in instance.component_objects(Var, active=True):
        if vcomponent.local_name in self._fixed_upper_vars:
            #
            # Skip fixed upper variables
            #
            continue
        for ndx in vcomponent:
            #
            # For each index, get the bounds for the variable
            #
            lb, ub = vcomponent[ndx].bounds
            if not lb is None:
                #
                # Add the complementarity slackness condition for a lower bound
                #
                v = block.v.add()
                block.c3.add( complements(vcomponent[ndx] >= lb, v >= 0) )
            else:
                v = None
            if not ub is None:
                #
                # Add the complementarity slackness condition for an upper bound
                #
                w = block.v.add()
                # NOTE(review): this scalar assignment appears redundant --
                # since w is not None, the tuple assignment
                # vtmp[id_] = (v,w) below always overwrites it.
                vtmp[id(vcomponent[ndx])] = w
                block.c3.add( complements(vcomponent[ndx] <= ub, w >= 0) )
            else:
                w = None
            if not (v is None and w is None):
                #
                # Record the variables for which complementarity slackness conditions
                # were created.
                #
                id_ = id(vcomponent[ndx])
                vtmp[id_] = (v,w)
                if not id_ in sids_set:
                    sids_set.add(id_)
                    sids_list.append(id_)
    #
    # Iterate through all constraints, adding dual variables and
    # complementary slackness conditions (for inequality constraints)
    #
    for cdata in submodel.component_data_objects(Constraint, active=True):
        if cdata.equality:
            # Don't add a complementary slackness condition for an equality constraint
            u = block.u.add()
            utmp[id(cdata)] = (None,u)
        else:
            if not cdata.lower is None:
                #
                # Add the complementarity slackness condition for a greater-than inequality
                #
                u = block.u.add()
                block.c2.add( complements(- cdata.body <= - cdata.lower, u >= 0) )
            else:
                u = None
            if not cdata.upper is None:
                #
                # Add the complementarity slackness condition for a less-than inequality
                #
                w = block.u.add()
                block.c2.add( complements(cdata.body <= cdata.upper, w >= 0) )
            else:
                w = None
            if not (u is None and w is None):
                utmp[id(cdata)] = (u,w)
        #
        # Store the coefficients for the constraint variables that are not fixed
        #
        c_terms = generate_standard_repn(cdata.body, compute_values=False)
        #
        # Linear terms
        #
        for i, var in enumerate(c_terms.linear_vars):
            if var.parent_component().local_name in self._fixed_upper_vars:
                continue
            id_ = id(var)
            B2.setdefault(id_,{}).setdefault(id(cdata),c_terms.linear_coefs[i])
            if not id_ in sids_set:
                sids_set.add(id_)
                sids_list.append(id_)
        #
        # Quadratic terms
        #
        # Same convention as the objective: a bilinear term is folded into
        # a linear coefficient on the lower-level variable.
        for i, var in enumerate(c_terms.quadratic_vars):
            if var[0].parent_component().local_name in self._fixed_upper_vars:
                if var[1].parent_component().local_name in self._fixed_upper_vars:
                    continue
                id_ = id(var[1])
                if id_ in B2:
                    B2[id_][id(cdata)] = c_terms.quadratic_coefs[i] * var[0]
                else:
                    B2.setdefault(id_,{}).setdefault(id(cdata),c_terms.quadratic_coefs[i] * var[0])
                if not id_ in sids_set:
                    sids_set.add(id_)
                    sids_list.append(id_)
            elif var[1].parent_component().local_name in self._fixed_upper_vars:
                id_ = id(var[0])
                if id_ in B2:
                    B2[id_][id(cdata)] = c_terms.quadratic_coefs[i] * var[1]
                else:
                    B2.setdefault(id_,{}).setdefault(id(cdata),c_terms.quadratic_coefs[i] * var[1])
                if not id_ in sids_set:
                    sids_set.add(id_)
                    sids_list.append(id_)
            else:
                raise RuntimeError("Cannot apply this transformation to a problem with quadratic terms where both variables are in the lower level.")
    #
    # Generate stationarity equations
    #
    # For each lower-level variable: objective coefficient, minus/plus
    # bound duals, minus/plus constraint duals weighted by the constraint
    # coefficients, must equal zero.
    tmp__ = (None, None)
    for vid in sids_list:
        exp = d2.get(vid,0)
        #
        lb_dual, ub_dual = vtmp.get(vid, tmp__)
        if vid in vtmp:
            if not lb_dual is None:
                exp -= lb_dual              # dual for variable lower bound
            if not ub_dual is None:
                exp += ub_dual              # dual for variable upper bound
        #
        B2_ = B2.get(vid,{})
        utmp_keys = list(utmp.keys())
        if self._deterministic:
            # Sort dual variables by name so the generated stationarity
            # rows are reproducible across runs (dict order of utmp is
            # id-based otherwise).
            utmp_keys.sort(key=lambda x:utmp[x][0].local_name if utmp[x][1] is None else utmp[x][1].local_name)
        for uid in utmp_keys:
            if uid in B2_:
                lb_dual, ub_dual = utmp[uid]
                if not lb_dual is None:
                    exp -= B2_[uid] * lb_dual
                if not ub_dual is None:
                    exp += B2_[uid] * ub_dual
        if type(exp) in six.integer_types or type(exp) is float:
            # A purely numeric stationarity expression means the variable
            # appears in no constraint or bound: the problem is unbounded.
            # TODO: Annotate the model as unbounded
            raise IOError("Unbounded variable without side constraints")
        else:
            block.c1.add( exp == 0 )
    #
    # Return block
    #
    return block
def to_standard_form(self):
    """
    Produces a standard-form representation of the model. Returns
    the coefficient matrix (A), the cost vector (c), and the
    constraint vector (b), where the 'standard form' problem is

        min/max c'x
        s.t.    Ax = b
                x >= 0

    All three returned values are instances of the array.array
    class, and store Python floats (C doubles).

    Slack variables are appended for <= constraints and excess
    variables for >= constraints; the (padded) tableau is also
    pretty-printed to stdout.
    """
    from pyomo.repn import generate_standard_repn

    # We first need to create a map of all variables to their column
    # number.  sorted() is used instead of calling .sort() on the keys
    # -- dict.keys() returns a view (not a list) on Python 3.
    colID = {}
    ID2name = {}
    for col, vname in enumerate(sorted(self.variables().keys())):
        colID[vname] = col
        ID2name[col] = vname

    # First we go through the constraints and introduce slack and excess
    # variables to eliminate inequality constraints
    #
    # N.B. Structure hierarchy:
    #
    # active_components: {class: {attr_name: object}}
    # object -> Constraint: ._data: {ndx: _ConstraintData}
    # _ConstraintData: .lower, .body, .upper
    #
    # So, altogether, we access a lower bound via
    #
    # model.component_map(active=True)[Constraint]['con_name']['index'].lower
    #
    # {le,ge,eq}Constraints are
    # {(constraint_name, index): {variable_or_none: coefficient}} maps
    # that represent each constraint.  None in the innermost dictionary
    # represents the constant term.
    #
    # i.e.
    #
    #     min  x1 + 2*x2 +          x4
    #     s.t. x1                        =  1
    #               x2 + 3*x3           <= -1
    #          x1                +   x4 >=  3
    #          x1 + 2*x2 +         3*x4 >=  0
    #
    # would be represented as (modulo the names of the variables,
    # constraints, and indices)
    #
    # eqConstraints = {('c1', None): {'x1':1, None:-1}}
    # leConstraints = {('c2', None): {'x2':1, 'x3':3, None:1}}
    # geConstraints = {('c3', None): {'x1':1, 'x4':1, None:-3},
    #                  ('c4', None): {'x1':1, 'x2':2, 'x4':1, None:0}}
    #
    # Note that we have the luxury of dealing only with linear terms.
    var_id_map = {}
    leConstraints = {}
    geConstraints = {}
    eqConstraints = {}
    objectives = {}

    # For each registered component
    for c in self.component_map(active=True):
        if issubclass(c, Constraint):
            cons = self.component_map(c, active=True)
            # Get the name of the constraint, and the constraint set itself
            for con_set_name in cons:
                con_set = cons[con_set_name]
                # For each indexed constraint in the constraint set
                for ndx in con_set._data:
                    con = con_set._data[ndx]
                    # Process the body: {var_name_or_None: coefficient}
                    terms = self._process_canonical_repn(
                        generate_standard_repn(con.body, var_id_map))
                    # Process the bounds of the constraint
                    if con.equality:
                        # Equality constraint; fold the (constant) lower
                        # bound into the term map with a negated sign.
                        lb = self._process_canonical_repn(
                            generate_standard_repn(con.lower, var_id_map))
                        for k, v in lb.items():
                            if k in terms:
                                terms[k] -= v
                            else:
                                terms[k] = -v
                        eqConstraints[(con_set_name, ndx)] = terms
                    else:
                        # Process upper bounds (<= constraints)
                        if con.upper is not None:
                            tmp = dict(terms)
                            ub = self._process_canonical_repn(
                                generate_standard_repn(con.upper, var_id_map))
                            for k, v in ub.items():
                                if k in terms:
                                    tmp[k] -= v
                                else:
                                    tmp[k] = -v
                            leConstraints[(con_set_name, ndx)] = tmp
                        # Process lower bounds (>= constraints)
                        if con.lower is not None:
                            tmp = dict(terms)
                            lb = self._process_canonical_repn(
                                generate_standard_repn(con.lower, var_id_map))
                            for k, v in lb.items():
                                if k in terms:
                                    tmp[k] -= v
                                else:
                                    tmp[k] = -v
                            geConstraints[(con_set_name, ndx)] = tmp
        elif issubclass(c, Objective):
            # Process objectives
            objs = self.component_map(c, active=True)
            for obj_set_name in objs:
                obj_set = objs[obj_set_name]
                # For each indexed objective in the objective set
                for ndx in obj_set._data:
                    obj = obj_set._data[ndx]
                    terms = self._process_canonical_repn(
                        generate_standard_repn(obj.expr, var_id_map))
                    objectives[(obj_set_name, ndx)] = terms

    # We now have all the constraints. Add a slack variable for every
    # <= constraint and an excess variable for every >= constraint.
    nSlack = len(leConstraints)
    nExcess = len(geConstraints)

    nConstraints = len(leConstraints) + len(geConstraints) + \
                   len(eqConstraints)
    nVariables = len(colID) + nSlack + nExcess
    nRegVariables = len(colID)

    # Make the arrays (row-major dense coefficient matrix)
    coefficients = array.array("d", [0]*nConstraints*nVariables)
    constraints = array.array("d", [0]*nConstraints)
    costs = array.array("d", [0]*nVariables)

    # Populate the coefficient matrix
    constraintID = 0

    # Add less than or equal to constraints
    for ndx in leConstraints:
        con = leConstraints[ndx]
        for termKey in con:
            coef = con[termKey]
            if termKey is None:
                # Constant term goes (negated) to the RHS vector
                constraints[constraintID] = -coef
            else:
                # Variable coefficient
                coefficients[constraintID*nVariables + colID[termKey]] = coef
        # Add the slack
        coefficients[constraintID*nVariables + nRegVariables +
                     constraintID] = 1
        constraintID += 1

    # Add greater than or equal to constraints
    for ndx in geConstraints:
        con = geConstraints[ndx]
        for termKey in con:
            coef = con[termKey]
            if termKey is None:
                # Constant term goes (negated) to the RHS vector
                constraints[constraintID] = -coef
            else:
                # Variable coefficient
                coefficients[constraintID*nVariables + colID[termKey]] = coef
        # Add the excess
        coefficients[constraintID*nVariables + nRegVariables +
                     constraintID] = -1
        constraintID += 1

    # Add equality constraints
    for ndx in eqConstraints:
        con = eqConstraints[ndx]
        for termKey in con:
            coef = con[termKey]
            if termKey is None:
                # Constant term goes (negated) to the RHS vector
                constraints[constraintID] = -coef
            else:
                # Variable coefficient
                coefficients[constraintID*nVariables + colID[termKey]] = coef
        constraintID += 1

    # Determine cost coefficients.
    # BUG FIX: 'objectives' maps keys to plain dicts of terms; the old
    # code called the dict -- objectives[obj_name]() -- which raised
    # TypeError for any model with an objective.
    for obj_name in objectives:
        obj = objectives[obj_name]
        for var in obj:
            costs[colID[var]] = obj[var]

    # Print the model
    #
    # The goal is to print
    #
    #         var1   var2   var3   ...
    #      +--                        --+
    #      |  cost1  cost2  cost3  ...  |
    #      +--                        --+
    #      +--                        --+  +--    --+
    # con1 | coef11 coef12 coef13  ...  |  |  eq1   |
    # con2 | coef21 coef22 coef23  ...  |  |  eq2   |
    # con3 | coef31 coef32 coef33  ...  |  |  eq3   |
    #  .   |   .      .      .      .   |  |   .    |
    #  .   |   .      .      .      .   |  |   .    |
    #  .   |   .      .      .      .   |  |   .    |
    constraintPadding = 2
    numFmt = "% 1.4f"
    altFmt = "% 1.1g"
    maxColWidth = max(len(numFmt % 0.0), len(altFmt % 0.0))
    maxConstraintColWidth = max(len(numFmt % 0.0), len(altFmt % 0.0))

    # Generate constraint names (order must match the rows built above:
    # le, then ge, then eq)
    maxConNameLen = 0
    conNames = []
    for name in leConstraints:
        strName = str(name)
        if len(strName) > maxConNameLen:
            maxConNameLen = len(strName)
        conNames.append(strName)
    for name in geConstraints:
        strName = str(name)
        if len(strName) > maxConNameLen:
            maxConNameLen = len(strName)
        conNames.append(strName)
    for name in eqConstraints:
        strName = str(name)
        if len(strName) > maxConNameLen:
            maxConNameLen = len(strName)
        conNames.append(strName)

    # Generate the variable names (regular columns first, then slack,
    # then excess).  range() replaces the Python-2-only xrange().
    varNames = [None]*len(colID)
    for name in colID:
        tmp_name = " " + name
        if len(tmp_name) > maxColWidth:
            maxColWidth = len(tmp_name)
        varNames[colID[name]] = tmp_name
    for i in range(0, nSlack):
        tmp_name = " _slack_%i" % i
        if len(tmp_name) > maxColWidth:
            maxColWidth = len(tmp_name)
        varNames.append(tmp_name)
    for i in range(0, nExcess):
        tmp_name = " _excess_%i" % i
        if len(tmp_name) > maxColWidth:
            maxColWidth = len(tmp_name)
        varNames.append(tmp_name)

    # Variable names header row
    line = " "*maxConNameLen + (" "*constraintPadding) + " "
    for col in range(0, nVariables):
        token = varNames[col]
        token += " "*(maxColWidth - len(token))
        line += " " + token + " "
    print(line+'\n')

    # Cost vector
    print(" "*maxConNameLen + (" "*constraintPadding) + "+--" + \
          " "*((maxColWidth+2)*nVariables - 4) + "--+" + '\n')
    line = " "*maxConNameLen + (" "*constraintPadding) + "|"
    for col in range(0, nVariables):
        # Fall back to the compact format when the value doesn't fit
        token = numFmt % costs[col]
        if len(token) > maxColWidth:
            token = altFmt % costs[col]
        token += " "*(maxColWidth - len(token))
        line += " " + token + " "
    line += "|"
    print(line+'\n')
    print(" "*maxConNameLen + (" "*constraintPadding) + "+--" + \
          " "*((maxColWidth+2)*nVariables - 4) + "--+"+'\n')

    # Constraints
    print(" "*maxConNameLen + (" "*constraintPadding) + "+--" + \
          " "*((maxColWidth+2)*nVariables - 4) + "--+" + \
          (" "*constraintPadding) + "+--" + \
          (" "*(maxConstraintColWidth-1)) + "--+"+'\n')
    for row in range(0, nConstraints):
        # Print constraint name
        line = conNames[row] + (" "*constraintPadding) + (" "*(maxConNameLen - len(conNames[row]))) + "|"
        # Print each coefficient
        for col in range(0, nVariables):
            token = numFmt % coefficients[nVariables*row + col]
            if len(token) > maxColWidth:
                token = altFmt % coefficients[nVariables*row + col]
            token += " "*(maxColWidth - len(token))
            line += " " + token + " "
        line += "|" + (" "*constraintPadding) + "|"
        # Add constraint vector
        token = numFmt % constraints[row]
        if len(token) > maxConstraintColWidth:
            token = altFmt % constraints[row]
        token += " "*(maxConstraintColWidth - len(token))
        line += " " + token + " |"
        print(line+'\n')
    print(" "*maxConNameLen + (" "*constraintPadding) + "+--" + \
          " "*((maxColWidth+2)*nVariables - 4) + "--+" + \
          (" "*constraintPadding) + "+--" + (" "*(maxConstraintColWidth-1))\
          + "--+"+'\n')

    return (coefficients, costs, constraints)