def solve_feasibility_subproblem(solve_data, config):
    """Solves a feasibility NLP if the fixed_nlp problem is infeasible.

    Args:
        solve_data (MindtPySolveData): data container that holds solve-instance data.
        config (ConfigBlock): the specific configurations for MindtPy.

    Returns:
        feas_subproblem (Pyomo model): feasibility NLP from the model.
        feas_soln (SolverResults): results from solving the feasibility NLP.
    """
    feas_subproblem = solve_data.working_model.clone()
    add_feas_slacks(feas_subproblem, config)
    MindtPy = feas_subproblem.MindtPy_utils
    if MindtPy.find_component('objective_value') is not None:
        # Reset any cached objective-value epigraph variable.
        MindtPy.objective_value.value = 0
    # The original objective is replaced below by a slack-minimization one.
    next(feas_subproblem.component_data_objects(
        Objective, active=True)).deactivate()
    for constr in feas_subproblem.MindtPy_utils.nonlinear_constraint_list:
        constr.deactivate()
    # Activate the slacked copies of the nonlinear constraints.
    MindtPy.feas_opt.activate()
    if config.feasibility_norm == 'L1':
        MindtPy.feas_obj = Objective(
            expr=sum(s for s in MindtPy.feas_opt.slack_var[...]),
            sense=minimize)
    elif config.feasibility_norm == 'L2':
        MindtPy.feas_obj = Objective(
            expr=sum(s*s for s in MindtPy.feas_opt.slack_var[...]),
            sense=minimize)
    else:
        # L-infinity norm: a single shared slack variable is minimized.
        MindtPy.feas_obj = Objective(
            expr=MindtPy.feas_opt.slack_var,
            sense=minimize)
    TransformationFactory('core.fix_integer_vars').apply_to(feas_subproblem)
    nlpopt = SolverFactory(config.nlp_solver)
    nlp_args = dict(config.nlp_solver_args)
    set_solver_options(nlpopt, solve_data, config, solver_type='nlp')
    with SuppressInfeasibleWarning():
        try:
            with time_code(solve_data.timing, 'feasibility subproblem'):
                feas_soln = nlpopt.solve(
                    feas_subproblem, tee=config.nlp_solver_tee, **nlp_args)
        except (ValueError, OverflowError) as error:
            # Solver failed on the current starting point (e.g. evaluation
            # error): reset the free continuous variables to the initial
            # values and retry once.
            for nlp_var, orig_val in zip(
                    MindtPy.variable_list,
                    solve_data.initial_var_values):
                if not nlp_var.fixed and not nlp_var.is_binary():
                    nlp_var.set_value(orig_val, skip_validation=True)
            with time_code(solve_data.timing, 'feasibility subproblem'):
                feas_soln = nlpopt.solve(
                    feas_subproblem, tee=config.nlp_solver_tee, **nlp_args)
    handle_feasibility_subproblem_tc(
        feas_soln.solver.termination_condition, MindtPy, solve_data, config)
    return feas_subproblem, feas_soln
def _apply_solver(self): start_time = time.time() # # Transform instance # xfrm = TransformationFactory('mpec.simple_nonlinear') xfrm.apply_to(self._instance) # # Solve with a specified solver # solver = self.options.solver if not self.options.solver: #pragma:nocover self.options.solver = solver = 'ipopt' # use the with block here so that deactivation of the # solver plugin always occurs thereby avoiding memory # leaks caused by plugins! with pyomo.opt.SolverFactory(solver) as opt: self.results = [] epsilon_final = self.options.get('epsilon_final', 1e-7) epsilon = self.options.get('epsilon_initial', epsilon_final) while (True): self._instance.mpec_bound.value = epsilon # # **NOTE: It would be better to override _presolve on the # base class of this solver as you might be # missing a number of keywords that were passed # into the solve method (e.g., none of the # io_options are getting relayed to the subsolver # here). # res = opt.solve(self._instance, tee=self._tee, timelimit=self._timelimit) self.results.append(res) epsilon /= 10.0 if epsilon < epsilon_final: break # # Reclassify the Complementarity components # from pyomo.mpec import Complementarity for cuid in self._instance._transformation_data[ 'mpec.simple_nonlinear'].compl_cuids: cobj = cuid.find_component(self._instance) cobj.parent_block().reclassify_component_type( cobj, Complementarity) # # Update timing # stop_time = time.time() self.wall_time = stop_time - start_time # # Return the sub-solver return condition value and log # return pyutilib.misc.Bunch(rc=getattr(opt, '_rc', None), log=getattr(opt, '_log', None))
def _test(self, tname, M):
    """Apply the mpec.nl transformation to *M*, write the model in NL
    format, and compare the output against the stored baseline."""
    out_path = currdir + tname + '_nlxfrm.out'
    baseline_path = currdir + tname + '_nlxfrm.nl'
    TransformationFactory('mpec.nl').apply_to(M)
    M.write(out_path, format=ProblemFormat.nl)
    # First run with no recorded baseline: promote the output to baseline.
    if not os.path.exists(baseline_path):
        os.rename(out_path, baseline_path)
    self.assertFileEqualsBaseline(out_path, baseline_path)
def solve_local_subproblem(mip_result, solve_data, config):
    """Set up and solve the local MINLP or NLP subproblem.

    Clones the working model, fixes disjunct indicator variables (and, when
    ``config.force_subproblem_nlp`` is set, the discrete variables) to the
    values in ``mip_result``, applies ``gdp.fix_disjuncts``, and solves the
    resulting NLP or MINLP.

    Args:
        mip_result: master-problem solution carrying ``disjunct_values``
            and ``var_values``.
        solve_data: GDPopt solve data container.
        config: GDPopt configuration block.

    Returns:
        The subproblem result object; feasible results also update the
        progress indicators.

    Raises:
        ValueError: if a disjunct indicator or discrete-variable value is
            not within ``config.integer_tolerance`` of an integer.
        RuntimeError: if unfixed discrete variables remain when an NLP
            subproblem was forced.
    """
    subprob = solve_data.working_model.clone()
    solve_data.nlp_iteration += 1
    # TODO also copy over the variable values?
    for disj, val in zip(subprob.GDPopt_utils.disjunct_list,
                         mip_result.disjunct_values):
        rounded_val = int(round(val))
        if (fabs(val - rounded_val) > config.integer_tolerance or
                rounded_val not in (0, 1)):
            # BUG FIX: 'val' is already a plain number; 'val.value' raised
            # AttributeError instead of the intended ValueError.
            raise ValueError("Disjunct %s indicator value %s is not "
                             "within tolerance %s of 0 or 1." %
                             (disj.name, val, config.integer_tolerance))
        else:
            if config.round_discrete_vars:
                disj.indicator_var.fix(rounded_val)
            else:
                disj.indicator_var.fix(val)
    if config.force_subproblem_nlp:
        # We also need to copy over the discrete variable values
        for var, val in zip(subprob.GDPopt_utils.variable_list,
                            mip_result.var_values):
            if var.is_continuous():
                continue
            rounded_val = int(round(val))
            if fabs(val - rounded_val) > config.integer_tolerance:
                # BUG FIX: report the master-problem value 'val' that
                # failed the check, not the clone variable's stale value.
                raise ValueError("Discrete variable %s value %s is not "
                                 "within tolerance %s of %s." %
                                 (var.name, val,
                                  config.integer_tolerance, rounded_val))
            else:
                # variable is binary and within tolerances
                if config.round_discrete_vars:
                    var.fix(rounded_val)
                else:
                    var.fix(val)
    TransformationFactory('gdp.fix_disjuncts').apply_to(subprob)
    for disj in subprob.component_data_objects(Disjunct, active=True):
        disj.deactivate()  # TODO this is a HACK for something that isn't happening correctly in fix_disjuncts
    unfixed_discrete_vars = detect_unfixed_discrete_vars(subprob)
    if config.force_subproblem_nlp and len(unfixed_discrete_vars) > 0:
        raise RuntimeError(
            "Unfixed discrete variables found on the NLP subproblem.")
    elif len(unfixed_discrete_vars) == 0:
        subprob_result = solve_NLP(subprob, solve_data, config)
    else:
        subprob_result = solve_MINLP(subprob, solve_data, config)
    if subprob_result.feasible:  # subproblem is feasible
        update_subproblem_progress_indicators(subprob, solve_data, config)
    return subprob_result
def solve_NLP_feas(solve_data, config):
    """Solves feasibility NLP and copies result to working model

    Returns: Result values and dual values
    """
    fixed_nlp = solve_data.working_model.clone()
    add_feas_slacks(fixed_nlp, config)
    MindtPy = fixed_nlp.MindtPy_utils
    # The original objective is replaced by a slack-minimization objective.
    next(fixed_nlp.component_data_objects(Objective, active=True)).deactivate()
    # Deactivate the nonlinear constraints; their slacked copies on the
    # MindtPy_feas block take over.
    for constr in fixed_nlp.component_data_objects(ctype=Constraint,
                                                   active=True,
                                                   descend_into=True):
        if constr.body.polynomial_degree() not in [0, 1]:
            constr.deactivate()
    MindtPy.MindtPy_feas.activate()
    if config.feasibility_norm == 'L1':
        MindtPy.MindtPy_feas_obj = Objective(expr=sum(
            s for s in MindtPy.MindtPy_feas.slack_var[...]), sense=minimize)
    elif config.feasibility_norm == 'L2':
        MindtPy.MindtPy_feas_obj = Objective(expr=sum(
            s * s for s in MindtPy.MindtPy_feas.slack_var[...]),
            sense=minimize)
    else:
        # L-infinity norm: a single shared slack variable is minimized.
        MindtPy.MindtPy_feas_obj = Objective(
            expr=MindtPy.MindtPy_feas.slack_var, sense=minimize)
    TransformationFactory('core.fix_integer_vars').apply_to(fixed_nlp)
    with SuppressInfeasibleWarning():
        feas_soln = SolverFactory(config.nlp_solver).solve(
            fixed_nlp, **config.nlp_solver_args)
    subprob_terminate_cond = feas_soln.solver.termination_condition
    if subprob_terminate_cond is tc.optimal or subprob_terminate_cond is tc.locallyOptimal:
        copy_var_list_values(
            MindtPy.variable_list,
            solve_data.working_model.MindtPy_utils.variable_list,
            config)
    elif subprob_terminate_cond is tc.infeasible:
        # The feasibility problem is always feasible by construction
        # (slacks absorb any violation).
        raise ValueError('Feasibility NLP infeasible. '
                         'This should never happen.')
    else:
        raise ValueError(
            'MindtPy unable to handle feasibility NLP termination condition '
            'of {}'.format(subprob_terminate_cond))
    # NOTE(review): var_values and duals are computed here but not
    # returned or otherwise used — confirm whether callers need them.
    var_values = [v.value for v in MindtPy.variable_list]
    duals = [0 for _ in MindtPy.constraint_list]
    for i, c in enumerate(MindtPy.constraint_list):
        # Estimate a dual from the active bound: -1 sign for <= (upper
        # bound active), +1 for >=.
        rhs = c.upper if c.has_ub() else c.lower
        c_geq = -1 if c.has_ub() else 1
        duals[i] = c_geq * max(0, c_geq * (rhs - value(c.body)))
    if value(MindtPy.MindtPy_feas_obj.expr) == 0:
        # Zero total slack means the original constraints were satisfiable,
        # so the fixed NLP should not have been infeasible.
        raise ValueError('Problem is not feasible, check NLP solver')
    return fixed_nlp, feas_soln
def _test(self, tname, M):
    """Write *M* (after the mpec.nl transformation) in NL format and
    check the file against the stored baseline."""
    out_path = currdir + tname + '_nlxfrm.out'
    baseline_path = currdir + tname + '_nlxfrm.nl'
    TransformationFactory('mpec.nl').apply_to(M)
    M.write(out_path, format=ProblemFormat.nl)
    # No baseline yet: adopt this output as the baseline.
    if not os.path.exists(baseline_path):
        os.rename(out_path, baseline_path)
    self.assertTrue(cmp(out_path, baseline_path),
                    msg="Files %s and %s differ" % (out_path, baseline_path))
def _apply_solver(self, keep_reformulation=False):
    """Solve the nominal (uncertainty-free) counterpart of a robust model.

    Applies the 'romodel.nominal' and 'romodel.adjustable.nominal'
    transformations, solves with the configured sub-solver, copies the
    nominal solution back onto the adjustable variables, and restores the
    transformed constraints.
    """
    # NOTE(review): 'keep_reformulation' is accepted but never referenced
    # in this body — confirm whether it should guard the cleanup below.
    start_time = time.time()
    instance = self._instance
    xfrm = TransformationFactory('romodel.nominal')
    xfrm.apply_to(instance)
    xfrm = TransformationFactory('romodel.adjustable.nominal')
    xfrm.apply_to(instance)
    if not self.options.solver:
        # Default sub-solver when none was specified.
        solver = 'gurobi'
    else:
        solver = self.options.solver
    with pyomo.opt.SolverFactory(solver) as opt:
        self.results = []
        opt.options = self.options
        results = opt.solve(instance,
                            tee=self._tee,
                            timelimit=self._timelimit)
        self.results.append(results)
        # Copy the nominal solution back onto the adjustable variables
        # and drop the temporary '_nominal' components.
        for adjvar_name in xfrm._adjvar_dict:
            adjvar = instance.find_component(adjvar_name)
            var = xfrm._adjvar_dict[adjvar_name]
            for i in adjvar:
                adjvar[i].value = var[i].value
            instance.del_component(adjvar_name + '_nominal')
        # Reactivate the original constraints and remove their nominal
        # replacements.
        for c, c_nominal in xfrm._cons_dict.values():
            c.activate()
            c_nominal.parent_block().del_component(c_nominal)
    stop_time = time.time()
    self.wall_time = stop_time - start_time
    self.results_obj = self._setup_results_obj()
    #
    # Return the sub-solver return condition value and log
    #
    return pyutilib.misc.Bunch(rc=getattr(opt, '_rc', None),
                               log=getattr(opt, '_log', None))
def test_deactivated_parent_block(self):
    """Disjuncts on a deactivated parent block should still be
    reclassified to plain Blocks by gdp.reclassify."""
    model = ConcreteModel()
    model.d1 = Block()
    model.d1.sub1 = Disjunct()
    model.d1.sub2 = Disjunct()
    model.d1.disj = Disjunction(expr=[model.d1.sub1, model.d1.sub2])
    model.d1.deactivate()
    TransformationFactory('gdp.reclassify').apply_to(model)
    # The parent and both disjuncts must now present as Blocks.
    for component in (model.d1, model.d1.sub1, model.d1.sub2):
        self.assertIs(component.type(), Block)
def _test(self, tname, M):
    """Optionally transform *M* with self.xfrm, print it to a file, and
    compare the result against the baseline."""
    out_file = currdir + tname + '_%s.out' % str(self.xfrm)
    base_file = currdir + tname + '_%s.txt' % str(self.xfrm)
    if self.xfrm is not None:
        TransformationFactory(self.xfrm).apply_to(M)
    with capture_output(out_file):
        self._print(M)
    # On the first run there is no baseline; record this output as it.
    if not os.path.exists(base_file):
        os.rename(out_file, base_file)
    self.assertFileEqualsBaseline(out_file, base_file)
def solve_NLP_feas(solve_data, config):
    """Solves feasibility NLP and copies result to working model

    Clones the working model, replaces the objective with a minimization
    of the feasibility slacks, fixes the discrete variables, and solves.
    An optimal solution is copied back to the working model.

    Returns:
        fix_nlp (Pyomo model): the feasibility NLP.
        feas_soln (SolverResults): results from solving it.

    Raises:
        ValueError: on unexpected termination conditions, or when the
            slack objective is zero (the fixed NLP should then have been
            feasible in the first place).
    """
    fix_nlp = solve_data.working_model.clone()
    add_feas_slacks(fix_nlp)
    MindtPy = fix_nlp.MindtPy_utils
    # Replace the original objective with slack minimization.
    next(fix_nlp.component_data_objects(Objective, active=True)).deactivate()
    # Deactivate nonlinear constraints; their slacked copies take over.
    for constr in fix_nlp.component_data_objects(ctype=Constraint,
                                                 active=True,
                                                 descend_into=True):
        if constr.body.polynomial_degree() not in [0, 1]:
            constr.deactivate()
    MindtPy.MindtPy_feas.activate()
    MindtPy.MindtPy_feas_obj = Objective(expr=sum(
        s for s in MindtPy.MindtPy_feas.slack_var[...]), sense=minimize)
    TransformationFactory('core.fix_discrete').apply_to(fix_nlp)
    with SuppressInfeasibleWarning():
        feas_soln = SolverFactory(config.nlp_solver).solve(
            fix_nlp, **config.nlp_solver_args)
    subprob_terminate_cond = feas_soln.solver.termination_condition
    if subprob_terminate_cond is tc.optimal:
        copy_var_list_values(
            MindtPy.variable_list,
            solve_data.working_model.MindtPy_utils.variable_list,
            config)
    elif subprob_terminate_cond is tc.infeasible:
        raise ValueError('Feasibility NLP infeasible. '
                         'This should never happen.')
    else:
        raise ValueError(
            'MindtPy unable to handle feasibility NLP termination condition '
            'of {}'.format(subprob_terminate_cond))
    var_values = [v.value for v in MindtPy.variable_list]
    duals = [0 for _ in MindtPy.constraint_list]
    for i, constr in enumerate(MindtPy.constraint_list):
        # BUG FIX: the old code summed constr.upper and constr.lower,
        # which double-counted the rhs for range constraints such as
        # 1 <= expr <= 1 (as its own TODO noted). Use the active bound
        # instead, matching the sign convention -1 for <=, +1 for >=.
        rhs = constr.upper if constr.has_ub() else constr.lower
        sign_adjust = -1 if constr.has_ub() else 1
        duals[i] = sign_adjust * max(
            0, sign_adjust * (rhs - value(constr.body)))
    if value(MindtPy.MindtPy_feas_obj.expr) == 0:
        raise ValueError('Problem is not feasible, check NLP solver')
    return fix_nlp, feas_soln
def test_do_not_reactivate_disjuncts_with_abandon(self):
    """gdp.bigm must leave an explicitly deactivated disjunct inactive,
    even though a sibling disjunct's indicator was fixed."""
    model = ConcreteModel()
    model.x = Var()
    model.s = RangeSet(4)
    model.d = Disjunct(model.s)
    abandoned = model.d[2]
    abandoned.bad_constraint_should_not_be_active = Constraint(
        expr=model.x >= 1)
    model.disj1 = Disjunction(expr=[model.d[1], abandoned])
    model.disj2 = Disjunction(expr=[model.d[3], model.d[4]])
    model.d[1].indicator_var.fix(1)
    abandoned.deactivate()
    TransformationFactory('gdp.bigm').apply_to(model)
    self.assertFalse(abandoned.active)
def solve_global_subproblem(mip_result, solve_data, config):
    """Set up and solve the global MINLP or NLP subproblem.

    Clones the working model, fixes the disjunct indicator variables (and,
    when ``config.force_subproblem_nlp`` is set, the discrete variables)
    to the values in ``mip_result``, applies ``gdp.fix_disjuncts``,
    deactivates duals (global solvers may not provide them), and solves.

    Args:
        mip_result: master-problem solution carrying ``disjunct_values``
            and ``var_values``.
        solve_data: GDPopt solve data container.
        config: GDPopt configuration block.

    Returns:
        The subproblem result object; feasible results also update the
        progress indicators.

    Raises:
        ValueError: if a disjunct indicator or discrete-variable value is
            not within ``config.integer_tolerance`` of an integer.
        RuntimeError: if unfixed discrete variables remain when an NLP
            subproblem was forced.
    """
    subprob = solve_data.working_model.clone()
    solve_data.nlp_iteration += 1
    # copy in the discrete variable values
    for disj, val in zip(subprob.GDPopt_utils.disjunct_list,
                         mip_result.disjunct_values):
        rounded_val = int(round(val))
        if (fabs(val - rounded_val) > config.integer_tolerance or
                rounded_val not in (0, 1)):
            # BUG FIX: 'val' is already a plain number; 'val.value' raised
            # AttributeError instead of the intended ValueError.
            raise ValueError("Disjunct %s indicator value %s is not "
                             "within tolerance %s of 0 or 1." %
                             (disj.name, val, config.integer_tolerance))
        else:
            if config.round_discrete_vars:
                disj.indicator_var.fix(rounded_val)
            else:
                disj.indicator_var.fix(val)
    if config.force_subproblem_nlp:
        # We also need to copy over the discrete variable values
        for var, val in zip(subprob.GDPopt_utils.variable_list,
                            mip_result.var_values):
            if var.is_continuous():
                continue
            rounded_val = int(round(val))
            if fabs(val - rounded_val) > config.integer_tolerance:
                # BUG FIX: report the master-problem value 'val' that
                # failed the check, not the clone variable's stale value.
                raise ValueError("Discrete variable %s value %s is not "
                                 "within tolerance %s of %s." %
                                 (var.name, val,
                                  config.integer_tolerance, rounded_val))
            else:
                # variable is binary and within tolerances
                if config.round_discrete_vars:
                    var.fix(rounded_val)
                else:
                    var.fix(val)
    TransformationFactory('gdp.fix_disjuncts').apply_to(subprob)
    subprob.dual.deactivate()  # global solvers may not give dual info
    unfixed_discrete_vars = detect_unfixed_discrete_vars(subprob)
    if config.force_subproblem_nlp and len(unfixed_discrete_vars) > 0:
        raise RuntimeError(
            "Unfixed discrete variables found on the NLP subproblem.")
    elif len(unfixed_discrete_vars) == 0:
        subprob_result = solve_NLP(subprob, solve_data, config)
    else:
        subprob_result = solve_MINLP(subprob, solve_data, config)
    if subprob_result.feasible:  # NLP is feasible
        update_subproblem_progress_indicators(subprob, solve_data, config)
    return subprob_result
def solve_LOA_subproblem(mip_var_values, solve_data, config):
    """Set up and solve the local LOA subproblem."""
    subproblem = solve_data.working_model.clone()
    solve_data.nlp_iteration += 1
    # Fix the discrete decisions to the values chosen by the MIP master.
    copy_and_fix_mip_values_to_nlp(
        subproblem.GDPopt_utils.working_var_list, mip_var_values, config)
    TransformationFactory('gdp.fix_disjuncts').apply_to(subproblem)
    result = solve_NLP(subproblem, solve_data, config)
    if result.feasible:
        # A feasible NLP updates the incumbent / progress metrics.
        update_nlp_progress_indicators(subproblem, solve_data, config)
    return result
def initialize_model(m, nfe):
    """Simulate the model under a constant input profile, then initialize
    a LAGRANGE-RADAU collocation discretization (*nfe* finite elements,
    one collocation point) from the simulated trajectory."""
    m.u_input = Suffix(direction=Suffix.LOCAL)
    m.u_input[m.u] = {0: -0.06}
    simulator = Simulator(m, package='scipy')
    simulator.simulate(numpoints=100, varying_inputs=m.u_input)
    TransformationFactory('dae.collocation').apply_to(
        m, nfe=nfe, ncp=1, scheme='LAGRANGE-RADAU')
    simulator.initialize_model()
def obbt_disjunct(orig_model, idx, solver): model = orig_model.clone() # Fix the disjunct to be active disjunct = model._disjuncts_to_process[idx] disjunct.indicator_var.fix(1) for obj in model.component_data_objects(Objective, active=True): obj.deactivate() # Deactivate nonlinear constraints for constr in model.component_data_objects(Constraint, active=True, descend_into=(Block, Disjunct)): if constr.body.polynomial_degree() not in linear_degrees: constr.deactivate() # Only look at the variables participating in active constraints within the scope relevant_var_set = ComponentSet() for constr in disjunct.component_data_objects(Constraint, active=True): relevant_var_set.update( identify_variables(constr.body, include_fixed=False)) TransformationFactory('gdp.bigm').apply_to(model) model._var_bounding_obj = Objective(expr=1, sense=minimize) for var in relevant_var_set: model._var_bounding_obj.set_value(expr=var) var_lb = solve_bounding_problem(model, solver) if var_lb is None: return None # bounding problem infeasible model._var_bounding_obj.set_value(expr=-var) var_ub = solve_bounding_problem(model, solver) if var_ub is None: return None # bounding problem infeasible else: var_ub = -var_ub # sign correction var.setlb(var_lb) var.setub(var_ub) # Maps original variable --> (new computed LB, new computed UB) var_bnds = ComponentMap( ((orig_var, (clone_var.lb if clone_var.has_lb() else -inf, clone_var.ub if clone_var.has_ub() else inf)) for orig_var, clone_var in zip(orig_model._disj_bnds_linear_vars, model._disj_bnds_linear_vars) if clone_var in relevant_var_set)) return var_bnds
def _get_disaggregated_vars(self, hull):
    """Collect the disaggregated variables created by the gdp.hull
    transformation on the given model.

    Args:
        hull: a model previously transformed with 'gdp.hull'.

    Returns:
        ComponentSet of the disaggregated Var data objects found on the
        transformation blocks of the transformed disjuncts.
    """
    # FIX: removed the unused local 'hull_xform =
    # TransformationFactory("gdp.hull")' — it was never referenced.
    disaggregatedVars = ComponentSet()
    for disjunction in hull.component_data_objects(
            Disjunction, descend_into=(Disjunct, Block)):
        for disjunct in disjunction.disjuncts:
            # Only transformed disjuncts carry a transformation block
            # (stored as a weakref, hence the call).
            if disjunct.transformation_block is not None:
                transBlock = disjunct.transformation_block()
                for v in transBlock.disaggregatedVars.\
                        component_data_objects(Var):
                    disaggregatedVars.add(v)
    return disaggregatedVars
def test_deactivate_nested_disjunction(self):
    """Deactivating an outer disjunct must also deactivate the
    disjunction nested inside it, so gdp.bigm leaves no active
    Disjunction objects behind."""
    model = ConcreteModel()
    model.d1 = Disjunct()
    model.d1.d1 = Disjunct()
    model.d1.d2 = Disjunct()
    model.d1.disj = Disjunction(expr=[model.d1.d1, model.d1.d2])
    model.d2 = Disjunct()
    model.disj = Disjunction(expr=[model.d1, model.d2])
    model.d1.deactivate()
    TransformationFactory('gdp.bigm').apply_to(model)
    # There should be no active Disjunction objects.
    remaining = next(
        model.component_data_objects(Disjunction, active=True), None)
    self.assertIsNone(remaining)
def initialize_model(m, n_sim, n_nfe, n_ncp):
    """Simulate the model under constant input profiles, then initialize
    a LAGRANGE-RADAU collocation discretization (*n_nfe* finite elements,
    *n_ncp* collocation points) from the simulated trajectory."""
    m.u_input = Suffix(direction=Suffix.LOCAL)
    m.u_input[m.vp] = {0: 0.75}
    m.u_input[m.vt] = {0: 0.75}
    simulator = Simulator(m, package='scipy')
    simulator.simulate(numpoints=n_sim, varying_inputs=m.u_input)
    TransformationFactory('dae.collocation').apply_to(
        m, nfe=n_nfe, ncp=n_ncp, scheme='LAGRANGE-RADAU')
    simulator.initialize_model()
def _presolve(self, *args, **kwds):
    """Apply the mpec.nl transformation before the base presolve.

    If the first argument is a Pyomo model (not a filename or IBlock),
    its complementarity conditions are rewritten for the NL writer.
    When the model has none, the instance is not retained.
    """
    if (not isinstance(args[0], six.string_types)) and \
       (not isinstance(args[0], IBlock)):
        self._instance = args[0]
        xfrm = TransformationFactory('mpec.nl')
        xfrm.apply_to(self._instance)
        if len(self._instance._transformation_data['mpec.nl'].compl_cuids) == 0:
            # There were no complementarity conditions
            # so we don't hold onto the instance
            self._instance = None
        else:
            args = (self._instance,)
    else:
        self._instance = None
    # NOTE(review): reconstructed as an active call to the base-class
    # presolve (the '#' above it reads as a separator line) — confirm
    # against the original source.
    SystemCallSolver._presolve(self, *args, **kwds)
def init_max_binaries(solve_data, config):
    """Initialize by maximizing binary variables and disjuncts.

    This function activates as many binary variables and disjuncts as
    feasible.
    """
    solve_data.mip_iteration += 1
    linear_GDP = solve_data.linear_GDP.clone()
    config.logger.info("Generating initial linear GDP approximation by "
                       "solving a subproblem that maximizes "
                       "the sum of all binary and logical variables.")
    # Set up binary maximization objective
    linear_GDP.GDPopt_utils.objective.deactivate()
    binary_vars = (
        v for v in linear_GDP.component_data_objects(
            ctype=Var, descend_into=(Block, Disjunct))
        if v.is_binary() and not v.fixed)
    linear_GDP.GDPopt_utils.max_binary_obj = Objective(
        expr=sum(binary_vars), sense=maximize)
    # Solve
    mip_results = solve_linear_GDP(linear_GDP, solve_data, config)
    if mip_results:
        _, mip_var_values = mip_results
        # use the mip_var_values to create the NLP subproblem
        nlp_model = solve_data.working_model.clone()
        # copy in the discrete variable values
        copy_and_fix_mip_values_to_nlp(
            nlp_model.GDPopt_utils.working_var_list,
            mip_var_values, config)
        TransformationFactory('gdp.fix_disjuncts').apply_to(nlp_model)
        solve_data.nlp_iteration += 1
        nlp_result = solve_NLP(nlp_model, solve_data, config)
        nlp_feasible, nlp_var_values, nlp_duals = nlp_result
        if nlp_feasible:
            update_nlp_progress_indicators(nlp_model, solve_data, config)
            add_outer_approximation_cuts(
                nlp_var_values, nlp_duals, solve_data, config)
        # Exclude this discrete combination from future master problems
        # regardless of NLP feasibility.
        add_integer_cut(
            mip_var_values, solve_data, config, feasible=nlp_feasible)
    else:
        config.logger.info(
            "Linear relaxation for initialization was infeasible. "
            "Problem is infeasible.")
        return False
def _apply_solver(self):
    """Solve a robust model by cutting planes: solve the nominal problem,
    let each cut generator add violated-scenario cuts, and repeat until
    every uncertain constraint is satisfied."""
    start_time = time.time()
    instance = self._instance
    xfrm = TransformationFactory('romodel.generators')
    xfrm.apply_to(instance)
    tdata = instance._transformation_data['romodel.generators']
    generators = tdata.generators
    # Need to set this up for main and sub solver
    if not self.options.solver:
        # Default sub-solver when none was specified.
        solver = 'gurobi'
    else:
        solver = self.options.solver
    with pyomo.opt.SolverFactory(solver) as opt:
        self.results = []
        feasible = {}
        # Solve nominal problem
        opt.options = self.options
        results = opt.solve(instance,
                            tee=self._tee,
                            timelimit=self._timelimit)
        # Add initial cut to check feasibility
        # (g.add_cut() returns True when the constraint is already
        # satisfied for the worst case)
        for g in generators:
            feasible[g.name] = g.add_cut()
        # Keep adding cuts until feasible
        while not all(feasible.values()):
            results = opt.solve(instance,
                                tee=self._tee,
                                timelimit=self._timelimit)
            for g in generators:
                # Only add cut if uncertain constraint isn't feasible
                if not feasible[g.name]:
                    feasible[g.name] = g.add_cut()
        # Record only the final (feasible) solve results.
        self.results.append(results)
    stop_time = time.time()
    self.wall_time = stop_time - start_time
    self.results_obj = self._setup_results_obj()
    #
    # Return the sub-solver return condition value and log
    #
    return pyutilib.misc.Bunch(rc=getattr(opt, '_rc', None),
                               log=getattr(opt, '_log', None))
def init_custom_disjuncts(solve_data, config): """Initialize by using user-specified custom disjuncts.""" # TODO error checking to make sure that the user gave proper disjuncts for active_disjunct_set in config.custom_init_disjuncts: # custom_init_disjuncts contains a list of sets, giving the disjuncts # active at each initialization iteration # fix the disjuncts in the linear GDP and send for solution. solve_data.mip_iteration += 1 linear_GDP = solve_data.linear_GDP.clone() config.logger.info( "Generating initial linear GDP approximation by " "solving subproblems with user-specified active disjuncts.") for orig_disj, clone_disj in zip( solve_data.original_model.GDPopt_utils.orig_disjuncts_list, linear_GDP.GDPopt_utils.orig_disjuncts_list): if orig_disj in active_disjunct_set: clone_disj.indicator_var.fix(1) mip_result = solve_linear_GDP(linear_GDP, solve_data, config) if mip_result: _, mip_var_values = mip_result # use the mip_var_values to create the NLP subproblem nlp_model = solve_data.working_model.clone() # copy in the discrete variable values copy_and_fix_mip_values_to_nlp( nlp_model.GDPopt_utils.working_var_list, mip_var_values, config) TransformationFactory('gdp.fix_disjuncts').apply_to(nlp_model) solve_data.nlp_iteration += 1 nlp_result = solve_NLP(nlp_model, solve_data, config) nlp_feasible, nlp_var_values, nlp_duals = nlp_result if nlp_feasible: update_nlp_progress_indicators(nlp_model, solve_data, config) add_outer_approximation_cuts(nlp_var_values, nlp_duals, solve_data, config) add_integer_cut(mip_var_values, solve_data, config, feasible=nlp_feasible) else: config.logger.error('Linear GDP infeasible for user-specified ' 'custom initialization disjunct set %s. ' 'Skipping that set and continuing on.' % list(disj.name for disj in active_disjunct_set))
def _test(self, tname, M):
    """Optionally transform *M* with self.xfrm, print it to a file, and
    compare against the baseline; fall back to a whitespace-insensitive
    token comparison when the byte-level comparison fails."""
    ofile = currdir + tname + '_%s.out' % str(self.xfrm)
    bfile = currdir + tname + '_%s.txt' % str(self.xfrm)
    if self.xfrm is not None:
        xfrm = TransformationFactory(self.xfrm)
        xfrm.apply_to(M)
    with capture_output(ofile):
        self._print(M)
    if not os.path.exists(bfile):
        # First run: promote the output to be the baseline.
        os.rename(ofile, bfile)
    try:
        self.assertTrue(cmp(ofile, bfile),
                        msg="Files %s and %s differ" % (ofile, bfile))
    except AssertionError:
        # BUG FIX: the bare 'except:' also swallowed KeyboardInterrupt
        # and unrelated errors; only a comparison failure should trigger
        # the token-level fallback.
        with open(ofile, 'r') as f1, open(bfile, 'r') as f2:
            f1_contents = list(filter(None, f1.read().split()))
            f2_contents = list(filter(None, f2.read().split()))
            self.assertEqual(f1_contents, f2_contents)
def init_rNLP(solve_data, config):
    """Initialize by solving the rNLP (relaxed binary variables)."""
    solve_data.nlp_iter += 1
    m = solve_data.working_model.clone()
    config.logger.info(
        "NLP %s: Solve relaxed integrality" % (solve_data.nlp_iter,))
    MindtPy = m.MindtPy_utils
    TransformationFactory('core.relax_integer_vars').apply_to(m)
    with SuppressInfeasibleWarning():
        results = SolverFactory(config.nlp_solver).solve(
            m, **config.nlp_solver_args)
    subprob_terminate_cond = results.solver.termination_condition
    if subprob_terminate_cond is tc.optimal or subprob_terminate_cond is tc.locallyOptimal:
        main_objective = next(m.component_data_objects(Objective, active=True))
        # NOTE(review): nlp_solution_values is computed but never used
        # below — confirm whether it should be recorded on solve_data.
        nlp_solution_values = list(v.value for v in MindtPy.variable_list)
        dual_values = list(m.dual[c] for c in MindtPy.constraint_list)
        # The relaxation bounds the true optimum: it is a lower bound
        # when minimizing, an upper bound when maximizing.
        if main_objective.sense == minimize:
            solve_data.LB = value(main_objective.expr)
        else:
            solve_data.UB = value(main_objective.expr)
        config.logger.info(
            'NLP %s: OBJ: %s LB: %s UB: %s'
            % (solve_data.nlp_iter, value(main_objective.expr),
               solve_data.LB, solve_data.UB))
        if config.strategy == 'OA':
            # Seed the MIP with the relaxed solution and add OA cuts.
            copy_var_list_values(m.MindtPy_utils.variable_list,
                                 solve_data.mip.MindtPy_utils.variable_list,
                                 config, ignore_integrality=True)
            add_oa_cuts(solve_data.mip, dual_values, solve_data, config)
            # TODO check if value of the binary or integer variables is
            # 0/1 or an integer value.
            for var in solve_data.mip.component_data_objects(ctype=Var):
                if var.is_integer():
                    var.value = int(round(var.value))
    elif subprob_terminate_cond is tc.infeasible:
        # TODO fail? try something else?
        config.logger.info(
            'Initial relaxed NLP problem is infeasible. '
            'Problem may be infeasible.')
    else:
        raise ValueError(
            'MindtPy unable to handle relaxed NLP termination condition '
            'of %s. Solver message: %s' %
            (subprob_terminate_cond, results.solver.message))
def prune_possible_values(block_scope, possible_values, config):
    """Prune the candidate values of effectively-discrete variables by
    solving a series of feasibility problems on a linear relaxation of
    the model; values proven infeasible are removed in place.

    Returns the (mutated) possible_values mapping.
    """
    # Prune the set of possible values by solving a series of feasibility
    # problems
    top_level_scope = block_scope.model()
    tmp_name = unique_component_name(
        top_level_scope, '_induced_linearity_prune_data')
    tmp_orig_blk = Block()
    setattr(top_level_scope, tmp_name, tmp_orig_blk)
    # Stash the data on a temporary block so it survives the clone and
    # can be located on the cloned model by name.
    tmp_orig_blk._possible_values = possible_values
    tmp_orig_blk._possible_value_vars = list(v for v in possible_values)
    tmp_orig_blk._tmp_block_scope = (block_scope,)
    model = top_level_scope.clone()
    tmp_clone_blk = getattr(model, tmp_name)
    for obj in model.component_data_objects(Objective, active=True):
        obj.deactivate()
    # Keep only the linear constraints for the feasibility checks.
    for constr in model.component_data_objects(
            Constraint, active=True, descend_into=(Block, Disjunct)):
        if constr.body.polynomial_degree() not in (1, 0):
            constr.deactivate()
    if block_scope.ctype == Disjunct:
        disj = tmp_clone_blk._tmp_block_scope[0]
        disj.indicator_var.fix(1)
        TransformationFactory('gdp.bigm').apply_to(model)
    tmp_clone_blk.test_feasible = Constraint()
    # Constant objective: we only care about feasibility.
    tmp_clone_blk._obj = Objective(expr=1)
    for eff_discr_var, vals in tmp_clone_blk._possible_values.items():
        val_feasible = {}
        for val in vals:
            tmp_clone_blk.test_feasible.set_value(eff_discr_var == val)
            with SuppressConstantObjectiveWarning():
                res = SolverFactory(config.pruning_solver).solve(model)
            if res.solver.termination_condition is tc.infeasible:
                val_feasible[val] = False
        # Values not proven infeasible are kept (default True).
        tmp_clone_blk._possible_values[eff_discr_var] = set(
            v for v in tmp_clone_blk._possible_values[eff_discr_var]
            if val_feasible.get(v, True))
    # Map the pruned sets back onto the original model's variables
    # (matched by position in _possible_value_vars).
    for i, var in enumerate(tmp_orig_blk._possible_value_vars):
        possible_values[var] = tmp_clone_blk._possible_values[
            tmp_clone_blk._possible_value_vars[i]]
    return possible_values
def _get_rBigM_obj_and_constraints(self, instance_rBigM):
    """Return the active objective of the relaxed bigM instance together
    with its linear constraints, rewritten in lb <= expr form via the
    FME transformation's constraint processor.

    Raises:
        GDP_Error: if the instance has no active objective.
    """
    # We try to grab the first active objective. If there is more
    # than one, the writer will yell when we try to solve below. If
    # there are 0, we will yell here.
    rBigM_obj = next(instance_rBigM.component_data_objects(
        Objective, active=True), None)
    if rBigM_obj is None:
        raise GDP_Error("Cannot apply cutting planes transformation "
                        "without an active objective in the model!")
    #
    # Collect all of the linear constraints that are in the rBigM
    # instance. We will need these so that we can compare what we get from
    # FME to them and make sure we aren't adding redundant constraints to
    # the model. For convenience, we will make sure they are all in the
    # form lb <= expr (so we will break equality constraints)
    #
    fme = TransformationFactory('contrib.fourier_motzkin_elimination')
    rBigM_linear_constraints = []
    for cons in instance_rBigM.component_data_objects(
            Constraint,
            descend_into=Block,
            sort=SortComponents.deterministic,
            active=True):
        body = cons.body
        if body.polynomial_degree() != 1:
            # We will never get a nonlinear constraint out of FME, so we
            # don't risk it being identical to this one.
            continue
        # TODO: Guess this shouldn't have been private...
        rBigM_linear_constraints.extend(fme._process_constraint(cons))
    # [ESJ Aug 13 2020] NOTE: We actually don't need to worry about
    # variable bounds here because the FME transformation will take care
    # of them (i.e. convert those of the disaggregated variables to
    # constraints for the purposes of the projection.)
    return rBigM_obj, rBigM_linear_constraints
def _apply_solver(self):
    """Solve a bilevel program: first solve the high-point relaxation to
    screen for infeasibility, then solve the optimistic bilevel problem
    with BilevelSolver3."""
    start_time = time.time()

    def _check_termination_condition(results):
        # do we want to be more restrictive of termination conditions?
        # do we want to have different behavior for sub-optimal
        # termination?
        if results.solver.termination_condition not in safe_termination_conditions:
            raise Exception(
                'Problem encountered during solve, termination_condition {}'
                .format(results.solver.termination_condition))

    # construct the high-point problem (LL feasible, no LL objective)
    # s0 <- solve the high-point
    # if s0 infeasible then return high_point_infeasible
    xfrm = TransformationFactory('pao.bilevel.highpoint')
    xfrm.apply_to(self._instance)
    #
    # Solve with a specified solver
    #
    solver = self.options.solver
    if not self.options.solver:
        solver = 'ipopt'
    # Solve each high-point block created by the transformation
    # (identified by the '_hp' suffix in its name).
    for c in self._instance.component_objects(Block, descend_into=False):
        if '_hp' in c.name:
            c.activate()
            opt = SolverFactory(solver)
            results = opt.solve(c)
            _check_termination_condition(results)
            c.deactivate()
    # s1 <- solve the optimistic bilevel (linear/linear) problem
    # (call solver3)
    # if s1 infeasible then return optimistic_infeasible
    opt = BilevelSolver3()
    opt.options.solver = solver
    results = opt.solve(self._instance)
    _check_termination_condition(results)
def init_fixed_disjuncts(solve_data, config): """Initialize by solving the problem with the current disjunct values.""" # TODO error checking to make sure that the user gave proper disjuncts # fix the disjuncts in the linear GDP and send for solution. solve_data.mip_iteration += 1 config.logger.info( "Generating initial linear GDP approximation by " "solving subproblem with original user-specified disjunct values.") linear_GDP = solve_data.linear_GDP.clone() TransformationFactory('gdp.fix_disjuncts').apply_to(linear_GDP) mip_result = solve_linear_GDP(linear_GDP, solve_data, config) if mip_result.feasible: nlp_result = solve_disjunctive_subproblem(mip_result, solve_data, config) if nlp_result.feasible: add_subproblem_cuts(nlp_result, solve_data, config) add_integer_cut( mip_result.var_values, solve_data.linear_GDP, solve_data, config, feasible=nlp_result.feasible) else: config.logger.error( 'Linear GDP infeasible for initial user-specified ' 'disjunct values. ' 'Skipping initialization.')
def _setup_subproblems(self, instance, bigM):
    """Build the auxiliary problems used by the cutting-plane algorithm.

    Transforms *instance* in place with gdp.bigm (that is the model
    ultimately returned to the user) and constructs two relaxations:

    Returns:
        instance_rBigM: continuous relaxation of the bigM-transformed model.
        instance_rCHull: continuous relaxation of the chull-transformed
            model, used as the separation problem for generating cuts.
        var_info: tuple of (original var, rBigM var, rCHull var, xstar
            param) tuples linking each unfixed variable across the models.
        transBlockName: name of the shared transformation block.
    """
    # create transformation block
    transBlockName, transBlock = self._add_relaxation_block(
        instance,
        '_pyomo_gdp_cuttingplane_relaxation')

    # We store a list of all vars so that we can efficiently
    # generate maps among the subproblems. Deterministic sort so the
    # index-based correspondence below is reproducible across runs.
    transBlock.all_vars = list(v for v in instance.component_data_objects(
        Var,
        descend_into=(Block, Disjunct),
        sort=SortComponents.deterministic) if not v.is_fixed())

    # we'll store all the cuts we add together
    transBlock.cuts = Constraint(Any)

    # get bigM and chull relaxations
    bigMRelaxation = TransformationFactory('gdp.bigm')
    chullRelaxation = TransformationFactory('gdp.chull')
    relaxIntegrality = TransformationFactory('core.relax_integrality')

    # HACK: for the current writers, we need to also apply gdp.reclassify so
    # that the indicator variables stay where they are in the big M model
    # (since that is what we are eventually going to solve after we add our
    # cuts).
    reclassify = TransformationFactory('gdp.reclassify')

    #
    # Generate the CHull relaxation (used for the separation
    # problem to generate cutting planes)
    #
    instance_rCHull = chullRelaxation.create_using(instance)
    # This relies on relaxIntegrality relaxing variables on deactivated
    # blocks, which should be fine.
    reclassify.apply_to(instance_rCHull)
    relaxIntegrality.apply_to(instance_rCHull)

    #
    # Reformulate the instance using the BigM relaxation (this will
    # be the final instance returned to the user)
    #
    bigMRelaxation.apply_to(instance, bigM=bigM)
    reclassify.apply_to(instance)

    #
    # Generate the continuous relaxation of the BigM transformation
    #
    instance_rBigM = relaxIntegrality.create_using(instance)

    #
    # Add the xstar parameter for the CHull problem
    #
    transBlock_rCHull = instance_rCHull.component(transBlockName)
    #
    # this will hold the solution to rbigm each time we solve it. We
    # add it to the transformation block so that we don't have to
    # worry about name conflicts.
    transBlock_rCHull.xstar = Param(range(len(transBlock.all_vars)),
                                    mutable=True, default=None)

    transBlock_rBigM = instance_rBigM.component(transBlockName)

    #
    # Generate the mapping between the variables on all the
    # instances and the xstar parameter. NOTE(review): this assumes the
    # clones preserve the ordering of all_vars — presumably guaranteed by
    # create_using; confirm if the transformations ever reorder components.
    #
    var_info = tuple(
        (v,
         transBlock_rBigM.all_vars[i],
         transBlock_rCHull.all_vars[i],
         transBlock_rCHull.xstar[i])
        for i, v in enumerate(transBlock.all_vars))

    #
    # Add the separation objective to the chull subproblem
    #
    self._add_separation_objective(var_info, transBlock_rCHull)

    return instance_rBigM, instance_rCHull, var_info, transBlockName
def _apply_solver(self):
    """Solve the bilevel program via the linear-dual reformulation.

    Pipeline: dualize the submodel(s), solve the reformulated problem,
    then fix the upper-level variables at their solution values,
    reactivate the original submodel(s), and re-solve to recover the
    lower-level solution. Returns a Bunch with the subsolver's rc/log.
    """
    start_time = time.time()
    #
    # Cache the instance
    #
    xfrm = TransformationFactory('bilevel.linear_dual')
    xfrm.apply_to(self._instance)
    #
    # Apply an additional transformation to remap bilinear terms
    #
    if self.options.transform is None:
        xfrm = None
    else:
        xfrm = TransformationFactory(self.options.transform)
        xfrm.apply_to(self._instance)
    #
    # Solve with a specified solver; default to glpk when unset.
    #
    solver = self.options.solver
    if not self.options.solver:
        solver = 'glpk'
    # use the with block here so that deactivation of the
    # solver plugin always occurs thereby avoiding memory
    # leaks caused by plugins!
    with pyomo.opt.SolverFactory(solver) as opt:
        self.results = []
        #
        # **NOTE: It would be better to override _presolve on the
        #         base class of this solver as you might be
        #         missing a number of keywords that were passed
        #         into the solve method (e.g., none of the
        #         io_options are getting relayed to the subsolver
        #         here).
        #
        self.results.append(
            opt.solve(self._instance,
                      tee=self._tee,
                      timelimit=self._timelimit))
        #
        # Transform the result back into the original model
        #
        tdata = self._instance._transformation_data['bilevel.linear_dual']
        unfixed_cuids = set()
        # Copy variable values and fix them, remembering which ones we
        # fixed here so only those are unfixed again below.
        # NOTE(review): .iteritems() is Python-2 only — this block predates
        # Python 3 support; confirm before running on a modern interpreter.
        for vuid in tdata.fixed:
            for index_, data_ in vuid.find_component_on(
                    self._instance).iteritems():
                if not data_.fixed:
                    data_.value = self._instance.find_component(
                        data_).value
                    data_.fixed = True
                    unfixed_cuids.add(ComponentUID(data_))
        # Reclassify the SubModel components and resolve with the
        # upper-level variables held fixed.
        for name_ in tdata.submodel:
            submodel = getattr(self._instance, name_)
            submodel.activate()
            dual_submodel = getattr(self._instance, name_ + '_dual')
            dual_submodel.deactivate()
            pyomo.util.PyomoAPIFactory(
                'pyomo.repn.compute_canonical_repn')({}, model=submodel)
            self._instance.reclassify_component_type(name_, Block)
            # use the with block here so that deactivation of the
            # solver plugin always occurs thereby avoiding memory
            # leaks caused by plugins!
            with pyomo.opt.SolverFactory(solver) as opt_inner:
                #
                # **NOTE: It would be better to override _presolve on the
                #         base class of this solver as you might be
                #         missing a number of keywords that were passed
                #         into the solve method (e.g., none of the
                #         io_options are getting relayed to the subsolver
                #         here).
                #
                self.results.append(
                    opt_inner.solve(self._instance,
                                    tee=self._tee,
                                    timelimit=self._timelimit,
                                    select=None))
                self._instance.solutions.select(0, ignore_fixed_vars=True)
                # NOTE(review): data_ leaks from the fixing loop above —
                # this reclassifies relative to the LAST fixed variable's
                # parent block. Looks fragile (breaks if tdata.fixed is
                # empty); confirm the intended component is submodel's
                # own parent.
                data_.parent_component().parent_block(
                ).reclassify_component_type(name_, SubModel)
        # Unfix only the variables that this call fixed.
        for vuid in tdata.fixed:
            for index_, data_ in vuid.find_component_on(
                    self._instance).iteritems():
                if ComponentUID(data_) in unfixed_cuids:
                    data_.fixed = False
        stop_time = time.time()
        self.wall_time = stop_time - start_time
        # Reactivate top level objective
        for oname, odata in self._instance.component_map(
                Objective).items():
            odata.activate()
        #
        # Return the sub-solver return condition value and log
        #
        return pyutilib.misc.Bunch(rc=getattr(opt, '_rc', None),
                                   log=getattr(opt, '_log', None))