def init_fixed_disjuncts(solve_data, config): """Initialize by solving the problem with the current disjunct values.""" # TODO error checking to make sure that the user gave proper disjuncts # fix the disjuncts in the linear GDP and send for solution. solve_data.mip_iteration += 1 linear_GDP = solve_data.linear_GDP.clone() config.logger.info( "Generating initial linear GDP approximation by " "solving subproblem with original user-specified disjunct values.") TransformationFactory('gdp.fix_disjuncts').apply_to(linear_GDP) mip_result = solve_linear_GDP(linear_GDP, solve_data, config) if mip_result: _, mip_var_values = mip_result # use the mip_var_values to create the NLP subproblem nlp_model = solve_data.working_model.clone() # copy in the discrete variable values copy_and_fix_mip_values_to_nlp(nlp_model.GDPopt_utils.working_var_list, mip_var_values, config) TransformationFactory('gdp.fix_disjuncts').apply_to(nlp_model) solve_data.nlp_iteration += 1 nlp_result = solve_NLP(nlp_model, solve_data, config) nlp_feasible, nlp_var_values, nlp_duals = nlp_result if nlp_feasible: update_nlp_progress_indicators(nlp_model, solve_data, config) add_outer_approximation_cuts(nlp_var_values, nlp_duals, solve_data, config) add_integer_cut(mip_var_values, solve_data, config, feasible=nlp_feasible) else: config.logger.error('Linear GDP infeasible for initial user-specified ' 'disjunct values. ' 'Skipping initialization.')
def apply(self, **kwds): instance = kwds.pop('instance') # Not sure why the ModifyInstance callback started passing the # model along with the instance. We will ignore it. model = kwds.pop('model', None) xform = TransformationFactory('gdp.bigm') return xform.apply_to(instance, **kwds)
def apply(self, **kwds): instance = kwds.pop('instance') # Not sure why the ModifyInstance callback started passing the # model along with the instance. We will ignore it. model = kwds.pop('model', None) xform = TransformationFactory('gdp.chull') return xform.apply_to(instance, **kwds)
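The two callback shims above only forward to the registered GDP transformations. A minimal, hedged sketch of applying 'gdp.bigm' directly to a small disjunctive model; the toy model and the commented solver call are illustrative assumptions, not part of the snippets above.

from pyomo.environ import (ConcreteModel, Var, Constraint, Objective,
                           TransformationFactory)
from pyomo.gdp import Disjunct, Disjunction

m = ConcreteModel()
m.x = Var(bounds=(0, 10))
m.d1 = Disjunct()
m.d1.c = Constraint(expr=m.x >= 2)
m.d2 = Disjunct()
m.d2.c = Constraint(expr=m.x <= 1)
m.disj = Disjunction(expr=[m.d1, m.d2])
m.obj = Objective(expr=m.x)

# Reformulate the disjunction with big-M constraints; M is estimated from the
# variable bounds here, or can be passed explicitly via bigM=...
TransformationFactory('gdp.bigm').apply_to(m)
# SolverFactory('glpk').solve(m)  # any MIP solver; availability is an assumption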
def _apply_solver(self): start_time = time.time() instance = self._instance xfrm = TransformationFactory('romodel.nominal') xfrm.apply_to(instance) if not self.options.solver: # Default to gurobi solver = 'gurobi' else: solver = self.options.solver with pyomo.opt.SolverFactory(solver) as opt: self.results = [] results = opt.solve(instance, tee=self._tee, timelimit=self._timelimit) self.results.append(results) stop_time = time.time() self.wall_time = stop_time - start_time self.results_obj = self._setup_results_obj() # # Return the sub-solver return condition value and log # return pyutilib.misc.Bunch(rc=getattr(opt, '_rc', None), log=getattr(opt, '_log', None))
def _apply_solver(self): start_time = time.time() # construct the high-point problem (LL feasible, no LL objective) # s0 <- solve the high-point # if s0 infeasible then return high_point_infeasible xfrm = TransformationFactory('pao.bilevel.highpoint') xfrm.apply_to(self._instance) # # Solve with a specified solver # solver = self.options.solver if not self.options.solver: solver = 'ipopt' for c in self._instance.component_objects(Block, descend_into=False): if '_hp' in c.name: c.activate() opt = pyomo.opt.SolverFactory(solver) results = opt.solve(c) _check_termination_condition(results) c.deactivate() # s1 <- solve the optimistic bilevel (linear/linear) problem (call solver3) # if s1 infeasible then return optimistic_infeasible opt = BilevelSolver3() opt.options.solver = solver results = opt.solve(self._instance) _check_termination_condition(results)
def test_reformulation(self, name, model, reformulation): """ Tests bilevel reformulation and checks whether the derivation is equivalent to the known solution in the reformulation/*.out file Parameters ---------- name : `string` model: `string` reformulation: `string` """ from importlib.machinery import SourceFileLoader namespace = SourceFileLoader(name, model).load_module() instance = namespace.pyomo_create_model() xfrm = TransformationFactory('pao.duality.linear_dual') for submodel in instance.component_objects(SubModel, descend_into=True): instance.reclassify_component_type(submodel, Block) dualmodel = xfrm._create_using(instance, block=submodel.name) break with open(join(aux_dir, name + '_linear_mpec.out'), 'w') as ofile: dualmodel.pprint(ostream=ofile) self.assertFileEqualsBaseline(join(aux_dir, name + '_linear_mpec.out'), reformulation, tolerance=1e-5)
def help_transformations(): import pyomo.environ from pyomo.core import TransformationFactory wrapper = textwrap.TextWrapper() wrapper.initial_indent = ' ' wrapper.subsequent_indent = ' ' print("") print("Pyomo Model Transformations") print("---------------------------") for xform in sorted(TransformationFactory): print(" " + xform) _doc = TransformationFactory.doc(xform) or "" # Ideally, the Factory would ensure that the doc string # indicated deprecation, but as @deprecated() is Pyomo # functionality and the Factory comes directly from PyUtilib, # PyUtilib probably shouldn't contain Pyomo-specific processing. # The next best thing is to ensure that the deprecation status # is indicated here. _init_doc = TransformationFactory.get_class(xform).__init__.__doc__ \ or "" if _init_doc.strip().startswith( 'DEPRECATED') and 'DEPRECAT' not in _doc: _doc = ' '.join(('[DEPRECATED]', _doc)) if _doc: print(wrapper.fill(_doc))
def _apply_solver(self): start_time = time.time() instance = self._instance transformations = [ 'romodel.ellipsoidal', 'romodel.polyhedral', 'romodel.unknown' ] for transform in transformations: xfrm = TransformationFactory(transform) xfrm.apply_to(instance) if not self.options.solver: solver = 'gurobi' else: solver = self.options.solver with pyomo.opt.SolverFactory(solver) as opt: self.results = [] opt.options = self.options results = opt.solve(self._instance, tee=self._tee, timelimit=self._timelimit) self.results.append(results) stop_time = time.time() self.wall_time = stop_time - start_time self.results_obj = self._setup_results_obj() # # Return the sub-solver return condition value and log # return pyutilib.misc.Bunch(rc=getattr(opt, '_rc', None), log=getattr(opt, '_log', None))
def _test(self, tname, M): ofile = currdir + tname + '_nlxfrm.out' bfile = currdir + tname + '_nlxfrm.nl' xfrm = TransformationFactory('mpec.nl') xfrm.apply_to(M) M.write(ofile, format=ProblemFormat.nl) if not os.path.exists(bfile): os.rename(ofile, bfile) self.assertFileEqualsBaseline(ofile, bfile)
def _apply_solver(self): start_time = time.time() # # Transform instance # xfrm = TransformationFactory('mpec.simple_nonlinear') xfrm.apply_to(self._instance) # # Solve with a specified solver # solver = self.options.solver if not self.options.solver: #pragma:nocover self.options.solver = solver = 'ipopt' # use the with block here so that deactivation of the # solver plugin always occurs thereby avoiding memory # leaks caused by plugins! with pyomo.opt.SolverFactory(solver) as opt: self.results = [] epsilon_final = self.options.get('epsilon_final', 1e-7) epsilon = self.options.get('epsilon_initial', epsilon_final) while (True): self._instance.mpec_bound.value = epsilon # # **NOTE: It would be better to override _presolve on the # base class of this solver as you might be # missing a number of keywords that were passed # into the solve method (e.g., none of the # io_options are getting relayed to the subsolver # here). # res = opt.solve(self._instance, tee=self._tee, timelimit=self._timelimit) self.results.append(res) epsilon /= 10.0 if epsilon < epsilon_final: break # # Reclassify the Complementarity components # from pyomo.mpec import Complementarity for cuid in self._instance._transformation_data[ 'mpec.simple_nonlinear'].compl_cuids: cobj = cuid.find_component(self._instance) cobj.parent_block().reclassify_component_type( cobj, Complementarity) # # Update timing # stop_time = time.time() self.wall_time = stop_time - start_time # # Return the sub-solver return condition value and log # return pyutilib.misc.Bunch(rc=getattr(opt, '_rc', None), log=getattr(opt, '_log', None))
def _test(self, tname, M): ofile = currdir + tname + '_nlxfrm.out' bfile = currdir + tname + '_nlxfrm.nl' xfrm = TransformationFactory('mpec.nl') xfrm.apply_to(M) M.write(ofile, format=ProblemFormat.nl) if not os.path.exists(bfile): os.rename(ofile, bfile) self.assertTrue(cmp(ofile, bfile), msg="Files %s and %s differ" % (ofile, bfile))
def _apply_solver(self): start_time = time.time() # # Transform instance # xfrm = TransformationFactory('mpec.simple_nonlinear') xfrm.apply_to(self._instance) # # Solve with a specified solver # solver = self.options.solver if not self.options.solver: #pragma:nocover self.options.solver = solver = 'ipopt' # use the with block here so that deactivation of the # solver plugin always occurs thereby avoiding memory # leaks caused by plugins! with pyomo.opt.SolverFactory(solver) as opt: self.results = [] epsilon_final = self.options.get('epsilon_final', 1e-7) epsilon = self.options.get('epsilon_initial', epsilon_final) while (True): self._instance.mpec_bound.value = epsilon # # **NOTE: It would be better to override _presolve on the # base class of this solver as you might be # missing a number of keywords that were passed # into the solve method (e.g., none of the # io_options are getting relayed to the subsolver # here). # res = opt.solve(self._instance, tee=self._tee, timelimit=self._timelimit) self.results.append(res) epsilon /= 10.0 if epsilon < epsilon_final: break # # Reclassify the Complementarity components # from pyomo.mpec import Complementarity for cuid in self._instance._transformation_data['mpec.simple_nonlinear'].compl_cuids: cobj = cuid.find_component(self._instance) cobj.parent_block().reclassify_component_type(cobj, Complementarity) # # Update timing # stop_time = time.time() self.wall_time = stop_time - start_time # # Return the sub-solver return condition value and log # return pyutilib.misc.Bunch(rc=getattr(opt,'_rc', None), log=getattr(opt,'_log',None))
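The two solver variants above drive an epsilon loop over a model transformed with 'mpec.simple_nonlinear'. A hedged, standalone sketch of that transformation on a toy complementarity model; the model is an illustrative assumption, while the mpec_bound keyword and the mutable mpec_bound parameter are the ones the loops above rely on.

from pyomo.environ import ConcreteModel, Var, Objective, TransformationFactory
from pyomo.mpec import Complementarity, complements

m = ConcreteModel()
m.x = Var(bounds=(0, 10))
m.y = Var(bounds=(0, 10))
m.obj = Objective(expr=m.x + m.y)
# At a solution, x >= 1 and y >= 0 both hold and (x - 1) * y == 0.
m.compl = Complementarity(expr=complements(m.x >= 1, m.y >= 0))

# Rewrite the complementarity condition as smooth constraints bounded by the
# mutable 'mpec_bound' parameter, which the solver loops above shrink toward 0.
TransformationFactory('mpec.simple_nonlinear').apply_to(m, mpec_bound=1e-2)
m.mpec_bound.value = 1e-3  # tighten before a subsequent re-solve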
def _test(self, tname, M): ofile = currdir + tname + '_%s.out' % str(self.xfrm) bfile = currdir + tname + '_%s.txt' % str(self.xfrm) if self.xfrm is not None: xfrm = TransformationFactory(self.xfrm) xfrm.apply_to(M) with capture_output(ofile): self._print(M) if not os.path.exists(bfile): os.rename(ofile, bfile) self.assertFileEqualsBaseline(ofile, bfile)
def help_transformations(): import pyomo.environ from pyomo.core import TransformationFactory wrapper = textwrap.TextWrapper() wrapper.initial_indent = ' ' wrapper.subsequent_indent = ' ' print("") print("Pyomo Model Transformations") print("---------------------------") for xform in sorted(TransformationFactory.services()): print(" " + xform) print(wrapper.fill(TransformationFactory.doc(xform)))
def test_active_parent_disjunct_target(self): m = ConcreteModel() m.d1 = Disjunct() m.d1.sub1 = Disjunct() m.d1.sub2 = Disjunct() m.d1.disj = Disjunction(expr=[m.d1.sub1, m.d1.sub2]) TransformationFactory('gdp.bigm').apply_to(m, targets=m.d1.disj) m.d1.indicator_var.fix(1) TransformationFactory('gdp.reclassify').apply_to(m) self.assertIs(m.d1.type(), Block) self.assertIs(m.d1.sub1.type(), Block) self.assertIs(m.d1.sub2.type(), Block)
def help_transformations(): import pyomo.environ from pyomo.core import TransformationFactory wrapper = textwrap.TextWrapper() wrapper.initial_indent = ' ' wrapper.subsequent_indent = ' ' print("") print("Pyomo Model Transformations") print("---------------------------") for xform in sorted(TransformationFactory.services()): print(" "+xform) print(wrapper.fill(TransformationFactory.doc(xform)))
def initialize_model(m,nfe): u_profile = {0:-0.06} m.u_input = Suffix(direction=Suffix.LOCAL) m.u_input[m.u]=u_profile sim = Simulator(m,package='scipy') tsim, profiles = sim.simulate(numpoints=100, varying_inputs=m.u_input) discretizer = TransformationFactory('dae.collocation') discretizer.apply_to(m, nfe=nfe, ncp=1, scheme='LAGRANGE-RADAU') sim.initialize_model()
def initialize_model(m, nfe): u_profile = {0: -0.06} m.u_input = Suffix(direction=Suffix.LOCAL) m.u_input[m.u] = u_profile sim = Simulator(m, package='scipy') tsim, profiles = sim.simulate(numpoints=100, varying_inputs=m.u_input) discretizer = TransformationFactory('dae.collocation') discretizer.apply_to(m, nfe=nfe, ncp=1, scheme='LAGRANGE-RADAU') sim.initialize_model()
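Both initialize_model variants simulate with scipy and then discretize with 'dae.collocation'. A hedged sketch of the discretization step on a toy ODE model; the model itself is an illustrative assumption.

from pyomo.environ import ConcreteModel, Var, Constraint, TransformationFactory
from pyomo.dae import ContinuousSet, DerivativeVar

m = ConcreteModel()
m.t = ContinuousSet(bounds=(0, 1))
m.z = Var(m.t)
m.dzdt = DerivativeVar(m.z, wrt=m.t)
m.ode = Constraint(m.t, rule=lambda m, t: m.dzdt[t] == -m.z[t])
m.z[0].fix(1.0)  # initial condition

# Discretize the continuous domain with Radau collocation, as above.
TransformationFactory('dae.collocation').apply_to(
    m, nfe=10, ncp=3, scheme='LAGRANGE-RADAU')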
def _apply_solver(self): start_time = time.time() # # Transform the instance # xfrm = TransformationFactory('bilevel.linear_mpec') xfrm.apply_to(self._instance) xfrm = TransformationFactory('mpec.simple_nonlinear') xfrm.apply_to(self._instance, mpec_bound=self.options.get('mpec_bound', 1e-7)) # # Solve with a specified solver # solver = self.options.solver if not self.options.solver: solver = 'glpk' # use the with block here so that deactivation of the # solver plugin always occurs thereby avoiding memory # leaks caused by plugins! with pyomo.opt.SolverFactory(solver) as opt: # self.results = [] # # **NOTE: It would be better to override _presolve on the # base class of this solver as you might be # missing a number of keywords that were passed # into the solve method (e.g., none of the # io_options are getting relayed to the subsolver # here). # self.results.append( opt.solve(self._instance, tee=self._tee, timelimit=self._timelimit)) # # Load the result back into the original model # ##self._instance.load(self.results[0], ignore_invalid_labels=True) # stop_time = time.time() self.wall_time = stop_time - start_time # # Deactivate the block that contains the optimality conditions, # and reactivate SubModel # submodel = self._instance._transformation_data[ 'bilevel.linear_mpec'].submodel_cuid.find_component(self._instance) for (name, data) in submodel.component_map(active=False).items(): if not isinstance(data, Var) and not isinstance(data, Set): data.activate() # TODO: delete this subblock self._instance._transformation_data[ 'bilevel.linear_mpec'].block_cuid.find_component( self._instance).deactivate() # # Return the sub-solver return condition value and log # return pyutilib.misc.Bunch(rc=getattr(opt, '_rc', None), log=getattr(opt, '_log', None))
def initialize_model(m, n_sim, n_nfe, n_ncp): vp_profile = {0: 0.75} vt_profile = {0: 0.75} m.u_input = Suffix(direction=Suffix.LOCAL) m.u_input[m.vp] = vp_profile m.u_input[m.vt] = vt_profile sim = Simulator(m, package='scipy') tsim, profiles = sim.simulate(numpoints=n_sim, varying_inputs=m.u_input) discretizer = TransformationFactory('dae.collocation') discretizer.apply_to(m, nfe=n_nfe, ncp=n_ncp, scheme='LAGRANGE-RADAU') sim.initialize_model()
def solve_NLP_subproblem(solve_data, config): """ Solves fixed NLP with fixed working model binaries Sets up local working model `fix_nlp` Fixes binaries Sets continuous variables to initial var values Precomputes dual values Deactivates trivial constraints Solves NLP model Returns the fixed-NLP model and the solver results """ fix_nlp = solve_data.working_model.clone() MindtPy = fix_nlp.MindtPy_utils main_objective = next( fix_nlp.component_data_objects(Objective, active=True)) solve_data.nlp_iter += 1 config.logger.info('NLP %s: Solve subproblem for fixed binaries.' % (solve_data.nlp_iter, )) # Set up NLP TransformationFactory('core.fix_discrete').apply_to(fix_nlp) # restore original variable values for nlp_var, orig_val in zip(MindtPy.variable_list, solve_data.initial_var_values): if not nlp_var.fixed and not nlp_var.is_binary(): nlp_var.value = orig_val MindtPy.MindtPy_linear_cuts.deactivate() fix_nlp.tmp_duals = ComponentMap() for c in fix_nlp.component_data_objects(ctype=Constraint, active=True, descend_into=True): rhs = ((0 if c.upper is None else c.upper) + (0 if c.lower is None else c.lower)) sign_adjust = 1 if value(c.upper) is None else -1 fix_nlp.tmp_duals[c] = sign_adjust * max( 0, sign_adjust * (rhs - value(c.body))) # TODO check sign_adjust TransformationFactory('contrib.deactivate_trivial_constraints')\ .apply_to(fix_nlp, tmp=True, ignore_infeasible=True) # Solve the NLP with SuppressInfeasibleWarning(): results = SolverFactory(config.nlp_solver).solve( fix_nlp, **config.nlp_solver_args) return fix_nlp, results
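solve_NLP_subproblem pins the binaries with 'core.fix_discrete' before the NLP solve; the older solve_NLP_subproblem later in this section does the same thing with an explicit loop. A hedged sketch of that manual pattern for a generic model m; the helper name and tolerance default are assumptions.

from pyomo.environ import Var, value

def fix_discrete_vars(m, integer_tolerance=1e-5):
    # Fix integer/binary variables at rounded values so the rest is a pure NLP.
    for v in m.component_data_objects(Var, descend_into=True):
        if v.is_continuous() or v.fixed or v.value is None:
            continue
        rounded = int(round(value(v)))
        if abs(value(v) - rounded) > integer_tolerance:
            raise ValueError(
                "Discrete variable %s value %s is not within tolerance %s of "
                "an integer." % (v.name, value(v), integer_tolerance))
        v.fix(rounded)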
def add_affine_cuts(nlp_result, solve_data, config): with time_code(solve_data.timing, "affine cut generation"): m = solve_data.linear_GDP if config.calc_disjunctive_bounds: with time_code(solve_data.timing, "disjunctive variable bounding"): TransformationFactory( 'contrib.compute_disj_var_bounds').apply_to( m, solver=config.mip_solver if config.obbt_disjunctive_bounds else None) config.logger.info("Adding affine cuts.") GDPopt = m.GDPopt_utils counter = 0 for var, val in zip(GDPopt.variable_list, nlp_result.var_values): if val is not None and not var.fixed: var.value = val for constr in constraints_in_True_disjuncts(m, config): # Note: this includes constraints that are deactivated in the current model (linear_GDP) disjunctive_var_bounds = disjunctive_bounds(constr.parent_block()) if constr.body.polynomial_degree() in (1, 0): continue vars_in_constr = list(identify_variables(constr.body)) if any(var.value is None for var in vars_in_constr): continue # a variable has no values # mcpp stuff mc_eqn = mc(constr.body, disjunctive_var_bounds) # mc_eqn = mc(constr.body) ccSlope = mc_eqn.subcc() cvSlope = mc_eqn.subcv() ccStart = mc_eqn.concave() cvStart = mc_eqn.convex() ub_int = min( constr.upper, mc_eqn.upper()) if constr.has_ub() else mc_eqn.upper() lb_int = max( constr.lower, mc_eqn.lower()) if constr.has_lb() else mc_eqn.lower() parent_block = constr.parent_block() # Create a block on which to put outer approximation cuts. aff_utils = parent_block.component('GDPopt_aff') if aff_utils is None: aff_utils = parent_block.GDPopt_aff = Block( doc="Block holding affine constraints") aff_utils.GDPopt_aff_cons = ConstraintList() aff_cuts = aff_utils.GDPopt_aff_cons concave_cut = sum(ccSlope[var] * (var - var.value) for var in vars_in_constr) + ccStart >= lb_int convex_cut = sum(cvSlope[var] * (var - var.value) for var in vars_in_constr) + cvStart <= ub_int aff_cuts.add(expr=concave_cut) aff_cuts.add(expr=convex_cut) counter += 2 config.logger.info("Added %s affine cuts" % counter)
def _presolve(self, *args, **kwds): if (not isinstance(args[0], six.string_types)) and \ (not isinstance(args[0], IBlock)): self._instance = args[0] xfrm = TransformationFactory('mpec.nl') xfrm.apply_to(self._instance) if len(self._instance._transformation_data['mpec.nl'].compl_cuids) == 0: # There were no complementarity conditions # so we don't hold onto the instance self._instance = None else: args = (self._instance,) else: self._instance = None # SystemCallSolver._presolve(self, *args, **kwds)
def _apply_to(self, instance, bigM=None, **kwds): log_level = logger.getEffectiveLevel() try: assert not NAME_BUFFER self._config = self.CONFIG(kwds.pop('options', {})) self._config.set_value(kwds) if self._config.verbose and log_level > logging.INFO: logger.setLevel(logging.INFO) self.verbose = True elif log_level <= logging.INFO: self.verbose = True else: self.verbose = False (instance_rBigM, cuts_obj, instance_rHull, var_info, transBlockName) = self._setup_subproblems( instance, bigM, self._config.\ tighten_relaxation) self._generate_cuttingplanes( instance_rBigM, cuts_obj, instance_rHull, var_info, transBlockName) # restore integrality TransformationFactory('core.relax_integer_vars').apply_to(instance, undo=True) finally: del self._config del self.verbose # clear the global name buffer NAME_BUFFER.clear() # restore logging level logger.setLevel(log_level)
def solve_master_feasibility_problem(model_data, config): """ Solve a slack variable based feasibility model for the master problem """ model = model_data.master_model.clone() for o in model.component_data_objects(Objective): o.deactivate() TransformationFactory("core.add_slack_variables").apply_to(model) solver = config.global_solver if not solver.available(): raise RuntimeError("NLP solver %s is not available." % config.global_solver) try: results = solver.solve(model, tee=config.tee) except ValueError as err: if 'Cannot load a SolverResults object with bad status: error' in str( err): results.solver.termination_condition = tc.error results.solver.message = str(err) else: raise if check_optimal_termination(results) and value( model._core_add_slack_variables._slack_objective) <= 0: # If this led to a feasible solution, continue with this model # Load solution into master for v in model.component_data_objects(Var): master_v = model_data.master_model.find_component(v) if master_v is not None: master_v.set_value(v.value, skip_validation=True) return results
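solve_master_feasibility_problem deactivates the original objectives, applies 'core.add_slack_variables', and checks whether the minimum total slack is zero. A hedged sketch of that pattern for a generic model; the helper name and solver choice are assumptions, and the _core_add_slack_variables._slack_objective attribute is the one the snippet above reads.

from pyomo.environ import Objective, TransformationFactory, SolverFactory, value

def slack_feasibility_value(m, solver_name='ipopt'):
    feas = m.clone()
    for obj in feas.component_data_objects(Objective):
        obj.deactivate()
    # Adds a slack variable to each constraint plus an objective minimizing them.
    TransformationFactory('core.add_slack_variables').apply_to(feas)
    SolverFactory(solver_name).solve(feas)
    # A value of (approximately) zero means the original constraints are satisfiable.
    return value(feas._core_add_slack_variables._slack_objective)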
def init_rNLP(solve_data, config): """Initialize by solving the rNLP (relaxed binary variables).""" solve_data.nlp_iter += 1 m = solve_data.working_model.clone() config.logger.info("NLP %s: Solve relaxed integrality" % (solve_data.nlp_iter, )) MindtPy = m.MindtPy_utils TransformationFactory('core.relax_integrality').apply_to(m) with SuppressInfeasibleWarning(): results = SolverFactory(config.nlp_solver).solve( m, **config.nlp_solver_args) subprob_terminate_cond = results.solver.termination_condition if subprob_terminate_cond is tc.optimal: main_objective = next(m.component_data_objects(Objective, active=True)) nlp_solution_values = list(v.value for v in MindtPy.variable_list) dual_values = list(m.dual[c] for c in MindtPy.constraint_list) # Add OA cut if main_objective.sense == minimize: solve_data.LB = value(main_objective.expr) else: solve_data.UB = value(main_objective.expr) config.logger.info('NLP %s: OBJ: %s LB: %s UB: %s' % (solve_data.nlp_iter, value(main_objective.expr), solve_data.LB, solve_data.UB)) if config.strategy == 'OA': add_oa_cut(nlp_solution_values, dual_values, solve_data, config) elif subprob_terminate_cond is tc.infeasible: # TODO fail? try something else? config.logger.info('Initial relaxed NLP problem is infeasible. ' 'Problem may be infeasible.') else: raise ValueError( 'MindtPy unable to handle relaxed NLP termination condition ' 'of %s. Solver message: %s' % (subprob_terminate_cond, results.solver.message))
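init_rNLP clones the working model and relaxes integrality before handing it to the NLP solver. A hedged sketch of that relaxation on a tiny knapsack MILP; the model is an illustrative assumption, and newer Pyomo releases register the same behavior as 'core.relax_integer_vars'.

from pyomo.environ import (ConcreteModel, Var, Binary, Constraint, Objective,
                           TransformationFactory, maximize)

m = ConcreteModel()
m.y = Var([1, 2, 3], domain=Binary)
m.knap = Constraint(expr=2 * m.y[1] + 3 * m.y[2] + 4 * m.y[3] <= 5)
m.obj = Objective(expr=3 * m.y[1] + 4 * m.y[2] + 5 * m.y[3], sense=maximize)

relaxed = m.clone()
TransformationFactory('core.relax_integrality').apply_to(relaxed)
# relaxed.y is now continuous on [0, 1]; solve with any LP/NLP solver.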
def init_fixed_disjuncts(solve_data, config): """Initialize by solving the problem with the current disjunct values.""" # TODO error checking to make sure that the user gave proper disjuncts # fix the disjuncts in the linear GDP and send for solution. solve_data.mip_iteration += 1 config.logger.info( "Generating initial linear GDP approximation by " "solving subproblem with original user-specified disjunct values.") linear_GDP = solve_data.linear_GDP.clone() TransformationFactory('gdp.fix_disjuncts').apply_to(linear_GDP) mip_result = solve_data.mip_solve_function(linear_GDP, solve_data, config) if mip_result.feasible: nlp_result = solve_data.nlp_solve_function(mip_result.var_values, solve_data, config) if nlp_result.feasible: solve_data.cut_generation_function(nlp_result, solve_data, config) solve_data.integer_cut_function(mip_result.var_values, solve_data, config, feasible=nlp_result.feasible) else: config.logger.error('Linear GDP infeasible for initial user-specified ' 'disjunct values. ' 'Skipping initialization.')
def solve_local_NLP(mip_var_values, solve_data, config): """Set up and solve the local LOA subproblem.""" nlp_model = solve_data.working_model.clone() solve_data.nlp_iteration += 1 # copy in the discrete variable values for var, val in zip(nlp_model.GDPopt_utils.variable_list, mip_var_values): if val is None: continue if var.is_continuous(): var.value = val elif ((fabs(val) > config.integer_tolerance and fabs(val - 1) > config.integer_tolerance)): raise ValueError("Binary variable %s value %s is not " "within tolerance %s of 0 or 1." % (var.name, val, config.integer_tolerance)) else: # variable is binary and within tolerances if config.round_discrete_vars: var.fix(int(round(val))) else: var.fix(val) TransformationFactory('gdp.fix_disjuncts').apply_to(nlp_model) nlp_result = solve_NLP(nlp_model, solve_data, config) if nlp_result.feasible: # NLP is feasible update_subproblem_progress_indicators(nlp_model, solve_data, config) return nlp_result
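After the discrete values are copied in, solve_local_NLP calls 'gdp.fix_disjuncts' so that disjuncts with fixed indicator variables become ordinary blocks (or are deactivated). A hedged minimal sketch of that step; the toy model is an illustrative assumption, and recent Pyomo releases use True/False for the Boolean indicator_var instead of the 0/1 values shown here.

from pyomo.environ import (ConcreteModel, Var, Constraint, Objective,
                           TransformationFactory)
from pyomo.gdp import Disjunct, Disjunction

m = ConcreteModel()
m.x = Var(bounds=(0, 10))
m.d1 = Disjunct()
m.d1.c = Constraint(expr=m.x >= 2)
m.d2 = Disjunct()
m.d2.c = Constraint(expr=m.x <= 1)
m.disj = Disjunction(expr=[m.d1, m.d2])
m.obj = Objective(expr=m.x)

# Select d1, rule out d2, then strip the disjunctive structure.
m.d1.indicator_var.fix(1)
m.d2.indicator_var.fix(0)
TransformationFactory('gdp.fix_disjuncts').apply_to(m)
# Only the constraints of the selected disjunct (d1) remain active.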
def subproblem_solve(gdp, config): subproblem = gdp.clone() TransformationFactory('gdp.bigm').apply_to(subproblem) main_obj = next( subproblem.component_data_objects(Objective, active=True)) obj_sign = 1 if main_obj.sense == minimize else -1 try: result = SolverFactory(config.solver).solve( subproblem, **config.solver_args) except RuntimeError as e: config.logger.warning( "Solver encountered RuntimeError. Treating as infeasible. " "Msg: %s\n%s" % (str(e), traceback.format_exc())) var_values = [ v.value for v in subproblem.GDPbb_utils.variable_list ] return obj_sign * float('inf'), SolverResults(), var_values var_values = [v.value for v in subproblem.GDPbb_utils.variable_list] term_cond = result.solver.termination_condition if result.solver.status is SolverStatus.ok and any( term_cond == valid_cond for valid_cond in (tc.optimal, tc.locallyOptimal, tc.feasible)): return value(main_obj.expr), result, var_values elif term_cond == tc.unbounded: return obj_sign * float('-inf'), result, var_values elif term_cond == tc.infeasible: return obj_sign * float('inf'), result, var_values else: config.logger.warning("Unknown termination condition of %s" % term_cond) return obj_sign * float('inf'), result, var_values
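subproblem_solve maps solver status and termination condition onto a bound value. A hedged helper that isolates the same classification logic; the function name is an assumption.

from pyomo.opt import SolverStatus, TerminationCondition as tc

def classify_result(results):
    term_cond = results.solver.termination_condition
    if results.solver.status is SolverStatus.ok and term_cond in (
            tc.optimal, tc.locallyOptimal, tc.feasible):
        return 'feasible'    # use the subproblem objective value as the bound
    if term_cond == tc.unbounded:
        return 'unbounded'   # bound of -inf in the minimization sense
    if term_cond == tc.infeasible:
        return 'infeasible'  # bound of +inf in the minimization sense
    return 'unknown'         # treated conservatively, like infeasible, above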
def solve_global_subproblem(mip_result, solve_data, config): subprob = solve_data.working_model.clone() solve_data.nlp_iteration += 1 # copy in the discrete variable values for disj, val in zip(subprob.GDPopt_utils.disjunct_list, mip_result.disjunct_values): rounded_val = int(round(val)) if (fabs(val - rounded_val) > config.integer_tolerance or rounded_val not in (0, 1)): raise ValueError("Disjunct %s indicator value %s is not " "within tolerance %s of 0 or 1." % (disj.name, val, config.integer_tolerance)) else: if config.round_discrete_vars: disj.indicator_var.fix(rounded_val) else: disj.indicator_var.fix(val) if config.force_subproblem_nlp: # We also need to copy over the discrete variable values for var, val in zip(subprob.GDPopt_utils.variable_list, mip_result.var_values): if var.is_continuous(): continue rounded_val = int(round(val)) if fabs(val - rounded_val) > config.integer_tolerance: raise ValueError("Discrete variable %s value %s is not " "within tolerance %s of %s." % (var.name, val, config.integer_tolerance, rounded_val)) else: # variable is binary and within tolerances if config.round_discrete_vars: var.fix(rounded_val) else: var.fix(val) TransformationFactory('gdp.fix_disjuncts').apply_to(subprob) subprob.dual.deactivate() # global solvers may not give dual info if config.subproblem_presolve: try: preprocess_subproblem(subprob, config) except InfeasibleConstraintException as e: # FBBT found the problem to be infeasible return get_infeasible_result_object( subprob, "Preprocessing determined problem to be infeasible.") unfixed_discrete_vars = detect_unfixed_discrete_vars(subprob) if config.force_subproblem_nlp and len(unfixed_discrete_vars) > 0: raise RuntimeError( "Unfixed discrete variables found on the NLP subproblem.") elif len(unfixed_discrete_vars) == 0: subprob_result = solve_NLP(subprob, solve_data, config) else: subprob_result = solve_MINLP(subprob, solve_data, config) if subprob_result.feasible: # NLP is feasible update_subproblem_progress_indicators(subprob, solve_data, config) return subprob_result
def _apply_solver(self): start_time = time.time() instance = self._instance # Reformulate adjustable variables if not self.options.adjustable: adjustable = 'romodel.adjustable.ldr' else: adjustable = self.options.adjustable xfrm = TransformationFactory(adjustable) xfrm.apply_to(instance) # Reformulate uncertain parameters transformations = [ 'romodel.ellipsoidal', 'romodel.polyhedral', 'romodel.gp', 'romodel.warpedgp', 'romodel.unknown' ] transformation_kwargs = { 'romodel.ellipsoidal': [], 'romodel.polyhedral': [], 'romodel.gp': [], 'romodel.warpedgp': ['initialize_wolfe'], 'romodel.unknown': [] } for transform in transformations: xfrm = TransformationFactory(transform) kwargs = {} for kw in transformation_kwargs[transform]: if self.options[kw]: kwargs[kw] = self.options[kw] xfrm.apply_to(instance, **kwargs) instance.transformation_time = time.time() - start_time if not self.options.solver: solver = 'gurobi' else: solver = self.options.solver with pyomo.opt.SolverFactory(solver) as opt: self.results = [] opt.options = self.options results = opt.solve(self._instance, tee=self._tee, timelimit=self._timelimit) self.results.append(results) stop_time = time.time() self.wall_time = stop_time - start_time self.termination_condition = results.solver.termination_condition self.results_obj = self._setup_results_obj() # # Return the sub-solver return condition value and log # return pyutilib.misc.Bunch(rc=getattr(opt, '_rc', None), log=getattr(opt, '_log', None))
def _apply_solver(self): start_time = time.time() # # Cache the instance # xfrm = TransformationFactory('bilevel.linear_mpec') xfrm.apply_to(self._instance) xfrm = TransformationFactory('mpec.simple_disjunction') xfrm.apply_to(self._instance) xfrm = TransformationFactory('gdp.bigm') xfrm.apply_to(self._instance, bigM=self.options.get('bigM',100000)) # # Solve with a specified solver # solver = self.options.solver if not self.options.solver: solver = 'glpk' # use the with block here so that deactivation of the # solver plugin always occurs thereby avoiding memory # leaks caused by plugins! with pyomo.opt.SolverFactory(solver) as opt: # self.results = [] # # **NOTE: It would be better to override _presolve on the # base class of this solver as you might be # missing a number of keywords that were passed # into the solve method (e.g., none of the # io_options are getting relayed to the subsolver # here). # self.results.append(opt.solve(self._instance, tee=self._tee, timelimit=self._timelimit)) # stop_time = time.time() self.wall_time = stop_time - start_time # # Deactivate the block that contains the optimality conditions, # and reactivate SubModel # submodel = self._instance._transformation_data['bilevel.linear_mpec'].submodel_cuid.find_component(self._instance) for (name, data) in submodel.component_map(active=False).items(): if not isinstance(data,Var) and not isinstance(data,Set): data.activate() # TODO: delete this subblock self._instance._transformation_data['bilevel.linear_mpec'].block_cuid.find_component(self._instance).deactivate() # # Return the sub-solver return condition value and log # return pyutilib.misc.Bunch(rc=getattr(opt,'_rc', None), log=getattr(opt,'_log',None))
def initialize_model(m,n_sim,n_nfe,n_ncp): vp_profile = {0:0.75} vt_profile = {0:0.75} m.u_input = Suffix(direction=Suffix.LOCAL) m.u_input[m.vp] = vp_profile m.u_input[m.vt] = vt_profile sim = Simulator(m, package='scipy') tsim, profiles = sim.simulate(numpoints=n_sim, varying_inputs=m.u_input) discretizer = TransformationFactory('dae.collocation') discretizer.apply_to(m,nfe=n_nfe,ncp=n_ncp,scheme='LAGRANGE-RADAU') sim.initialize_model()
def _apply_solver(self): start_time = time.time() instance = self._instance xfrm = TransformationFactory('romodel.generators') xfrm.apply_to(instance) tdata = instance._transformation_data['romodel.generators'] generators = tdata.generators # Need to set this up for main and sub solver if not self.options.solver: # Default to gurobi solver = 'gurobi' else: solver = self.options.solver with pyomo.opt.SolverFactory(solver) as opt: self.results = [] feasible = {} # Solve nominal problem opt.options = self.options results = opt.solve(instance, tee=self._tee, timelimit=self._timelimit) # Add initial cut to check feasibility for g in generators: feasible[g.name] = g.add_cut() # Keep adding cuts until feasible while not all(feasible.values()): results = opt.solve(instance, tee=self._tee, timelimit=self._timelimit) for g in generators: # Only add cut if uncertain constraint isn't feasible if not feasible[g.name]: feasible[g.name] = g.add_cut() self.results.append(results) stop_time = time.time() self.wall_time = stop_time - start_time self.results_obj = self._setup_results_obj() # # Return the sub-solver return condition value and log # return pyutilib.misc.Bunch(rc=getattr(opt, '_rc', None), log=getattr(opt, '_log', None))
def _apply_solver(self): start_time = time.time() # # Transform instance # xfrm = TransformationFactory('mpec.simple_disjunction') xfrm.apply_to(self._instance) xfrm = TransformationFactory('gdp.bigm') xfrm.apply_to(self._instance, bigM=self.options.get('bigM', 10**6)) # # Solve with a specified solver # solver = self.options.solver if not self.options.solver: #pragma:nocover self.options.solver = solver = 'glpk' # use the with block here so that deactivation of the # solver plugin always occurs thereby avoiding memory # leaks caused by plugins! with pyomo.opt.SolverFactory(solver) as opt: # # **NOTE: It would be better to override _presolve on the # base class of this solver as you might be # missing a number of keywords that were passed # into the solve method (e.g., none of the # io_options are getting relayed to the subsolver # here). # self.results = opt.solve(self._instance, tee=self._tee, timelimit=self._timelimit) # # Reclassify the Complementarity components # from pyomo.mpec import Complementarity for cuid in self._instance._transformation_data[ 'mpec.simple_disjunction'].compl_cuids: cobj = cuid.find_component(self._instance) cobj.parent_block().reclassify_component_type( cobj, Complementarity) # # Transform the result back into the original model # ##self._instance.solutions.load_from(self.results, ignore_invalid_labels=True) # # Update timing # stop_time = time.time() self.wall_time = stop_time - start_time # # Return the sub-solver return condition value and log # return pyutilib.misc.Bunch(rc=getattr(opt, '_rc', None), log=getattr(opt, '_log', None))
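This solver rewrites complementarity conditions as disjunctions ('mpec.simple_disjunction') and then applies a big-M reformulation. A hedged end-to-end sketch of those two transformations on a toy model; the model and the big-M value are illustrative assumptions.

from pyomo.environ import ConcreteModel, Var, Objective, TransformationFactory
from pyomo.mpec import Complementarity, complements

m = ConcreteModel()
m.x = Var(bounds=(0, 100))
m.y = Var(bounds=(0, 100))
m.obj = Objective(expr=m.x + m.y)
m.compl = Complementarity(expr=complements(m.x >= 1, m.y >= 0))

# Complementarity -> disjunction -> big-M MILP, mirroring the solver above.
TransformationFactory('mpec.simple_disjunction').apply_to(m)
TransformationFactory('gdp.bigm').apply_to(m, bigM=10**6)
# The result can be handed to any MIP solver (e.g. glpk).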
def test_active_parent_block(self): m = ConcreteModel() m.d1 = Block() m.d1.sub1 = Disjunct() m.d1.sub2 = Disjunct() m.d1.disj = Disjunction(expr=[m.d1.sub1, m.d1.sub2]) with self.assertRaises(GDP_Error): TransformationFactory('gdp.reclassify').apply_to(m)
def _apply_solver(self): start_time = time.time() # # Transform instance # xfrm = TransformationFactory('mpec.simple_disjunction') xfrm.apply_to(self._instance) xfrm = TransformationFactory('gdp.bigm') xfrm.apply_to(self._instance, default_bigM=self.options.get('bigM',10**6)) # # Solve with a specified solver # solver = self.options.solver if not self.options.solver: #pragma:nocover self.options.solver = solver = 'glpk' # use the with block here so that deactivation of the # solver plugin always occurs thereby avoiding memory # leaks caused by plugins! with pyomo.opt.SolverFactory(solver) as opt: # # **NOTE: It would be better to override _presolve on the # base class of this solver as you might be # missing a number of keywords that were passed # into the solve method (e.g., none of the # io_options are getting relayed to the subsolver # here). # self.results = opt.solve(self._instance, tee=self._tee, timelimit=self._timelimit) # # Reclassify the Complementarity components # from pyomo.mpec import Complementarity for cuid in self._instance._transformation_data['mpec.simple_disjunction'].compl_cuids: cobj = cuid.find_component(self._instance) cobj.parent_block().reclassify_component_type(cobj, Complementarity) # # Transform the result back into the original model # ##self._instance.solutions.load_from(self.results, ignore_invalid_labels=True) # # Update timing # stop_time = time.time() self.wall_time = stop_time - start_time # # Return the sub-solver return condition value and log # return pyutilib.misc.Bunch(rc=getattr(opt,'_rc', None), log=getattr(opt,'_log',None))
def _apply_solver(self): start_time = time.time() # # Transform the instance # xfrm = TransformationFactory('bilevel.linear_mpec') xfrm.apply_to(self._instance) xfrm = TransformationFactory('mpec.simple_nonlinear') xfrm.apply_to(self._instance, mpec_bound=1e-7) # # Solve with a specified solver # solver = self.options.solver if not self.options.solver: solver = 'glpk' # use the with block here so that deactivation of the # solver plugin always occurs thereby avoiding memory # leaks caused by plugins! with pyomo.opt.SolverFactory(solver) as opt: # self.results = [] # # **NOTE: It would be better to override _presolve on the # base class of this solver as you might be # missing a number of keywords that were passed # into the solve method (e.g., none of the # io_options are getting relayed to the subsolver # here). # self.results.append(opt.solve(self._instance, tee=self._tee, timelimit=self._timelimit)) # # Load the result back into the original model # ##self._instance.load(self.results[0], ignore_invalid_labels=True) # stop_time = time.time() self.wall_time = stop_time - start_time # # Deactivate the block that contains the optimality conditions, # and reactivate SubModel # self._instance._transformation_data['bilevel.linear_mpec'].submodel_cuid.find_component(self._instance).activate() self._instance._transformation_data['bilevel.linear_mpec'].block_cuid.find_component(self._instance).deactivate() # # Return the sub-solver return condition value and log # return pyutilib.misc.Bunch(rc=getattr(opt,'_rc', None), log=getattr(opt,'_log',None))
def _apply_solver(self): start_time = time.time() # # Cache the instance # xfrm = TransformationFactory('bilevel.linear_dual') xfrm.apply_to(self._instance) # # Verify whether the objective is linear # nonlinear=False for odata in self._instance.component_objects(Objective, active=True): nonlinear = odata.expr.polynomial_degree() != 1 # Stop after the first objective break # # Apply an additional transformation to remap bilinear terms # if nonlinear: gdp_xfrm = TransformationFactory("gdp.bilinear") gdp_xfrm.apply_to(self._instance) mip_xfrm = TransformationFactory("gdp.bigm") mip_xfrm.apply_to(self._instance, default_bigM=self.options.get('bigM',100000)) # # Solve with a specified solver # solver = self.options.solver if not self.options.solver: solver = 'glpk' # use the with block here so that deactivation of the # solver plugin always occurs thereby avoiding memory # leaks caused by plugins! with pyomo.opt.SolverFactory(solver) as opt: self.results = [] # # **NOTE: It would be better to override _presolve on the # base class of this solver as you might be # missing a number of keywords that were passed # into the solve method (e.g., none of the # io_options are getting relayed to the subsolver # here). # self.results.append(opt.solve(self._instance, tee=self._tee, timelimit=self._timelimit)) #print("POST-SOLVE - BEGIN") #self._instance.write("tmp.lp", io_options={"symbolic_solver_labels":True}) #self._instance.pprint() #self._instance.display() #print("POST-SOLVE - END") # # If the problem was bilinear, then reactivate the original data # if nonlinear: i = 0 for v in self._instance.bilinear_data_.vlist.itervalues(): #print(v) #print(v.cname()) #print(type(v)) #print(v.value) if abs(v.value) <= 1e-7: self._instance.bilinear_data_.vlist_boolean[i] = 0 else: self._instance.bilinear_data_.vlist_boolean[i] = 1 i = i + 1 # self._instance.bilinear_data_.deactivate() # # Transform the result back into the original model # tdata = self._instance._transformation_data['bilevel.linear_dual'] unfixed_cuids = set() # Copy variable values and fix them for vuid in tdata.fixed: for index_, data_ in vuid.find_component_on(self._instance).iteritems(): if not data_.fixed: data_.value = self._instance.find_component(data_).value data_.fixed = True unfixed_cuids.add(ComponentUID(data_)) # Reclassify the SubModel components and resolve for name_ in tdata.submodel: submodel = getattr(self._instance, name_) submodel.activate() for (name, data) in submodel.component_map(active=False).items(): if not isinstance(data,Var) and not isinstance(data,Set): data.activate() dual_submodel = getattr(self._instance, name_+'_dual') dual_submodel.deactivate() pyomo.util.PyomoAPIFactory('pyomo.repn.compute_canonical_repn')({}, model=submodel) self._instance.reclassify_component_type(name_, Block) # use the with block here so that deactivation of the # solver plugin always occurs thereby avoiding memory # leaks caused by plugins! with pyomo.opt.SolverFactory(solver) as opt_inner: # # **NOTE: It would be better to override _presolve on the # base class of this solver as you might be # missing a number of keywords that were passed # into the solve method (e.g., none of the # io_options are getting relayed to the subsolver # here). # results = opt_inner.solve(self._instance, tee=self._tee, timelimit=self._timelimit) #select=None) # Unfix variables for vuid in tdata.fixed: for index_, data_ in vuid.find_component_on(self._instance).iteritems(): if ComponentUID(data_) in unfixed_cuids: data_.fixed = False # self._instance.solutions.select(0, ignore_fixed_vars=True) self.results.append(results) # stop_time = time.time() self.wall_time = stop_time - start_time self.results_obj = self._setup_results_obj() # # Reactivate top level objective # and reclassify the submodel # for oname, odata in self._instance.component_map(Objective).items(): odata.activate() # TODO: rework the Block logic to allow for searching SubModel objects for variables, etc. #data_.parent_component().parent_block().reclassify_component_type(name_, SubModel) # # Return the sub-solver return condition value and log # return pyutilib.misc.Bunch(rc=getattr(opt,'_rc', None), log=getattr(opt,'_log',None))
def _apply_solver(self): start_time = time.time() # # Cache the instance # xfrm = TransformationFactory('bilevel.linear_dual') xfrm.apply_to(self._instance) # # Apply an additional transformation to remap bilinear terms # if self.options.transform is None: xfrm = None else: xfrm = TransformationFactory(self.options.transform) xfrm.apply_to(self._instance) # # Solve with a specified solver # solver = self.options.solver if not self.options.solver: solver = 'glpk' # use the with block here so that deactivation of the # solver plugin always occurs thereby avoiding memory # leaks caused by plugins! with pyomo.opt.SolverFactory(solver) as opt: self.results = [] # # **NOTE: It would be better to override _presolve on the # base class of this solver as you might be # missing a number of keywords that were passed # into the solve method (e.g., none of the # io_options are getting relayed to the subsolver # here). # self.results.append(opt.solve(self._instance, tee=self._tee, timelimit=self._timelimit)) # # Transform the result back into the original model # tdata = self._instance._transformation_data['bilevel.linear_dual'] unfixed_cuids = set() # Copy variable values and fix them for vuid in tdata.fixed: for index_, data_ in vuid.find_component_on(self._instance).iteritems(): if not data_.fixed: data_.value = self._instance.find_component(data_).value data_.fixed = True unfixed_cuids.add(ComponentUID(data_)) # Reclassify the SubModel components and resolve for name_ in tdata.submodel: submodel = getattr(self._instance, name_) submodel.activate() dual_submodel = getattr(self._instance, name_+'_dual') dual_submodel.deactivate() pyomo.util.PyomoAPIFactory('pyomo.repn.compute_canonical_repn')({}, model=submodel) self._instance.reclassify_component_type(name_, Block) # use the with block here so that deactivation of the # solver plugin always occurs thereby avoiding memory # leaks caused by plugins! with pyomo.opt.SolverFactory(solver) as opt_inner: # # **NOTE: It would be better to override _presolve on the # base class of this solver as you might be # missing a number of keywords that were passed # into the solve method (e.g., none of the # io_options are getting relayed to the subsolver # here). # self.results.append(opt_inner.solve(self._instance, tee=self._tee, timelimit=self._timelimit, select=None)) self._instance.solutions.select(0, ignore_fixed_vars=True) data_.parent_component().parent_block().reclassify_component_type(name_, SubModel) # Unfix variables for vuid in tdata.fixed: for index_, data_ in vuid.find_component_on(self._instance).iteritems(): if ComponentUID(data_) in unfixed_cuids: data_.fixed = False stop_time = time.time() self.wall_time = stop_time - start_time # Reactivate top level objective for oname, odata in self._instance.component_map(Objective).items(): odata.activate() # # Return the sub-solver return condition value and log # return pyutilib.misc.Bunch(rc=getattr(opt,'_rc', None), log=getattr(opt,'_log',None))
def _setup_subproblems(self, instance, bigM): # create transformation block transBlockName, transBlock = self._add_relaxation_block( instance, '_pyomo_gdp_cuttingplane_relaxation') # We store a list of all vars so that we can efficiently # generate maps among the subproblems transBlock.all_vars = list(v for v in instance.component_data_objects( Var, descend_into=(Block, Disjunct), sort=SortComponents.deterministic) if not v.is_fixed()) # we'll store all the cuts we add together transBlock.cuts = Constraint(Any) # get bigM and chull relaxations bigMRelaxation = TransformationFactory('gdp.bigm') chullRelaxation = TransformationFactory('gdp.chull') relaxIntegrality = TransformationFactory('core.relax_integrality') # HACK: for the current writers, we need to also apply gdp.reclassify so # that the indicator variables stay where they are in the big M model # (since that is what we are eventually going to solve after we add our # cuts). reclassify = TransformationFactory('gdp.reclassify') # # Generate the CHull relaxation (used for the separation # problem to generate cutting planes) # instance_rCHull = chullRelaxation.create_using(instance) # This relies on relaxIntegrality relaxing variables on deactivated # blocks, which should be fine. reclassify.apply_to(instance_rCHull) relaxIntegrality.apply_to(instance_rCHull) # # Reformulate the instance using the BigM relaxation (this will # be the final instance returned to the user) # bigMRelaxation.apply_to(instance, bigM=bigM) reclassify.apply_to(instance) # # Generate the continuous relaxation of the BigM transformation # instance_rBigM = relaxIntegrality.create_using(instance) # # Add the xstar parameter for the CHull problem # transBlock_rCHull = instance_rCHull.component(transBlockName) # # this will hold the solution to rbigm each time we solve it. We # add it to the transformation block so that we don't have to # worry about name conflicts. transBlock_rCHull.xstar = Param( range(len(transBlock.all_vars)), mutable=True, default=None) transBlock_rBigM = instance_rBigM.component(transBlockName) # # Generate the mapping between the variables on all the # instances and the xstar parameter # var_info = tuple( (v, transBlock_rBigM.all_vars[i], transBlock_rCHull.all_vars[i], transBlock_rCHull.xstar[i]) for i,v in enumerate(transBlock.all_vars)) # # Add the separation objective to the chull subproblem # self._add_separation_objective(var_info, transBlock_rCHull) return instance_rBigM, instance_rCHull, var_info, transBlockName
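_setup_subproblems builds three related models: a hull ('gdp.chull') relaxation for the separation problem, a big-M reformulation of the original instance, and a continuous relaxation of that big-M model. A hedged sketch of the create_using/apply_to pattern in isolation, for a generic GDP model m; the helper name is an assumption, and 'gdp.chull' is registered as 'gdp.hull' in later Pyomo releases.

from pyomo.environ import TransformationFactory

def build_relaxations(m):
    # Hull reformulation on a fresh copy, then relax its integrality.
    r_hull = TransformationFactory('gdp.chull').create_using(m)
    TransformationFactory('core.relax_integrality').apply_to(r_hull)
    # Big-M reformulation in place, plus a continuously relaxed copy of it.
    TransformationFactory('gdp.bigm').apply_to(m)
    r_bigm = TransformationFactory('core.relax_integrality').create_using(m)
    return r_hull, r_bigm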
def solve_NLP_subproblem(solve_data, config): m = solve_data.working_model.clone() MindtPy = m.MindtPy_utils main_objective = next(m.component_data_objects(Objective, active=True)) solve_data.nlp_iter += 1 config.logger.info('NLP %s: Solve subproblem for fixed binaries.' % (solve_data.nlp_iter,)) # Set up NLP for v in MindtPy.variable_list: if v.is_binary(): v.fix(int(round(value(v)))) # restore original variable values for nlp_var, orig_val in zip( MindtPy.variable_list, solve_data.initial_var_values): if not nlp_var.fixed and not nlp_var.is_binary(): nlp_var.value = orig_val MindtPy.MindtPy_linear_cuts.deactivate() m.tmp_duals = ComponentMap() for c in m.component_data_objects(ctype=Constraint, active=True, descend_into=True): rhs = ((0 if c.upper is None else c.upper) + (0 if c.lower is None else c.lower)) sign_adjust = 1 if value(c.upper) is None else -1 m.tmp_duals[c] = sign_adjust * max(0, sign_adjust * (rhs - value(c.body))) # TODO check sign_adjust t = TransformationFactory('contrib.deactivate_trivial_constraints') t.apply_to(m, tmp=True, ignore_infeasible=True) # Solve the NLP # m.pprint() # print nlp problem for debugging with SuppressInfeasibleWarning(): results = SolverFactory(config.nlp_solver).solve( m, **config.nlp_solver_args) var_values = list(v.value for v in MindtPy.variable_list) subprob_terminate_cond = results.solver.termination_condition if subprob_terminate_cond is tc.optimal: copy_var_list_values( m.MindtPy_utils.variable_list, solve_data.working_model.MindtPy_utils.variable_list, config) for c in m.tmp_duals: if m.dual.get(c, None) is None: m.dual[c] = m.tmp_duals[c] duals = list(m.dual[c] for c in MindtPy.constraint_list) if main_objective.sense == minimize: solve_data.UB = min(value(main_objective.expr), solve_data.UB) solve_data.solution_improved = solve_data.UB < solve_data.UB_progress[-1] solve_data.UB_progress.append(solve_data.UB) else: solve_data.LB = max(value(main_objective.expr), solve_data.LB) solve_data.solution_improved = solve_data.LB > solve_data.LB_progress[-1] solve_data.LB_progress.append(solve_data.LB) config.logger.info( 'NLP {}: OBJ: {} LB: {} UB: {}' .format(solve_data.nlp_iter, value(main_objective.expr), solve_data.LB, solve_data.UB)) if solve_data.solution_improved: solve_data.best_solution_found = m.clone() # Add the linear cut if config.strategy == 'OA': add_oa_cut(var_values, duals, solve_data, config) elif config.strategy == 'PSC': add_psc_cut(solve_data, config) elif config.strategy == 'GBD': add_gbd_cut(solve_data, config) # This adds an integer cut to the feasible_integer_cuts # ConstraintList, which is not activated by default. However, it # may be activated as needed in certain situations or for certain # values of option flags. add_int_cut(var_values, solve_data, config, feasible=True) config.call_after_subproblem_feasible(m, solve_data) elif subprob_terminate_cond is tc.infeasible: # TODO try something else? Reinitialize with different initial # value? config.logger.info('NLP subproblem was locally infeasible.') for c in m.component_data_objects(ctype=Constraint, active=True, descend_into=True): rhs = ((0 if c.upper is None else c.upper) + (0 if c.lower is None else c.lower)) sign_adjust = 1 if value(c.upper) is None else -1 m.dual[c] = sign_adjust * max(0, sign_adjust * (rhs - value(c.body))) for var in m.component_data_objects(ctype=Var, descend_into=True): if config.strategy == 'PSC' or config.strategy == 'GBD': m.ipopt_zL_out[var] = 0 m.ipopt_zU_out[var] = 0 if var.ub is not None and abs(var.ub - value(var)) < config.bound_tolerance: m.ipopt_zL_out[var] = 1 elif var.lb is not None and abs(value(var) - var.lb) < config.bound_tolerance: m.ipopt_zU_out[var] = -1 # m.pprint() #print infeasible nlp problem for debugging if config.strategy == 'OA': config.logger.info('Solving feasibility problem') if config.initial_feas: # add_feas_slacks(m, solve_data) # config.initial_feas = False var_values, duals = solve_NLP_feas(solve_data, config) add_oa_cut(var_values, duals, solve_data, config) # Add an integer cut to exclude this discrete option add_int_cut(var_values, solve_data, config) elif subprob_terminate_cond is tc.maxIterations: # TODO try something else? Reinitialize with different initial # value? config.logger.info('NLP subproblem failed to converge within iteration limit.') # Add an integer cut to exclude this discrete option add_int_cut(solve_data, config) else: raise ValueError( 'MindtPy unable to handle NLP subproblem termination ' 'condition of {}'.format(subprob_terminate_cond)) # Call the NLP post-solve callback config.call_after_subproblem_solve(m, solve_data)
def get_dual_values(solver, model): if id(model) not in get_dual_values.discrete_stage2_vars: # 1st attempt to get duals: we need to see if the model has # discrete variables (solvers won't give duals if there are # still active discrete variables) try: get_dual_values.discrete_stage2_vars[id(model)] = False return get_dual_values(solver, model) except: get_dual_values.discrete_stage2_vars[id(model)] = True # Find the discrete variables to populate the list return get_dual_values(solver, model) duals = {} _con = model._interscenario_plugin.fixed_variables_constraint if get_dual_values.discrete_stage2_vars[id(model)]: # Fix all discrete variables xfrm = TransformationFactory('core.relax_discrete') if PYOMO_4_0: xfrm.apply(model, inplace=True) else: xfrm.apply_to(model) # Note: preprocessing is only necessary if we are changing a # fixed/freed variable. if FALLBACK_ON_BRUTE_FORCE_PREPROCESS: model.preprocess() else: _map = {} preprocess_block_constraints( model._interscenario_plugin, idMap=_map) #SOLVE results = solver.solve(model, warmstart=True) ss = results.solver.status tc = results.solver.termination_condition #self.timeInSolver += results['Solver'][0]['Time'] if ss == SolverStatus.ok and tc in _acceptable_termination_conditions: state = '' elif tc in _infeasible_termination_conditions: state = 'INFEASIBLE' else: state = 'NONOPTIMAL' if state: logger.warning( "Resolving subproblem model with relaxed second-stage " "discrete variables failed (%s). " "Dual values not available." % (state,) ) else: # Get the duals if PYOMO_4_0: model.load(results) else: model.solutions.load_from(results) #model.dual.pprint() for varid in model._interscenario_plugin.STAGE1VAR: duals[varid] = model.dual[_con[varid]] # Free the discrete second-stage variables if PYOMO_4_0: xfrm.apply(model, inplace=True, undo=True) else: xfrm.apply_to(model, undo=True) else: # return the duals for varid in model._interscenario_plugin.STAGE1VAR: duals[varid] = model.dual[_con[varid]] return duals
def solve_separation_problem(solver, model, fallback): xfrm = TransformationFactory('core.relax_discrete') if PYOMO_4_0: xfrm.apply(model, inplace=True) else: xfrm.apply_to(model) _block = model._interscenario_plugin # Switch objectives _block.original_obj().deactivate() _block.separation_obj.activate() #_block.separation_variables.unfix() _par = _block.fixed_variable_values _sep = _block.separation_variables allow_slack = _block.allow_slack if allow_slack: epsilon = _block.epsilon for idx in _sep: _sep[idx].setlb(None) _sep[idx].setub(None) else: _sep.unfix() # Note: preprocessing is only necessary if we are changing a # fixed/freed variable. if FALLBACK_ON_BRUTE_FORCE_PREPROCESS: model.preprocess() else: _map = {} preprocess_block_objectives(_block, idMap=_map) preprocess_block_constraints(_block, idMap=_map) #SOLVE output_buffer = StringIO() pyutilib.misc.setup_redirect(output_buffer) try: results = solver.solve(model, tee=True) except: logger.warning("Exception raised solving the interscenario " "evaluation subproblem") logger.warning("Solver log:\n%s" % output_buffer.getvalue()) raise finally: pyutilib.misc.reset_redirect() ss = results.solver.status tc = results.solver.termination_condition #self.timeInSolver += results['Solver'][0]['Time'] if ss == SolverStatus.ok and tc in _acceptable_termination_conditions: state = '' if PYOMO_4_0: model.load(results) else: model.solutions.load_from(results) elif tc in _infeasible_termination_conditions: state = 'INFEASIBLE' ans = "!!!!" else: state = 'NONOPTIMAL' ans = "????" if state: if fallback: #logger.warning("Initial attempt to solve the interscenario cut " # "separation subproblem failed with the default " # "solver (%s)." % (state,) ) pass else: logger.warning("Solving the interscenario cut separation " "subproblem failed (%s)." % (state,) ) logger.warning("Solver log:\n%s" % output_buffer.getvalue()) else: cut = dict((vid, (value(_sep[vid]), value(_par[vid]))) for vid in _block.STAGE1VAR) obj = value(_block.separation_obj) ans = (math.sqrt(obj), cut) output_buffer.close() # Restore the objective _block.original_obj().activate() _block.separation_obj.deactivate() # Turn off the separation variables if allow_slack: for idx in _sep: _sep[idx].setlb(-epsilon) _sep[idx].setub(epsilon) else: _sep.fix(0) if PYOMO_4_0: xfrm.apply(model, inplace=True, undo=True) else: xfrm.apply_to(model, undo=True) if FALLBACK_ON_BRUTE_FORCE_PREPROCESS: pass else: _map = {} preprocess_block_objectives(_block, idMap=_map) return ans
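Both helpers above relax the discrete variables, solve, and then undo the relaxation in place. A hedged sketch of that apply/undo round trip using 'core.relax_integer_vars', the registration used by the cutting-plane code earlier in this section ('core.relax_discrete' is the older name these helpers target); the function name and default solver are assumptions.

from pyomo.environ import TransformationFactory, SolverFactory

def solve_relaxed(m, solver_name='glpk'):
    xfrm = TransformationFactory('core.relax_integer_vars')
    xfrm.apply_to(m)             # discrete domains become continuous, bounds kept
    results = SolverFactory(solver_name).solve(m)
    xfrm.apply_to(m, undo=True)  # restore the original discrete domains
    return results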