def handle_main_unbounded(main_mip, solve_data, config):
    """Handle an unbounded MIP main problem by bounding the objective and re-solving.

    Unboundedness arises when the nonlinear objective has been moved into the
    constraints and deactivated for the linear main problem.  A symmetric
    artificial bound of (-config.obj_bound, config.obj_bound) is placed on the
    objective expression and the MIP is solved again.

    Args:
        main_mip (Pyomo model): the MIP main problem.
        solve_data (MindtPySolveData): data container that holds solve-instance data.
        config (ConfigBlock): the specific configurations for MindtPy.

    Returns:
        main_mip_results (SolverResults): the results of the bounded main problem.
    """
    util_blk = main_mip.MindtPy_utils
    config.logger.warning(
        'main MILP was unbounded. '
        'Resolving with arbitrary bound values of (-{0:.10g}, {0:.10g}) on the objective. '
        'You can change this bound with the option obj_bound.'.format(
            config.obj_bound))
    # Bound the (deactivated) objective expression from both sides.
    util_blk.objective_bound = Constraint(
        expr=(-config.obj_bound, util_blk.mip_obj.expr, config.obj_bound))
    mip_opt = SolverFactory(config.mip_solver)
    if isinstance(mip_opt, PersistentSolver):
        mip_opt.set_instance(main_mip)
    set_solver_options(mip_opt, solve_data, config, solver_type='mip')
    with SuppressInfeasibleWarning():
        bounded_results = mip_opt.solve(
            main_mip, tee=config.mip_solver_tee, **config.mip_solver_args)
    return bounded_results
def solve_feasibility_subproblem(solve_data, config):
    """Solve a feasibility NLP when the fixed-NLP subproblem is infeasible.

    Clones the working model, adds feasibility slacks, deactivates the active
    objective and all nonlinear constraints, and minimizes a norm of the slack
    variables (L1, L2, or L-infinity per config.feasibility_norm) with the
    integer variables fixed.

    Args:
        solve_data (MindtPySolveData): data container that holds solve-instance data.
        config (ConfigBlock): the specific configurations for MindtPy.

    Returns:
        feas_subproblem (Pyomo model): feasibility NLP from the model.
        feas_soln (SolverResults): results from solving the feasibility NLP.
    """
    subprob = solve_data.working_model.clone()
    add_feas_slacks(subprob, config)
    util_blk = subprob.MindtPy_utils
    if util_blk.find_component('objective_value') is not None:
        util_blk.objective_value.value = 0
    # Silence the original problem: objective off, nonlinear constraints off,
    # feasibility (slacked) block on.
    next(subprob.component_data_objects(Objective, active=True)).deactivate()
    for nl_con in util_blk.nonlinear_constraint_list:
        nl_con.deactivate()
    util_blk.feas_opt.activate()
    slacks = util_blk.feas_opt.slack_var
    if config.feasibility_norm == 'L1':
        feas_expr = sum(s for s in slacks[...])
    elif config.feasibility_norm == 'L2':
        feas_expr = sum(s * s for s in slacks[...])
    else:
        # L-infinity: a single scalar slack variable is the objective.
        feas_expr = slacks
    util_blk.feas_obj = Objective(expr=feas_expr, sense=minimize)
    TransformationFactory('core.fix_integer_vars').apply_to(subprob)

    nlp_solver = SolverFactory(config.nlp_solver)
    solver_args = dict(config.nlp_solver_args)
    set_solver_options(nlp_solver, solve_data, config, solver_type='nlp')
    with SuppressInfeasibleWarning():
        try:
            with time_code(solve_data.timing, 'feasibility subproblem'):
                feas_soln = nlp_solver.solve(
                    subprob, tee=config.nlp_solver_tee, **solver_args)
        except (ValueError, OverflowError):
            # Expression evaluation blew up: reset the free continuous
            # variables to their initial values and retry once.
            for nlp_var, orig_val in zip(util_blk.variable_list,
                                         solve_data.initial_var_values):
                if not nlp_var.fixed and not nlp_var.is_binary():
                    nlp_var.set_value(orig_val, skip_validation=True)
            with time_code(solve_data.timing, 'feasibility subproblem'):
                feas_soln = nlp_solver.solve(
                    subprob, tee=config.nlp_solver_tee, **solver_args)
    handle_feasibility_subproblem_tc(
        feas_soln.solver.termination_condition, util_blk, solve_data, config)
    return subprob, feas_soln
def test_handle_termination_condition(self):
    """Test the outer approximation decomposition algorithm.

    Exercises, in order: handle_subproblem_other_termination,
    handle_main_other_conditions, handle_feasibility_subproblem_tc,
    solve_subproblem on an infeasible fixing, handle_nlp_subproblem_tc,
    handle_fp_main_tc, generate_norm_constraint, set_solver_options,
    and algorithm_should_terminate.  The steps share one solve_data and
    mutate termination conditions on reused results objects, so the
    ordering below is significant.
    """
    model = SimpleMINLP()
    config = _get_MindtPy_config()
    solve_data = set_up_solve_data(model, config)
    with time_code(solve_data.timing, 'total', is_main_timer=True), \
            create_utility_block(solve_data.working_model, 'MindtPy_utils', solve_data):

        MindtPy = solve_data.working_model.MindtPy_utils
        # NOTE(review): the assignment above is duplicated verbatim in the
        # original; the second binding is redundant — confirm and remove.
        MindtPy = solve_data.working_model.MindtPy_utils
        setup_results_object(solve_data, config)
        process_objective(solve_data, config,
                          move_linear_objective=(config.init_strategy == 'FP'
                                                 or config.add_regularization is not None),
                          use_mcpp=config.use_mcpp,
                          update_var_con_list=config.add_regularization is None)
        # Replicate the feasibility / cuts blocks that MindtPy normally builds.
        feas = MindtPy.feas_opt = Block()
        feas.deactivate()
        feas.feas_constraints = ConstraintList(
            doc='Feasibility Problem Constraints')
        lin = MindtPy.cuts = Block()
        lin.deactivate()
        if config.feasibility_norm == 'L1' or config.feasibility_norm == 'L2':
            feas.nl_constraint_set = RangeSet(
                len(MindtPy.nonlinear_constraint_list),
                doc='Integer index set over the nonlinear constraints.')
            # Create slack variables for feasibility problem
            feas.slack_var = Var(feas.nl_constraint_set,
                                 domain=NonNegativeReals, initialize=1)
        else:
            feas.slack_var = Var(domain=NonNegativeReals, initialize=1)

        # no-good cuts exclude particular discrete decisions
        lin.no_good_cuts = ConstraintList(doc='no-good cuts')

        fixed_nlp = solve_data.working_model.clone()
        TransformationFactory('core.fix_integer_vars').apply_to(fixed_nlp)

        MindtPy_initialize_main(solve_data, config)

        # test handle_subproblem_other_termination
        termination_condition = tc.maxIterations
        config.add_no_good_cuts = True
        handle_subproblem_other_termination(fixed_nlp, termination_condition,
                                            solve_data, config)
        self.assertEqual(
            len(solve_data.mip.MindtPy_utils.cuts.no_good_cuts), 1)

        # test handle_main_other_conditions
        main_mip, main_mip_results = solve_main(solve_data, config)
        main_mip_results.solver.termination_condition = tc.infeasible
        handle_main_other_conditions(
            solve_data.mip, main_mip_results, solve_data, config)
        self.assertIs(
            solve_data.results.solver.termination_condition, tc.feasible)

        # Unbounded main: handler should add an artificial objective bound.
        main_mip_results.solver.termination_condition = tc.unbounded
        handle_main_other_conditions(
            solve_data.mip, main_mip_results, solve_data, config)
        self.assertIn(main_mip.MindtPy_utils.objective_bound,
                      main_mip.component_data_objects(ctype=Constraint))

        main_mip.MindtPy_utils.del_component('objective_bound')
        main_mip_results.solver.termination_condition = tc.infeasibleOrUnbounded
        handle_main_other_conditions(
            solve_data.mip, main_mip_results, solve_data, config)
        self.assertIn(main_mip.MindtPy_utils.objective_bound,
                      main_mip.component_data_objects(ctype=Constraint))

        main_mip_results.solver.termination_condition = tc.maxTimeLimit
        handle_main_other_conditions(
            solve_data.mip, main_mip_results, solve_data, config)
        self.assertIs(solve_data.results.solver.termination_condition,
                      tc.maxTimeLimit)

        # tc.other with a feasible solution: values should be copied back.
        main_mip_results.solver.termination_condition = tc.other
        main_mip_results.solution.status = SolutionStatus.feasible
        handle_main_other_conditions(
            solve_data.mip, main_mip_results, solve_data, config)
        for v1, v2 in zip(
                main_mip.MindtPy_utils.variable_list,
                solve_data.working_model.MindtPy_utils.variable_list):
            self.assertEqual(v1.value, v2.value)

        # test handle_feasibility_subproblem_tc
        feas_subproblem = solve_data.working_model.clone()
        add_feas_slacks(feas_subproblem, config)
        MindtPy = feas_subproblem.MindtPy_utils
        MindtPy.feas_opt.activate()
        if config.feasibility_norm == 'L1':
            MindtPy.feas_obj = Objective(expr=sum(
                s for s in MindtPy.feas_opt.slack_var[...]), sense=minimize)
        elif config.feasibility_norm == 'L2':
            MindtPy.feas_obj = Objective(expr=sum(
                s * s for s in MindtPy.feas_opt.slack_var[...]), sense=minimize)
        else:
            MindtPy.feas_obj = Objective(expr=MindtPy.feas_opt.slack_var,
                                         sense=minimize)

        handle_feasibility_subproblem_tc(tc.optimal, MindtPy,
                                         solve_data, config)
        handle_feasibility_subproblem_tc(tc.infeasible, MindtPy,
                                         solve_data, config)
        self.assertIs(solve_data.should_terminate, True)
        self.assertIs(solve_data.results.solver.status, SolverStatus.error)

        solve_data.should_terminate = False
        solve_data.results.solver.status = None
        handle_feasibility_subproblem_tc(tc.maxIterations, MindtPy,
                                         solve_data, config)
        self.assertIs(solve_data.should_terminate, True)
        self.assertIs(solve_data.results.solver.status, SolverStatus.error)

        solve_data.should_terminate = False
        solve_data.results.solver.status = None
        handle_feasibility_subproblem_tc(tc.solverFailure, MindtPy,
                                         solve_data, config)
        self.assertIs(solve_data.should_terminate, True)
        self.assertIs(solve_data.results.solver.status, SolverStatus.error)

        # test NLP subproblem infeasible
        # Fixing all Y to 0 is assumed to make the fixed NLP infeasible for
        # SimpleMINLP — TODO confirm against the model definition.
        solve_data.working_model.Y[1].value = 0
        solve_data.working_model.Y[2].value = 0
        solve_data.working_model.Y[3].value = 0
        fixed_nlp, fixed_nlp_results = solve_subproblem(solve_data, config)
        solve_data.working_model.Y[1].value = None
        solve_data.working_model.Y[2].value = None
        solve_data.working_model.Y[3].value = None

        # test handle_nlp_subproblem_tc
        fixed_nlp_results.solver.termination_condition = tc.maxTimeLimit
        handle_nlp_subproblem_tc(fixed_nlp, fixed_nlp_results,
                                 solve_data, config)
        self.assertIs(solve_data.should_terminate, True)
        self.assertIs(solve_data.results.solver.termination_condition,
                      tc.maxTimeLimit)

        fixed_nlp_results.solver.termination_condition = tc.maxEvaluations
        handle_nlp_subproblem_tc(fixed_nlp, fixed_nlp_results,
                                 solve_data, config)
        self.assertIs(solve_data.should_terminate, True)
        self.assertIs(solve_data.results.solver.termination_condition,
                      tc.maxEvaluations)

        # NOTE(review): the condition set below is maxIterations but the
        # assertion checks maxEvaluations.  This passes only if the handler
        # leaves the previous value in place for maxIterations (or maps it to
        # maxEvaluations) — verify against handle_nlp_subproblem_tc.
        fixed_nlp_results.solver.termination_condition = tc.maxIterations
        handle_nlp_subproblem_tc(fixed_nlp, fixed_nlp_results,
                                 solve_data, config)
        self.assertIs(solve_data.should_terminate, True)
        self.assertIs(solve_data.results.solver.termination_condition,
                      tc.maxEvaluations)

        # test handle_fp_main_tc
        config.init_strategy = 'FP'
        solve_data.fp_iter = 1
        init_rNLP(solve_data, config)
        feas_main, feas_main_results = solve_main(solve_data, config, fp=True)
        feas_main_results.solver.termination_condition = tc.optimal
        fp_should_terminate = handle_fp_main_tc(
            feas_main_results, solve_data, config)
        self.assertIs(fp_should_terminate, False)

        feas_main_results.solver.termination_condition = tc.maxTimeLimit
        fp_should_terminate = handle_fp_main_tc(
            feas_main_results, solve_data, config)
        self.assertIs(fp_should_terminate, True)
        self.assertIs(solve_data.results.solver.termination_condition,
                      tc.maxTimeLimit)

        feas_main_results.solver.termination_condition = tc.infeasible
        fp_should_terminate = handle_fp_main_tc(
            feas_main_results, solve_data, config)
        self.assertIs(fp_should_terminate, True)

        feas_main_results.solver.termination_condition = tc.unbounded
        fp_should_terminate = handle_fp_main_tc(
            feas_main_results, solve_data, config)
        self.assertIs(fp_should_terminate, True)

        feas_main_results.solver.termination_condition = tc.other
        feas_main_results.solution.status = SolutionStatus.feasible
        fp_should_terminate = handle_fp_main_tc(
            feas_main_results, solve_data, config)
        self.assertIs(fp_should_terminate, False)

        feas_main_results.solver.termination_condition = tc.solverFailure
        fp_should_terminate = handle_fp_main_tc(
            feas_main_results, solve_data, config)
        self.assertIs(fp_should_terminate, True)

        # test generate_norm_constraint
        # L1 attaches a named 'L1_norm_constraint' on the utility block;
        # L2 and L_infinity attach 'norm_constraint' on the model.
        fp_nlp = solve_data.working_model.clone()
        config.fp_main_norm = 'L1'
        generate_norm_constraint(fp_nlp, solve_data, config)
        self.assertIsNotNone(
            fp_nlp.MindtPy_utils.find_component('L1_norm_constraint'))

        config.fp_main_norm = 'L2'
        generate_norm_constraint(fp_nlp, solve_data, config)
        self.assertIsNotNone(fp_nlp.find_component('norm_constraint'))

        fp_nlp.del_component('norm_constraint')
        config.fp_main_norm = 'L_infinity'
        generate_norm_constraint(fp_nlp, solve_data, config)
        self.assertIsNotNone(fp_nlp.find_component('norm_constraint'))

        # test set_solver_options
        config.mip_solver = 'gams'
        config.threads = 1
        opt = SolverFactory(config.mip_solver)
        set_solver_options(opt, solve_data, config,
                           'mip', regularization=False)

        config.mip_solver = 'gurobi'
        config.mip_regularization_solver = 'gurobi'
        config.regularization_mip_threads = 1
        opt = SolverFactory(config.mip_solver)
        set_solver_options(opt, solve_data, config,
                           'mip', regularization=True)

        config.nlp_solver = 'gams'
        config.nlp_solver_args['solver'] = 'ipopt'
        set_solver_options(opt, solve_data, config,
                           'nlp', regularization=False)

        config.nlp_solver_args['solver'] = 'ipopth'
        set_solver_options(opt, solve_data, config,
                           'nlp', regularization=False)

        config.nlp_solver_args['solver'] = 'conopt'
        set_solver_options(opt, solve_data, config,
                           'nlp', regularization=False)

        config.nlp_solver_args['solver'] = 'msnlp'
        set_solver_options(opt, solve_data, config,
                           'nlp', regularization=False)

        config.nlp_solver_args['solver'] = 'baron'
        set_solver_options(opt, solve_data, config,
                           'nlp', regularization=False)

        # test algorithm_should_terminate
        solve_data.should_terminate = True
        solve_data.UB = float('inf')
        self.assertIs(
            algorithm_should_terminate(solve_data, config,
                                       check_cycling=False), True)
        self.assertIs(solve_data.results.solver.termination_condition,
                      tc.noSolution)

        solve_data.UB = 100
        self.assertIs(
            algorithm_should_terminate(solve_data, config,
                                       check_cycling=False), True)
        self.assertIs(solve_data.results.solver.termination_condition,
                      tc.feasible)

        solve_data.objective_sense = maximize
        solve_data.LB = float('-inf')
        self.assertIs(
            algorithm_should_terminate(solve_data, config,
                                       check_cycling=False), True)
        self.assertIs(solve_data.results.solver.termination_condition,
                      tc.noSolution)

        solve_data.LB = 100
        self.assertIs(
            algorithm_should_terminate(solve_data, config,
                                       check_cycling=False), True)
        self.assertIs(solve_data.results.solver.termination_condition,
                      tc.feasible)
def init_max_binaries(solve_data, config):
    """Modifies model by maximizing the number of activated binary variables.

    Note - The user would usually want to call solve_subproblem after an
    invocation of this function.

    Parameters
    ----------
    solve_data : MindtPySolveData
        Data container that holds solve-instance data.
    config : ConfigBlock
        The specific configurations for MindtPy.

    Raises
    ------
    ValueError
        MILP main problem is infeasible.
    ValueError
        MindtPy unable to handle the termination condition of the MILP main problem.
    """
    m = solve_data.working_model.clone()
    if config.calculate_dual:
        m.dual.deactivate()
    MindtPy = m.MindtPy_utils
    solve_data.mip_subiter += 1
    config.logger.debug('Initialization: maximize value of binaries')
    # Drop nonlinearities and the original objective: the initialization
    # problem is a pure MILP over the (relaxed) linear constraints.
    for c in MindtPy.nonlinear_constraint_list:
        c.deactivate()
    objective = next(m.component_data_objects(Objective, active=True))
    objective.deactivate()
    binary_vars = (v for v in m.MindtPy_utils.discrete_variable_list
                   if v.is_binary() and not v.fixed)
    MindtPy.max_binary_obj = Objective(
        expr=sum(binary_vars), sense=maximize)

    # Deactivate ipopt bound-multiplier suffixes if present (no-op otherwise).
    getattr(m, 'ipopt_zL_out', _DoNothing()).deactivate()
    getattr(m, 'ipopt_zU_out', _DoNothing()).deactivate()

    mipopt = SolverFactory(config.mip_solver)
    if isinstance(mipopt, PersistentSolver):
        mipopt.set_instance(m)

    mip_args = dict(config.mip_solver_args)
    set_solver_options(mipopt, solve_data, config, solver_type='mip')
    results = mipopt.solve(m, tee=config.mip_solver_tee, **mip_args)

    solve_terminate_cond = results.solver.termination_condition
    if solve_terminate_cond is tc.optimal:
        # Copy the maximizing binary assignment back to the working model.
        copy_var_list_values(
            MindtPy.variable_list,
            solve_data.working_model.MindtPy_utils.variable_list,
            config)
        config.logger.info(
            solve_data.log_formatter.format(
                '-', 'Max binary MILP',
                value(MindtPy.max_binary_obj.expr),
                solve_data.LB, solve_data.UB, solve_data.rel_gap,
                get_main_elapsed_time(solve_data.timing)))
    elif solve_terminate_cond is tc.infeasible:
        raise ValueError('MILP main problem is infeasible. '
                         'Problem may have no more feasible '
                         'binary configurations.')
    elif solve_terminate_cond is tc.maxTimeLimit:
        # Fixed copy-paste bug: the original messages here said
        # 'NLP subproblem failed ...' although this function solves a MILP.
        config.logger.info(
            'MILP main problem failed to converge within time limit.')
        solve_data.results.solver.termination_condition = tc.maxTimeLimit
    elif solve_terminate_cond is tc.maxIterations:
        config.logger.info(
            'MILP main problem failed to converge within iteration limit.')
    else:
        raise ValueError(
            'MindtPy unable to handle MILP main termination condition '
            'of %s. Solver message: %s' %
            (solve_terminate_cond, results.solver.message))
def init_rNLP(solve_data, config):
    """Initialize the problem by solving the relaxed NLP and then store the
    optimal variable values obtained from solving the rNLP.

    Parameters
    ----------
    solve_data : MindtPySolveData
        Data container that holds solve-instance data.
    config : ConfigBlock
        The specific configurations for MindtPy.

    Raises
    ------
    ValueError
        MindtPy unable to handle the termination condition of the relaxed NLP.
    """
    # Work on a clone so the working model keeps its integrality restrictions.
    m = solve_data.working_model.clone()
    config.logger.debug(
        'Relaxed NLP: Solve relaxed integrality')
    MindtPy = m.MindtPy_utils
    TransformationFactory('core.relax_integer_vars').apply_to(m)
    nlp_args = dict(config.nlp_solver_args)
    nlpopt = SolverFactory(config.nlp_solver)
    set_solver_options(nlpopt, solve_data, config, solver_type='nlp')
    with SuppressInfeasibleWarning():
        results = nlpopt.solve(m, tee=config.nlp_solver_tee, **nlp_args)
    subprob_terminate_cond = results.solver.termination_condition
    if subprob_terminate_cond in {tc.optimal, tc.feasible, tc.locallyOptimal}:
        main_objective = MindtPy.objective_list[-1]
        if subprob_terminate_cond == tc.optimal:
            # An optimal relaxation value is a valid dual bound for the MINLP.
            update_dual_bound(solve_data, value(main_objective.expr))
        else:
            config.logger.info(
                'relaxed NLP is not solved to optimality.')
            update_suboptimal_dual_bound(solve_data, results)
        # Duals are only collected when requested; add_oa_cuts uses them.
        dual_values = list(
            m.dual[c] for c in
            MindtPy.constraint_list) if config.calculate_dual else None
        config.logger.info(
            solve_data.log_formatter.format(
                '-', 'Relaxed NLP', value(main_objective.expr),
                solve_data.LB, solve_data.UB, solve_data.rel_gap,
                get_main_elapsed_time(solve_data.timing)))
        # Add OA cut
        if config.strategy in {'OA', 'GOA', 'FP'}:
            copy_var_list_values(m.MindtPy_utils.variable_list,
                                 solve_data.mip.MindtPy_utils.variable_list,
                                 config, ignore_integrality=True)
            if config.init_strategy == 'FP':
                copy_var_list_values(
                    m.MindtPy_utils.variable_list,
                    solve_data.working_model.MindtPy_utils.variable_list,
                    config, ignore_integrality=True)
            if config.strategy in {'OA', 'FP'}:
                add_oa_cuts(solve_data.mip, dual_values, solve_data, config)
            elif config.strategy == 'GOA':
                add_affine_cuts(solve_data, config)
            # Round the relaxed discrete values for the MIP warm start.
            for var in solve_data.mip.MindtPy_utils.discrete_variable_list:
                # We don't want to trigger the reset of the global stale
                # indicator, so we will set this variable to be "stale",
                # knowing that set_value will switch it back to "not
                # stale"
                var.stale = True
                var.set_value(int(round(var.value)), skip_validation=True)
    elif subprob_terminate_cond in {tc.infeasible, tc.noSolution}:
        # TODO fail? try something else?
        config.logger.info(
            'Initial relaxed NLP problem is infeasible. '
            'Problem may be infeasible.')
    elif subprob_terminate_cond is tc.maxTimeLimit:
        config.logger.info(
            'NLP subproblem failed to converge within time limit.')
        solve_data.results.solver.termination_condition = tc.maxTimeLimit
    elif subprob_terminate_cond is tc.maxIterations:
        config.logger.info(
            'NLP subproblem failed to converge within iteration limit.')
    else:
        raise ValueError(
            'MindtPy unable to handle relaxed NLP termination condition '
            'of %s. Solver message: %s' %
            (subprob_terminate_cond, results.solver.message))
def solve_main(solve_data, config, fp=False, regularization_problem=False):
    """This function solves the MIP main problem.

    Args:
        solve_data (MindtPySolveData): data container that holds solve-instance data.
        config (ConfigBlock): the specific configurations for MindtPy.
        fp (bool, optional): whether it is in the loop of feasibility pump. Defaults to False.
        regularization_problem (bool, optional): whether it is solving a regularization problem. Defaults to False.

    Returns:
        solve_data.mip (Pyomo model): the MIP stored in solve_data.
        main_mip_results (SolverResults): results from solving the main MIP.
        On solver failure in single-tree mode, returns (None, None).
    """
    # Only the regular main problem advances the MIP iteration counter.
    if not fp and not regularization_problem:
        solve_data.mip_iter += 1

    # setup main problem
    setup_main(solve_data, config, fp, regularization_problem)
    mainopt = set_up_mip_solver(solve_data, config, regularization_problem)

    mip_args = dict(config.mip_solver_args)
    if config.mip_solver in {
            'cplex', 'cplex_persistent', 'gurobi', 'gurobi_persistent'
    }:
        # These interfaces accept a warm start from current variable values.
        mip_args['warmstart'] = True
    set_solver_options(mainopt, solve_data, config, solver_type='mip',
                       regularization=regularization_problem)
    try:
        with time_code(
                solve_data.timing,
                'regularization main' if regularization_problem else
                ('fp main' if fp else 'main')):
            main_mip_results = mainopt.solve(solve_data.mip,
                                             tee=config.mip_solver_tee,
                                             **mip_args)
    except (ValueError, AttributeError):
        # Single-tree runs can abort with an unloadable results object,
        # typically when no incumbent exists at the time limit.
        if config.single_tree:
            config.logger.warning('Single tree terminate.')
            if get_main_elapsed_time(
                    solve_data.timing) >= config.time_limit - 2:
                config.logger.warning('due to the timelimit.')
                solve_data.results.solver.termination_condition = tc.maxTimeLimit
            if config.strategy == 'GOA' or config.add_no_good_cuts:
                config.logger.warning(
                    'ValueError: Cannot load a SolverResults object with bad status: error. '
                    'MIP solver failed. This usually happens in the single-tree GOA algorithm. '
                    "No-good cuts are added and GOA algorithm doesn't converge within the time limit. "
                    'No integer solution is found, so the cplex solver will report an error status. '
                )
        return None, None
    if config.solution_pool:
        # NOTE(review): reaches into the persistent solver's private
        # attributes so later code can read the solution pool — verify this
        # is only reached with a persistent interface.
        main_mip_results._solver_model = mainopt._solver_model
        main_mip_results._pyomo_var_to_solver_var_map = mainopt._pyomo_var_to_solver_var_map
    if main_mip_results.solver.termination_condition is tc.optimal:
        if config.single_tree and not config.add_no_good_cuts and not regularization_problem:
            # NOTE(review): 'uptade' is misspelled but appears to match the
            # helper's actual definition elsewhere in the package; renaming
            # requires a coordinated change at the definition site.
            uptade_suboptimal_dual_bound(solve_data, main_mip_results)
        if regularization_problem:
            config.logger.info(
                solve_data.log_formatter.format(
                    solve_data.mip_iter,
                    'Reg ' + solve_data.regularization_mip_type,
                    value(solve_data.mip.MindtPy_utils.loa_proj_mip_obj),
                    solve_data.LB, solve_data.UB, solve_data.rel_gap,
                    get_main_elapsed_time(solve_data.timing)))

    elif main_mip_results.solver.termination_condition is tc.infeasibleOrUnbounded:
        # Linear solvers will sometimes tell me that it's infeasible or
        # unbounded during presolve, but fails to distinguish. We need to
        # resolve with a solver option flag on.
        main_mip_results, _ = distinguish_mip_infeasible_or_unbounded(
            solve_data.mip, config)
        return solve_data.mip, main_mip_results

    if regularization_problem:
        # Tear down the regularization-specific components so the next call
        # rebuilds a clean main problem.
        solve_data.mip.MindtPy_utils.objective_constr.deactivate()
        solve_data.mip.MindtPy_utils.del_component('loa_proj_mip_obj')
        solve_data.mip.MindtPy_utils.cuts.del_component('obj_reg_estimate')
        if config.add_regularization == 'level_L1':
            solve_data.mip.MindtPy_utils.del_component('L1_obj')
        elif config.add_regularization == 'level_L_infinity':
            solve_data.mip.MindtPy_utils.del_component('L_infinity_obj')

    return solve_data.mip, main_mip_results
# NOTE(review): this is a second module-level definition of solve_main (an
# earlier variant); in Python the later definition wins, so one of the two
# is dead code — confirm which should be kept.
def solve_main(solve_data, config, fp=False, regularization_problem=False):
    """
    This function solves the MIP main problem

    Parameters
    ----------
    solve_data: MindtPy Data Container
        data container that holds solve-instance data
    config: ConfigBlock
        contains the specific configurations for the algorithm

    Returns
    -------
    solve_data.mip: Pyomo model
        the MIP stored in solve_data
    main_mip_results: Pyomo results object
        result from solving the main MIP
    fp: Bool
        generate the feasibility pump regularization main problem
    regularization_problem: Bool
        generate the ROA regularization main problem
    """
    if fp:
        config.logger.info('FP-MIP %s: Solve main problem.' %
                           (solve_data.fp_iter,))
    elif regularization_problem:
        config.logger.info('Regularization-MIP %s: Solve main regularization problem.' %
                           (solve_data.mip_iter,))
    else:
        # Only the regular main problem advances the iteration counter.
        solve_data.mip_iter += 1
        config.logger.info('MIP %s: Solve main problem.' %
                           (solve_data.mip_iter,))
    # setup main problem
    setup_main(solve_data, config, fp, regularization_problem)
    mainopt = setup_mip_solver(solve_data, config, regularization_problem)

    mip_args = dict(config.mip_solver_args)
    if config.mip_solver in {'cplex', 'cplex_persistent', 'gurobi', 'gurobi_persistent'}:
        # These interfaces accept a warm start from current variable values.
        mip_args['warmstart'] = True
    set_solver_options(mainopt, solve_data, config,
                       solver_type='mip', regularization=regularization_problem)
    try:
        with time_code(solve_data.timing, 'regularization main' if regularization_problem else ('fp main' if fp else 'main')):
            main_mip_results = mainopt.solve(
                solve_data.mip, tee=config.mip_solver_tee, **mip_args)
    except (ValueError, AttributeError):
        # Single-tree runs can abort with an unloadable results object,
        # typically when no incumbent exists at the time limit.
        if config.single_tree:
            config.logger.warning('Single tree terminate.')
            if get_main_elapsed_time(solve_data.timing) >= config.time_limit - 2:
                config.logger.warning('due to the timelimit.')
                solve_data.results.solver.termination_condition = tc.maxTimeLimit
            if config.strategy == 'GOA' or config.add_no_good_cuts:
                config.logger.warning('ValueError: Cannot load a SolverResults object with bad status: error. '
                                      'MIP solver failed. This usually happens in the single-tree GOA algorithm. '
                                      "No-good cuts are added and GOA algorithm doesn't converge within the time limit. "
                                      'No integer solution is found, so the cplex solver will report an error status. ')
        return None, None
    if main_mip_results.solver.termination_condition is tc.optimal:
        if config.single_tree and not config.add_no_good_cuts and not regularization_problem:
            # Tighten the dual bound from the MIP relaxation bound and record
            # whether it improved since the previous iteration.
            if solve_data.objective_sense == minimize:
                solve_data.LB = max(
                    main_mip_results.problem.lower_bound, solve_data.LB)
                solve_data.bound_improved = solve_data.LB > solve_data.LB_progress[-1]
                solve_data.LB_progress.append(solve_data.LB)
            else:
                solve_data.UB = min(
                    main_mip_results.problem.upper_bound, solve_data.UB)
                solve_data.bound_improved = solve_data.UB < solve_data.UB_progress[-1]
                solve_data.UB_progress.append(solve_data.UB)

    elif main_mip_results.solver.termination_condition is tc.infeasibleOrUnbounded:
        # Linear solvers will sometimes tell me that it's infeasible or
        # unbounded during presolve, but fails to distinguish. We need to
        # resolve with a solver option flag on.
        main_mip_results, _ = distinguish_mip_infeasible_or_unbounded(
            solve_data.mip, config)
        return solve_data.mip, main_mip_results

    if regularization_problem:
        # Tear down the regularization-specific components so the next call
        # rebuilds a clean main problem.
        solve_data.mip.MindtPy_utils.objective_constr.deactivate()
        solve_data.mip.MindtPy_utils.del_component('loa_proj_mip_obj')
        solve_data.mip.MindtPy_utils.cuts.del_component('obj_reg_estimate')
        if config.add_regularization == 'level_L1':
            solve_data.mip.MindtPy_utils.del_component('L1_obj')
        elif config.add_regularization == 'level_L_infinity':
            solve_data.mip.MindtPy_utils.del_component(
                'L_infinity_obj')

    return solve_data.mip, main_mip_results
# NOTE(review): a second, later variant of solve_fp_subproblem exists in this
# file (it delegates the norm constraint to generate_norm_constraint); in
# Python the later definition shadows this one — confirm which should remain.
def solve_fp_subproblem(solve_data, config):
    """
    Solves the feasibility pump NLP

    This function sets up the 'fp_nlp' by relax integer variables.
    precomputes dual values, deactivates trivial constraints, and then solves NLP model.

    Parameters
    ----------
    solve_data: MindtPy Data Container
        data container that holds solve-instance data
    config: ConfigBlock
        contains the specific configurations for the algorithm

    Returns
    -------
    fp_nlp: Pyomo model
        Fixed-NLP from the model
    results: Pyomo results object
        result from solving the Fixed-NLP
    """
    fp_nlp = solve_data.working_model.clone()
    MindtPy = fp_nlp.MindtPy_utils
    config.logger.info('FP-NLP %s: Solve feasibility pump NLP subproblem.'
                       % (solve_data.fp_iter,))

    # Set up NLP
    fp_nlp.MindtPy_utils.objective_list[-1].deactivate()
    # Force the FP iterate to improve on the current primal bound.
    if solve_data.objective_sense == minimize:
        fp_nlp.improving_objective_cut = Constraint(
            expr=fp_nlp.MindtPy_utils.objective_value <= solve_data.UB)
    else:
        fp_nlp.improving_objective_cut = Constraint(
            expr=fp_nlp.MindtPy_utils.objective_value >= solve_data.LB)

    # Add norm_constraint, which guarantees the monotonicity of the norm objective value sequence of all iterations
    # Ref: Paper 'A storm of feasibility pumps for nonconvex MINLP'
    # the norm type is consistant with the norm obj of the FP-main problem.
    if config.fp_norm_constraint:
        if config.fp_main_norm == 'L1':
            # TODO: check if we can access the block defined in FP-main problem
            generate_norm1_norm_constraint(
                fp_nlp, solve_data.mip, config, discrete_only=True)
        elif config.fp_main_norm == 'L2':
            # Require the new L2 distance (to the MIP discrete point) to be at
            # most fp_norm_constraint_coef times the previous distance.
            fp_nlp.norm_constraint = Constraint(expr=sum((nlp_var - mip_var.value)**2 - config.fp_norm_constraint_coef*(nlp_var.value - mip_var.value)**2
                                                         for nlp_var, mip_var in zip(fp_nlp.MindtPy_utils.discrete_variable_list, solve_data.mip.MindtPy_utils.discrete_variable_list)) <= 0)
        elif config.fp_main_norm == 'L_infinity':
            fp_nlp.norm_constraint = ConstraintList()
            rhs = config.fp_norm_constraint_coef * max(nlp_var.value - mip_var.value for nlp_var, mip_var in zip(
                fp_nlp.MindtPy_utils.discrete_variable_list, solve_data.mip.MindtPy_utils.discrete_variable_list))
            # One linear constraint per discrete variable bounds the max gap.
            for nlp_var, mip_var in zip(fp_nlp.MindtPy_utils.discrete_variable_list, solve_data.mip.MindtPy_utils.discrete_variable_list):
                fp_nlp.norm_constraint.add(nlp_var - mip_var.value <= rhs)

    MindtPy.fp_nlp_obj = generate_norm2sq_objective_function(
        fp_nlp, solve_data.mip, discrete_only=config.fp_discrete_only)

    MindtPy.cuts.deactivate()
    TransformationFactory('core.relax_integer_vars').apply_to(fp_nlp)
    try:
        TransformationFactory('contrib.deactivate_trivial_constraints').apply_to(
            fp_nlp, tmp=True, ignore_infeasible=False, tolerance=config.constraint_tolerance)
    except ValueError:
        # Trivial-constraint preprocessing proved infeasibility: report it
        # without invoking the NLP solver.
        config.logger.warning(
            'infeasibility detected in deactivate_trivial_constraints')
        results = SolverResults()
        results.solver.termination_condition = tc.infeasible
        return fp_nlp, results
    # Solve the NLP
    nlpopt = SolverFactory(config.nlp_solver)
    nlp_args = dict(config.nlp_solver_args)
    set_solver_options(nlpopt, solve_data, config, solver_type='nlp')
    with SuppressInfeasibleWarning():
        with time_code(solve_data.timing, 'fp subproblem'):
            results = nlpopt.solve(
                fp_nlp, tee=config.nlp_solver_tee, **nlp_args)
    return fp_nlp, results
def fix_dual_bound(solve_data, config, last_iter_cuts):
    """Fix the dual bound when no-good cuts or tabu list is activated.

    Args:
        solve_data (MindtPySolveData): data container that holds solve-instance data.
        config (ConfigBlock): the specific configurations for MindtPy.
        last_iter_cuts (bool): whether the cuts in the last iteration have been added.
    """
    if config.single_tree:
        config.logger.info(
            'Fix the bound to the value of one iteration before optimal solution is found.'
        )
        try:
            # stored_bound maps a primal bound to the dual bound recorded one
            # iteration earlier; missing key means no usable record.
            if solve_data.objective_sense == minimize:
                solve_data.LB = solve_data.stored_bound[solve_data.UB]
            else:
                solve_data.UB = solve_data.stored_bound[solve_data.LB]
        except KeyError:
            config.logger.info('No stored bound found. Bound fix failed.')
    else:
        config.logger.info(
            'Solve the main problem without the last no_good cut to fix the bound.'
            'zero_tolerance is set to 1E-4')
        config.zero_tolerance = 1E-4
        # Solve NLP subproblem
        # The constraint linearization happens in the handlers
        if not last_iter_cuts:
            fixed_nlp, fixed_nlp_result = solve_subproblem(solve_data, config)
            handle_nlp_subproblem_tc(fixed_nlp, fixed_nlp_result, solve_data,
                                     config)

        MindtPy = solve_data.mip.MindtPy_utils
        # deactivate the integer cuts generated after the best solution was found.
        if config.strategy == 'GOA':
            try:
                # num_no_good_cuts_added maps a bound value to how many
                # no-good cuts were valid when it was attained.
                if solve_data.objective_sense == minimize:
                    valid_no_good_cuts_num = solve_data.num_no_good_cuts_added[
                        solve_data.UB]
                else:
                    valid_no_good_cuts_num = solve_data.num_no_good_cuts_added[
                        solve_data.LB]
                if config.add_no_good_cuts:
                    for i in range(valid_no_good_cuts_num + 1,
                                   len(MindtPy.cuts.no_good_cuts) + 1):
                        MindtPy.cuts.no_good_cuts[i].deactivate()
                if config.use_tabu_list:
                    solve_data.integer_list = solve_data.integer_list[:
                                                                      valid_no_good_cuts_num]
            except KeyError:
                config.logger.info('No-good cut deactivate failed.')
        elif config.strategy == 'OA':
            # Only deactive the last OA cuts may not be correct.
            # Since integer solution may also be cut off by OA cuts due to calculation approximation.
            if config.add_no_good_cuts:
                MindtPy.cuts.no_good_cuts[len(
                    MindtPy.cuts.no_good_cuts)].deactivate()
            if config.use_tabu_list:
                solve_data.integer_list = solve_data.integer_list[:-1]
        if config.add_regularization is not None and MindtPy.find_component(
                'mip_obj') is None:
            MindtPy.objective_list[-1].activate()
        mainopt = SolverFactory(config.mip_solver)
        # determine if persistent solver is called.
        if isinstance(mainopt, PersistentSolver):
            mainopt.set_instance(solve_data.mip, symbolic_solver_labels=True)
        if config.use_tabu_list:
            # NOTE(review): CPLEX-specific callback wiring through the
            # persistent interface's private _solver_model — assumes the MIP
            # solver is cplex_persistent when use_tabu_list is set; confirm.
            tabulist = mainopt._solver_model.register_callback(
                tabu_list.IncumbentCallback_cplex)
            tabulist.solve_data = solve_data
            tabulist.opt = mainopt
            tabulist.config = config
            mainopt._solver_model.parameters.preprocessing.reduce.set(1)
            # If the callback is used to reject incumbents, the user must set the
            # parameter c.parameters.preprocessing.reduce either to the value 1 (one)
            # to restrict presolve to primal reductions only or to 0 (zero) to disable all presolve reductions
            mainopt._solver_model.set_warning_stream(None)
            mainopt._solver_model.set_log_stream(None)
            mainopt._solver_model.set_error_stream(None)
        mip_args = dict(config.mip_solver_args)
        set_solver_options(mainopt, solve_data, config, solver_type='mip')
        main_mip_results = mainopt.solve(solve_data.mip,
                                         tee=config.mip_solver_tee, **mip_args)
        if main_mip_results.solver.termination_condition is tc.infeasible:
            config.logger.info(
                'Bound fix failed. The bound fix problem is infeasible')
        else:
            # NOTE(review): 'uptade' is misspelled but appears to match the
            # helper's actual definition elsewhere in the package; renaming
            # here alone would break the call.
            uptade_suboptimal_dual_bound(solve_data, main_mip_results)
        config.logger.info('Fixed bound values: LB: {} UB: {}'.format(
            solve_data.LB, solve_data.UB))
    # Check bound convergence
    if solve_data.LB + config.bound_tolerance >= solve_data.UB:
        solve_data.results.solver.termination_condition = tc.optimal
def solve_fp_subproblem(solve_data, config):
    """Solves the feasibility pump NLP subproblem.

    This function sets up the 'fp_nlp' by relax integer variables.
    precomputes dual values, deactivates trivial constraints, and then solves NLP model.

    Parameters
    ----------
    solve_data : MindtPySolveData
        Data container that holds solve-instance data.
    config : ConfigBlock
        The specific configurations for MindtPy.

    Returns
    -------
    fp_nlp : Pyomo model
        Fixed-NLP from the model.
    results : SolverResults
        Results from solving the fixed-NLP subproblem.
    """
    fp_nlp = solve_data.working_model.clone()
    MindtPy = fp_nlp.MindtPy_utils

    # Set up NLP
    # Replace the original objective with an improving-objective cut that
    # forces the FP-NLP to at least match the current primal bound.
    fp_nlp.MindtPy_utils.objective_list[-1].deactivate()
    if solve_data.objective_sense == minimize:
        fp_nlp.improving_objective_cut = Constraint(
            expr=sum(fp_nlp.MindtPy_utils.objective_value[:]) <= solve_data.UB)
    else:
        fp_nlp.improving_objective_cut = Constraint(
            expr=sum(fp_nlp.MindtPy_utils.objective_value[:]) >= solve_data.LB)

    # Add norm_constraint, which guarantees the monotonicity of the norm objective value sequence of all iterations
    # Ref: Paper 'A storm of feasibility pumps for nonconvex MINLP' https://doi.org/10.1007/s10107-012-0608-x
    # the norm type is consistant with the norm obj of the FP-main problem.
    if config.fp_norm_constraint:
        generate_norm_constraint(fp_nlp, solve_data, config)

    # Minimize the squared L2 distance to the latest MIP solution.
    MindtPy.fp_nlp_obj = generate_norm2sq_objective_function(
        fp_nlp, solve_data.mip, discrete_only=config.fp_discrete_only)

    MindtPy.cuts.deactivate()
    TransformationFactory('core.relax_integer_vars').apply_to(fp_nlp)
    try:
        # tmp=True so the deactivation can be reverted later;
        # ignore_infeasible=False makes provably-infeasible trivial
        # constraints raise instead of being skipped.
        TransformationFactory('contrib.deactivate_trivial_constraints').apply_to(
            fp_nlp, tmp=True, ignore_infeasible=False,
            tolerance=config.constraint_tolerance)
    except ValueError:
        # Infeasibility detected at presolve: report an infeasible result
        # without calling the NLP solver.
        config.logger.warning(
            'infeasibility detected in deactivate_trivial_constraints')
        results = SolverResults()
        results.solver.termination_condition = tc.infeasible
        return fp_nlp, results
    # Solve the NLP
    nlpopt = SolverFactory(config.nlp_solver)
    nlp_args = dict(config.nlp_solver_args)
    set_solver_options(nlpopt, solve_data, config, solver_type='nlp')
    with SuppressInfeasibleWarning():
        with time_code(solve_data.timing, 'fp subproblem'):
            results = nlpopt.solve(
                fp_nlp, tee=config.nlp_solver_tee, **nlp_args)
    return fp_nlp, results
def solve_subproblem(solve_data, config):
    """Solves the Fixed-NLP (with fixed integers).

    This function sets up the 'fixed_nlp' by fixing binaries, sets continuous variables to their intial var values,
    precomputes dual values, deactivates trivial constraints, and then solves NLP model.

    Parameters
    ----------
    solve_data : MindtPySolveData
        Data container that holds solve-instance data.
    config : ConfigBlock
        The specific configurations for MindtPy.

    Returns
    -------
    fixed_nlp : Pyomo model
        Integer-variable-fixed NLP model.
    results : SolverResults
        Results from solving the Fixed-NLP.
    """
    fixed_nlp = solve_data.working_model.clone()
    MindtPy = fixed_nlp.MindtPy_utils
    solve_data.nlp_iter += 1

    # Set up NLP
    TransformationFactory('core.fix_integer_vars').apply_to(fixed_nlp)

    MindtPy.cuts.deactivate()
    if config.calculate_dual:
        fixed_nlp.tmp_duals = ComponentMap()
        # tmp_duals are the value of the dual variables stored before using deactivate trivial contraints
        # The values of the duals are computed as follows: (Complementary Slackness)
        #
        # | constraint | c_geq | status at x1 | tmp_dual (violation) |
        # |------------|-------|--------------|----------------------|
        # | g(x) <= b  | -1    | g(x1) <= b   | 0                    |
        # | g(x) <= b  | -1    | g(x1) > b    | g(x1) - b            |
        # | g(x) >= b  | +1    | g(x1) >= b   | 0                    |
        # | g(x) >= b  | +1    | g(x1) < b    | b - g(x1)            |
        evaluation_error = False
        for c in fixed_nlp.MindtPy_utils.constraint_list:
            # We prefer to include the upper bound as the right hand side since we are
            # considering c by default a (hopefully) convex function, which would make
            # c >= lb a nonconvex inequality which we wouldn't like to add linearizations
            # if we don't have to
            rhs = value(c.upper) if c.has_ub() else value(c.lower)
            c_geq = -1 if c.has_ub() else 1
            try:
                # max(0, violation) per the table above; c_geq restores the sign.
                fixed_nlp.tmp_duals[c] = c_geq * max(
                    0, c_geq * (rhs - value(c.body)))
            except (ValueError, OverflowError) as error:
                # Constraint body cannot be evaluated at the current point
                # (e.g. log of a negative number); flag for value reset below.
                fixed_nlp.tmp_duals[c] = None
                evaluation_error = True
        if evaluation_error:
            # Fall back to the initial variable values so the NLP solver
            # starts from an evaluable point.
            for nlp_var, orig_val in zip(MindtPy.variable_list,
                                         solve_data.initial_var_values):
                if not nlp_var.fixed and not nlp_var.is_binary():
                    nlp_var.set_value(orig_val, skip_validation=True)
    try:
        TransformationFactory('contrib.deactivate_trivial_constraints').apply_to(
            fixed_nlp, tmp=True, ignore_infeasible=False,
            tolerance=config.constraint_tolerance)
    except InfeasibleConstraintException:
        # Presolve proved infeasibility: return without calling the NLP solver.
        config.logger.warning(
            'infeasibility detected in deactivate_trivial_constraints')
        results = SolverResults()
        results.solver.termination_condition = tc.infeasible
        return fixed_nlp, results
    # Solve the NLP
    nlpopt = SolverFactory(config.nlp_solver)
    nlp_args = dict(config.nlp_solver_args)
    set_solver_options(nlpopt, solve_data, config, solver_type='nlp')
    with SuppressInfeasibleWarning():
        with time_code(solve_data.timing, 'fixed subproblem'):
            results = nlpopt.solve(
                fixed_nlp, tee=config.nlp_solver_tee, **nlp_args)
    return fixed_nlp, results
def solve_feasibility_subproblem(solve_data, config):
    """Solves a feasibility NLP if the fixed_nlp problem is infeasible.

    Args:
        solve_data (MindtPySolveData): data container that holds solve-instance data.
        config (ConfigBlock): the specific configurations for MindtPy.

    Returns:
        feas_subproblem (Pyomo model): feasibility NLP from the model.
        feas_soln (SolverResults): results from solving the feasibility NLP.
    """
    feas_subproblem = solve_data.working_model.clone()
    add_feas_slacks(feas_subproblem, config)

    MindtPy = feas_subproblem.MindtPy_utils
    if MindtPy.find_component('objective_value') is not None:
        MindtPy.objective_value.value = 0

    # Minimize constraint violation instead of the original objective;
    # nonlinear constraints are replaced by their slacked counterparts.
    next(feas_subproblem.component_data_objects(
        Objective, active=True)).deactivate()
    for constr in feas_subproblem.MindtPy_utils.nonlinear_constraint_list:
        constr.deactivate()

    MindtPy.feas_opt.activate()
    if config.feasibility_norm == 'L1':
        MindtPy.feas_obj = Objective(
            expr=sum(s for s in MindtPy.feas_opt.slack_var[...]),
            sense=minimize)
    elif config.feasibility_norm == 'L2':
        MindtPy.feas_obj = Objective(
            expr=sum(s * s for s in MindtPy.feas_opt.slack_var[...]),
            sense=minimize)
    else:
        # L_infinity norm: a single scalar slack variable bounds all violations.
        MindtPy.feas_obj = Objective(
            expr=MindtPy.feas_opt.slack_var,
            sense=minimize)
    TransformationFactory('core.fix_integer_vars').apply_to(feas_subproblem)
    nlpopt = SolverFactory(config.nlp_solver)
    nlp_args = dict(config.nlp_solver_args)
    set_solver_options(nlpopt, solve_data, config, solver_type='nlp')
    with SuppressInfeasibleWarning():
        try:
            with time_code(solve_data.timing, 'feasibility subproblem'):
                feas_soln = nlpopt.solve(
                    feas_subproblem, tee=config.nlp_solver_tee, **nlp_args)
        except (ValueError, OverflowError):
            # The model could not be evaluated at the current point. Reset the
            # free continuous variables to their initial values and retry once.
            # Use set_value(..., skip_validation=True) — consistent with
            # solve_subproblem — so out-of-bounds initial values do not raise.
            for nlp_var, orig_val in zip(MindtPy.variable_list,
                                         solve_data.initial_var_values):
                if not nlp_var.fixed and not nlp_var.is_binary():
                    nlp_var.set_value(orig_val, skip_validation=True)
            with time_code(solve_data.timing, 'feasibility subproblem'):
                feas_soln = nlpopt.solve(
                    feas_subproblem, tee=config.nlp_solver_tee, **nlp_args)
    subprob_terminate_cond = feas_soln.solver.termination_condition
    if subprob_terminate_cond in {tc.optimal, tc.locallyOptimal, tc.feasible}:
        # Propagate the feasibility solution back to the working model.
        copy_var_list_values(
            MindtPy.variable_list,
            solve_data.working_model.MindtPy_utils.variable_list,
            config)
    elif subprob_terminate_cond in {tc.infeasible, tc.noSolution}:
        config.logger.error('Feasibility subproblem infeasible. '
                            'This should never happen.')
        solve_data.should_terminate = True
        solve_data.results.solver.status = SolverStatus.error
        return feas_subproblem, feas_soln
    elif subprob_terminate_cond is tc.maxIterations:
        config.logger.error(
            'Subsolver reached its maximum number of iterations without converging, '
            'consider increasing the iterations limit of the subsolver or reviewing your formulation.')
        solve_data.should_terminate = True
        solve_data.results.solver.status = SolverStatus.error
        return feas_subproblem, feas_soln
    else:
        # Fixed: previously called config.error(...), but ConfigBlock has no
        # error() method, so this branch raised AttributeError instead of
        # logging. Route through the configured logger like the other branches.
        config.logger.error(
            'MindtPy unable to handle feasibility subproblem termination condition '
            'of {}'.format(subprob_terminate_cond))
        solve_data.should_terminate = True
        solve_data.results.solver.status = SolverStatus.error
        return feas_subproblem, feas_soln

    # A (near-)zero violation means the "infeasible" fixed NLP was actually
    # feasible; warn the user to inspect the NLP solver output.
    if value(MindtPy.feas_obj.expr) <= config.zero_tolerance:
        config.logger.warning(
            'The objective value %.4E of feasibility problem is less than zero_tolerance. '
            'This indicates that the nlp subproblem is feasible, although it is found infeasible in the previous step. '
            'Check the nlp solver output' % value(MindtPy.feas_obj.expr))
    return feas_subproblem, feas_soln
def init_rNLP(solve_data, config):
    """Initialize the problem by solving the relaxed NLP and then store the optimal variable
    values obtained from solving the rNLP.

    Parameters
    ----------
    solve_data: MindtPy Data Container
        data container that holds solve-instance data
    config: ConfigBlock
        contains the specific configurations for the algorithm
    """
    m = solve_data.working_model.clone()
    config.logger.info('Relaxed NLP: Solve relaxed integrality')
    MindtPy = m.MindtPy_utils
    TransformationFactory('core.relax_integer_vars').apply_to(m)
    nlp_args = dict(config.nlp_solver_args)
    nlpopt = SolverFactory(config.nlp_solver)
    set_solver_options(nlpopt, solve_data, config, solver_type='nlp')
    with SuppressInfeasibleWarning():
        results = nlpopt.solve(m, tee=config.nlp_solver_tee, **nlp_args)
    subprob_terminate_cond = results.solver.termination_condition
    if subprob_terminate_cond in {tc.optimal, tc.feasible, tc.locallyOptimal}:
        if subprob_terminate_cond in {tc.feasible, tc.locallyOptimal}:
            config.logger.info('relaxed NLP is not solved to optimality.')
        # Duals are only available/needed when dual calculation is enabled.
        dual_values = list(
            m.dual[c] for c in MindtPy.constraint_list) if config.calculate_dual else None
        # Add OA cut
        # This covers the case when the Lower bound does not exist.
        # TODO: should we use the bound of the rNLP here?
        # Update the dual bound from the relaxation: LB for minimization,
        # UB for maximization. NaN means the solver reported no bound.
        if solve_data.objective_sense == minimize:
            if not math.isnan(results.problem.lower_bound):
                solve_data.LB = results.problem.lower_bound
                solve_data.bound_improved = solve_data.LB > solve_data.LB_progress[-1]
                solve_data.LB_progress.append(results.problem.lower_bound)
        elif not math.isnan(results.problem.upper_bound):
            solve_data.UB = results.problem.upper_bound
            solve_data.bound_improved = solve_data.UB < solve_data.UB_progress[-1]
            solve_data.UB_progress.append(results.problem.upper_bound)
        main_objective = MindtPy.objective_list[-1]
        config.logger.info(
            'Relaxed NLP: OBJ: %s LB: %s UB: %s TIME:%ss'
            % (value(main_objective.expr), solve_data.LB, solve_data.UB,
               round(get_main_elapsed_time(solve_data.timing), 2)))
        # Seed the MIP (and, for FP, the working model) with the relaxed
        # solution; integrality is ignored since the rNLP is continuous.
        if config.strategy in {'OA', 'GOA', 'FP'}:
            copy_var_list_values(m.MindtPy_utils.variable_list,
                                 solve_data.mip.MindtPy_utils.variable_list,
                                 config, ignore_integrality=True)
            if config.init_strategy == 'FP':
                copy_var_list_values(m.MindtPy_utils.variable_list,
                                     solve_data.working_model.MindtPy_utils.variable_list,
                                     config, ignore_integrality=True)
            if config.strategy == 'OA':
                add_oa_cuts(solve_data.mip, dual_values, solve_data, config)
            elif config.strategy == 'GOA':
                add_affine_cuts(solve_data, config)
            # TODO check if value of the binary or integer varibles is 0/1 or integer value.
            for var in solve_data.mip.MindtPy_utils.discrete_variable_list:
                var.value = int(round(var.value))
    elif subprob_terminate_cond in {tc.infeasible, tc.noSolution}:
        # TODO fail? try something else?
        config.logger.info('Initial relaxed NLP problem is infeasible. '
                           'Problem may be infeasible.')
    elif subprob_terminate_cond is tc.maxTimeLimit:
        config.logger.info(
            'NLP subproblem failed to converge within time limit.')
        solve_data.results.solver.termination_condition = tc.maxTimeLimit
    elif subprob_terminate_cond is tc.maxIterations:
        config.logger.info(
            'NLP subproblem failed to converge within iteration limit.')
    else:
        raise ValueError(
            'MindtPy unable to handle relaxed NLP termination condition '
            'of %s. Solver message: %s' % (
                subprob_terminate_cond, results.solver.message))