def test_handle_termination_condition(self):
    """Exercise the MindtPy termination-condition handlers on a small MINLP.

    Builds a SimpleMINLP instance, sets up the solve data and feasibility
    machinery by hand, then drives each handler
    (``handle_subproblem_other_termination``, ``handle_main_other_conditions``,
    ``handle_feasibility_subproblem_tc``, ``handle_nlp_subproblem_tc``,
    ``handle_fp_main_tc``) with forced termination conditions and checks the
    resulting solver status / termination condition recorded in ``solve_data``.
    Also covers ``generate_norm_constraint``, ``set_solver_options`` and
    ``algorithm_should_terminate``.
    """
    model = SimpleMINLP()
    config = _get_MindtPy_config()
    solve_data = set_up_solve_data(model, config)
    with time_code(solve_data.timing, 'total', is_main_timer=True), \
            create_utility_block(solve_data.working_model, 'MindtPy_utils',
                                 solve_data):
        # (fix) this assignment was duplicated on two consecutive lines;
        # a single binding is sufficient.
        MindtPy = solve_data.working_model.MindtPy_utils
        setup_results_object(solve_data, config)
        # NOTE: 'updata_var_con_list' is the keyword name as declared by
        # process_objective (typo preserved from the API).
        process_objective(
            solve_data, config,
            move_linear_objective=(config.init_strategy == 'FP'
                                   or config.add_regularization is not None),
            use_mcpp=config.use_mcpp,
            updata_var_con_list=config.add_regularization is None)

        # Replicate the feasibility-subproblem scaffolding normally built by
        # MindtPy's setup so the handlers below have something to operate on.
        feas = MindtPy.feas_opt = Block()
        feas.deactivate()
        feas.feas_constraints = ConstraintList(
            doc='Feasibility Problem Constraints')
        lin = MindtPy.cuts = Block()
        lin.deactivate()
        if config.feasibility_norm == 'L1' or config.feasibility_norm == 'L2':
            feas.nl_constraint_set = RangeSet(
                len(MindtPy.nonlinear_constraint_list),
                doc='Integer index set over the nonlinear constraints.')
            # Create slack variables for feasibility problem
            feas.slack_var = Var(feas.nl_constraint_set,
                                 domain=NonNegativeReals, initialize=1)
        else:
            feas.slack_var = Var(domain=NonNegativeReals, initialize=1)

        # no-good cuts exclude particular discrete decisions
        lin.no_good_cuts = ConstraintList(doc='no-good cuts')

        fixed_nlp = solve_data.working_model.clone()
        TransformationFactory('core.fix_integer_vars').apply_to(fixed_nlp)

        MindtPy_initialize_main(solve_data, config)

        # test handle_subproblem_other_termination
        termination_condition = tc.maxIterations
        config.add_no_good_cuts = True
        handle_subproblem_other_termination(fixed_nlp, termination_condition,
                                            solve_data, config)
        self.assertEqual(
            len(solve_data.mip.MindtPy_utils.cuts.no_good_cuts), 1)

        # test handle_main_other_conditions
        main_mip, main_mip_results = solve_main(solve_data, config)
        main_mip_results.solver.termination_condition = tc.infeasible
        handle_main_other_conditions(solve_data.mip, main_mip_results,
                                     solve_data, config)
        self.assertIs(solve_data.results.solver.termination_condition,
                      tc.feasible)

        # An unbounded main problem should add an objective_bound constraint.
        main_mip_results.solver.termination_condition = tc.unbounded
        handle_main_other_conditions(solve_data.mip, main_mip_results,
                                     solve_data, config)
        self.assertIn(main_mip.MindtPy_utils.objective_bound,
                      main_mip.component_data_objects(ctype=Constraint))

        main_mip.MindtPy_utils.del_component('objective_bound')
        main_mip_results.solver.termination_condition = tc.infeasibleOrUnbounded
        handle_main_other_conditions(solve_data.mip, main_mip_results,
                                     solve_data, config)
        self.assertIn(main_mip.MindtPy_utils.objective_bound,
                      main_mip.component_data_objects(ctype=Constraint))

        main_mip_results.solver.termination_condition = tc.maxTimeLimit
        handle_main_other_conditions(solve_data.mip, main_mip_results,
                                     solve_data, config)
        self.assertIs(solve_data.results.solver.termination_condition,
                      tc.maxTimeLimit)

        # tc.other with a feasible solution should copy the MIP values
        # into the working model.
        main_mip_results.solver.termination_condition = tc.other
        main_mip_results.solution.status = SolutionStatus.feasible
        handle_main_other_conditions(solve_data.mip, main_mip_results,
                                     solve_data, config)
        for v1, v2 in zip(
                main_mip.MindtPy_utils.variable_list,
                solve_data.working_model.MindtPy_utils.variable_list):
            self.assertEqual(v1.value, v2.value)

        # test handle_feasibility_subproblem_tc
        feas_subproblem = solve_data.working_model.clone()
        add_feas_slacks(feas_subproblem, config)
        MindtPy = feas_subproblem.MindtPy_utils
        MindtPy.feas_opt.activate()
        if config.feasibility_norm == 'L1':
            MindtPy.feas_obj = Objective(
                expr=sum(s for s in MindtPy.feas_opt.slack_var[...]),
                sense=minimize)
        elif config.feasibility_norm == 'L2':
            MindtPy.feas_obj = Objective(
                expr=sum(s * s for s in MindtPy.feas_opt.slack_var[...]),
                sense=minimize)
        else:
            MindtPy.feas_obj = Objective(expr=MindtPy.feas_opt.slack_var,
                                         sense=minimize)

        handle_feasibility_subproblem_tc(tc.optimal, MindtPy,
                                         solve_data, config)
        handle_feasibility_subproblem_tc(tc.infeasible, MindtPy,
                                         solve_data, config)
        self.assertIs(solve_data.should_terminate, True)
        self.assertIs(solve_data.results.solver.status, SolverStatus.error)

        solve_data.should_terminate = False
        solve_data.results.solver.status = None
        handle_feasibility_subproblem_tc(tc.maxIterations, MindtPy,
                                         solve_data, config)
        self.assertIs(solve_data.should_terminate, True)
        self.assertIs(solve_data.results.solver.status, SolverStatus.error)

        solve_data.should_terminate = False
        solve_data.results.solver.status = None
        handle_feasibility_subproblem_tc(tc.solverFailure, MindtPy,
                                         solve_data, config)
        self.assertIs(solve_data.should_terminate, True)
        self.assertIs(solve_data.results.solver.status, SolverStatus.error)

        # test NLP subproblem infeasible
        solve_data.working_model.Y[1].value = 0
        solve_data.working_model.Y[2].value = 0
        solve_data.working_model.Y[3].value = 0
        fixed_nlp, fixed_nlp_results = solve_subproblem(solve_data, config)
        solve_data.working_model.Y[1].value = None
        solve_data.working_model.Y[2].value = None
        solve_data.working_model.Y[3].value = None

        # test handle_nlp_subproblem_tc
        fixed_nlp_results.solver.termination_condition = tc.maxTimeLimit
        handle_nlp_subproblem_tc(fixed_nlp, fixed_nlp_results,
                                 solve_data, config)
        self.assertIs(solve_data.should_terminate, True)
        self.assertIs(solve_data.results.solver.termination_condition,
                      tc.maxTimeLimit)

        fixed_nlp_results.solver.termination_condition = tc.maxEvaluations
        handle_nlp_subproblem_tc(fixed_nlp, fixed_nlp_results,
                                 solve_data, config)
        self.assertIs(solve_data.should_terminate, True)
        self.assertIs(solve_data.results.solver.termination_condition,
                      tc.maxEvaluations)

        # NOTE(review): the handler appears to report a maxIterations
        # subproblem as tc.maxEvaluations — confirm against
        # handle_nlp_subproblem_tc before changing this expectation.
        fixed_nlp_results.solver.termination_condition = tc.maxIterations
        handle_nlp_subproblem_tc(fixed_nlp, fixed_nlp_results,
                                 solve_data, config)
        self.assertIs(solve_data.should_terminate, True)
        self.assertIs(solve_data.results.solver.termination_condition,
                      tc.maxEvaluations)

        # test handle_fp_main_tc
        config.init_strategy = 'FP'
        solve_data.fp_iter = 1
        init_rNLP(solve_data, config)
        feas_main, feas_main_results = solve_main(solve_data, config, fp=True)
        feas_main_results.solver.termination_condition = tc.optimal
        fp_should_terminate = handle_fp_main_tc(feas_main_results,
                                                solve_data, config)
        self.assertIs(fp_should_terminate, False)

        feas_main_results.solver.termination_condition = tc.maxTimeLimit
        fp_should_terminate = handle_fp_main_tc(feas_main_results,
                                                solve_data, config)
        self.assertIs(fp_should_terminate, True)
        self.assertIs(solve_data.results.solver.termination_condition,
                      tc.maxTimeLimit)

        feas_main_results.solver.termination_condition = tc.infeasible
        fp_should_terminate = handle_fp_main_tc(feas_main_results,
                                                solve_data, config)
        self.assertIs(fp_should_terminate, True)

        feas_main_results.solver.termination_condition = tc.unbounded
        fp_should_terminate = handle_fp_main_tc(feas_main_results,
                                                solve_data, config)
        self.assertIs(fp_should_terminate, True)

        feas_main_results.solver.termination_condition = tc.other
        feas_main_results.solution.status = SolutionStatus.feasible
        fp_should_terminate = handle_fp_main_tc(feas_main_results,
                                                solve_data, config)
        self.assertIs(fp_should_terminate, False)

        feas_main_results.solver.termination_condition = tc.solverFailure
        fp_should_terminate = handle_fp_main_tc(feas_main_results,
                                                solve_data, config)
        self.assertIs(fp_should_terminate, True)

        # test generate_norm_constraint
        fp_nlp = solve_data.working_model.clone()
        config.fp_main_norm = 'L1'
        generate_norm_constraint(fp_nlp, solve_data, config)
        self.assertIsNotNone(
            fp_nlp.MindtPy_utils.find_component('L1_norm_constraint'))

        config.fp_main_norm = 'L2'
        generate_norm_constraint(fp_nlp, solve_data, config)
        self.assertIsNotNone(fp_nlp.find_component('norm_constraint'))

        fp_nlp.del_component('norm_constraint')
        config.fp_main_norm = 'L_infinity'
        generate_norm_constraint(fp_nlp, solve_data, config)
        self.assertIsNotNone(fp_nlp.find_component('norm_constraint'))

        # test set_solver_options
        config.mip_solver = 'gams'
        config.threads = 1
        opt = SolverFactory(config.mip_solver)
        set_solver_options(opt, solve_data, config,
                           'mip', regularization=False)

        config.mip_solver = 'gurobi'
        config.mip_regularization_solver = 'gurobi'
        config.regularization_mip_threads = 1
        opt = SolverFactory(config.mip_solver)
        set_solver_options(opt, solve_data, config, 'mip', regularization=True)

        # Cycle through each GAMS NLP sub-solver option.
        config.nlp_solver = 'gams'
        config.nlp_solver_args['solver'] = 'ipopt'
        set_solver_options(opt, solve_data, config,
                           'nlp', regularization=False)
        config.nlp_solver_args['solver'] = 'ipopth'
        set_solver_options(opt, solve_data, config,
                           'nlp', regularization=False)
        config.nlp_solver_args['solver'] = 'conopt'
        set_solver_options(opt, solve_data, config,
                           'nlp', regularization=False)
        config.nlp_solver_args['solver'] = 'msnlp'
        set_solver_options(opt, solve_data, config,
                           'nlp', regularization=False)
        config.nlp_solver_args['solver'] = 'baron'
        set_solver_options(opt, solve_data, config,
                           'nlp', regularization=False)

        # test algorithm_should_terminate
        solve_data.should_terminate = True
        solve_data.UB = float('inf')
        self.assertIs(
            algorithm_should_terminate(solve_data, config,
                                       check_cycling=False),
            True)
        self.assertIs(solve_data.results.solver.termination_condition,
                      tc.noSolution)

        solve_data.UB = 100
        self.assertIs(
            algorithm_should_terminate(solve_data, config,
                                       check_cycling=False),
            True)
        self.assertIs(solve_data.results.solver.termination_condition,
                      tc.feasible)

        solve_data.objective_sense = maximize
        solve_data.LB = float('-inf')
        self.assertIs(
            algorithm_should_terminate(solve_data, config,
                                       check_cycling=False),
            True)
        self.assertIs(solve_data.results.solver.termination_condition,
                      tc.noSolution)

        solve_data.LB = 100
        self.assertIs(
            algorithm_should_terminate(solve_data, config,
                                       check_cycling=False),
            True)
        self.assertIs(solve_data.results.solver.termination_condition,
                      tc.feasible)
def MindtPy_iteration_loop(solve_data, config):
    """
    Main loop for MindtPy Algorithms

    This is the outermost function for the algorithms in this package; this
    function controls the progression of solving the model.

    Each iteration: (1) solve the MILP main problem, (2) optionally solve the
    regularization main problem, (3) check termination/cycling, (4) solve the
    fixed-NLP subproblem (non-single-tree strategies), (5) for ECP, add ECP
    cuts. After the loop, the dual bound is corrected if no-good cuts or a
    tabu list were used.

    Parameters
    ----------
    solve_data: MindtPy Data Container
        data container that holds solve-instance data
    config: ConfigBlock
        contains the specific configurations for the algorithm
    """
    # Tracks whether cuts generated in the final iteration should be included
    # when correcting the bound after the loop.
    last_iter_cuts = False
    while solve_data.mip_iter < config.iteration_limit:

        config.logger.info(
            '---MindtPy main Iteration %s---'
            % (solve_data.mip_iter + 1))

        solve_data.mip_subiter = 0
        # solve MILP main problem
        if config.strategy in {'OA', 'GOA', 'ECP'}:
            main_mip, main_mip_results = solve_main(solve_data, config)
            if main_mip_results is not None:
                # In single-tree mode the termination handling happens inside
                # the solver's lazy callback, so it is skipped here.
                if not config.single_tree:
                    if main_mip_results.solver.termination_condition is tc.optimal:
                        handle_main_optimal(main_mip, solve_data, config)
                    elif main_mip_results.solver.termination_condition is tc.infeasible:
                        handle_main_infeasible(main_mip, solve_data, config)
                        last_iter_cuts = True
                        break
                    else:
                        handle_main_other_conditions(
                            main_mip, main_mip_results, solve_data, config)
                    # Call the MILP post-solve callback
                    with time_code(solve_data.timing, 'Call after main solve'):
                        config.call_after_main_solve(main_mip, solve_data)
            else:
                # solve_main returned no results: nothing more can be done.
                config.logger.info('Algorithm should terminate here.')
                break
        else:
            raise NotImplementedError()

        # regularization is activated after the first feasible solution is found.
        if config.add_regularization is not None and solve_data.best_solution_found is not None and not config.single_tree:
            # the main problem might be unbounded, regularization is activated only when a valid bound is provided.
            if (solve_data.objective_sense == minimize and solve_data.LB != float('-inf')) or (solve_data.objective_sense == maximize and solve_data.UB != float('inf')):
                main_mip, main_mip_results = solve_main(
                    solve_data, config, regularization_problem=True)
                handle_regularization_main_tc(
                    main_mip, main_mip_results, solve_data, config)

        if config.add_regularization is not None and config.single_tree:
            solve_data.curr_int_sol = get_integer_solution(
                solve_data.mip, string_zero=True)
            copy_var_list_values(
                main_mip.MindtPy_utils.variable_list,
                solve_data.working_model.MindtPy_utils.variable_list,
                config)
            # Only solve the fixed NLP when this integer assignment has not
            # been explored before.
            if solve_data.curr_int_sol not in set(solve_data.integer_list):
                fixed_nlp, fixed_nlp_result = solve_subproblem(
                    solve_data, config)
                handle_nlp_subproblem_tc(
                    fixed_nlp, fixed_nlp_result, solve_data, config)

        if algorithm_should_terminate(solve_data, config, check_cycling=True):
            last_iter_cuts = False
            break

        if not config.single_tree and config.strategy != 'ECP':  # if we don't use lazy callback, i.e. LP_NLP
            # Solve NLP subproblem
            # The constraint linearization happens in the handlers
            fixed_nlp, fixed_nlp_result = solve_subproblem(solve_data, config)
            handle_nlp_subproblem_tc(
                fixed_nlp, fixed_nlp_result, solve_data, config)

            # Call the NLP post-solve callback
            with time_code(solve_data.timing, 'Call after subproblem solve'):
                config.call_after_subproblem_solve(fixed_nlp, solve_data)

            if algorithm_should_terminate(solve_data, config,
                                          check_cycling=False):
                last_iter_cuts = True
                break

        if config.strategy == 'ECP':
            add_ecp_cuts(solve_data.mip, solve_data, config)

        # if config.strategy == 'PSC':
        #     # If the hybrid algorithm is not making progress, switch to OA.
        #     progress_required = 1E-6
        #     if solve_data.objective_sense == minimize:
        #         log = solve_data.LB_progress
        #         sign_adjust = 1
        #     else:
        #         log = solve_data.UB_progress
        #         sign_adjust = -1
        #     # Maximum number of iterations in which the lower (optimistic)
        #     # bound does not improve before switching to OA
        #     max_nonimprove_iter = 5
        #     making_progress = True
        #     # TODO-romeo Unneccesary for OA and ROA, right?
        #     for i in range(1, max_nonimprove_iter + 1):
        #         try:
        #             if (sign_adjust * log[-i]
        #                     <= (log[-i - 1] + progress_required)
        #                     * sign_adjust):
        #                 making_progress = False
        #             else:
        #                 making_progress = True
        #                 break
        #         except IndexError:
        #             # Not enough history yet, keep going.
        #             making_progress = True
        #             break
        #     if not making_progress and (
        #             config.strategy == 'hPSC' or
        #             config.strategy == 'PSC'):
        #         config.logger.info(
        #             'Not making enough progress for {} iterations. '
        #             'Switching to OA.'.format(max_nonimprove_iter))
        #         config.strategy = 'OA'

    # if add_no_good_cuts is True, the bound obtained in the last iteration
    # is not reliable. We correct it after the iteration.
    if (config.add_no_good_cuts or config.use_tabu_list) \
            and config.strategy != 'FP' \
            and not solve_data.should_terminate \
            and config.add_regularization is None:
        bound_fix(solve_data, config, last_iter_cuts)
def LazyOACallback_gurobi(cb_m, cb_opt, cb_where, solve_data, config):
    """This is a GUROBI callback function defined for LP/NLP based B&B algorithm.

    Invoked by Gurobi at each callback point; only acts on MIPSOL events
    (a new incumbent integer solution). At that point it records the solution,
    optionally adds OA cuts at the incumbent, optionally solves the
    regularization problem, checks bound convergence, skips already-explored
    integer combinations, and otherwise solves the fixed-NLP subproblem to
    generate new cuts.

    Parameters
    ----------
    cb_m : Pyomo model
        The MIP main problem.
    cb_opt : SolverFactory
        The gurobi_persistent solver.
    cb_where : int
        An enum member of gurobipy.GRB.Callback.
    solve_data : MindtPySolveData
        Data container that holds solve-instance data.
    config : ConfigBlock
        The specific configurations for MindtPy.
    """
    if cb_where == gurobipy.GRB.Callback.MIPSOL:
        # gurobipy.GRB.Callback.MIPSOL means that an integer solution is found during the branch and bound process
        if solve_data.should_terminate:
            # A handler elsewhere requested termination; stop the B&B run.
            cb_opt._solver_model.terminate()
            return
        # Pull the incumbent values into the Pyomo variable list.
        cb_opt.cbGetSolution(vars=cb_m.MindtPy_utils.variable_list)
        handle_lazy_main_feasible_solution_gurobi(
            cb_m, cb_opt, solve_data, config)

        if config.add_cuts_at_incumbent:
            if config.strategy == 'OA':
                add_oa_cuts(solve_data.mip, None, solve_data, config, cb_opt)

        # regularization is activated after the first feasible solution is found.
        if config.add_regularization is not None and solve_data.best_solution_found is not None:
            # the main problem might be unbounded, regularization is activated only when a valid bound is provided.
            if not solve_data.bound_improved and not solve_data.solution_improved:
                config.logger.debug(
                    'the bound and the best found solution have neither been improved.'
                    'We will skip solving the regularization problem and the Fixed-NLP subproblem')
                solve_data.solution_improved = False
                return
            if ((solve_data.objective_sense == minimize
                    and solve_data.LB != float('-inf'))
                    or (solve_data.objective_sense == maximize
                        and solve_data.UB != float('inf'))):
                main_mip, main_mip_results = solve_main(
                    solve_data, config, regularization_problem=True)
                handle_regularization_main_tc(
                    main_mip, main_mip_results, solve_data, config)

        # Bound convergence check: stop the whole branch and bound if the
        # gap is within tolerance.
        if solve_data.LB + config.bound_tolerance >= solve_data.UB:
            config.logger.info(
                'MindtPy exiting on bound convergence. '
                'LB: {} + (tol {}) >= UB: {}\n'.format(
                    solve_data.LB, config.bound_tolerance, solve_data.UB))
            solve_data.results.solver.termination_condition = tc.optimal
            cb_opt._solver_model.terminate()
            return

        # check if the same integer combination is obtained.
        solve_data.curr_int_sol = get_integer_solution(
            solve_data.working_model, string_zero=True)

        if solve_data.curr_int_sol in set(solve_data.integer_list):
            config.logger.debug(
                'This integer combination has been explored. '
                'We will skip solving the Fixed-NLP subproblem.')
            solve_data.solution_improved = False
            if config.strategy == 'GOA':
                # GOA may still add a no-good cut for the repeated combination.
                if config.add_no_good_cuts:
                    var_values = list(
                        v.value
                        for v in solve_data.working_model.MindtPy_utils.variable_list)
                    add_no_good_cuts(var_values, solve_data, config)
                return
            elif config.strategy == 'OA':
                return
        else:
            solve_data.integer_list.append(solve_data.curr_int_sol)

        # solve subproblem
        # The constraint linearization happens in the handlers
        fixed_nlp, fixed_nlp_result = solve_subproblem(solve_data, config)

        handle_nlp_subproblem_tc(
            fixed_nlp, fixed_nlp_result, solve_data, config, cb_opt)
def __call__(self):
    """This is an inherent function in LazyConstraintCallback in cplex.

    This function is called whenever an integer solution is found during the
    branch and bound process.

    Mirrors the Gurobi MIPSOL callback: record the incumbent, optionally add
    cuts at the incumbent, optionally solve the regularization problem, check
    bound convergence, skip repeated integer combinations, then solve the
    fixed-NLP subproblem and dispatch on its termination condition.
    """
    solve_data = self.solve_data
    config = self.config
    opt = self.opt
    main_mip = self.main_mip

    if solve_data.should_terminate:
        # Termination was requested by an earlier handler; abort the search.
        self.abort()
        return

    self.handle_lazy_main_feasible_solution(
        main_mip, solve_data, config, opt)

    if config.add_cuts_at_incumbent:
        self.copy_lazy_var_list_values(
            opt,
            main_mip.MindtPy_utils.variable_list,
            solve_data.mip.MindtPy_utils.variable_list,
            config)
        if config.strategy == 'OA':
            self.add_lazy_oa_cuts(
                solve_data.mip, None, solve_data, config, opt)

    # regularization is activated after the first feasible solution is found.
    if config.add_regularization is not None and solve_data.best_solution_found is not None:
        # the main problem might be unbounded, regularization is activated only when a valid bound is provided.
        if not solve_data.bound_improved and not solve_data.solution_improved:
            config.logger.debug(
                'the bound and the best found solution have neither been improved.'
                'We will skip solving the regularization problem and the Fixed-NLP subproblem')
            solve_data.solution_improved = False
            return
        if ((solve_data.objective_sense == minimize
                and solve_data.LB != float('-inf'))
                or (solve_data.objective_sense == maximize
                    and solve_data.UB != float('inf'))):
            main_mip, main_mip_results = solve_main(
                solve_data, config, regularization_problem=True)
            self.handle_lazy_regularization_problem(
                main_mip, main_mip_results, solve_data, config)

    # Bound convergence check: abort the search if the gap is within tolerance.
    if solve_data.LB + config.bound_tolerance >= solve_data.UB:
        config.logger.info(
            'MindtPy exiting on bound convergence. '
            'LB: {} + (tol {}) >= UB: {}\n'.format(
                solve_data.LB, config.bound_tolerance, solve_data.UB))
        solve_data.results.solver.termination_condition = tc.optimal
        self.abort()
        return

    # check if the same integer combination is obtained.
    solve_data.curr_int_sol = get_integer_solution(
        solve_data.working_model, string_zero=True)

    if solve_data.curr_int_sol in set(solve_data.integer_list):
        config.logger.debug(
            'This integer combination has been explored. '
            'We will skip solving the Fixed-NLP subproblem.')
        solve_data.solution_improved = False
        if config.strategy == 'GOA':
            # GOA may still add a no-good cut for the repeated combination.
            if config.add_no_good_cuts:
                var_values = list(
                    v.value
                    for v in solve_data.working_model.MindtPy_utils.variable_list)
                self.add_lazy_no_good_cuts(
                    var_values, solve_data, config, opt)
            return
        elif config.strategy == 'OA':
            return
    else:
        solve_data.integer_list.append(solve_data.curr_int_sol)

    # solve subproblem
    # The constraint linearization happens in the handlers
    fixed_nlp, fixed_nlp_result = solve_subproblem(solve_data, config)

    # add oa cuts
    if fixed_nlp_result.solver.termination_condition in {
            tc.optimal, tc.locallyOptimal, tc.feasible}:
        self.handle_lazy_subproblem_optimal(
            fixed_nlp, solve_data, config, opt)
        # Re-check convergence: the subproblem may have closed the gap.
        if solve_data.LB + config.bound_tolerance >= solve_data.UB:
            config.logger.info(
                'MindtPy exiting on bound convergence. '
                'LB: {} + (tol {}) >= UB: {}\n'.format(
                    solve_data.LB, config.bound_tolerance, solve_data.UB))
            solve_data.results.solver.termination_condition = tc.optimal
            return
    elif fixed_nlp_result.solver.termination_condition in {
            tc.infeasible, tc.noSolution}:
        self.handle_lazy_subproblem_infeasible(
            fixed_nlp, solve_data, config, opt)
    else:
        self.handle_lazy_subproblem_other_termination(
            fixed_nlp, fixed_nlp_result.solver.termination_condition,
            solve_data, config)
def handle_lazy_regularization_problem(self, main_mip, main_mip_results,
                                       solve_data, config):
    """Handles the termination condition of the regularization main problem in RLP/NLP.

    If the regularization problem is infeasible and ``reduce_level_coef`` is
    enabled, it is re-solved once with a halved level coefficient before the
    secondary termination conditions are examined.

    Parameters
    ----------
    main_mip : Pyomo model
        The MIP main problem.
    main_mip_results : SolverResults
        Results from solving the regularization MIP problem.
    solve_data : MindtPySolveData
        Data container that holds solve-instance data.
    config : ConfigBlock
        The specific configurations for MindtPy.

    Raises
    ------
    ValueError
        MindtPy unable to handle the termination condition of the
        regularization problem.
    """
    if main_mip_results.solver.termination_condition in {
            tc.optimal, tc.feasible}:
        handle_main_optimal(main_mip, solve_data, config, update_bound=False)
    elif main_mip_results.solver.termination_condition in {
            tc.infeasible, tc.infeasibleOrUnbounded}:
        config.logger.info(
            solve_data.log_note_formatter.format(
                solve_data.mip_iter,
                'Reg ' + solve_data.regularization_mip_type,
                'infeasible'))
        if config.reduce_level_coef:
            # Retry once with a halved level coefficient before giving up.
            config.level_coef = config.level_coef / 2
            main_mip, main_mip_results = solve_main(
                solve_data, config, regularization_problem=True)
            if main_mip_results.solver.termination_condition in {
                    tc.optimal, tc.feasible}:
                handle_main_optimal(
                    main_mip, solve_data, config, update_bound=False)
            elif main_mip_results.solver.termination_condition is tc.infeasible:
                config.logger.info(
                    'regularization problem still infeasible with reduced level_coef. '
                    'NLP subproblem is generated based on the incumbent solution of the main problem.')
            elif main_mip_results.solver.termination_condition is tc.maxTimeLimit:
                config.logger.info(
                    'Regularization problem failed to converge within the time limit.')
                solve_data.results.solver.termination_condition = tc.maxTimeLimit
            elif main_mip_results.solver.termination_condition is tc.unbounded:
                # (fix) message had a typo ("ubounded") and the adjacent
                # literals concatenated without a separating space.
                config.logger.info(
                    'Regularization problem unbounded. '
                    'Sometimes solving MIQP using cplex, unbounded means infeasible.')
            elif main_mip_results.solver.termination_condition is tc.unknown:
                config.logger.info(
                    'Termination condition of the regularization problem is unknown.')
                if main_mip_results.problem.lower_bound != float('-inf'):
                    # A finite bound with unknown status indicates the
                    # solution limit was hit; adopt the incumbent.
                    config.logger.info('Solution limit has been reached.')
                    handle_main_optimal(
                        main_mip, solve_data, config, update_bound=False)
                else:
                    # (fix) message strings previously ran together without
                    # spaces and misspelled "information".
                    config.logger.info(
                        'No solution obtained from the regularization subproblem. '
                        'Please set mip_solver_tee to True for more information. '
                        'The solution of the OA main problem will be adopted.')
            else:
                raise ValueError(
                    'MindtPy unable to handle regularization problem termination condition '
                    'of %s. Solver message: %s' %
                    (main_mip_results.solver.termination_condition,
                     main_mip_results.solver.message))
        elif config.use_bb_tree_incumbent:
            config.logger.debug(
                'Fixed subproblem will be generated based on the incumbent solution of the main problem.')
    elif main_mip_results.solver.termination_condition is tc.maxTimeLimit:
        config.logger.info(
            'Regularization problem failed to converge within the time limit.')
        solve_data.results.solver.termination_condition = tc.maxTimeLimit
    elif main_mip_results.solver.termination_condition is tc.unbounded:
        # (fix) same typo / missing-space fix as the retry branch above.
        config.logger.info(
            'Regularization problem unbounded. '
            'Sometimes solving MIQP using cplex, unbounded means infeasible.')
    elif main_mip_results.solver.termination_condition is tc.unknown:
        config.logger.info(
            'Termination condition of the regularization problem is unknown.')
        if main_mip_results.problem.lower_bound != float('-inf'):
            config.logger.info('Solution limit has been reached.')
            handle_main_optimal(
                main_mip, solve_data, config, update_bound=False)
    else:
        raise ValueError(
            'MindtPy unable to handle regularization problem termination condition '
            'of %s. Solver message: %s' %
            (main_mip_results.solver.termination_condition,
             main_mip_results.solver.message))
def fp_loop(solve_data, config):
    """Feasibility pump loop.

    Alternates between the FP main MILP and the FP-NLP subproblem until a
    handler signals termination or the FP iteration limit is reached, then
    removes the FP-specific objectives and cuts from the MIP so the main
    algorithm can proceed.

    Parameters
    ----------
    solve_data : MindtPySolveData
        Data container that holds solve-instance data.
    config : ConfigBlock
        The specific configurations for MindtPy.

    Raises
    ------
    ValueError
        MindtPy unable to handle the termination condition of the FP-NLP
        subproblem.
    """
    while solve_data.fp_iter < config.fp_iteration_limit:

        solve_data.mip_subiter = 0
        # solve MILP main problem
        feas_main, feas_main_results = solve_main(
            solve_data, config, fp=True)
        fp_should_terminate = handle_fp_main_tc(
            feas_main_results, solve_data, config)
        if fp_should_terminate:
            break

        # Solve NLP subproblem
        # The constraint linearization happens in the handlers
        fp_nlp, fp_nlp_result = solve_fp_subproblem(solve_data, config)

        if fp_nlp_result.solver.termination_condition in {
                tc.optimal, tc.locallyOptimal, tc.feasible}:
            config.logger.info(
                solve_data.log_formatter.format(
                    solve_data.fp_iter, 'FP-NLP',
                    value(fp_nlp.MindtPy_utils.fp_nlp_obj),
                    solve_data.primal_bound, solve_data.dual_bound,
                    solve_data.rel_gap,
                    get_main_elapsed_time(solve_data.timing)))
            handle_fp_subproblem_optimal(fp_nlp, solve_data, config)
        elif fp_nlp_result.solver.termination_condition in {
                tc.infeasible, tc.noSolution}:
            # An infeasible FP-NLP means the pump cannot continue.
            config.logger.error('Feasibility pump NLP subproblem infeasible')
            solve_data.should_terminate = True
            solve_data.results.solver.status = SolverStatus.error
            return
        elif fp_nlp_result.solver.termination_condition is tc.maxIterations:
            config.logger.error(
                'Feasibility pump NLP subproblem failed to converge within iteration limit.')
            solve_data.should_terminate = True
            solve_data.results.solver.status = SolverStatus.error
            return
        else:
            raise ValueError(
                'MindtPy unable to handle NLP subproblem termination '
                'condition of {}'.format(
                    fp_nlp_result.solver.termination_condition))
        # Call the NLP post-solve callback
        config.call_after_subproblem_solve(fp_nlp, solve_data)
        solve_data.fp_iter += 1

    # Clean up the FP-specific objective components on the MIP.
    solve_data.mip.MindtPy_utils.del_component('fp_mip_obj')

    if config.fp_main_norm == 'L1':
        solve_data.mip.MindtPy_utils.del_component('L1_obj')
    elif config.fp_main_norm == 'L_infinity':
        solve_data.mip.MindtPy_utils.del_component('L_infinity_obj')

    # deactivate the improving_objective_cut
    solve_data.mip.MindtPy_utils.cuts.del_component('improving_objective_cut')
    if not config.fp_transfercuts:
        # Cuts collected during FP are not carried over to the main algorithm.
        for c in solve_data.mip.MindtPy_utils.cuts.oa_cuts:
            c.deactivate()
        for c in solve_data.mip.MindtPy_utils.cuts.no_good_cuts:
            c.deactivate()
    if config.fp_projcuts:
        solve_data.working_model.MindtPy_utils.cuts.del_component(
            'fp_orthogonality_cuts')