def LazyOACallback_gurobi(cb_m, cb_opt, cb_where, solve_data, config):
    """This is a GUROBI callback function defined for LP/NLP based B&B algorithm.

    Parameters
    ----------
    cb_m : Pyomo model
        The MIP main problem.
    cb_opt : SolverFactory
        The gurobi_persistent solver.
    cb_where : int
        An enum member of gurobipy.GRB.Callback.
    solve_data : MindtPySolveData
        Data container that holds solve-instance data.
    config : ConfigBlock
        The specific configurations for MindtPy.
    """
    if cb_where == gurobipy.GRB.Callback.MIPSOL:
        # gurobipy.GRB.Callback.MIPSOL means that an integer solution was found
        # during the branch-and-bound process.
        if solve_data.should_terminate:
            cb_opt._solver_model.terminate()
            return
        cb_opt.cbGetSolution(vars=cb_m.MindtPy_utils.variable_list)
        handle_lazy_main_feasible_solution_gurobi(cb_m, cb_opt, solve_data, config)

        if config.add_cuts_at_incumbent:
            if config.strategy == 'OA':
                add_oa_cuts(solve_data.mip, None, solve_data, config, cb_opt)

        # Regularization is activated after the first feasible solution is found.
        if config.add_regularization is not None and solve_data.best_solution_found is not None:
            # The main problem might be unbounded; regularization is activated only
            # when a valid bound is provided.
            if not solve_data.dual_bound_improved and not solve_data.primal_bound_improved:
                config.logger.debug(
                    'Neither the dual bound nor the best found solution has been improved. '
                    'We will skip solving the regularization problem and the Fixed-NLP subproblem.')
                solve_data.primal_bound_improved = False
                return
            if solve_data.dual_bound != solve_data.dual_bound_progress[0]:
                main_mip, main_mip_results = solve_main(
                    solve_data, config, regularization_problem=True)
                handle_regularization_main_tc(
                    main_mip, main_mip_results, solve_data, config)

        if abs(solve_data.primal_bound - solve_data.dual_bound) <= config.absolute_bound_tolerance:
            config.logger.info(
                'MindtPy exiting on bound convergence. '
                '|Primal Bound: {} - Dual Bound: {}| <= (absolute tolerance {}) \n'.format(
                    solve_data.primal_bound, solve_data.dual_bound,
                    config.absolute_bound_tolerance))
            solve_data.results.solver.termination_condition = tc.optimal
            cb_opt._solver_model.terminate()
            return

        # Check whether the same integer combination has been obtained before.
        solve_data.curr_int_sol = get_integer_solution(
            solve_data.working_model, string_zero=True)

        if solve_data.curr_int_sol in set(solve_data.integer_list):
            config.logger.debug(
                'This integer combination has been explored. '
                'We will skip solving the Fixed-NLP subproblem.')
            solve_data.primal_bound_improved = False
            if config.strategy == 'GOA':
                if config.add_no_good_cuts:
                    var_values = list(
                        v.value for v in solve_data.working_model.MindtPy_utils.variable_list)
                    add_no_good_cuts(var_values, solve_data, config)
                return
            elif config.strategy == 'OA':
                return
        else:
            solve_data.integer_list.append(solve_data.curr_int_sol)

        # Solve the Fixed-NLP subproblem.
        # The constraint linearization happens in the handlers.
        fixed_nlp, fixed_nlp_result = solve_subproblem(solve_data, config)
        handle_nlp_subproblem_tc(
            fixed_nlp, fixed_nlp_result, solve_data, config, cb_opt)
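
# A minimal sketch, NOT MindtPy's actual call site, of how a callback with the
# signature above can be attached through Pyomo's gurobi_persistent interface.
# The helper name `attach_lazy_oa_callback` and the use of functools.partial to
# bind solve_data/config are illustrative assumptions; set_instance, set_callback
# and set_gurobi_param follow the callback example in Pyomo's persistent-solver
# documentation.
from functools import partial

from pyomo.environ import SolverFactory


def attach_lazy_oa_callback(pyomo_model, solve_data, config):
    opt = SolverFactory('gurobi_persistent')
    opt.set_instance(pyomo_model)
    # Lazy constraints added from a callback require these Gurobi parameters.
    opt.set_gurobi_param('PreCrush', 1)
    opt.set_gurobi_param('LazyConstraints', 1)
    # gurobi_persistent calls the registered function as func(cb_m, cb_opt, cb_where);
    # the extra solve_data/config arguments are bound here via partial.
    opt.set_callback(partial(LazyOACallback_gurobi,
                             solve_data=solve_data, config=config))
    return opt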
def MindtPy_iteration_loop(solve_data, config):
    """Main loop for MindtPy Algorithms.

    This is the outermost function for the algorithms in this package; this
    function controls the progression of solving the model.

    Parameters
    ----------
    solve_data : MindtPy Data Container
        Data container that holds solve-instance data.
    config : ConfigBlock
        Contains the specific configurations for the algorithm.
    """
    last_iter_cuts = False
    while solve_data.mip_iter < config.iteration_limit:
        config.logger.info(
            '---MindtPy main Iteration %s---' % (solve_data.mip_iter + 1))
        solve_data.mip_subiter = 0
        # Solve the MILP main problem.
        if config.strategy in {'OA', 'GOA', 'ECP'}:
            main_mip, main_mip_results = solve_main(solve_data, config)
            if main_mip_results is not None:
                if not config.single_tree:
                    if main_mip_results.solver.termination_condition is tc.optimal:
                        handle_main_optimal(main_mip, solve_data, config)
                    elif main_mip_results.solver.termination_condition is tc.infeasible:
                        handle_main_infeasible(main_mip, solve_data, config)
                        last_iter_cuts = True
                        break
                    else:
                        handle_main_other_conditions(
                            main_mip, main_mip_results, solve_data, config)
                    # Call the MILP post-solve callback.
                    with time_code(solve_data.timing, 'Call after main solve'):
                        config.call_after_main_solve(main_mip, solve_data)
            else:
                config.logger.info('Algorithm should terminate here.')
                break
        else:
            raise NotImplementedError()

        # Regularization is activated after the first feasible solution is found.
        if config.add_regularization is not None and solve_data.best_solution_found is not None and not config.single_tree:
            # The main problem might be unbounded; regularization is activated only
            # when a valid bound is provided.
            if (solve_data.objective_sense == minimize and solve_data.LB != float('-inf')) or \
                    (solve_data.objective_sense == maximize and solve_data.UB != float('inf')):
                main_mip, main_mip_results = solve_main(
                    solve_data, config, regularization_problem=True)
                handle_regularization_main_tc(
                    main_mip, main_mip_results, solve_data, config)

        if config.add_regularization is not None and config.single_tree:
            solve_data.curr_int_sol = get_integer_solution(
                solve_data.mip, string_zero=True)
            copy_var_list_values(
                main_mip.MindtPy_utils.variable_list,
                solve_data.working_model.MindtPy_utils.variable_list,
                config)
            if solve_data.curr_int_sol not in set(solve_data.integer_list):
                fixed_nlp, fixed_nlp_result = solve_subproblem(
                    solve_data, config)
                handle_nlp_subproblem_tc(
                    fixed_nlp, fixed_nlp_result, solve_data, config)

        if algorithm_should_terminate(solve_data, config, check_cycling=True):
            last_iter_cuts = False
            break

        if not config.single_tree and config.strategy != 'ECP':  # i.e. we don't use the lazy callback (LP/NLP)
            # Solve the NLP subproblem.
            # The constraint linearization happens in the handlers.
            fixed_nlp, fixed_nlp_result = solve_subproblem(solve_data, config)
            handle_nlp_subproblem_tc(
                fixed_nlp, fixed_nlp_result, solve_data, config)

            # Call the NLP post-solve callback.
            with time_code(solve_data.timing, 'Call after subproblem solve'):
                config.call_after_subproblem_solve(fixed_nlp, solve_data)

        if algorithm_should_terminate(solve_data, config, check_cycling=False):
            last_iter_cuts = True
            break

        if config.strategy == 'ECP':
            add_ecp_cuts(solve_data.mip, solve_data, config)

        # if config.strategy == 'PSC':
        #     # If the hybrid algorithm is not making progress, switch to OA.
        #     progress_required = 1E-6
        #     if solve_data.objective_sense == minimize:
        #         log = solve_data.LB_progress
        #         sign_adjust = 1
        #     else:
        #         log = solve_data.UB_progress
        #         sign_adjust = -1
        #     # Maximum number of iterations in which the lower (optimistic)
        #     # bound does not improve before switching to OA
        #     max_nonimprove_iter = 5
        #     making_progress = True
        #     # TODO-romeo Unnecessary for OA and ROA, right?
        #     for i in range(1, max_nonimprove_iter + 1):
        #         try:
        #             if (sign_adjust * log[-i]
        #                     <= (log[-i - 1] + progress_required)
        #                     * sign_adjust):
        #                 making_progress = False
        #             else:
        #                 making_progress = True
        #                 break
        #         except IndexError:
        #             # Not enough history yet, keep going.
        #             making_progress = True
        #             break
        #     if not making_progress and (
        #             config.strategy == 'hPSC' or
        #             config.strategy == 'PSC'):
        #         config.logger.info(
        #             'Not making enough progress for {} iterations. '
        #             'Switching to OA.'.format(max_nonimprove_iter))
        #         config.strategy = 'OA'

    # If add_no_good_cuts is True, the bound obtained in the last iteration is not reliable.
    # We correct it after the iteration.
    if (config.add_no_good_cuts or config.use_tabu_list) and \
            config.strategy != 'FP' and \
            not solve_data.should_terminate and \
            config.add_regularization is None:
        bound_fix(solve_data, config, last_iter_cuts)
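
# A self-contained illustration (hypothetical helper names, not MindtPy's
# get_integer_solution) of the cycling check driven by solve_data.integer_list
# in the loops above: each integer combination is reduced to a hashable key and
# compared against the combinations already explored in earlier iterations.
def _integer_solution_key(integer_values):
    # e.g. [1.0, 0.0, 2.0] -> '1-0-2'
    return '-'.join(str(int(round(v))) for v in integer_values)


def _already_explored(integer_values, explored_keys):
    key = _integer_solution_key(integer_values)
    if key in explored_keys:
        return True
    explored_keys.add(key)
    return False


# Usage: the third candidate repeats the first one and would be skipped.
# explored = set()
# for candidate in ([1, 0, 2], [0, 1, 1], [1, 0, 2]):
#     print(candidate, _already_explored(candidate, explored))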
def MindtPy_iteration_loop(solve_data, config):
    """Main loop for MindtPy Algorithms.

    This is the outermost function for the algorithms in this package; this
    function controls the progression of solving the model.

    Args:
        solve_data (MindtPySolveData): data container that holds solve-instance data.
        config (ConfigBlock): the specific configurations for MindtPy.

    Raises:
        ValueError: the strategy value is not correct or not included.
    """
    last_iter_cuts = False
    while solve_data.mip_iter < config.iteration_limit:
        solve_data.mip_subiter = 0
        # Solve the MILP main problem.
        if config.strategy in {'OA', 'GOA', 'ECP'}:
            main_mip, main_mip_results = solve_main(solve_data, config)
            if main_mip_results is not None:
                if not config.single_tree:
                    if main_mip_results.solver.termination_condition is tc.optimal:
                        handle_main_optimal(main_mip, solve_data, config)
                    elif main_mip_results.solver.termination_condition is tc.infeasible:
                        handle_main_infeasible(main_mip, solve_data, config)
                        last_iter_cuts = True
                        break
                    else:
                        handle_main_other_conditions(
                            main_mip, main_mip_results, solve_data, config)
                    # Call the MILP post-solve callback.
                    with time_code(solve_data.timing, 'Call after main solve'):
                        config.call_after_main_solve(main_mip, solve_data)
            else:
                config.logger.info('Algorithm should terminate here.')
                break
        else:
            raise ValueError()

        # Regularization is activated after the first feasible solution is found.
        if config.add_regularization is not None and solve_data.best_solution_found is not None and not config.single_tree:
            # The main problem might be unbounded; regularization is activated only
            # when a valid bound is provided.
            if (solve_data.objective_sense == minimize and solve_data.LB != float('-inf')) or \
                    (solve_data.objective_sense == maximize and solve_data.UB != float('inf')):
                main_mip, main_mip_results = solve_main(
                    solve_data, config, regularization_problem=True)
                handle_regularization_main_tc(
                    main_mip, main_mip_results, solve_data, config)

        # TODO: add descriptions for the following code
        if config.add_regularization is not None and config.single_tree:
            solve_data.curr_int_sol = get_integer_solution(
                solve_data.mip, string_zero=True)
            copy_var_list_values(
                main_mip.MindtPy_utils.variable_list,
                solve_data.working_model.MindtPy_utils.variable_list,
                config)
            if solve_data.curr_int_sol not in set(solve_data.integer_list):
                fixed_nlp, fixed_nlp_result = solve_subproblem(
                    solve_data, config)
                handle_nlp_subproblem_tc(
                    fixed_nlp, fixed_nlp_result, solve_data, config)

        if algorithm_should_terminate(solve_data, config, check_cycling=True):
            last_iter_cuts = False
            break

        if not config.single_tree and config.strategy != 'ECP':  # i.e. we don't use the lazy callback (LP/NLP)
            # Solve the NLP subproblem.
            # The constraint linearization happens in the handlers.
            if not config.solution_pool:
                fixed_nlp, fixed_nlp_result = solve_subproblem(
                    solve_data, config)
                handle_nlp_subproblem_tc(
                    fixed_nlp, fixed_nlp_result, solve_data, config)

                # Call the NLP post-solve callback.
                with time_code(solve_data.timing, 'Call after subproblem solve'):
                    config.call_after_subproblem_solve(fixed_nlp, solve_data)

                if algorithm_should_terminate(solve_data, config, check_cycling=False):
                    last_iter_cuts = True
                    break
            else:
                if config.mip_solver == 'cplex_persistent':
                    solution_pool_names = main_mip_results._solver_model.solution.pool.get_names()
                elif config.mip_solver == 'gurobi_persistent':
                    solution_pool_names = list(
                        range(main_mip_results._solver_model.SolCount))
                # List to store the name and objective value of the solutions in the solution pool.
                solution_name_obj = []
                for name in solution_pool_names:
                    if config.mip_solver == 'cplex_persistent':
                        obj = main_mip_results._solver_model.solution.pool.get_objective_value(name)
                    elif config.mip_solver == 'gurobi_persistent':
                        main_mip_results._solver_model.setParam(
                            gurobipy.GRB.Param.SolutionNumber, name)
                        obj = main_mip_results._solver_model.PoolObjVal
                    solution_name_obj.append([name, obj])
                solution_name_obj.sort(
                    key=itemgetter(1),
                    reverse=solve_data.objective_sense == maximize)
                counter = 0
                for name, _ in solution_name_obj:
                    # The optimal solution of the main problem has been added to integer_list above,
                    # so we should skip checking cycling for the first solution in the solution pool.
                    if counter >= 1:
                        copy_var_list_values_from_solution_pool(
                            solve_data.mip.MindtPy_utils.variable_list,
                            solve_data.working_model.MindtPy_utils.variable_list,
                            config,
                            solver_model=main_mip_results._solver_model,
                            var_map=main_mip_results._pyomo_var_to_solver_var_map,
                            solution_name=name)
                        solve_data.curr_int_sol = get_integer_solution(
                            solve_data.working_model)
                        if solve_data.curr_int_sol in set(solve_data.integer_list):
                            config.logger.info(
                                'The same combination has been explored and will be skipped here.')
                            continue
                        else:
                            solve_data.integer_list.append(
                                solve_data.curr_int_sol)
                    counter += 1
                    fixed_nlp, fixed_nlp_result = solve_subproblem(
                        solve_data, config)
                    handle_nlp_subproblem_tc(
                        fixed_nlp, fixed_nlp_result, solve_data, config)

                    # Call the NLP post-solve callback.
                    with time_code(solve_data.timing, 'Call after subproblem solve'):
                        config.call_after_subproblem_solve(
                            fixed_nlp, solve_data)

                    if algorithm_should_terminate(solve_data, config, check_cycling=False):
                        last_iter_cuts = True
                        break

                    if counter >= config.num_solution_iteration:
                        break

        if config.strategy == 'ECP':
            add_ecp_cuts(solve_data.mip, solve_data, config)

        # if config.strategy == 'PSC':
        #     # If the hybrid algorithm is not making progress, switch to OA.
        #     progress_required = 1E-6
        #     if solve_data.objective_sense == minimize:
        #         log = solve_data.LB_progress
        #         sign_adjust = 1
        #     else:
        #         log = solve_data.UB_progress
        #         sign_adjust = -1
        #     # Maximum number of iterations in which the lower (optimistic)
        #     # bound does not improve before switching to OA
        #     max_nonimprove_iter = 5
        #     making_progress = True
        #     # TODO-romeo Unnecessary for OA and ROA, right?
        #     for i in range(1, max_nonimprove_iter + 1):
        #         try:
        #             if (sign_adjust * log[-i]
        #                     <= (log[-i - 1] + progress_required)
        #                     * sign_adjust):
        #                 making_progress = False
        #             else:
        #                 making_progress = True
        #                 break
        #         except IndexError:
        #             # Not enough history yet, keep going.
        #             making_progress = True
        #             break
        #     if not making_progress and (
        #             config.strategy == 'hPSC' or
        #             config.strategy == 'PSC'):
        #         config.logger.info(
        #             'Not making enough progress for {} iterations. '
        #             'Switching to OA.'.format(max_nonimprove_iter))
        #         config.strategy = 'OA'

    # If add_no_good_cuts is True, the bound obtained in the last iteration is not reliable.
    # We correct it after the iteration.
    if (config.add_no_good_cuts or config.use_tabu_list) and \
            config.strategy != 'FP' and \
            not solve_data.should_terminate and \
            config.add_regularization is None:
        fix_dual_bound(solve_data, config, last_iter_cuts)
    config.logger.info(
        ' ============================================================================================='
    )
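
# A hedged sketch of the raw gurobipy calls behind the solution-pool branch above,
# assuming `grb_model` is an already-optimized gurobipy.Model (the persistent
# interface exposes it as main_mip_results._solver_model) and that the module-level
# gurobipy and itemgetter imports used above are available. SolCount, SolutionNumber
# and PoolObjVal are standard gurobipy attributes/parameters; the helper name is
# illustrative only.
def enumerate_gurobi_solution_pool(grb_model, maximize_sense=False):
    name_obj = []
    for sol_num in range(grb_model.SolCount):
        grb_model.setParam(gurobipy.GRB.Param.SolutionNumber, sol_num)
        name_obj.append((sol_num, grb_model.PoolObjVal))
    # Best objective first, mirroring the sort applied to solution_name_obj above.
    name_obj.sort(key=itemgetter(1), reverse=maximize_sense)
    return name_obj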
def LazyOACallback_gurobi(cb_m, cb_opt, cb_where, solve_data, config):
    """This is a GUROBI callback function defined for LP/NLP based B&B algorithm.

    Args:
        cb_m (Pyomo model): the MIP main problem.
        cb_opt (SolverFactory): the gurobi_persistent solver.
        cb_where (int): an enum member of gurobipy.GRB.Callback.
        solve_data (MindtPySolveData): data container that holds solve-instance data.
        config (ConfigBlock): the specific configurations for MindtPy.
    """
    if cb_where == gurobipy.GRB.Callback.MIPSOL:
        # gurobipy.GRB.Callback.MIPSOL means that an integer solution was found
        # during the branch-and-bound process.
        if solve_data.should_terminate:
            cb_opt._solver_model.terminate()
            return
        cb_opt.cbGetSolution(vars=cb_m.MindtPy_utils.variable_list)
        handle_lazy_main_feasible_solution_gurobi(cb_m, cb_opt, solve_data, config)

        if config.add_cuts_at_incumbent:
            if config.strategy == 'OA':
                add_oa_cuts(solve_data.mip, None, solve_data, config, cb_opt)

        # Regularization is activated after the first feasible solution is found.
        if config.add_regularization is not None and solve_data.best_solution_found is not None:
            # The main problem might be unbounded; regularization is activated only
            # when a valid bound is provided.
            if not solve_data.bound_improved and not solve_data.solution_improved:
                config.logger.debug(
                    'Neither the bound nor the best found solution has been improved. '
                    'We will skip solving the regularization problem and the Fixed-NLP subproblem.')
                solve_data.solution_improved = False
                return
            if ((solve_data.objective_sense == minimize and solve_data.LB != float('-inf'))
                    or (solve_data.objective_sense == maximize and solve_data.UB != float('inf'))):
                main_mip, main_mip_results = solve_main(
                    solve_data, config, regularization_problem=True)
                handle_regularization_main_tc(
                    main_mip, main_mip_results, solve_data, config)

        if solve_data.LB + config.bound_tolerance >= solve_data.UB:
            config.logger.info(
                'MindtPy exiting on bound convergence. '
                'LB: {} + (tol {}) >= UB: {}\n'.format(
                    solve_data.LB, config.bound_tolerance, solve_data.UB))
            solve_data.results.solver.termination_condition = tc.optimal
            cb_opt._solver_model.terminate()
            return

        # Check whether the same integer combination has been obtained before.
        solve_data.curr_int_sol = get_integer_solution(
            solve_data.working_model, string_zero=True)

        if solve_data.curr_int_sol in set(solve_data.integer_list):
            config.logger.debug(
                'This integer combination has been explored. '
                'We will skip solving the Fixed-NLP subproblem.')
            solve_data.solution_improved = False
            if config.strategy == 'GOA':
                if config.add_no_good_cuts:
                    var_values = list(
                        v.value for v in solve_data.working_model.MindtPy_utils.variable_list)
                    add_no_good_cuts(var_values, solve_data, config)
                return
            elif config.strategy == 'OA':
                return
        else:
            solve_data.integer_list.append(solve_data.curr_int_sol)

        # Solve the Fixed-NLP subproblem.
        # The constraint linearization happens in the handlers.
        fixed_nlp, fixed_nlp_result = solve_subproblem(solve_data, config)
        handle_nlp_subproblem_tc(
            fixed_nlp, fixed_nlp_result, solve_data, config, cb_opt)
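
# A small worked example (plain Python, no MindtPy state) of the bound-convergence
# test used in the callback above for a minimization problem: terminate once the
# lower bound is within bound_tolerance of the upper bound. The helper name is
# illustrative only.
def bounds_converged(lower_bound, upper_bound, bound_tolerance):
    return lower_bound + bound_tolerance >= upper_bound


# e.g. LB = 9.5, UB = 10.0, tol = 0.6  ->  converged; tol = 0.1 -> not converged
assert bounds_converged(9.5, 10.0, 0.6)
assert not bounds_converged(9.5, 10.0, 0.1)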