def handle_subproblem_other_termination(fixed_nlp, termination_condition, solve_data, config):
    """Handle a fixed NLP subproblem that ended neither optimal nor infeasible.

    Only a max-iterations termination is tolerated: the iterate is optionally
    excluded via a no-good cut.  Any other condition is an error.

    Parameters
    ----------
    fixed_nlp : Pyomo model
        Integer-variable-fixed NLP model.
    termination_condition : Pyomo TerminationCondition
        The termination condition of the fixed NLP subproblem.
    solve_data : MindtPySolveData
        Data container that holds solve-instance data.
    config : ConfigBlock
        The specific configurations for MindtPy.

    Raises
    ------
    ValueError
        MindtPy unable to handle the NLP subproblem termination condition.
    """
    # Guard clause: anything other than hitting the iteration limit is fatal.
    if termination_condition is not tc.maxIterations:
        raise ValueError('MindtPy unable to handle NLP subproblem termination '
                         'condition of {}'.format(termination_condition))
    # TODO try something else? Reinitialize with different initial value?
    config.logger.info(
        'NLP subproblem failed to converge within iteration limit.')
    current_solution = [v.value for v in fixed_nlp.MindtPy_utils.variable_list]
    if config.add_no_good_cuts:
        # Exclude the current discrete option from later iterations.
        add_no_good_cuts(current_solution, solve_data, config)
def handle_subproblem_infeasible(fixed_nlp, solve_data, config, cb_opt=None):
    """Solves feasibility problem and adds cut according to the specified strategy.

    This function handles the result of the latest iteration of solving the
    NLP subproblem given an infeasible solution and copies the solution of the
    feasibility problem to the working model.

    Parameters
    ----------
    fixed_nlp : Pyomo model
        Integer-variable-fixed NLP model.
    solve_data : MindtPySolveData
        Data container that holds solve-instance data.
    config : ConfigBlock
        The specific configurations for MindtPy.
    cb_opt : SolverFactory, optional
        The gurobi_persistent solver, by default None.
    """
    # TODO try something else? Reinitialize with different initial
    # value?
    config.logger.info('NLP subproblem was locally infeasible.')
    solve_data.nlp_infeasible_counter += 1
    if config.calculate_dual:
        # Estimate a dual value for each constraint from its violation:
        # zero when the constraint is satisfied, signed by the bound
        # direction (<= gives a non-positive dual, >= a non-negative one).
        for c in fixed_nlp.MindtPy_utils.constraint_list:
            rhs = value(c.upper) if c.has_ub() else value(c.lower)
            c_geq = -1 if c.has_ub() else 1
            fixed_nlp.dual[c] = (c_geq
                                 * max(0, c_geq * (rhs - value(c.body))))
        dual_values = list(fixed_nlp.dual[c]
                           for c in fixed_nlp.MindtPy_utils.constraint_list)
    else:
        dual_values = None
    # if config.strategy == 'PSC' or config.strategy == 'GBD':
    #     for var in fixed_nlp.component_data_objects(ctype=Var, descend_into=True):
    #         fixed_nlp.ipopt_zL_out[var] = 0
    #         fixed_nlp.ipopt_zU_out[var] = 0
    #         if var.has_ub() and abs(var.ub - value(var)) < config.absolute_bound_tolerance:
    #             fixed_nlp.ipopt_zL_out[var] = 1
    #         elif var.has_lb() and abs(value(var) - var.lb) < config.absolute_bound_tolerance:
    #             fixed_nlp.ipopt_zU_out[var] = -1
    if config.strategy in {'OA', 'GOA'}:
        # Solve the feasibility subproblem and linearize at its solution
        # so the MIP main problem cuts off this infeasible region.
        config.logger.info('Solving feasibility problem')
        feas_subproblem, feas_subproblem_results = solve_feasibility_subproblem(
            solve_data, config)
        # TODO: do we really need this?
        if solve_data.should_terminate:
            return
        # Copy the feasibility-subproblem solution into the MIP model so
        # the cut generators linearize at that point.
        copy_var_list_values(feas_subproblem.MindtPy_utils.variable_list,
                             solve_data.mip.MindtPy_utils.variable_list,
                             config)
        if config.strategy == 'OA':
            add_oa_cuts(solve_data.mip, dual_values, solve_data, config, cb_opt)
        elif config.strategy == 'GOA':
            add_affine_cuts(solve_data, config)
    # Add a no-good cut to exclude this discrete option
    var_values = list(v.value for v in fixed_nlp.MindtPy_utils.variable_list)
    if config.add_no_good_cuts:
        # excludes current discrete option
        add_no_good_cuts(var_values, solve_data, config)
def LazyOACallback_gurobi(cb_m, cb_opt, cb_where, solve_data, config):
    """This is a GUROBI callback function defined for LP/NLP based B&B algorithm.

    Parameters
    ----------
    cb_m : Pyomo model
        The MIP main problem.
    cb_opt : SolverFactory
        The gurobi_persistent solver.
    cb_where : int
        An enum member of gurobipy.GRB.Callback.
    solve_data : MindtPySolveData
        Data container that holds solve-instance data.
    config : ConfigBlock
        The specific configurations for MindtPy.
    """
    if cb_where == gurobipy.GRB.Callback.MIPSOL:
        # gurobipy.GRB.Callback.MIPSOL means that an integer solution is found during the branch and bound process
        if solve_data.should_terminate:
            # Ask Gurobi to stop the branch-and-bound run.
            cb_opt._solver_model.terminate()
            return
        # Load the incumbent values into the Pyomo variables.
        cb_opt.cbGetSolution(vars=cb_m.MindtPy_utils.variable_list)
        handle_lazy_main_feasible_solution_gurobi(
            cb_m, cb_opt, solve_data, config)

        if config.add_cuts_at_incumbent:
            if config.strategy == 'OA':
                add_oa_cuts(solve_data.mip, None, solve_data, config, cb_opt)

        # Regularization is activated after the first feasible solution is found.
        if config.add_regularization is not None and solve_data.best_solution_found is not None:
            # The main problem might be unbounded, regularization is activated only when a valid bound is provided.
            if not solve_data.dual_bound_improved and not solve_data.primal_bound_improved:
                config.logger.debug(
                    'The bound and the best found solution have neither been improved.'
                    'We will skip solving the regularization problem and the Fixed-NLP subproblem')
                solve_data.primal_bound_improved = False
                return
            # Only regularize once the dual bound has moved off its
            # initial value (i.e. a valid bound exists).
            if solve_data.dual_bound != solve_data.dual_bound_progress[0]:
                main_mip, main_mip_results = solve_main(
                    solve_data, config, regularization_problem=True)
                handle_regularization_main_tc(
                    main_mip, main_mip_results, solve_data, config)

        # Terminate optimally when the absolute gap is closed.
        if abs(solve_data.primal_bound - solve_data.dual_bound) <= config.absolute_bound_tolerance:
            config.logger.info(
                'MindtPy exiting on bound convergence. '
                '|Primal Bound: {} - Dual Bound: {}| <= (absolute tolerance {}) \n'.format(
                    solve_data.primal_bound, solve_data.dual_bound,
                    config.absolute_bound_tolerance))
            solve_data.results.solver.termination_condition = tc.optimal
            cb_opt._solver_model.terminate()
            return

        # Check if the same integer combination is obtained.
        solve_data.curr_int_sol = get_integer_solution(
            solve_data.working_model, string_zero=True)

        if solve_data.curr_int_sol in set(solve_data.integer_list):
            # Already-explored combination: skip the Fixed-NLP subproblem
            # (GOA may still add a no-good cut before returning).
            config.logger.debug(
                'This integer combination has been explored. '
                'We will skip solving the Fixed-NLP subproblem.')
            solve_data.primal_bound_improved = False
            if config.strategy == 'GOA':
                if config.add_no_good_cuts:
                    var_values = list(
                        v.value for v in solve_data.working_model.MindtPy_utils.variable_list)
                    add_no_good_cuts(var_values, solve_data, config)
                return
            elif config.strategy == 'OA':
                return
        else:
            solve_data.integer_list.append(solve_data.curr_int_sol)

        # solve subproblem
        # The constraint linearization happens in the handlers
        fixed_nlp, fixed_nlp_result = solve_subproblem(solve_data, config)

        handle_nlp_subproblem_tc(
            fixed_nlp, fixed_nlp_result, solve_data, config, cb_opt)
def handle_subproblem_optimal(fixed_nlp, solve_data, config, cb_opt=None, fp=False):
    """This function copies the result of the NLP solver function ('solve_subproblem')
    to the working model, updates the bounds, adds OA and no-good cuts, and
    then stores the new solution if it is the new best solution.

    This function handles the result of the latest iteration of solving the
    NLP subproblem given an optimal solution.

    Parameters
    ----------
    fixed_nlp : Pyomo model
        Integer-variable-fixed NLP model.
    solve_data : MindtPySolveData
        Data container that holds solve-instance data.
    config : ConfigBlock
        The specific configurations for MindtPy.
    cb_opt : SolverFactory, optional
        The gurobi_persistent solver, by default None.
    fp : bool, optional
        Whether it is in the loop of feasibility pump, by default False.
    """
    # Copy the NLP solution back into the working model.
    copy_var_list_values(fixed_nlp.MindtPy_utils.variable_list,
                         solve_data.working_model.MindtPy_utils.variable_list,
                         config)
    if config.calculate_dual:
        # Fall back to the saved duals for constraints the solver left
        # without a dual value.
        for c in fixed_nlp.tmp_duals:
            if fixed_nlp.dual.get(c, None) is None:
                fixed_nlp.dual[c] = fixed_nlp.tmp_duals[c]
        dual_values = list(fixed_nlp.dual[c]
                           for c in fixed_nlp.MindtPy_utils.constraint_list)
    else:
        dual_values = None
    main_objective = fixed_nlp.MindtPy_utils.objective_list[-1]
    update_primal_bound(solve_data, value(main_objective.expr))
    if solve_data.primal_bound_improved:
        # New incumbent: store a clone and the time it was found.
        solve_data.best_solution_found = fixed_nlp.clone()
        solve_data.best_solution_found_time = get_main_elapsed_time(
            solve_data.timing)
        if config.strategy == 'GOA':
            solve_data.num_no_good_cuts_added.update({
                solve_data.primal_bound: len(solve_data.mip.MindtPy_utils.cuts.no_good_cuts)})
    # add obj increasing constraint for fp
    if fp:
        # Replace the improving-objective cut so the next FP iterate must
        # beat the current primal bound by at least the cutoff decrement.
        solve_data.mip.MindtPy_utils.cuts.del_component(
            'improving_objective_cut')
        if solve_data.objective_sense == minimize:
            solve_data.mip.MindtPy_utils.cuts.improving_objective_cut = Constraint(
                expr=sum(solve_data.mip.MindtPy_utils.objective_value[:])
                <= solve_data.primal_bound - config.fp_cutoffdecr *
                max(1, abs(solve_data.primal_bound)))
        else:
            solve_data.mip.MindtPy_utils.cuts.improving_objective_cut = Constraint(
                expr=sum(solve_data.mip.MindtPy_utils.objective_value[:])
                >= solve_data.primal_bound + config.fp_cutoffdecr *
                max(1, abs(solve_data.primal_bound)))
    # Add the linear cut
    if config.strategy == 'OA' or fp:
        copy_var_list_values(fixed_nlp.MindtPy_utils.variable_list,
                             solve_data.mip.MindtPy_utils.variable_list,
                             config)
        add_oa_cuts(solve_data.mip, dual_values, solve_data, config, cb_opt)
    elif config.strategy == 'GOA':
        copy_var_list_values(fixed_nlp.MindtPy_utils.variable_list,
                             solve_data.mip.MindtPy_utils.variable_list,
                             config)
        add_affine_cuts(solve_data, config)
    # elif config.strategy == 'PSC':
    #     # !!THIS SEEMS LIKE A BUG!! - mrmundt
    #     # add_psc_cut(solve_data, config)
    # elif config.strategy == 'GBD':
    #     # !!THIS SEEMS LIKE A BUG!! - mrmundt
    #     # add_gbd_cut(solve_data, config)

    var_values = list(v.value for v in fixed_nlp.MindtPy_utils.variable_list)
    if config.add_no_good_cuts:
        add_no_good_cuts(var_values, solve_data, config)
    config.call_after_subproblem_feasible(fixed_nlp, solve_data)
    config.logger.info(
        solve_data.fixed_nlp_log_formatter.format(
            '*' if solve_data.primal_bound_improved else ' ',
            solve_data.nlp_iter if not fp else solve_data.fp_iter,
            'Fixed NLP', value(main_objective.expr),
            solve_data.primal_bound, solve_data.dual_bound,
            solve_data.rel_gap,
            get_main_elapsed_time(solve_data.timing)))
def handle_subproblem_optimal(fixed_nlp, solve_data, config, fp=False):
    """Process an optimal Fixed-NLP subproblem solution.

    Copies the result of the NLP solver function ('solve_subproblem') to the
    working model, updates the bounds, adds OA and no-good cuts, and then
    stores the new solution if it is the new best solution.

    Parameters
    ----------
    fixed_nlp : Pyomo model
        Fixed-NLP from the model.
    solve_data : MindtPy Data Container
        Data container that holds solve-instance data.
    config : ConfigBlock
        Contains the specific configurations for the algorithm.
    fp : bool, optional
        Whether it is in the loop of feasibility pump, by default False.
    """
    # Copy the NLP solution back into the working model.
    copy_var_list_values(fixed_nlp.MindtPy_utils.variable_list,
                         solve_data.working_model.MindtPy_utils.variable_list,
                         config)
    if config.calculate_dual:
        # Fall back to the saved duals for constraints the solver left
        # without a dual value.
        for c in fixed_nlp.tmp_duals:
            if fixed_nlp.dual.get(c, None) is None:
                fixed_nlp.dual[c] = fixed_nlp.tmp_duals[c]
        dual_values = list(fixed_nlp.dual[c]
                           for c in fixed_nlp.MindtPy_utils.constraint_list)
    else:
        dual_values = None
    main_objective = fixed_nlp.MindtPy_utils.objective_list[-1]
    # Update the primal bound (UB when minimizing, LB when maximizing).
    if solve_data.objective_sense == minimize:
        solve_data.UB = min(value(main_objective.expr), solve_data.UB)
        solve_data.solution_improved = solve_data.UB < solve_data.UB_progress[-1]
        solve_data.UB_progress.append(solve_data.UB)
    else:
        solve_data.LB = max(value(main_objective.expr), solve_data.LB)
        solve_data.solution_improved = solve_data.LB > solve_data.LB_progress[-1]
        solve_data.LB_progress.append(solve_data.LB)
    config.logger.info(
        'Fixed-NLP {}: OBJ: {} LB: {} UB: {} TIME: {}s'.format(
            solve_data.nlp_iter if not fp else solve_data.fp_iter,
            value(main_objective.expr),
            solve_data.LB, solve_data.UB,
            round(get_main_elapsed_time(solve_data.timing), 2)))

    if solve_data.solution_improved:
        # New incumbent: store a clone and the time it was found.
        solve_data.best_solution_found = fixed_nlp.clone()
        solve_data.best_solution_found_time = get_main_elapsed_time(
            solve_data.timing)
        if config.strategy == 'GOA':
            if solve_data.objective_sense == minimize:
                solve_data.num_no_good_cuts_added.update({
                    solve_data.UB: len(solve_data.mip.MindtPy_utils.cuts.no_good_cuts)})
            else:
                solve_data.num_no_good_cuts_added.update({
                    solve_data.LB: len(solve_data.mip.MindtPy_utils.cuts.no_good_cuts)})

    # add obj increasing constraint for fp
    if fp:
        # Replace the improving-objective cut so the next FP iterate must
        # beat the current primal bound by at least the cutoff decrement.
        solve_data.mip.MindtPy_utils.cuts.del_component(
            'improving_objective_cut')
        if solve_data.objective_sense == minimize:
            solve_data.mip.MindtPy_utils.cuts.improving_objective_cut = Constraint(
                expr=solve_data.mip.MindtPy_utils.objective_value
                <= solve_data.UB - config.fp_cutoffdecr * max(1, abs(solve_data.UB)))
        else:
            # BUGFIX: the relative-cutoff term must scale with the bound
            # being improved (LB when maximizing); it previously used
            # abs(solve_data.UB), inconsistent with the minimize branch.
            solve_data.mip.MindtPy_utils.cuts.improving_objective_cut = Constraint(
                expr=solve_data.mip.MindtPy_utils.objective_value
                >= solve_data.LB + config.fp_cutoffdecr * max(1, abs(solve_data.LB)))

    # Add the linear cut
    if config.strategy == 'OA' or fp:
        copy_var_list_values(fixed_nlp.MindtPy_utils.variable_list,
                             solve_data.mip.MindtPy_utils.variable_list,
                             config)
        add_oa_cuts(solve_data.mip, dual_values, solve_data, config)
    elif config.strategy == 'GOA':
        copy_var_list_values(fixed_nlp.MindtPy_utils.variable_list,
                             solve_data.mip.MindtPy_utils.variable_list,
                             config)
        add_affine_cuts(solve_data, config)
    # elif config.strategy == 'PSC':
    #     # !!THIS SEEMS LIKE A BUG!! - mrmundt
    #     # add_psc_cut(solve_data, config)
    # elif config.strategy == 'GBD':
    #     # !!THIS SEEMS LIKE A BUG!! - mrmundt
    #     # add_gbd_cut(solve_data, config)

    var_values = list(v.value for v in fixed_nlp.MindtPy_utils.variable_list)
    if config.add_no_good_cuts:
        add_no_good_cuts(var_values, solve_data, config, feasible=True)
    config.call_after_subproblem_feasible(fixed_nlp, solve_data)
def LazyOACallback_gurobi(cb_m, cb_opt, cb_where, solve_data, config):
    """This is a GUROBI callback function defined for LP/NLP based B&B algorithm.

    Args:
        cb_m (Pyomo model): the MIP main problem.
        cb_opt (SolverFactory): the gurobi_persistent solver.
        cb_where (int): an enum member of gurobipy.GRB.Callback.
        solve_data (MindtPySolveData): data container that holds solve-instance data.
        config (ConfigBlock): the specific configurations for MindtPy.
    """
    if cb_where == gurobipy.GRB.Callback.MIPSOL:
        # gurobipy.GRB.Callback.MIPSOL means that an integer solution is found during the branch and bound process
        if solve_data.should_terminate:
            # Ask Gurobi to stop the branch-and-bound run.
            cb_opt._solver_model.terminate()
            return
        # Load the incumbent values into the Pyomo variables.
        cb_opt.cbGetSolution(vars=cb_m.MindtPy_utils.variable_list)
        handle_lazy_main_feasible_solution_gurobi(
            cb_m, cb_opt, solve_data, config)

        if config.add_cuts_at_incumbent:
            if config.strategy == 'OA':
                add_oa_cuts(solve_data.mip, None, solve_data, config, cb_opt)

        # Regularization is activated after the first feasible solution is found.
        if config.add_regularization is not None and solve_data.best_solution_found is not None:
            # The main problem might be unbounded, regularization is activated only when a valid bound is provided.
            if not solve_data.bound_improved and not solve_data.solution_improved:
                config.logger.debug(
                    'the bound and the best found solution have neither been improved.'
                    'We will skip solving the regularization problem and the Fixed-NLP subproblem')
                solve_data.solution_improved = False
                return
            # Only regularize when a finite dual bound exists (LB when
            # minimizing, UB when maximizing).
            if ((solve_data.objective_sense == minimize and solve_data.LB != float('-inf'))
                    or (solve_data.objective_sense == maximize and solve_data.UB != float('inf'))):
                main_mip, main_mip_results = solve_main(
                    solve_data, config, regularization_problem=True)
                handle_regularization_main_tc(
                    main_mip, main_mip_results, solve_data, config)

        # Terminate optimally when the bounds have converged within tolerance.
        if solve_data.LB + config.bound_tolerance >= solve_data.UB:
            config.logger.info(
                'MindtPy exiting on bound convergence. '
                'LB: {} + (tol {}) >= UB: {}\n'.format(
                    solve_data.LB, config.bound_tolerance, solve_data.UB))
            solve_data.results.solver.termination_condition = tc.optimal
            cb_opt._solver_model.terminate()
            return

        # Check if the same integer combination is obtained.
        solve_data.curr_int_sol = get_integer_solution(
            solve_data.working_model, string_zero=True)

        if solve_data.curr_int_sol in set(solve_data.integer_list):
            # Already-explored combination: skip the Fixed-NLP subproblem
            # (GOA may still add a no-good cut before returning).
            config.logger.debug(
                'This integer combination has been explored. '
                'We will skip solving the Fixed-NLP subproblem.')
            solve_data.solution_improved = False
            if config.strategy == 'GOA':
                if config.add_no_good_cuts:
                    var_values = list(
                        v.value for v in solve_data.working_model.MindtPy_utils.variable_list)
                    add_no_good_cuts(var_values, solve_data, config)
                return
            elif config.strategy == 'OA':
                return
        else:
            solve_data.integer_list.append(solve_data.curr_int_sol)

        # solve subproblem
        # The constraint linearization happens in the handlers
        fixed_nlp, fixed_nlp_result = solve_subproblem(solve_data, config)

        handle_nlp_subproblem_tc(
            fixed_nlp, fixed_nlp_result, solve_data, config, cb_opt)