Example #1
def handle_lazy_main_feasible_solution_gurobi(cb_m, cb_opt, solve_data,
                                              config):
    """This function is called during the branch and bound of main MIP problem, 
    more exactly when a feasible solution is found and LazyCallback is activated.

    Copy the solution to working model and update upper or lower bound.
    In LP-NLP, upper or lower bound are updated during solving the main problem.

    Parameters
    ----------
    cb_m : Pyomo model
        The MIP main problem.
    cb_opt : SolverFactory
        The gurobi_persistent solver.
    solve_data : MindtPySolveData
        Data container that holds solve-instance data.
    config : ConfigBlock
        The specific configurations for MindtPy.
    """
    # proceed. Just need integer values
    cb_opt.cbGetSolution(vars=cb_m.MindtPy_utils.variable_list)
    # This value copy is useful since we need to fix the subproblem based on the solution of the main problem.
    copy_var_list_values(cb_m.MindtPy_utils.variable_list,
                         solve_data.working_model.MindtPy_utils.variable_list,
                         config)
    update_dual_bound(solve_data,
                      cb_opt.cbGet(gurobipy.GRB.Callback.MIPSOL_OBJBND))
    config.logger.info(
        solve_data.log_formatter.format(
            solve_data.mip_iter, 'restrLP',
            cb_opt.cbGet(gurobipy.GRB.Callback.MIPSOL_OBJ),
            solve_data.primal_bound, solve_data.dual_bound, solve_data.rel_gap,
            get_main_elapsed_time(solve_data.timing)))
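
For context, a handler like this is attached through Pyomo's gurobi_persistent callback interface, which calls the registered function with (cb_m, cb_opt, cb_where). Below is a minimal sketch of that wiring; the lazy_callback dispatcher and the functools.partial binding of solve_data and config are illustrative assumptions, not MindtPy's exact code.

from functools import partial

from gurobipy import GRB
from pyomo.environ import SolverFactory

def lazy_callback(cb_m, cb_opt, cb_where, solve_data, config):
    # gurobi_persistent passes (cb_m, cb_opt, cb_where); act only when a new
    # incumbent is reported (MIPSOL), then hand off to the handler above.
    if cb_where == GRB.Callback.MIPSOL:
        handle_lazy_main_feasible_solution_gurobi(cb_m, cb_opt, solve_data, config)

mip_opt = SolverFactory('gurobi_persistent')
mip_opt.set_instance(solve_data.mip)
mip_opt.set_gurobi_param('LazyConstraints', 1)  # required before adding cbLazy cuts
mip_opt.set_callback(partial(lazy_callback, solve_data=solve_data, config=config))
mip_opt.solve(tee=config.mip_solver_tee)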
Example #2
    def handle_lazy_main_feasible_solution(self, main_mip, solve_data, config,
                                           opt):
        """This function is called during the branch and bound of main mip, more 
        exactly when a feasible solution is found and LazyCallback is activated.
        Copy the result to working model and update upper or lower bound.
        In LP-NLP, upper or lower bound are updated during solving the main problem.

        Parameters
        ----------
        main_mip : Pyomo model
            The MIP main problem.
        solve_data : MindtPySolveData
            Data container that holds solve-instance data.
        config : ConfigBlock
            The specific configurations for MindtPy.
        opt : SolverFactory
            The cplex_persistent solver.
        """
        # proceed. Just need integer values

        # This value copy is useful since we need to fix the subproblem based on the solution of the main problem.
        self.copy_lazy_var_list_values(
            opt, main_mip.MindtPy_utils.variable_list,
            solve_data.working_model.MindtPy_utils.variable_list, config)
        update_dual_bound(solve_data, self.get_best_objective_value())
        config.logger.info(
            solve_data.log_formatter.format(
                solve_data.mip_iter, 'restrLP', self.get_objective_value(),
                solve_data.primal_bound, solve_data.dual_bound,
                solve_data.rel_gap, get_main_elapsed_time(solve_data.timing)))
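
The CPLEX variant is written as a method on a cplex.callbacks.LazyConstraintCallback subclass, because incumbent information such as get_objective_value and get_best_objective_value is only available on the callback object. A rough structural sketch follows; how the context attributes (main_mip, solve_data, config, opt) get attached after register_callback is shown only as an assumption.

from cplex.callbacks import LazyConstraintCallback

class LazyOACallback_cplex(LazyConstraintCallback):
    # handle_lazy_main_feasible_solution (shown above) and
    # copy_lazy_var_list_values are defined on this class.

    def __call__(self):
        # CPLEX invokes __call__ for every candidate incumbent. The context
        # attributes are assumed to be attached after registration, e.g.
        #   cb = cpx.register_callback(LazyOACallback_cplex)
        #   cb.main_mip, cb.solve_data, cb.config, cb.opt = ...
        self.handle_lazy_main_feasible_solution(
            self.main_mip, self.solve_data, self.config, self.opt)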
Example #3
def handle_main_optimal(main_mip, solve_data, config, update_bound=True):
    """This function copies the results from 'solve_main' to the working model and updates the upper/lower bound. This
    function is called after an optimal solution is found for the main problem.

    Args:
        main_mip (Pyomo model): the MIP main problem.
        solve_data (MindtPySolveData): data container that holds solve-instance data.
        config (ConfigBlock): the specific configurations for MindtPy.
        update_bound (bool, optional): whether to update the bound. The bound is not updated when handling the regularization problem. Defaults to True.
    """
    # proceed. Just need integer values
    MindtPy = main_mip.MindtPy_utils
    # check if the values of the binary variables are valid
    for var in MindtPy.discrete_variable_list:
        if var.value is None:
            config.logger.warning(
                f"Integer variable {var.name} not initialized.  "
                "Setting it to its lower bound")
            var.set_value(var.lb, skip_validation=True)  # nlp_var.bounds[0]
    # warm start for the nlp subproblem
    copy_var_list_values(main_mip.MindtPy_utils.variable_list,
                         solve_data.working_model.MindtPy_utils.variable_list,
                         config)

    if update_bound:
        update_dual_bound(solve_data, value(MindtPy.mip_obj.expr))
        config.logger.info(
            solve_data.log_formatter.format(
                solve_data.mip_iter, 'MILP', value(MindtPy.mip_obj.expr),
                solve_data.LB, solve_data.UB, solve_data.rel_gap,
                get_main_elapsed_time(solve_data.timing)))
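
The copy_var_list_values call is the warm-start step: the MIP solution is pushed into the working model so the fixed-NLP subproblem starts from it. A simplified sketch of what such a value-copy helper has to do is below; it is not MindtPy's implementation, which additionally rounds integer values and repairs small bound violations using config tolerances.

from pyomo.environ import value

def copy_values_simplified(from_list, to_list, config):
    # Pairwise copy; skip_validation mirrors the calls above and avoids
    # rejecting values that sit marginally outside the variable bounds.
    for v_from, v_to in zip(from_list, to_list):
        v_to.set_value(value(v_from, exception=False), skip_validation=True)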
Example #4
def init_rNLP(solve_data, config):
    """Initialize the problem by solving the relaxed NLP and then store the optimal variable
    values obtained from solving the rNLP.

    Parameters
    ----------
    solve_data : MindtPySolveData
        Data container that holds solve-instance data.
    config : ConfigBlock
        The specific configurations for MindtPy.

    Raises
    ------
    ValueError
        MindtPy unable to handle the termination condition of the relaxed NLP.
    """
    m = solve_data.working_model.clone()
    config.logger.debug('Relaxed NLP: Solve relaxed integrality')
    MindtPy = m.MindtPy_utils
    TransformationFactory('core.relax_integer_vars').apply_to(m)
    nlp_args = dict(config.nlp_solver_args)
    nlpopt = SolverFactory(config.nlp_solver)
    set_solver_options(nlpopt, solve_data, config, solver_type='nlp')
    with SuppressInfeasibleWarning():
        results = nlpopt.solve(m, tee=config.nlp_solver_tee, **nlp_args)
    subprob_terminate_cond = results.solver.termination_condition
    if subprob_terminate_cond in {tc.optimal, tc.feasible, tc.locallyOptimal}:
        main_objective = MindtPy.objective_list[-1]
        if subprob_terminate_cond == tc.optimal:
            update_dual_bound(solve_data, value(main_objective.expr))
        else:
            config.logger.info('relaxed NLP is not solved to optimality.')
            update_suboptimal_dual_bound(solve_data, results)
        dual_values = list(
            m.dual[c] for c in
            MindtPy.constraint_list) if config.calculate_dual else None
        config.logger.info(
            solve_data.log_formatter.format(
                '-', 'Relaxed NLP', value(main_objective.expr), solve_data.LB,
                solve_data.UB, solve_data.rel_gap,
                get_main_elapsed_time(solve_data.timing)))
        # Add OA cut
        if config.strategy in {'OA', 'GOA', 'FP'}:
            copy_var_list_values(m.MindtPy_utils.variable_list,
                                 solve_data.mip.MindtPy_utils.variable_list,
                                 config,
                                 ignore_integrality=True)
            if config.init_strategy == 'FP':
                copy_var_list_values(
                    m.MindtPy_utils.variable_list,
                    solve_data.working_model.MindtPy_utils.variable_list,
                    config,
                    ignore_integrality=True)
            if config.strategy in {'OA', 'FP'}:
                add_oa_cuts(solve_data.mip, dual_values, solve_data, config)
            elif config.strategy == 'GOA':
                add_affine_cuts(solve_data, config)
            for var in solve_data.mip.MindtPy_utils.discrete_variable_list:
                # We don't want to trigger the reset of the global stale
                # indicator, so we will set this variable to be "stale",
                # knowing that set_value will switch it back to "not
                # stale"
                var.stale = True
                var.set_value(int(round(var.value)), skip_validation=True)
    elif subprob_terminate_cond in {tc.infeasible, tc.noSolution}:
        # TODO fail? try something else?
        config.logger.info('Initial relaxed NLP problem is infeasible. '
                           'Problem may be infeasible.')
    elif subprob_terminate_cond is tc.maxTimeLimit:
        config.logger.info(
            'NLP subproblem failed to converge within time limit.')
        solve_data.results.solver.termination_condition = tc.maxTimeLimit
    elif subprob_terminate_cond is tc.maxIterations:
        config.logger.info(
            'NLP subproblem failed to converge within iteration limit.')
    else:
        raise ValueError(
            'MindtPy unable to handle relaxed NLP termination condition '
            'of %s. Solver message: %s' %
            (subprob_terminate_cond, results.solver.message))
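
init_rNLP is the initialization step used when MindtPy is invoked with init_strategy='rNLP' (the default for the OA strategy). A small usage sketch on an illustrative convex MINLP; the model itself is made up for demonstration, and the solver choices are just common ones.

from pyomo.environ import (Binary, ConcreteModel, Constraint, Objective,
                           SolverFactory, Var, minimize)

m = ConcreteModel()
m.x = Var(bounds=(0, 4))
m.y = Var(domain=Binary)
m.c = Constraint(expr=m.x**2 <= 2 * m.y + 1)   # convex nonlinear constraint
m.obj = Objective(expr=m.y - m.x, sense=minimize)

# With init_strategy='rNLP', MindtPy relaxes integrality, solves the NLP as
# shown above, updates the dual bound, and seeds the first OA cuts.
SolverFactory('mindtpy').solve(m, strategy='OA', init_strategy='rNLP',
                               mip_solver='glpk', nlp_solver='ipopt',
                               tee=True)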