Example #1
def handle_NLP_subproblem_other_termination(fixed_nlp, termination_condition,
                                            solve_data, config):
    """
    Handles the result of the latest iteration of solving the NLP subproblem given a solution that is neither optimal
    nor infeasible.

    Parameters
    ----------
    fixed_nlp: Pyomo model
        fixed NLP from the model
    termination_condition: Pyomo TerminationCondition
        the termination condition of the NLP subproblem
    solve_data: MindtPy Data Container
        data container that holds solve-instance data
    config: ConfigBlock
        contains the specific configurations for the algorithm
    """
    if termination_condition is tc.maxIterations:
        # TODO try something else? Reinitialize with different initial value?
        config.logger.info(
            'NLP subproblem failed to converge within iteration limit.')
        var_values = list(v.value
                          for v in fixed_nlp.MindtPy_utils.variable_list)
        if config.add_nogood_cuts:
            # excludes current discrete option
            add_nogood_cuts(var_values, solve_data, config)
    else:
        raise ValueError('MindtPy unable to handle NLP subproblem termination '
                         'condition of {}'.format(termination_condition))
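
For context, a handler like this is only reached after the NLP subproblem has been solved and its termination condition inspected. The sketch below is a hypothetical dispatcher, not part of MindtPy, showing how the three handlers in these examples could be wired to a solver result; the handler names come from the examples, everything else is illustrative.

from pyomo.opt import TerminationCondition as tc

def dispatch_nlp_result(fixed_nlp, results, solve_data, config):
    # Illustrative routing only: pick the handler that matches the
    # termination condition reported by the NLP solver.
    termination_condition = results.solver.termination_condition
    if termination_condition in {tc.optimal, tc.locallyOptimal, tc.feasible}:
        handle_NLP_subproblem_optimal(fixed_nlp, solve_data, config)
    elif termination_condition is tc.infeasible:
        handle_NLP_subproblem_infeasible(fixed_nlp, solve_data, config)
    else:
        handle_NLP_subproblem_other_termination(
            fixed_nlp, termination_condition, solve_data, config)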
Example #2
def handle_NLP_subproblem_infeasible(fixed_nlp, solve_data, config):
    """
    Solves feasibility problem and adds cut according to the specified strategy

    This function handles the result of the latest iteration of solving the NLP subproblem given an infeasible
    solution and, for the OA and GOA strategies, copies the solution of the feasibility problem to the MIP model
    (solve_data.mip).

    Parameters
    ----------
    fixed_nlp: Pyomo model
        fixed NLP from the model
    solve_data: MindtPy Data Container
        data container that holds solve-instance data
    config: ConfigBlock
        contains the specific configurations for the algorithm
    """
    # TODO try something else? Reinitialize with different initial
    # value?
    config.logger.info('NLP subproblem was locally infeasible.')
    if config.use_dual:
        for c in fixed_nlp.component_data_objects(ctype=Constraint):
            rhs = c.upper if c.has_ub() else c.lower
            c_geq = -1 if c.has_ub() else 1
            fixed_nlp.dual[c] = (c_geq * max(0, c_geq * (rhs - value(c.body))))
        dual_values = list(fixed_nlp.dual[c]
                           for c in fixed_nlp.MindtPy_utils.constraint_list)
    else:
        dual_values = None

    # if config.strategy == 'PSC' or config.strategy == 'GBD':
    #     for var in fixed_nlp.component_data_objects(ctype=Var, descend_into=True):
    #         fixed_nlp.ipopt_zL_out[var] = 0
    #         fixed_nlp.ipopt_zU_out[var] = 0
    #         if var.has_ub() and abs(var.ub - value(var)) < config.bound_tolerance:
    #             fixed_nlp.ipopt_zL_out[var] = 1
    #         elif var.has_lb() and abs(value(var) - var.lb) < config.bound_tolerance:
    #             fixed_nlp.ipopt_zU_out[var] = -1

    if config.strategy in {'OA', 'GOA'}:
        config.logger.info('Solving feasibility problem')
        if config.initial_feas:
            # add_feas_slacks(fixed_nlp, solve_data)
            # config.initial_feas = False
            feas_NLP, feas_NLP_results = solve_NLP_feas(solve_data, config)
            copy_var_list_values(feas_NLP.MindtPy_utils.variable_list,
                                 solve_data.mip.MindtPy_utils.variable_list,
                                 config)
            if config.strategy == "OA":
                add_oa_cuts(solve_data.mip, dual_values, solve_data, config)
            elif config.strategy == "GOA":
                add_affine_cuts(solve_data, config)
    # Add an integer cut to exclude this discrete option
    var_values = list(v.value for v in fixed_nlp.MindtPy_utils.variable_list)
    if config.add_nogood_cuts:
        # excludes current discrete option
        add_nogood_cuts(var_values, solve_data, config)
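
When the fixed NLP is infeasible there are no reliable solver duals to copy, so the handler above estimates a dual for each constraint from its violation: for a <= constraint, c_geq is -1 and the estimate is nonzero only when the constraint body exceeds its right-hand side. The snippet below is a self-contained toy illustration of that formula; the model and values are assumptions, only the dual expression is taken from the handler.

from pyomo.environ import ConcreteModel, Var, Constraint, Suffix, value

m = ConcreteModel()
m.x = Var(initialize=3.0)
m.c = Constraint(expr=m.x <= 2.0)   # violated by 1.0 at x = 3
m.dual = Suffix(direction=Suffix.IMPORT)

rhs = m.c.upper if m.c.has_ub() else m.c.lower
c_geq = -1 if m.c.has_ub() else 1
# Same expression as in the handler: zero for satisfied constraints,
# the signed violation for violated ones.
m.dual[m.c] = c_geq * max(0, c_geq * (rhs - value(m.c.body)))
print(m.dual[m.c])   # -1.0 for this toy model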
Example #3
def handle_NLP_subproblem_optimal(fixed_nlp, solve_data, config):
    """
    This function handles the result of the latest iteration of solving the NLP subproblem given an optimal solution:
    it copies the result of the NLP solver ('solve_NLP_subproblem') to the working model, updates the bounds, adds OA
    and integer cuts, and stores the new solution if it is the best solution found so far.

    Parameters
    ----------
    fixed_nlp: Pyomo model
        fixed NLP from the model
    solve_data: MindtPy Data Container
        data container that holds solve-instance data
    config: ConfigBlock
        contains the specific configurations for the algorithm
    """
    copy_var_list_values(fixed_nlp.MindtPy_utils.variable_list,
                         solve_data.working_model.MindtPy_utils.variable_list,
                         config)
    if config.use_dual:
        for c in fixed_nlp.tmp_duals:
            if fixed_nlp.dual.get(c, None) is None:
                fixed_nlp.dual[c] = fixed_nlp.tmp_duals[c]
        dual_values = list(fixed_nlp.dual[c]
                           for c in fixed_nlp.MindtPy_utils.constraint_list)
    else:
        dual_values = None

    main_objective = next(
        fixed_nlp.component_data_objects(Objective, active=True))
    if main_objective.sense == minimize:
        solve_data.UB = min(value(main_objective.expr), solve_data.UB)
        solve_data.solution_improved = solve_data.UB < solve_data.UB_progress[
            -1]
        solve_data.UB_progress.append(solve_data.UB)
    else:
        solve_data.LB = max(value(main_objective.expr), solve_data.LB)
        solve_data.solution_improved = solve_data.LB > solve_data.LB_progress[
            -1]
        solve_data.LB_progress.append(solve_data.LB)

    config.logger.info('NLP {}: OBJ: {}  LB: {}  UB: {}'.format(
        solve_data.nlp_iter, value(main_objective.expr), solve_data.LB,
        solve_data.UB))

    if solve_data.solution_improved:
        solve_data.best_solution_found = fixed_nlp.clone()
        solve_data.best_solution_found_time = get_main_elapsed_time(
            solve_data.timing)
        if config.strategy == 'GOA':
            if solve_data.results.problem.sense == ProblemSense.minimize:
                solve_data.num_no_good_cuts_added.update({
                    solve_data.UB:
                    len(solve_data.mip.MindtPy_utils.MindtPy_linear_cuts.
                        integer_cuts)
                })
            else:
                solve_data.num_no_good_cuts_added.update({
                    solve_data.LB:
                    len(solve_data.mip.MindtPy_utils.MindtPy_linear_cuts.
                        integer_cuts)
                })

    # Add the linear cut
    if config.strategy == 'OA':
        copy_var_list_values(fixed_nlp.MindtPy_utils.variable_list,
                             solve_data.mip.MindtPy_utils.variable_list,
                             config)
        add_oa_cuts(solve_data.mip, dual_values, solve_data, config)
    elif config.strategy == "GOA":
        copy_var_list_values(fixed_nlp.MindtPy_utils.variable_list,
                             solve_data.mip.MindtPy_utils.variable_list,
                             config)
        add_affine_cuts(solve_data, config)
    elif config.strategy == 'PSC':
        # !!THIS SEEMS LIKE A BUG!! - mrmundt #
        add_psc_cut(solve_data, config)
    elif config.strategy == 'GBD':
        # !!THIS SEEMS LIKE A BUG!! - mrmundt #
        add_gbd_cut(solve_data, config)

    # This adds an integer cut to the feasible_integer_cuts
    # ConstraintList, which is not activated by default. However, it
    # may be activated as needed in certain situations or for certain
    # values of option flags.
    var_values = list(v.value for v in fixed_nlp.MindtPy_utils.variable_list)
    if config.add_nogood_cuts:
        add_nogood_cuts(var_values, solve_data, config, feasible=True)

    config.call_after_subproblem_feasible(fixed_nlp, solve_data)
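
The bound update in this handler is simple bookkeeping: a feasible fixed NLP yields a primal bound (an upper bound when minimizing, a lower bound when maximizing), the incumbent bound is kept monotone, and improvement is flagged against the previous entry in the progress list. A minimal standalone sketch of that logic, with a hypothetical Bounds container standing in for the solve_data fields, is:

class Bounds:
    """Hypothetical stand-in for the bound fields MindtPy keeps on solve_data."""
    def __init__(self):
        self.UB = float('inf')
        self.LB = float('-inf')
        self.UB_progress = [float('inf')]
        self.LB_progress = [float('-inf')]
        self.solution_improved = False

def update_bounds(bounds, objective_value, minimizing=True):
    # Mirrors handle_NLP_subproblem_optimal: tighten the primal bound and
    # record whether this iterate improved on the previous one.
    if minimizing:
        bounds.UB = min(objective_value, bounds.UB)
        bounds.solution_improved = bounds.UB < bounds.UB_progress[-1]
        bounds.UB_progress.append(bounds.UB)
    else:
        bounds.LB = max(objective_value, bounds.LB)
        bounds.solution_improved = bounds.LB > bounds.LB_progress[-1]
        bounds.LB_progress.append(bounds.LB)

b = Bounds()
update_bounds(b, 12.5)   # first feasible NLP: UB drops to 12.5, improvement flagged
update_bounds(b, 14.0)   # worse solution: UB stays at 12.5, no improvement
print(b.UB, b.solution_improved)   # 12.5 False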