Example #1
def solve_NLP_feas(solve_data, config):
    m = solve_data.working_model.clone()
    add_feas_slacks(m)
    MindtPy = m.MindtPy_utils
    next(m.component_data_objects(Objective, active=True)).deactivate()
    for constr in m.component_data_objects(
            ctype=Constraint, active=True, descend_into=True):
        constr.deactivate()
    MindtPy.MindtPy_feas.activate()
    MindtPy.MindtPy_feas_obj = Objective(
        expr=sum(s for s in MindtPy.MindtPy_feas.slack_var[...]),
        sense=minimize)
    for v in MindtPy.variable_list:
        if v.is_binary():
            v.fix(int(round(v.value)))
    # m.pprint()  #print nlp feasibility problem for debugging
    with SuppressInfeasibleWarning():
        feas_soln = SolverFactory(config.nlp_solver).solve(
            m, **config.nlp_solver_args)
    subprob_terminate_cond = feas_soln.solver.termination_condition
    if subprob_terminate_cond is tc.optimal:
        copy_var_list_values(
            MindtPy.variable_list,
            solve_data.working_model.MindtPy_utils.variable_list,
            config)
    elif subprob_terminate_cond is tc.infeasible:
        raise ValueError('Feasibility NLP infeasible. '
                         'This should never happen.')
    else:
        raise ValueError(
            'MindtPy unable to handle feasibility NLP termination condition '
            'of {}'.format(subprob_terminate_cond))

    var_values = [v.value for v in MindtPy.variable_list]
    duals = [0 for _ in MindtPy.constraint_list]

    for i, constr in enumerate(MindtPy.constraint_list):
        # TODO rhs only works if constr.upper and constr.lower do not both have values.
        # Sometimes you might have 1 <= expr <= 1. This would give an incorrect rhs of 2.
        rhs = ((0 if constr.upper is None else constr.upper) +
               (0 if constr.lower is None else constr.lower))
        # Compare constr.upper itself to None; calling value(None) raises.
        sign_adjust = 1 if constr.upper is None else -1
        duals[i] = sign_adjust * max(
            0, sign_adjust * (rhs - value(constr.body)))

    if value(MindtPy.MindtPy_feas_obj.expr) == 0:
        raise ValueError(
            'Feasibility NLP found zero slack, yet the fixed NLP was '
            'reported infeasible. Check the NLP solver.')

    return var_values, duals
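
The dual-estimate heuristic above can be exercised in isolation. A minimal sketch, assuming only stock Pyomo; the toy model and variable names are illustrative and not part of MindtPy:

# A minimal sketch of the dual-estimate heuristic above, on a toy model.
# Stock Pyomo only; the model and names here are illustrative.
from pyomo.environ import ConcreteModel, Var, Constraint, value

m = ConcreteModel()
m.x = Var(initialize=2.0)
m.c = Constraint(expr=m.x <= 1.0)  # violated at x = 2

rhs = ((0 if m.c.upper is None else value(m.c.upper)) +
       (0 if m.c.lower is None else value(m.c.lower)))
sign_adjust = 1 if m.c.upper is None else -1
dual_estimate = sign_adjust * max(0, sign_adjust * (rhs - value(m.c.body)))
print(dual_estimate)  # -1.0: the magnitude is the violation of x <= 1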
Example #2
def init_max_binaries(solve_data, config):
    """Initialize by turning on as many binary variables as possible.

    The user would usually want to call _solve_NLP_subproblem after an
    invocation of this function.

    """
    m = solve_data.working_model.clone()
    MindtPy = m.MindtPy_utils
    solve_data.mip_subiter += 1
    config.logger.info(
        "MILP %s: maximize value of binaries" %
        (solve_data.mip_iter))
    for c in MindtPy.constraint_list:
        if c.body.polynomial_degree() not in (1, 0):
            c.deactivate()
    objective = next(m.component_data_objects(Objective, active=True))
    objective.deactivate()
    binary_vars = (
        v for v in m.component_data_objects(ctype=Var)
        if v.is_binary() and not v.fixed)
    MindtPy.MindtPy_max_binary_obj = Objective(
        expr=sum(v for v in binary_vars), sense=maximize)

    getattr(m, 'ipopt_zL_out', _DoNothing()).deactivate()
    getattr(m, 'ipopt_zU_out', _DoNothing()).deactivate()

    results = SolverFactory(config.mip_solver).solve(m, options=config.mip_solver_args)

    solve_terminate_cond = results.solver.termination_condition
    if solve_terminate_cond is tc.optimal:
        copy_var_list_values(
            MindtPy.variable_list,
            solve_data.working_model.MindtPy_utils.variable_list,
            config)
    elif solve_terminate_cond is tc.infeasible:
        raise ValueError(
            'MILP master problem is infeasible. '
            'Problem may have no more feasible '
            'binary configurations.')
    else:
        raise ValueError(
            'MindtPy unable to handle MILP master termination condition '
            'of %s. Solver message: %s' %
            (solve_terminate_cond, results.solver.message))
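
The initialization idea above (deactivate the nonlinear constraints and the original objective, then maximize the sum of the free binaries) looks like this on a self-contained toy model; a sketch with hypothetical names:

from pyomo.environ import (ConcreteModel, Var, Constraint, Objective,
                           Binary, maximize, SolverFactory)

m = ConcreteModel()
m.y = Var([1, 2, 3], domain=Binary)
m.knapsack = Constraint(expr=m.y[1] + 2 * m.y[2] + 3 * m.y[3] <= 4)
# Turn on as many binaries as the linear constraints allow.
m.max_binary_obj = Objective(expr=sum(m.y[i] for i in [1, 2, 3]),
                             sense=maximize)
# SolverFactory('glpk').solve(m)  # any MILP solver, if one is installed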
Example #3
    def handle_lazy_subproblem_optimal(self, fixed_nlp, solve_data, config,
                                       opt):
        """This function copies the optimal solution of the fixed NLP subproblem to the MIP
        main problem(explanation see below), updates bound, adds OA and no-good cuts, 
        stores incumbent solution if it has been improved.

        Parameters
        ----------
        fixed_nlp : Pyomo model
            Integer-variable-fixed NLP model.
        solve_data : MindtPySolveData
            Data container that holds solve-instance data.
        config : ConfigBlock
            The specific configurations for MindtPy.
        opt : SolverFactory
            The cplex_persistent solver.
        """
        if config.calculate_dual:
            for c in fixed_nlp.tmp_duals:
                if fixed_nlp.dual.get(c, None) is None:
                    fixed_nlp.dual[c] = fixed_nlp.tmp_duals[c]
            dual_values = list(
                fixed_nlp.dual[c]
                for c in fixed_nlp.MindtPy_utils.constraint_list)
        else:
            dual_values = None
        main_objective = fixed_nlp.MindtPy_utils.objective_list[-1]
        update_primal_bound(solve_data, value(main_objective.expr))
        if solve_data.solution_improved:
            solve_data.best_solution_found = fixed_nlp.clone()
            solve_data.best_solution_found_time = get_main_elapsed_time(
                solve_data.timing)
            if config.add_no_good_cuts or config.use_tabu_list:
                if solve_data.results.problem.sense == ProblemSense.minimize:
                    solve_data.stored_bound.update(
                        {solve_data.UB: solve_data.LB})
                else:
                    solve_data.stored_bound.update(
                        {solve_data.LB: solve_data.UB})
        config.logger.info(
            solve_data.fixed_nlp_log_formatter.format(
                '*' if solve_data.solution_improved else ' ',
                solve_data.nlp_iter, 'Fixed NLP', value(main_objective.expr),
                solve_data.LB, solve_data.UB, solve_data.rel_gap,
                get_main_elapsed_time(solve_data.timing)))

        # In OA algorithm, OA cuts are generated based on the solution of the subproblem
        # We need to first copy the value of variables from the subproblem and then add cuts
        # since value(constr.body), value(jacs[constr][var]), value(var) are used in self.add_lazy_oa_cuts()
        copy_var_list_values(fixed_nlp.MindtPy_utils.variable_list,
                             solve_data.mip.MindtPy_utils.variable_list,
                             config)
        if config.strategy == 'OA':
            self.add_lazy_oa_cuts(solve_data.mip, dual_values, solve_data,
                                  config, opt)
            if config.add_regularization is not None:
                add_oa_cuts(solve_data.mip, dual_values, solve_data, config)
        elif config.strategy == 'GOA':
            self.add_lazy_affine_cuts(solve_data, config, opt)
        if config.add_no_good_cuts:
            var_values = list(v.value
                              for v in fixed_nlp.MindtPy_utils.variable_list)
            self.add_lazy_no_good_cuts(var_values, solve_data, config, opt)
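
The comment above notes that variable values must be copied before cut generation, because value() evaluates expressions at the current variable values. A minimal stock-Pyomo illustration of that dependency:

from pyomo.environ import ConcreteModel, Var, value

m = ConcreteModel()
m.x = Var(initialize=1.0)
expr = m.x ** 2
print(value(expr))   # 1.0, evaluated at the current point x = 1
m.x.set_value(3.0)   # analogous to copy_var_list_values(...) above
print(value(expr))   # 9.0: the same expression object, at the new point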
Example #4
    def solve(self, model, **kwds):
        """Solve the model.
        Warning: this solver is still in beta. Keyword arguments subject to
        change. Undocumented keyword arguments definitely subject to change.
        Warning: at this point in time, if you try to use PSC or GBD with
        anything other than IPOPT as the NLP solver, bad things will happen.
        This is because the suffixes are not in place to extract dual values
        from the variable bounds for any other solver.
        TODO: fix needed with the GBD implementation.
        Args:
            model (Block): a Pyomo model or block to be solved
        """
        config = self.CONFIG(kwds.pop('options', {}))
        config.set_value(kwds)
        solve_data = MindtPySolveData()
        solve_data.results = SolverResults()
        solve_data.timing = Container()

        old_logger_level = config.logger.getEffectiveLevel()
        with time_code(solve_data.timing, 'total', is_main_timer=True), \
             restore_logger_level(config.logger), \
             create_utility_block(model, 'MindtPy_utils', solve_data):
            if config.tee and old_logger_level > logging.INFO:
                # If the logger does not already include INFO, include it.
                config.logger.setLevel(logging.INFO)
            config.logger.info("---Starting MindtPy---")

            solve_data.original_model = model
            solve_data.working_model = model.clone()
            MindtPy = solve_data.working_model.MindtPy_utils
            setup_results_object(solve_data, config)
            process_objective(solve_data, config)

            # Save model initial values.
            solve_data.initial_var_values = list(
                v.value for v in MindtPy.variable_list)

            # Store the initial model state as the best solution found. If we
            # find no better solution, then we will restore from this copy.
            solve_data.best_solution_found = None

            # Record solver name
            solve_data.results.solver.name = 'MindtPy' + str(config.strategy)

            # Validate the model to ensure that MindtPy is able to solve it.
            if not model_is_valid(solve_data, config):
                return

            # Create a model block in which to store the generated feasibility
            # slack constraints. Do not leave the constraints on by default.
            feas = MindtPy.MindtPy_feas = Block()
            feas.deactivate()
            feas.feas_constraints = ConstraintList(
                doc='Feasibility Problem Constraints')

            # Create a model block in which to store the generated linear
            # constraints. Do not leave the constraints on by default.
            lin = MindtPy.MindtPy_linear_cuts = Block()
            lin.deactivate()

            # Integer cuts exclude particular discrete decisions
            lin.integer_cuts = ConstraintList(doc='integer cuts')
            # Feasible integer cuts exclude discrete realizations that have
            # been explored via an NLP subproblem. Depending on model
            # characteristics, the user may wish to revisit NLP subproblems
            # (with a different initialization, for example). Therefore, these
            # cuts are not enabled by default.
            #
            # Note: these cuts will only exclude integer realizations that are
            # not already in the primary integer_cuts ConstraintList.
            lin.feasible_integer_cuts = ConstraintList(
                doc='explored integer cuts')
            lin.feasible_integer_cuts.deactivate()

            # Set up iteration counters
            solve_data.nlp_iter = 0
            solve_data.mip_iter = 0
            solve_data.mip_subiter = 0

            # set up bounds
            solve_data.LB = float('-inf')
            solve_data.UB = float('inf')
            solve_data.LB_progress = [solve_data.LB]
            solve_data.UB_progress = [solve_data.UB]

            # Set of NLP iterations for which cuts were generated
            lin.nlp_iters = Set(dimen=1)

            # Set of MIP iterations for which cuts were generated in ECP
            lin.mip_iters = Set(dimen=1)

            nonlinear_constraints = [
                c for c in MindtPy.constraint_list
                if c.body.polynomial_degree() not in (1, 0)
            ]
            lin.nl_constraint_set = RangeSet(
                len(nonlinear_constraints),
                doc="Integer index set over the nonlinear constraints")
            feas.constraint_set = RangeSet(
                len(MindtPy.constraint_list),
                doc="integer index set over the constraints")

            # # Mapping Constraint -> integer index
            # MindtPy.feas_map = {}
            # # Mapping integer index -> Constraint
            # MindtPy.feas_inverse_map = {}
            # # Generate the two maps. These maps may be helpful for later
            # # interpreting indices on the slack variables or generated cuts.
            # for c, n in zip(MindtPy.constraint_list, feas.constraint_set):
            #     MindtPy.feas_map[c] = n
            #     MindtPy.feas_inverse_map[n] = c

            # Create slack variables for OA cuts
            lin.slack_vars = VarList(bounds=(0, config.max_slack),
                                     initialize=0,
                                     domain=NonNegativeReals)
            # Create slack variables for feasibility problem
            feas.slack_var = Var(feas.constraint_set,
                                 domain=NonNegativeReals,
                                 initialize=1)

            # Flag indicating whether the solution improved in the past
            # iteration or not
            solve_data.solution_improved = False

            if not hasattr(solve_data.working_model, 'ipopt_zL_out'):
                solve_data.working_model.ipopt_zL_out = Suffix(
                    direction=Suffix.IMPORT)
            if not hasattr(solve_data.working_model, 'ipopt_zU_out'):
                solve_data.working_model.ipopt_zU_out = Suffix(
                    direction=Suffix.IMPORT)

            # Initialize the master problem
            with time_code(solve_data.timing, 'initialization'):
                MindtPy_initialize_master(solve_data, config)

            # Algorithm main loop
            with time_code(solve_data.timing, 'main loop'):
                MindtPy_iteration_loop(solve_data, config)

            if solve_data.best_solution_found is not None:
                # Update values in original model
                copy_var_list_values(
                    from_list=solve_data.best_solution_found.MindtPy_utils.variable_list,
                    to_list=MindtPy.variable_list,
                    config=config)
                # MindtPy.objective_value.set_value(
                #     value(solve_data.working_objective_expr, exception=False))
                copy_var_list_values(
                    MindtPy.variable_list,
                    solve_data.original_model.MindtPy_utils.variable_list,
                    config)

            solve_data.results.problem.lower_bound = solve_data.LB
            solve_data.results.problem.upper_bound = solve_data.UB

        solve_data.results.solver.timing = solve_data.timing
        solve_data.results.solver.user_time = solve_data.timing.total
        solve_data.results.solver.wallclock_time = solve_data.timing.total

        solve_data.results.solver.iterations = solve_data.mip_iter

        return solve_data.results
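
A hedged usage sketch for the solve() entry point above, assuming MindtPy is installed and registered with Pyomo's SolverFactory; the toy model is hypothetical, and the solve call is left commented since solver availability varies:

from pyomo.environ import (ConcreteModel, Var, Constraint, Objective,
                           Binary, SolverFactory)

model = ConcreteModel()
model.x = Var(bounds=(0, 10), initialize=1)
model.y = Var(domain=Binary)
model.c = Constraint(expr=model.x ** 2 + model.y >= 4)
model.obj = Objective(expr=model.x + model.y)

# results = SolverFactory('mindtpy').solve(
#     model, strategy='OA', mip_solver='glpk', nlp_solver='ipopt', tee=True)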
Example #5
def _solve_local_rnGDP_subproblem(model, solve_data):
    # TODO for now, return (LB, UB) = (-inf, inf) (for minimize)
    config = solve_data.config
    subproblem = TransformationFactory('gdp.bigm').create_using(model)
    obj_sense_correction = solve_data.objective_sense != minimize

    try:
        with SuppressInfeasibleWarning():
            result = SolverFactory(config.local_minlp_solver).solve(
                subproblem, **config.local_minlp_solver_args)
    except RuntimeError as e:
        config.logger.warning(
            "Solver encountered RuntimeError. Treating as infeasible. "
            "Msg: %s\n%s" % (str(e), traceback.format_exc()))
        copy_var_list_values(  # copy variable values, even if errored
            from_list=subproblem.GDPopt_utils.variable_list,
            to_list=model.GDPopt_utils.variable_list,
            config=config,
            ignore_integrality=True)
        return float('-inf'), float('inf')

    term_cond = result.solver.termination_condition
    if term_cond == tc.optimal:
        assert result.solver.status is SolverStatus.ok
        # lb is computed but deliberately not returned below: a local MINLP
        # solve provides no valid global lower bound (see the TODO above).
        lb = result.problem.lower_bound if not obj_sense_correction else \
             -result.problem.upper_bound
        ub = result.problem.upper_bound if not obj_sense_correction else \
             -result.problem.lower_bound
        copy_var_list_values(
            from_list=subproblem.GDPopt_utils.variable_list,
            to_list=model.GDPopt_utils.variable_list,
            config=config,
        )
        return float('-inf'), ub
    elif term_cond == tc.locallyOptimal or term_cond == tc.feasible:
        assert result.solver.status is SolverStatus.ok
        lb = result.problem.lower_bound if not obj_sense_correction else \
             -result.problem.upper_bound
        ub = result.problem.upper_bound if not obj_sense_correction else \
             -result.problem.lower_bound
        # TODO handle LB absent
        copy_var_list_values(
            from_list=subproblem.GDPopt_utils.variable_list,
            to_list=model.GDPopt_utils.variable_list,
            config=config,
        )
        return float('-inf'), ub
    elif term_cond == tc.unbounded:
        copy_var_list_values(from_list=subproblem.GDPopt_utils.variable_list,
                             to_list=model.GDPopt_utils.variable_list,
                             config=config,
                             ignore_integrality=True)
        return float('-inf'), float('-inf')
    elif term_cond == tc.infeasible:
        copy_var_list_values(from_list=subproblem.GDPopt_utils.variable_list,
                             to_list=model.GDPopt_utils.variable_list,
                             config=config,
                             ignore_integrality=True)
        return float('-inf'), float('inf')
    else:
        config.logger.warning("Unknown termination condition of %s. "
                              "Treating as infeasible." % term_cond)
        copy_var_list_values(from_list=subproblem.GDPopt_utils.variable_list,
                             to_list=model.GDPopt_utils.variable_list,
                             config=config,
                             ignore_integrality=True)
        return float('-inf'), float('inf')
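
The subproblem above is created with the 'gdp.bigm' transformation. A minimal, self-contained sketch of that step; the disjunction contents are hypothetical:

from pyomo.environ import ConcreteModel, Var, Objective, TransformationFactory
from pyomo.gdp import Disjunction

model = ConcreteModel()
model.x = Var(bounds=(0, 10))
model.choice = Disjunction(expr=[[model.x <= 2], [model.x >= 8]])
model.obj = Objective(expr=model.x)

# create_using returns a big-M reformulated copy and leaves `model` intact,
# mirroring the first line of _solve_local_rnGDP_subproblem above.
subproblem = TransformationFactory('gdp.bigm').create_using(model)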
Example #6
def MindtPy_iteration_loop(solve_data, config):
    """
    Main loop for MindtPy Algorithms

    This is the outermost function for the algorithms in this package; this function controls the progression of
    solving the model.

    Parameters
    ----------
    solve_data: MindtPy Data Container
        data container that holds solve-instance data
    config: ConfigBlock
        contains the specific configurations for the algorithm
    """
    last_iter_cuts = False
    while solve_data.mip_iter < config.iteration_limit:

        config.logger.info(
            '---MindtPy main Iteration %s---'
            % (solve_data.mip_iter+1))

        solve_data.mip_subiter = 0
        # solve MILP main problem
        if config.strategy in {'OA', 'GOA', 'ECP'}:
            main_mip, main_mip_results = solve_main(solve_data, config)
            if main_mip_results is not None:
                if not config.single_tree:
                    if main_mip_results.solver.termination_condition is tc.optimal:
                        handle_main_optimal(main_mip, solve_data, config)
                    elif main_mip_results.solver.termination_condition is tc.infeasible:
                        handle_main_infeasible(main_mip, solve_data, config)
                        last_iter_cuts = True
                        break
                    else:
                        handle_main_other_conditions(
                            main_mip, main_mip_results, solve_data, config)
                    # Call the MILP post-solve callback
                    with time_code(solve_data.timing, 'Call after main solve'):
                        config.call_after_main_solve(main_mip, solve_data)
            else:
                config.logger.info('Algorithm should terminate here.')
                break
        else:
            raise NotImplementedError()

        # regularization is activated after the first feasible solution is found.
        if config.add_regularization is not None and solve_data.best_solution_found is not None and not config.single_tree:
            # The main problem might be unbounded, so regularization is only
            # activated when a valid bound is available.
            if (solve_data.objective_sense == minimize and solve_data.LB != float('-inf')) or (solve_data.objective_sense == maximize and solve_data.UB != float('inf')):
                main_mip, main_mip_results = solve_main(
                    solve_data, config, regularization_problem=True)
                handle_regularization_main_tc(
                    main_mip, main_mip_results, solve_data, config)
        if config.add_regularization is not None and config.single_tree:
            solve_data.curr_int_sol = get_integer_solution(
                solve_data.mip, string_zero=True)
            copy_var_list_values(
                main_mip.MindtPy_utils.variable_list,
                solve_data.working_model.MindtPy_utils.variable_list,
                config)
            if solve_data.curr_int_sol not in set(solve_data.integer_list):
                fixed_nlp, fixed_nlp_result = solve_subproblem(
                    solve_data, config)
                handle_nlp_subproblem_tc(
                    fixed_nlp, fixed_nlp_result, solve_data, config)

        if algorithm_should_terminate(solve_data, config, check_cycling=True):
            last_iter_cuts = False
            break

        if not config.single_tree and config.strategy != 'ECP':  # if we don't use lazy callback, i.e. LP_NLP
            # Solve NLP subproblem
            # The constraint linearization happens in the handlers
            fixed_nlp, fixed_nlp_result = solve_subproblem(solve_data, config)
            handle_nlp_subproblem_tc(
                fixed_nlp, fixed_nlp_result, solve_data, config)

            # Call the NLP post-solve callback
            with time_code(solve_data.timing, 'Call after subproblem solve'):
                config.call_after_subproblem_solve(fixed_nlp, solve_data)

        if algorithm_should_terminate(solve_data, config, check_cycling=False):
            last_iter_cuts = True
            break

        if config.strategy == 'ECP':
            add_ecp_cuts(solve_data.mip, solve_data, config)

        # if config.strategy == 'PSC':
        #     # If the hybrid algorithm is not making progress, switch to OA.
        #     progress_required = 1E-6
        #     if solve_data.objective_sense == minimize:
        #         log = solve_data.LB_progress
        #         sign_adjust = 1
        #     else:
        #         log = solve_data.UB_progress
        #         sign_adjust = -1
        #     # Maximum number of iterations in which the lower (optimistic)
        #     # bound does not improve before switching to OA
        #     max_nonimprove_iter = 5
        #     making_progress = True
        #     # TODO-romeo Unneccesary for OA and ROA, right?
        #     for i in range(1, max_nonimprove_iter + 1):
        #         try:
        #             if (sign_adjust * log[-i]
        #                     <= (log[-i - 1] + progress_required)
        #                     * sign_adjust):
        #                 making_progress = False
        #             else:
        #                 making_progress = True
        #                 break
        #         except IndexError:
        #             # Not enough history yet, keep going.
        #             making_progress = True
        #             break
        #     if not making_progress and (
        #             config.strategy == 'hPSC' or
        #             config.strategy == 'PSC'):
        #         config.logger.info(
        #             'Not making enough progress for {} iterations. '
        #             'Switching to OA.'.format(max_nonimprove_iter))
        #         config.strategy = 'OA'

    # If add_no_good_cuts is True, the bound obtained in the last iteration is
    # not reliable; we correct it after the loop.
    if (config.add_no_good_cuts or config.use_tabu_list) and config.strategy != 'FP' and not solve_data.should_terminate and config.add_regularization is None:
        bound_fix(solve_data, config, last_iter_cuts)
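
algorithm_should_terminate() above checks, among other things, whether the bounds have closed. Schematically, a combined absolute/relative gap test looks like the sketch below; this is an illustration, not MindtPy's actual helper:

def bounds_converged(LB, UB, rel_tol=1e-4, abs_tol=1e-4):
    """Return True once the lower/upper bounds have closed."""
    if UB - LB <= abs_tol:
        return True
    # Relative gap, guarded against division by zero at UB == 0.
    return abs(UB - LB) <= rel_tol * max(abs(UB), 1e-10)

assert bounds_converged(LB=9.9999, UB=10.0)
assert not bounds_converged(LB=float('-inf'), UB=10.0)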
Example #7
def solve_NLP_subproblem(solve_data, config):
    m = solve_data.working_model.clone()
    MindtPy = m.MindtPy_utils
    main_objective = next(m.component_data_objects(Objective, active=True))
    solve_data.nlp_iter += 1
    config.logger.info('NLP %s: Solve subproblem for fixed binaries.' %
                       (solve_data.nlp_iter, ))
    # Set up NLP
    for v in MindtPy.variable_list:
        if v.is_binary():
            v.fix(int(round(value(v))))

    # restore original variable values
    for nlp_var, orig_val in zip(MindtPy.variable_list,
                                 solve_data.initial_var_values):
        if not nlp_var.fixed and not nlp_var.is_binary():
            nlp_var.value = orig_val

    MindtPy.MindtPy_linear_cuts.deactivate()
    m.tmp_duals = ComponentMap()
    for c in m.component_data_objects(ctype=Constraint,
                                      active=True,
                                      descend_into=True):
        rhs = ((0 if c.upper is None else c.upper) +
               (0 if c.lower is None else c.lower))
        # Compare c.upper itself to None; calling value(None) raises.
        sign_adjust = 1 if c.upper is None else -1
        m.tmp_duals[c] = sign_adjust * max(0,
                                           sign_adjust * (rhs - value(c.body)))
        # TODO check sign_adjust
    t = TransformationFactory('contrib.deactivate_trivial_constraints')
    t.apply_to(m, tmp=True, ignore_infeasible=True)
    # Solve the NLP
    # m.pprint() # print nlp problem for debugging
    with SuppressInfeasibleWarning():
        results = SolverFactory(config.nlp_solver).solve(
            m, **config.nlp_solver_args)
    var_values = list(v.value for v in MindtPy.variable_list)
    subprob_terminate_cond = results.solver.termination_condition
    if subprob_terminate_cond is tc.optimal:
        copy_var_list_values(
            m.MindtPy_utils.variable_list,
            solve_data.working_model.MindtPy_utils.variable_list, config)
        for c in m.tmp_duals:
            if m.dual.get(c, None) is None:
                m.dual[c] = m.tmp_duals[c]
        duals = list(m.dual[c] for c in MindtPy.constraint_list)
        if main_objective.sense == minimize:
            solve_data.UB = min(value(main_objective.expr), solve_data.UB)
            solve_data.solution_improved = solve_data.UB < solve_data.UB_progress[-1]
            solve_data.UB_progress.append(solve_data.UB)
        else:
            solve_data.LB = max(value(main_objective.expr), solve_data.LB)
            solve_data.solution_improved = solve_data.LB > solve_data.LB_progress[-1]
            solve_data.LB_progress.append(solve_data.LB)
        config.logger.info('NLP {}: OBJ: {}  LB: {}  UB: {}'.format(
            solve_data.nlp_iter, value(main_objective.expr), solve_data.LB,
            solve_data.UB))
        if solve_data.solution_improved:
            solve_data.best_solution_found = m.clone()
        # Add the linear cut
        if config.strategy == 'OA':
            add_oa_cut(var_values, duals, solve_data, config)
        elif config.strategy == 'PSC':
            add_psc_cut(solve_data, config)
        elif config.strategy == 'GBD':
            add_gbd_cut(solve_data, config)

        # This adds an integer cut to the feasible_integer_cuts
        # ConstraintList, which is not activated by default. However, it
        # may be activated as needed in certain situations or for certain
        # values of option flags.
        add_int_cut(var_values, solve_data, config, feasible=True)

        config.call_after_subproblem_feasible(m, solve_data)
    elif subprob_terminate_cond is tc.infeasible:
        # TODO try something else? Reinitialize with different initial
        # value?
        config.logger.info('NLP subproblem was locally infeasible.')
        for c in m.component_data_objects(ctype=Constraint,
                                          active=True,
                                          descend_into=True):
            rhs = ((0 if c.upper is None else c.upper) +
                   (0 if c.lower is None else c.lower))
            sign_adjust = 1 if c.upper is None else -1
            m.dual[c] = sign_adjust * max(0, sign_adjust *
                                          (rhs - value(c.body)))
        for var in m.component_data_objects(ctype=Var, descend_into=True):
            if config.strategy in {'PSC', 'GBD'}:
                m.ipopt_zL_out[var] = 0
                m.ipopt_zU_out[var] = 0
                if var.ub is not None and abs(
                        var.ub - value(var)) < config.bound_tolerance:
                    m.ipopt_zL_out[var] = 1
                elif var.lb is not None and abs(
                        value(var) - var.lb) < config.bound_tolerance:
                    m.ipopt_zU_out[var] = -1
        # m.pprint() #print infeasible nlp problem for debugging
        if config.strategy == 'OA':
            config.logger.info('Solving feasibility problem')
            if config.initial_feas:
                # add_feas_slacks(m, solve_data)
                # config.initial_feas = False
                var_values, duals = solve_NLP_feas(solve_data, config)
                add_oa_cut(var_values, duals, solve_data, config)
        # Add an integer cut to exclude this discrete option
        add_int_cut(var_values, solve_data, config)
    elif subprob_terminate_cond is tc.maxIterations:
        # TODO try something else? Reinitialize with different initial
        # value?
        config.logger.info(
            'NLP subproblem failed to converge within iteration limit.')
        # Add an integer cut to exclude this discrete option
        add_int_cut(var_values, solve_data, config)
    else:
        raise ValueError('MindtPy unable to handle NLP subproblem termination '
                         'condition of {}'.format(subprob_terminate_cond))

    # Call the NLP post-solve callback
    config.call_after_subproblem_solve(m, solve_data)
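
The round-and-fix pattern at the top of the function can be shown in isolation. A sketch assuming stock Pyomo; the skip_validation argument requires Pyomo >= 6.0:

from pyomo.environ import ConcreteModel, Var, Binary

m = ConcreteModel()
m.y = Var(domain=Binary)
m.y.set_value(0.9999, skip_validation=True)  # fractional value, e.g. from a MIP
m.y.fix(int(round(m.y.value)))  # round, then fix for the NLP subproblem
assert m.y.fixed and m.y.value == 1
m.y.unfix()  # release before the next iteration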
Example #8
def init_max_binaries(solve_data, config):
    """
    Modifies model by maximizing the number of activated binary variables

    Note - The user would usually want to call solve_subproblem after an
    invocation of this function.

    Parameters
    ----------
    solve_data: MindtPy Data Container
        data container that holds solve-instance data
    config: ConfigBlock
        contains the specific configurations for the algorithm
    """
    m = solve_data.working_model.clone()
    if config.calculate_dual:
        m.dual.deactivate()
    MindtPy = m.MindtPy_utils
    solve_data.mip_subiter += 1
    config.logger.info('MILP %s: maximize value of binaries' %
                       (solve_data.mip_iter))
    for c in MindtPy.nonlinear_constraint_list:
        c.deactivate()
    objective = next(m.component_data_objects(Objective, active=True))
    objective.deactivate()
    binary_vars = (v for v in m.component_data_objects(ctype=Var)
                   if v.is_binary() and not v.fixed)
    MindtPy.MindtPy_max_binary_obj = Objective(
        expr=sum(v for v in binary_vars), sense=maximize)

    getattr(m, 'ipopt_zL_out', _DoNothing()).deactivate()
    getattr(m, 'ipopt_zU_out', _DoNothing()).deactivate()

    mipopt = SolverFactory(config.mip_solver)
    if isinstance(mipopt, PersistentSolver):
        mipopt.set_instance(m)
    mip_args = dict(config.mip_solver_args)
    set_solver_options(mipopt, solve_data, config, solver_type='mip')
    results = mipopt.solve(m, tee=config.mip_solver_tee, **mip_args)

    solve_terminate_cond = results.solver.termination_condition
    if solve_terminate_cond is tc.optimal:
        copy_var_list_values(
            MindtPy.variable_list,
            solve_data.working_model.MindtPy_utils.variable_list, config)
    elif solve_terminate_cond is tc.infeasible:
        raise ValueError('MILP main problem is infeasible. '
                         'Problem may have no more feasible '
                         'binary configurations.')
    elif solve_terminate_cond is tc.maxTimeLimit:
        config.logger.info(
            'MILP main problem failed to converge within time limit.')
        solve_data.results.solver.termination_condition = tc.maxTimeLimit
    elif solve_terminate_cond is tc.maxIterations:
        config.logger.info(
            'MILP main problem failed to converge within iteration limit.')
    else:
        raise ValueError(
            'MindtPy unable to handle MILP main termination condition '
            'of %s. Solver message: %s' %
            (solve_terminate_cond, results.solver.message))
Example #9
    def solve(self, model, **kwds):
        """Solve the model.

        Warning: this solver is still in beta. Keyword arguments subject to
        change. Undocumented keyword arguments definitely subject to change.

        Args:
            model (Block): a Pyomo model or block to be solved
        """
        config = self.CONFIG(kwds.pop('options', {}))
        config.set_value(kwds)

        solve_data = MindtPySolveData()
        solve_data.results = SolverResults()
        solve_data.timing = Bunch()
        solve_data.curr_int_sol = []
        solve_data.should_terminate = False
        solve_data.integer_list = []

        check_config(config)

        # if the objective function is a constant, dual bound constraint is not added.
        obj = next(model.component_data_objects(ctype=Objective, active=True))
        if obj.expr.polynomial_degree() == 0:
            config.use_dual_bound = False

        if config.use_fbbt:
            fbbt(model)
            # TODO: logging_level is not logging.INFO here
            config.logger.info(
                'Using FBBT to tighten variable bounds.')

        solve_data.original_model = model
        solve_data.working_model = model.clone()
        if config.integer_to_binary:
            TransformationFactory('contrib.integer_to_binary'). \
                apply_to(solve_data.working_model)

        new_logging_level = logging.INFO if config.tee else None
        with time_code(solve_data.timing, 'total', is_main_timer=True), \
                lower_logger_level_to(config.logger, new_logging_level), \
                create_utility_block(solve_data.working_model, 'MindtPy_utils', solve_data):
            config.logger.info('---Starting MindtPy---')

            MindtPy = solve_data.working_model.MindtPy_utils
            setup_results_object(solve_data, config)
            process_objective(
                solve_data,
                config,
                move_linear_objective=(config.init_strategy == 'FP' or
                                       config.add_regularization is not None),
                use_mcpp=config.use_mcpp,
                update_var_con_list=config.add_regularization is None)
            # The epigraph constraint is very "flat" for branching rules,
            # we want to use to original model for the main mip.
            if MindtPy.objective_list[0].expr.polynomial_degree() in {
                    1, 0
            } and config.add_regularization is not None:
                MindtPy.objective_list[0].activate()
                MindtPy.objective_constr.deactivate()
                MindtPy.objective.deactivate()

            # Save model initial values.
            solve_data.initial_var_values = list(
                v.value for v in MindtPy.variable_list)

            # Store the initial model state as the best solution found. If we
            # find no better solution, then we will restore from this copy.
            solve_data.best_solution_found = None
            solve_data.best_solution_found_time = None

            # Record solver name
            solve_data.results.solver.name = 'MindtPy' + str(config.strategy)

            # Validate the model to ensure that MindtPy is able to solve it.
            if not model_is_valid(solve_data, config):
                return

            # Create a model block in which to store the generated feasibility
            # slack constraints. Do not leave the constraints on by default.
            feas = MindtPy.feas_opt = Block()
            feas.deactivate()
            feas.feas_constraints = ConstraintList(
                doc='Feasibility Problem Constraints')

            # Create a model block in which to store the generated linear
            # constraints. Do not leave the constraints on by default.
            lin = MindtPy.cuts = Block()
            lin.deactivate()

            # no-good cuts exclude particular discrete decisions
            lin.no_good_cuts = ConstraintList(doc='no-good cuts')
            # Feasible no-good cuts exclude discrete realizations that have
            # been explored via an NLP subproblem. Depending on model
            # characteristics, the user may wish to revisit NLP subproblems
            # (with a different initialization, for example). Therefore, these
            # cuts are not enabled by default.
            #
            # Note: these cuts will only exclude integer realizations that are
            # not already in the primary no_good_cuts ConstraintList.
            lin.feasible_no_good_cuts = ConstraintList(
                doc='explored no-good cuts')
            lin.feasible_no_good_cuts.deactivate()

            # Set up iteration counters
            solve_data.nlp_iter = 0
            solve_data.mip_iter = 0
            solve_data.mip_subiter = 0
            solve_data.nlp_infeasible_counter = 0
            if config.init_strategy == 'FP':
                solve_data.fp_iter = 1

            # set up bounds
            solve_data.LB = float('-inf')
            solve_data.UB = float('inf')
            solve_data.LB_progress = [solve_data.LB]
            solve_data.UB_progress = [solve_data.UB]
            if config.single_tree and (config.add_no_good_cuts
                                       or config.use_tabu_list):
                solve_data.stored_bound = {}
            if config.strategy == 'GOA' and (config.add_no_good_cuts
                                             or config.use_tabu_list):
                solve_data.num_no_good_cuts_added = {}

            # Set of NLP iterations for which cuts were generated
            lin.nlp_iters = Set(dimen=1)

            # Set of MIP iterations for which cuts were generated in ECP
            lin.mip_iters = Set(dimen=1)

            if config.feasibility_norm in {'L1', 'L2'}:
                feas.nl_constraint_set = RangeSet(
                    len(MindtPy.nonlinear_constraint_list),
                    doc='Integer index set over the nonlinear constraints.')
                # Create slack variables for feasibility problem
                feas.slack_var = Var(feas.nl_constraint_set,
                                     domain=NonNegativeReals,
                                     initialize=1)
            else:
                feas.slack_var = Var(domain=NonNegativeReals, initialize=1)

            # Create slack variables for OA cuts
            if config.add_slack:
                lin.slack_vars = VarList(bounds=(0, config.max_slack),
                                         initialize=0,
                                         domain=NonNegativeReals)

            # Flag indicating whether the solution improved in the past
            # iteration or not
            solve_data.solution_improved = False
            solve_data.bound_improved = False

            if config.nlp_solver == 'ipopt':
                if not hasattr(solve_data.working_model, 'ipopt_zL_out'):
                    solve_data.working_model.ipopt_zL_out = Suffix(
                        direction=Suffix.IMPORT)
                if not hasattr(solve_data.working_model, 'ipopt_zU_out'):
                    solve_data.working_model.ipopt_zU_out = Suffix(
                        direction=Suffix.IMPORT)

            # Initialize the main problem
            with time_code(solve_data.timing, 'initialization'):
                MindtPy_initialize_main(solve_data, config)

            # Algorithm main loop
            with time_code(solve_data.timing, 'main loop'):
                MindtPy_iteration_loop(solve_data, config)
            if solve_data.best_solution_found is not None:
                # Update values in original model
                copy_var_list_values(
                    from_list=solve_data.best_solution_found.MindtPy_utils.variable_list,
                    to_list=MindtPy.variable_list,
                    config=config)
                copy_var_list_values(MindtPy.variable_list, [
                    i
                    for i in solve_data.original_model.component_data_objects(
                        Var) if not i.fixed
                ], config)
                # exclude fixed variables here. This is consistent with the definition of variable_list in GDPopt.util

            solve_data.results.problem.lower_bound = solve_data.LB
            solve_data.results.problem.upper_bound = solve_data.UB

        solve_data.results.solver.timing = solve_data.timing
        solve_data.results.solver.user_time = solve_data.timing.total
        solve_data.results.solver.wallclock_time = solve_data.timing.total
        solve_data.results.solver.iterations = solve_data.mip_iter
        solve_data.results.solver.num_infeasible_nlp_subproblem = solve_data.nlp_infeasible_counter
        solve_data.results.solver.best_solution_found_time = solve_data.best_solution_found_time

        if config.single_tree:
            solve_data.results.solver.num_nodes = solve_data.nlp_iter - \
                (1 if config.init_strategy == 'rNLP' else 0)

        return solve_data.results
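
The no_good_cuts ConstraintList above holds cuts that exclude visited 0/1 points. The standard construction for such a cut, sketched on a hypothetical toy model (this is the textbook form, not MindtPy's internal helper):

from pyomo.environ import ConcreteModel, Var, Binary, ConstraintList

m = ConcreteModel()
m.y = Var([1, 2, 3], domain=Binary, initialize={1: 1, 2: 0, 3: 1})
m.no_good_cuts = ConstraintList(doc='no-good cuts')

# Exclude the current point (1, 0, 1): at least one variable must flip.
ones = [v for v in m.y.values() if round(v.value) == 1]
zeros = [v for v in m.y.values() if round(v.value) == 0]
m.no_good_cuts.add(sum(1 - v for v in ones) + sum(v for v in zeros) >= 1)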
Example #10
    def solve(self, model, **kwds):
        """Solve the model.

        Parameters
        ----------
        model : Pyomo model
            The MINLP model to be solved.

        Returns
        -------
        results : SolverResults
            Results from solving the MINLP problem by MindtPy.
        """
        config = self.CONFIG(
            kwds.pop('options', {}), preserve_implicit=True
        )  # TODO: do we need to set preserve_implicit=True?
        config.set_value(kwds)
        set_up_logger(config)
        check_config(config)

        solve_data = set_up_solve_data(model, config)

        if config.integer_to_binary:
            TransformationFactory('contrib.integer_to_binary'). \
                apply_to(solve_data.working_model)

        new_logging_level = logging.INFO if config.tee else None
        with time_code(solve_data.timing, 'total', is_main_timer=True), \
                lower_logger_level_to(config.logger, new_logging_level), \
                create_utility_block(solve_data.working_model, 'MindtPy_utils', solve_data):
            config.logger.info(
                '---------------------------------------------------------------------------------------------\n'
                '              Mixed-Integer Nonlinear Decomposition Toolbox in Pyomo (MindtPy)               \n'
                '---------------------------------------------------------------------------------------------\n'
                'For more information, please visit https://pyomo.readthedocs.io/en/stable/contributed_packages/mindtpy.html'
            )

            MindtPy = solve_data.working_model.MindtPy_utils
            setup_results_object(solve_data, config)
            # In process_objective, a nonlinear objective is always
            # reformulated as an epigraph constraint, and the
            # variable/constraint/objective lists are updated accordingly.
            # For OA/GOA/LP-NLP, a linear objective is left as-is; it is
            # epigraph-reformulated only when the Feasibility Pump or the
            # ROA/RLP-NLP algorithm is active (move_linear_objective=True).
            # Even then, the lists are not always updated. The Feasibility
            # Pump is unaffected either way, since its distance calculation
            # involves only discrete variables and the epigraph slack
            # variable is continuous. In ROA and RLP/NLP, the distance
            # calculation must not include the epigraph slack variable, so
            # it is kept out of the lists (update_var_con_list=False): the
            # epigraph constraint is very "flat" for branching rules, so the
            # original objective is used for the main problem and the
            # epigraph reformulation for the projection problem.
            # TODO: the logic here is too complicated; can we simplify it?
            process_objective(
                solve_data,
                config,
                move_linear_objective=(config.init_strategy == 'FP' or
                                       config.add_regularization is not None),
                use_mcpp=config.use_mcpp,
                update_var_con_list=config.add_regularization is None,
                partition_nonlinear_terms=config.partition_obj_nonlinear_terms)
            # The epigraph constraint is very "flat" for branching rules.
            # If ROA/RLP-NLP is activated and the original objective function is linear, we will use the original objective for the main mip.
            if MindtPy.objective_list[0].expr.polynomial_degree() in {
                    1, 0
            } and config.add_regularization is not None:
                MindtPy.objective_list[0].activate()
                MindtPy.objective_constr.deactivate()
                MindtPy.objective.deactivate()

            # Save model initial values.
            solve_data.initial_var_values = list(
                v.value for v in MindtPy.variable_list)

            # Store the initial model state as the best solution found. If we
            # find no better solution, then we will restore from this copy.
            solve_data.best_solution_found = None
            solve_data.best_solution_found_time = None

            # Record solver name
            solve_data.results.solver.name = 'MindtPy' + str(config.strategy)

            # Validate the model to ensure that MindtPy is able to solve it.
            if not model_is_valid(solve_data, config):
                return

            # Create a model block in which to store the generated feasibility
            # slack constraints. Do not leave the constraints on by default.
            feas = MindtPy.feas_opt = Block()
            feas.deactivate()
            feas.feas_constraints = ConstraintList(
                doc='Feasibility Problem Constraints')

            # Create a model block in which to store the generated linear
            # constraints. Do not leave the constraints on by default.
            lin = MindtPy.cuts = Block()
            lin.deactivate()

            # no-good cuts exclude particular discrete decisions
            lin.no_good_cuts = ConstraintList(doc='no-good cuts')
            # Feasible no-good cuts exclude discrete realizations that have
            # been explored via an NLP subproblem. Depending on model
            # characteristics, the user may wish to revisit NLP subproblems
            # (with a different initialization, for example). Therefore, these
            # cuts are not enabled by default.
            #
            # Note: these cuts will only exclude integer realizations that are
            # not already in the primary no_good_cuts ConstraintList.
            lin.feasible_no_good_cuts = ConstraintList(
                doc='explored no-good cuts')
            lin.feasible_no_good_cuts.deactivate()

            if config.feasibility_norm in {'L1', 'L2'}:
                feas.nl_constraint_set = RangeSet(
                    len(MindtPy.nonlinear_constraint_list),
                    doc='Integer index set over the nonlinear constraints.')
                # Create slack variables for feasibility problem
                feas.slack_var = Var(feas.nl_constraint_set,
                                     domain=NonNegativeReals,
                                     initialize=1)
            else:
                feas.slack_var = Var(domain=NonNegativeReals, initialize=1)

            # Create slack variables for OA cuts
            if config.add_slack:
                lin.slack_vars = VarList(bounds=(0, config.max_slack),
                                         initialize=0,
                                         domain=NonNegativeReals)

            # Initialize the main problem
            with time_code(solve_data.timing, 'initialization'):
                MindtPy_initialize_main(solve_data, config)

            # Algorithm main loop
            with time_code(solve_data.timing, 'main loop'):
                MindtPy_iteration_loop(solve_data, config)
            if solve_data.best_solution_found is not None:
                # Update values in original model
                copy_var_list_values(
                    from_list=solve_data.best_solution_found.MindtPy_utils.variable_list,
                    to_list=MindtPy.variable_list,
                    config=config)
                copy_var_list_values(MindtPy.variable_list, [
                    i
                    for i in solve_data.original_model.component_data_objects(
                        Var) if not i.fixed
                ], config)
                # exclude fixed variables here. This is consistent with the definition of variable_list in GDPopt.util

            solve_data.results.problem.lower_bound = solve_data.LB
            solve_data.results.problem.upper_bound = solve_data.UB

        solve_data.results.solver.timing = solve_data.timing
        solve_data.results.solver.user_time = solve_data.timing.total
        solve_data.results.solver.wallclock_time = solve_data.timing.total
        solve_data.results.solver.iterations = solve_data.mip_iter
        solve_data.results.solver.num_infeasible_nlp_subproblem = solve_data.nlp_infeasible_counter
        solve_data.results.solver.best_solution_found_time = solve_data.best_solution_found_time

        if config.single_tree:
            solve_data.results.solver.num_nodes = solve_data.nlp_iter - \
                (1 if config.init_strategy == 'rNLP' else 0)

        return solve_data.results
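
The epigraph reformulation discussed in the comments above, reduced to a toy model; a stock-Pyomo sketch with illustrative names:

from pyomo.environ import ConcreteModel, Var, Constraint, Objective, minimize

m = ConcreteModel()
m.x = Var(bounds=(-5, 5), initialize=0)
m.obj = Objective(expr=m.x ** 2, sense=minimize)  # original nonlinear objective

# Epigraph form: minimize an auxiliary variable bounded below by the objective.
m.obj.deactivate()
m.obj_var = Var(initialize=0)
m.obj_constr = Constraint(expr=m.x ** 2 - m.obj_var <= 0)
m.epigraph_obj = Objective(expr=m.obj_var, sense=minimize)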
Example #11
def init_rNLP(solve_data, config):
    """
    Initialize the problem by solving the relaxed NLP and then store the
    optimal variable values obtained from solving the rNLP.

    Parameters
    ----------
    solve_data: MindtPy Data Container
        data container that holds solve-instance data
    config: ConfigBlock
        contains the specific configurations for the algorithm
    """
    m = solve_data.working_model.clone()
    config.logger.info('Relaxed NLP: Solve relaxed integrality')
    MindtPy = m.MindtPy_utils
    TransformationFactory('core.relax_integer_vars').apply_to(m)
    nlp_args = dict(config.nlp_solver_args)
    nlpopt = SolverFactory(config.nlp_solver)
    set_solver_options(nlpopt, solve_data, config, solver_type='nlp')
    with SuppressInfeasibleWarning():
        results = nlpopt.solve(m, tee=config.nlp_solver_tee, **nlp_args)
    subprob_terminate_cond = results.solver.termination_condition
    if subprob_terminate_cond in {tc.optimal, tc.feasible, tc.locallyOptimal}:
        if subprob_terminate_cond in {tc.feasible, tc.locallyOptimal}:
            config.logger.info('Relaxed NLP was not solved to optimality.')
        dual_values = list(
            m.dual[c] for c in
            MindtPy.constraint_list) if config.calculate_dual else None
        # Add OA cut.
        # This covers the case where the lower bound does not exist.
        # TODO: should we use the bound of the rNLP here?
        if solve_data.objective_sense == minimize:
            if not math.isnan(results.problem.lower_bound):
                solve_data.LB = results.problem.lower_bound
                solve_data.bound_improved = solve_data.LB > solve_data.LB_progress[-1]
                solve_data.LB_progress.append(results.problem.lower_bound)
        elif not math.isnan(results.problem.upper_bound):
            solve_data.UB = results.problem.upper_bound
            solve_data.bound_improved = solve_data.UB < solve_data.UB_progress[-1]
            solve_data.UB_progress.append(results.problem.upper_bound)
        main_objective = MindtPy.objective_list[-1]
        config.logger.info(
            'Relaxed NLP: OBJ: %s  LB: %s  UB: %s  TIME: %ss' %
            (value(main_objective.expr), solve_data.LB, solve_data.UB,
             round(get_main_elapsed_time(solve_data.timing), 2)))
        if config.strategy in {'OA', 'GOA', 'FP'}:
            copy_var_list_values(m.MindtPy_utils.variable_list,
                                 solve_data.mip.MindtPy_utils.variable_list,
                                 config,
                                 ignore_integrality=True)
            if config.init_strategy == 'FP':
                copy_var_list_values(
                    m.MindtPy_utils.variable_list,
                    solve_data.working_model.MindtPy_utils.variable_list,
                    config,
                    ignore_integrality=True)
            if config.strategy == 'OA':
                add_oa_cuts(solve_data.mip, dual_values, solve_data, config)
            elif config.strategy == 'GOA':
                add_affine_cuts(solve_data, config)
            # TODO check that the values of the binary/integer variables are integral.
            for var in solve_data.mip.MindtPy_utils.discrete_variable_list:
                var.value = int(round(var.value))
    elif subprob_terminate_cond in {tc.infeasible, tc.noSolution}:
        # TODO fail? try something else?
        config.logger.info('Initial relaxed NLP problem is infeasible. '
                           'Problem may be infeasible.')
    elif subprob_terminate_cond is tc.maxTimeLimit:
        config.logger.info(
            'NLP subproblem failed to converge within time limit.')
        solve_data.results.solver.termination_condition = tc.maxTimeLimit
    elif subprob_terminate_cond is tc.maxIterations:
        config.logger.info(
            'NLP subproblem failed to converge within iteration limit.')
    else:
        raise ValueError(
            'MindtPy unable to handle relaxed NLP termination condition '
            'of %s. Solver message: %s' %
            (subprob_terminate_cond, results.solver.message))
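
The 'core.relax_integer_vars' step used by init_rNLP, shown in isolation on a hypothetical one-variable model:

from pyomo.environ import ConcreteModel, Var, Binary, TransformationFactory

m = ConcreteModel()
m.y = Var(domain=Binary)
TransformationFactory('core.relax_integer_vars').apply_to(m)
print(m.y.domain)  # Reals: integrality dropped
print(m.y.bounds)  # (0, 1): the original binary bounds are kept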
Example #12
def init_rNLP(solve_data, config):
    """
    Initialize the problem by solving the relaxed NLP (integrality relaxed)
    and then store the optimal variable values obtained from solving the rNLP.

    Parameters
    ----------
    solve_data: MindtPy Data Container
        data container that holds solve-instance data
    config: ConfigBlock
        contains the specific configurations for the algorithm
    """
    solve_data.nlp_iter += 1
    m = solve_data.working_model.clone()
    config.logger.info(
        "NLP %s: Solve relaxed integrality" % (solve_data.nlp_iter,))
    MindtPy = m.MindtPy_utils
    TransformationFactory('core.relax_integer_vars').apply_to(m)
    nlp_args = dict(config.nlp_solver_args)
    elapsed = get_main_elapsed_time(solve_data.timing)
    remaining = int(max(config.time_limit - elapsed, 1))
    if config.nlp_solver == 'gams':
        nlp_args['add_options'] = nlp_args.get('add_options', [])
        nlp_args['add_options'].append('option reslim=%s;' % remaining)
    with SuppressInfeasibleWarning():
        results = SolverFactory(config.nlp_solver).solve(
            m, tee=config.solver_tee, **nlp_args)
    subprob_terminate_cond = results.solver.termination_condition
    if subprob_terminate_cond in {tc.optimal, tc.feasible, tc.locallyOptimal}:
        if subprob_terminate_cond in {tc.feasible, tc.locallyOptimal}:
            config.logger.info(
                'relaxed NLP is not solved to optimality.')
        main_objective = next(m.component_data_objects(Objective, active=True))
        nlp_solution_values = list(v.value for v in MindtPy.variable_list)
        dual_values = list(
            m.dual[c] for c in MindtPy.constraint_list) if config.use_dual else None
        # Add OA cut
        if main_objective.sense == minimize and not math.isnan(results['Problem'][0]['Lower bound']):
            solve_data.LB = results['Problem'][0]['Lower bound']
        elif not math.isnan(results['Problem'][0]['Upper bound']):
            solve_data.UB = results['Problem'][0]['Upper bound']
        config.logger.info(
            'NLP %s: OBJ: %s  LB: %s  UB: %s'
            % (solve_data.nlp_iter, value(main_objective.expr),
               solve_data.LB, solve_data.UB))
        if config.strategy in {'OA', 'GOA'}:
            copy_var_list_values(m.MindtPy_utils.variable_list,
                                 solve_data.mip.MindtPy_utils.variable_list,
                                 config, ignore_integrality=True)
            if config.strategy == 'OA':
                add_oa_cuts(solve_data.mip, dual_values, solve_data, config)
            elif config.strategy == 'GOA':
                add_affine_cuts(solve_data, config)
            # TODO check if the values of the binary or integer variables are 0/1 or integer-valued.
            for var in solve_data.mip.component_data_objects(ctype=Var):
                if var.is_integer():
                    var.value = int(round(var.value))
    elif subprob_terminate_cond is tc.infeasible:
        # TODO fail? try something else?
        config.logger.info(
            'Initial relaxed NLP problem is infeasible. '
            'Problem may be infeasible.')
    elif subprob_terminate_cond is tc.maxTimeLimit:
        config.logger.info(
            'NLP subproblem failed to converge within time limit.')
    elif subprob_terminate_cond is tc.maxIterations:
        config.logger.info(
            'NLP subproblem failed to converge within iteration limit.')
    else:
        raise ValueError(
            'MindtPy unable to handle relaxed NLP termination condition '
            'of %s. Solver message: %s' %
            (subprob_terminate_cond, results.solver.message))
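
The m.dual[c] lookups above require an IMPORT-direction dual Suffix on the model before the NLP solve. A minimal standalone sketch (toy model; ipopt assumed available):

from pyomo.environ import (ConcreteModel, Var, Objective, Constraint,
                           Suffix, SolverFactory)

m = ConcreteModel()
m.x = Var(bounds=(0, 10), initialize=1.0)
m.c = Constraint(expr=m.x >= 2)
m.obj = Objective(expr=(m.x - 1) ** 2)
m.dual = Suffix(direction=Suffix.IMPORT)  # ask the solver for duals

SolverFactory('ipopt').solve(m)
print(m.dual[m.c])  # multiplier of the active constraint, ~2.0 here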
Example No. 13
def init_rNLP(solve_data, config):
    """Initialize the problem by solving the relaxed NLP and then store the optimal variable
    values obtained from solving the rNLP.

    Args:
        solve_data (MindtPySolveData): data container that holds solve-instance data.
        config (ConfigBlock): the specific configurations for MindtPy.

    Raises:
        ValueError: MindtPy unable to handle the termination condition of the relaxed NLP.
    """
    m = solve_data.working_model.clone()
    config.logger.debug('Relaxed NLP: Solve relaxed integrality')
    MindtPy = m.MindtPy_utils
    TransformationFactory('core.relax_integer_vars').apply_to(m)
    nlp_args = dict(config.nlp_solver_args)
    nlpopt = SolverFactory(config.nlp_solver)
    set_solver_options(nlpopt, solve_data, config, solver_type='nlp')
    with SuppressInfeasibleWarning():
        results = nlpopt.solve(m, tee=config.nlp_solver_tee, **nlp_args)
    subprob_terminate_cond = results.solver.termination_condition
    if subprob_terminate_cond in {tc.optimal, tc.feasible, tc.locallyOptimal}:
        main_objective = MindtPy.objective_list[-1]
        if subprob_terminate_cond == tc.optimal:
            update_dual_bound(solve_data, value(main_objective.expr))
        else:
            config.logger.info('relaxed NLP is not solved to optimality.')
            uptade_suboptimal_dual_bound(solve_data, results)
        dual_values = list(
            m.dual[c] for c in
            MindtPy.constraint_list) if config.calculate_dual else None
        config.logger.info(
            solve_data.log_formatter.format(
                '-', 'Relaxed NLP', value(main_objective.expr), solve_data.LB,
                solve_data.UB, solve_data.rel_gap,
                get_main_elapsed_time(solve_data.timing)))
        # Add OA cut
        if config.strategy in {'OA', 'GOA', 'FP'}:
            copy_var_list_values(m.MindtPy_utils.variable_list,
                                 solve_data.mip.MindtPy_utils.variable_list,
                                 config,
                                 ignore_integrality=True)
            if config.init_strategy == 'FP':
                copy_var_list_values(
                    m.MindtPy_utils.variable_list,
                    solve_data.working_model.MindtPy_utils.variable_list,
                    config,
                    ignore_integrality=True)
            if config.strategy in {'OA', 'FP'}:
                add_oa_cuts(solve_data.mip, dual_values, solve_data, config)
            elif config.strategy == 'GOA':
                add_affine_cuts(solve_data, config)
            for var in solve_data.mip.MindtPy_utils.discrete_variable_list:
                var.set_value(int(round(var.value)), skip_validation=True)
    elif subprob_terminate_cond in {tc.infeasible, tc.noSolution}:
        # TODO fail? try something else?
        config.logger.info('Initial relaxed NLP problem is infeasible. '
                           'Problem may be infeasible.')
    elif subprob_terminate_cond is tc.maxTimeLimit:
        config.logger.info(
            'NLP subproblem failed to converge within time limit.')
        solve_data.results.solver.termination_condition = tc.maxTimeLimit
    elif subprob_terminate_cond is tc.maxIterations:
        config.logger.info(
            'NLP subproblem failed to converge within iteration limit.')
    else:
        raise ValueError(
            'MindtPy unable to handle relaxed NLP termination condition '
            'of %s. Solver message: %s' %
            (subprob_terminate_cond, results.solver.message))
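
The update_dual_bound helper called above is not shown in these examples. A plausible sketch, inferred from how solve_data.LB/UB and their progress lists are used elsewhere (the name, signature, and details are assumptions, not MindtPy's actual code):

from pyomo.environ import minimize

def update_dual_bound_sketch(solve_data, bound_value):
    # The relaxed problem bounds the true optimum: tighten monotonically.
    if solve_data.objective_sense == minimize:
        solve_data.LB = max(bound_value, solve_data.LB)
        solve_data.LB_progress.append(solve_data.LB)
    else:
        solve_data.UB = min(bound_value, solve_data.UB)
        solve_data.UB_progress.append(solve_data.UB)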
Example No. 14
def init_max_binaries(solve_data, config):
    """
    Modifies the model by maximizing the number of activated binary variables.

    Note - The user would usually want to call solve_NLP_subproblem after an
    invocation of this function.

    Parameters
    ----------
    solve_data: MindtPy Data Container
        data container that holds solve-instance data
    config: ConfigBlock
        contains the specific configurations for the algorithm
    """
    m = solve_data.working_model.clone()
    if config.use_dual:
        m.dual.deactivate()
    MindtPy = m.MindtPy_utils
    solve_data.mip_subiter += 1
    config.logger.info(
        "MILP %s: maximize value of binaries" %
        (solve_data.mip_iter))
    for c in MindtPy.constraint_list:
        if c.body.polynomial_degree() not in (1, 0):
            c.deactivate()
    objective = next(m.component_data_objects(Objective, active=True))
    objective.deactivate()
    binary_vars = (
        v for v in m.component_data_objects(ctype=Var)
        if v.is_binary() and not v.fixed)
    MindtPy.MindtPy_max_binary_obj = Objective(
        expr=sum(v for v in binary_vars), sense=maximize)

    getattr(m, 'ipopt_zL_out', _DoNothing()).deactivate()
    getattr(m, 'ipopt_zU_out', _DoNothing()).deactivate()

    opt = SolverFactory(config.mip_solver)
    if isinstance(opt, PersistentSolver):
        opt.set_instance(m)
    mip_args = dict(config.mip_solver_args)
    elapsed = get_main_elapsed_time(solve_data.timing)
    remaining = int(max(config.time_limit - elapsed, 1))
    if config.mip_solver == 'gams':
        mip_args['add_options'] = mip_args.get('add_options', [])
        mip_args['add_options'].append('option optcr=0.001;')
        # Respect the remaining wall-clock budget computed above.
        mip_args['add_options'].append('option reslim=%s;' % remaining)
    results = opt.solve(m, tee=config.solver_tee, **mip_args)

    solve_terminate_cond = results.solver.termination_condition
    if solve_terminate_cond is tc.optimal:
        copy_var_list_values(
            MindtPy.variable_list,
            solve_data.working_model.MindtPy_utils.variable_list,
            config)

    elif solve_terminate_cond is tc.infeasible:
        raise ValueError(
            'MILP master problem is infeasible. '
            'Problem may have no more feasible '
            'binary configurations.')
    elif solve_terminate_cond is tc.maxTimeLimit:
        config.logger.info(
            'MILP main problem failed to converge within time limit.')
    elif solve_terminate_cond is tc.maxIterations:
        config.logger.info(
            'MILP main problem failed to converge within iteration limit.')
    else:
        raise ValueError(
            'MindtPy unable to handle MILP master termination condition '
            'of %s. Solver message: %s' %
            (solve_terminate_cond, results.solver.message))
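
The "maximize value of binaries" objective built above can be illustrated on a standalone toy model (glpk assumed available):

from pyomo.environ import (ConcreteModel, Var, Binary, Objective,
                           Constraint, SolverFactory, maximize)

m = ConcreteModel()
m.y = Var([1, 2, 3], domain=Binary)
m.knap = Constraint(expr=2 * m.y[1] + 3 * m.y[2] + 4 * m.y[3] <= 6)
# Same pattern as MindtPy_max_binary_obj: activate as many binaries
# as the linear constraints allow.
m.max_bin = Objective(expr=m.y[1] + m.y[2] + m.y[3], sense=maximize)

SolverFactory('glpk').solve(m)
print([int(m.y[i].value) for i in m.y])  # e.g. [1, 1, 0]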
Example No. 15
    def handle_lazy_subproblem_optimal(self, fixed_nlp, solve_data, config,
                                       opt):
        """
        This function copies the subproblem result to the MIP (see the explanation in the comments below),
        updates the bounds, adds OA and no-good cuts, and stores the solution if it is a new incumbent.

        Parameters
        ----------
        fixed_nlp: Pyomo model
            Fixed-NLP from the model
        solve_data: MindtPy Data Container
            data container that holds solve-instance data
        config: ConfigBlock
            contains the specific configurations for the algorithm
        opt: SolverFactory
            the mip solver
        """
        if config.calculate_dual:
            for c in fixed_nlp.tmp_duals:
                if fixed_nlp.dual.get(c, None) is None:
                    fixed_nlp.dual[c] = fixed_nlp.tmp_duals[c]
            dual_values = list(
                fixed_nlp.dual[c]
                for c in fixed_nlp.MindtPy_utils.constraint_list)
        else:
            dual_values = None
        main_objective = fixed_nlp.MindtPy_utils.objective_list[-1]
        if solve_data.objective_sense == minimize:
            solve_data.UB = min(value(main_objective.expr), solve_data.UB)
            solve_data.solution_improved = solve_data.UB < solve_data.UB_progress[
                -1]
            solve_data.UB_progress.append(solve_data.UB)
        else:
            solve_data.LB = max(value(main_objective.expr), solve_data.LB)
            solve_data.solution_improved = solve_data.LB > solve_data.LB_progress[
                -1]
            solve_data.LB_progress.append(solve_data.LB)

        config.logger.info(
            'Fixed-NLP {}: OBJ: {}  LB: {}  UB: {}  TIME: {}s'.format(
                solve_data.nlp_iter, value(main_objective.expr), solve_data.LB,
                solve_data.UB,
                round(get_main_elapsed_time(solve_data.timing), 2)))

        if solve_data.solution_improved:
            solve_data.best_solution_found = fixed_nlp.clone()
            solve_data.best_solution_found_time = get_main_elapsed_time(
                solve_data.timing)
            if config.add_no_good_cuts or config.use_tabu_list:
                if solve_data.results.problem.sense == ProblemSense.minimize:
                    solve_data.stored_bound.update(
                        {solve_data.UB: solve_data.LB})
                else:
                    solve_data.stored_bound.update(
                        {solve_data.LB: solve_data.UB})

        # In OA algorithm, OA cuts are generated based on the solution of the subproblem
        # We need to first copy the value of variables from the subproblem and then add cuts
        # since value(constr.body), value(jacs[constr][var]), value(var) are used in self.add_lazy_oa_cuts()
        copy_var_list_values(fixed_nlp.MindtPy_utils.variable_list,
                             solve_data.mip.MindtPy_utils.variable_list,
                             config)
        if config.strategy == 'OA':
            self.add_lazy_oa_cuts(solve_data.mip, dual_values, solve_data,
                                  config, opt)
            if config.add_regularization is not None:
                add_oa_cuts(solve_data.mip, dual_values, solve_data, config)
        elif config.strategy == 'GOA':
            self.add_lazy_affine_cuts(solve_data, config, opt)
        if config.add_no_good_cuts:
            var_values = list(v.value
                              for v in fixed_nlp.MindtPy_utils.variable_list)
            self.add_lazy_no_good_cuts(var_values, solve_data, config, opt)
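
The lazy OA cuts referenced above linearize each nonlinear constraint g(x) <= 0 at the fixed-NLP solution x_k, giving the cut g(x_k) + grad g(x_k)^T (x - x_k) <= 0. A hedged sketch of the cut body using Pyomo's differentiate (the helper name is illustrative, not the actual MindtPy routine):

from pyomo.environ import value
from pyomo.core.expr.calculus.derivatives import differentiate

def oa_cut_body(constr, variables):
    # Numeric gradient of the constraint body at the current point x_k.
    grads = differentiate(constr.body, wrt_list=variables,
                          mode=differentiate.Modes.reverse_numeric)
    return (value(constr.body)
            + sum(g * (v - v.value) for g, v in zip(grads, variables)))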
Example No. 16
def handle_NLP_subproblem_optimal(fixed_nlp, solve_data, config):
    """
    This function copies the result of the NLP solver function ('solve_NLP_subproblem') to the working model, updates
    the bounds, adds OA and integer cuts, and then stores the new solution if it is the new best solution. This
    function handles the result of the latest iteration of solving the NLP subproblem given an optimal solution.

    Parameters
    ----------
    fixed_nlp: Pyomo model
        fixed NLP from the model
    solve_data: MindtPy Data Container
        data container that holds solve-instance data
    config: ConfigBlock
        contains the specific configurations for the algorithm
    """
    copy_var_list_values(fixed_nlp.MindtPy_utils.variable_list,
                         solve_data.working_model.MindtPy_utils.variable_list,
                         config)
    if config.use_dual:
        for c in fixed_nlp.tmp_duals:
            if fixed_nlp.dual.get(c, None) is None:
                fixed_nlp.dual[c] = fixed_nlp.tmp_duals[c]
        dual_values = list(fixed_nlp.dual[c]
                           for c in fixed_nlp.MindtPy_utils.constraint_list)
    else:
        dual_values = None

    main_objective = next(
        fixed_nlp.component_data_objects(Objective, active=True))
    if main_objective.sense == minimize:
        solve_data.UB = min(value(main_objective.expr), solve_data.UB)
        solve_data.solution_improved = solve_data.UB < solve_data.UB_progress[
            -1]
        solve_data.UB_progress.append(solve_data.UB)
    else:
        solve_data.LB = max(value(main_objective.expr), solve_data.LB)
        solve_data.solution_improved = solve_data.LB > solve_data.LB_progress[
            -1]
        solve_data.LB_progress.append(solve_data.LB)

    config.logger.info('NLP {}: OBJ: {}  LB: {}  UB: {}'.format(
        solve_data.nlp_iter, value(main_objective.expr), solve_data.LB,
        solve_data.UB))

    if solve_data.solution_improved:
        solve_data.best_solution_found = fixed_nlp.clone()
        solve_data.best_solution_found_time = get_main_elapsed_time(
            solve_data.timing)
        if config.strategy == 'GOA':
            if solve_data.results.problem.sense == ProblemSense.minimize:
                solve_data.num_no_good_cuts_added.update({
                    solve_data.UB:
                    len(solve_data.mip.MindtPy_utils.MindtPy_linear_cuts.
                        integer_cuts)
                })
            else:
                solve_data.num_no_good_cuts_added.update({
                    solve_data.LB:
                    len(solve_data.mip.MindtPy_utils.MindtPy_linear_cuts.
                        integer_cuts)
                })

    # Add the linear cut
    if config.strategy == 'OA':
        copy_var_list_values(fixed_nlp.MindtPy_utils.variable_list,
                             solve_data.mip.MindtPy_utils.variable_list,
                             config)
        add_oa_cuts(solve_data.mip, dual_values, solve_data, config)
    elif config.strategy == "GOA":
        copy_var_list_values(fixed_nlp.MindtPy_utils.variable_list,
                             solve_data.mip.MindtPy_utils.variable_list,
                             config)
        add_affine_cuts(solve_data, config)
    elif config.strategy == 'PSC':
        # !!THIS SEEMS LIKE A BUG!! - mrmundt #
        add_psc_cut(solve_data, config)
    elif config.strategy == 'GBD':
        # !!THIS SEEMS LIKE A BUG!! - mrmundt #
        add_gbd_cut(solve_data, config)

    # This adds an integer cut to the feasible_integer_cuts
    # ConstraintList, which is not activated by default. However, it
    # may be activated as needed in certain situations or for certain
    # values of option flags.
    var_values = list(v.value for v in fixed_nlp.MindtPy_utils.variable_list)
    if config.add_nogood_cuts:
        add_nogood_cuts(var_values, solve_data, config, feasible=True)

    config.call_after_subproblem_feasible(fixed_nlp, solve_data)
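
The no-good cut added by add_nogood_cuts excludes exactly the current 0/1 assignment of the binary variables. A hedged sketch of the cut expression (helper name illustrative):

def no_good_cut_expr(binary_vars):
    # At least one binary must flip relative to the stored assignment.
    return sum((1 - y) if round(y.value) == 1 else y
               for y in binary_vars) >= 1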
Example No. 17
def handle_subproblem_optimal(fixed_nlp,
                              solve_data,
                              config,
                              cb_opt=None,
                              fp=False):
    """This function copies the result of the NLP solver function ('solve_subproblem') to the working model, updates
    the bounds, adds OA and no-good cuts, and then stores the new solution if it is the new best solution. This
    function handles the result of the latest iteration of solving the NLP subproblem given an optimal solution.

    Parameters
    ----------
    fixed_nlp : Pyomo model
        Integer-variable-fixed NLP model.
    solve_data : MindtPySolveData
        Data container that holds solve-instance data.
    config : ConfigBlock
        The specific configurations for MindtPy.
    cb_opt : SolverFactory, optional
        The gurobi_persistent solver, by default None.
    fp : bool, optional
        Whether it is in the loop of feasibility pump, by default False.
    """
    copy_var_list_values(fixed_nlp.MindtPy_utils.variable_list,
                         solve_data.working_model.MindtPy_utils.variable_list,
                         config)
    if config.calculate_dual:
        for c in fixed_nlp.tmp_duals:
            if fixed_nlp.dual.get(c, None) is None:
                fixed_nlp.dual[c] = fixed_nlp.tmp_duals[c]
        dual_values = list(fixed_nlp.dual[c]
                           for c in fixed_nlp.MindtPy_utils.constraint_list)
    else:
        dual_values = None
    main_objective = fixed_nlp.MindtPy_utils.objective_list[-1]
    update_primal_bound(solve_data, value(main_objective.expr))
    if solve_data.primal_bound_improved:
        solve_data.best_solution_found = fixed_nlp.clone()
        solve_data.best_solution_found_time = get_main_elapsed_time(
            solve_data.timing)
        if config.strategy == 'GOA':
            solve_data.num_no_good_cuts_added.update({
                solve_data.primal_bound:
                len(solve_data.mip.MindtPy_utils.cuts.no_good_cuts)
            })

        # add obj increasing constraint for fp
        if fp:
            solve_data.mip.MindtPy_utils.cuts.del_component(
                'improving_objective_cut')
            if solve_data.objective_sense == minimize:
                solve_data.mip.MindtPy_utils.cuts.improving_objective_cut = Constraint(
                    expr=sum(solve_data.mip.MindtPy_utils.objective_value[:])
                    <= solve_data.primal_bound - config.fp_cutoffdecr *
                    max(1, abs(solve_data.primal_bound)))
            else:
                solve_data.mip.MindtPy_utils.cuts.improving_objective_cut = Constraint(
                    expr=sum(solve_data.mip.MindtPy_utils.objective_value[:])
                    >= solve_data.primal_bound + config.fp_cutoffdecr *
                    max(1, abs(solve_data.primal_bound)))
    # Add the linear cut
    if config.strategy == 'OA' or fp:
        copy_var_list_values(fixed_nlp.MindtPy_utils.variable_list,
                             solve_data.mip.MindtPy_utils.variable_list,
                             config)
        add_oa_cuts(solve_data.mip, dual_values, solve_data, config, cb_opt)
    elif config.strategy == 'GOA':
        copy_var_list_values(fixed_nlp.MindtPy_utils.variable_list,
                             solve_data.mip.MindtPy_utils.variable_list,
                             config)
        add_affine_cuts(solve_data, config)
    # elif config.strategy == 'PSC':
    #     # !!THIS SEEMS LIKE A BUG!! - mrmundt #
    #     add_psc_cut(solve_data, config)
    # elif config.strategy == 'GBD':
    #     # !!THIS SEEMS LIKE A BUG!! - mrmundt #
    #     add_gbd_cut(solve_data, config)

    var_values = list(v.value for v in fixed_nlp.MindtPy_utils.variable_list)
    if config.add_no_good_cuts:
        add_no_good_cuts(var_values, solve_data, config)

    config.call_after_subproblem_feasible(fixed_nlp, solve_data)

    config.logger.info(
        solve_data.fixed_nlp_log_formatter.format(
            '*' if solve_data.primal_bound_improved else ' ',
            solve_data.nlp_iter if not fp else solve_data.fp_iter, 'Fixed NLP',
            value(main_objective.expr), solve_data.primal_bound,
            solve_data.dual_bound, solve_data.rel_gap,
            get_main_elapsed_time(solve_data.timing)))
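
Worked numbers for the improving-objective (feasibility pump) cut above, assuming fp_cutoffdecr = 0.1 and a minimization primal bound of 50.0:

primal_bound = 50.0
fp_cutoffdecr = 0.1  # assumed value for illustration
cutoff = primal_bound - fp_cutoffdecr * max(1, abs(primal_bound))
print(cutoff)  # 45.0 -> the next MIP must achieve objective_value <= 45.0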
Example No. 18
def solve_NLP_feas(solve_data, config):
    """
    Solves a feasibility NLP if the fixed_nlp problem is infeasible

    Parameters
    ----------
    solve_data: MindtPy Data Container
        data container that holds solve-instance data
    config: ConfigBlock
        contains the specific configurations for the algorithm

    Returns
    -------
    feas_nlp: Pyomo model
        feasibility NLP from the model
    feas_soln: Pyomo results object
        result from solving the feasibility NLP
    """
    feas_nlp = solve_data.working_model.clone()
    add_feas_slacks(feas_nlp, config)

    MindtPy = feas_nlp.MindtPy_utils
    if MindtPy.find_component('objective_value') is not None:
        MindtPy.objective_value.value = 0

    next(feas_nlp.component_data_objects(Objective, active=True)).deactivate()
    for constr in feas_nlp.component_data_objects(ctype=Constraint,
                                                  active=True,
                                                  descend_into=True):
        if constr.body.polynomial_degree() not in [0, 1]:
            constr.deactivate()

    MindtPy.MindtPy_feas.activate()
    if config.feasibility_norm == 'L1':
        MindtPy.MindtPy_feas_obj = Objective(expr=sum(
            s for s in MindtPy.MindtPy_feas.slack_var[...]),
                                             sense=minimize)
    elif config.feasibility_norm == 'L2':
        MindtPy.MindtPy_feas_obj = Objective(expr=sum(
            s * s for s in MindtPy.MindtPy_feas.slack_var[...]),
                                             sense=minimize)
    else:
        MindtPy.MindtPy_feas_obj = Objective(
            expr=MindtPy.MindtPy_feas.slack_var, sense=minimize)
    TransformationFactory('core.fix_integer_vars').apply_to(feas_nlp)
    with SuppressInfeasibleWarning():
        try:
            nlpopt = SolverFactory(config.nlp_solver)
            nlp_args = dict(config.nlp_solver_args)
            elapsed = get_main_elapsed_time(solve_data.timing)
            remaining = int(max(config.time_limit - elapsed, 1))
            if config.nlp_solver == 'gams':
                nlp_args['add_options'] = nlp_args.get('add_options', [])
                nlp_args['add_options'].append('option reslim=%s;' % remaining)
            feas_soln = nlpopt.solve(feas_nlp,
                                     tee=config.solver_tee,
                                     **nlp_args)
        except (ValueError, OverflowError) as error:
            for nlp_var, orig_val in zip(MindtPy.variable_list,
                                         solve_data.initial_var_values):
                if not nlp_var.fixed and not nlp_var.is_binary():
                    nlp_var.value = orig_val
            feas_soln = nlpopt.solve(feas_nlp,
                                     tee=config.solver_tee,
                                     **nlp_args)
    subprob_terminate_cond = feas_soln.solver.termination_condition
    if subprob_terminate_cond in {tc.optimal, tc.locallyOptimal, tc.feasible}:
        copy_var_list_values(
            MindtPy.variable_list,
            solve_data.working_model.MindtPy_utils.variable_list, config)
    elif subprob_terminate_cond is tc.infeasible:
        raise ValueError('Feasibility NLP infeasible. '
                         'This should never happen.')
    elif subprob_terminate_cond is tc.maxIterations:
        raise ValueError(
            'Subsolver reached its maximum number of iterations without converging, '
            'consider increasing the iterations limit of the subsolver or reviewing your formulation.'
        )
    else:
        raise ValueError(
            'MindtPy unable to handle feasibility NLP termination condition '
            'of {}'.format(subprob_terminate_cond))

    var_values = [v.value for v in MindtPy.variable_list]
    duals = [0 for _ in MindtPy.constraint_list]

    for i, c in enumerate(MindtPy.constraint_list):
        rhs = c.upper if c.has_ub() else c.lower
        c_geq = -1 if c.has_ub() else 1
        duals[i] = c_geq * max(0, c_geq * (rhs - value(c.body)))

    if value(MindtPy.MindtPy_feas_obj.expr) <= config.zero_tolerance:
        config.logger.warning(
            "The objective value %.4E of feasibility problem is less than zero_tolerance. "
            "This indicates that the nlp subproblem is feasible, although it is found infeasible in the previous step. "
            "Check the nlp solver output" %
            value(MindtPy.MindtPy_feas_obj.expr))

    return feas_nlp, feas_soln
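
A worked example of the dual estimate computed in the loop above, for a single constraint with an upper bound (numbers illustrative):

rhs, body_value = 4.0, 6.5  # constraint body(x) <= 4.0, currently 6.5
c_geq = -1                  # has_ub() -> treated as a <= constraint
dual = c_geq * max(0, c_geq * (rhs - body_value))
print(dual)  # -2.5: the <= constraint is violated by 2.5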
Example No. 19
    def solve(self, model, **kwds):
        """Solve the model.

        Warning: this solver is still in beta. Keyword arguments subject to
        change. Undocumented keyword arguments definitely subject to change.

        Args:
            model (Block): a Pyomo model or block to be solved.
        """
        config = self.CONFIG(kwds.pop('options', {}))
        config.set_value(kwds)
        set_up_logger(config)
        check_config(config)

        solve_data = set_up_solve_data(model, config)

        if config.integer_to_binary:
            TransformationFactory('contrib.integer_to_binary'). \
                apply_to(solve_data.working_model)

        new_logging_level = logging.INFO if config.tee else None
        with time_code(solve_data.timing, 'total', is_main_timer=True), \
                lower_logger_level_to(config.logger, new_logging_level), \
                create_utility_block(solve_data.working_model, 'MindtPy_utils', solve_data):
            config.logger.info(
                '---------------------------------------------------------------------------------------------\n'
                '              Mixed-Integer Nonlinear Decomposition Toolbox in Pyomo (MindtPy)               \n'
                '---------------------------------------------------------------------------------------------\n'
                'For more information, please visit https://pyomo.readthedocs.io/en/stable/contributed_packages/mindtpy.html')

            MindtPy = solve_data.working_model.MindtPy_utils
            setup_results_object(solve_data, config)
            process_objective(solve_data, config,
                              move_linear_objective=(config.init_strategy == 'FP'
                                                     or config.add_regularization is not None),
                              use_mcpp=config.use_mcpp,
                              update_var_con_list=config.add_regularization is None
                              )
            # The epigraph constraint is very "flat" for branching rules,
            # so we want to use the original model for the main MIP.
            if MindtPy.objective_list[0].expr.polynomial_degree() in {1, 0} and config.add_regularization is not None:
                MindtPy.objective_list[0].activate()
                MindtPy.objective_constr.deactivate()
                MindtPy.objective.deactivate()

            # Save model initial values.
            solve_data.initial_var_values = list(
                v.value for v in MindtPy.variable_list)

            # Store the initial model state as the best solution found. If we
            # find no better solution, then we will restore from this copy.
            solve_data.best_solution_found = None
            solve_data.best_solution_found_time = None

            # Record solver name
            solve_data.results.solver.name = 'MindtPy' + str(config.strategy)

            # Validate the model to ensure that MindtPy is able to solve it.
            if not model_is_valid(solve_data, config):
                return

            # Create a model block in which to store the generated feasibility
            # slack constraints. Do not leave the constraints on by default.
            feas = MindtPy.feas_opt = Block()
            feas.deactivate()
            feas.feas_constraints = ConstraintList(
                doc='Feasibility Problem Constraints')

            # Create a model block in which to store the generated linear
            # constraints. Do not leave the constraints on by default.
            lin = MindtPy.cuts = Block()
            lin.deactivate()

            # no-good cuts exclude particular discrete decisions
            lin.no_good_cuts = ConstraintList(doc='no-good cuts')
            # Feasible no-good cuts exclude discrete realizations that have
            # been explored via an NLP subproblem. Depending on model
            # characteristics, the user may wish to revisit NLP subproblems
            # (with a different initialization, for example). Therefore, these
            # cuts are not enabled by default.
            #
            # Note: these cuts will only exclude integer realizations that are
            # not already in the primary no_good_cuts ConstraintList.
            lin.feasible_no_good_cuts = ConstraintList(
                doc='explored no-good cuts')
            lin.feasible_no_good_cuts.deactivate()

            if config.feasibility_norm == 'L1' or config.feasibility_norm == 'L2':
                feas.nl_constraint_set = RangeSet(len(MindtPy.nonlinear_constraint_list),
                                                  doc='Integer index set over the nonlinear constraints.')
                # Create slack variables for feasibility problem
                feas.slack_var = Var(feas.nl_constraint_set,
                                     domain=NonNegativeReals, initialize=1)
            else:
                feas.slack_var = Var(domain=NonNegativeReals, initialize=1)

            # Create slack variables for OA cuts
            if config.add_slack:
                lin.slack_vars = VarList(
                    bounds=(0, config.max_slack), initialize=0, domain=NonNegativeReals)

            # Initialize the main problem
            with time_code(solve_data.timing, 'initialization'):
                MindtPy_initialize_main(solve_data, config)

            # Algorithm main loop
            with time_code(solve_data.timing, 'main loop'):
                MindtPy_iteration_loop(solve_data, config)
            if solve_data.best_solution_found is not None:
                # Update values in original model
                copy_var_list_values(
                    from_list=solve_data.best_solution_found.MindtPy_utils.variable_list,
                    to_list=MindtPy.variable_list,
                    config=config)
                copy_var_list_values(
                    MindtPy.variable_list,
                    [i for i in solve_data.original_model.component_data_objects(
                        Var) if not i.fixed],
                    config)
                # exclude fixed variables here. This is consistent with the definition of variable_list in GDPopt.util

            solve_data.results.problem.lower_bound = solve_data.LB
            solve_data.results.problem.upper_bound = solve_data.UB

        solve_data.results.solver.timing = solve_data.timing
        solve_data.results.solver.user_time = solve_data.timing.total
        solve_data.results.solver.wallclock_time = solve_data.timing.total
        solve_data.results.solver.iterations = solve_data.mip_iter
        solve_data.results.solver.num_infeasible_nlp_subproblem = solve_data.nlp_infeasible_counter
        solve_data.results.solver.best_solution_found_time = solve_data.best_solution_found_time

        if config.single_tree:
            solve_data.results.solver.num_nodes = solve_data.nlp_iter - \
                (1 if config.init_strategy == 'rNLP' else 0)

        return solve_data.results
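
For reference, a typical end-user invocation of this solve() entry point, following the pattern in the MindtPy documentation (glpk and ipopt availability assumed):

from pyomo.environ import (ConcreteModel, Var, Binary, Objective,
                           Constraint, SolverFactory, minimize)

model = ConcreteModel()
model.x = Var(bounds=(1.0, 10.0), initialize=5.0)
model.y = Var(within=Binary)
model.c1 = Constraint(
    expr=(model.x - 4.0) ** 2 - model.x <= 50.0 * (1 - model.y))
model.objective = Objective(expr=model.x, sense=minimize)

SolverFactory('mindtpy').solve(model, strategy='OA',
                               mip_solver='glpk', nlp_solver='ipopt')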
Example No. 20
def MindtPy_iteration_loop(solve_data, config):
    """Main loop for MindtPy Algorithms.

    This is the outermost function for the algorithms in this package; this function controls the progression of
    solving the model.

    Parameters
    ----------
    solve_data : MindtPySolveData
        Data container that holds solve-instance data.
    config : ConfigBlock
        The specific configurations for MindtPy.

    Raises
    ------
    ValueError
        The strategy value is not correct or not included.
    """
    last_iter_cuts = False
    while solve_data.mip_iter < config.iteration_limit:

        solve_data.mip_subiter = 0
        # solve MILP main problem
        if config.strategy in {'OA', 'GOA', 'ECP'}:
            main_mip, main_mip_results = solve_main(solve_data, config)
            if main_mip_results is not None:
                if not config.single_tree:
                    if main_mip_results.solver.termination_condition is tc.optimal:
                        handle_main_optimal(main_mip, solve_data, config)
                    elif main_mip_results.solver.termination_condition is tc.infeasible:
                        handle_main_infeasible(main_mip, solve_data, config)
                        last_iter_cuts = True
                        break
                    else:
                        handle_main_other_conditions(main_mip,
                                                     main_mip_results,
                                                     solve_data, config)
                    # Call the MILP post-solve callback
                    with time_code(solve_data.timing, 'Call after main solve'):
                        config.call_after_main_solve(main_mip, solve_data)
            else:
                config.logger.info('Algorithm should terminate here.')
                break
        else:
            raise ValueError('Unsupported decomposition strategy: %s' % config.strategy)

        # Regularization is activated after the first feasible solution is found.
        if config.add_regularization is not None and solve_data.best_solution_found is not None and not config.single_tree:
            # The main problem might be unbounded, regularization is activated only when a valid bound is provided.
            if solve_data.dual_bound != solve_data.dual_bound_progress[0]:
                main_mip, main_mip_results = solve_main(
                    solve_data, config, regularization_problem=True)
                handle_regularization_main_tc(main_mip, main_mip_results,
                                              solve_data, config)

        # TODO: add descriptions for the following code
        if config.add_regularization is not None and config.single_tree:
            solve_data.curr_int_sol = get_integer_solution(solve_data.mip,
                                                           string_zero=True)
            copy_var_list_values(
                main_mip.MindtPy_utils.variable_list,
                solve_data.working_model.MindtPy_utils.variable_list, config)
            if solve_data.curr_int_sol not in set(solve_data.integer_list):
                fixed_nlp, fixed_nlp_result = solve_subproblem(
                    solve_data, config)
                handle_nlp_subproblem_tc(fixed_nlp, fixed_nlp_result,
                                         solve_data, config)
        if algorithm_should_terminate(solve_data, config, check_cycling=True):
            last_iter_cuts = False
            break

        if not config.single_tree and config.strategy != 'ECP':  # i.e., not the LP/NLP single-tree (lazy callback) variant
            # Solve NLP subproblem
            # The constraint linearization happens in the handlers
            if not config.solution_pool:
                fixed_nlp, fixed_nlp_result = solve_subproblem(
                    solve_data, config)
                handle_nlp_subproblem_tc(fixed_nlp, fixed_nlp_result,
                                         solve_data, config)

                # Call the NLP post-solve callback
                with time_code(solve_data.timing,
                               'Call after subproblem solve'):
                    config.call_after_subproblem_solve(fixed_nlp, solve_data)

                if algorithm_should_terminate(solve_data,
                                              config,
                                              check_cycling=False):
                    last_iter_cuts = True
                    break
            else:
                if config.mip_solver == 'cplex_persistent':
                    solution_pool_names = main_mip_results._solver_model.solution.pool.get_names(
                    )
                elif config.mip_solver == 'gurobi_persistent':
                    solution_pool_names = list(
                        range(main_mip_results._solver_model.SolCount))
                # list to store the name and objective value of the solutions in the solution pool
                solution_name_obj = []
                for name in solution_pool_names:
                    if config.mip_solver == 'cplex_persistent':
                        obj = main_mip_results._solver_model.solution.pool.get_objective_value(
                            name)
                    elif config.mip_solver == 'gurobi_persistent':
                        main_mip_results._solver_model.setParam(
                            gurobipy.GRB.Param.SolutionNumber, name)
                        obj = main_mip_results._solver_model.PoolObjVal
                    solution_name_obj.append([name, obj])
                solution_name_obj.sort(
                    key=itemgetter(1),
                    reverse=solve_data.objective_sense == maximize)
                counter = 0
                for name, _ in solution_name_obj:
                    # the optimal solution of the main problem has been added to integer_list above
                    # so we should skip checking cycling for the first solution in the solution pool
                    if counter >= 1:
                        copy_var_list_values_from_solution_pool(
                            solve_data.mip.MindtPy_utils.variable_list,
                            solve_data.working_model.MindtPy_utils.
                            variable_list,
                            config,
                            solver_model=main_mip_results._solver_model,
                            var_map=main_mip_results.
                            _pyomo_var_to_solver_var_map,
                            solution_name=name)
                        solve_data.curr_int_sol = get_integer_solution(
                            solve_data.working_model)
                        if solve_data.curr_int_sol in set(
                                solve_data.integer_list):
                            config.logger.info(
                                'The same combination has been explored and will be skipped here.'
                            )
                            continue
                        else:
                            solve_data.integer_list.append(
                                solve_data.curr_int_sol)
                    counter += 1
                    fixed_nlp, fixed_nlp_result = solve_subproblem(
                        solve_data, config)
                    handle_nlp_subproblem_tc(fixed_nlp, fixed_nlp_result,
                                             solve_data, config)

                    # Call the NLP post-solve callback
                    with time_code(solve_data.timing,
                                   'Call after subproblem solve'):
                        config.call_after_subproblem_solve(
                            fixed_nlp, solve_data)

                    if algorithm_should_terminate(solve_data,
                                                  config,
                                                  check_cycling=False):
                        last_iter_cuts = True
                        break

                    if counter >= config.num_solution_iteration:
                        break

        if config.strategy == 'ECP':
            add_ecp_cuts(solve_data.mip, solve_data, config)

        # if config.strategy == 'PSC':
        #     # If the hybrid algorithm is not making progress, switch to OA.
        #     progress_required = 1E-6
        #     if solve_data.objective_sense == minimize:
        #         log = solve_data.LB_progress
        #         sign_adjust = 1
        #     else:
        #         log = solve_data.UB_progress
        #         sign_adjust = -1
        #     # Maximum number of iterations in which the lower (optimistic)
        #     # bound does not improve before switching to OA
        #     max_nonimprove_iter = 5
        #     making_progress = True
        #     # TODO-romeo Unnecessary for OA and ROA, right?
        #     for i in range(1, max_nonimprove_iter + 1):
        #         try:
        #             if (sign_adjust * log[-i]
        #                     <= (log[-i - 1] + progress_required)
        #                     * sign_adjust):
        #                 making_progress = False
        #             else:
        #                 making_progress = True
        #                 break
        #         except IndexError:
        #             # Not enough history yet, keep going.
        #             making_progress = True
        #             break
        #     if not making_progress and (
        #             config.strategy == 'hPSC' or
        #             config.strategy == 'PSC'):
        #         config.logger.info(
        #             'Not making enough progress for {} iterations. '
        #             'Switching to OA.'.format(max_nonimprove_iter))
        #         config.strategy = 'OA'

    # If add_no_good_cuts is True, the bound obtained in the last iteration is not reliable;
    # we correct it after the iteration loop.
    if (
            config.add_no_good_cuts or config.use_tabu_list
    ) and config.strategy != 'FP' and not solve_data.should_terminate and config.add_regularization is None:
        fix_dual_bound(solve_data, config, last_iter_cuts)
    config.logger.info(
        ' ==============================================================================================='
    )
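
The commented-out PSC block above tests whether the optimistic bound has stalled. A cleaned-up sketch of that test as a standalone predicate (a sketch of the commented logic, not a MindtPy API):

def making_progress(log, sign_adjust, progress_required=1e-6,
                    max_nonimprove_iter=5):
    # Scan the most recent bound updates; one improving step is enough.
    for i in range(1, max_nonimprove_iter + 1):
        try:
            if (sign_adjust * log[-i]
                    > (log[-i - 1] + progress_required) * sign_adjust):
                return True
        except IndexError:
            # Not enough history yet; treat as still making progress.
            return True
    return False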
Example No. 21
    def solve(self, model, **kwds):
        """Solve the model.
        Warning: this solver is still in beta. Keyword arguments subject to
        change. Undocumented keyword arguments definitely subject to change.
        Warning: at this point in time, if you try to use PSC or GBD with
        anything other than IPOPT as the NLP solver, bad things will happen.
        This is because the suffixes are not in place to extract dual values
        from the variable bounds for any other solver.
        TODO: fix needed with the GBD implementation.
        Args:
            model (Block): a Pyomo model or block to be solved
        """
        config = self.CONFIG(kwds.pop('options', {}))
        config.set_value(kwds)

        # configuration check
        if config.single_tree:
            config.iteration_limit = 1
            config.add_slack = False
            config.add_integer_cuts = False
            config.mip_solver = 'cplex_persistent'
            config.logger.info(
                "Single-tree implementation is activated. The default MIP solver is 'cplex_persistent'."
            )
        # if the slacks fix to zero, just don't add them
        if config.max_slack == 0.0:
            config.add_slack = False

        solve_data = MindtPySolveData()
        solve_data.results = SolverResults()
        solve_data.timing = Container()
        solve_data.curr_int_sol = []
        solve_data.prev_int_sol = []

        solve_data.original_model = model
        solve_data.working_model = model.clone()
        if config.integer_to_binary:
            TransformationFactory('contrib.integer_to_binary'). \
                apply_to(solve_data.working_model)

        new_logging_level = logging.INFO if config.tee else None
        with time_code(solve_data.timing, 'total', is_main_timer=True), \
                lower_logger_level_to(config.logger, new_logging_level), \
                create_utility_block(solve_data.working_model, 'MindtPy_utils', solve_data):
            config.logger.info("---Starting MindtPy---")

            MindtPy = solve_data.working_model.MindtPy_utils
            setup_results_object(solve_data, config)
            process_objective(solve_data, config, use_mcpp=False)

            # Save model initial values.
            solve_data.initial_var_values = list(
                v.value for v in MindtPy.variable_list)

            # Store the initial model state as the best solution found. If we
            # find no better solution, then we will restore from this copy.
            solve_data.best_solution_found = None

            # Record solver name
            solve_data.results.solver.name = 'MindtPy' + str(config.strategy)

            # Validate the model to ensure that MindtPy is able to solve it.
            if not model_is_valid(solve_data, config):
                return

            # Create a model block in which to store the generated feasibility
            # slack constraints. Do not leave the constraints on by default.
            feas = MindtPy.MindtPy_feas = Block()
            feas.deactivate()
            feas.feas_constraints = ConstraintList(
                doc='Feasibility Problem Constraints')

            # Create a model block in which to store the generated linear
            # constraints. Do not leave the constraints on by default.
            lin = MindtPy.MindtPy_linear_cuts = Block()
            lin.deactivate()

            # Integer cuts exclude particular discrete decisions
            lin.integer_cuts = ConstraintList(doc='integer cuts')
            # Feasible integer cuts exclude discrete realizations that have
            # been explored via an NLP subproblem. Depending on model
            # characteristics, the user may wish to revisit NLP subproblems
            # (with a different initialization, for example). Therefore, these
            # cuts are not enabled by default.
            #
            # Note: these cuts will only exclude integer realizations that are
            # not already in the primary integer_cuts ConstraintList.
            lin.feasible_integer_cuts = ConstraintList(
                doc='explored integer cuts')
            lin.feasible_integer_cuts.deactivate()

            # Set up iteration counters
            solve_data.nlp_iter = 0
            solve_data.mip_iter = 0
            solve_data.mip_subiter = 0

            # set up bounds
            solve_data.LB = float('-inf')
            solve_data.UB = float('inf')
            solve_data.LB_progress = [solve_data.LB]
            solve_data.UB_progress = [solve_data.UB]

            # Set of NLP iterations for which cuts were generated
            lin.nlp_iters = Set(dimen=1)

            # Set of MIP iterations for which cuts were generated in ECP
            lin.mip_iters = Set(dimen=1)

            if config.feasibility_norm == 'L1' or config.feasibility_norm == 'L2':
                feas.nl_constraint_set = Set(
                    initialize=[
                        i
                        for i, constr in enumerate(MindtPy.constraint_list, 1)
                        if constr.body.polynomial_degree() not in (1, 0)
                    ],
                    doc="Integer index set over the nonlinear constraints."
                    "The set corresponds to the index of nonlinear constraint in constraint_set"
                )
                # Create slack variables for feasibility problem
                feas.slack_var = Var(feas.nl_constraint_set,
                                     domain=NonNegativeReals,
                                     initialize=1)
            else:
                feas.slack_var = Var(domain=NonNegativeReals, initialize=1)

            # Create slack variables for OA cuts
            if config.add_slack:
                lin.slack_vars = VarList(bounds=(0, config.max_slack),
                                         initialize=0,
                                         domain=NonNegativeReals)

            # Flag indicating whether the solution improved in the past
            # iteration or not
            solve_data.solution_improved = False

            if not hasattr(solve_data.working_model, 'ipopt_zL_out'):
                solve_data.working_model.ipopt_zL_out = Suffix(
                    direction=Suffix.IMPORT)
            if not hasattr(solve_data.working_model, 'ipopt_zU_out'):
                solve_data.working_model.ipopt_zU_out = Suffix(
                    direction=Suffix.IMPORT)

            # Initialize the master problem
            with time_code(solve_data.timing, 'initialization'):
                MindtPy_initialize_master(solve_data, config)

            # Algorithm main loop
            with time_code(solve_data.timing, 'main loop'):
                MindtPy_iteration_loop(solve_data, config)

            if solve_data.best_solution_found is not None:
                # Update values in original model
                copy_var_list_values(from_list=solve_data.best_solution_found.
                                     MindtPy_utils.variable_list,
                                     to_list=MindtPy.variable_list,
                                     config=config)
                # MindtPy.objective_value.set_value(
                #     value(solve_data.working_objective_expr, exception=False))
                copy_var_list_values(
                    MindtPy.variable_list,
                    solve_data.original_model.component_data_objects(Var),
                    config)

            solve_data.results.problem.lower_bound = solve_data.LB
            solve_data.results.problem.upper_bound = solve_data.UB

        solve_data.results.solver.timing = solve_data.timing
        solve_data.results.solver.user_time = solve_data.timing.total
        solve_data.results.solver.wallclock_time = solve_data.timing.total

        solve_data.results.solver.iterations = solve_data.mip_iter

        if config.single_tree:
            solve_data.results.solver.num_nodes = solve_data.nlp_iter - \
                (1 if config.init_strategy == 'rNLP' else 0)

        return solve_data.results
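
The ipopt_zL_out / ipopt_zU_out suffixes declared above import IPOPT's bound multipliers, which the PSC/GBD-style cuts rely on. A minimal standalone sketch (ipopt assumed available):

from pyomo.environ import (ConcreteModel, Var, Objective, Suffix,
                           SolverFactory)

m = ConcreteModel()
m.x = Var(bounds=(0, 2), initialize=1.5)
m.obj = Objective(expr=-m.x)  # pushes x to its upper bound
m.ipopt_zL_out = Suffix(direction=Suffix.IMPORT)  # lower-bound multipliers
m.ipopt_zU_out = Suffix(direction=Suffix.IMPORT)  # upper-bound multipliers

SolverFactory('ipopt').solve(m)
print(m.ipopt_zU_out[m.x])  # multiplier of the active bound x <= 2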
Example No. 22
def handle_subproblem_optimal(fixed_nlp, solve_data, config, fp=False):
    """
    This function copies the result of the NLP solver function ('solve_subproblem') to the working model, updates
    the bounds, adds OA and no-good cuts, and then stores the new solution if it is the new best solution. This
    function handles the result of the latest iteration of solving the NLP subproblem given an optimal solution.

    Parameters
    ----------
    fixed_nlp: Pyomo model
        Fixed-NLP from the model
    solve_data: MindtPy Data Container
        data container that holds solve-instance data
    config: ConfigBlock
        contains the specific configurations for the algorithm
    """
    copy_var_list_values(fixed_nlp.MindtPy_utils.variable_list,
                         solve_data.working_model.MindtPy_utils.variable_list,
                         config)
    if config.calculate_dual:
        for c in fixed_nlp.tmp_duals:
            if fixed_nlp.dual.get(c, None) is None:
                fixed_nlp.dual[c] = fixed_nlp.tmp_duals[c]
        dual_values = list(fixed_nlp.dual[c]
                           for c in fixed_nlp.MindtPy_utils.constraint_list)
    else:
        dual_values = None
    main_objective = fixed_nlp.MindtPy_utils.objective_list[-1]
    if solve_data.objective_sense == minimize:
        solve_data.UB = min(value(main_objective.expr), solve_data.UB)
        solve_data.solution_improved = solve_data.UB < solve_data.UB_progress[
            -1]
        solve_data.UB_progress.append(solve_data.UB)
    else:
        solve_data.LB = max(value(main_objective.expr), solve_data.LB)
        solve_data.solution_improved = solve_data.LB > solve_data.LB_progress[
            -1]
        solve_data.LB_progress.append(solve_data.LB)
    config.logger.info(
        'Fixed-NLP {}: OBJ: {}  LB: {}  UB: {}  TIME: {}s'.format(
            solve_data.nlp_iter if not fp else solve_data.fp_iter,
            value(main_objective.expr), solve_data.LB, solve_data.UB,
            round(get_main_elapsed_time(solve_data.timing), 2)))

    if solve_data.solution_improved:
        solve_data.best_solution_found = fixed_nlp.clone()
        solve_data.best_solution_found_time = get_main_elapsed_time(
            solve_data.timing)
        if config.strategy == 'GOA':
            if solve_data.objective_sense == minimize:
                solve_data.num_no_good_cuts_added.update({
                    solve_data.UB:
                    len(solve_data.mip.MindtPy_utils.cuts.no_good_cuts)
                })
            else:
                solve_data.num_no_good_cuts_added.update({
                    solve_data.LB:
                    len(solve_data.mip.MindtPy_utils.cuts.no_good_cuts)
                })

        # add obj increasing constraint for fp
        if fp:
            solve_data.mip.MindtPy_utils.cuts.del_component(
                'improving_objective_cut')
            if solve_data.objective_sense == minimize:
                solve_data.mip.MindtPy_utils.cuts.improving_objective_cut = Constraint(
                    expr=solve_data.mip.MindtPy_utils.objective_value <=
                    solve_data.UB -
                    config.fp_cutoffdecr * max(1, abs(solve_data.UB)))
            else:
                solve_data.mip.MindtPy_utils.cuts.improving_objective_cut = Constraint(
                    expr=solve_data.mip.MindtPy_utils.objective_value >=
                    solve_data.LB +
                    config.fp_cutoffdecr * max(1, abs(solve_data.LB)))

    # Add the linear cut
    if config.strategy == 'OA' or fp:
        copy_var_list_values(fixed_nlp.MindtPy_utils.variable_list,
                             solve_data.mip.MindtPy_utils.variable_list,
                             config)
        add_oa_cuts(solve_data.mip, dual_values, solve_data, config)
    elif config.strategy == 'GOA':
        copy_var_list_values(fixed_nlp.MindtPy_utils.variable_list,
                             solve_data.mip.MindtPy_utils.variable_list,
                             config)
        add_affine_cuts(solve_data, config)
    # elif config.strategy == 'PSC':
    #     # !!THIS SEEMS LIKE A BUG!! - mrmundt #
    #     add_psc_cut(solve_data, config)
    # elif config.strategy == 'GBD':
    #     # !!THIS SEEMS LIKE A BUG!! - mrmundt #
    #     add_gbd_cut(solve_data, config)

    var_values = list(v.value for v in fixed_nlp.MindtPy_utils.variable_list)
    if config.add_no_good_cuts:
        add_no_good_cuts(var_values, solve_data, config, feasible=True)

    config.call_after_subproblem_feasible(fixed_nlp, solve_data)
Example No. 23
def _solve_rnGDP_subproblem(model, solve_data):
    config = solve_data.config
    subproblem = TransformationFactory('gdp.bigm').create_using(model)
    obj_sense_correction = solve_data.objective_sense != minimize

    try:
        with SuppressInfeasibleWarning():
            try:
                fbbt(subproblem, integer_tol=config.integer_tolerance)
            except InfeasibleConstraintException:
                copy_var_list_values(  # copy variable values, even if errored
                    from_list=subproblem.GDPopt_utils.variable_list,
                    to_list=model.GDPopt_utils.variable_list,
                    config=config,
                    ignore_integrality=True)
                return float('inf'), float('inf')
            minlp_args = dict(config.minlp_solver_args)
            if config.minlp_solver == 'gams':
                elapsed = get_main_elapsed_time(solve_data.timing)
                remaining = max(config.time_limit - elapsed, 1)
                minlp_args['add_options'] = minlp_args.get('add_options', [])
                minlp_args['add_options'].append('option reslim=%s;' %
                                                 remaining)
            result = SolverFactory(config.minlp_solver).solve(
                subproblem, **minlp_args)
    except RuntimeError as e:
        config.logger.warning(
            "Solver encountered RuntimeError. Treating as infeasible. "
            "Msg: %s\n%s" % (str(e), traceback.format_exc()))
        copy_var_list_values(  # copy variable values, even if errored
            from_list=subproblem.GDPopt_utils.variable_list,
            to_list=model.GDPopt_utils.variable_list,
            config=config,
            ignore_integrality=True)
        return float('inf'), float('inf')

    term_cond = result.solver.termination_condition
    if term_cond in {tc.optimal, tc.locallyOptimal, tc.feasible}:
        assert result.solver.status is SolverStatus.ok
        lb = result.problem.lower_bound if not obj_sense_correction else \
             -result.problem.upper_bound
        ub = result.problem.upper_bound if not obj_sense_correction else \
             -result.problem.lower_bound
        # TODO handle the case where the solver reports no lower bound
        copy_var_list_values(
            from_list=subproblem.GDPopt_utils.variable_list,
            to_list=model.GDPopt_utils.variable_list,
            config=config,
        )
        return lb, ub
    elif term_cond == tc.unbounded:
        copy_var_list_values(from_list=subproblem.GDPopt_utils.variable_list,
                             to_list=model.GDPopt_utils.variable_list,
                             config=config,
                             ignore_integrality=True)
        return float('-inf'), float('-inf')
    elif term_cond == tc.infeasible:
        copy_var_list_values(from_list=subproblem.GDPopt_utils.variable_list,
                             to_list=model.GDPopt_utils.variable_list,
                             config=config,
                             ignore_integrality=True)
        return float('inf'), float('inf')
    else:
        config.logger.warning("Unknown termination condition of %s. "
                              "Treating as infeasible." % term_cond)
        copy_var_list_values(from_list=subproblem.GDPopt_utils.variable_list,
                             to_list=model.GDPopt_utils.variable_list,
                             config=config,
                             ignore_integrality=True)
        return float('inf'), float('inf')
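
The gdp.bigm call above relaxes the disjunctive structure into an ordinary algebraic (MI)NLP before handing it to the subsolver. A minimal sketch of that transformation on a toy disjunction (the model and bounds are illustrative only):

from pyomo.environ import ConcreteModel, Var, Objective, TransformationFactory
from pyomo.gdp import Disjunction

m = ConcreteModel()
m.x = Var(bounds=(0, 10))
# Either x <= 2 or x >= 8 must hold.
m.d = Disjunction(expr=[[m.x <= 2], [m.x >= 8]])
m.obj = Objective(expr=m.x)

# create_using leaves m untouched and returns a Big-M reformulated copy,
# just as _solve_rnGDP_subproblem does with its input model.
subproblem = TransformationFactory('gdp.bigm').create_using(m)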
Example No. 24
def solve_feasibility_subproblem(solve_data, config):
    """
    Solves a feasibility NLP when the fixed_nlp subproblem is infeasible.

    Parameters
    ----------
    solve_data: MindtPy Data Container
        data container that holds solve-instance data
    config: ConfigBlock
        contains the specific configurations for the algorithm

    Returns
    -------
    feas_subproblem: Pyomo model
        feasibility NLP from the model
    feas_soln: Pyomo results object
        result from solving the feasibility NLP
    """
    feas_subproblem = solve_data.working_model.clone()
    add_feas_slacks(feas_subproblem, config)

    MindtPy = feas_subproblem.MindtPy_utils
    if MindtPy.find_component('objective_value') is not None:
        MindtPy.objective_value.value = 0

    next(feas_subproblem.component_data_objects(Objective,
                                                active=True)).deactivate()
    for constr in feas_subproblem.MindtPy_utils.nonlinear_constraint_list:
        constr.deactivate()

    MindtPy.feas_opt.activate()
    if config.feasibility_norm == 'L1':
        MindtPy.feas_obj = Objective(expr=sum(
            s for s in MindtPy.feas_opt.slack_var[...]),
                                     sense=minimize)
    elif config.feasibility_norm == 'L2':
        MindtPy.feas_obj = Objective(expr=sum(
            s * s for s in MindtPy.feas_opt.slack_var[...]),
                                     sense=minimize)
    else:
        MindtPy.feas_obj = Objective(expr=MindtPy.feas_opt.slack_var,
                                     sense=minimize)
    TransformationFactory('core.fix_integer_vars').apply_to(feas_subproblem)
    nlpopt = SolverFactory(config.nlp_solver)
    nlp_args = dict(config.nlp_solver_args)
    set_solver_options(nlpopt, solve_data, config, solver_type='nlp')
    with SuppressInfeasibleWarning():
        try:
            with time_code(solve_data.timing, 'feasibility subproblem'):
                feas_soln = nlpopt.solve(feas_subproblem,
                                         tee=config.nlp_solver_tee,
                                         **nlp_args)
        except (ValueError, OverflowError) as error:
            for nlp_var, orig_val in zip(MindtPy.variable_list,
                                         solve_data.initial_var_values):
                if not nlp_var.fixed and not nlp_var.is_binary():
                    nlp_var.value = orig_val
            with time_code(solve_data.timing, 'feasibility subproblem'):
                feas_soln = nlpopt.solve(feas_subproblem,
                                         tee=config.nlp_solver_tee,
                                         **nlp_args)
    subprob_terminate_cond = feas_soln.solver.termination_condition
    if subprob_terminate_cond in {tc.optimal, tc.locallyOptimal, tc.feasible}:
        copy_var_list_values(
            MindtPy.variable_list,
            solve_data.working_model.MindtPy_utils.variable_list, config)
    elif subprob_terminate_cond in {tc.infeasible, tc.noSolution}:
        config.logger.error('Feasibility subproblem infeasible. '
                            'This should never happen.')
        solve_data.should_terminate = True
        solve_data.results.solver.status = SolverStatus.error
        return feas_subproblem, feas_soln
    elif subprob_terminate_cond is tc.maxIterations:
        config.logger.error(
            'Subsolver reached its maximum number of iterations without converging. '
            'Consider increasing the iteration limit of the subsolver or reviewing your formulation.'
        )
        solve_data.should_terminate = True
        solve_data.results.solver.status = SolverStatus.error
        return feas_subproblem, feas_soln
    else:
        config.logger.error(
            'MindtPy unable to handle feasibility subproblem termination condition '
            'of {}'.format(subprob_terminate_cond))
        solve_data.should_terminate = True
        solve_data.results.solver.status = SolverStatus.error
        return feas_subproblem, feas_soln

    if value(MindtPy.feas_obj.expr) <= config.zero_tolerance:
        config.logger.warning(
            'The objective value %.4E of the feasibility problem is below zero_tolerance. '
            'This indicates that the NLP subproblem is feasible, even though it was reported '
            'infeasible in the previous step. Check the NLP solver output.'
            % value(MindtPy.feas_obj.expr))

    return feas_subproblem, feas_soln
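
To make the slack construction concrete: the feasibility subproblem relaxes each nonlinear constraint with a nonnegative slack and minimizes a norm of the slacks, so it is feasible by construction. A minimal sketch under the L1 norm, with hypothetical constraint data:

from pyomo.environ import (ConcreteModel, Var, Constraint, Objective,
                           NonNegativeReals, minimize)

m = ConcreteModel()
m.x = Var()
m.slack = Var([1, 2], domain=NonNegativeReals, initialize=1)
# Conflicting constraints x <= 1 and x >= 3, each relaxed by its own slack.
m.c1 = Constraint(expr=m.x - m.slack[1] <= 1)
m.c2 = Constraint(expr=m.x + m.slack[2] >= 3)
# L1 objective; the L2 variant would minimize slack[1]**2 + slack[2]**2.
m.feas_obj = Objective(expr=m.slack[1] + m.slack[2], sense=minimize)
# The minimal total slack is 2 (> zero_tolerance), certifying infeasibility.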
Example No. 25
def _perform_branch_and_bound(solve_data):
    solve_data.explored_nodes = 0
    root_node = solve_data.working_model
    root_util_blk = root_node.GDPopt_utils
    config = solve_data.config

    # Map unfixed disjunct -> list of deactivated constraints
    root_util_blk.disjunct_to_nonlinear_constraints = ComponentMap()
    # Map relaxed disjunctions -> list of unfixed disjuncts
    root_util_blk.disjunction_to_unfixed_disjuncts = ComponentMap()

    # Preprocess the active disjunctions
    for disjunction in root_util_blk.disjunction_list:
        assert disjunction.active

        disjuncts_fixed_True = []
        disjuncts_fixed_False = []
        unfixed_disjuncts = []

        # categorize the disjuncts in the disjunction
        for disjunct in disjunction.disjuncts:
            if disjunct.indicator_var.fixed:
                if disjunct.indicator_var.value:
                    disjuncts_fixed_True.append(disjunct)
                elif not disjunct.indicator_var.value:
                    disjuncts_fixed_False.append(disjunct)
                else:
                    pass  # raise error for fractional value?
            else:
                unfixed_disjuncts.append(disjunct)

        # update disjunct lists for predetermined disjunctions
        if len(disjuncts_fixed_False) == len(disjunction.disjuncts) - 1:
            # all but one disjunct in the disjunction is fixed to False.
            # Remaining one must be true. If not already fixed to True, do so.
            if not disjuncts_fixed_True:
                disjuncts_fixed_True = unfixed_disjuncts
                unfixed_disjuncts = []
                disjuncts_fixed_True[0].indicator_var.fix(True)
        elif disjuncts_fixed_True and disjunction.xor:
            assert len(disjuncts_fixed_True) == 1, ("XOR (only one True) "
                                                    "violated: %s" %
                                                    disjunction.name)
            disjuncts_fixed_False.extend(unfixed_disjuncts)
            unfixed_disjuncts = []

        # Make sure disjuncts fixed to False are properly deactivated.
        for disjunct in disjuncts_fixed_False:
            disjunct.deactivate()

        # Deactivate nonlinear constraints in unfixed disjuncts
        for disjunct in unfixed_disjuncts:
            nonlinear_constraints_in_disjunct = [
                constr
                for constr in disjunct.component_data_objects(Constraint,
                                                              active=True)
                if constr.body.polynomial_degree() not in _linear_degrees
            ]
            for constraint in nonlinear_constraints_in_disjunct:
                constraint.deactivate()
            if nonlinear_constraints_in_disjunct:
                # TODO might be worthwhile to log number of nonlinear
                # constraints in each disjunction for later branching purposes
                root_util_blk.disjunct_to_nonlinear_constraints[
                    disjunct] = nonlinear_constraints_in_disjunct

        root_util_blk.disjunction_to_unfixed_disjuncts[
            disjunction] = unfixed_disjuncts

    # Add the BigM suffix if it does not already exist. Used later during
    # nonlinear constraint activation.
    # TODO is this still necessary?
    if not hasattr(root_node, 'BigM'):
        root_node.BigM = Suffix()

    # Set up the priority queue
    queue = solve_data.bb_queue = []
    solve_data.created_nodes = 0
    unbranched_disjunction_indices = [
        i for i, disjunction in enumerate(root_util_blk.disjunction_list)
        if disjunction in root_util_blk.disjunction_to_unfixed_disjuncts
    ]
    sort_tuple = BBNodeData(
        obj_lb=float('-inf'),
        obj_ub=float('inf'),
        is_screened=False,
        is_evaluated=False,
        num_unbranched_disjunctions=len(unbranched_disjunction_indices),
        node_count=0,
        unbranched_disjunction_indices=unbranched_disjunction_indices,
    )
    heappush(queue, (sort_tuple, root_node))

    # Do the branch and bound
    while len(queue) > 0:
        # visit the top node on the heap
        # from pprint import pprint
        # pprint([(
        #     x[0].node_count, x[0].obj_lb, x[0].obj_ub, x[0].num_unbranched_disjunctions
        # ) for x in sorted(queue)])
        node_data, node_model = heappop(queue)
        config.logger.info("Nodes: %s LB %.10g Unbranched %s" %
                           (solve_data.explored_nodes, node_data.obj_lb,
                            node_data.num_unbranched_disjunctions))

        # Check time limit
        elapsed = get_main_elapsed_time(solve_data.timing)
        if elapsed >= config.time_limit:
            config.logger.info('GDPopt-LBB unable to converge bounds '
                               'before time limit of {} seconds. '
                               'Elapsed: {} seconds'.format(
                                   config.time_limit, elapsed))
            no_feasible_soln = float('inf')
            solve_data.LB = node_data.obj_lb if \
                            solve_data.objective_sense == minimize else \
                            -no_feasible_soln
            solve_data.UB = no_feasible_soln if \
                            solve_data.objective_sense == minimize else \
                            -node_data.obj_lb
            config.logger.info('Final bound values: LB: {}  UB: {}'.format(
                solve_data.LB, solve_data.UB))
            solve_data.results.solver.termination_condition = tc.maxTimeLimit
            return True

        # Handle current node
        if not node_data.is_screened:
            # Node has not been evaluated.
            solve_data.explored_nodes += 1
            new_node_data = _prescreen_node(node_data, node_model, solve_data)
            # re-queue the node with its updated node data
            heappush(queue, (new_node_data, node_model))
        elif node_data.obj_lb < node_data.obj_ub - config.bound_tolerance and \
             not node_data.is_evaluated:
            # Node has not been fully evaluated.
            # Note: infeasible and unbounded nodes will skip this condition,
            # because of strict inequality
            new_node_data = _evaluate_node(node_data, node_model, solve_data)
            # re-queue the node with its updated node data
            heappush(queue, (new_node_data, node_model))
        elif node_data.num_unbranched_disjunctions == 0 or \
             node_data.obj_lb == float('inf'):
            # We have reached a leaf node, or the best available node is
            # infeasible.
            original_model = solve_data.original_model
            copy_var_list_values(
                from_list=node_model.GDPopt_utils.variable_list,
                to_list=original_model.GDPopt_utils.variable_list,
                config=config,
            )

            solve_data.LB = node_data.obj_lb if \
                            solve_data.objective_sense == minimize else \
                            -node_data.obj_ub
            solve_data.UB = node_data.obj_ub if \
                            solve_data.objective_sense == minimize else \
                            -node_data.obj_lb
            solve_data.master_iteration = solve_data.explored_nodes
            if node_data.obj_lb == float('inf'):
                solve_data.results.solver.termination_condition = tc.infeasible
            elif node_data.obj_ub == float('-inf'):
                solve_data.results.solver.termination_condition = tc.unbounded
            else:
                solve_data.results.solver.termination_condition = tc.optimal
            return
        else:
            _branch_on_node(node_data, node_model, solve_data)
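
The queue ordering above comes from lexicographic tuple comparison on BBNodeData, so the field order encodes the node-selection rule. A minimal sketch of the same pattern with a simplified node record (the field names here are illustrative):

from heapq import heappush, heappop
from collections import namedtuple

# Lexicographic comparison: lowest obj_lb first, then fewer unbranched
# disjunctions, then creation order as the final tie-breaker.
NodeData = namedtuple('NodeData', ['obj_lb', 'num_unbranched', 'node_count'])

queue = []
heappush(queue, (NodeData(0.0, 3, 0), 'root'))
heappush(queue, (NodeData(-1.5, 2, 1), 'child_a'))
heappush(queue, (NodeData(-1.5, 1, 2), 'child_b'))

node_data, model = heappop(queue)
print(node_data)  # child_b: same bound as child_a, but fewer disjunctions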
Example No. 26
    def solve(self, model, **kwds):
        """Solve the model.

        Warning: this solver is still in beta. Keyword arguments subject to
        change. Undocumented keyword arguments definitely subject to change.

        This function performs all of the GDPopt solver setup and problem
        validation. It then calls upon helper functions to construct the
        initial master approximation and iteration loop.

        Args:
            model (Block): a Pyomo model or block to be solved

        """
        config = self.CONFIG(kwds.pop('options', {}))
        config.set_value(kwds)
        solve_data = GDPoptSolveData()
        solve_data.results = SolverResults()
        solve_data.timing = Container()

        old_logger_level = config.logger.getEffectiveLevel()
        with time_code(solve_data.timing, 'total', is_main_timer=True), \
                restore_logger_level(config.logger), \
                create_utility_block(model, 'GDPopt_utils', solve_data):
            if config.tee and old_logger_level > logging.INFO:
                # If the logger does not already include INFO, include it.
                config.logger.setLevel(logging.INFO)
            config.logger.info(
                "Starting GDPopt version %s using %s algorithm"
                % (".".join(map(str, self.version())), config.strategy)
            )
            config.logger.info(
                """
If you use this software, you may cite the following:
- Implementation:
    Chen, Q; Johnson, ES; Siirola, JD; Grossmann, IE.
    Pyomo.GDP: Disjunctive Models in Python. 
    Proc. of the 13th Intl. Symposium on Process Systems Eng.
    San Diego, 2018.
- LOA algorithm:
    Türkay, M; Grossmann, IE.
    Logic-based MINLP algorithms for the optimal synthesis of process networks.
    Comp. and Chem. Eng. 1996, 20(8), 959–978.
    DOI: 10.1016/0098-1354(95)00219-7.
- GLOA algorithm:
    Lee, S; Grossmann, IE.
    A Global Optimization Algorithm for Nonconvex Generalized Disjunctive Programming and Applications to Process Systems
    Comp. and Chem. Eng. 2001, 25, 1675-1697.
    DOI: 10.1016/S0098-1354(01)00732-3
                """.strip()
            )
            solve_data.results.solver.name = 'GDPopt %s - %s' % (
                str(self.version()), config.strategy)

            solve_data.original_model = model
            solve_data.working_model = model.clone()
            GDPopt = solve_data.working_model.GDPopt_utils
            setup_results_object(solve_data, config)

            solve_data.current_strategy = config.strategy

            # Verify that objective has correct form
            process_objective(solve_data, config)

            # Save model initial values. These are used later to initialize NLP
            # subproblems.
            solve_data.initial_var_values = list(
                v.value for v in GDPopt.variable_list)
            solve_data.best_solution_found = None

            # Validate the model to ensure that GDPopt is able to solve it.
            if not model_is_valid(solve_data, config):
                return

            # Integer cuts exclude particular discrete decisions
            GDPopt.integer_cuts = ConstraintList(doc='integer cuts')

            # Feasible integer cuts exclude discrete realizations that have
            # been explored via an NLP subproblem. Depending on model
            # characteristics, the user may wish to revisit NLP subproblems
            # (with a different initialization, for example). Therefore, these
            # cuts are not enabled by default, unless the initial model has no
            # discrete decisions.

            # Note: these cuts will only exclude integer realizations that are
            # not already in the primary GDPopt_integer_cuts ConstraintList.
            GDPopt.no_backtracking = ConstraintList(
                doc='explored integer cuts')

            # Set up iteration counters
            solve_data.master_iteration = 0
            solve_data.mip_iteration = 0
            solve_data.nlp_iteration = 0

            # set up bounds
            solve_data.LB = float('-inf')
            solve_data.UB = float('inf')
            solve_data.iteration_log = {}

            # Flag indicating whether the solution improved in the past
            # iteration or not
            solve_data.feasible_solution_improved = False

            # Initialize the master problem
            with time_code(solve_data.timing, 'initialization'):
                GDPopt_initialize_master(solve_data, config)

            # Algorithm main loop
            with time_code(solve_data.timing, 'main loop'):
                GDPopt_iteration_loop(solve_data, config)

            if solve_data.best_solution_found is not None:
                # Update values in working model
                copy_var_list_values(
                    from_list=solve_data.best_solution_found.GDPopt_utils.variable_list,
                    to_list=GDPopt.variable_list,
                    config=config)
                # Update values in original model
                copy_var_list_values(
                    GDPopt.variable_list,
                    solve_data.original_model.GDPopt_utils.variable_list,
                    config)

            solve_data.results.problem.lower_bound = solve_data.LB
            solve_data.results.problem.upper_bound = solve_data.UB

        solve_data.results.solver.timing = solve_data.timing
        solve_data.results.solver.user_time = solve_data.timing.total
        solve_data.results.solver.wallclock_time = solve_data.timing.total

        solve_data.results.solver.iterations = solve_data.master_iteration

        return solve_data.results
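
For context, this solve method is what runs when GDPopt is invoked through the SolverFactory interface. A hedged usage sketch (a minimal invocation; 'model' stands in for any Pyomo/GDP model, and the subsolver names are assumptions that require local installations):

from pyomo.environ import SolverFactory

results = SolverFactory('gdpopt').solve(
    model,
    strategy='LOA',        # logic-based OA; 'GLOA' for the global variant
    mip_solver='glpk',
    nlp_solver='ipopt',
    tee=True)
print(results.solver.termination_condition)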
Example No. 27
def solve_NLP_subproblem(solve_data, config):
    m = solve_data.working_model.clone()
    MindtPy = m.MindtPy_utils
    main_objective = next(m.component_data_objects(Objective, active=True))
    solve_data.nlp_iter += 1
    config.logger.info('NLP %s: Solve subproblem for fixed binaries.'
                       % (solve_data.nlp_iter,))
    # Set up NLP
    for v in MindtPy.variable_list:
        if v.is_binary():
            v.fix(int(round(value(v))))

    # restore original variable values
    for nlp_var, orig_val in zip(
            MindtPy.variable_list,
            solve_data.initial_var_values):
        if not nlp_var.fixed and not nlp_var.is_binary():
            nlp_var.value = orig_val

    MindtPy.MindtPy_linear_cuts.deactivate()
    m.tmp_duals = ComponentMap()
    for c in m.component_data_objects(ctype=Constraint, active=True,
                                      descend_into=True):
        rhs = ((0 if c.upper is None else c.upper) +
               (0 if c.lower is None else c.lower))
        sign_adjust = 1 if value(c.upper) is None else -1
        m.tmp_duals[c] = sign_adjust * max(0,
                                           sign_adjust * (rhs - value(c.body)))
        # TODO check sign_adjust
    t = TransformationFactory('contrib.deactivate_trivial_constraints')
    t.apply_to(m, tmp=True, ignore_infeasible=True)
    # Solve the NLP
    # m.pprint() # print nlp problem for debugging
    with SuppressInfeasibleWarning():
        results = SolverFactory(config.nlp_solver).solve(
            m, **config.nlp_solver_args)
    var_values = list(v.value for v in MindtPy.variable_list)
    subprob_terminate_cond = results.solver.termination_condition
    if subprob_terminate_cond is tc.optimal:
        copy_var_list_values(
            m.MindtPy_utils.variable_list,
            solve_data.working_model.MindtPy_utils.variable_list,
            config)
        for c in m.tmp_duals:
            if m.dual.get(c, None) is None:
                m.dual[c] = m.tmp_duals[c]
        duals = list(m.dual[c] for c in MindtPy.constraint_list)
        if main_objective.sense == minimize:
            solve_data.UB = min(value(main_objective.expr), solve_data.UB)
            solve_data.solution_improved = solve_data.UB < solve_data.UB_progress[-1]
            solve_data.UB_progress.append(solve_data.UB)
        else:
            solve_data.LB = max(value(main_objective.expr), solve_data.LB)
            solve_data.solution_improved = solve_data.LB > solve_data.LB_progress[-1]
            solve_data.LB_progress.append(solve_data.LB)
        config.logger.info(
            'NLP {}: OBJ: {}  LB: {}  UB: {}'
            .format(solve_data.nlp_iter, value(main_objective.expr), solve_data.LB, solve_data.UB))
        if solve_data.solution_improved:
            solve_data.best_solution_found = m.clone()
        # Add the linear cut
        if config.strategy == 'OA':
            add_oa_cut(var_values, duals, solve_data, config)
        elif config.strategy == 'PSC':
            add_psc_cut(solve_data, config)
        elif config.strategy == 'GBD':
            add_gbd_cut(solve_data, config)

        # This adds an integer cut to the feasible_integer_cuts
        # ConstraintList, which is not activated by default. However, it
        # may be activated as needed in certain situations or for certain
        # values of option flags.
        add_int_cut(var_values, solve_data, config, feasible=True)

        config.call_after_subproblem_feasible(m, solve_data)
    elif subprob_terminate_cond is tc.infeasible:
        # TODO try something else? Reinitialize with different initial
        # value?
        config.logger.info('NLP subproblem was locally infeasible.')
        for c in m.component_data_objects(ctype=Constraint, active=True,
                                          descend_into=True):
            rhs = ((0 if c.upper is None else c.upper) +
                   (0 if c.lower is None else c.lower))
            sign_adjust = 1 if value(c.upper) is None else -1
            m.dual[c] = sign_adjust * max(0,
                                          sign_adjust * (rhs - value(c.body)))
        for var in m.component_data_objects(ctype=Var,
                                            descend_into=True):

            if config.strategy == 'PSC' or config.strategy == 'GBD':
                m.ipopt_zL_out[var] = 0
                m.ipopt_zU_out[var] = 0
                if var.ub is not None and abs(var.ub - value(var)) < config.bound_tolerance:
                    m.ipopt_zL_out[var] = 1
                elif var.lb is not None and abs(value(var) - var.lb) < config.bound_tolerance:
                    m.ipopt_zU_out[var] = -1
        # m.pprint() #print infeasible nlp problem for debugging
        if config.strategy == 'OA':
            config.logger.info('Solving feasibility problem')
            if config.initial_feas:
                # add_feas_slacks(m, solve_data)
                # config.initial_feas = False
                var_values, duals = solve_NLP_feas(solve_data, config)
                add_oa_cut(var_values, duals, solve_data, config)
        # Add an integer cut to exclude this discrete option
        add_int_cut(var_values, solve_data, config)
    elif subprob_terminate_cond is tc.maxIterations:
        # TODO try something else? Reinitialize with different initial
        # value?
        config.logger.info('NLP subproblem failed to converge within iteration limit.')
        # Add an integer cut to exclude this discrete option
        add_int_cut(var_values, solve_data, config)
    else:
        raise ValueError(
            'MindtPy unable to handle NLP subproblem termination '
            'condition of {}'.format(subprob_terminate_cond))

    # Call the NLP post-solve callback
    config.call_after_subproblem_solve(m, solve_data)
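
The rhs/sign_adjust computation above estimates a dual-like violation measure when the subsolver returns no duals. A minimal sketch of the same arithmetic on a single upper-bounded constraint (the values are illustrative):

from pyomo.environ import ConcreteModel, Var, Constraint, value

m = ConcreteModel()
m.x = Var(initialize=5.0)
m.c = Constraint(expr=m.x <= 3)

# Same logic as the loop above: rhs collapses the (single) finite bound,
# and sign_adjust flips the sign for upper-bounded constraints.
rhs = ((0 if m.c.upper is None else value(m.c.upper)) +
       (0 if m.c.lower is None else value(m.c.lower)))
sign_adjust = 1 if m.c.upper is None else -1
pseudo_dual = sign_adjust * max(0, sign_adjust * (rhs - value(m.c.body)))
print(pseudo_dual)  # -2.0: the constraint is violated by two units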
Example No. 28
    def solve(self, model, **kwds):
        """Solve the model.

        Warning: this solver is still in beta. Keyword arguments subject to
        change. Undocumented keyword arguments definitely subject to change.

        This function performs all of the GDPopt solver setup and problem
        validation. It then calls upon helper functions to construct the
        initial master approximation and iteration loop.

        Args:
            model (Block): a Pyomo model or block to be solved

        """
        config = self.CONFIG(kwds.pop('options', {}))
        config.set_value(kwds)
        solve_data = GDPoptSolveData()
        created_GDPopt_block = False

        old_logger_level = config.logger.getEffectiveLevel()
        try:
            if config.tee and old_logger_level > logging.INFO:
                # If the logger does not already include INFO, include it.
                config.logger.setLevel(logging.INFO)
            config.logger.info("---Starting GDPopt---")

            # Create a model block on which to store GDPopt-specific utility
            # modeling objects.
            if hasattr(model, 'GDPopt_utils'):
                raise RuntimeError(
                    "GDPopt needs to create a Block named GDPopt_utils "
                    "on the model object, but an attribute with that name "
                    "already exists.")
            else:
                created_GDPopt_block = True
                model.GDPopt_utils = Block(
                    doc="Container for GDPopt solver utility modeling objects")

            solve_data.original_model = model

            solve_data.working_model = clone_orig_model_with_lists(model)
            GDPopt = solve_data.working_model.GDPopt_utils
            record_original_model_statistics(solve_data, config)

            solve_data.current_strategy = config.strategy

            # Reformulate integer variables to binary
            reformulate_integer_variables(solve_data.working_model, config)

            # Save ordered lists of main modeling components, so that data can
            # be easily transferred between future model clones.
            build_ordered_component_lists(solve_data.working_model)
            record_working_model_statistics(solve_data, config)
            solve_data.results.solver.name = 'GDPopt ' + str(self.version())

            # Save model initial values. These are used later to initialize NLP
            # subproblems.
            solve_data.initial_var_values = list(
                v.value for v in GDPopt.working_var_list)

            # Store the initial model state as the best solution found. If we
            # find no better solution, then we will restore from this copy.
            solve_data.best_solution_found = solve_data.initial_var_values

            # Validate the model to ensure that GDPopt is able to solve it.
            if not model_is_valid(solve_data, config):
                return

            # Maps in order to keep track of certain generated constraints
            GDPopt.oa_cut_map = ComponentMap()

            # Integer cuts exclude particular discrete decisions
            GDPopt.integer_cuts = ConstraintList(doc='integer cuts')

            # Feasible integer cuts exclude discrete realizations that have
            # been explored via an NLP subproblem. Depending on model
            # characteristics, the user may wish to revisit NLP subproblems
            # (with a different initialization, for example). Therefore, these
            # cuts are not enabled by default, unless the initial model has no
            # discrete decisions.

            # Note: these cuts will only exclude integer realizations that are
            # not already in the primary GDPopt_integer_cuts ConstraintList.
            GDPopt.no_backtracking = ConstraintList(
                doc='explored integer cuts')

            # Set up iteration counters
            solve_data.master_iteration = 0
            solve_data.mip_iteration = 0
            solve_data.nlp_iteration = 0

            # set up bounds
            solve_data.LB = float('-inf')
            solve_data.UB = float('inf')
            solve_data.iteration_log = {}

            # Flag indicating whether the solution improved in the past
            # iteration or not
            solve_data.feasible_solution_improved = False

            # Initialize the master problem
            GDPopt_initialize_master(solve_data, config)

            # Algorithm main loop
            GDPopt_iteration_loop(solve_data, config)

            # Update values in working model
            copy_var_list_values(
                from_list=solve_data.best_solution_found,
                to_list=GDPopt.working_var_list,
                config=config)
            GDPopt.objective_value.set_value(
                value(solve_data.working_objective_expr, exception=False))

            # Update values in original model
            copy_var_list_values(
                GDPopt.orig_var_list,
                solve_data.original_model.GDPopt_utils.orig_var_list,
                config)

            solve_data.results.problem.lower_bound = solve_data.LB
            solve_data.results.problem.upper_bound = solve_data.UB

        finally:
            config.logger.setLevel(old_logger_level)
            if created_GDPopt_block:
                model.del_component('GDPopt_utils')
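
The try/finally above is a setup/teardown pattern: create GDPopt_utils, then guarantee its removal even if the solve errors out. A hedged sketch of the same pattern as a context manager (temporary_utility_block is a hypothetical helper; later versions of this code use create_utility_block instead):

from contextlib import contextmanager
from pyomo.environ import Block

@contextmanager
def temporary_utility_block(model, name):
    # Mirrors the check above: refuse to clobber an existing attribute.
    if hasattr(model, name):
        raise RuntimeError(
            "An attribute named %s already exists on the model." % name)
    model.add_component(name, Block(doc="temporary solver utility block"))
    try:
        yield getattr(model, name)
    finally:
        # Equivalent to the finally-clause cleanup above.
        model.del_component(name)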
Example No. 29
    def solve(self, model, **kwds):
        """Solve the model.
        Warning: this solver is still in beta. Keyword arguments subject to
        change. Undocumented keyword arguments definitely subject to change.
        Warning: at this point in time, if you try to use PSC or GBD with
        anything other than IPOPT as the NLP solver, bad things will happen.
        This is because the suffixes are not in place to extract dual values
        from the variable bounds for any other solver.
        TODO: fix needed with the GBD implementation.
        Args:
            model (Block): a Pyomo model or block to be solved
        """
        config = self.CONFIG(kwds.pop('options', {}))
        config.set_value(kwds)
        solve_data = MindtPySolveData()
        solve_data.results = SolverResults()
        solve_data.timing = Container()

        old_logger_level = config.logger.getEffectiveLevel()
        with time_code(solve_data.timing, 'total'), \
             restore_logger_level(config.logger), \
             create_utility_block(model, 'MindtPy_utils', solve_data):
            if config.tee and old_logger_level > logging.INFO:
                # If the logger does not already include INFO, include it.
                config.logger.setLevel(logging.INFO)
            config.logger.info("---Starting MindtPy---")

            solve_data.original_model = model
            solve_data.working_model = model.clone()
            MindtPy = solve_data.working_model.MindtPy_utils
            setup_results_object(solve_data, config)
            process_objective(solve_data, config)

            # Save model initial values.
            solve_data.initial_var_values = list(
                v.value for v in MindtPy.variable_list)

            # Store the initial model state as the best solution found. If we
            # find no better solution, then we will restore from this copy.
            solve_data.best_solution_found = None

            # Record solver name
            solve_data.results.solver.name = 'MindtPy-' + str(config.strategy)

            # Validate the model to ensure that MindtPy is able to solve it.
            if not model_is_valid(solve_data, config):
                return

            # Create a model block in which to store the generated feasibility
            # slack constraints. Do not leave the constraints on by default.
            feas = MindtPy.MindtPy_feas = Block()
            feas.deactivate()
            feas.feas_constraints = ConstraintList(
                doc='Feasibility Problem Constraints')

            # Create a model block in which to store the generated linear
            # constraints. Do not leave the constraints on by default.
            lin = MindtPy.MindtPy_linear_cuts = Block()
            lin.deactivate()

            # Integer cuts exclude particular discrete decisions
            lin.integer_cuts = ConstraintList(doc='integer cuts')
            # Feasible integer cuts exclude discrete realizations that have
            # been explored via an NLP subproblem. Depending on model
            # characteristics, the user may wish to revisit NLP subproblems
            # (with a different initialization, for example). Therefore, these
            # cuts are not enabled by default.
            #
            # Note: these cuts will only exclude integer realizations that are
            # not already in the primary integer_cuts ConstraintList.
            lin.feasible_integer_cuts = ConstraintList(
                doc='explored integer cuts')
            lin.feasible_integer_cuts.deactivate()

            # Set up iteration counters
            solve_data.nlp_iter = 0
            solve_data.mip_iter = 0
            solve_data.mip_subiter = 0

            # set up bounds
            solve_data.LB = float('-inf')
            solve_data.UB = float('inf')
            solve_data.LB_progress = [solve_data.LB]
            solve_data.UB_progress = [solve_data.UB]

            # Set of NLP iterations for which cuts were generated
            lin.nlp_iters = Set(dimen=1)

            # Set of MIP iterations for which cuts were generated in ECP
            lin.mip_iters = Set(dimen=1)

            nonlinear_constraints = [c for c in MindtPy.constraint_list if
                                     c.body.polynomial_degree() not in (1, 0)]
            lin.nl_constraint_set = RangeSet(
                len(nonlinear_constraints),
                doc="Integer index set over the nonlinear constraints")
            feas.constraint_set = RangeSet(
                len(MindtPy.constraint_list),
                doc="integer index set over the constraints")

            # # Mapping Constraint -> integer index
            # MindtPy.feas_map = {}
            # # Mapping integer index -> Constraint
            # MindtPy.feas_inverse_map = {}
            # # Generate the two maps. These maps may be helpful for later
            # # interpreting indices on the slack variables or generated cuts.
            # for c, n in zip(MindtPy.constraint_list, feas.constraint_set):
            #     MindtPy.feas_map[c] = n
            #     MindtPy.feas_inverse_map[n] = c

            # Create slack variables for OA cuts
            lin.slack_vars = VarList(bounds=(0, config.max_slack), initialize=0, domain=NonNegativeReals)
            # Create slack variables for feasibility problem
            feas.slack_var = Var(feas.constraint_set,
                                 domain=NonNegativeReals, initialize=1)

            # Flag indicating whether the solution improved in the past
            # iteration or not
            solve_data.solution_improved = False

            if not hasattr(solve_data.working_model, 'ipopt_zL_out'):
                solve_data.working_model.ipopt_zL_out = Suffix(
                    direction=Suffix.IMPORT)
            if not hasattr(solve_data.working_model, 'ipopt_zU_out'):
                solve_data.working_model.ipopt_zU_out = Suffix(
                    direction=Suffix.IMPORT)

            # Initialize the master problem
            with time_code(solve_data.timing, 'initialization'):
                MindtPy_initialize_master(solve_data, config)

            # Algorithm main loop
            with time_code(solve_data.timing, 'main loop'):
                MindtPy_iteration_loop(solve_data, config)

            if solve_data.best_solution_found is not None:
                # Update values in original model
                copy_var_list_values(
                    from_list=solve_data.best_solution_found.MindtPy_utils.variable_list,
                    to_list=MindtPy.variable_list,
                    config=config)
                # MindtPy.objective_value.set_value(
                #     value(solve_data.working_objective_expr, exception=False))
                copy_var_list_values(
                    MindtPy.variable_list,
                    solve_data.original_model.MindtPy_utils.variable_list,
                    config)

            solve_data.results.problem.lower_bound = solve_data.LB
            solve_data.results.problem.upper_bound = solve_data.UB
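
As with GDPopt, this solve method backs the SolverFactory('mindtpy') entry point. A hedged usage sketch (a minimal invocation; 'model' stands in for any Pyomo MINLP, and the subsolver names are assumptions that depend on what is installed locally):

from pyomo.environ import SolverFactory

results = SolverFactory('mindtpy').solve(
    model,
    strategy='OA',         # outer approximation, as in the cut loop above
    mip_solver='glpk',
    nlp_solver='ipopt')
print(results.problem.lower_bound, results.problem.upper_bound)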
Example No. 30
    def solve(self, model, **kwds):
        """Solve the model.

        Warning: this solver is still in beta. Keyword arguments subject to
        change. Undocumented keyword arguments definitely subject to change.

        This function performs all of the GDPopt solver setup and problem
        validation. It then calls upon helper functions to construct the
        initial master approximation and iteration loop.

        Args:
            model (Block): a Pyomo model or block to be solved

        """
        config = self.CONFIG(kwds.pop('options', {}))
        config.set_value(kwds)
        solve_data = GDPoptSolveData()
        solve_data.results = SolverResults()
        solve_data.timing = Container()

        old_logger_level = config.logger.getEffectiveLevel()
        with time_code(solve_data.timing, 'total', is_main_timer=True), \
                restore_logger_level(config.logger), \
                create_utility_block(model, 'GDPopt_utils', solve_data):
            if config.tee and old_logger_level > logging.INFO:
                # If the logger does not already include INFO, include it.
                config.logger.setLevel(logging.INFO)
            config.logger.info(
                "Starting GDPopt version %s using %s algorithm" %
                (".".join(map(str, self.version())), config.strategy))
            config.logger.info("""
If you use this software, you may cite the following:
- Implementation:
    Chen, Q; Johnson, ES; Siirola, JD; Grossmann, IE.
    Pyomo.GDP: Disjunctive Models in Python. 
    Proc. of the 13th Intl. Symposium on Process Systems Eng.
    San Diego, 2018.
- LOA algorithm:
    Türkay, M; Grossmann, IE.
    Logic-based MINLP algorithms for the optimal synthesis of process networks.
    Comp. and Chem. Eng. 1996, 20(8), 959–978.
    DOI: 10.1016/0098-1354(95)00219-7.
- GLOA algorithm:
    Lee, S; Grossmann, IE.
    A Global Optimization Algorithm for Nonconvex Generalized Disjunctive Programming and Applications to Process Systems
    Comp. and Chem. Eng. 2001, 25, 1675-1697.
    DOI: 10.1016/S0098-1354(01)00732-3
                """.strip())
            solve_data.results.solver.name = 'GDPopt %s - %s' % (str(
                self.version()), config.strategy)

            solve_data.original_model = model
            solve_data.working_model = model.clone()
            GDPopt = solve_data.working_model.GDPopt_utils
            setup_results_object(solve_data, config)

            solve_data.current_strategy = config.strategy

            # Verify that objective has correct form
            process_objective(solve_data, config)

            # Save model initial values. These are used later to initialize NLP
            # subproblems.
            solve_data.initial_var_values = list(v.value
                                                 for v in GDPopt.variable_list)
            solve_data.best_solution_found = None

            # Validate the model to ensure that GDPopt is able to solve it.
            if not model_is_valid(solve_data, config):
                return

            # Integer cuts exclude particular discrete decisions
            GDPopt.integer_cuts = ConstraintList(doc='integer cuts')

            # Feasible integer cuts exclude discrete realizations that have
            # been explored via an NLP subproblem. Depending on model
            # characteristics, the user may wish to revisit NLP subproblems
            # (with a different initialization, for example). Therefore, these
            # cuts are not enabled by default, unless the initial model has no
            # discrete decisions.

            # Note: these cuts will only exclude integer realizations that are
            # not already in the primary GDPopt_integer_cuts ConstraintList.
            GDPopt.no_backtracking = ConstraintList(
                doc='explored integer cuts')

            # Set up iteration counters
            solve_data.master_iteration = 0
            solve_data.mip_iteration = 0
            solve_data.nlp_iteration = 0

            # set up bounds
            solve_data.LB = float('-inf')
            solve_data.UB = float('inf')
            solve_data.iteration_log = {}

            # Flag indicating whether the solution improved in the past
            # iteration or not
            solve_data.feasible_solution_improved = False

            # Initialize the master problem
            with time_code(solve_data.timing, 'initialization'):
                GDPopt_initialize_master(solve_data, config)

            # Algorithm main loop
            with time_code(solve_data.timing, 'main loop'):
                GDPopt_iteration_loop(solve_data, config)

            if solve_data.best_solution_found is not None:
                # Update values in working model
                copy_var_list_values(from_list=solve_data.best_solution_found.
                                     GDPopt_utils.variable_list,
                                     to_list=GDPopt.variable_list,
                                     config=config)
                # Update values in original model
                copy_var_list_values(
                    GDPopt.variable_list,
                    solve_data.original_model.GDPopt_utils.variable_list,
                    config)

            solve_data.results.problem.lower_bound = solve_data.LB
            solve_data.results.problem.upper_bound = solve_data.UB

        solve_data.results.solver.timing = solve_data.timing
        solve_data.results.solver.user_time = solve_data.timing.total
        solve_data.results.solver.wallclock_time = solve_data.timing.total

        solve_data.results.solver.iterations = solve_data.master_iteration

        return solve_data.results