Example 1
def add_affine_cuts(nlp_result, solve_data, config):
    with time_code(solve_data.timing, "affine cut generation"):
        m = solve_data.linear_GDP
        if config.calc_disjunctive_bounds:
            with time_code(solve_data.timing, "disjunctive variable bounding"):
                TransformationFactory(
                    'contrib.compute_disj_var_bounds').apply_to(
                        m,
                        solver=config.mip_solver
                        if config.obbt_disjunctive_bounds else None)
        config.logger.info("Adding affine cuts.")
        GDPopt = m.GDPopt_utils
        counter = 0
        for var, val in zip(GDPopt.variable_list, nlp_result.var_values):
            if val is not None and not var.fixed:
                var.value = val

        for constr in constraints_in_True_disjuncts(m, config):
            # Note: this includes constraints that are deactivated in the current model (linear_GDP)

            disjunctive_var_bounds = disjunctive_bounds(constr.parent_block())

            if constr.body.polynomial_degree() in (1, 0):
                continue

            vars_in_constr = list(identify_variables(constr.body))
            if any(var.value is None for var in vars_in_constr):
                continue  # at least one variable has no value

            # Build the McCormick relaxation of the constraint body via MC++
            mc_eqn = mc(constr.body, disjunctive_var_bounds)
            # mc_eqn = mc(constr.body)
            ccSlope = mc_eqn.subcc()
            cvSlope = mc_eqn.subcv()
            ccStart = mc_eqn.concave()
            cvStart = mc_eqn.convex()
            ub_int = min(
                constr.upper,
                mc_eqn.upper()) if constr.has_ub() else mc_eqn.upper()
            lb_int = max(
                constr.lower,
                mc_eqn.lower()) if constr.has_lb() else mc_eqn.lower()

            parent_block = constr.parent_block()
            # Create a block on which to put outer approximation cuts.
            aff_utils = parent_block.component('GDPopt_aff')
            if aff_utils is None:
                aff_utils = parent_block.GDPopt_aff = Block(
                    doc="Block holding affine constraints")
                aff_utils.GDPopt_aff_cons = ConstraintList()
            aff_cuts = aff_utils.GDPopt_aff_cons
            concave_cut = sum(ccSlope[var] * (var - var.value)
                              for var in vars_in_constr) + ccStart >= lb_int
            convex_cut = sum(cvSlope[var] * (var - var.value)
                             for var in vars_in_constr) + cvStart <= ub_int
            aff_cuts.add(expr=concave_cut)
            aff_cuts.add(expr=convex_cut)
            counter += 2

        config.logger.info("Added %s affine cuts" % counter)
Example 2
def solve_feasibility_subproblem(solve_data, config):
    """Solves a feasibility NLP if the fixed_nlp problem is infeasible.

    Args:
        solve_data (MindtPySolveData): data container that holds solve-instance data.
        config (ConfigBlock): the specific configurations for MindtPy.

    Returns:
        feas_subproblem (Pyomo model): feasibility NLP from the model.
        feas_soln (SolverResults): results from solving the feasibility NLP.
    """
    feas_subproblem = solve_data.working_model.clone()
    add_feas_slacks(feas_subproblem, config)

    MindtPy = feas_subproblem.MindtPy_utils
    if MindtPy.find_component('objective_value') is not None:
        MindtPy.objective_value.value = 0

    next(feas_subproblem.component_data_objects(
        Objective, active=True)).deactivate()
    for constr in feas_subproblem.MindtPy_utils.nonlinear_constraint_list:
        constr.deactivate()

    MindtPy.feas_opt.activate()
    if config.feasibility_norm == 'L1':
        MindtPy.feas_obj = Objective(
            expr=sum(s for s in MindtPy.feas_opt.slack_var[...]),
            sense=minimize)
    elif config.feasibility_norm == 'L2':
        MindtPy.feas_obj = Objective(
            expr=sum(s*s for s in MindtPy.feas_opt.slack_var[...]),
            sense=minimize)
    else:
        MindtPy.feas_obj = Objective(
            expr=MindtPy.feas_opt.slack_var,
            sense=minimize)
    TransformationFactory('core.fix_integer_vars').apply_to(feas_subproblem)
    nlpopt = SolverFactory(config.nlp_solver)
    nlp_args = dict(config.nlp_solver_args)
    set_solver_options(nlpopt, solve_data, config, solver_type='nlp')
    with SuppressInfeasibleWarning():
        try:
            with time_code(solve_data.timing, 'feasibility subproblem'):
                feas_soln = nlpopt.solve(
                    feas_subproblem, tee=config.nlp_solver_tee, **nlp_args)
        except (ValueError, OverflowError):
            # Reset variables to their initial values and retry once; a bad
            # starting point can trigger evaluation errors in the NLP solver.
            for nlp_var, orig_val in zip(
                    MindtPy.variable_list,
                    solve_data.initial_var_values):
                if not nlp_var.fixed and not nlp_var.is_binary():
                    nlp_var.set_value(orig_val, skip_validation=True)
            with time_code(solve_data.timing, 'feasibility subproblem'):
                feas_soln = nlpopt.solve(
                    feas_subproblem, tee=config.nlp_solver_tee, **nlp_args)
    handle_feasibility_subproblem_tc(
        feas_soln.solver.termination_condition, MindtPy, solve_data, config)
    return feas_subproblem, feas_soln
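
The feasibility subproblem above minimizes constraint violation over slack variables. A minimal self-contained sketch of the underlying L1 idea, on a hypothetical two-constraint model (not MindtPy internals):

# Minimal sketch: relax each (mutually infeasible) constraint with a
# nonnegative slack and minimize total violation, the L1 analogue of the
# feas_obj built above.
from pyomo.environ import (ConcreteModel, Var, Constraint, Objective,
                           NonNegativeReals, minimize)

m = ConcreteModel()
m.x = Var(bounds=(0, 1))
m.s1 = Var(domain=NonNegativeReals)
m.s2 = Var(domain=NonNegativeReals)
# Two requirements no x in [0, 1] can satisfy simultaneously:
m.c1 = Constraint(expr=m.x >= 2 - m.s1)
m.c2 = Constraint(expr=m.x <= -1 + m.s2)
m.feas_obj = Objective(expr=m.s1 + m.s2, sense=minimize)
# SolverFactory('ipopt').solve(m)  # optimal slacks measure the infeasibility
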
Example 3
    def solve(self, model, **kwds):
        """Solve the model.

        Warning: this solver is still in beta. Keyword arguments subject to
        change. Undocumented keyword arguments definitely subject to change.

        This function performs all of the GDPopt solver setup and problem
        validation. It then calls upon helper functions to construct the
        initial master approximation and iteration loop.

        Args:
            model (Block): a Pyomo model or block to be solved

        """
        config = self.CONFIG(kwds.pop('options', {}), preserve_implicit=True)
        config.set_value(kwds)
        if config.strategy is None:
            msg = 'Please specify solution strategy. Options are: \n'
            msg += '    LOA:  Logic-based Outer Approximation\n'
            msg += '    GLOA: Global Logic-based Outer Approximation\n'
            msg += '    LBB:  Logic-based Branch and Bound\n'
            msg += '    RIC:  Relaxation with Integer Cuts'
            raise ValueError(msg)

        with setup_solver_environment(model, config) as solve_data:
            self._log_solver_intro_message(config)
            solve_data.results.solver.name = 'GDPopt %s - %s' % (str(
                self.version()), config.strategy)

            # Verify that objective has correct form
            process_objective(solve_data, config)

            # Presolve LP or NLP problems using subsolvers
            presolved, presolve_results = presolve_lp_nlp(solve_data, config)
            if presolved:
                # TODO merge the solver results
                return presolve_results  # problem presolved

            if solve_data.active_strategy in {'LOA', 'GLOA', 'RIC'}:
                # Initialize the master problem
                with time_code(solve_data.timing, 'initialization'):
                    GDPopt_initialize_master(solve_data, config)

                # Algorithm main loop
                with time_code(solve_data.timing, 'main loop'):
                    GDPopt_iteration_loop(solve_data, config)
            elif solve_data.active_strategy == 'LBB':
                _perform_branch_and_bound(solve_data)
            else:
                raise ValueError('Unrecognized strategy: ' + config.strategy)

        return solve_data.results
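
For context, a typical invocation of this solve() entry point might look like the following sketch (a toy GDP model; the glpk and ipopt subsolvers are assumed to be installed):

# Hypothetical usage sketch for the GDPopt solve() method above.
from pyomo.environ import ConcreteModel, Var, Objective, SolverFactory
from pyomo.gdp import Disjunction

m = ConcreteModel()
m.x = Var(bounds=(0, 10))
m.d = Disjunction(expr=[[m.x >= 4], [m.x <= 2]])
m.obj = Objective(expr=m.x)
results = SolverFactory('gdpopt').solve(
    m, strategy='LOA',  # one of the strategies validated above
    mip_solver='glpk', nlp_solver='ipopt')
print(results.solver.termination_condition)
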
Example 4
def GDPopt_iteration_loop(solve_data, config):
    """Algorithm main loop.

    Returns True if successful convergence is obtained. False otherwise.

    """
    while solve_data.master_iteration < config.iterlim:
        # Set iteration counters for new master iteration.
        solve_data.master_iteration += 1
        solve_data.mip_iteration = 0
        solve_data.nlp_iteration = 0

        # print line for visual display
        config.logger.info(
            '---GDPopt Master Iteration %s---'
            % solve_data.master_iteration)

        # solve linear master problem
        with time_code(solve_data.timing, 'mip'):
            mip_result = solve_LOA_master(solve_data, config)

        # Check termination conditions
        if algorithm_should_terminate(solve_data, config):
            break

        # Solve NLP subproblem
        if solve_data.active_strategy == 'LOA':
            with time_code(solve_data.timing, 'nlp'):
                nlp_result = solve_local_subproblem(mip_result, solve_data, config)
            if nlp_result.feasible:
                add_outer_approximation_cuts(nlp_result, solve_data, config)
        elif solve_data.active_strategy == 'GLOA':
            with time_code(solve_data.timing, 'nlp'):
                nlp_result = solve_global_subproblem(mip_result, solve_data, config)
            if nlp_result.feasible:
                add_affine_cuts(nlp_result, solve_data, config)
        elif solve_data.active_strategy == 'RIC':
            with time_code(solve_data.timing, 'nlp'):
                nlp_result = solve_local_subproblem(mip_result, solve_data, config)
        else:
            raise ValueError('Unrecognized strategy: ' + solve_data.active_strategy)

        # Add integer cut
        add_integer_cut(
            mip_result.var_values, solve_data.linear_GDP, solve_data, config,
            feasible=nlp_result.feasible)

        # Check termination conditions
        if algorithm_should_terminate(solve_data, config):
            break
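
The loop above delegates its stopping test to algorithm_should_terminate(). A sketch of the bound bookkeeping such a test typically relies on (illustrative helper, not a GDPopt function):

# Stop when the lower and upper bounds close to within absolute or
# relative tolerance; names here are illustrative only.
def bounds_converged(LB, UB, abs_tol=1e-6, rel_tol=1e-4):
    """Return True when the optimality gap is closed."""
    if UB - LB <= abs_tol:
        return True
    return UB - LB <= rel_tol * max(abs(LB), abs(UB), 1.0)

assert bounds_converged(9.9999, 10.0, abs_tol=1e-3)
assert not bounds_converged(0.0, 100.0)
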
Example 5
    def add_lazy_no_good_cuts(self,
                              var_values,
                              solve_data,
                              config,
                              opt,
                              feasible=False):
        """Adds no-good cuts.

        Add the no-good cuts through Cplex inherent function self.add().

        Args:
            var_values (list): values of the current variables, used to generate the cut.
            solve_data (MindtPySolveData): data container that holds solve-instance data.
            config (ConfigBlock): the specific configurations for MindtPy.
            opt (SolverFactory): cplex_persistent.
            feasible (bool, optional): whether the integer combination yields a feasible or infeasible NLP. Defaults to False.

        Raises:
            ValueError: binary variable is not 0 or 1
        """
        if not config.add_no_good_cuts:
            return

        config.logger.info('Adding no-good cuts')
        with time_code(solve_data.timing, 'No-good cut generation'):
            m = solve_data.mip
            MindtPy = m.MindtPy_utils
            int_tol = config.integer_tolerance

            binary_vars = [v for v in MindtPy.variable_list if v.is_binary()]

            # copy variable values over
            for var, val in zip(MindtPy.variable_list, var_values):
                if not var.is_binary():
                    continue
                var.set_value(val, skip_validation=True)

            # check to make sure that binary variables are all 0 or 1
            for v in binary_vars:
                if value(abs(v - 1)) > int_tol and value(abs(v)) > int_tol:
                    raise ValueError('Binary {} = {} is not 0 or 1'.format(
                        v.name, value(v)))

            if not binary_vars:  # if no binary variables, skip
                return

            pyomo_no_good_cut = sum(
                1 - v
                for v in binary_vars if value(abs(v - 1)) <= int_tol) + sum(
                    v for v in binary_vars if value(abs(v)) <= int_tol)
            cplex_no_good_rhs = generate_standard_repn(
                pyomo_no_good_cut).constant
            cplex_no_good_cut, _ = opt._get_expr_from_pyomo_expr(
                pyomo_no_good_cut)

            self.add(constraint=cplex.SparsePair(
                ind=cplex_no_good_cut.variables,
                val=cplex_no_good_cut.coefficients),
                     sense='G',
                     rhs=1 - cplex_no_good_rhs)
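
To see the arithmetic of the cut built above: for an incumbent binary assignment y = (1, 0, 1), the no-good cut is (1 - y1) + y2 + (1 - y3) >= 1, which cuts off exactly that assignment and nothing else. A standalone sketch:

# Worked no-good cut for a hypothetical incumbent assignment.
from pyomo.environ import ConcreteModel, Var, Binary, ConstraintList

m = ConcreteModel()
m.y = Var([1, 2, 3], domain=Binary)
incumbent = {1: 1, 2: 0, 3: 1}
m.no_good = ConstraintList()
m.no_good.add(
    sum(1 - m.y[i] for i in incumbent if incumbent[i] == 1)
    + sum(m.y[i] for i in incumbent if incumbent[i] == 0) >= 1)
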
Example 6
def GDPopt_iteration_loop(solve_data, config):
    """Algorithm main loop.

    Returns True if successful convergence is obtained. False otherwise.

    """
    while solve_data.master_iteration < config.iterlim:
        # Set iteration counters for new master iteration.
        solve_data.master_iteration += 1
        solve_data.mip_iteration = 0
        solve_data.nlp_iteration = 0

        # print line for visual display
        config.logger.info(
            '---GDPopt Master Iteration %s---'
            % solve_data.master_iteration)

        # solve linear master problem
        with time_code(solve_data.timing, 'mip'):
            mip_result = solve_LOA_master(solve_data, config)

        # Check termination conditions
        if algorithm_should_terminate(solve_data, config):
            break

        # Solve NLP subproblem
        if solve_data.current_strategy == 'LOA':
            with time_code(solve_data.timing, 'nlp'):
                nlp_result = solve_local_subproblem(mip_result, solve_data, config)
            if nlp_result.feasible:
                add_outer_approximation_cuts(nlp_result, solve_data, config)
        elif solve_data.current_strategy == 'GLOA':
            with time_code(solve_data.timing, 'nlp'):
                nlp_result = solve_global_subproblem(mip_result, solve_data, config)
            if nlp_result.feasible:
                add_affine_cuts(nlp_result, solve_data, config)
        else:
            # Guard against nlp_result being unbound below
            raise ValueError(
                'Unrecognized strategy: ' + solve_data.current_strategy)

        # Add integer cut
        add_integer_cut(
            mip_result.var_values, solve_data.linear_GDP, solve_data, config,
            feasible=nlp_result.feasible)

        # Check termination conditions
        if algorithm_should_terminate(solve_data, config):
            break
Example 7
def add_integer_cut(var_values, target_model, solve_data, config, feasible=False):
    """Add an integer cut to the target GDP model."""
    with time_code(solve_data.timing, 'integer cut generation'):
        m = target_model
        GDPopt = m.GDPopt_utils
        var_value_is_one = ComponentSet()
        var_value_is_zero = ComponentSet()
        for var, val in zip(GDPopt.variable_list, var_values):
            if not var.is_binary():
                continue
            if var.fixed:
                if val is not None and var.value != val:
                    # val needs to be None or match var.value. Otherwise, we have a
                    # contradiction.
                    raise ValueError(
                        "Fixed variable %s has value %s != "
                        "provided value of %s." % (var.name, var.value, val))
                val = var.value

            if not config.force_subproblem_nlp:
                # Only include Disjunct indicator variables in the cut; skip
                # all other binaries.
                # TODO we should implement this as a check among Disjuncts instead
                if not (var.local_name == 'indicator_var' and var.parent_block().type() == Disjunct):
                    continue

            if fabs(val - 1) <= config.integer_tolerance:
                var_value_is_one.add(var)
            elif fabs(val) <= config.integer_tolerance:
                var_value_is_zero.add(var)
            else:
                raise ValueError(
                    'Binary %s = %s is not 0 or 1' % (var.name, val))

        if not (var_value_is_one or var_value_is_zero):
            # if no remaining binary variables, then terminate algorithm.
            config.logger.info(
                'Adding integer cut to a model without discrete variables. '
                'Model is now infeasible.')
            if solve_data.objective_sense == minimize:
                solve_data.LB = float('inf')
            else:
                solve_data.UB = float('-inf')
            return False

        int_cut = (sum(1 - v for v in var_value_is_one) +
                   sum(v for v in var_value_is_zero)) >= 1

        if not feasible:
            config.logger.info('Adding integer cut')
            GDPopt.integer_cuts.add(expr=int_cut)
        else:
            backtracking_enabled = (
                "disabled" if GDPopt.no_backtracking.active else "allowed")
            config.logger.info(
                'Registering explored configuration. '
                'Backtracking is currently %s.' % backtracking_enabled)
            GDPopt.no_backtracking.add(expr=int_cut)
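
The indicator-variable filter above deserves a concrete illustration: in a GDP model, only the Disjunct indicator variables enter the integer cut unless force_subproblem_nlp is set. A sketch (recent Pyomo spells the component-type check .ctype where the code above uses the older .type()):

# Hypothetical GDP fragment showing which binaries pass the filter.
from pyomo.environ import ConcreteModel, Var
from pyomo.gdp import Disjunct, Disjunction

m = ConcreteModel()
m.x = Var(bounds=(0, 5))
m.d1 = Disjunct()
m.d2 = Disjunct()
m.dj = Disjunction(expr=[m.d1, m.d2])

for v in (m.d1.indicator_var, m.d2.indicator_var):
    # Mirrors: var.local_name == 'indicator_var' and parent is a Disjunct
    print(v.local_name, v.parent_block().ctype is Disjunct)
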
Example 8
    def solve(self, model, **kwds):
        """Solve the model.

        Warning: this solver is still in beta. Keyword arguments subject to
        change. Undocumented keyword arguments definitely subject to change.

        This function performs all of the GDPopt solver setup and problem
        validation. It then calls upon helper functions to construct the
        initial master approximation and iteration loop.

        Args:
            model (Block): a Pyomo model or block to be solved

        """
        config = self.CONFIG(kwds.pop('options', {}))
        config.set_value(kwds)

        with setup_solver_environment(model, config) as solve_data:
            self._log_solver_intro_message(config)
            solve_data.results.solver.name = 'GDPopt %s - %s' % (str(
                self.version()), config.strategy)

            # Verify that objective has correct form
            process_objective(solve_data, config)

            # Presolve LP or NLP problems using subsolvers
            presolved, presolve_results = presolve_lp_nlp(solve_data, config)
            if presolved:
                # TODO merge the solver results
                return presolve_results  # problem presolved

            if solve_data.active_strategy in {'LOA', 'GLOA'}:
                # Initialize the master problem
                with time_code(solve_data.timing, 'initialization'):
                    GDPopt_initialize_master(solve_data, config)

                # Algorithm main loop
                with time_code(solve_data.timing, 'main loop'):
                    GDPopt_iteration_loop(solve_data, config)
            elif solve_data.active_strategy == 'LBB':
                _perform_branch_and_bound(solve_data)
            else:
                raise ValueError('Unrecognized strategy: ' + config.strategy)

        return solve_data.results
Example 9
def add_no_good_cuts(var_values, solve_data, config):
    """Adds no-good cuts.

    This adds a no-good cut to the no_good_cuts ConstraintList, which is not activated by default.
    However, it may be activated as needed in certain situations or for certain values of option flags.


    Parameters
    ----------
    var_values : list
        Variable values of the current solution, used to generate the cut.
    solve_data : MindtPySolveData
        Data container that holds solve-instance data.
    config : ConfigBlock
        The specific configurations for MindtPy.

    Raises
    ------
    ValueError
        The value of binary variable is not 0 or 1.
    """
    if not config.add_no_good_cuts:
        return
    with time_code(solve_data.timing, 'no_good cut generation'):

        config.logger.debug('Adding no-good cuts')

        m = solve_data.mip
        MindtPy = m.MindtPy_utils
        int_tol = config.integer_tolerance

        binary_vars = [v for v in MindtPy.variable_list if v.is_binary()]

        # copy variable values over
        for var, val in zip(MindtPy.variable_list, var_values):
            if not var.is_binary():
                continue
            var.set_value(val, skip_validation=True)

        # check to make sure that binary variables are all 0 or 1
        for v in binary_vars:
            if value(abs(v - 1)) > int_tol and value(abs(v)) > int_tol:
                raise ValueError(
                    'Binary {} = {} is not 0 or 1'.format(v.name, value(v)))

        if not binary_vars:  # if no binary variables, skip
            return

        int_cut = (sum(1 - v for v in binary_vars
                       if value(abs(v - 1)) <= int_tol) +
                   sum(v for v in binary_vars
                       if value(abs(v)) <= int_tol) >= 1)

        MindtPy.cuts.no_good_cuts.add(expr=int_cut)
Example 10
def add_no_good_cuts(var_values, solve_data, config, feasible=False):
    """
    Adds no-good cuts; modifies the model to include no-good cuts
    This adds an no-good cuts to the no_good_cuts ConstraintList, which is not activated by default.
    However, it may be activated as needed in certain situations or for certain values of option flags.

    Parameters
    ----------
    var_values: list
        values of the current variables, used to generate the cut
    solve_data: MindtPy Data Container
        data container that holds solve-instance data
    config: ConfigBlock
        contains the specific configurations for the algorithm
    feasible: bool, optional
        boolean indicating if integer combination yields a feasible or infeasible NLP
    """
    if not config.add_no_good_cuts:
        return
    with time_code(solve_data.timing, 'no_good cut generation'):

        config.logger.info('Adding no-good cuts')

        m = solve_data.mip
        MindtPy = m.MindtPy_utils
        int_tol = config.integer_tolerance

        binary_vars = [v for v in MindtPy.variable_list if v.is_binary()]

        # copy variable values over
        for var, val in zip(MindtPy.variable_list, var_values):
            if not var.is_binary():
                continue
            var.value = val

        # check to make sure that binary variables are all 0 or 1
        for v in binary_vars:
            if value(abs(v - 1)) > int_tol and value(abs(v)) > int_tol:
                raise ValueError('Binary {} = {} is not 0 or 1'.format(
                    v.name, value(v)))

        if not binary_vars:  # if no binary variables, skip
            return

        int_cut = (sum(1 - v
                       for v in binary_vars if value(abs(v - 1)) <= int_tol) +
                   sum(v
                       for v in binary_vars if value(abs(v)) <= int_tol) >= 1)

        MindtPy.cuts.no_good_cuts.add(expr=int_cut)
Example 11
    def test_solve_linear_GDP_unbounded(self):
        m = ConcreteModel()
        m.GDPopt_utils = Block()
        m.x = Var(bounds=(-1, 10))
        m.y = Var(bounds=(2, 3))
        m.z = Var()
        m.d = Disjunction(expr=[
            [m.x + m.y >= 5], [m.x - m.y <= 3]
        ])
        m.o = Objective(expr=m.z)
        m.GDPopt_utils.variable_list = [m.x, m.y, m.z]
        m.GDPopt_utils.disjunct_list = [
            m.d._autodisjuncts[0], m.d._autodisjuncts[1]]
        output = StringIO()
        with LoggingIntercept(output, 'pyomo.contrib.gdpopt', logging.WARNING):
            solver_data = GDPoptSolveData()
            solver_data.timing = Bunch()
            with time_code(solver_data.timing, 'main', is_main_timer=True):
                solve_linear_GDP(
                    m, solver_data,
                    GDPoptSolver.CONFIG(dict(mip_solver=mip_solver,
                                             strategy='LOA')))
            self.assertIn(
                "Linear GDP was unbounded. Resolving with arbitrary bound "
                "values",
                output.getvalue().strip())
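
The LoggingIntercept pattern in this test is reusable on its own. A minimal standalone sketch of capturing and asserting on log output (the logged message here is fabricated for illustration):

# Capture WARNING-level output from a logger and assert on its contents.
import logging
from io import StringIO
from pyomo.common.log import LoggingIntercept

output = StringIO()
with LoggingIntercept(output, 'pyomo.contrib.gdpopt', logging.WARNING):
    logging.getLogger('pyomo.contrib.gdpopt').warning(
        'Linear GDP was unbounded.')
assert 'unbounded' in output.getvalue()
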
Example 12
    def solve(self, model, **kwds):
        """Solve the model.

        Warning: this solver is still in beta. Keyword arguments subject to
        change. Undocumented keyword arguments definitely subject to change.

        This function performs all of the GDPopt solver setup and problem
        validation. It then calls upon helper functions to construct the
        initial master approximation and iteration loop.

        Args:
            model (Block): a Pyomo model or block to be solved

        """
        config = self.CONFIG(kwds.pop('options', {}))
        config.set_value(kwds)
        solve_data = GDPoptSolveData()
        solve_data.results = SolverResults()
        solve_data.timing = Container()

        old_logger_level = config.logger.getEffectiveLevel()
        with time_code(solve_data.timing, 'total', is_main_timer=True), \
                restore_logger_level(config.logger), \
                create_utility_block(model, 'GDPopt_utils', solve_data):
            if config.tee and old_logger_level > logging.INFO:
                # If the logger does not already include INFO, include it.
                config.logger.setLevel(logging.INFO)
            config.logger.info(
                "Starting GDPopt version %s using %s algorithm"
                % (".".join(map(str, self.version())), config.strategy)
            )
            config.logger.info(
                """
If you use this software, you may cite the following:
- Implementation:
    Chen, Q; Johnson, ES; Siirola, JD; Grossmann, IE.
    Pyomo.GDP: Disjunctive Models in Python. 
    Proc. of the 13th Intl. Symposium on Process Systems Eng.
    San Diego, 2018.
- LOA algorithm:
    Türkay, M; Grossmann, IE.
    Logic-based MINLP algorithms for the optimal synthesis of process networks.
    Comp. and Chem. Eng. 1996, 20(8), 959–978.
    DOI: 10.1016/0098-1354(95)00219-7.
- GLOA algorithm:
    Lee, S; Grossmann, IE.
    A Global Optimization Algorithm for Nonconvex Generalized Disjunctive Programming and Applications to Process Systems
    Comp. and Chem. Eng. 2001, 25, 1675-1697.
    DOI: 10.1016/S0098-1354(01)00732-3
                """.strip()
            )
            solve_data.results.solver.name = 'GDPopt %s - %s' % (
                str(self.version()), config.strategy)

            solve_data.original_model = model
            solve_data.working_model = model.clone()
            GDPopt = solve_data.working_model.GDPopt_utils
            setup_results_object(solve_data, config)

            solve_data.current_strategy = config.strategy

            # Verify that objective has correct form
            process_objective(solve_data, config)

            # Save model initial values. These are used later to initialize NLP
            # subproblems.
            solve_data.initial_var_values = list(
                v.value for v in GDPopt.variable_list)
            solve_data.best_solution_found = None

            # Validate the model to ensure that GDPopt is able to solve it.
            if not model_is_valid(solve_data, config):
                return

            # Integer cuts exclude particular discrete decisions
            GDPopt.integer_cuts = ConstraintList(doc='integer cuts')

            # Feasible integer cuts exclude discrete realizations that have
            # been explored via an NLP subproblem. Depending on model
            # characteristics, the user may wish to revisit NLP subproblems
            # (with a different initialization, for example). Therefore, these
            # cuts are not enabled by default, unless the initial model has no
            # discrete decisions.

            # Note: these cuts will only exclude integer realizations that are
            # not already in the primary GDPopt_integer_cuts ConstraintList.
            GDPopt.no_backtracking = ConstraintList(
                doc='explored integer cuts')

            # Set up iteration counters
            solve_data.master_iteration = 0
            solve_data.mip_iteration = 0
            solve_data.nlp_iteration = 0

            # set up bounds
            solve_data.LB = float('-inf')
            solve_data.UB = float('inf')
            solve_data.iteration_log = {}

            # Flag indicating whether the solution improved in the past
            # iteration or not
            solve_data.feasible_solution_improved = False

            # Initialize the master problem
            with time_code(solve_data.timing, 'initialization'):
                GDPopt_initialize_master(solve_data, config)

            # Algorithm main loop
            with time_code(solve_data.timing, 'main loop'):
                GDPopt_iteration_loop(solve_data, config)

            if solve_data.best_solution_found is not None:
                # Update values in working model
                copy_var_list_values(
                    from_list=solve_data.best_solution_found.GDPopt_utils.variable_list,
                    to_list=GDPopt.variable_list,
                    config=config)
                # Update values in original model
                copy_var_list_values(
                    GDPopt.variable_list,
                    solve_data.original_model.GDPopt_utils.variable_list,
                    config)

            solve_data.results.problem.lower_bound = solve_data.LB
            solve_data.results.problem.upper_bound = solve_data.UB

        solve_data.results.solver.timing = solve_data.timing
        solve_data.results.solver.user_time = solve_data.timing.total
        solve_data.results.solver.wallclock_time = solve_data.timing.total

        solve_data.results.solver.iterations = solve_data.master_iteration

        return solve_data.results
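
At the end of solve() above, values found on the cloned working model are handed back to the original model. A minimal sketch of that hand-back pattern (copy_var_list_values() wraps the same loop with rounding and validation checks):

# Copy values from a solved clone back to the original, variable by variable.
from pyomo.environ import ConcreteModel, Var

orig = ConcreteModel()
orig.x = Var(initialize=0.0)
clone = orig.clone()
clone.x.set_value(3.5)  # pretend the algorithm solved the clone
for src, dst in zip([clone.x], [orig.x]):
    dst.set_value(src.value, skip_validation=True)
assert orig.x.value == 3.5
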
Example 13
def add_ecp_cuts(target_model, solve_data, config,
                 linearize_active=True,
                 linearize_violated=True):
    """Linearizes nonlinear constraints. Adds the cuts for the ECP method.

    Parameters
    ----------
    target_model : Pyomo model
        The relaxed linear model.
    solve_data : MindtPySolveData
        Data container that holds solve-instance data.
    config : ConfigBlock
        The specific configurations for MindtPy.
    linearize_active : bool, optional
        Whether to linearize the active nonlinear constraints, by default True.
    linearize_violated : bool, optional
        Whether to linearize the violated nonlinear constraints, by default True.
    """
    with time_code(solve_data.timing, 'ECP cut generation'):
        for constr in target_model.MindtPy_utils.nonlinear_constraint_list:
            constr_vars = list(identify_variables(constr.body))
            jacs = solve_data.jacobians

            if constr.has_lb() and constr.has_ub():
                config.logger.warning(
                    'constraint {} has both a lower '
                    'and upper bound.\n'.format(constr))
                continue
            if constr.has_ub():
                try:
                    upper_slack = constr.uslack()
                except (ValueError, OverflowError):
                    config.logger.warning(
                        'constraint {} has caused either a '
                        'ValueError or OverflowError.\n'.format(constr))
                    continue
                if (linearize_active and abs(upper_slack) < config.ecp_tolerance) \
                        or (linearize_violated and upper_slack < 0) \
                        or (config.linearize_inactive and upper_slack > 0):
                    if config.add_slack:
                        slack_var = target_model.MindtPy_utils.cuts.slack_vars.add()

                    target_model.MindtPy_utils.cuts.ecp_cuts.add(
                        expr=(sum(value(jacs[constr][var])*(var - var.value)
                                  for var in constr_vars)
                              - (slack_var if config.add_slack else 0)
                              <= upper_slack)
                    )

            if constr.has_lb():
                try:
                    lower_slack = constr.lslack()
                except (ValueError, OverflowError):
                    config.logger.warning(
                        'constraint {} has caused either a '
                        'ValueError or OverflowError.\n'.format(constr))
                    continue
                if (linearize_active and abs(lower_slack) < config.ecp_tolerance) \
                        or (linearize_violated and lower_slack < 0) \
                        or (config.linearize_inactive and lower_slack > 0):
                    if config.add_slack:
                        slack_var = target_model.MindtPy_utils.cuts.slack_vars.add()

                    target_model.MindtPy_utils.cuts.ecp_cuts.add(
                        expr=(sum(value(jacs[constr][var])*(var - var.value)
                                  for var in constr_vars)
                              + (slack_var if config.add_slack else 0)
                              >= -lower_slack)
                    )
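
Each ECP cut above is a first-order (gradient) linearization of a nonlinear constraint at the current point. A standalone worked example for g(x, y) = x**2 + y**2 <= 4 linearized at (2, 1), where the point violates the constraint:

# One ECP cut computed by hand with Pyomo's symbolic differentiation; the
# jacs[constr] map above caches the same per-variable derivatives.
from pyomo.environ import ConcreteModel, Var, Constraint, value
from pyomo.core.expr.calculus.derivatives import differentiate

m = ConcreteModel()
m.x = Var(initialize=2.0)
m.y = Var(initialize=1.0)
m.g = Constraint(expr=m.x**2 + m.y**2 <= 4)

grad = {v: differentiate(m.g.body, wrt=v,
                         mode=differentiate.Modes.reverse_symbolic)
        for v in (m.x, m.y)}
upper_slack = m.g.uslack()  # 4 - g(2, 1) = -1: the point is infeasible
# Mirrors the expression added to ecp_cuts above:
cut = sum(value(grad[v]) * (v - v.value) for v in (m.x, m.y)) <= upper_slack
# cut is 4*(x - 2) + 2*(y - 1) <= -1, a valid outer approximation of g.
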
Example 14
    def solve(self, model, **kwds):
        """Solve the model.
        Warning: this solver is still in beta. Keyword arguments subject to
        change. Undocumented keyword arguments definitely subject to change.
        Warning: at this point in time, if you try to use PSC or GBD with
        anything other than IPOPT as the NLP solver, bad things will happen.
        This is because the suffixes are not in place to extract dual values
        from the variable bounds for any other solver.
        TODO: fix needed with the GBD implementation.
        Args:
            model (Block): a Pyomo model or block to be solved
        """
        config = self.CONFIG(kwds.pop('options', {}))
        config.set_value(kwds)
        solve_data = MindtPySolveData()
        solve_data.results = SolverResults()
        solve_data.timing = Container()

        old_logger_level = config.logger.getEffectiveLevel()
        with time_code(solve_data.timing, 'total'), \
             restore_logger_level(config.logger), \
             create_utility_block(model, 'MindtPy_utils', solve_data):
            if config.tee and old_logger_level > logging.INFO:
                # If the logger does not already include INFO, include it.
                config.logger.setLevel(logging.INFO)
            config.logger.info("---Starting MindtPy---")

            solve_data.original_model = model
            solve_data.working_model = model.clone()
            MindtPy = solve_data.working_model.MindtPy_utils
            setup_results_object(solve_data, config)
            process_objective(solve_data, config)

            # Save model initial values.
            solve_data.initial_var_values = list(
                v.value for v in MindtPy.variable_list)

            # Store the initial model state as the best solution found. If we
            # find no better solution, then we will restore from this copy.
            solve_data.best_solution_found = None

            # Record solver name
            solve_data.results.solver.name = 'MindtPy' + str(config.strategy)

            # Validate the model to ensure that MindtPy is able to solve it.
            if not model_is_valid(solve_data, config):
                return

            # Create a model block in which to store the generated feasibility
            # slack constraints. Do not leave the constraints on by default.
            feas = MindtPy.MindtPy_feas = Block()
            feas.deactivate()
            feas.feas_constraints = ConstraintList(
                doc='Feasibility Problem Constraints')

            # Create a model block in which to store the generated linear
            # constraints. Do not leave the constraints on by default.
            lin = MindtPy.MindtPy_linear_cuts = Block()
            lin.deactivate()

            # Integer cuts exclude particular discrete decisions
            lin.integer_cuts = ConstraintList(doc='integer cuts')
            # Feasible integer cuts exclude discrete realizations that have
            # been explored via an NLP subproblem. Depending on model
            # characteristics, the user may wish to revisit NLP subproblems
            # (with a different initialization, for example). Therefore, these
            # cuts are not enabled by default.
            #
            # Note: these cuts will only exclude integer realizations that are
            # not already in the primary integer_cuts ConstraintList.
            lin.feasible_integer_cuts = ConstraintList(
                doc='explored integer cuts')
            lin.feasible_integer_cuts.deactivate()

            # Set up iteration counters
            solve_data.nlp_iter = 0
            solve_data.mip_iter = 0
            solve_data.mip_subiter = 0

            # set up bounds
            solve_data.LB = float('-inf')
            solve_data.UB = float('inf')
            solve_data.LB_progress = [solve_data.LB]
            solve_data.UB_progress = [solve_data.UB]

            # Set of NLP iterations for which cuts were generated
            lin.nlp_iters = Set(dimen=1)

            # Set of MIP iterations for which cuts were generated in ECP
            lin.mip_iters = Set(dimen=1)

            nonlinear_constraints = [c for c in MindtPy.constraint_list if
                                     c.body.polynomial_degree() not in (1, 0)]
            lin.nl_constraint_set = RangeSet(
                len(nonlinear_constraints),
                doc="Integer index set over the nonlinear constraints")
            feas.constraint_set = RangeSet(
                len(MindtPy.constraint_list),
                doc="integer index set over the constraints")

            # # Mapping Constraint -> integer index
            # MindtPy.feas_map = {}
            # # Mapping integer index -> Constraint
            # MindtPy.feas_inverse_map = {}
            # # Generate the two maps. These maps may be helpful for later
            # # interpreting indices on the slack variables or generated cuts.
            # for c, n in zip(MindtPy.constraint_list, feas.constraint_set):
            #     MindtPy.feas_map[c] = n
            #     MindtPy.feas_inverse_map[n] = c

            # Create slack variables for OA cuts
            lin.slack_vars = VarList(
                bounds=(0, config.max_slack), initialize=0,
                domain=NonNegativeReals)
            # Create slack variables for feasibility problem
            feas.slack_var = Var(feas.constraint_set,
                                 domain=NonNegativeReals, initialize=1)

            # Flag indicating whether the solution improved in the past
            # iteration or not
            solve_data.solution_improved = False

            if not hasattr(solve_data.working_model, 'ipopt_zL_out'):
                solve_data.working_model.ipopt_zL_out = Suffix(
                    direction=Suffix.IMPORT)
            if not hasattr(solve_data.working_model, 'ipopt_zU_out'):
                solve_data.working_model.ipopt_zU_out = Suffix(
                    direction=Suffix.IMPORT)

            # Initialize the master problem
            with time_code(solve_data.timing, 'initialization'):
                MindtPy_initialize_master(solve_data, config)

            # Algorithm main loop
            with time_code(solve_data.timing, 'main loop'):
                MindtPy_iteration_loop(solve_data, config)

            if solve_data.best_solution_found is not None:
                # Update values in original model
                copy_var_list_values(
                    from_list=solve_data.best_solution_found.MindtPy_utils.variable_list,
                    to_list=MindtPy.variable_list,
                    config=config)
                # MindtPy.objective_value.set_value(
                #     value(solve_data.working_objective_expr, exception=False))
                copy_var_list_values(
                    MindtPy.variable_list,
                    solve_data.original_model.MindtPy_utils.variable_list,
                    config)

            solve_data.results.problem.lower_bound = solve_data.LB
            solve_data.results.problem.upper_bound = solve_data.UB
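
A typical invocation of this MindtPy solve() entry point might look like the following sketch (a toy convex MINLP; glpk and ipopt are assumed installed):

# Hypothetical usage sketch for the MindtPy solve() method above.
from pyomo.environ import (ConcreteModel, Var, Constraint, Objective,
                           Integers, SolverFactory)

m = ConcreteModel()
m.x = Var(domain=Integers, bounds=(0, 4))
m.y = Var(bounds=(0, 4))
m.c = Constraint(expr=m.x**2 + m.y >= 3)
m.obj = Objective(expr=m.x + 2 * m.y)
results = SolverFactory('mindtpy').solve(
    m, strategy='OA', mip_solver='glpk', nlp_solver='ipopt')
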
Example 15
def solve_main(solve_data, config, fp=False, regularization_problem=False):
    """
    This function solves the MIP main problem

    Parameters
    ----------
    solve_data: MindtPy Data Container
        data container that holds solve-instance data
    config: ConfigBlock
        contains the specific configurations for the algorithm

    Returns
    -------
    solve_data.mip: Pyomo model
        the MIP stored in solve_data
    main_mip_results: Pyomo results object
        result from solving the main MIP
    fp: Bool
        generate the feasibility pump regularization main problem
    regularization_problem: Bool
        generate the ROA regularization main problem
    """
    if fp:
        config.logger.info('FP-MIP %s: Solve main problem.' %
                           (solve_data.fp_iter,))
    elif regularization_problem:
        config.logger.info('Regularization-MIP %s: Solve main regularization problem.' %
                           (solve_data.mip_iter,))
    else:
        solve_data.mip_iter += 1
        config.logger.info('MIP %s: Solve main problem.' %
                           (solve_data.mip_iter,))

    # setup main problem
    setup_main(solve_data, config, fp, regularization_problem)
    mainopt = setup_mip_solver(solve_data, config, regularization_problem)

    mip_args = dict(config.mip_solver_args)
    if config.mip_solver in {'cplex', 'cplex_persistent', 'gurobi', 'gurobi_persistent'}:
        mip_args['warmstart'] = True
    set_solver_options(mainopt, solve_data, config,
                       solver_type='mip', regularization=regularization_problem)
    try:
        with time_code(solve_data.timing, 'regularization main' if regularization_problem else ('fp main' if fp else 'main')):
            main_mip_results = mainopt.solve(solve_data.mip,
                                             tee=config.mip_solver_tee, **mip_args)
    except (ValueError, AttributeError):
        if config.single_tree:
            config.logger.warning('Single tree terminate.')
            if get_main_elapsed_time(solve_data.timing) >= config.time_limit - 2:
                config.logger.warning('due to the timelimit.')
                solve_data.results.solver.termination_condition = tc.maxTimeLimit
            if config.strategy == 'GOA' or config.add_no_good_cuts:
                config.logger.warning('ValueError: Cannot load a SolverResults object with bad status: error. '
                                      'MIP solver failed. This usually happens in the single-tree GOA algorithm. '
                                      "No-good cuts are added and GOA algorithm doesn't converge within the time limit. "
                                      'No integer solution is found, so the cplex solver will report an error status. ')
        return None, None
    if main_mip_results.solver.termination_condition is tc.optimal:
        if config.single_tree and not config.add_no_good_cuts and not regularization_problem:
            if solve_data.objective_sense == minimize:
                solve_data.LB = max(
                    main_mip_results.problem.lower_bound, solve_data.LB)
                solve_data.bound_improved = solve_data.LB > solve_data.LB_progress[-1]
                solve_data.LB_progress.append(solve_data.LB)
            else:
                solve_data.UB = min(
                    main_mip_results.problem.upper_bound, solve_data.UB)
                solve_data.bound_improved = solve_data.UB < solve_data.UB_progress[-1]
                solve_data.UB_progress.append(solve_data.UB)

    elif main_mip_results.solver.termination_condition is tc.infeasibleOrUnbounded:
        # Linear solvers will sometimes tell me that it's infeasible or
        # unbounded during presolve, but fails to distinguish. We need to
        # resolve with a solver option flag on.
        main_mip_results, _ = distinguish_mip_infeasible_or_unbounded(
            solve_data.mip, config)
        return solve_data.mip, main_mip_results

    if regularization_problem:
        solve_data.mip.MindtPy_utils.objective_constr.deactivate()
        solve_data.mip.MindtPy_utils.del_component('loa_proj_mip_obj')
        solve_data.mip.MindtPy_utils.cuts.del_component('obj_reg_estimate')
        if config.add_regularization == 'level_L1':
            solve_data.mip.MindtPy_utils.del_component('L1_obj')
        elif config.add_regularization == 'level_L_infinity':
            solve_data.mip.MindtPy_utils.del_component(
                'L_infinity_obj')

    return solve_data.mip, main_mip_results
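
On the infeasibleOrUnbounded branch above: presolve in LP/MIP solvers can prove "infeasible or unbounded" without saying which, and the helper re-solves with a solver option flipped. An alternative disambiguation technique, sketched here on a toy LP (glpk assumed installed), is to bound the problem and re-solve:

# If the artificially bounded problem is feasible, the original was unbounded.
from pyomo.environ import (ConcreteModel, Var, Objective, SolverFactory,
                           maximize)
from pyomo.opt import TerminationCondition as tc

m = ConcreteModel()
m.x = Var()  # unbounded above
m.obj = Objective(expr=m.x, sense=maximize)
m.x.setub(1e10)  # arbitrary large bound
res = SolverFactory('glpk').solve(m)
if res.solver.termination_condition == tc.optimal:
    print('original problem was unbounded')
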
Example 16
    def solve(self, model, **kwds):
        config = self.CONFIG(kwds.pop('options', {}))
        config.set_value(kwds)
        return SolverFactory('gdpopt').solve(
            model,
            strategy='LBB',
            minlp_solver=config.solver,
            minlp_solver_args=config.solver_args,
            tee=config.tee,
            check_sat=config.check_sat,
            logger=config.logger,
            time_limit=config.time_limit)
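
        # NOTE: this method returns above; everything below is unreachable
        # dead code retained from the pre-delegation implementation (it
        # references names, e.g. obj_sign, that are never defined here).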

        # Validate model to be used with gdpbb
        self.validate_model(model)
        # Set solver as an MINLP
        solve_data = GDPbbSolveData()
        solve_data.timing = Container()
        solve_data.original_model = model
        solve_data.results = SolverResults()

        old_logger_level = config.logger.getEffectiveLevel()
        with time_code(solve_data.timing, 'total', is_main_timer=True), \
                restore_logger_level(config.logger), \
                create_utility_block(model, 'GDPbb_utils', solve_data):
            if config.tee and old_logger_level > logging.INFO:
                # If the logger does not already include INFO, include it.
                config.logger.setLevel(logging.INFO)
            config.logger.info(
                "Starting GDPbb version %s using %s as subsolver" %
                (".".join(map(str, self.version())), config.solver))

            # Setup results
            solve_data.results.solver.name = 'GDPbb - %s' % (str(
                config.solver))
            setup_results_object(solve_data, config)

            # clone original model for root node of branch and bound
            root = solve_data.working_model = solve_data.original_model.clone()

            # get objective sense
            process_objective(solve_data, config)
            objectives = solve_data.original_model.component_data_objects(
                Objective, active=True)
            obj = next(objectives, None)
            solve_data.results.problem.sense = obj.sense

            # set up lists to keep track of which disjunctions have been covered.

            # this list keeps track of the relaxed disjunctions
            root.GDPbb_utils.unenforced_disjunctions = list(
                disjunction
                for disjunction in root.GDPbb_utils.disjunction_list
                if disjunction.active)

            root.GDPbb_utils.deactivated_constraints = ComponentSet([
                constr
                for disjunction in root.GDPbb_utils.unenforced_disjunctions
                for disjunct in disjunction.disjuncts
                for constr in disjunct.component_data_objects(ctype=Constraint,
                                                              active=True)
                if constr.body.polynomial_degree() not in (1, 0)
            ])
            # Deactivate nonlinear constraints in unenforced disjunctions
            for constr in root.GDPbb_utils.deactivated_constraints:
                constr.deactivate()

            # Add the BigM suffix if it does not already exist. Used later during nonlinear constraint activation.
            if not hasattr(root, 'BigM'):
                root.BigM = Suffix()

            # Pre-screen that none of the disjunctions are already predetermined due to the disjuncts being fixed
            # to True/False values.
            # TODO this should also be done within the loop, but we aren't handling it right now.
            # Should affect efficiency, but not correctness.
            root.GDPbb_utils.disjuncts_fixed_True = ComponentSet()
            # Only find top-level (non-nested) disjunctions
            for disjunction in root.component_data_objects(Disjunction,
                                                           active=True):
                fixed_true_disjuncts = [
                    disjunct for disjunct in disjunction.disjuncts
                    if disjunct.indicator_var.fixed
                    and disjunct.indicator_var.value == 1
                ]
                fixed_false_disjuncts = [
                    disjunct for disjunct in disjunction.disjuncts
                    if disjunct.indicator_var.fixed
                    and disjunct.indicator_var.value == 0
                ]
                for disjunct in fixed_false_disjuncts:
                    disjunct.deactivate()
                if len(fixed_false_disjuncts) == len(
                        disjunction.disjuncts) - 1:
                    # all but one disjunct in the disjunction is fixed to False. Remaining one must be true.
                    if not fixed_true_disjuncts:
                        fixed_true_disjuncts = [
                            disjunct for disjunct in disjunction.disjuncts
                            if disjunct not in fixed_false_disjuncts
                        ]
                # Reactivate the fixed-true disjuncts
                for disjunct in fixed_true_disjuncts:
                    newly_activated = ComponentSet()
                    for constr in disjunct.component_data_objects(Constraint):
                        if constr in root.GDPbb_utils.deactivated_constraints:
                            newly_activated.add(constr)
                            constr.activate()
                            # Set the big M value for the constraint
                            root.BigM[constr] = 1
                            # Note: we use a default big M value of 1
                            # because all non-selected disjuncts should be deactivated.
                            # Therefore, none of the big M transformed nonlinear constraints will need to be relaxed.
                            # The default M value should therefore be irrelevant.
                    root.GDPbb_utils.deactivated_constraints -= newly_activated
                    root.GDPbb_utils.disjuncts_fixed_True.add(disjunct)

                if fixed_true_disjuncts:
                    assert disjunction.xor, "GDPbb only handles disjunctions in which one term can be selected. " \
                        "%s violates this assumption." % (disjunction.name, )
                    root.GDPbb_utils.unenforced_disjunctions.remove(
                        disjunction)

            # Check satisfiability
            if config.check_sat and satisfiable(root, config.logger) is False:
                # Problem is not satisfiable. Problem is infeasible.
                obj_value = obj_sign * float('inf')
            else:
                # solve the root node
                config.logger.info("Solving the root node.")
                obj_value, result, var_values = self.subproblem_solve(
                    root, config)

            if obj_sign * obj_value == float('inf'):
                config.logger.info(
                    "Model was found to be infeasible at the root node. Elapsed %.2f seconds."
                    % get_main_elapsed_time(solve_data.timing))
                if solve_data.results.problem.sense == minimize:
                    solve_data.results.problem.lower_bound = float('inf')
                    solve_data.results.problem.upper_bound = None
                else:
                    solve_data.results.problem.lower_bound = None
                    solve_data.results.problem.upper_bound = float('-inf')
                solve_data.results.solver.timing = solve_data.timing
                solve_data.results.solver.iterations = 0
                solve_data.results.solver.termination_condition = tc.infeasible
                return solve_data.results

            # initialize minheap for Branch and Bound algorithm
            # Heap structure: (ordering tuple, model)
            # Ordering tuple: (objective value, disjunctions_left, -total_nodes_counter)
            #  - select solutions with lower objective value,
            #    then fewer disjunctions left to explore (depth first),
            #    then more recently encountered (tiebreaker)
            heap = []
            total_nodes_counter = 0
            disjunctions_left = len(root.GDPbb_utils.unenforced_disjunctions)
            heapq.heappush(heap,
                           ((obj_sign * obj_value, disjunctions_left,
                             -total_nodes_counter), root, result, var_values))

            # loop to branch through the tree
            while len(heap) > 0:
                # pop best model off of heap
                sort_tuple, incumbent_model, incumbent_results, incumbent_var_values = heapq.heappop(
                    heap)
                incumbent_obj_value, disjunctions_left, _ = sort_tuple

                config.logger.info(
                    "Exploring node with LB %.10g and %s inactive disjunctions."
                    % (incumbent_obj_value, disjunctions_left))

                # If all of the originally active disjunctions have been
                # enforced, this node is fully specified: copy over the
                # solution values and return.
                if disjunctions_left == 0:
                    config.logger.info("Model solved.")
                    # Model is solved. Copy over solution values.
                    original_model = solve_data.original_model
                    for orig_var, val in zip(
                            original_model.GDPbb_utils.variable_list,
                            incumbent_var_values):
                        orig_var.value = val

                    solve_data.results.problem.lower_bound = incumbent_results.problem.lower_bound
                    solve_data.results.problem.upper_bound = incumbent_results.problem.upper_bound
                    solve_data.results.solver.timing = solve_data.timing
                    solve_data.results.solver.iterations = total_nodes_counter
                    solve_data.results.solver.termination_condition = incumbent_results.solver.termination_condition
                    return solve_data.results

                # Pick the next disjunction to branch on
                next_disjunction = incumbent_model.GDPbb_utils.unenforced_disjunctions[
                    0]
                config.logger.info("Branching on disjunction %s" %
                                   next_disjunction.name)
                assert next_disjunction.xor, "GDPbb only handles disjunctions in which one term can be selected. " \
                    "%s violates this assumption." % (next_disjunction.name, )

                new_nodes_counter = 0

                for i, disjunct in enumerate(next_disjunction.disjuncts):
                    # Create one branch for each of the disjuncts on the disjunction

                    if any(disj.indicator_var.fixed
                           and disj.indicator_var.value == 1
                           for disj in next_disjunction.disjuncts
                           if disj is not disjunct):
                        # If any other disjunct is fixed to 1 and an xor relationship applies,
                        # then this disjunct cannot be activated.
                        continue

                    # Check time limit
                    if get_main_elapsed_time(
                            solve_data.timing) >= config.time_limit:
                        if solve_data.results.problem.sense == minimize:
                            solve_data.results.problem.lower_bound = incumbent_obj_value
                            solve_data.results.problem.upper_bound = float(
                                'inf')
                        else:
                            solve_data.results.problem.lower_bound = float(
                                '-inf')
                            solve_data.results.problem.upper_bound = incumbent_obj_value
                        config.logger.info('GDPopt unable to converge bounds '
                                           'before time limit of {} seconds. '
                                           'Elapsed: {} seconds'.format(
                                               config.time_limit,
                                               get_main_elapsed_time(
                                                   solve_data.timing)))
                        config.logger.info(
                            'Final bound values: LB: {}  UB: {}'.format(
                                solve_data.results.problem.lower_bound,
                                solve_data.results.problem.upper_bound))
                        solve_data.results.solver.timing = solve_data.timing
                        solve_data.results.solver.iterations = total_nodes_counter
                        solve_data.results.solver.termination_condition = tc.maxTimeLimit
                        return solve_data.results

                    # Branch on the disjunct
                    child = incumbent_model.clone()
                    # TODO I am leaving the old branching system in place, but there should be
                    # something better, ideally that deals with nested disjunctions as well.
                    disjunction_to_branch = child.GDPbb_utils.unenforced_disjunctions.pop(
                        0)
                    child_disjunct = disjunction_to_branch.disjuncts[i]
                    child_disjunct.indicator_var.fix(1)
                    # Deactivate (and fix to 0) other disjuncts on the disjunction
                    for disj in disjunction_to_branch.disjuncts:
                        if disj is not child_disjunct:
                            disj.deactivate()
                    # Activate nonlinear constraints on the newly fixed child disjunct
                    newly_activated = ComponentSet()
                    for constr in child_disjunct.component_data_objects(
                            Constraint):
                        if constr in child.GDPbb_utils.deactivated_constraints:
                            newly_activated.add(constr)
                            constr.activate()
                            # Set the big M value for the constraint
                            child.BigM[constr] = 1
                            # Note: we use a default big-M value of 1 because
                            # all non-selected disjuncts should be deactivated,
                            # so none of the big-M-transformed nonlinear constraints
                            # need to be relaxed and the default M value is irrelevant.
                    child.GDPbb_utils.deactivated_constraints -= newly_activated
                    child.GDPbb_utils.disjuncts_fixed_True.add(child_disjunct)

                    if disjunct in incumbent_model.GDPbb_utils.disjuncts_fixed_True:
                        # If the disjunct was already fixed to True by a parent node's branching,
                        # just pass the incumbent value through without re-solving.
                        # The solution will be the same as the parent's.
                        total_nodes_counter += 1
                        ordering_tuple = (obj_sign * incumbent_obj_value,
                                          disjunctions_left - 1,
                                          -total_nodes_counter)
                        heapq.heappush(heap, (ordering_tuple, child, result,
                                              incumbent_var_values))
                        new_nodes_counter += 1
                        continue

                    if config.check_sat and satisfiable(
                            child, config.logger) is False:
                        # Problem is not satisfiable. Skip this disjunct.
                        continue

                    obj_value, result, var_values = self.subproblem_solve(
                        child, config)
                    total_nodes_counter += 1
                    ordering_tuple = (obj_sign * obj_value,
                                      disjunctions_left - 1,
                                      -total_nodes_counter)
                    heapq.heappush(heap,
                                   (ordering_tuple, child, result, var_values))
                    new_nodes_counter += 1

                config.logger.info(
                    "Added %s new nodes with %s relaxed disjunctions to the heap. Size now %s."
                    % (new_nodes_counter, disjunctions_left - 1, len(heap)))
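
A minimal standalone sketch (toy data only, not GDPbb code) of how the ordering tuple drives node selection in the min-heap above: Python compares tuples lexicographically, so heappop always returns the node with the lowest objective value, breaking ties by fewest remaining disjunctions (depth first) and then by the most recently created node.

import heapq

heap = []
heapq.heappush(heap, ((5.0, 3, -1), 'node A'))  # higher objective
heapq.heappush(heap, ((4.0, 2, -2), 'node B'))  # tied objective, more disjunctions left
heapq.heappush(heap, ((4.0, 1, -3), 'node C'))  # tied objective, deepest node

while heap:
    (obj, djn_left, neg_counter), node = heapq.heappop(heap)
    print(node, obj, djn_left)  # pops node C, then node B, then node A
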
Example no. 19
def add_integer_cut(var_values, target_model, solve_data, config, feasible=False):
    """Add an integer cut to the target GDP model."""
    with time_code(solve_data.timing, 'integer cut generation'):
        m = target_model
        GDPopt = m.GDPopt_utils
        var_value_is_one = ComponentSet()
        var_value_is_zero = ComponentSet()
        indicator_vars = ComponentSet(disj.indicator_var for disj in GDPopt.disjunct_list)
        for var, val in zip(GDPopt.variable_list, var_values):
            if not var.is_binary():
                continue
            if var.fixed:
                # if val is not None and var.value != val:
                #     # val needs to be None or match var.value. Otherwise, we have a
                #     # contradiction.
                #     raise ValueError(
                #         "Fixed variable %s has value %s != "
                #         "provided value of %s." % (var.name, var.value, val))

                # Note: FBBT may cause some disjuncts to be fathomed, which can cause
                # a fixed variable to be different than the subproblem value.
                # In this case, we simply construct the integer cut as usual with
                # the subproblem value rather than its fixed value.
                if val is None:
                    val = var.value

            if not config.force_subproblem_nlp:
                # By default (config.force_subproblem_nlp = False), we only want
                # the integer cuts to be over disjunct indicator vars.
                if var not in indicator_vars:
                    continue

            if fabs(val - 1) <= config.integer_tolerance:
                var_value_is_one.add(var)
            elif fabs(val) <= config.integer_tolerance:
                var_value_is_zero.add(var)
            else:
                raise ValueError(
                    'Binary %s = %s is not 0 or 1' % (var.name, val))

        if not (var_value_is_one or var_value_is_zero):
            # If no binary variables remain, terminate the algorithm.
            config.logger.info(
                'Adding integer cut to a model without discrete variables. '
                'Model is now infeasible.')
            if solve_data.objective_sense == minimize:
                solve_data.LB = float('inf')
            else:
                solve_data.UB = float('-inf')
            return False

        int_cut = (sum(1 - v for v in var_value_is_one) +
                   sum(v for v in var_value_is_zero)) >= 1

        # Exclude the current binary combination
        config.logger.info('Adding integer cut')
        GDPopt.integer_cuts.add(expr=int_cut)

    if config.calc_disjunctive_bounds:
        with time_code(solve_data.timing, "disjunctive variable bounding"):
            TransformationFactory('contrib.compute_disj_var_bounds').apply_to(
                m,
                solver=config.mip_solver if config.obbt_disjunctive_bounds else None
            )
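
A minimal Pyomo sketch (hypothetical model and values) of the integer cut expression constructed above: binaries at 1 contribute (1 - v) and binaries at 0 contribute v, so only the current binary combination violates the cut.

from pyomo.environ import Binary, ConcreteModel, Constraint, Var

m = ConcreteModel()
m.y = Var([1, 2, 3], domain=Binary)
m.y[1].value, m.y[2].value, m.y[3].value = 1, 0, 1

ones = [v for v in m.y.values() if v.value == 1]
zeros = [v for v in m.y.values() if v.value == 0]
# The cut excludes (1, 0, 1); every other 0-1 assignment satisfies it.
m.int_cut = Constraint(
    expr=sum(1 - v for v in ones) + sum(v for v in zeros) >= 1)
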
Example no. 20
def add_outer_approximation_cuts(nlp_result, solve_data, config):
    """Add outer approximation cuts to the linear GDP model."""
    with time_code(solve_data.timing, 'OA cut generation'):
        m = solve_data.linear_GDP
        GDPopt = m.GDPopt_utils
        sign_adjust = -1 if solve_data.objective_sense == minimize else 1

        # copy values over
        for var, val in zip(GDPopt.variable_list, nlp_result.var_values):
            if val is not None and not var.fixed:
                var.value = val

        # TODO some kind of special handling if the dual is phenomenally small?
        config.logger.debug('Adding OA cuts.')

        counter = 0
        if not hasattr(GDPopt, 'jacobians'):
            GDPopt.jacobians = ComponentMap()
        for constr, dual_value in zip(GDPopt.constraint_list,
                                      nlp_result.dual_values):
            if dual_value is None or constr.body.polynomial_degree() in (1, 0):
                continue

            # Determine if the user pre-specified that OA cuts should not be
            # generated for the given constraint.
            parent_block = constr.parent_block()
            ignore_set = getattr(parent_block, 'GDPopt_ignore_OA', None)
            config.logger.debug('Ignore_set %s' % ignore_set)
            if (ignore_set and (constr in ignore_set
                                or constr.parent_component() in ignore_set)):
                config.logger.debug(
                    'OA cut addition for %s skipped because it is in '
                    'the ignore set.' % constr.name)
                continue

            config.logger.debug("Adding OA cut for %s with dual value %s" %
                                (constr.name, dual_value))

            # Cache jacobian
            jacobian = GDPopt.jacobians.get(constr, None)
            if jacobian is None:
                constr_vars = list(
                    identify_variables(constr.body, include_fixed=False))
                if len(constr_vars) >= MAX_SYMBOLIC_DERIV_SIZE:
                    mode = differentiate.Modes.reverse_numeric
                else:
                    mode = differentiate.Modes.sympy

                try:
                    jac_list = differentiate(constr.body,
                                             wrt_list=constr_vars,
                                             mode=mode)
                    jac_map = ComponentMap(zip(constr_vars, jac_list))
                except Exception:
                    # Symbolic differentiation failed; fall back to numeric mode.
                    if mode is differentiate.Modes.reverse_numeric:
                        raise
                    mode = differentiate.Modes.reverse_numeric
                    jac_map = ComponentMap()
                jacobian = JacInfo(mode=mode, vars=constr_vars, jac=jac_map)
                GDPopt.jacobians[constr] = jacobian
            # Recompute numeric derivatives
            if not jacobian.jac:
                jac_list = differentiate(constr.body,
                                         wrt_list=jacobian.vars,
                                         mode=jacobian.mode)
                jacobian.jac.update(zip(jacobian.vars, jac_list))

            # Create a block on which to put outer approximation cuts.
            oa_utils = parent_block.component('GDPopt_OA')
            if oa_utils is None:
                oa_utils = parent_block.GDPopt_OA = Block(
                    doc="Block holding outer approximation cuts "
                    "and associated data.")
                oa_utils.GDPopt_OA_cuts = ConstraintList()
                oa_utils.GDPopt_OA_slacks = VarList(bounds=(0,
                                                            config.max_slack),
                                                    domain=NonNegativeReals,
                                                    initialize=0)

            oa_cuts = oa_utils.GDPopt_OA_cuts
            slack_var = oa_utils.GDPopt_OA_slacks.add()
            rhs = value(constr.lower) if constr.has_lb() else value(
                constr.upper)
            try:
                new_oa_cut = (copysign(1, sign_adjust * dual_value) *
                              (value(constr.body) - rhs + sum(
                                  value(jac) * (var - value(var))
                                  for var, jac in iteritems(jacobian.jac))) -
                              slack_var <= 0)
                if new_oa_cut.polynomial_degree() not in (1, 0):
                    # Diagnostic output: the generated cut should be affine.
                    for var, jac in iteritems(jacobian.jac):
                        config.logger.debug('%s: %s' % (var.name, value(jac)))
                oa_cuts.add(expr=new_oa_cut)
                counter += 1
            except ZeroDivisionError:
                config.logger.warning(
                    "Zero division occured attempting to generate OA cut for constraint %s.\n"
                    "Skipping OA cut generation for this constraint." %
                    (constr.name, ))
                # Simply continue on to the next constraint.
            # Clear out the numeric Jacobian values
            if jacobian.mode is differentiate.Modes.reverse_numeric:
                jacobian.jac.clear()

        config.logger.info('Added %s OA cuts' % counter)
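
A standalone numeric sketch (plain Python, not GDPopt code) of the first-order expansion behind each OA cut: a convex constraint g(x) <= 0 is outer-approximated at a point x0 by g(x0) + g'(x0) * (x - x0) <= 0, which every feasible point must satisfy because the tangent never overestimates a convex function.

def g(x):
    return x ** 2 - 4.0      # feasible region: -2 <= x <= 2

def g_prime(x):
    return 2.0 * x

x0 = 3.0                     # linearization point

def oa_cut(x):               # affine underestimator of g at x0
    return g(x0) + g_prime(x0) * (x - x0)

assert oa_cut(1.5) <= g(1.5)  # the cut never cuts off a feasible point
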
Example no. 21
def add_oa_cuts(target_model, dual_values, solve_data, config,
                cb_opt=None,
                linearize_active=True,
                linearize_violated=True):
    """Adds OA cuts.

    Generates and adds OA cuts (linearizes nonlinear constraints).
    For nonconvex problems, turn on 'config.add_slack'. 
    Slack variables will always be used for nonlinear equality constraints.

    Parameters
    ----------
    target_model : Pyomo model
        The relaxed linear model.
    dual_values : list
        The value of the duals for each constraint.
    solve_data : MindtPySolveData
        Data container that holds solve-instance data.
    config : ConfigBlock
        The specific configurations for MindtPy.
    cb_opt : SolverFactory, optional
        Gurobi_persistent solver, by default None.
    linearize_active : bool, optional
        Whether to linearize the active nonlinear constraints, by default True.
    linearize_violated : bool, optional
        Whether to linearize the violated nonlinear constraints, by default True.
    """
    with time_code(solve_data.timing, 'OA cut generation'):
        for index, constr in enumerate(target_model.MindtPy_utils.constraint_list):
            # TODO: the index here is correlated to the duals; see whether this can be fixed when the temporary duals are removed.
            if constr.body.polynomial_degree() in {0, 1}:
                continue

            constr_vars = list(identify_variables(constr.body))
            jacs = solve_data.jacobians

            # Equality constraint (makes the problem nonconvex)
            if constr.has_ub() and constr.has_lb() and value(constr.lower) == value(constr.upper) and config.equality_relaxation:
                sign_adjust = -1 if solve_data.objective_sense == minimize else 1
                rhs = constr.lower
                if config.add_slack:
                    slack_var = target_model.MindtPy_utils.cuts.slack_vars.add()
                target_model.MindtPy_utils.cuts.oa_cuts.add(
                    expr=copysign(1, sign_adjust * dual_values[index])
                    * (sum(value(jacs[constr][var]) * (var - value(var))
                           for var in EXPR.identify_variables(constr.body))
                        + value(constr.body) - rhs)
                    - (slack_var if config.add_slack else 0) <= 0)
                if config.single_tree and config.mip_solver == 'gurobi_persistent' and solve_data.mip_iter > 0 and cb_opt is not None:
                    cb_opt.cbLazy(
                        target_model.MindtPy_utils.cuts.oa_cuts[len(target_model.MindtPy_utils.cuts.oa_cuts)])

            else:  # Inequality constraint (possibly two-sided)
                if (constr.has_ub()
                    and (linearize_active and abs(constr.uslack()) < config.zero_tolerance)
                        or (linearize_violated and constr.uslack() < 0)
                        or (config.linearize_inactive and constr.uslack() > 0)) or ('MindtPy_utils.objective_constr' in constr.name and constr.has_ub()):
                    # always add the linearization for the epigraph of the objective
                    if config.add_slack:
                        slack_var = target_model.MindtPy_utils.cuts.slack_vars.add()

                    target_model.MindtPy_utils.cuts.oa_cuts.add(
                        expr=(sum(value(jacs[constr][var])*(var - var.value)
                                  for var in constr_vars) + value(constr.body)
                              - (slack_var if config.add_slack else 0)
                              <= value(constr.upper))
                    )
                    if config.single_tree and config.mip_solver == 'gurobi_persistent' and solve_data.mip_iter > 0 and cb_opt is not None:
                        cb_opt.cbLazy(
                            target_model.MindtPy_utils.cuts.oa_cuts[len(target_model.MindtPy_utils.cuts.oa_cuts)])

                if (constr.has_lb()
                    and (linearize_active and abs(constr.lslack()) < config.zero_tolerance)
                        or (linearize_violated and constr.lslack() < 0)
                        or (config.linearize_inactive and constr.lslack() > 0)) or ('MindtPy_utils.objective_constr' in constr.name and constr.has_lb()):
                    if config.add_slack:
                        slack_var = target_model.MindtPy_utils.cuts.slack_vars.add()

                    target_model.MindtPy_utils.cuts.oa_cuts.add(
                        expr=(sum(value(jacs[constr][var])*(var - var.value)
                                  for var in constr_vars) + value(constr.body)
                              + (slack_var if config.add_slack else 0)
                              >= value(constr.lower))
                    )
                    if config.single_tree and config.mip_solver == 'gurobi_persistent' and solve_data.mip_iter > 0 and cb_opt is not None:
                        cb_opt.cbLazy(
                            target_model.MindtPy_utils.cuts.oa_cuts[len(target_model.MindtPy_utils.cuts.oa_cuts)])
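
A toy sketch (hypothetical numbers, not MindtPy code) of the slack-based classification driving the branches above: for a constraint g(x) <= ub, uslack = ub - g(x), so a linearization is generated when the constraint is active (|uslack| below zero_tolerance), violated (uslack < 0), or, if linearize_inactive is set, inactive (uslack > 0).

def classify(uslack, zero_tolerance=1e-8):
    if abs(uslack) < zero_tolerance:
        return 'active'
    return 'violated' if uslack < 0 else 'inactive'

print(classify(0.0))    # active   -> linearize when linearize_active
print(classify(-0.5))   # violated -> linearize when linearize_violated
print(classify(2.0))    # inactive -> linearize when linearize_inactive
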
Example no. 22
    def solve(self, model, **kwds):
        config = self.CONFIG(kwds.pop('options', {}))
        config.set_value(kwds)

        # Validate model to be used with gdpbb
        self.validate_model(model)
        # Set up the MINLP subsolver
        solver = SolverFactory(config.solver)
        solve_data = GDPbbSolveData()
        solve_data.timing = Container()
        solve_data.original_model = model
        solve_data.results = SolverResults()

        old_logger_level = config.logger.getEffectiveLevel()
        with time_code(solve_data.timing, 'total'), \
                restore_logger_level(config.logger), \
                create_utility_block(model, 'GDPbb_utils', solve_data):
            if config.tee and old_logger_level > logging.INFO:
                # If the logger does not already include INFO, include it.
                config.logger.setLevel(logging.INFO)
            config.logger.info(
                "Starting GDPbb version %s using %s as subsolver" %
                (".".join(map(str, self.version())), config.solver))

            # Setup results
            solve_data.results.solver.name = 'GDPbb - %s' % (str(
                config.solver))
            setup_results_object(solve_data, config)
            # Initialize a list of indicator vars for updating the model after solving
            indicator_list_name = unique_component_name(
                model, "_indicator_list")
            indicator_vars = []
            for disjunction in model.component_data_objects(ctype=Disjunction,
                                                            active=True):
                for disjunct in disjunction.disjuncts:
                    indicator_vars.append(disjunct.indicator_var)
            setattr(model, indicator_list_name, indicator_vars)

            # get objective sense
            objectives = model.component_data_objects(Objective, active=True)
            obj = next(objectives, None)
            obj_sign = 1 if obj.sense == minimize else -1
            solve_data.results.problem.sense = obj.sense
            # clone original model for root node of branch and bound
            root = model.clone()

            # set up lists to keep track of which disjunctions have been covered.

            # this list keeps track of the original disjunctions that were active and are soon to be inactive
            root.GDPbb_utils.unenforced_disjunctions = list(
                disjunction
                for disjunction in root.GDPbb_utils.disjunction_list
                if disjunction.active)

            # this list keeps track of the disjunctions that have been activated by the branch and bound
            root.GDPbb_utils.curr_active_disjunctions = []

            # deactivate all disjunctions in the model
            # self.indicate(root)
            for djn in root.GDPbb_utils.unenforced_disjunctions:
                djn.deactivate()
            # Deactivate all disjuncts in model. To be reactivated when disjunction
            # is reactivated.
            for disj in root.component_data_objects(Disjunct, active=True):
                disj._deactivate_without_fixing_indicator()

            # Satisfiability check would go here

            # solve the root node
            config.logger.info("Solving the root node.")
            obj_value, result, _ = self.subproblem_solve(root, solver, config)

            # initialize minheap for Branch and Bound algorithm
            # Heap structure: (ordering tuple, model, result, variable list)
            # Ordering tuple: (objective value, disjunctions_left, -counter)
            #  - select solutions with lower objective value,
            #    then fewer disjunctions left to explore (depth first),
            #    then more recently encountered (tiebreaker)
            heap = []
            counter = 0
            disjunctions_left = len(root.GDPbb_utils.unenforced_disjunctions)
            heapq.heappush(
                heap, ((obj_sign * obj_value, disjunctions_left, -counter),
                       root, result, root.GDPbb_utils.variable_list))
            # loop to branch through the tree
            while len(heap) > 0:
                # pop best model off of heap
                sort_tup, mdl, mdl_results, vars = heapq.heappop(heap)
                old_obj_val, disjunctions_left, _ = sort_tup
                config.logger.info(
                    "Exploring node with LB %.10g and %s inactive disjunctions."
                    % (old_obj_val, disjunctions_left))

                # If all the originally active disjunctions have been
                # enforced, this node is fully specified: copy over the
                # solution values and return.
                if disjunctions_left == 0:
                    config.logger.info("Model solved.")
                    # Model is solved. Copy over solution values.
                    for orig_var, soln_var in zip(
                            model.GDPbb_utils.variable_list, vars):
                        orig_var.value = soln_var.value

                    solve_data.results.problem.lower_bound = mdl_results.problem.lower_bound
                    solve_data.results.problem.upper_bound = mdl_results.problem.upper_bound
                    solve_data.results.solver.timing = solve_data.timing
                    solve_data.results.solver.termination_condition = mdl_results.solver.termination_condition
                    return solve_data.results

                next_disjunction = mdl.GDPbb_utils.unenforced_disjunctions.pop(
                    0)
                config.logger.info("Activating disjunction %s" %
                                   next_disjunction.name)
                next_disjunction.activate()
                mdl.GDPbb_utils.curr_active_disjunctions.append(
                    next_disjunction)
                djn_left = len(mdl.GDPbb_utils.unenforced_disjunctions)
                for disj in next_disjunction.disjuncts:
                    disj._activate_without_unfixing_indicator()
                    if not disj.indicator_var.fixed:
                        disj.indicator_var = 0  # initially set all indicator vars to zero
                added_disj_counter = 0
                for disj in next_disjunction.disjuncts:
                    if not disj.indicator_var.fixed:
                        disj.indicator_var = 1
                    mnew = mdl.clone()
                    if not disj.indicator_var.fixed:
                        disj.indicator_var = 0

                    # Check feasibility
                    if config.check_sat and satisfiable(
                            mnew, config.logger) is False:
                        # problem is not satisfiable. Skip this disjunct.
                        continue

                    obj_value, result, vars = self.subproblem_solve(
                        mnew, solver, config)
                    counter += 1
                    ordering_tuple = (obj_sign * obj_value, djn_left, -counter)
                    heapq.heappush(heap, (ordering_tuple, mnew, result, vars))
                    added_disj_counter = added_disj_counter + 1
                config.logger.info(
                    "Added %s new nodes with %s relaxed disjunctions to the heap. Size now %s."
                    % (added_disj_counter, djn_left, len(heap)))
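
A schematic toy sketch (plain dictionaries, no Pyomo) of the branching step in the loop above: each child node commits to exactly one disjunct of the next unenforced disjunction, which is what fixing one indicator variable to 1 and the rest to 0 accomplishes.

import copy

node = {'unenforced': [['A', 'B', 'C']], 'selected': []}
children = []
for disjunct in node['unenforced'][0]:
    child = copy.deepcopy(node)
    child['unenforced'].pop(0)           # the disjunction is now enforced
    child['selected'].append(disjunct)   # analogous to indicator_var = 1
    children.append(child)

print([c['selected'] for c in children])  # [['A'], ['B'], ['C']]
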
Example no. 23
def solve_fp_subproblem(solve_data, config):
    """
    Solves the feasibility pump NLP subproblem.

    This function sets up the 'fp_nlp' by relaxing the integer variables,
    precomputing dual values, deactivating trivial constraints, and then
    solving the NLP model.

    Parameters
    ----------
    solve_data: MindtPy Data Container
        data container that holds solve-instance data
    config: ConfigBlock
        contains the specific configurations for the algorithm

    Returns
    -------
    fp_nlp: Pyomo model
        the feasibility pump NLP subproblem
    results: SolverResults
        results from solving the FP-NLP subproblem
    """

    fp_nlp = solve_data.working_model.clone()
    MindtPy = fp_nlp.MindtPy_utils
    config.logger.info('FP-NLP %s: Solve feasibility pump NLP subproblem.'
                       % (solve_data.fp_iter,))

    # Set up NLP
    fp_nlp.MindtPy_utils.objective_list[-1].deactivate()
    if solve_data.objective_sense == minimize:
        fp_nlp.improving_objective_cut = Constraint(
            expr=fp_nlp.MindtPy_utils.objective_value <= solve_data.UB)
    else:
        fp_nlp.improving_objective_cut = Constraint(
            expr=fp_nlp.MindtPy_utils.objective_value >= solve_data.LB)

    # Add norm_constraint, which guarantees the monotonicity of the norm
    # objective value sequence over all iterations.
    # Ref: 'A storm of feasibility pumps for nonconvex MINLP'.
    # The norm type is consistent with the norm objective of the FP main problem.
    if config.fp_norm_constraint:
        if config.fp_main_norm == 'L1':
            # TODO: check if we can access the block defined in FP-main problem
            generate_norm1_norm_constraint(
                fp_nlp, solve_data.mip, config, discrete_only=True)
        elif config.fp_main_norm == 'L2':
            fp_nlp.norm_constraint = Constraint(expr=sum(
                (nlp_var - mip_var.value)**2
                - config.fp_norm_constraint_coef * (nlp_var.value - mip_var.value)**2
                for nlp_var, mip_var in zip(
                    fp_nlp.MindtPy_utils.discrete_variable_list,
                    solve_data.mip.MindtPy_utils.discrete_variable_list)) <= 0)
        elif config.fp_main_norm == 'L_infinity':
            fp_nlp.norm_constraint = ConstraintList()
            rhs = config.fp_norm_constraint_coef * max(
                nlp_var.value - mip_var.value
                for nlp_var, mip_var in zip(
                    fp_nlp.MindtPy_utils.discrete_variable_list,
                    solve_data.mip.MindtPy_utils.discrete_variable_list))
            for nlp_var, mip_var in zip(
                    fp_nlp.MindtPy_utils.discrete_variable_list,
                    solve_data.mip.MindtPy_utils.discrete_variable_list):
                fp_nlp.norm_constraint.add(nlp_var - mip_var.value <= rhs)

    MindtPy.fp_nlp_obj = generate_norm2sq_objective_function(
        fp_nlp, solve_data.mip, discrete_only=config.fp_discrete_only)

    MindtPy.cuts.deactivate()
    TransformationFactory('core.relax_integer_vars').apply_to(fp_nlp)
    try:
        TransformationFactory('contrib.deactivate_trivial_constraints').apply_to(
            fp_nlp, tmp=True, ignore_infeasible=False, tolerance=config.constraint_tolerance)
    except ValueError:
        config.logger.warning(
            'infeasibility detected in deactivate_trivial_constraints')
        results = SolverResults()
        results.solver.termination_condition = tc.infeasible
        return fp_nlp, results
    # Solve the NLP
    nlpopt = SolverFactory(config.nlp_solver)
    nlp_args = dict(config.nlp_solver_args)
    set_solver_options(nlpopt, solve_data, config, solver_type='nlp')
    with SuppressInfeasibleWarning():
        with time_code(solve_data.timing, 'fp subproblem'):
            results = nlpopt.solve(
                fp_nlp, tee=config.nlp_solver_tee, **nlp_args)
    return fp_nlp, results
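
A minimal Pyomo sketch (hypothetical values) of the L2 norm constraint branch above: it requires the new NLP point to be closer to the current MIP point than the previous NLP point was (scaled by fp_norm_constraint_coef), which is what makes the distance sequence monotone.

from pyomo.environ import ConcreteModel, Constraint, Var

m = ConcreteModel()
m.x = Var([1, 2], initialize={1: 0.4, 2: 0.7})  # previous NLP values
mip_vals = {1: 1.0, 2: 0.0}                     # current MIP solution
coef = 0.99                                     # fp_norm_constraint_coef

m.norm_constraint = Constraint(expr=sum(
    (m.x[i] - mip_vals[i]) ** 2
    - coef * (m.x[i].value - mip_vals[i]) ** 2
    for i in [1, 2]) <= 0)
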
Example no. 24
    def solve(self, model, **kwds):
        """Solve the model.
        Warning: this solver is still in beta. Keyword arguments subject to
        change. Undocumented keyword arguments definitely subject to change.
        Warning: at this point in time, if you try to use PSC or GBD with
        anything other than IPOPT as the NLP solver, bad things will happen.
        This is because the suffixes are not in place to extract dual values
        from the variable bounds for any other solver.
        TODO: fix needed with the GBD implementation.
        Args:
            model (Block): a Pyomo model or block to be solved
        """
        config = self.CONFIG(kwds.pop('options', {}))
        config.set_value(kwds)
        solve_data = MindtPySolveData()
        solve_data.results = SolverResults()
        solve_data.timing = Container()

        solve_data.original_model = model
        solve_data.working_model = model.clone()
        if config.integer_to_binary:
            TransformationFactory('contrib.integer_to_binary'). \
                apply_to(solve_data.working_model)

        new_logging_level = logging.INFO if config.tee else None
        with time_code(solve_data.timing, 'total', is_main_timer=True), \
             lower_logger_level_to(config.logger, new_logging_level), \
             create_utility_block(solve_data.working_model, 'MindtPy_utils', solve_data):
            config.logger.info("---Starting MindtPy---")

            MindtPy = solve_data.working_model.MindtPy_utils
            setup_results_object(solve_data, config)
            process_objective(solve_data, config)

            # Save model initial values.
            solve_data.initial_var_values = list(
                v.value for v in MindtPy.variable_list)

            # Store the initial model state as the best solution found. If we
            # find no better solution, then we will restore from this copy.
            solve_data.best_solution_found = None

            # Record solver name
            solve_data.results.solver.name = 'MindtPy' + str(config.strategy)

            # Validate the model to ensure that MindtPy is able to solve it.
            if not model_is_valid(solve_data, config):
                return

            # Create a model block in which to store the generated feasibility
            # slack constraints. Do not leave the constraints on by default.
            feas = MindtPy.MindtPy_feas = Block()
            feas.deactivate()
            feas.feas_constraints = ConstraintList(
                doc='Feasibility Problem Constraints')

            # Create a model block in which to store the generated linear
            # constraints. Do not leave the constraints on by default.
            lin = MindtPy.MindtPy_linear_cuts = Block()
            lin.deactivate()

            # Integer cuts exclude particular discrete decisions
            lin.integer_cuts = ConstraintList(doc='integer cuts')
            # Feasible integer cuts exclude discrete realizations that have
            # been explored via an NLP subproblem. Depending on model
            # characteristics, the user may wish to revisit NLP subproblems
            # (with a different initialization, for example). Therefore, these
            # cuts are not enabled by default.
            #
            # Note: these cuts will only exclude integer realizations that are
            # not already in the primary integer_cuts ConstraintList.
            lin.feasible_integer_cuts = ConstraintList(
                doc='explored integer cuts')
            lin.feasible_integer_cuts.deactivate()

            # Set up iteration counters
            solve_data.nlp_iter = 0
            solve_data.mip_iter = 0
            solve_data.mip_subiter = 0

            # set up bounds
            solve_data.LB = float('-inf')
            solve_data.UB = float('inf')
            solve_data.LB_progress = [solve_data.LB]
            solve_data.UB_progress = [solve_data.UB]

            # Set of NLP iterations for which cuts were generated
            lin.nlp_iters = Set(dimen=1)

            # Set of MIP iterations for which cuts were generated in ECP
            lin.mip_iters = Set(dimen=1)

            nonlinear_constraints = [
                c for c in MindtPy.constraint_list
                if c.body.polynomial_degree() not in (1, 0)
            ]
            lin.nl_constraint_set = RangeSet(
                len(nonlinear_constraints),
                doc="Integer index set over the nonlinear constraints")
            feas.constraint_set = RangeSet(
                len(MindtPy.constraint_list),
                doc="integer index set over the constraints")

            # # Mapping Constraint -> integer index
            # MindtPy.feas_map = {}
            # # Mapping integer index -> Constraint
            # MindtPy.feas_inverse_map = {}
            # # Generate the two maps. These maps may be helpful for later
            # # interpreting indices on the slack variables or generated cuts.
            # for c, n in zip(MindtPy.constraint_list, feas.constraint_set):
            #     MindtPy.feas_map[c] = n
            #     MindtPy.feas_inverse_map[n] = c

            # Create slack variables for OA cuts
            lin.slack_vars = VarList(bounds=(0, config.max_slack),
                                     initialize=0,
                                     domain=NonNegativeReals)
            # Create slack variables for feasibility problem
            feas.slack_var = Var(feas.constraint_set,
                                 domain=NonNegativeReals,
                                 initialize=1)

            # Flag indicating whether the solution improved in the past
            # iteration or not
            solve_data.solution_improved = False

            if not hasattr(solve_data.working_model, 'ipopt_zL_out'):
                solve_data.working_model.ipopt_zL_out = Suffix(
                    direction=Suffix.IMPORT)
            if not hasattr(solve_data.working_model, 'ipopt_zU_out'):
                solve_data.working_model.ipopt_zU_out = Suffix(
                    direction=Suffix.IMPORT)

            # Initialize the master problem
            with time_code(solve_data.timing, 'initialization'):
                MindtPy_initialize_master(solve_data, config)

            # Algorithm main loop
            with time_code(solve_data.timing, 'main loop'):
                MindtPy_iteration_loop(solve_data, config)

            if solve_data.best_solution_found is not None:
                # Update values in original model
                copy_var_list_values(from_list=solve_data.best_solution_found.
                                     MindtPy_utils.variable_list,
                                     to_list=MindtPy.variable_list,
                                     config=config)
                # MindtPy.objective_value.set_value(
                #     value(solve_data.working_objective_expr, exception=False))
                copy_var_list_values(
                    MindtPy.variable_list,
                    solve_data.original_model.component_data_objects(Var),
                    config)

            solve_data.results.problem.lower_bound = solve_data.LB
            solve_data.results.problem.upper_bound = solve_data.UB

        solve_data.results.solver.timing = solve_data.timing
        solve_data.results.solver.user_time = solve_data.timing.total
        solve_data.results.solver.wallclock_time = solve_data.timing.total

        solve_data.results.solver.iterations = solve_data.mip_iter

        return solve_data.results
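
A minimal sketch (assumes Ipopt is installed; not MindtPy code) of the suffix declarations used above: IMPORT suffixes tell Pyomo to pull the bound multipliers ipopt_zL_out/ipopt_zU_out, and constraint duals, back from Ipopt after a solve.

from pyomo.environ import ConcreteModel, Objective, Suffix, Var

m = ConcreteModel()
m.x = Var(bounds=(0, 10), initialize=1.0)
m.obj = Objective(expr=(m.x - 2) ** 2)
m.ipopt_zL_out = Suffix(direction=Suffix.IMPORT)  # lower-bound multipliers
m.ipopt_zU_out = Suffix(direction=Suffix.IMPORT)  # upper-bound multipliers
m.dual = Suffix(direction=Suffix.IMPORT)          # constraint duals
# SolverFactory('ipopt').solve(m)  # suffixes are populated by the solve
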
Example no. 25
    def solve(self, model, **kwds):
        """Solve the model.

        Warning: this solver is still in beta. Keyword arguments subject to
        change. Undocumented keyword arguments definitely subject to change.

        This function performs all of the GDPopt solver setup and problem
        validation. It then calls upon helper functions to construct the
        initial master approximation and iteration loop.

        Args:
            model (Block): a Pyomo model or block to be solved

        """
        config = self.CONFIG(kwds.pop('options', {}))
        config.set_value(kwds)
        solve_data = GDPoptSolveData()
        solve_data.results = SolverResults()
        solve_data.timing = Container()

        old_logger_level = config.logger.getEffectiveLevel()
        with time_code(solve_data.timing, 'total'), \
                restore_logger_level(config.logger), \
                create_utility_block(model, 'GDPopt_utils', solve_data):
            if config.tee and old_logger_level > logging.INFO:
                # If the logger does not already include INFO, include it.
                config.logger.setLevel(logging.INFO)
            config.logger.info(
                "Starting GDPopt version %s using %s algorithm"
                % (".".join(map(str, self.version())), config.strategy)
            )
            config.logger.info(
                """
If you use this software, you may cite the following:
- Implementation:
    Chen, Q; Johnson, ES; Siirola, JD; Grossmann, IE.
    Pyomo.GDP: Disjunctive Models in Python. 
    Proc. of the 13th Intl. Symposium on Process Systems Eng.
    San Diego, 2018.
- LOA algorithm:
    Türkay, M; Grossmann, IE.
    Logic-based MINLP algorithms for the optimal synthesis of process networks.
    Comp. and Chem. Eng. 1996, 20(8), 959–978.
    DOI: 10.1016/0098-1354(95)00219-7.
- GLOA algorithm:
    Lee, S; Grossmann, IE.
    A Global Optimization Algorithm for Nonconvex Generalized Disjunctive Programming and Applications to Process Systems
    Comp. and Chem. Eng. 2001, 25, 1675-1697.
    DOI: 10.1016/S0098-1354(01)00732-3
                """.strip()
            )
            solve_data.results.solver.name = 'GDPopt %s - %s' % (
                str(self.version()), config.strategy)

            solve_data.original_model = model
            solve_data.working_model = model.clone()
            GDPopt = solve_data.working_model.GDPopt_utils
            setup_results_object(solve_data, config)

            solve_data.current_strategy = config.strategy

            # Verify that objective has correct form
            process_objective(solve_data, config)

            # Save model initial values. These are used later to initialize NLP
            # subproblems.
            solve_data.initial_var_values = list(
                v.value for v in GDPopt.variable_list)
            solve_data.best_solution_found = None

            # Validate the model to ensure that GDPopt is able to solve it.
            if not model_is_valid(solve_data, config):
                return

            # Integer cuts exclude particular discrete decisions
            GDPopt.integer_cuts = ConstraintList(doc='integer cuts')

            # Feasible integer cuts exclude discrete realizations that have
            # been explored via an NLP subproblem. Depending on model
            # characteristics, the user may wish to revisit NLP subproblems
            # (with a different initialization, for example). Therefore, these
            # cuts are not enabled by default, unless the initial model has no
            # discrete decisions.

            # Note: these cuts will only exclude integer realizations that are
            # not already in the primary GDPopt_integer_cuts ConstraintList.
            GDPopt.no_backtracking = ConstraintList(
                doc='explored integer cuts')

            # Set up iteration counters
            solve_data.master_iteration = 0
            solve_data.mip_iteration = 0
            solve_data.nlp_iteration = 0

            # set up bounds
            solve_data.LB = float('-inf')
            solve_data.UB = float('inf')
            solve_data.iteration_log = {}

            # Flag indicating whether the solution improved in the past
            # iteration or not
            solve_data.feasible_solution_improved = False

            # Initialize the master problem
            with time_code(solve_data.timing, 'initialization'):
                GDPopt_initialize_master(solve_data, config)

            # Algorithm main loop
            with time_code(solve_data.timing, 'main loop'):
                GDPopt_iteration_loop(solve_data, config)

            if solve_data.best_solution_found is not None:
                # Update values in working model
                copy_var_list_values(
                    from_list=solve_data.best_solution_found.GDPopt_utils.variable_list,
                    to_list=GDPopt.variable_list,
                    config=config)
                # Update values in original model
                copy_var_list_values(
                    GDPopt.variable_list,
                    solve_data.original_model.GDPopt_utils.variable_list,
                    config)

            solve_data.results.problem.lower_bound = solve_data.LB
            solve_data.results.problem.upper_bound = solve_data.UB

        solve_data.results.solver.timing = solve_data.timing
        solve_data.results.solver.user_time = solve_data.timing.total
        solve_data.results.solver.wallclock_time = solve_data.timing.total

        solve_data.results.solver.iterations = solve_data.master_iteration

        return solve_data.results
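
A simplified stand-in (not the actual GDPopt utility, which also validates and rounds discrete values according to the config) for what copy_var_list_values does in the epilogue above: copy values positionally between two variable lists that share the same ordering.

def copy_values(from_list, to_list):
    # Both lists must be ordered identically, as GDPopt's variable_list is.
    for src, dst in zip(from_list, to_list):
        if src.value is not None:
            dst.set_value(src.value)
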
Example no. 26
def solve_main(solve_data, config, fp=False, regularization_problem=False):
    """This function solves the MIP main problem.

    Args:
        solve_data (MindtPySolveData): data container that holds solve-instance data.
        config (ConfigBlock): the specific configurations for MindtPy.
        fp (bool, optional): whether it is in the loop of feasibility pump. Defaults to False.
        regularization_problem (bool, optional): whether it is solving a regularization problem. Defaults to False.

    Returns:
        solve_data.mip (Pyomo model): the MIP stored in solve_data.
        main_mip_results (SolverResults): results from solving the main MIP.
    """
    if not fp and not regularization_problem:
        solve_data.mip_iter += 1

    # setup main problem
    setup_main(solve_data, config, fp, regularization_problem)
    mainopt = set_up_mip_solver(solve_data, config, regularization_problem)

    mip_args = dict(config.mip_solver_args)
    if config.mip_solver in {
            'cplex', 'cplex_persistent', 'gurobi', 'gurobi_persistent'
    }:
        mip_args['warmstart'] = True
    set_solver_options(mainopt,
                       solve_data,
                       config,
                       solver_type='mip',
                       regularization=regularization_problem)
    try:
        with time_code(
                solve_data.timing,
                'regularization main' if regularization_problem else
            ('fp main' if fp else 'main')):
            main_mip_results = mainopt.solve(solve_data.mip,
                                             tee=config.mip_solver_tee,
                                             **mip_args)
    except (ValueError, AttributeError):
        if config.single_tree:
            config.logger.warning('Single-tree solve terminated.')
            if get_main_elapsed_time(
                    solve_data.timing) >= config.time_limit - 2:
                config.logger.warning('Termination was due to the time limit.')
                solve_data.results.solver.termination_condition = tc.maxTimeLimit
            if config.strategy == 'GOA' or config.add_no_good_cuts:
                config.logger.warning(
                    'ValueError: Cannot load a SolverResults object with bad status: error. '
                    'MIP solver failed. This usually happens in the single-tree GOA algorithm. '
                    "No-good cuts are added and GOA algorithm doesn't converge within the time limit. "
                    'No integer solution is found, so the cplex solver will report an error status. '
                )
        return None, None
    if config.solution_pool:
        main_mip_results._solver_model = mainopt._solver_model
        main_mip_results._pyomo_var_to_solver_var_map = mainopt._pyomo_var_to_solver_var_map
    if main_mip_results.solver.termination_condition is tc.optimal:
        if config.single_tree and not config.add_no_good_cuts and not regularization_problem:
            uptade_suboptimal_dual_bound(solve_data, main_mip_results)
        if regularization_problem:
            config.logger.info(
                solve_data.log_formatter.format(
                    solve_data.mip_iter,
                    'Reg ' + solve_data.regularization_mip_type,
                    value(solve_data.mip.MindtPy_utils.loa_proj_mip_obj),
                    solve_data.LB, solve_data.UB, solve_data.rel_gap,
                    get_main_elapsed_time(solve_data.timing)))

    elif main_mip_results.solver.termination_condition is tc.infeasibleOrUnbounded:
        # Linear solvers will sometimes report infeasible-or-unbounded during
        # presolve without distinguishing between the two cases. We need to
        # re-solve with a solver option flag set to tell them apart.
        main_mip_results, _ = distinguish_mip_infeasible_or_unbounded(
            solve_data.mip, config)
        return solve_data.mip, main_mip_results

    if regularization_problem:
        solve_data.mip.MindtPy_utils.objective_constr.deactivate()
        solve_data.mip.MindtPy_utils.del_component('loa_proj_mip_obj')
        solve_data.mip.MindtPy_utils.cuts.del_component('obj_reg_estimate')
        if config.add_regularization == 'level_L1':
            solve_data.mip.MindtPy_utils.del_component('L1_obj')
        elif config.add_regularization == 'level_L_infinity':
            solve_data.mip.MindtPy_utils.del_component('L_infinity_obj')

    return solve_data.mip, main_mip_results
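
A schematic sketch (hypothetical helper, not the distinguish_mip_infeasible_or_unbounded implementation) of one standard way to separate the two cases after presolve: impose a large artificial bound on the objective and re-solve; a model that is still infeasible was infeasible all along, otherwise it was unbounded.

from pyomo.environ import Constraint, SolverFactory
from pyomo.opt import TerminationCondition

def distinguish(model, obj_expr, solver='glpk', big=1e10):
    # Assumes minimization; a maximization model would bound from above.
    model.obj_bound = Constraint(expr=obj_expr >= -big)
    results = SolverFactory(solver).solve(model)
    model.del_component('obj_bound')
    if results.solver.termination_condition == TerminationCondition.infeasible:
        return 'infeasible'
    return 'unbounded'
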
Example no. 27
def add_affine_cuts(solve_data, config):
    """Adds affine cuts using MCPP.

    Parameters
    ----------
    solve_data : MindtPySolveData
        Data container that holds solve-instance data.
    config : ConfigBlock
        The specific configurations for MindtPy.
    """
    with time_code(solve_data.timing, 'Affine cut generation'):
        m = solve_data.mip
        config.logger.debug('Adding affine cuts')
        counter = 0

        for constr in m.MindtPy_utils.nonlinear_constraint_list:
            vars_in_constr = list(
                identify_variables(constr.body))
            if any(var.value is None for var in vars_in_constr):
                continue  # a variable has no values

            # mcpp stuff
            try:
                mc_eqn = mc(constr.body)
            except MCPP_Error as e:
                config.logger.debug(
                    'Skipping constraint %s due to MCPP error %s' % (constr.name, str(e)))
                continue  # skip to the next constraint

            ccSlope = mc_eqn.subcc()
            cvSlope = mc_eqn.subcv()
            ccStart = mc_eqn.concave()
            cvStart = mc_eqn.convex()

            # Check whether any ccSlope/cvSlope value is NaN or inf; if so, skip
            # the corresponding cut. Note: `x == float('nan')` is always False in
            # Python, so isnan/isinf must be used instead (assumes
            # `from math import isnan, isinf` at module level).
            concave_cut_valid = True
            convex_cut_valid = True
            for var in vars_in_constr:
                if not var.fixed:
                    if isnan(ccSlope[var]) or isinf(ccSlope[var]):
                        concave_cut_valid = False
                    if isnan(cvSlope[var]) or isinf(cvSlope[var]):
                        convex_cut_valid = False
            # If the ccSlope/cvSlope values are all zero, the cut is trivial; skip it.
            if not any(list(ccSlope.values())):
                concave_cut_valid = False
            if not any(list(cvSlope.values())):
                convex_cut_valid = False
            if isnan(ccStart) or isinf(ccStart):
                concave_cut_valid = False
            if isnan(cvStart) or isinf(cvStart):
                convex_cut_valid = False
            if not (concave_cut_valid or convex_cut_valid):
                continue

            ub_int = min(value(constr.upper), mc_eqn.upper()
                         ) if constr.has_ub() else mc_eqn.upper()
            lb_int = max(value(constr.lower), mc_eqn.lower()
                         ) if constr.has_lb() else mc_eqn.lower()

            aff_cuts = m.MindtPy_utils.cuts.aff_cuts
            if concave_cut_valid:
                concave_cut = sum(ccSlope[var] * (var - var.value)
                                  for var in vars_in_constr
                                  if not var.fixed) + ccStart >= lb_int
                aff_cuts.add(expr=concave_cut)
                counter += 1
            if convex_cut_valid:
                convex_cut = sum(cvSlope[var] * (var - var.value)
                                 for var in vars_in_constr
                                 if not var.fixed) + cvStart <= ub_int
                aff_cuts.add(expr=convex_cut)
                counter += 1

        config.logger.debug('Added %s affine cuts' % counter)
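
A short standalone check on the validity logic above: comparing against float('nan') with == is always False in Python, which is why the NaN tests must go through math.isnan (as in the fixed listing).

from math import isinf, isnan

def slope_is_valid(slope):
    return not (isnan(slope) or isinf(slope))

print(slope_is_valid(1.5))            # True
print(slope_is_valid(float('nan')))   # False
print(float('nan') == float('nan'))   # False -- == can never detect NaN
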
Example no. 28
    def add_lazy_no_good_cuts(self,
                              var_values,
                              solve_data,
                              config,
                              opt,
                              feasible=False):
        """
        Adds no-good cuts; add the no-good cuts through Cplex inherent function self.add()

        Parameters
        ----------
        var_values: list
            values of the current variables, used to generate the cut
        solve_data: MindtPy Data Container
            data container that holds solve-instance data
        config: ConfigBlock
            contains the specific configurations for the algorithm
        feasible: bool, optional
            boolean indicating if integer combination yields a feasible or infeasible NLP
        opt: SolverFactory
            the mip solver
        """
        if not config.add_no_good_cuts:
            return

        config.logger.info('Adding no-good cuts')
        with time_code(solve_data.timing, 'No-good cut generation'):
            m = solve_data.mip
            MindtPy = m.MindtPy_utils
            int_tol = config.integer_tolerance

            binary_vars = [v for v in MindtPy.variable_list if v.is_binary()]

            # copy variable values over
            for var, val in zip(MindtPy.variable_list, var_values):
                if not var.is_binary():
                    continue
                var.value = val

            # check to make sure that binary variables are all 0 or 1
            for v in binary_vars:
                if value(abs(v - 1)) > int_tol and value(abs(v)) > int_tol:
                    raise ValueError('Binary {} = {} is not 0 or 1'.format(
                        v.name, value(v)))

            if not binary_vars:  # if no binary variables, skip
                return

            pyomo_no_good_cut = sum(
                1 - v
                for v in binary_vars if value(abs(v - 1)) <= int_tol) + sum(
                    v for v in binary_vars if value(abs(v)) <= int_tol)
            cplex_no_good_rhs = generate_standard_repn(
                pyomo_no_good_cut).constant
            cplex_no_good_cut, _ = opt._get_expr_from_pyomo_expr(
                pyomo_no_good_cut)

            self.add(constraint=cplex.SparsePair(
                ind=cplex_no_good_cut.variables,
                val=cplex_no_good_cut.coefficients),
                     sense='G',
                     rhs=1 - cplex_no_good_rhs)
Example no. 29
    def solve(self, model, **kwds):
        """Solve the model.
        Warning: this solver is still in beta. Keyword arguments subject to
        change. Undocumented keyword arguments definitely subject to change.
        Warning: at this point in time, if you try to use PSC or GBD with
        anything other than IPOPT as the NLP solver, bad things will happen.
        This is because the suffixes are not in place to extract dual values
        from the variable bounds for any other solver.
        TODO: fix needed with the GBD implementation.
        Args:
            model (Block): a Pyomo model or block to be solved
        """
        config = self.CONFIG(kwds.pop('options', {}))
        config.set_value(kwds)

        # configuration confirmation
        if config.single_tree:
            config.iteration_limit = 1
            config.add_slack = False
            config.add_nogood_cuts = False
            config.mip_solver = 'cplex_persistent'
            config.logger.info(
                "Single tree implementation is activated. The default MIP "
                "solver is 'cplex_persistent'.")
        # if the slacks fix to zero, just don't add them
        if config.max_slack == 0.0:
            config.add_slack = False

        if config.strategy == "GOA":
            config.add_nogood_cuts = True
            config.add_slack = True
            config.use_mcpp = True
            config.integer_to_binary = True
            config.use_dual = False
            config.use_fbbt = True

        if config.nlp_solver == "baron":
            config.use_dual = False
        # if the ECP tolerance is not provided, use the bound tolerance
        if config.ecp_tolerance is None:
            config.ecp_tolerance = config.bound_tolerance

        # if the objective function is a constant, the dual bound constraint is not added.
        obj = next(model.component_data_objects(ctype=Objective, active=True))
        if obj.expr.polynomial_degree() == 0:
            config.use_dual_bound = False

        solve_data = MindtPySolveData()
        solve_data.results = SolverResults()
        solve_data.timing = Container()
        solve_data.curr_int_sol = []
        solve_data.prev_int_sol = []

        if config.use_fbbt:
            fbbt(model)
            config.logger.info(
                "Applied FBBT to tighten variable bounds")

        solve_data.original_model = model
        solve_data.working_model = model.clone()
        if config.integer_to_binary:
            TransformationFactory('contrib.integer_to_binary'). \
                apply_to(solve_data.working_model)

        new_logging_level = logging.INFO if config.tee else None
        with time_code(solve_data.timing, 'total', is_main_timer=True), \
                lower_logger_level_to(config.logger, new_logging_level), \
                create_utility_block(solve_data.working_model, 'MindtPy_utils', solve_data):
            config.logger.info("---Starting MindtPy---")

            MindtPy = solve_data.working_model.MindtPy_utils
            setup_results_object(solve_data, config)
            process_objective(solve_data, config, use_mcpp=config.use_mcpp)

            # Save model initial values.
            solve_data.initial_var_values = list(
                v.value for v in MindtPy.variable_list)

            # Store the initial model state as the best solution found. If we
            # find no better solution, then we will restore from this copy.
            solve_data.best_solution_found = None
            solve_data.best_solution_found_time = None

            # Record solver name
            solve_data.results.solver.name = 'MindtPy' + str(config.strategy)

            # Validate the model to ensure that MindtPy is able to solve it.
            if not model_is_valid(solve_data, config):
                return

            # Create a model block in which to store the generated feasibility
            # slack constraints. Do not leave the constraints on by default.
            feas = MindtPy.MindtPy_feas = Block()
            feas.deactivate()
            feas.feas_constraints = ConstraintList(
                doc='Feasibility Problem Constraints')

            # Create a model block in which to store the generated linear
            # constraints. Do not leave the constraints on by default.
            lin = MindtPy.MindtPy_linear_cuts = Block()
            lin.deactivate()

            # Integer cuts exclude particular discrete decisions
            lin.integer_cuts = ConstraintList(doc='integer cuts')
            # Feasible integer cuts exclude discrete realizations that have
            # been explored via an NLP subproblem. Depending on model
            # characteristics, the user may wish to revisit NLP subproblems
            # (with a different initialization, for example). Therefore, these
            # cuts are not enabled by default.
            #
            # Note: these cuts will only exclude integer realizations that are
            # not already in the primary integer_cuts ConstraintList.
            lin.feasible_integer_cuts = ConstraintList(
                doc='explored integer cuts')
            lin.feasible_integer_cuts.deactivate()

            # Set up iteration counters
            solve_data.nlp_iter = 0
            solve_data.mip_iter = 0
            solve_data.mip_subiter = 0

            # set up bounds
            solve_data.LB = float('-inf')
            solve_data.UB = float('inf')
            solve_data.LB_progress = [solve_data.LB]
            solve_data.UB_progress = [solve_data.UB]
            if config.single_tree and config.add_nogood_cuts:
                solve_data.stored_bound = {}
            if config.strategy == 'GOA' and config.add_nogood_cuts:
                solve_data.num_no_good_cuts_added = {}

            # Set of NLP iterations for which cuts were generated
            lin.nlp_iters = Set(dimen=1)

            # Set of MIP iterations for which cuts were generated in ECP
            lin.mip_iters = Set(dimen=1)

            if config.feasibility_norm == 'L1' or config.feasibility_norm == 'L2':
                feas.nl_constraint_set = Set(
                    initialize=[
                        i
                        for i, constr in enumerate(MindtPy.constraint_list, 1)
                        if constr.body.polynomial_degree() not in (1, 0)
                    ],
                    doc="Integer index set over the nonlinear constraints."
                    "The set corresponds to the index of nonlinear constraint in constraint_set"
                )
                # Create slack variables for feasibility problem
                feas.slack_var = Var(feas.nl_constraint_set,
                                     domain=NonNegativeReals,
                                     initialize=1)
            else:
                feas.slack_var = Var(domain=NonNegativeReals, initialize=1)

            # Create slack variables for OA cuts
            if config.add_slack:
                lin.slack_vars = VarList(bounds=(0, config.max_slack),
                                         initialize=0,
                                         domain=NonNegativeReals)

            # Flag indicating whether the solution improved in the past
            # iteration or not
            solve_data.solution_improved = False

            if config.nlp_solver == 'ipopt':
                if not hasattr(solve_data.working_model, 'ipopt_zL_out'):
                    solve_data.working_model.ipopt_zL_out = Suffix(
                        direction=Suffix.IMPORT)
                if not hasattr(solve_data.working_model, 'ipopt_zU_out'):
                    solve_data.working_model.ipopt_zU_out = Suffix(
                        direction=Suffix.IMPORT)

            # Initialize the master problem
            with time_code(solve_data.timing, 'initialization'):
                MindtPy_initialize_master(solve_data, config)

            # Algorithm main loop
            with time_code(solve_data.timing, 'main loop'):
                MindtPy_iteration_loop(solve_data, config)

            if solve_data.best_solution_found is not None:
                # Update values in original model
                copy_var_list_values(
                    from_list=solve_data.best_solution_found.MindtPy_utils.variable_list,
                    to_list=MindtPy.variable_list,
                    config=config)
                # MindtPy.objective_value.set_value(
                #     value(solve_data.working_objective_expr, exception=False))
                copy_var_list_values(
                    MindtPy.variable_list,
                    solve_data.original_model.component_data_objects(Var),
                    config)

            solve_data.results.problem.lower_bound = solve_data.LB
            solve_data.results.problem.upper_bound = solve_data.UB

        solve_data.results.solver.timing = solve_data.timing
        solve_data.results.solver.user_time = solve_data.timing.total
        solve_data.results.solver.wallclock_time = solve_data.timing.total

        solve_data.results.solver.iterations = solve_data.mip_iter
        solve_data.results.solver.best_solution_found_time = solve_data.best_solution_found_time

        if config.single_tree:
            solve_data.results.solver.num_nodes = solve_data.nlp_iter - \
                (1 if config.init_strategy == 'rNLP' else 0)

        return solve_data.results
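A hedged usage sketch of this entry point, assuming MindtPy is registered with Pyomo and that 'glpk' and 'ipopt' are installed; the toy MINLP is illustrative only.

from pyomo.environ import (Binary, ConcreteModel, Constraint, Objective,
                           SolverFactory, Var, minimize)

m = ConcreteModel()
m.x = Var(bounds=(0, 10))
m.y = Var(domain=Binary)
m.c = Constraint(expr=m.x >= 2 * m.y)
m.obj = Objective(expr=(m.x - 4) ** 2 + 5 * m.y, sense=minimize)

results = SolverFactory('mindtpy').solve(
    m, strategy='OA', mip_solver='glpk', nlp_solver='ipopt')
print(results.solver.termination_condition)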
Example no. 30
def add_outer_approximation_cuts(nlp_result, solve_data, config):
    """Add outer approximation cuts to the linear GDP model."""
    with time_code(solve_data.timing, 'OA cut generation'):
        m = solve_data.linear_GDP
        GDPopt = m.GDPopt_utils
        sign_adjust = -1 if solve_data.objective_sense == minimize else 1

        # copy values over
        for var, val in zip(GDPopt.variable_list, nlp_result.var_values):
            if val is not None and not var.fixed:
                var.value = val

        # TODO some kind of special handling if the dual is phenomenally small?
        config.logger.debug('Adding OA cuts.')

        counter = 0
        if not hasattr(GDPopt, 'jacobians'):
            GDPopt.jacobians = ComponentMap()
        for constr, dual_value in zip(GDPopt.constraint_list,
                                      nlp_result.dual_values):
            if dual_value is None or constr.body.polynomial_degree() in (1, 0):
                continue

            # Determine if the user pre-specified that OA cuts should not be
            # generated for the given constraint.
            parent_block = constr.parent_block()
            ignore_set = getattr(parent_block, 'GDPopt_ignore_OA', None)
            config.logger.debug('Ignore_set %s' % ignore_set)
            if (ignore_set and (constr in ignore_set
                                or constr.parent_component() in ignore_set)):
                config.logger.debug(
                    'OA cut addition for %s skipped because it is in '
                    'the ignore set.' % constr.name)
                continue

            config.logger.debug("Adding OA cut for %s with dual value %s" %
                                (constr.name, dual_value))

            # Cache jacobians
            jacobians = GDPopt.jacobians.get(constr, None)
            if jacobians is None:
                constr_vars = list(identify_variables(constr.body))
                jac_list = differentiate(constr.body, wrt_list=constr_vars)
                jacobians = ComponentMap(zip(constr_vars, jac_list))
                GDPopt.jacobians[constr] = jacobians

            # Create a block on which to put outer approximation cuts.
            oa_utils = parent_block.component('GDPopt_OA')
            if oa_utils is None:
                oa_utils = parent_block.GDPopt_OA = Block(
                    doc="Block holding outer approximation cuts "
                    "and associated data.")
                oa_utils.GDPopt_OA_cuts = ConstraintList()
                oa_utils.GDPopt_OA_slacks = VarList(
                    bounds=(0, config.max_slack),
                    domain=NonNegativeReals,
                    initialize=0)

            oa_cuts = oa_utils.GDPopt_OA_cuts
            slack_var = oa_utils.GDPopt_OA_slacks.add()
            rhs = value(constr.lower) if constr.has_lb() else value(constr.upper)
            oa_cuts.add(
                expr=copysign(1, sign_adjust * dual_value) * (
                    value(constr.body) - rhs +
                    sum(value(jacobians[var]) * (var - value(var))
                        for var in jacobians)) - slack_var <= 0)
            counter += 1

        config.logger.info('Added %s OA cuts' % counter)
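Each cut above is a first-order Taylor expansion of the constraint body at the NLP solution. A minimal sketch for one constraint g(x) = x**2 - 4 <= 0, assuming a recent Pyomo where differentiate lives in pyomo.core.expr.calculus.derivatives; names are illustrative.

from pyomo.core.expr.calculus.derivatives import differentiate
from pyomo.environ import ConcreteModel, ConstraintList, Var, value

m = ConcreteModel()
m.x = Var(initialize=3)
m.oa_cuts = ConstraintList()

g = m.x ** 2 - 4                       # violated at x0 = 3, where g = 5
(dg_dx,) = differentiate(g, wrt_list=[m.x])

# First-order Taylor cut: g(x0) + g'(x0) * (x - x0) <= 0
m.oa_cuts.add(expr=value(g) + value(dg_dx) * (m.x - value(m.x)) <= 0)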
Example no. 31
    def test_handle_termination_condition(self):
        """Test the outer approximation decomposition algorithm."""
        model = SimpleMINLP()
        config = _get_MindtPy_config()
        solve_data = set_up_solve_data(model, config)
        with time_code(solve_data.timing, 'total', is_main_timer=True), \
                create_utility_block(solve_data.working_model, 'MindtPy_utils', solve_data):

            MindtPy = solve_data.working_model.MindtPy_utils

            MindtPy = solve_data.working_model.MindtPy_utils
            setup_results_object(solve_data, config)
            process_objective(
                solve_data,
                config,
                move_linear_objective=(config.init_strategy == 'FP' or
                                       config.add_regularization is not None),
                use_mcpp=config.use_mcpp,
                update_var_con_list=config.add_regularization is None)
            feas = MindtPy.feas_opt = Block()
            feas.deactivate()
            feas.feas_constraints = ConstraintList(
                doc='Feasibility Problem Constraints')

            lin = MindtPy.cuts = Block()
            lin.deactivate()

            if config.feasibility_norm == 'L1' or config.feasibility_norm == 'L2':
                feas.nl_constraint_set = RangeSet(
                    len(MindtPy.nonlinear_constraint_list),
                    doc='Integer index set over the nonlinear constraints.')
                # Create slack variables for feasibility problem
                feas.slack_var = Var(feas.nl_constraint_set,
                                     domain=NonNegativeReals,
                                     initialize=1)
            else:
                feas.slack_var = Var(domain=NonNegativeReals, initialize=1)

            # no-good cuts exclude particular discrete decisions
            lin.no_good_cuts = ConstraintList(doc='no-good cuts')

            fixed_nlp = solve_data.working_model.clone()
            TransformationFactory('core.fix_integer_vars').apply_to(fixed_nlp)

            MindtPy_initialize_main(solve_data, config)

            # test handle_subproblem_other_termination
            termination_condition = tc.maxIterations
            config.add_no_good_cuts = True
            handle_subproblem_other_termination(fixed_nlp,
                                                termination_condition,
                                                solve_data, config)
            self.assertEqual(
                len(solve_data.mip.MindtPy_utils.cuts.no_good_cuts), 1)

            # test handle_main_other_conditions
            main_mip, main_mip_results = solve_main(solve_data, config)
            main_mip_results.solver.termination_condition = tc.infeasible
            handle_main_other_conditions(solve_data.mip, main_mip_results,
                                         solve_data, config)
            self.assertIs(solve_data.results.solver.termination_condition,
                          tc.feasible)

            main_mip_results.solver.termination_condition = tc.unbounded
            handle_main_other_conditions(solve_data.mip, main_mip_results,
                                         solve_data, config)
            self.assertIn(main_mip.MindtPy_utils.objective_bound,
                          main_mip.component_data_objects(ctype=Constraint))

            main_mip.MindtPy_utils.del_component('objective_bound')
            main_mip_results.solver.termination_condition = tc.infeasibleOrUnbounded
            handle_main_other_conditions(solve_data.mip, main_mip_results,
                                         solve_data, config)
            self.assertIn(main_mip.MindtPy_utils.objective_bound,
                          main_mip.component_data_objects(ctype=Constraint))

            main_mip_results.solver.termination_condition = tc.maxTimeLimit
            handle_main_other_conditions(solve_data.mip, main_mip_results,
                                         solve_data, config)
            self.assertIs(solve_data.results.solver.termination_condition,
                          tc.maxTimeLimit)

            main_mip_results.solver.termination_condition = tc.other
            main_mip_results.solution.status = SolutionStatus.feasible
            handle_main_other_conditions(solve_data.mip, main_mip_results,
                                         solve_data, config)
            for v1, v2 in zip(
                    main_mip.MindtPy_utils.variable_list,
                    solve_data.working_model.MindtPy_utils.variable_list):
                self.assertEqual(v1.value, v2.value)

            # test handle_feasibility_subproblem_tc
            feas_subproblem = solve_data.working_model.clone()
            add_feas_slacks(feas_subproblem, config)
            MindtPy = feas_subproblem.MindtPy_utils
            MindtPy.feas_opt.activate()
            if config.feasibility_norm == 'L1':
                MindtPy.feas_obj = Objective(
                    expr=sum(s for s in MindtPy.feas_opt.slack_var[...]),
                    sense=minimize)
            elif config.feasibility_norm == 'L2':
                MindtPy.feas_obj = Objective(
                    expr=sum(s * s for s in MindtPy.feas_opt.slack_var[...]),
                    sense=minimize)
            else:
                MindtPy.feas_obj = Objective(
                    expr=MindtPy.feas_opt.slack_var, sense=minimize)

            handle_feasibility_subproblem_tc(tc.optimal, MindtPy, solve_data,
                                             config)
            handle_feasibility_subproblem_tc(tc.infeasible, MindtPy,
                                             solve_data, config)
            self.assertIs(solve_data.should_terminate, True)
            self.assertIs(solve_data.results.solver.status, SolverStatus.error)

            solve_data.should_terminate = False
            solve_data.results.solver.status = None
            handle_feasibility_subproblem_tc(tc.maxIterations, MindtPy,
                                             solve_data, config)
            self.assertIs(solve_data.should_terminate, True)
            self.assertIs(solve_data.results.solver.status, SolverStatus.error)

            solve_data.should_terminate = False
            solve_data.results.solver.status = None
            handle_feasibility_subproblem_tc(tc.solverFailure, MindtPy,
                                             solve_data, config)
            self.assertIs(solve_data.should_terminate, True)
            self.assertIs(solve_data.results.solver.status, SolverStatus.error)

            # test NLP subproblem infeasible
            solve_data.working_model.Y[1].value = 0
            solve_data.working_model.Y[2].value = 0
            solve_data.working_model.Y[3].value = 0
            fixed_nlp, fixed_nlp_results = solve_subproblem(solve_data, config)
            solve_data.working_model.Y[1].value = None
            solve_data.working_model.Y[2].value = None
            solve_data.working_model.Y[3].value = None

            # test handle_nlp_subproblem_tc
            fixed_nlp_results.solver.termination_condition = tc.maxTimeLimit
            handle_nlp_subproblem_tc(fixed_nlp, fixed_nlp_results, solve_data,
                                     config)
            self.assertIs(solve_data.should_terminate, True)
            self.assertIs(solve_data.results.solver.termination_condition,
                          tc.maxTimeLimit)

            fixed_nlp_results.solver.termination_condition = tc.maxEvaluations
            handle_nlp_subproblem_tc(fixed_nlp, fixed_nlp_results, solve_data,
                                     config)
            self.assertIs(solve_data.should_terminate, True)
            self.assertIs(solve_data.results.solver.termination_condition,
                          tc.maxEvaluations)

            fixed_nlp_results.solver.termination_condition = tc.maxIterations
            handle_nlp_subproblem_tc(fixed_nlp, fixed_nlp_results, solve_data,
                                     config)
            self.assertIs(solve_data.should_terminate, True)
            self.assertIs(solve_data.results.solver.termination_condition,
                          tc.maxEvaluations)

            # test handle_fp_main_tc
            config.init_strategy = 'FP'
            solve_data.fp_iter = 1
            init_rNLP(solve_data, config)
            feas_main, feas_main_results = solve_main(solve_data,
                                                      config,
                                                      fp=True)
            feas_main_results.solver.termination_condition = tc.optimal
            fp_should_terminate = handle_fp_main_tc(feas_main_results,
                                                    solve_data, config)
            self.assertIs(fp_should_terminate, False)

            feas_main_results.solver.termination_condition = tc.maxTimeLimit
            fp_should_terminate = handle_fp_main_tc(feas_main_results,
                                                    solve_data, config)
            self.assertIs(fp_should_terminate, True)
            self.assertIs(solve_data.results.solver.termination_condition,
                          tc.maxTimeLimit)

            feas_main_results.solver.termination_condition = tc.infeasible
            fp_should_terminate = handle_fp_main_tc(feas_main_results,
                                                    solve_data, config)
            self.assertIs(fp_should_terminate, True)

            feas_main_results.solver.termination_condition = tc.unbounded
            fp_should_terminate = handle_fp_main_tc(feas_main_results,
                                                    solve_data, config)
            self.assertIs(fp_should_terminate, True)

            feas_main_results.solver.termination_condition = tc.other
            feas_main_results.solution.status = SolutionStatus.feasible
            fp_should_terminate = handle_fp_main_tc(feas_main_results,
                                                    solve_data, config)
            self.assertIs(fp_should_terminate, False)

            feas_main_results.solver.termination_condition = tc.solverFailure
            fp_should_terminate = handle_fp_main_tc(feas_main_results,
                                                    solve_data, config)
            self.assertIs(fp_should_terminate, True)

            # test generate_norm_constraint
            fp_nlp = solve_data.working_model.clone()
            config.fp_main_norm = 'L1'
            generate_norm_constraint(fp_nlp, solve_data, config)
            self.assertIsNotNone(
                fp_nlp.MindtPy_utils.find_component('L1_norm_constraint'))

            config.fp_main_norm = 'L2'
            generate_norm_constraint(fp_nlp, solve_data, config)
            self.assertIsNotNone(fp_nlp.find_component('norm_constraint'))

            fp_nlp.del_component('norm_constraint')
            config.fp_main_norm = 'L_infinity'
            generate_norm_constraint(fp_nlp, solve_data, config)
            self.assertIsNotNone(fp_nlp.find_component('norm_constraint'))

            # test set_solver_options
            config.mip_solver = 'gams'
            config.threads = 1
            opt = SolverFactory(config.mip_solver)
            set_solver_options(opt,
                               solve_data,
                               config,
                               'mip',
                               regularization=False)

            config.mip_solver = 'gurobi'
            config.mip_regularization_solver = 'gurobi'
            config.regularization_mip_threads = 1
            opt = SolverFactory(config.mip_solver)
            set_solver_options(opt,
                               solve_data,
                               config,
                               'mip',
                               regularization=True)

            config.nlp_solver = 'gams'
            config.nlp_solver_args['solver'] = 'ipopt'
            set_solver_options(opt,
                               solve_data,
                               config,
                               'nlp',
                               regularization=False)

            config.nlp_solver_args['solver'] = 'ipopth'
            set_solver_options(opt,
                               solve_data,
                               config,
                               'nlp',
                               regularization=False)

            config.nlp_solver_args['solver'] = 'conopt'
            set_solver_options(opt,
                               solve_data,
                               config,
                               'nlp',
                               regularization=False)

            config.nlp_solver_args['solver'] = 'msnlp'
            set_solver_options(opt,
                               solve_data,
                               config,
                               'nlp',
                               regularization=False)

            config.nlp_solver_args['solver'] = 'baron'
            set_solver_options(opt,
                               solve_data,
                               config,
                               'nlp',
                               regularization=False)

            # test algorithm_should_terminate
            solve_data.should_terminate = True
            solve_data.UB = float('inf')
            self.assertIs(
                algorithm_should_terminate(solve_data,
                                           config,
                                           check_cycling=False), True)
            self.assertIs(solve_data.results.solver.termination_condition,
                          tc.noSolution)

            solve_data.UB = 100
            self.assertIs(
                algorithm_should_terminate(solve_data,
                                           config,
                                           check_cycling=False), True)
            self.assertIs(solve_data.results.solver.termination_condition,
                          tc.feasible)

            solve_data.objective_sense = maximize
            solve_data.LB = float('-inf')
            self.assertIs(
                algorithm_should_terminate(solve_data,
                                           config,
                                           check_cycling=False), True)
            self.assertIs(solve_data.results.solver.termination_condition,
                          tc.noSolution)

            solve_data.LB = 100
            self.assertIs(
                algorithm_should_terminate(solve_data,
                                           config,
                                           check_cycling=False), True)
            self.assertIs(solve_data.results.solver.termination_condition,
                          tc.feasible)
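The handlers exercised above all follow the same dispatch pattern on the solver's termination condition; a schematic, illustrative-only sketch of that pattern:

from pyomo.opt import TerminationCondition as tc

def handle_tc_sketch(results, solve_data):
    """Return True if the algorithm should stop (illustrative only)."""
    cond = results.solver.termination_condition
    if cond in {tc.optimal, tc.feasible}:
        return False                     # usable solution: keep iterating
    if cond in {tc.maxTimeLimit, tc.maxIterations, tc.maxEvaluations}:
        solve_data.results.solver.termination_condition = cond
        return True                      # budget exhausted: stop
    # infeasible, unbounded, solverFailure, ... are terminal here as well
    solve_data.results.solver.termination_condition = cond
    return True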
Example no. 33
    def solve(self, model, **kwds):
        config = self.CONFIG(kwds.pop('options', {}))
        config.set_value(kwds)

        # Validate model to be used with gdpbb
        self.validate_model(model)
        # Set up the solve data container
        solve_data = GDPbbSolveData()
        solve_data.timing = Container()
        solve_data.original_model = model
        solve_data.results = SolverResults()

        old_logger_level = config.logger.getEffectiveLevel()
        with time_code(solve_data.timing, 'total', is_main_timer=True), \
                restore_logger_level(config.logger), \
                create_utility_block(model, 'GDPbb_utils', solve_data):
            if config.tee and old_logger_level > logging.INFO:
                # If the logger does not already include INFO, include it.
                config.logger.setLevel(logging.INFO)
            config.logger.info(
                "Starting GDPbb version %s using %s as subsolver"
                % (".".join(map(str, self.version())), config.solver)
            )

            # Setup results
            solve_data.results.solver.name = 'GDPbb - %s' % (str(config.solver))
            setup_results_object(solve_data, config)

            # clone original model for root node of branch and bound
            root = solve_data.working_model = solve_data.original_model.clone()

            # get objective sense
            process_objective(solve_data, config)
            objectives = solve_data.original_model.component_data_objects(Objective, active=True)
            obj = next(objectives, None)
            obj_sign = 1 if obj.sense == minimize else -1
            solve_data.results.problem.sense = obj.sense

            # set up lists to keep track of which disjunctions have been covered.

            # this list keeps track of the relaxed disjunctions
            root.GDPbb_utils.unenforced_disjunctions = list(
                disjunction for disjunction in root.GDPbb_utils.disjunction_list if disjunction.active
            )

            root.GDPbb_utils.deactivated_constraints = ComponentSet([
                constr for disjunction in root.GDPbb_utils.unenforced_disjunctions
                for disjunct in disjunction.disjuncts
                for constr in disjunct.component_data_objects(ctype=Constraint, active=True)
                if constr.body.polynomial_degree() not in (1, 0)
            ])
            # Deactivate nonlinear constraints in unenforced disjunctions
            for constr in root.GDPbb_utils.deactivated_constraints:
                constr.deactivate()

            # Add the BigM suffix if it does not already exist. Used later during nonlinear constraint activation.
            if not hasattr(root, 'BigM'):
                root.BigM = Suffix()

            # Pre-screen that none of the disjunctions are already predetermined due to the disjuncts being fixed
            # to True/False values.
            # TODO this should also be done within the loop, but we aren't handling it right now.
            # Should affect efficiency, but not correctness.
            root.GDPbb_utils.disjuncts_fixed_True = ComponentSet()
            # Only find top-level (non-nested) disjunctions
            for disjunction in root.component_data_objects(Disjunction, active=True):
                fixed_true_disjuncts = [disjunct for disjunct in disjunction.disjuncts
                                        if disjunct.indicator_var.fixed
                                        and disjunct.indicator_var.value == 1]
                fixed_false_disjuncts = [disjunct for disjunct in disjunction.disjuncts
                                         if disjunct.indicator_var.fixed
                                         and disjunct.indicator_var.value == 0]
                for disjunct in fixed_false_disjuncts:
                    disjunct.deactivate()
                if len(fixed_false_disjuncts) == len(disjunction.disjuncts) - 1:
                    # all but one disjunct in the disjunction is fixed to False. Remaining one must be true.
                    if not fixed_true_disjuncts:
                        fixed_true_disjuncts = [disjunct for disjunct in disjunction.disjuncts
                                                if disjunct not in fixed_false_disjuncts]
                # Reactivate the fixed-true disjuncts
                for disjunct in fixed_true_disjuncts:
                    newly_activated = ComponentSet()
                    for constr in disjunct.component_data_objects(Constraint):
                        if constr in root.GDPbb_utils.deactivated_constraints:
                            newly_activated.add(constr)
                            constr.activate()
                            # Set the big M value for the constraint
                            root.BigM[constr] = 1
                            # Note: we use a default big M value of 1
                            # because all non-selected disjuncts should be deactivated.
                            # Therefore, none of the big M transformed nonlinear constraints will need to be relaxed.
                            # The default M value should therefore be irrelevant.
                    root.GDPbb_utils.deactivated_constraints -= newly_activated
                    root.GDPbb_utils.disjuncts_fixed_True.add(disjunct)

                if fixed_true_disjuncts:
                    assert disjunction.xor, "GDPbb only handles disjunctions in which one term can be selected. " \
                        "%s violates this assumption." % (disjunction.name, )
                    root.GDPbb_utils.unenforced_disjunctions.remove(disjunction)

            # Check satisfiability
            if config.check_sat and satisfiable(root, config.logger) is False:
                # Problem is not satisfiable. Problem is infeasible.
                obj_value = obj_sign * float('inf')
            else:
                # solve the root node
                config.logger.info("Solving the root node.")
                obj_value, result, var_values = self.subproblem_solve(root, config)

            if obj_sign * obj_value == float('inf'):
                config.logger.info("Model was found to be infeasible at the root node. Elapsed %.2f seconds."
                                   % get_main_elapsed_time(solve_data.timing))
                if solve_data.results.problem.sense == minimize:
                    solve_data.results.problem.lower_bound = float('inf')
                    solve_data.results.problem.upper_bound = None
                else:
                    solve_data.results.problem.lower_bound = None
                    solve_data.results.problem.upper_bound = float('-inf')
                solve_data.results.solver.timing = solve_data.timing
                solve_data.results.solver.iterations = 0
                solve_data.results.solver.termination_condition = tc.infeasible
                return solve_data.results

            # initialize minheap for Branch and Bound algorithm
            # Heap structure: (ordering tuple, model)
            # Ordering tuple: (objective value, disjunctions_left, -total_nodes_counter)
            #  - select solutions with lower objective value,
            #    then fewer disjunctions left to explore (depth first),
            #    then more recently encountered (tiebreaker)
            heap = []
            total_nodes_counter = 0
            disjunctions_left = len(root.GDPbb_utils.unenforced_disjunctions)
            heapq.heappush(
                heap, (
                    (obj_sign * obj_value, disjunctions_left, -total_nodes_counter),
                    root, result, var_values))

            # loop to branch through the tree
            while len(heap) > 0:
                # pop best model off of heap
                sort_tuple, incumbent_model, incumbent_results, incumbent_var_values = heapq.heappop(heap)
                incumbent_obj_value, disjunctions_left, _ = sort_tuple

                config.logger.info("Exploring node with LB %.10g and %s inactive disjunctions." % (
                    incumbent_obj_value, disjunctions_left
                ))

                # if all the originally active disjunctions are active, solve and
                # return solution
                if disjunctions_left == 0:
                    config.logger.info("Model solved.")
                    # Model is solved. Copy over solution values.
                    original_model = solve_data.original_model
                    for orig_var, val in zip(original_model.GDPbb_utils.variable_list, incumbent_var_values):
                        orig_var.value = val

                    solve_data.results.problem.lower_bound = incumbent_results.problem.lower_bound
                    solve_data.results.problem.upper_bound = incumbent_results.problem.upper_bound
                    solve_data.results.solver.timing = solve_data.timing
                    solve_data.results.solver.iterations = total_nodes_counter
                    solve_data.results.solver.termination_condition = incumbent_results.solver.termination_condition
                    return solve_data.results

                # Pick the next disjunction to branch on
                next_disjunction = incumbent_model.GDPbb_utils.unenforced_disjunctions[0]
                config.logger.info("Branching on disjunction %s" % next_disjunction.name)
                assert next_disjunction.xor, "GDPbb only handles disjunctions in which one term can be selected. " \
                    "%s violates this assumption." % (next_disjunction.name, )

                new_nodes_counter = 0

                for i, disjunct in enumerate(next_disjunction.disjuncts):
                    # Create one branch for each of the disjuncts on the disjunction

                    if any(disj.indicator_var.fixed and disj.indicator_var.value == 1
                           for disj in next_disjunction.disjuncts if disj is not disjunct):
                        # If any other disjunct is fixed to 1 and an xor relationship applies,
                        # then this disjunct cannot be activated.
                        continue

                    # Check time limit
                    if get_main_elapsed_time(solve_data.timing) >= config.time_limit:
                        if solve_data.results.problem.sense == minimize:
                            solve_data.results.problem.lower_bound = incumbent_obj_value
                            solve_data.results.problem.upper_bound = float('inf')
                        else:
                            solve_data.results.problem.lower_bound = float('-inf')
                            solve_data.results.problem.upper_bound = incumbent_obj_value
                        config.logger.info(
                            'GDPopt unable to converge bounds '
                            'before time limit of {} seconds. '
                            'Elapsed: {} seconds'
                            .format(config.time_limit, get_main_elapsed_time(solve_data.timing)))
                        config.logger.info(
                            'Final bound values: LB: {}  UB: {}'.
                            format(solve_data.results.problem.lower_bound, solve_data.results.problem.upper_bound))
                        solve_data.results.solver.timing = solve_data.timing
                        solve_data.results.solver.iterations = total_nodes_counter
                        solve_data.results.solver.termination_condition = tc.maxTimeLimit
                        return solve_data.results

                    # Branch on the disjunct
                    child = incumbent_model.clone()
                    # TODO I am leaving the old branching system in place, but there should be
                    # something better, ideally that deals with nested disjunctions as well.
                    disjunction_to_branch = child.GDPbb_utils.unenforced_disjunctions.pop(0)
                    child_disjunct = disjunction_to_branch.disjuncts[i]
                    child_disjunct.indicator_var.fix(1)
                    # Deactivate (and fix to 0) other disjuncts on the disjunction
                    for disj in disjunction_to_branch.disjuncts:
                        if disj is not child_disjunct:
                            disj.deactivate()
                    # Activate nonlinear constraints on the newly fixed child disjunct
                    newly_activated = ComponentSet()
                    for constr in child_disjunct.component_data_objects(Constraint):
                        if constr in child.GDPbb_utils.deactivated_constraints:
                            newly_activated.add(constr)
                            constr.activate()
                            # Set the big M value for the constraint
                            child.BigM[constr] = 1
                            # Note: we use a default big M value of 1
                            # because all non-selected disjuncts should be deactivated.
                            # Therefore, none of the big M transformed nonlinear constraints will need to be relaxed.
                            # The default M value should therefore be irrelevant.
                    child.GDPbb_utils.deactivated_constraints -= newly_activated
                    child.GDPbb_utils.disjuncts_fixed_True.add(child_disjunct)

                    if disjunct in incumbent_model.GDPbb_utils.disjuncts_fixed_True:
                        # If the disjunct was already branched to True from a parent disjunct branching, just pass
                        # through the incumbent value without resolving. The solution should be the same as the parent.
                        total_nodes_counter += 1
                        ordering_tuple = (obj_sign * incumbent_obj_value, disjunctions_left - 1, -total_nodes_counter)
                        heapq.heappush(heap, (ordering_tuple, child, result, incumbent_var_values))
                        new_nodes_counter += 1
                        continue

                    if config.check_sat and satisfiable(child, config.logger) is False:
                        # Problem is not satisfiable. Skip this disjunct.
                        continue

                    obj_value, result, var_values = self.subproblem_solve(child, config)
                    total_nodes_counter += 1
                    ordering_tuple = (obj_sign * obj_value, disjunctions_left - 1, -total_nodes_counter)
                    heapq.heappush(heap, (ordering_tuple, child, result, var_values))
                    new_nodes_counter += 1

                config.logger.info("Added %s new nodes with %s relaxed disjunctions to the heap. Size now %s." % (
                    new_nodes_counter, disjunctions_left - 1, len(heap)))
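The ordering tuple pushed onto the heap drives the best-first search: lower objective first, then fewer disjunctions left to enforce (depth-first), then the more recently created node as tiebreaker. A tiny standalone illustration with made-up values:

import heapq

heap = []
heapq.heappush(heap, ((5.0, 2, -1), 'node-1'))
heapq.heappush(heap, ((3.0, 3, -2), 'node-2'))  # better objective wins
heapq.heappush(heap, ((3.0, 1, -3), 'node-3'))  # objective ties: deeper node wins

while heap:
    (obj, disjunctions_left, neg_counter), node = heapq.heappop(heap)
    print(node, obj, disjunctions_left)
# Pops node-3, then node-2, then node-1.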
Example no. 34
    def add_lazy_oa_cuts(self,
                         target_model,
                         dual_values,
                         solve_data,
                         config,
                         opt,
                         linearize_active=True,
                         linearize_violated=True):
        """Linearizes nonlinear constraints; add the OA cuts through Cplex inherent function self.add()
        For nonconvex problems, turn on 'config.add_slack'. Slack variables will always be used for 
        nonlinear equality constraints.

        Parameters
        ----------
        target_model : Pyomo model
            The MIP main problem.
        dual_values : list
            The value of the duals for each constraint.
        solve_data : MindtPySolveData
            Data container that holds solve-instance data.
        config : ConfigBlock
            The specific configurations for MindtPy.
        opt : SolverFactory
            The cplex_persistent solver.
        linearize_active : bool, optional
            Whether to linearize the active nonlinear constraints, by default True.
        linearize_violated : bool, optional
            Whether to linearize the violated nonlinear constraints, by default True.
        """
        config.logger.debug('Adding OA cuts')
        with time_code(solve_data.timing, 'OA cut generation'):
            for index, constr in enumerate(
                    target_model.MindtPy_utils.constraint_list):
                if (constr.body.polynomial_degree() in
                        solve_data.mip_constraint_polynomial_degree):
                    continue

                constr_vars = list(identify_variables(constr.body))
                jacs = solve_data.jacobians

                # Equality constraint (makes the problem nonconvex)
                if constr.has_ub() and constr.has_lb() and value(
                        constr.lower) == value(constr.upper):
                    sign_adjust = -1 if solve_data.objective_sense == minimize else 1
                    rhs = constr.lower

                    # CPLEX requires lazy cuts in its own expression type, so
                    # transform the Pyomo expression into a CPLEX expression
                    pyomo_expr = copysign(1, sign_adjust * dual_values[index]) * (
                        sum(value(jacs[constr][var]) * (var - value(var))
                            for var in EXPR.identify_variables(constr.body))
                        + value(constr.body) - rhs)
                    cplex_expr, _ = opt._get_expr_from_pyomo_expr(pyomo_expr)
                    cplex_rhs = -generate_standard_repn(pyomo_expr).constant
                    self.add(constraint=cplex.SparsePair(
                        ind=cplex_expr.variables, val=cplex_expr.coefficients),
                             sense='L',
                             rhs=cplex_rhs)
                else:  # Inequality constraint (possibly two-sided)
                    if (constr.has_ub() and
                        (linearize_active
                         and abs(constr.uslack()) < config.zero_tolerance) or
                        (linearize_violated and constr.uslack() < 0) or
                        (config.linearize_inactive and constr.uslack() > 0)
                        ) or ('MindtPy_utils.objective_constr' in constr.name
                              and constr.has_ub()):

                        pyomo_expr = sum(
                            value(jacs[constr][var]) * (var - var.value)
                            for var in constr_vars) + value(constr.body)
                        cplex_rhs = -generate_standard_repn(pyomo_expr).constant
                        cplex_expr, _ = opt._get_expr_from_pyomo_expr(
                            pyomo_expr)
                        self.add(constraint=cplex.SparsePair(
                            ind=cplex_expr.variables,
                            val=cplex_expr.coefficients),
                                 sense='L',
                                 rhs=value(constr.upper) + cplex_rhs)
                    if (constr.has_lb() and
                        (linearize_active
                         and abs(constr.lslack()) < config.zero_tolerance) or
                        (linearize_violated and constr.lslack() < 0) or
                        (config.linearize_inactive and constr.lslack() > 0)
                        ) or ('MindtPy_utils.objective_constr' in constr.name
                              and constr.has_lb()):
                        pyomo_expr = sum(
                            value(jacs[constr][var]) * (var - self.get_values(
                                opt._pyomo_var_to_solver_var_map[var]))
                            for var in constr_vars) + value(constr.body)
                        cplex_rhs = -generate_standard_repn(pyomo_expr).constant
                        cplex_expr, _ = opt._get_expr_from_pyomo_expr(
                            pyomo_expr)
                        self.add(constraint=cplex.SparsePair(
                            ind=cplex_expr.variables,
                            val=cplex_expr.coefficients),
                                 sense='G',
                                 rhs=value(constr.lower) + cplex_rhs)
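For context, this method is meant to live on a CPLEX lazy-constraint callback, which is where self.add() comes from. A bare-bones, illustrative registration sketch with the cplex Python package; the cut x1 + 2*x2 <= 3 is a placeholder.

import cplex
from cplex.callbacks import LazyConstraintCallback

class SketchLazyCallback(LazyConstraintCallback):
    def __call__(self):
        # self.add() attaches a violated cut to the running branch-and-cut tree.
        self.add(constraint=cplex.SparsePair(ind=[0, 1], val=[1.0, 2.0]),
                 sense='L', rhs=3.0)

cpx = cplex.Cplex()
cpx.register_callback(SketchLazyCallback)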
Example no. 35
File: GDPbb.py Project: Pyomo/pyomo
    def solve(self, model, **kwds):
        config = self.CONFIG(kwds.pop('options', {}))
        config.set_value(kwds)

        # Validate model to be used with gdpbb
        self.validate_model(model)
        # Set up the MINLP subsolver
        solver = SolverFactory(config.solver)
        solve_data = GDPbbSolveData()
        solve_data.timing = Container()
        solve_data.original_model = model
        solve_data.results = SolverResults()

        old_logger_level = config.logger.getEffectiveLevel()
        with time_code(solve_data.timing, 'total'), \
                restore_logger_level(config.logger), \
                create_utility_block(model, 'GDPbb_utils', solve_data):
            if config.tee and old_logger_level > logging.INFO:
                # If the logger does not already include INFO, include it.
                config.logger.setLevel(logging.INFO)
            config.logger.info(
                "Starting GDPbb version %s using %s as subsolver"
                % (".".join(map(str, self.version())), config.solver)
            )

            # Setup results
            solve_data.results.solver.name = 'GDPbb - %s' % (str(config.solver))
            setup_results_object(solve_data, config)
            # Initialize list containing indicator vars, used to restore the model after solving
            indicator_list_name = unique_component_name(model, "_indicator_list")
            indicator_vars = []
            for disjunction in model.component_data_objects(
                    ctype=Disjunction, active=True):
                for disjunct in disjunction.disjuncts:
                    indicator_vars.append(disjunct.indicator_var)
            setattr(model, indicator_list_name, indicator_vars)

            # get objective sense
            objectives = model.component_data_objects(Objective, active=True)
            obj = next(objectives, None)
            obj_sign = 1 if obj.sense == minimize else -1
            solve_data.results.problem.sense = obj.sense
            # clone original model for root node of branch and bound
            root = model.clone()

            # set up lists to keep track of which disjunctions have been covered.

            # this list keeps track of the original disjunctions that were active and are soon to be inactive
            root.GDPbb_utils.unenforced_disjunctions = list(
                disjunction for disjunction in root.GDPbb_utils.disjunction_list if disjunction.active
            )

            # this list keeps track of the disjunctions that have been activated by the branch and bound
            root.GDPbb_utils.curr_active_disjunctions = []

            # deactivate all disjunctions in the model
            # self.indicate(root)
            for djn in root.GDPbb_utils.unenforced_disjunctions:
                djn.deactivate()
            # Deactivate all disjuncts in model. To be reactivated when disjunction
            # is reactivated.
            for disj in root.component_data_objects(Disjunct, active=True):
                disj._deactivate_without_fixing_indicator()

            # Satisfiability check would go here

            # solve the root node
            config.logger.info("Solving the root node.")
            obj_value, result, _ = self.subproblem_solve(root, solver, config)

            # initialize minheap for Branch and Bound algorithm
            # Heap structure: (ordering tuple, model)
            # Ordering tuple: (objective value, disjunctions_left, -counter)
            #  - select solutions with lower objective value,
            #    then fewer disjunctions left to explore (depth first),
            #    then more recently encountered (tiebreaker)
            heap = []
            counter = 0
            disjunctions_left = len(root.GDPbb_utils.unenforced_disjunctions)
            heapq.heappush(heap,
                           ((obj_sign * obj_value, disjunctions_left, -counter), root,
                            result, root.GDPbb_utils.variable_list))
            # loop to branch through the tree
            while len(heap) > 0:
                # pop best model off of heap
                sort_tup, mdl, mdl_results, soln_vars = heapq.heappop(heap)
                old_obj_val, disjunctions_left, _ = sort_tup
                config.logger.info("Exploring node with LB %.10g and %s inactive disjunctions." % (
                    old_obj_val, disjunctions_left
                ))

                # if all the originally active disjunctions are active, solve and
                # return solution
                if disjunctions_left == 0:
                    config.logger.info("Model solved.")
                    # Model is solved. Copy over solution values.
                    for orig_var, soln_var in zip(model.GDPbb_utils.variable_list, soln_vars):
                        orig_var.value = soln_var.value

                    solve_data.results.problem.lower_bound = mdl_results.problem.lower_bound
                    solve_data.results.problem.upper_bound = mdl_results.problem.upper_bound
                    solve_data.results.solver.timing = solve_data.timing
                    solve_data.results.solver.termination_condition = mdl_results.solver.termination_condition
                    return solve_data.results

                next_disjunction = mdl.GDPbb_utils.unenforced_disjunctions.pop(0)
                config.logger.info("Activating disjunction %s" % next_disjunction.name)
                next_disjunction.activate()
                mdl.GDPbb_utils.curr_active_disjunctions.append(next_disjunction)
                djn_left = len(mdl.GDPbb_utils.unenforced_disjunctions)
                for disj in next_disjunction.disjuncts:
                    disj._activate_without_unfixing_indicator()
                    if not disj.indicator_var.fixed:
                        disj.indicator_var = 0  # initially set all indicator vars to zero
                added_disj_counter = 0
                for disj in next_disjunction.disjuncts:
                    if not disj.indicator_var.fixed:
                        disj.indicator_var = 1
                    mnew = mdl.clone()
                    if not disj.indicator_var.fixed:
                        disj.indicator_var = 0

                    # Check feasibility
                    if config.check_sat and satisfiable(mnew, config.logger) is False:
                        # problem is not satisfiable. Skip this disjunct.
                        continue

                    obj_value, result, vars = self.subproblem_solve(mnew, solver, config)
                    counter += 1
                    ordering_tuple = (obj_sign * obj_value, djn_left, -counter)
                    heapq.heappush(heap, (ordering_tuple, mnew, result, vars))
                    added_disj_counter += 1
                config.logger.info("Added %s new nodes with %s relaxed disjunctions to the heap. Size now %s." % (
                    added_disj_counter, djn_left, len(heap)))
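
Since heapq implements a min-heap, the ordering tuple above handles maximization only because the objective value is multiplied by obj_sign, which is defined earlier in the full solver (by convention +1 for minimization and -1 for maximization; that convention is assumed here). A minimal standalone sketch:

import heapq

obj_sign = -1  # assumed convention: -1 for a maximization problem
heap = []
for counter, obj_value in enumerate([3.0, 7.0, 5.0]):
    heapq.heappush(heap, ((obj_sign * obj_value, -counter), obj_value))
best = heapq.heappop(heap)[1]  # 7.0: the best (largest) objective pops first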
Example n. 36
0
def generate_lag_objective_function(model,
                                    setpoint_model,
                                    config,
                                    solve_data,
                                    discrete_only=False):
    """The function generate taylor extension of the Lagrangean function.

    Args:
        model ([type]): [description]
        setpoint_model ([type]): [description]
        discrete_only (bool, optional): [description]. Defaults to False.
    """
    temp_model = setpoint_model.clone()
    for var in temp_model.MindtPy_utils.variable_list:
        if var.is_integer():
            var.unfix()
    # objective_list[0] is the original objective function, not in MindtPy_utils block
    temp_model.MindtPy_utils.objective_list[0].activate()
    temp_model.MindtPy_utils.deactivate()
    TransformationFactory('core.relax_integer_vars').apply_to(temp_model)
    # Note: PyNumero does not support discrete variables, so PyomoNLP
    # operates on the continuous relaxation (temp_model) of setpoint_model.

    # Implementation 1:
    # Calculate the Jacobian and Hessian without assigning a variable or
    # constraint ordering, then use get_primal_indices to look up indices.
    with time_code(solve_data.timing, 'PyomoNLP'):
        nlp = pyomo_nlp.PyomoNLP(temp_model)
        lam = [
            -temp_model.dual[constr]
            if abs(temp_model.dual[constr]) > config.zero_tolerance else 0
            for constr in nlp.get_pyomo_constraints()
        ]
        nlp.set_duals(lam)
        obj_grad = nlp.evaluate_grad_objective().reshape(-1, 1)
        jac = nlp.evaluate_jacobian().toarray()
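        # Gradient of the Lagrangean: grad L = grad f + J^T lam, with the
        # duals lam loaded above.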
        jac_lag = obj_grad + jac.transpose().dot(
            numpy.array(lam).reshape(-1, 1))
        jac_lag[abs(jac_lag) < config.zero_tolerance] = 0
        # jac_lag of continuous variables should be zero
        for var in temp_model.MindtPy_utils.continuous_variable_list[:-1]:
            jac_lag[nlp.get_primal_indices([var])[0]] = 0
        nlp_var = {i.name for i in nlp.get_pyomo_variables()}
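        # First-order term grad L(x0)^T (x - x0), restricted to variables
        # present in the NLP; the [:-1] slices appear to skip MindtPy's
        # auxiliary objective-value variable at the end of variable_list.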
        first_order_term = sum(
            float(jac_lag[nlp.get_primal_indices([temp_var])[0]]) *
            (var - temp_var.value) for var, temp_var in zip(
                model.MindtPy_utils.variable_list[:-1],
                temp_model.MindtPy_utils.variable_list[:-1])
            if temp_var.name in nlp_var)

        if config.add_regularization == 'grad_lag':
            return Objective(expr=first_order_term, sense=minimize)
        elif config.add_regularization in {'hess_lag', 'hess_only_lag'}:
            # Implementation 1
            hess_lag = nlp.evaluate_hessian_lag().toarray()
            hess_lag[abs(hess_lag) < config.zero_tolerance] = 0
            second_order_term = 0.5 * sum(
                (var_i - temp_var_i.value) *
                float(hess_lag[nlp.get_primal_indices([temp_var_i])[0]][
                    nlp.get_primal_indices([temp_var_j])[0]]) *
                (var_j - temp_var_j.value) for var_i, temp_var_i in zip(
                    model.MindtPy_utils.variable_list[:-1],
                    temp_model.MindtPy_utils.variable_list[:-1])
                for var_j, temp_var_j in zip(
                    model.MindtPy_utils.variable_list[:-1],
                    temp_model.MindtPy_utils.variable_list[:-1])
                if (temp_var_i.name in nlp_var and temp_var_j.name in nlp_var))
            if config.add_regularization == 'hess_lag':
                return Objective(expr=first_order_term + second_order_term,
                                 sense=minimize)
            elif config.add_regularization == 'hess_only_lag':
                return Objective(expr=second_order_term, sense=minimize)
        elif config.add_regularization == 'sqp_lag':
            var_filter = (lambda v: v[1].is_integer()) if discrete_only \
                else (lambda v: v[1].name != 'MindtPy_utils.objective_value' and
                      'MindtPy_utils.feas_opt.slack_var' not in v[1].name)

            model_vars, setpoint_vars = zip(*filter(
                var_filter,
                zip(model.component_data_objects(Var),
                    setpoint_model.component_data_objects(Var))))
            assert len(model_vars) == len(
                setpoint_vars
            ), 'Trying to generate a squared-norm objective for models with different numbers of variables'
            if config.sqp_lag_scaling_coef is None:
                rho = 1
            elif config.sqp_lag_scaling_coef == 'fixed':
                r = 1
                rho = numpy.linalg.norm(jac_lag / (2 * r))
            elif config.sqp_lag_scaling_coef == 'variable_dependent':
                r = numpy.sqrt(
                    len(temp_model.MindtPy_utils.discrete_variable_list))
                rho = numpy.linalg.norm(jac_lag / (2 * r))

            return Objective(
                expr=first_order_term + rho *
                sum([(model_var - setpoint_var.value)**2
                     for (model_var,
                          setpoint_var) in zip(model_vars, setpoint_vars)]))
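
For reference, the 'grad_lag', 'hess_lag' and 'hess_only_lag' branches above are truncations of the Taylor expansion of the Lagrangean around the setpoint. A minimal numpy sketch of the quadratic model they build (g and H are illustrative stand-ins for jac_lag and hess_lag; not part of the code above):

import numpy

def lagrangean_taylor_model(g, H, x, x0):
    # First- plus second-order Taylor terms of the Lagrangean at x0.
    d = numpy.asarray(x, dtype=float) - numpy.asarray(x0, dtype=float)
    return float(numpy.ravel(g).dot(d) + 0.5 * d.dot(H).dot(d))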
Example n. 37
0
    def add_lazy_affine_cuts(self, solve_data, config, opt):
        """Adds affine cuts using MCPP.

        The cuts are added lazily through CPLEX's native self.add() function.

        Parameters
        ----------
        solve_data : MindtPySolveData
            Data container that holds solve-instance data.
        config : ConfigBlock
            The specific configurations for MindtPy.
        opt : SolverFactory
            The cplex_persistent solver.
        """
        with time_code(solve_data.timing, 'Affine cut generation'):
            m = solve_data.mip
            config.logger.debug('Adding affine cuts')
            counter = 0

            for constr in m.MindtPy_utils.nonlinear_constraint_list:

                vars_in_constr = list(identify_variables(constr.body))
                if any(var.value is None for var in vars_in_constr):
                    continue  # a variable has no values

                # mcpp stuff
                try:
                    mc_eqn = mc(constr.body)
                except MCPP_Error as e:
                    config.logger.debug(
                        'Skipping constraint %s due to MCPP error %s' %
                        (constr.name, str(e)))
                    continue  # skip to the next constraint
                # Check below that the slopes and intercepts are finite;
                # otherwise the corresponding cut is skipped.
                ccSlope = mc_eqn.subcc()
                cvSlope = mc_eqn.subcv()
                ccStart = mc_eqn.concave()
                cvStart = mc_eqn.convex()
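                # subcc()/subcv() give the subgradients of the concave
                # (over-estimating) and convex (under-estimating) relaxations
                # at the current point; concave()/convex() give the
                # corresponding relaxation values there.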

                concave_cut_valid = True
                convex_cut_valid = True
                for var in vars_in_constr:
                    if not var.fixed:
                        # NaN never compares equal to itself, so the original
                        # equality tests against float('nan') could never
                        # trigger; math.isnan/math.isinf handle both NaN and
                        # +/-inf (assumes `import math` at module level).
                        if math.isnan(ccSlope[var]) or math.isinf(ccSlope[var]):
                            concave_cut_valid = False
                        if math.isnan(cvSlope[var]) or math.isinf(cvSlope[var]):
                            convex_cut_valid = False
                if math.isnan(ccStart) or math.isinf(ccStart):
                    concave_cut_valid = False
                if math.isnan(cvStart) or math.isinf(cvStart):
                    convex_cut_valid = False
                # If all ccSlope (resp. cvSlope) values are zero, the cut
                # degenerates to a constant and is skipped.
                if not any(ccSlope.values()):
                    concave_cut_valid = False
                if not any(cvSlope.values()):
                    convex_cut_valid = False
                if not (concave_cut_valid or convex_cut_valid):
                    continue

                ub_int = min(
                    value(constr.upper),
                    mc_eqn.upper()) if constr.has_ub() else mc_eqn.upper()
                lb_int = max(
                    value(constr.lower),
                    mc_eqn.lower()) if constr.has_lb() else mc_eqn.lower()

                if concave_cut_valid:
                    pyomo_concave_cut = sum(ccSlope[var] * (var - var.value)
                                            for var in vars_in_constr
                                            if not var.fixed) + ccStart
                    cplex_concave_rhs = generate_standard_repn(
                        pyomo_concave_cut).constant
                    cplex_concave_cut, _ = opt._get_expr_from_pyomo_expr(
                        pyomo_concave_cut)
                    self.add(constraint=cplex.SparsePair(
                        ind=cplex_concave_cut.variables,
                        val=cplex_concave_cut.coefficients),
                             sense='G',
                             rhs=lb_int - cplex_concave_rhs)
                    counter += 1
                if convex_cut_valid:
                    pyomo_convex_cut = sum(cvSlope[var] * (var - var.value)
                                           for var in vars_in_constr
                                           if not var.fixed) + cvStart
                    cplex_convex_rhs = generate_standard_repn(
                        pyomo_convex_cut).constant
                    cplex_convex_cut, _ = opt._get_expr_from_pyomo_expr(
                        pyomo_convex_cut)
                    self.add(constraint=cplex.SparsePair(
                        ind=cplex_convex_cut.variables,
                        val=cplex_convex_cut.coefficients),
                             sense='L',
                             rhs=ub_int - cplex_convex_rhs)
                    counter += 1

            config.logger.info('Added %s affine cuts' % counter)
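
As a worked instance of the rhs bookkeeping above (illustrative numbers only): with slope 2, linearization point x = 3, intercept 5 and lb_int = 0, the Pyomo cut 2*(x - 3) + 5 >= 0 simplifies to 2*x - 1 >= 0; generate_standard_repn reports a constant of -1, so the lazy constraint handed to CPLEX is 2*x >= 0 - (-1) = 1.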
Example n. 38
0
    def add_lazy_no_good_cuts(self,
                              var_values,
                              solve_data,
                              config,
                              opt,
                              feasible=False):
        """Adds no-good cuts.

        The cuts are added lazily through CPLEX's native self.add() function.

        Parameters
        ----------
        var_values : list
            The variable values of the incumbent solution, used to generate the cut.
        solve_data : MindtPySolveData
            Data container that holds solve-instance data.
        config : ConfigBlock
            The specific configurations for MindtPy.
        opt : SolverFactory
            The cplex_persistent solver.
        feasible : bool, optional
            Whether the integer combination yields a feasible or infeasible NLP, by default False.

        Raises
        ------
        ValueError
            If the value of a binary variable is neither 0 nor 1
            (within integer_tolerance).
        """
        if not config.add_no_good_cuts:
            return

        config.logger.info('Adding no-good cuts')
        with time_code(solve_data.timing, 'No-good cut generation'):
            m = solve_data.mip
            MindtPy = m.MindtPy_utils
            int_tol = config.integer_tolerance

            binary_vars = [v for v in MindtPy.variable_list if v.is_binary()]

            # copy variable values over
            for var, val in zip(MindtPy.variable_list, var_values):
                if not var.is_binary():
                    continue
                # We don't want to trigger the reset of the global stale
                # indicator, so we will set this variable to be "stale",
                # knowing that set_value will switch it back to "not
                # stale"
                var.stale = True
                var.set_value(val, skip_validation=True)

            # check to make sure that binary variables are all 0 or 1
            for v in binary_vars:
                if value(abs(v - 1)) > int_tol and value(abs(v)) > int_tol:
                    raise ValueError('Binary {} = {} is not 0 or 1'.format(
                        v.name, value(v)))

            if not binary_vars:  # if no binary variables, skip
                return

            pyomo_no_good_cut = sum(
                1 - v
                for v in binary_vars if value(abs(v - 1)) <= int_tol) + sum(
                    v for v in binary_vars if value(abs(v)) <= int_tol)
            cplex_no_good_rhs = generate_standard_repn(
                pyomo_no_good_cut).constant
            cplex_no_good_cut, _ = opt._get_expr_from_pyomo_expr(
                pyomo_no_good_cut)

            self.add(constraint=cplex.SparsePair(
                ind=cplex_no_good_cut.variables,
                val=cplex_no_good_cut.coefficients),
                     sense='G',
                     rhs=1 - cplex_no_good_rhs)
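
As a worked instance (illustrative values only): for an incumbent with binaries (y1, y2, y3) = (1, 0, 1), the Pyomo cut is (1 - y1) + y2 + (1 - y3) >= 1. Its standard repn has constant 2 and coefficients (-1, +1, -1), so the lazy constraint handed to CPLEX is -y1 + y2 - y3 >= 1 - 2 = -1, which excludes exactly that binary combination.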