Code example #1
File: initialization.py  Project: jsiirola/pyomo
def MindtPy_initialize_main(solve_data, config):
    """
    Initializes the decomposition algorithm and creates the main MIP/MILP problem.

    This function initializes the decomposition problem, which includes generating the initial cuts required to
    build the main MIP/MILP problem.

    Parameters
    ----------
    solve_data: MindtPy Data Container
        data container that holds solve-instance data
    config: ConfigBlock
        contains the specific configurations for the algorithm
    """
    # if single tree is activated, we need to add bounds for unbounded variables in nonlinear constraints to avoid unbounded main problem.
    if config.single_tree:
        var_bound_add(solve_data, config)

    m = solve_data.mip = solve_data.working_model.clone()
    next(solve_data.mip.component_data_objects(Objective,
                                               active=True)).deactivate()

    MindtPy = m.MindtPy_utils
    if config.calculate_dual:
        m.dual.deactivate()

    if config.init_strategy == 'FP':
        MindtPy.cuts.fp_orthogonality_cuts = ConstraintList(
            doc='Orthogonality cuts in feasibility pump')
        if config.fp_projcuts:
            solve_data.working_model.MindtPy_utils.cuts.fp_orthogonality_cuts = ConstraintList(
                doc='Orthogonality cuts in feasibility pump')
    if config.strategy == 'OA' or config.init_strategy == 'FP':
        calc_jacobians(solve_data, config)  # preload jacobians
        MindtPy.cuts.oa_cuts = ConstraintList(doc='Outer approximation cuts')
    elif config.strategy == 'ECP':
        calc_jacobians(solve_data, config)  # preload jacobians
        MindtPy.cuts.ecp_cuts = ConstraintList(doc='Extended Cutting Planes')
    elif config.strategy == 'GOA':
        MindtPy.cuts.aff_cuts = ConstraintList(doc='Affine cuts')
    # elif config.strategy == 'PSC':
    #     detect_nonlinear_vars(solve_data, config)
    #     MindtPy.cuts.psc_cuts = ConstraintList(
    #         doc='Partial surrogate cuts')
    # elif config.strategy == 'GBD':
    #     MindtPy.cuts.gbd_cuts = ConstraintList(
    #         doc='Generalized Benders cuts')

    # Set default initialization_strategy
    if config.init_strategy is None:
        if config.strategy in {'OA', 'GOA'}:
            config.init_strategy = 'rNLP'
        else:
            config.init_strategy = 'max_binary'

    config.logger.info('{} is the initialization strategy being used.'
                       '\n'.format(config.init_strategy))
    # Do the initialization
    if config.init_strategy == 'rNLP':
        init_rNLP(solve_data, config)
    elif config.init_strategy == 'max_binary':
        init_max_binaries(solve_data, config)
    elif config.init_strategy == 'initial_binary':
        solve_data.curr_int_sol = get_integer_solution(
            solve_data.working_model)
        solve_data.integer_list.append(solve_data.curr_int_sol)
        if config.strategy != 'ECP':
            fixed_nlp, fixed_nlp_result = solve_subproblem(solve_data, config)
            handle_nlp_subproblem_tc(fixed_nlp, fixed_nlp_result, solve_data,
                                     config)
    elif config.init_strategy == 'FP':
        init_rNLP(solve_data, config)
        fp_loop(solve_data, config)
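A minimal invocation sketch for the initialization strategies dispatched above. The solver name 'mindtpy' and the option names follow the MindtPy documentation; the model m is an assumed user-supplied MINLP.

import pyomo.environ as pyo

# init_strategy may be 'rNLP', 'max_binary', 'initial_binary', or 'FP',
# matching the dispatch at the end of MindtPy_initialize_main.
results = pyo.SolverFactory('mindtpy').solve(
    m,
    strategy='OA',
    init_strategy='rNLP',
    mip_solver='glpk',
    nlp_solver='ipopt')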
Code example #2
    def solve(self, model, **kwds):
        """Solve the model.
        Warning: this solver is still in beta. Keyword arguments subject to
        change. Undocumented keyword arguments definitely subject to change.
        Warning: at this point in time, if you try to use PSC or GBD with
        anything other than IPOPT as the NLP solver, bad things will happen.
        This is because the suffixes are not in place to extract dual values
        from the variable bounds for any other solver.
        TODO: fix needed with the GBD implementation.
        Args:
            model (Block): a Pyomo model or block to be solved
        """
        config = self.CONFIG(kwds.pop('options', {}))
        config.set_value(kwds)
        solve_data = MindtPySolveData()
        solve_data.results = SolverResults()
        solve_data.timing = Container()

        solve_data.original_model = model
        solve_data.working_model = model.clone()
        if config.integer_to_binary:
            TransformationFactory('contrib.integer_to_binary'). \
                apply_to(solve_data.working_model)

        new_logging_level = logging.INFO if config.tee else None
        with time_code(solve_data.timing, 'total', is_main_timer=True), \
             lower_logger_level_to(config.logger, new_logging_level), \
             create_utility_block(solve_data.working_model, 'MindtPy_utils', solve_data):
            config.logger.info("---Starting MindtPy---")

            MindtPy = solve_data.working_model.MindtPy_utils
            setup_results_object(solve_data, config)
            process_objective(solve_data, config)

            # Save model initial values.
            solve_data.initial_var_values = list(
                v.value for v in MindtPy.variable_list)

            # Store the initial model state as the best solution found. If we
            # find no better solution, then we will restore from this copy.
            solve_data.best_solution_found = None

            # Record solver name
            solve_data.results.solver.name = 'MindtPy' + str(config.strategy)

            # Validate the model to ensure that MindtPy is able to solve it.
            if not model_is_valid(solve_data, config):
                return

            # Create a model block in which to store the generated feasibility
            # slack constraints. Do not leave the constraints on by default.
            feas = MindtPy.MindtPy_feas = Block()
            feas.deactivate()
            feas.feas_constraints = ConstraintList(
                doc='Feasibility Problem Constraints')

            # Create a model block in which to store the generated linear
            # constraints. Do not leave the constraints on by default.
            lin = MindtPy.MindtPy_linear_cuts = Block()
            lin.deactivate()

            # Integer cuts exclude particular discrete decisions
            lin.integer_cuts = ConstraintList(doc='integer cuts')
            # Feasible integer cuts exclude discrete realizations that have
            # been explored via an NLP subproblem. Depending on model
            # characteristics, the user may wish to revisit NLP subproblems
            # (with a different initialization, for example). Therefore, these
            # cuts are not enabled by default.
            #
            # Note: these cuts will only exclude integer realizations that are
            # not already in the primary integer_cuts ConstraintList.
            lin.feasible_integer_cuts = ConstraintList(
                doc='explored integer cuts')
            lin.feasible_integer_cuts.deactivate()

            # Set up iteration counters
            solve_data.nlp_iter = 0
            solve_data.mip_iter = 0
            solve_data.mip_subiter = 0

            # set up bounds
            solve_data.LB = float('-inf')
            solve_data.UB = float('inf')
            solve_data.LB_progress = [solve_data.LB]
            solve_data.UB_progress = [solve_data.UB]

            # Set of NLP iterations for which cuts were generated
            lin.nlp_iters = Set(dimen=1)

            # Set of MIP iterations for which cuts were generated in ECP
            lin.mip_iters = Set(dimen=1)

            nonlinear_constraints = [
                c for c in MindtPy.constraint_list
                if c.body.polynomial_degree() not in (1, 0)
            ]
            lin.nl_constraint_set = RangeSet(
                len(nonlinear_constraints),
                doc="Integer index set over the nonlinear constraints")
            feas.constraint_set = RangeSet(
                len(MindtPy.constraint_list),
                doc="integer index set over the constraints")

            # # Mapping Constraint -> integer index
            # MindtPy.feas_map = {}
            # # Mapping integer index -> Constraint
            # MindtPy.feas_inverse_map = {}
            # # Generate the two maps. These maps may be helpful for later
            # # interpreting indices on the slack variables or generated cuts.
            # for c, n in zip(MindtPy.constraint_list, feas.constraint_set):
            #     MindtPy.feas_map[c] = n
            #     MindtPy.feas_inverse_map[n] = c

            # Create slack variables for OA cuts
            lin.slack_vars = VarList(bounds=(0, config.max_slack),
                                     initialize=0,
                                     domain=NonNegativeReals)
            # Create slack variables for feasibility problem
            feas.slack_var = Var(feas.constraint_set,
                                 domain=NonNegativeReals,
                                 initialize=1)

            # Flag indicating whether the solution improved in the past
            # iteration or not
            solve_data.solution_improved = False

            if not hasattr(solve_data.working_model, 'ipopt_zL_out'):
                solve_data.working_model.ipopt_zL_out = Suffix(
                    direction=Suffix.IMPORT)
            if not hasattr(solve_data.working_model, 'ipopt_zU_out'):
                solve_data.working_model.ipopt_zU_out = Suffix(
                    direction=Suffix.IMPORT)

            # Initialize the master problem
            with time_code(solve_data.timing, 'initialization'):
                MindtPy_initialize_master(solve_data, config)

            # Algorithm main loop
            with time_code(solve_data.timing, 'main loop'):
                MindtPy_iteration_loop(solve_data, config)

            if solve_data.best_solution_found is not None:
                # Update values in original model
                copy_var_list_values(from_list=solve_data.best_solution_found.
                                     MindtPy_utils.variable_list,
                                     to_list=MindtPy.variable_list,
                                     config=config)
                # MindtPy.objective_value.set_value(
                #     value(solve_data.working_objective_expr, exception=False))
                copy_var_list_values(
                    MindtPy.variable_list,
                    solve_data.original_model.component_data_objects(Var),
                    config)

            solve_data.results.problem.lower_bound = solve_data.LB
            solve_data.results.problem.upper_bound = solve_data.UB

        solve_data.results.solver.timing = solve_data.timing
        solve_data.results.solver.user_time = solve_data.timing.total
        solve_data.results.solver.wallclock_time = solve_data.timing.total

        solve_data.results.solver.iterations = solve_data.mip_iter

        return solve_data.results
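The ipopt_zL_out / ipopt_zU_out suffixes created above are how IPOPT's bound multipliers are imported into the model. A self-contained sketch of that mechanism, assuming an IPOPT executable is available:

import pyomo.environ as pyo

m = pyo.ConcreteModel()
m.x = pyo.Var(bounds=(0, 10))
m.obj = pyo.Objective(expr=(m.x - 2) ** 2)
# IMPORT suffixes with these names tell the IPOPT interface to load the
# multipliers on the variable lower/upper bounds after the solve.
m.ipopt_zL_out = pyo.Suffix(direction=pyo.Suffix.IMPORT)
m.ipopt_zU_out = pyo.Suffix(direction=pyo.Suffix.IMPORT)
pyo.SolverFactory('ipopt').solve(m)
print(m.ipopt_zL_out.get(m.x), m.ipopt_zU_out.get(m.x))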
Code example #3
File: MILP_unused_vars.py  Project: CanLi1/pyomo-1
    def _generate_model(self):
        self.model = ConcreteModel()
        model = self.model
        model._name = self.description

        model.s = Set(initialize=[1, 2])

        model.x_unused = Var(within=Integers)
        model.x_unused.stale = False

        model.x_unused_initialy_stale = Var(within=Integers)
        model.x_unused_initialy_stale.stale = True

        model.X_unused = Var(model.s, within=Integers)
        model.X_unused_initialy_stale = Var(model.s, within=Integers)
        for i in model.s:
            model.X_unused[i].stale = False
            model.X_unused_initialy_stale[i].stale = True

        model.x = Var(within=IntegerInterval(bounds=(None, None)))
        model.x.stale = False

        model.x_initialy_stale = Var(within=Integers)
        model.x_initialy_stale.stale = True

        model.X = Var(model.s, within=Integers)
        model.X_initialy_stale = Var(model.s, within=Integers)
        for i in model.s:
            model.X[i].stale = False
            model.X_initialy_stale[i].stale = True

        model.obj = Objective(expr= model.x + \
                                    model.x_initialy_stale + \
                                    sum_product(model.X) + \
                                    sum_product(model.X_initialy_stale))

        model.c = ConstraintList()
        model.c.add(model.x >= 1)
        model.c.add(model.x_initialy_stale >= 1)
        model.c.add(model.X[1] >= 0)
        model.c.add(model.X[2] >= 1)
        model.c.add(model.X_initialy_stale[1] >= 0)
        model.c.add(model.X_initialy_stale[2] >= 1)

        # Test that stale flags do not get updated
        # on inactive blocks (where "inactive blocks" mean blocks
        # that do NOT follow a path of all active parent blocks
        # up to the top-level model)
        flat_model = model.clone()
        model.b = Block()
        model.B = Block(model.s)
        model.b.b = flat_model.clone()
        model.B[1].b = flat_model.clone()
        model.B[2].b = flat_model.clone()

        model.b.deactivate()
        model.B.deactivate()
        model.b.b.activate()
        model.B[1].b.activate()
        model.B[2].b.deactivate()
        assert model.b.active is False
        assert model.B[1].active is False
        assert model.B[2].active is False
        assert model.b.b.active is True
        assert model.B[1].b.active is True
        assert model.B[2].b.active is False
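The test above sets Var.stale by hand. A small sketch of the flag's default behavior (the exact semantics are version-dependent in Pyomo, so treat this as illustrative):

import pyomo.environ as pyo

m = pyo.ConcreteModel()
m.x = pyo.Var(within=pyo.Integers)
print(m.x.stale)   # True: the variable has never been given a value
m.x.set_value(3)   # assigning a value clears the stale flag
print(m.x.stale)   # False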
Code example #4
File: colloc.py  Project: zypher22/pyomo
    def reduce_collocation_points(self,
                                  instance,
                                  var=None,
                                  ncp=None,
                                  contset=None):
        """
        This method will add additional constraints to a model to reduce the
        number of free collocation points (degrees of freedom) for a particular
        variable.

        Parameters
        ----------
        instance : Pyomo model
            The discretized Pyomo model to add constraints to

        var : ``pyomo.environ.Var``
            The Pyomo variable for which the degrees of freedom will be reduced

        ncp : int
            The new number of free collocation points for `var`. Must be
            less than the number of collocation points used in discretizing
            the model.

        contset : ``pyomo.dae.ContinuousSet``
            The :py:class:`ContinuousSet<pyomo.dae.ContinuousSet>` that was
            discretized and for which the `var` will have a reduced number
            of degrees of freedom

        """
        if contset is None:
            raise TypeError("A continuous set must be specified using the "
                            "keyword 'contset'")
        if contset.ctype is not ContinuousSet:
            raise TypeError("The component specified using the 'contset' "
                            "keyword must be a ContinuousSet")
        ds = contset

        if len(self._ncp) == 0:
            raise RuntimeError("This method should only be called after using "
                               "the apply() method to discretize the model")
        elif None in self._ncp:
            tot_ncp = self._ncp[None]
        elif ds.name in self._ncp:
            tot_ncp = self._ncp[ds.name]
        else:
            raise ValueError("ContinuousSet '%s' has not been discretized, "
                             "please call the apply_to() method with this "
                             "ContinuousSet to discretize it before calling "
                             "this method" % ds.name)

        if var is None:
            raise TypeError("A variable must be specified")
        if var.ctype is not Var:
            raise TypeError("The component specified using the 'var' keyword "
                            "must be a variable")

        if ncp is None:
            raise TypeError(
                "The number of collocation points must be specified")
        if ncp <= 0:
            raise ValueError(
                "The number of collocation points must be at least 1")
        if ncp > tot_ncp:
            raise ValueError("The number of collocation points used to "
                             "interpolate an individual variable must be less "
                             "than the number used to discretize the original "
                             "model")
        if ncp == tot_ncp:
            # Nothing to be done
            return instance

        # Check to see if the continuousset is an indexing set of the variable
        if var.dim() == 0:
            raise IndexError("ContinuousSet '%s' is not an indexing set of"
                             " the variable '%s'" % (ds.name, var.name))
        varidx = var.index_set()
        if not hasattr(varidx, 'set_tuple'):
            if ds is not varidx:
                raise IndexError("ContinuousSet '%s' is not an indexing set of"
                                 " the variable '%s'" % (ds.name, var.name))
        elif ds not in varidx.set_tuple:
            raise IndexError("ContinuousSet '%s' is not an indexing set of the"
                             " variable '%s'" % (ds.name, var.name))

        if var.name in self._reduced_cp:
            temp = self._reduced_cp[var.name]
            if ds.name in temp:
                raise RuntimeError("Variable '%s' has already been constrained"
                                   " to a reduced number of collocation points"
                                   " over ContinuousSet '%s'.")
            else:
                temp[ds.name] = ncp
        else:
            self._reduced_cp[var.name] = {ds.name: ncp}

        # TODO: Use unique_component_name for this
        list_name = var.local_name + "_interpolation_constraints"

        instance.add_component(list_name, ConstraintList())
        conlist = instance.find_component(list_name)

        t = sorted(ds)
        fe = ds._fe
        info = get_index_information(var, ds)
        tmpidx = info['non_ds']
        idx = info['index function']

        # Iterate over non_ds indices
        for n in tmpidx:
            # Iterate over finite elements
            for i in range(0, len(fe) - 1):
                # Iterate over collocation points
                for k in range(1, tot_ncp - ncp + 1):
                    if ncp == 1:
                        # Constant over each finite element
                        conlist.add(
                            var[idx(n, i, k)] == var[idx(n, i, tot_ncp)])
                    else:
                        tmp = t.index(fe[i])
                        tmp2 = t.index(fe[i + 1])
                        ti = t[tmp + k]
                        tfit = t[tmp2 - ncp + 1:tmp2 + 1]
                        coeff = self._interpolation_coeffs(ti, tfit)
                        conlist.add(var[idx(n, i, k)] == sum(
                            var[idx(n, i, j)] * next(coeff)
                            for j in range(tot_ncp - ncp + 1, tot_ncp + 1)))

        return instance
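A usage sketch following the pyomo.dae documentation; m, m.t, and m.u are assumed to be a user model, its ContinuousSet, and a control variable indexed over m.t:

from pyomo.environ import TransformationFactory

discretizer = TransformationFactory('dae.collocation')
discretizer.apply_to(m, nfe=20, ncp=3, scheme='LAGRANGE-RADAU')
# Force the control profile u to one free collocation point per finite
# element (piecewise constant), leaving the state discretization untouched.
discretizer.reduce_collocation_points(m, var=m.u, ncp=1, contset=m.t)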
Code example #5
File: cut_generation.py  Project: matzech/pyomo
def add_affine_cuts(nlp_result, solve_data, config):
    with time_code(solve_data.timing, "affine cut generation"):
        m = solve_data.linear_GDP
        if config.calc_disjunctive_bounds:
            with time_code(solve_data.timing, "disjunctive variable bounding"):
                TransformationFactory(
                    'contrib.compute_disj_var_bounds').apply_to(
                        m,
                        solver=config.mip_solver
                        if config.obbt_disjunctive_bounds else None)
        config.logger.info("Adding affine cuts.")
        GDPopt = m.GDPopt_utils
        counter = 0
        for var, val in zip(GDPopt.variable_list, nlp_result.var_values):
            if val is not None and not var.fixed:
                var.value = val

        for constr in constraints_in_True_disjuncts(m, config):
            # Note: this includes constraints that are deactivated in the current model (linear_GDP)

            disjunctive_var_bounds = disjunctive_bounds(constr.parent_block())

            if constr.body.polynomial_degree() in (1, 0):
                continue

            vars_in_constr = list(identify_variables(constr.body))
            if any(var.value is None for var in vars_in_constr):
                continue  # a variable has no value

            # mcpp stuff
            try:
                mc_eqn = mc(constr.body, disjunctive_var_bounds)
            except MCPP_Error as e:
                config.logger.debug(
                    "Skipping constraint %s due to MCPP error %s" %
                    (constr.name, str(e)))
                continue  # skip to the next constraint
            ccSlope = mc_eqn.subcc()
            cvSlope = mc_eqn.subcv()
            ccStart = mc_eqn.concave()
            cvStart = mc_eqn.convex()
            ub_int = min(
                constr.upper,
                mc_eqn.upper()) if constr.has_ub() else mc_eqn.upper()
            lb_int = max(
                constr.lower,
                mc_eqn.lower()) if constr.has_lb() else mc_eqn.lower()

            parent_block = constr.parent_block()
            # Create a block on which to put outer approximation cuts.
            aff_utils = parent_block.component('GDPopt_aff')
            if aff_utils is None:
                aff_utils = parent_block.GDPopt_aff = Block(
                    doc="Block holding affine constraints")
                aff_utils.GDPopt_aff_cons = ConstraintList()
            aff_cuts = aff_utils.GDPopt_aff_cons
            concave_cut = sum(ccSlope[var] * (var - var.value)
                              for var in vars_in_constr
                              if not var.fixed) + ccStart >= lb_int
            convex_cut = sum(cvSlope[var] * (var - var.value)
                             for var in vars_in_constr
                             if not var.fixed) + cvStart <= ub_int
            aff_cuts.add(expr=concave_cut)
            aff_cuts.add(expr=convex_cut)
            counter += 2

        config.logger.info("Added %s affine cuts" % counter)
Code example #6
    def solve(self, model, **kwds):
        """Solve the model.

        Parameters
        ----------
        model : Pyomo model
            The MINLP model to be solved.

        Returns
        -------
        results : SolverResults
            Results from solving the MINLP problem by MindtPy.
        """
        config = self.CONFIG(kwds.pop('options', {
        }), preserve_implicit=True)  # TODO: do we need to set preserve_implicit=True?
        config.set_value(kwds)
        set_up_logger(config)
        check_config(config)

        solve_data = set_up_solve_data(model, config)

        if config.integer_to_binary:
            TransformationFactory('contrib.integer_to_binary'). \
                apply_to(solve_data.working_model)

        new_logging_level = logging.INFO if config.tee else None
        with time_code(solve_data.timing, 'total', is_main_timer=True), \
                lower_logger_level_to(config.logger, new_logging_level), \
                create_utility_block(solve_data.working_model, 'MindtPy_utils', solve_data):
            config.logger.info(
                '---------------------------------------------------------------------------------------------\n'
                '              Mixed-Integer Nonlinear Decomposition Toolbox in Pyomo (MindtPy)               \n'
                '---------------------------------------------------------------------------------------------\n'
                'For more information, please visit https://pyomo.readthedocs.io/en/stable/contributed_packages/mindtpy.html')

            MindtPy = solve_data.working_model.MindtPy_utils
            setup_results_object(solve_data, config)
            # In the process_objective function, as long as the objective function is nonlinear, it will be reformulated and the variable/constraint/objective lists will be updated.
            # For the OA/GOA/LP-NLP algorithms, a linear objective function is not reformulated as an epigraph constraint.
            # A linear objective function is reformulated as an epigraph constraint only if the Feasibility Pump or ROA/RLP-NLP algorithm is activated (move_objective = True).
            # In some cases, the variable/constraint/objective lists are not updated even if the objective is epigraph-reformulated.
            # In the Feasibility Pump, the distance calculation only includes discrete variables and the epigraph slack variables are continuous, so the algorithm is not affected even if the variable list is updated.
            # In ROA and RLP/NLP, the distance calculation does not include these epigraph slack variables, so they should not be added to the variable list. (update_var_con_list = False)
            # Once the objective function has been reformulated as an epigraph constraint, the variable/constraint/objective lists are left unchanged only if the MINLP has a linear objective function and regularization is activated at the same time.
            # This is because the epigraph constraint is very "flat" for branching rules. The original objective function will be used for the main problem and the epigraph reformulation will be used for the projection problem.
            # TODO: The logic here is too complicated, can we simplify it?
            process_objective(solve_data, config,
                              move_objective=(config.init_strategy == 'FP'
                                                     or config.add_regularization is not None
                                                     or config.move_objective),
                              use_mcpp=config.use_mcpp,
                              update_var_con_list=config.add_regularization is None,
                              partition_nonlinear_terms=config.partition_obj_nonlinear_terms,
                              obj_handleable_polynomial_degree=solve_data.mip_objective_polynomial_degree,
                              constr_handleable_polynomial_degree=solve_data.mip_constraint_polynomial_degree
                              )
            # The epigraph constraint is very "flat" for branching rules.
            # If ROA/RLP-NLP is activated and the original objective function is linear, we will use the original objective for the main mip.
            if MindtPy.objective_list[0].expr.polynomial_degree() in solve_data.mip_objective_polynomial_degree and config.add_regularization is not None:
                MindtPy.objective_list[0].activate()
                MindtPy.objective_constr.deactivate()
                MindtPy.objective.deactivate()

            # Save model initial values.
            solve_data.initial_var_values = list(
                v.value for v in MindtPy.variable_list)

            # Store the initial model state as the best solution found. If we
            # find no better solution, then we will restore from this copy.
            solve_data.best_solution_found = None
            solve_data.best_solution_found_time = None

            # Record solver name
            solve_data.results.solver.name = 'MindtPy' + str(config.strategy)

            # Validate the model to ensure that MindtPy is able to solve it.
            if not model_is_valid(solve_data, config):
                return

            # Create a model block in which to store the generated feasibility
            # slack constraints. Do not leave the constraints on by default.
            feas = MindtPy.feas_opt = Block()
            feas.deactivate()
            feas.feas_constraints = ConstraintList(
                doc='Feasibility Problem Constraints')

            # Create a model block in which to store the generated linear
            # constraints. Do not leave the constraints on by default.
            lin = MindtPy.cuts = Block()
            lin.deactivate()

            # no-good cuts exclude particular discrete decisions
            lin.no_good_cuts = ConstraintList(doc='no-good cuts')
            # Feasible no-good cuts exclude discrete realizations that have
            # been explored via an NLP subproblem. Depending on model
            # characteristics, the user may wish to revisit NLP subproblems
            # (with a different initialization, for example). Therefore, these
            # cuts are not enabled by default.
            #
            # Note: these cuts will only exclude integer realizations that are
            # not already in the primary no_good_cuts ConstraintList.
            lin.feasible_no_good_cuts = ConstraintList(
                doc='explored no-good cuts')
            lin.feasible_no_good_cuts.deactivate()

            if config.feasibility_norm == 'L1' or config.feasibility_norm == 'L2':
                feas.nl_constraint_set = RangeSet(len(MindtPy.nonlinear_constraint_list),
                                                  doc='Integer index set over the nonlinear constraints.')
                # Create slack variables for feasibility problem
                feas.slack_var = Var(feas.nl_constraint_set,
                                     domain=NonNegativeReals, initialize=1)
            else:
                feas.slack_var = Var(domain=NonNegativeReals, initialize=1)

            # Create slack variables for OA cuts
            if config.add_slack:
                lin.slack_vars = VarList(
                    bounds=(0, config.max_slack), initialize=0, domain=NonNegativeReals)

            # Initialize the main problem
            with time_code(solve_data.timing, 'initialization'):
                MindtPy_initialize_main(solve_data, config)

            # Algorithm main loop
            with time_code(solve_data.timing, 'main loop'):
                MindtPy_iteration_loop(solve_data, config)
            if solve_data.best_solution_found is not None:
                # Update values in original model
                copy_var_list_values(
                    from_list=solve_data.best_solution_found.MindtPy_utils.variable_list,
                    to_list=MindtPy.variable_list,
                    config=config)
                copy_var_list_values(
                    MindtPy.variable_list,
                    [i for i in solve_data.original_model.component_data_objects(
                        Var) if not i.fixed],
                    config)
                # exclude fixed variables here. This is consistent with the definition of variable_list in GDPopt.util
            if solve_data.objective_sense == minimize:
                solve_data.results.problem.lower_bound = solve_data.dual_bound
                solve_data.results.problem.upper_bound = solve_data.primal_bound
            else:
                solve_data.results.problem.lower_bound = solve_data.primal_bound
                solve_data.results.problem.upper_bound = solve_data.dual_bound

            solve_data.results.solver.timing = solve_data.timing
            solve_data.results.solver.user_time = solve_data.timing.total
            solve_data.results.solver.wallclock_time = solve_data.timing.total
            solve_data.results.solver.iterations = solve_data.mip_iter
            solve_data.results.solver.num_infeasible_nlp_subproblem = solve_data.nlp_infeasible_counter
            solve_data.results.solver.best_solution_found_time = solve_data.best_solution_found_time
            solve_data.results.solver.primal_integral = get_primal_integral(solve_data, config)
            solve_data.results.solver.dual_integral = get_dual_integral(solve_data, config)
            solve_data.results.solver.primal_dual_gap_integral = solve_data.results.solver.primal_integral + \
                solve_data.results.solver.dual_integral
            config.logger.info(' {:<25}:   {:>7.4f} '.format(
                'Primal-dual gap integral', solve_data.results.solver.primal_dual_gap_integral))

            if config.single_tree:
                solve_data.results.solver.num_nodes = solve_data.nlp_iter - \
                    (1 if config.init_strategy == 'rNLP' else 0)

        return solve_data.results
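The returned SolverResults object carries the bound, timing, and integral statistics populated at the end of solve(). A reading sketch, with field names taken from the code above and m an assumed MINLP:

import pyomo.environ as pyo

results = pyo.SolverFactory('mindtpy').solve(
    m, strategy='OA', mip_solver='glpk', nlp_solver='ipopt')
print(results.problem.lower_bound, results.problem.upper_bound)
print(results.solver.iterations, results.solver.wallclock_time)
print(results.solver.primal_dual_gap_integral)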
Code example #7
    def set_value(self, expr):
        for e in expr:
            # The user gave us a proper Disjunct block
            # [ESJ 06/21/2019] This is really an issue with the reclassifier,
            # but in the case where you are iteratively adding to an
            # IndexedDisjunct indexed by Any which has already been transformed,
            # the new Disjuncts are Blocks already. This catches them for who
            # they are anyway.
            if isinstance(e, _DisjunctData):
                #if hasattr(e, 'type') and e.ctype == Disjunct:
                self.disjuncts.append(e)
                continue
            # The user was lazy and gave us a single constraint
            # expression or an iterable of expressions
            expressions = []
            if hasattr(e, '__iter__'):
                e_iter = e
            else:
                e_iter = [e]
            for _tmpe in e_iter:
                try:
                    isexpr = _tmpe.is_expression_type()
                except AttributeError:
                    isexpr = False
                if not isexpr or not _tmpe.is_relational():
                    try:
                        isvar = _tmpe.is_variable_type()
                    except AttributeError:
                        isvar = False
                    if isvar and _tmpe.is_relational():
                        expressions.append(_tmpe)
                        continue
                    try:
                        isbool = _tmpe.is_logical_type()
                    except AttributeError:
                        isbool = False
                    if isbool:
                        expressions.append(_tmpe)
                        continue
                    msg = "\n\tin %s" % (type(e), ) if e_iter is e else ""
                    raise ValueError(
                        "Unexpected term for Disjunction %s.\n"
                        "\tExpected a Disjunct object, relational expression, "
                        "or iterable of\n"
                        "\trelational expressions but got %s%s" %
                        (self.name, type(_tmpe), msg))
                else:
                    expressions.append(_tmpe)

            comp = self.parent_component()
            if comp._autodisjuncts is None:
                b = self.parent_block()
                comp._autodisjuncts = Disjunct(Any)
                b.add_component(
                    unique_component_name(b, comp.local_name + "_disjuncts"),
                    comp._autodisjuncts)
                # TODO: I am not at all sure why we need to
                # explicitly construct this block - that should
                # happen automatically.
                comp._autodisjuncts.construct()
            disjunct = comp._autodisjuncts[len(comp._autodisjuncts)]
            disjunct.constraint = c = ConstraintList()
            disjunct.propositions = p = LogicalConstraintList()
            for e in expressions:
                if isinstance(e, BooleanValue):
                    p.add(e)
                else:
                    c.add(e)
            self.disjuncts.append(disjunct)
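The autodisjunct branch above is what fires when a Disjunction is given plain expressions instead of Disjunct blocks. A minimal sketch:

import pyomo.environ as pyo
from pyomo.gdp import Disjunction

m = pyo.ConcreteModel()
m.x = pyo.Var(bounds=(0, 10))
m.y = pyo.Var(bounds=(0, 10))
# Each inner list becomes an autogenerated Disjunct whose constraints are
# collected into a ConstraintList, exactly as in set_value above.
m.d = Disjunction(expr=[[m.x + m.y >= 5], [m.x - m.y <= 3]])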
Code example #8
def _process_logical_constraints_in_logical_context(context):
    new_xfrm_block_name = unique_component_name(context, 'logic_to_linear')
    new_xfrm_block = Block(doc="Transformation objects for logic_to_linear")
    setattr(context, new_xfrm_block_name, new_xfrm_block)

    new_constrlist = new_xfrm_block.transformed_constraints = ConstraintList()
    new_boolvarlist = new_xfrm_block.augmented_vars = BooleanVarList()
    new_varlist = new_xfrm_block.augmented_vars_asbinary = VarList(
        domain=Binary)

    indicator_map = ComponentMap()
    cnf_statements = []
    # Convert all logical constraints to CNF
    for logical_constraint in context.component_data_objects(
            ctype=LogicalConstraint, active=True):
        cnf_statements.extend(
            to_cnf(logical_constraint.body, new_boolvarlist, indicator_map))
        logical_constraint.deactivate()

    # Associate new Boolean vars to new binary variables
    for bool_vardata in new_boolvarlist.values():
        new_binary_vardata = new_varlist.add()
        bool_vardata.associate_binary_var(new_binary_vardata)

    # Add constraints associated with each CNF statement
    for cnf_statement in cnf_statements:
        for linear_constraint in _cnf_to_linear_constraint_list(cnf_statement):
            new_constrlist.add(expr=linear_constraint)

    # Add bigM associated with special atoms
    # Note: this ad-hoc reformulation may be revisited for tightness in the future.
    old_varlist_length = len(new_varlist)
    for indicator_var, special_atom in indicator_map.items():
        for linear_constraint in _cnf_to_linear_constraint_list(
                special_atom, indicator_var, new_varlist):
            new_constrlist.add(expr=linear_constraint)

    # Previous step may have added auxiliary binaries. Associate augmented Booleans to them.
    num_new = len(new_varlist) - old_varlist_length
    list_o_vars = list(new_varlist.values())
    if num_new:
        for binary_vardata in list_o_vars[-num_new:]:
            new_bool_vardata = new_boolvarlist.add()
            new_bool_vardata.associate_binary_var(binary_vardata)

    # If added components were not used, remove them.
    # Note: it is ok to simply delete the index_set for these components, because by
    # default, a new set object is generated for each [Thing]List.
    if len(new_constrlist) == 0:
        new_xfrm_block.del_component(new_constrlist.index_set())
        new_xfrm_block.del_component(new_constrlist)
    if len(new_boolvarlist) == 0:
        new_xfrm_block.del_component(new_boolvarlist.index_set())
        new_xfrm_block.del_component(new_boolvarlist)
    if len(new_varlist) == 0:
        new_xfrm_block.del_component(new_varlist.index_set())
        new_xfrm_block.del_component(new_varlist)

    # If block was entirely unused, remove it
    if all(
            len(l) == 0
            for l in (new_constrlist, new_boolvarlist, new_varlist)):
        context.del_component(new_xfrm_block)
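A sketch of driving this routine through its registered transformation name; the name 'core.logic_to_linear' follows the Pyomo documentation for the versions this snippet comes from:

import pyomo.environ as pyo

m = pyo.ConcreteModel()
m.Y = pyo.BooleanVar([1, 2])
m.p = pyo.LogicalConstraint(expr=m.Y[1].implies(m.Y[2]))
# Converts the logical constraint to CNF and then to linear constraints
# over auto-created binary variables, as implemented above.
pyo.TransformationFactory('core.logic_to_linear').apply_to(m)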
Code example #9
File: detection.py  Project: zsyf102900/pyomo
    def generate_structured_model(self):
        """
        Using the community map and the original model used to create this community map, we will create
        structured_model, which will be based on the original model but will place variables, constraints, and
        objectives into or outside of various blocks (communities) based on the community map.

        Returns
        -------
        structured_model: Block
            a Pyomo model that reflects the nature of the community map
        """

        # Initialize a new model (structured_model) which will contain variables and constraints in blocks based on
        # their respective communities within the CommunityMap
        structured_model = ConcreteModel()

        # Create N blocks (where N is the number of communities found within the model)
        structured_model.b = Block(range(len(self.community_map)))  # one block per community, indexed 0..N-1

        # Initialize a ComponentMap that will map a variable from the model (for example, old_model.x1) used to
        # create the CommunityMap to a list of variables in various blocks that were created based on this
        # variable (for example, [structured_model.b[0].x1, structured_model.b[3].x1])
        blocked_variable_map = ComponentMap()
        # Example key-value pair -> {original_model.x1 : [structured_model.b[0].x1, structured_model.b[3].x1]}

        # TODO - Consider changing structure of the next two for loops to be more efficient (maybe loop through
        #  constraints and add variables as you go) (but note that disconnected variables would be
        #  missed with this strategy)

        # First loop through community_map to add all the variables to structured_model before we add constraints
        # that use those variables
        for community_key, community in self.community_map.items():
            _, variables_in_community = community

            # Loop through all of the variables (from the original model) in the given community
            for stored_variable in variables_in_community:
                # Construct a new_variable whose attributes are determined by querying the variable from the
                # original model
                new_variable = Var(domain=stored_variable.domain, bounds=stored_variable.bounds)

                # Add this new_variable to its block/community and name it using the string of the variable from the
                # original model
                structured_model.b[community_key].add_component(str(stored_variable), new_variable)

                # Since there could be multiple variables 'x1' (such as
                # structured_model.b[0].x1, structured_model.b[3].x1, etc), we need to create equality constraints
                # for all of the variables 'x1' within structured_model (this is the purpose of blocked_variable_map)

                # Here we update blocked_variable_map to keep track of what equality constraints need to be made
                variable_in_new_model = structured_model.find_component(new_variable)
                blocked_variable_map[stored_variable] = blocked_variable_map.get(stored_variable,
                                                                                 []) + [variable_in_new_model]

        # Now that we have all of our variables within the model, we will initialize a dictionary that will be used
        # to replace variables within constraints with other variables (in our case, converting variables from the
        # original model into variables from the new model (structured_model))
        replace_variables_in_expression_map = dict()

        # Loop through community_map again, this time to add constraints (with replaced variables)
        for community_key, community in self.community_map.items():
            constraints_in_community, _ = community

            # Loop through all of the constraints (from the original model) in the given community
            for stored_constraint in constraints_in_community:

                # Now, loop through all of the variables within the given constraint expression
                for variable_in_stored_constraint in identify_variables(stored_constraint.expr):

                    # Loop through each of the "blocked" variables that a variable is mapped to and update
                    # replace_variables_in_expression_map if a variable has a "blocked" form in the given community

                    # What this means is that if we are looping through constraints in community 0, then it would be
                    # best to change a variable x1 into b[0].x1 as opposed to b[2].x1 or b[5].x1 (assuming all of these
                    # blocked versions of the variable x1 exist (which depends on the community map))

                    variable_in_current_block = False
                    for blocked_variable in blocked_variable_map[variable_in_stored_constraint]:
                        if 'b[%d]' % community_key in str(blocked_variable):
                            # Update replace_variables_in_expression_map accordingly
                            replace_variables_in_expression_map[id(variable_in_stored_constraint)] = blocked_variable
                            variable_in_current_block = True

                    if not variable_in_current_block:
                        # Create a version of the given variable outside of blocks then add it to
                        # replace_variables_in_expression_map

                        new_variable = Var(domain=variable_in_stored_constraint.domain,
                                           bounds=variable_in_stored_constraint.bounds)

                        # Add the new variable just as we did above (but now it is not in any blocks)
                        structured_model.add_component(str(variable_in_stored_constraint), new_variable)

                        # Update blocked_variable_map to keep track of what equality constraints need to be made
                        variable_in_new_model = structured_model.find_component(new_variable)
                        blocked_variable_map[variable_in_stored_constraint] = blocked_variable_map.get(
                            variable_in_stored_constraint, []) + [variable_in_new_model]

                        # Update replace_variables_in_expression_map accordingly
                        replace_variables_in_expression_map[id(variable_in_stored_constraint)] = variable_in_new_model

                # TODO - Is there a better way to check whether something is actually an objective? (as done below)
                # Check to see whether 'stored_constraint' is actually an objective (since constraints and objectives
                # are grouped together in the community map)
                if self.with_objective and isinstance(stored_constraint, (_GeneralObjectiveData, Objective)):
                    # If the constraint is actually an objective, we add it to the block as an objective
                    new_objective = Objective(
                        expr=replace_expressions(stored_constraint.expr, replace_variables_in_expression_map))
                    structured_model.b[community_key].add_component(str(stored_constraint), new_objective)

                else:
                    # Construct a constraint based on the expression within stored_constraint and the dict we have
                    # created for the purpose of replacing the variables within the constraint expression
                    new_constraint = Constraint(
                        expr=replace_expressions(stored_constraint.expr, replace_variables_in_expression_map))

                    # Add this new constraint to the corresponding community/block with its name as the string of the
                    # constraint from the original model
                    structured_model.b[community_key].add_component(str(stored_constraint), new_constraint)

        # If with_objective was set to False, that means we might have missed an objective function within the
        # original model
        if not self.with_objective:
            # Construct a new dictionary for replacing the variables (replace_variables_in_objective_map) which will
            # be specific to the variables in the objective function, since there is the possibility that the
            # objective contains variables we have not yet seen (and thus not yet added to our new model)
            for objective_function in self.model.component_data_objects(ctype=Objective,
                                                                        active=self.use_only_active_components,
                                                                        descend_into=True):

                for variable_in_objective in identify_variables(objective_function):
                    # Add all of the variables in the objective function (not within any blocks)

                    # Check to make sure a form of the variable has not already been made outside of the blocks
                    if structured_model.find_component(str(variable_in_objective)) is None:

                        new_variable = Var(domain=variable_in_objective.domain, bounds=variable_in_objective.bounds)
                        structured_model.add_component(str(variable_in_objective), new_variable)

                        # Again we update blocked_variable_map to keep track of what
                        # equality constraints need to be made
                        variable_in_new_model = structured_model.find_component(new_variable)
                        blocked_variable_map[variable_in_objective] = blocked_variable_map.get(
                            variable_in_objective, []) + [variable_in_new_model]

                        # Update the dictionary that we will use to replace the variables
                        replace_variables_in_expression_map[id(variable_in_objective)] = variable_in_new_model

                    else:
                        for version_of_variable in blocked_variable_map[variable_in_objective]:
                            if 'b[' not in str(version_of_variable):
                                replace_variables_in_expression_map[id(variable_in_objective)] = version_of_variable

                # Now we will construct a new objective function based on the one from the original model and then
                # add it to the new model just as we have done before
                new_objective = Objective(
                    expr=replace_expressions(objective_function.expr, replace_variables_in_expression_map))
                structured_model.add_component(str(objective_function), new_objective)

        # Now, we need to create equality constraints for all of the different "versions" of a variable (such
        # as x1, b[0].x1, b[2].x2, etc.)

        # Create a constraint list for the equality constraints
        structured_model.equality_constraint_list = ConstraintList(doc="Equality Constraints for the different "
                                                                       "forms of a given variable")

        # Loop through blocked_variable_map and create constraints accordingly
        for variable, duplicate_variables in blocked_variable_map.items():
            # variable -> variable from the original model
            # duplicate_variables -> list of variables in the new model

            # Create a list of all the possible equality constraints that need to be made
            equalities_to_make = combinations(duplicate_variables, 2)

            # Loop through the list of two-variable tuples and create an equality constraint for those two variables
            for variable_1, variable_2 in equalities_to_make:
                structured_model.equality_constraint_list.add(expr=variable_1 == variable_2)

        # Return 'structured_model', which is essentially identical to the original model but now has all of the
        # variables, constraints, and objectives placed into blocks based on the nature of the CommunityMap

        return structured_model
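A usage sketch for obtaining a CommunityMap and calling the method above; detect_communities and its arguments follow pyomo.contrib.community_detection, and model is an assumed existing Pyomo model:

from pyomo.contrib.community_detection.detection import detect_communities

community_map = detect_communities(model, type_of_community_map='constraint')
structured_model = community_map.generate_structured_model()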
Code example #10
def process_objective(solve_data, config, move_objective=False,
                      use_mcpp=False, update_var_con_list=True,
                      partition_nonlinear_terms=True,
                      obj_handleable_polynomial_degree={0, 1},
                      constr_handleable_polynomial_degree={0, 1}):
    """Process model objective function.

    Check that the model has only 1 valid objective.
    If the objective is nonlinear, move it into the constraints.
    If no objective function exists, emit a warning and create a dummy 
    objective.

    Parameters
    ----------
    solve_data (GDPoptSolveData): solver environment data class
    config (ConfigBlock): solver configuration options
    move_objective (bool): if True, move even linear
        objective functions to the constraints
    update_var_con_list (bool): if True, the variable/constraint/objective lists will be updated.
        This arg is set to True by default. Currently, update_var_con_list is set to False only when
        add_regularization is not None in MindtPy.
    partition_nonlinear_terms (bool): if True, partition the sum of nonlinear terms in the objective function.

    """
    m = solve_data.working_model
    util_blk = getattr(m, solve_data.util_block_name)
    # Handle missing or multiple objectives
    active_objectives = list(m.component_data_objects(
        ctype=Objective, active=True, descend_into=True))
    solve_data.results.problem.number_of_objectives = len(active_objectives)
    if len(active_objectives) == 0:
        config.logger.warning(
            'Model has no active objectives. Adding dummy objective.')
        util_blk.dummy_objective = Objective(expr=1)
        main_obj = util_blk.dummy_objective
    elif len(active_objectives) > 1:
        raise ValueError('Model has multiple active objectives.')
    else:
        main_obj = active_objectives[0]
    solve_data.results.problem.sense = ProblemSense.minimize if \
                                       main_obj.sense == 1 else \
                                       ProblemSense.maximize
    solve_data.objective_sense = main_obj.sense

    # Move the objective to the constraints if it is nonlinear or move_objective is True.
    if main_obj.expr.polynomial_degree() not in obj_handleable_polynomial_degree or move_objective:
        if move_objective:
            config.logger.info("Moving objective to constraint set.")
        else:
            config.logger.info(
                "Objective is nonlinear. Moving it to constraint set.")
        util_blk.objective_value = VarList(domain=Reals, initialize=0)
        util_blk.objective_constr = ConstraintList()
        if main_obj.expr.polynomial_degree() not in obj_handleable_polynomial_degree and partition_nonlinear_terms and main_obj.expr.__class__ is EXPR.SumExpression:
            repn = generate_standard_repn(main_obj.expr, quadratic=2 in obj_handleable_polynomial_degree)
            # the following code will also work if linear_subexpr is a constant.
            linear_subexpr = repn.constant + sum(coef*var for coef, var in zip(repn.linear_coefs, repn.linear_vars)) \
                + sum(coef*var1*var2 for coef, (var1, var2) in zip(repn.quadratic_coefs, repn.quadratic_vars))
            # only need to generate one epigraph constraint for the sum of all linear terms and constant
            epigraph_reformulation(linear_subexpr, util_blk.objective_value, util_blk.objective_constr, use_mcpp, main_obj.sense)
            nonlinear_subexpr = repn.nonlinear_expr
            if nonlinear_subexpr.__class__ is EXPR.SumExpression:
                for subsubexpr in nonlinear_subexpr.args:
                    epigraph_reformulation(subsubexpr, util_blk.objective_value, util_blk.objective_constr, use_mcpp, main_obj.sense)
            else:
                epigraph_reformulation(nonlinear_subexpr, util_blk.objective_value, util_blk.objective_constr, use_mcpp, main_obj.sense)
        else:
            epigraph_reformulation(main_obj.expr, util_blk.objective_value, util_blk.objective_constr, use_mcpp, main_obj.sense)

        main_obj.deactivate()
        util_blk.objective = Objective(expr=sum(util_blk.objective_value[:]), sense=main_obj.sense)

        if main_obj.expr.polynomial_degree() not in obj_handleable_polynomial_degree or \
           (move_objective and update_var_con_list):
            util_blk.variable_list.extend(util_blk.objective_value[:])
            util_blk.continuous_variable_list.extend(util_blk.objective_value[:])
            util_blk.constraint_list.extend(util_blk.objective_constr[:])
            util_blk.objective_list.append(util_blk.objective)
            for constr in util_blk.objective_constr[:]:
                if constr.body.polynomial_degree() in constr_handleable_polynomial_degree:
                    util_blk.linear_constraint_list.append(constr)
                else:
                    util_blk.nonlinear_constraint_list.append(constr)
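For reference, a hand-written sketch of the epigraph reformulation this function automates: for minimization, min f(x) is replaced by min z subject to z >= f(x).

import pyomo.environ as pyo

m = pyo.ConcreteModel()
m.x = pyo.Var(bounds=(-5, 5))
m.z = pyo.Var()
# The nonlinear objective moves into a constraint; the objective itself
# becomes linear in the auxiliary variable z.
m.epigraph = pyo.Constraint(expr=m.z >= (m.x - 1) ** 4 + m.x)
m.obj = pyo.Objective(expr=m.z, sense=pyo.minimize)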
Code example #11
File: compute_bounds.py  Project: pazochoa/pyomo
    def _apply_to(self, model):
        """Apply the transformation.

        Args:
            model: Pyomo model object on which to compute disjunctive bounds.

        """
        disjuncts_to_process = list(model.component_data_objects(
            ctype=Disjunct, active=True, descend_into=(Block, Disjunct),
            descent_order=TraversalStrategy.BreadthFirstSearch))
        if model.type() == Disjunct:
            disjuncts_to_process.insert(0, model)

        # Deactivate nonlinear constraints
        model._tmp_constr_deactivated = ComponentSet()
        for constraint in model.component_data_objects(
                ctype=Constraint, active=True,
                descend_into=(Block, Disjunct)):
            if constraint.body.polynomial_degree() not in linear_degrees:
                model._tmp_constr_deactivated.add(constraint)
                constraint.deactivate()

        for disjunct in disjuncts_to_process:
            # If disjunct does not have a component map to store disjunctive
            # bounds, then make one.
            if not hasattr(disjunct, '_disj_var_bounds'):
                disjunct._disj_var_bounds = ComponentMap()

            # fix the disjunct to active, deactivate all nonlinear constraints,
            # and apply the big-M transformation
            old_disjunct_state = {'fixed': disjunct.indicator_var.fixed,
                                  'value': disjunct.indicator_var.value}

            disjunct.indicator_var.fix(1)
            model._tmp_var_set = ComponentSet()
            # Maps a variable in a cloned model instance to the original model
            # variable
            for constraint in disjunct.component_data_objects(
                    ctype=Constraint, active=True, descend_into=True):
                model._tmp_var_set.update(identify_variables(constraint.body))
            model._var_list = list(model._tmp_var_set)
            bigM_model = model.clone()
            new_var_to_orig = ComponentMap(
                zip(bigM_model._var_list, model._var_list))

            TransformationFactory('gdp.bigm').apply_to(bigM_model)
            for var in bigM_model._tmp_var_set:
                # If variable is fixed, no need to calculate disjunctive bounds
                if var.fixed:
                    continue
                # calculate the disjunctive variable bounds for these variables
                # disable all other objectives
                for obj in bigM_model.component_data_objects(ctype=Objective):
                    obj.deactivate()
                bigM_model.del_component('_var_bounding_obj')
                # Calculate the lower bound
                bigM_model._var_bounding_obj = Objective(
                    expr=var, sense=minimize)
                results = SolverFactory('cbc').solve(bigM_model)
                if results.solver.termination_condition is tc.optimal:
                    disj_lb = value(var)
                elif results.solver.termination_condition is tc.infeasible:
                    disj_lb = None
                    # TODO disjunct can be fathomed?
                else:
                    raise NotImplementedError(
                        "Unhandled termination condition: %s"
                        % results.solver.termination_condition)
                # Calculate the upper bound
                bigM_model._var_bounding_obj.sense = maximize
                results = SolverFactory('cbc').solve(bigM_model)
                if results.solver.termination_condition is tc.optimal:
                    disj_ub = value(var)
                elif results.solver.termination_condition is tc.infeasible:
                    disj_ub = None
                    # TODO disjunct can be fathomed?
                else:
                    raise NotImplementedError(
                        "Unhandled termination condition: %s"
                        % results.solver.termination_condition)
                old_bounds = disjunct._disj_var_bounds.get(
                    new_var_to_orig[var], (None, None)  # default of None
                )
                # update bounds values
                disjunct._disj_var_bounds[new_var_to_orig[var]] = (
                    min_if_not_None(disj_lb, old_bounds[0]),
                    max_if_not_None(disj_ub, old_bounds[1]))

            # reset the disjunct
            if not old_disjunct_state['fixed']:
                disjunct.indicator_var.unfix()
            disjunct.indicator_var.set_value(old_disjunct_state['value'])

            # Enforce the disjunctive variable bounds as constraints
            if hasattr(disjunct, '_disjunctive_var_constraints'):
                if getattr(disjunct._disjunctive_var_constraints, 'doc', "")\
                        .startswith("q.Autogenerated"):
                    del disjunct._disjunctive_var_constraints
                else:
                    raise ValueError(
                        'Disjunct %s already has an attribute '
                        '_disjunctive_var_constraints required by the '
                        'gdp_bounds package.' % disjunct.name)
            cons_list = disjunct._disjunctive_var_constraints = ConstraintList(
                doc="q.Autogenerated constraints enforcing "
                "disjunctive variable bounds."
            )
            for var, bounds in iteritems(disjunct._disj_var_bounds):
                lbb, ubb = bounds
                if lbb is not None:
                    cons_list.add(expr=lbb <= var)
                if ubb is not None:
                    cons_list.add(expr=var <= ubb)

        # Reactivate deactivated nonlinear constraints
        for constraint in model._tmp_constr_deactivated:
            constraint.activate()
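
A minimal usage sketch for this transformation, assuming it is registered with the TransformationFactory under 'contrib.compute_disj_var_bounds' and that the hard-coded CBC subsolver is available on the path:

from pyomo.environ import ConcreteModel, Var, TransformationFactory
from pyomo.gdp import Disjunction

m = ConcreteModel()
m.x = Var(bounds=(-10, 10))
m.d = Disjunction(expr=[[m.x >= 2], [m.x <= -3]])
TransformationFactory('contrib.compute_disj_var_bounds').apply_to(m)
# Each disjunct now carries a _disj_var_bounds ComponentMap and an
# autogenerated ConstraintList enforcing the computed bounds.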
Code Example #12
File: basic_step.py Project: bbrunaud/pyomo
def apply_basic_step(disjunctions_or_constraints):
    #
    # Basic steps only apply to XOR'd disjunctions
    #
    disjunctions = list(obj for obj in disjunctions_or_constraints
                        if obj.ctype == Disjunction)
    constraints = list(obj for obj in disjunctions_or_constraints
                       if obj.ctype == Constraint)
    for d in disjunctions:
        if not d.xor:
            raise ValueError(
                "Basic steps can only be applied to XOR'd disjunctions\n\t"
                "(raised by disjunction %s)" % (d.name,))
        if not d.active:
            logger.warning("Warning: applying basic step to a previously "
                           "deactivated disjunction (%s)" % (d.name,))

    ans = Block(concrete=True)
    ans.DISJUNCTIONS = Set(initialize=range(len(disjunctions)))
    ans.INDEX = Set(
        dimen=len(disjunctions),
        initialize=_squish_singletons(itertools.product(
            *tuple( range(len(d.disjuncts)) for d in disjunctions ))))

    #
    # Form the individual disjuncts for the new basic step
    #
    ans.disjuncts = Disjunct(ans.INDEX)
    for idx in ans.INDEX:
        #
        # Each source disjunct will be copied (cloned) into its own
        # subblock
        #
        ans.disjuncts[idx].src = Block(ans.DISJUNCTIONS)
        for i in ans.DISJUNCTIONS:
            tmp = _pseudo_clone(disjunctions[i].disjuncts[
                idx[i] if isinstance(idx, tuple) else idx])
            for k,v in list(tmp.component_map().items()):
                if k == 'indicator_var':
                    continue
                tmp.del_component(k)
                ans.disjuncts[idx].src[i].add_component(k,v)
        # Copy in the constraints corresponding to the improper disjunctions
        ans.disjuncts[idx].improper_constraints = ConstraintList()
        for constr in constraints:
            if constr.is_indexed():
                for indx in constr:
                    ans.disjuncts[idx].improper_constraints.add(
                        (constr[indx].lower, constr[indx].body, constr[indx].upper)
                    )
                    constr[indx].deactivate()
            # need this so that we can take an improper basic step with a
            # ConstraintData
            else:
                ans.disjuncts[idx].improper_constraints.add(
                    (constr.lower, constr.body, constr.upper)
                )
                constr.deactivate()

    #
    # Link the new disjunct indicator_var's to the original
    # indicator_var's.  Since only one of the new disjuncts can be
    # active, each original indicator_var is constrained to equal the
    # sum of the corresponding new indicator_vars.
    #
    NAME_BUFFER = {}
    ans.indicator_links = ConstraintList()
    for i in ans.DISJUNCTIONS:
        for j in range(len(disjunctions[i].disjuncts)):
            orig_var = disjunctions[i].disjuncts[j].indicator_var
            ans.indicator_links.add(
                orig_var ==
                sum( ans.disjuncts[idx].indicator_var for idx in ans.INDEX
                     if (idx[i] if isinstance(idx, tuple) else idx) == j ))
            # and throw on a Reference to original on the block
            name_base = orig_var.getname(fully_qualified=True,
                                         name_buffer=NAME_BUFFER)
            ans.add_component(unique_component_name( ans, name_base),
                              Reference(orig_var))

    # Form the new disjunction
    ans.disjunction = Disjunction(expr=[ans.disjuncts[i] for i in ans.INDEX])

    #
    # Deactivate the old disjunctions / disjuncts
    #
    for i in ans.DISJUNCTIONS:
        disjunctions[i].deactivate()
        for d in disjunctions[i].disjuncts:
            d._deactivate_without_fixing_indicator()

    return ans
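
A short usage sketch (model and component names are hypothetical): intersecting two XOR'd disjunctions with a basic step yields a single disjunction over all pairwise combinations of their disjuncts.

from pyomo.environ import ConcreteModel, Var
from pyomo.gdp import Disjunction
from pyomo.gdp.basic_step import apply_basic_step

m = ConcreteModel()
m.x = Var(bounds=(0, 10))
m.d1 = Disjunction(expr=[[m.x <= 2], [m.x >= 8]])
m.d2 = Disjunction(expr=[[m.x <= 1], [m.x >= 9]])
m.bs = apply_basic_step([m.d1, m.d2])
# m.bs.disjunction has len(m.d1.disjuncts) * len(m.d2.disjuncts) disjuncts;
# the original disjunctions are deactivated by the call.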
Code Example #13
    def solve(self, model, **kwds):
        """Solve the model.

        Warning: this solver is still in beta. Keyword arguments subject to
        change. Undocumented keyword arguments definitely subject to change.

        Args:
            model (Block): a Pyomo model or block to be solved
        """
        config = self.CONFIG(kwds.pop('options', {}))
        config.set_value(kwds)

        solve_data = MindtPySolveData()
        solve_data.results = SolverResults()
        solve_data.timing = Bunch()
        solve_data.curr_int_sol = []
        solve_data.should_terminate = False
        solve_data.integer_list = []

        check_config(config)

        # if the objective function is a constant, the dual bound constraint is not added.
        obj = next(model.component_data_objects(ctype=Objective, active=True))
        if obj.expr.polynomial_degree() == 0:
            config.use_dual_bound = False

        if config.use_fbbt:
            fbbt(model)
            # TODO: logging_level is not logging.INFO here
            config.logger.info(
                'Using FBBT to tighten the variable bounds')

        solve_data.original_model = model
        solve_data.working_model = model.clone()
        if config.integer_to_binary:
            TransformationFactory('contrib.integer_to_binary'). \
                apply_to(solve_data.working_model)

        new_logging_level = logging.INFO if config.tee else None
        with time_code(solve_data.timing, 'total', is_main_timer=True), \
                lower_logger_level_to(config.logger, new_logging_level), \
                create_utility_block(solve_data.working_model, 'MindtPy_utils', solve_data):
            config.logger.info('---Starting MindtPy---')

            MindtPy = solve_data.working_model.MindtPy_utils
            setup_results_object(solve_data, config)
            process_objective(
                solve_data,
                config,
                move_linear_objective=(config.init_strategy == 'FP' or
                                       config.add_regularization is not None),
                use_mcpp=config.use_mcpp,
                update_var_con_list=config.add_regularization is None)
            # The epigraph constraint is very "flat" for branching rules,
            # so we want to use the original model for the main MIP.
            if MindtPy.objective_list[0].expr.polynomial_degree() in {
                    1, 0
            } and config.add_regularization is not None:
                MindtPy.objective_list[0].activate()
                MindtPy.objective_constr.deactivate()
                MindtPy.objective.deactivate()

            # Save model initial values.
            solve_data.initial_var_values = list(
                v.value for v in MindtPy.variable_list)

            # Store the initial model state as the best solution found. If we
            # find no better solution, then we will restore from this copy.
            solve_data.best_solution_found = None
            solve_data.best_solution_found_time = None

            # Record solver name
            solve_data.results.solver.name = 'MindtPy' + str(config.strategy)

            # Validate the model to ensure that MindtPy is able to solve it.
            if not model_is_valid(solve_data, config):
                return

            # Create a model block in which to store the generated feasibility
            # slack constraints. Do not leave the constraints on by default.
            feas = MindtPy.feas_opt = Block()
            feas.deactivate()
            feas.feas_constraints = ConstraintList(
                doc='Feasibility Problem Constraints')

            # Create a model block in which to store the generated linear
            # constraints. Do not leave the constraints on by default.
            lin = MindtPy.cuts = Block()
            lin.deactivate()

            # no-good cuts exclude particular discrete decisions
            lin.no_good_cuts = ConstraintList(doc='no-good cuts')
            # Feasible no-good cuts exclude discrete realizations that have
            # been explored via an NLP subproblem. Depending on model
            # characteristics, the user may wish to revisit NLP subproblems
            # (with a different initialization, for example). Therefore, these
            # cuts are not enabled by default.
            #
            # Note: these cuts will only exclude integer realizations that are
            # not already in the primary no_good_cuts ConstraintList.
            lin.feasible_no_good_cuts = ConstraintList(
                doc='explored no-good cuts')
            lin.feasible_no_good_cuts.deactivate()

            # Set up iteration counters
            solve_data.nlp_iter = 0
            solve_data.mip_iter = 0
            solve_data.mip_subiter = 0
            solve_data.nlp_infeasible_counter = 0
            if config.init_strategy == 'FP':
                solve_data.fp_iter = 1

            # set up bounds
            solve_data.LB = float('-inf')
            solve_data.UB = float('inf')
            solve_data.LB_progress = [solve_data.LB]
            solve_data.UB_progress = [solve_data.UB]
            if config.single_tree and (config.add_no_good_cuts
                                       or config.use_tabu_list):
                solve_data.stored_bound = {}
            if config.strategy == 'GOA' and (config.add_no_good_cuts
                                             or config.use_tabu_list):
                solve_data.num_no_good_cuts_added = {}

            # Set of NLP iterations for which cuts were generated
            lin.nlp_iters = Set(dimen=1)

            # Set of MIP iterations for which cuts were generated in ECP
            lin.mip_iters = Set(dimen=1)

            if config.feasibility_norm == 'L1' or config.feasibility_norm == 'L2':
                feas.nl_constraint_set = RangeSet(
                    len(MindtPy.nonlinear_constraint_list),
                    doc='Integer index set over the nonlinear constraints.')
                # Create slack variables for feasibility problem
                feas.slack_var = Var(feas.nl_constraint_set,
                                     domain=NonNegativeReals,
                                     initialize=1)
            else:
                feas.slack_var = Var(domain=NonNegativeReals, initialize=1)

            # Create slack variables for OA cuts
            if config.add_slack:
                lin.slack_vars = VarList(bounds=(0, config.max_slack),
                                         initialize=0,
                                         domain=NonNegativeReals)

            # Flag indicating whether the solution improved in the past
            # iteration or not
            solve_data.solution_improved = False
            solve_data.bound_improved = False

            if config.nlp_solver == 'ipopt':
                if not hasattr(solve_data.working_model, 'ipopt_zL_out'):
                    solve_data.working_model.ipopt_zL_out = Suffix(
                        direction=Suffix.IMPORT)
                if not hasattr(solve_data.working_model, 'ipopt_zU_out'):
                    solve_data.working_model.ipopt_zU_out = Suffix(
                        direction=Suffix.IMPORT)

            # Initialize the main problem
            with time_code(solve_data.timing, 'initialization'):
                MindtPy_initialize_main(solve_data, config)

            # Algorithm main loop
            with time_code(solve_data.timing, 'main loop'):
                MindtPy_iteration_loop(solve_data, config)
            if solve_data.best_solution_found is not None:
                # Update values in original model
                copy_var_list_values(from_list=solve_data.best_solution_found.
                                     MindtPy_utils.variable_list,
                                     to_list=MindtPy.variable_list,
                                     config=config)
                copy_var_list_values(MindtPy.variable_list, [
                    i
                    for i in solve_data.original_model.component_data_objects(
                        Var) if not i.fixed
                ], config)
                # exclude fixed variables here. This is consistent with the definition of variable_list in GDPopt.util

            solve_data.results.problem.lower_bound = solve_data.LB
            solve_data.results.problem.upper_bound = solve_data.UB

        solve_data.results.solver.timing = solve_data.timing
        solve_data.results.solver.user_time = solve_data.timing.total
        solve_data.results.solver.wallclock_time = solve_data.timing.total
        solve_data.results.solver.iterations = solve_data.mip_iter
        solve_data.results.solver.num_infeasible_nlp_subproblem = solve_data.nlp_infeasible_counter
        solve_data.results.solver.best_solution_found_time = solve_data.best_solution_found_time

        if config.single_tree:
            solve_data.results.solver.num_nodes = solve_data.nlp_iter - \
                (1 if config.init_strategy == 'rNLP' else 0)

        return solve_data.results
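
For context, this solve() is normally reached through the SolverFactory interface. A minimal call sketch, assuming MindtPy and the named subsolvers are installed (the toy model is hypothetical):

from pyomo.environ import (ConcreteModel, Var, Objective, Constraint,
                           SolverFactory, Integers, minimize)

m = ConcreteModel()
m.x = Var(domain=Integers, bounds=(0, 4), initialize=0)
m.y = Var(bounds=(0, 4), initialize=0)
m.c = Constraint(expr=m.y >= (m.x - 1.5)**2)  # convex nonlinear constraint
m.obj = Objective(expr=m.y + 0.1 * m.x, sense=minimize)
results = SolverFactory('mindtpy').solve(
    m, strategy='OA', mip_solver='glpk', nlp_solver='ipopt')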
Code Example #14
    def _construct_bundle_dual_master_model(self, ph):

        self._master_model = ConcreteModel()
        for scenario in ph._scenario_tree._scenarios:
            for tree_node in scenario._node_list[:-1]:
                new_w_variable_name = "WVAR_" + str(
                    tree_node._name) + "_" + str(scenario._name)
                new_w_k_parameter_name = "WDATA_" + str(
                    tree_node._name) + "_" + str(scenario._name) + "_K"
                setattr(self._master_model, new_w_variable_name,
                        Var(tree_node._standard_variable_ids, within=Reals))
                setattr(
                    self._master_model, new_w_k_parameter_name,
                    Param(tree_node._standard_variable_ids,
                          within=Reals,
                          default=0.0,
                          mutable=True))
                setattr(self._master_model, "V_" + str(scenario._name),
                        Var(within=Reals))
                # HERE - NEED TO MAKE CK VARIABLE-DEPENDENT - PLUS WE NEED A SANE INITIAL VALUE (AND SUBSEQUENT VALUE)
        # DLW SAYS NO - THIS SHOULD BE VARIABLE-SPECIFIC
        setattr(self._master_model, "CK", Param(default=1.0, mutable=True))

        def obj_rule(m):
            expr = 0.0
            for scenario in ph._scenario_tree._scenarios:
                for tree_node in scenario._node_list[:-1]:
                    new_w_variable_name = "WVAR_" + str(
                        tree_node._name) + "_" + str(scenario._name)
                    new_w_k_parameter_name = "WDATA_" + str(
                        tree_node._name) + "_" + str(scenario._name) + "_K"
                    w_variable = m.find_component(new_w_variable_name)
                    w_k_parameter = m.find_component(new_w_k_parameter_name)
                    expr += 1.0 / (2.0 * m.CK) * sum(
                        w_variable[i]**2 -
                        2.0 * w_variable[i] * w_k_parameter[i]
                        for i in w_variable)
                expr -= getattr(m, "V_" + str(scenario._name))
            return expr

        self._master_model.TheObjective = Objective(sense=minimize,
                                                    rule=obj_rule)

        self._master_model.W_Balance = ConstraintList()

        for stage in ph._scenario_tree._stages[:-1]:

            for tree_node in stage._tree_nodes:

                # GABE SHOULD HAVE A SERVICE FOR THIS???
                for idx in tree_node._standard_variable_ids:

                    expr = 0.0
                    for scenario in tree_node._scenarios:
                        scenario_probability = scenario._probability
                        new_w_variable_name = "WVAR_" + str(
                            tree_node._name) + "_" + str(scenario._name)
                        w_variable = self._master_model.find_component(
                            new_w_variable_name)
                        expr += scenario_probability * w_variable[idx]

                    self._master_model.W_Balance.add(expr == 0.0)

        # we can't populate until we see data from PH....
        self._master_model.V_Bound = ConstraintList()
Code Example #15
def add_outer_approximation_cuts(nlp_result, solve_data, config):
    """Add outer approximation cuts to the linear GDP model."""
    m = solve_data.linear_GDP
    GDPopt = m.GDPopt_utils
    sign_adjust = -1 if GDPopt.objective.sense == minimize else 1

    # copy values over
    for var, val in zip(GDPopt.working_var_list, nlp_result.var_values):
        if val is not None and not var.fixed:
            var.value = val

    # TODO some kind of special handling if the dual is phenomenally small?
    config.logger.debug('Adding OA cuts.')

    nonlinear_constraints = ComponentSet(GDPopt.working_nonlinear_constraints)
    counter = 0
    for constr, dual_value in zip(GDPopt.working_constraints_list,
                                  nlp_result.dual_values):
        if dual_value is None or constr not in nonlinear_constraints:
            continue

        # Determine if the user pre-specified that OA cuts should not be
        # generated for the given constraint.
        parent_block = constr.parent_block()
        ignore_set = getattr(parent_block, 'GDPopt_ignore_OA', None)
        config.logger.debug('Ignore_set %s' % ignore_set)
        if (ignore_set and
            (constr in ignore_set or constr.parent_component() in ignore_set)):
            config.logger.debug(
                'OA cut addition for %s skipped because it is in '
                'the ignore set.' % constr.name)
            continue

        config.logger.debug("Adding OA cut for %s with dual value %s" %
                            (constr.name, dual_value))

        # TODO make this more efficient by not having to use differentiate()
        # at each iteration.
        constr_vars = list(EXPR.identify_variables(constr.body))
        jac_list = differentiate(constr.body, wrt_list=constr_vars)
        jacobians = ComponentMap(zip(constr_vars, jac_list))

        # Create a block on which to put outer approximation cuts.
        oa_utils = parent_block.component('GDPopt_OA')
        if oa_utils is None:
            oa_utils = parent_block.GDPopt_OA = Block(
                doc="Block holding outer approximation cuts "
                "and associated data.")
            oa_utils.GDPopt_OA_cuts = ConstraintList()
            oa_utils.GDPopt_OA_slacks = VarList(bounds=(0, config.max_slack),
                                                domain=NonNegativeReals,
                                                initialize=0)

        oa_cuts = oa_utils.GDPopt_OA_cuts
        slack_var = oa_utils.GDPopt_OA_slacks.add()
        oa_cuts.add(expr=copysign(1, sign_adjust * dual_value) *
                    (value(constr.body) + sum(
                        value(jacobians[var]) * (var - value(var))
                        for var in constr_vars)) + slack_var <= 0)
        counter += 1

    config.logger.info('Added %s OA cuts' % counter)
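
The cut added in the loop above is the first-order outer approximation g(x*) + grad g(x*)^T (x - x*) + slack <= 0, sign-adjusted by the constraint dual. A toy illustration of the same expression for a single constraint, using the newer derivatives interface rather than the symbolic differentiate() used above (names hypothetical):

from pyomo.environ import ConcreteModel, Var, value
from pyomo.core.expr.calculus.derivatives import differentiate

m = ConcreteModel()
m.x = Var(initialize=2.0)
g = m.x**2 - 4  # linearize g(x) <= 0 around the current point x* = 2
dg_dx = differentiate(g, wrt=m.x)
oa_cut_lhs = value(g) + value(dg_dx) * (m.x - value(m.x))
# 'oa_cut_lhs <= 0' is what would be added to the ConstraintList, with the
# sign and slack handling shown in the loop above.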
Code Example #16
def set_up_jumps(model, kwargs):
    """Takes the kwargs from the Estimator classes and initializes the dosing points (or any other type
    of sudden change in the states)

    :param ConcreteModel model: Pyomo model (from Variance or Parameter Estimator)
    :param dict kwargs: Keyword arguments; expected keys:
        jump_states (dict): Dict of states
        jump_times (dict): Dict of jump times
        feed_times (array-like): List of feed times (could be a set)

    :return: None
    
    """
    var_dict = kwargs.pop("jump_states", None)
    jump_times = kwargs.pop("jump_times", None)
    feed_times = kwargs.pop("feed_times", None)

    if not isinstance(var_dict, dict):
        raise TypeError(
            "jump_states (disc_jump_v_dict) is of type {}; expected dict".format(type(var_dict)))
    if not isinstance(jump_times, dict):
        raise TypeError(
            "jump_times (disc_jump_times) is of type {}; expected dict".format(type(jump_times)))
    count = 0

    for times in jump_times.values():
        count += len(times)
    if len(feed_times) > count:
        raise Exception(
            "Error: Check feed time points in set feed_times and in jump_times again.\n"
            "There are more time points in feed_times than jump_times provided."
        )

    time_set = None
    for i in model.component_objects(ContinuousSet):
        time_set = i
        break
    if time_set is None:
        raise Exception('No continuous_set')

    time_set = time_set.name

    ttgt = getattr(model, time_set)
    ncp = ttgt.get_discretization_info()['ncp']
    fe_l = ttgt.get_finite_elements()
    fe_list = [fe_l[i + 1] - fe_l[i] for i in range(0, len(fe_l) - 1)]

    for fe in range(0, len(fe_list)):

        vs = ReplacementVisitor()
        kn = 0
        for ki in jump_times.keys():
            if not isinstance(ki, str):
                print("ki is not str")
            vtjumpkeydict = jump_times[ki]
            for l in vtjumpkeydict.keys():
                jump_time = vtjumpkeydict[l]
                jump_fe, jump_cp = fe_cp(ttgt, jump_time)
                if jump_time not in feed_times:
                    raise Exception(
                        "Error: Check feed time points in set feed_times and in jump_times again.\n"
                        "They do not match.\n"
                        "Jump_time is not included in feed_times.")
                if fe == jump_fe + 1:
                    for v in var_dict.keys():
                        if not isinstance(v, str):
                            print("v is not str")
                        vkeydict = var_dict[v]
                        for k in vkeydict.keys():
                            if k == l:  # Match in between two components of dictionaries
                                var = getattr(model, v)
                                #dvar = getattr(model, "d" + v + "dt")
                                con_name = 'd' + v + 'dt_disc_eq'
                                con = getattr(model, con_name)

                                model.add_component(v + "_dummy_eq_" + str(kn),
                                                    ConstraintList())
                                conlist = getattr(model,
                                                  v + "_dummy_eq_" + str(kn))
                                varname = v + "_dummy_" + str(kn)
                                model.add_component(varname, Var(
                                    [0]))  #: this is now indexed [0]
                                vdummy = getattr(model, varname)
                                vs.change_replacement(
                                    vdummy[0])  #: who is replacing.
                                jump_delta = vkeydict[k]
                                model.add_component(
                                    v + '_jumpdelta' + str(kn),
                                    Param(initialize=jump_delta))
                                jump_param = getattr(
                                    model, v + '_jumpdelta' + str(kn))
                                if not isinstance(k, tuple):
                                    k = (k, )
                                exprjump = vdummy[0] - var[
                                    (jump_time, ) +
                                    k] == jump_param  #: this cha
                                # exprjump = vdummy - var[(self.jump_time,) + k] == jump_param
                                model.add_component("jumpdelta_expr" + str(kn),
                                                    Constraint(expr=exprjump))
                                for kcp in range(1, ncp + 1):
                                    curr_time = t_ij(ttgt, jump_fe + 1, kcp)
                                    if not isinstance(k, tuple):
                                        knew = (k, )
                                    else:
                                        knew = k
                                    idx = (curr_time, ) + knew
                                    con[idx].deactivate()
                                    e = con[idx].expr
                                    suspect_var = e.args[0].args[1].args[
                                        0].args[0].args[1]  #: seems that
                                    vs.change_suspect(
                                        id(suspect_var))  #: who to replace
                                    e_new = vs.dfs_postorder_stack(
                                        e)  #: replace
                                    con[idx].set_value(e_new)
                                    conlist.add(con[idx].expr)
                kn = kn + 1
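
A sketch of the keyword structure this function expects (state and component names are hypothetical; the function pops exactly these three keys from kwargs):

kwargs = {
    'jump_states': {'Z': {'A': 0.5}},   # state -> {component: jump magnitude}
    'jump_times':  {'Z': {'A': 1.25}},  # state -> {component: dosing time}
    'feed_times':  [1.25],              # every jump time must also appear here
}
# set_up_jumps(model, kwargs)  # model: a discretized Pyomo ConcreteModel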
Code Example #17
File: basic_step.py Project: zy09838/pyomo
def apply_basic_step(disjunctions_or_constraints):
    #
    # Basic steps only apply to XOR'd disjunctions
    #
    disjunctions = list(obj for obj in disjunctions_or_constraints
                        if obj.type() == Disjunction)
    constraints = list(obj for obj in disjunctions_or_constraints
                       if obj.type() == Constraint)
    for d in disjunctions:
        if not d.xor:
            raise ValueError(
                "Basic steps can only be applied to XOR'd disjunctions\n\t"
                "(raised by disjunction %s)" % (d.name, ))
        if not d.active:
            logger.warning("Warning: applying basic step to a previously "
                           "deactivated disjunction (%s)" % (d.name, ))

    ans = Block(concrete=True)
    ans.DISJUNCTIONS = Set(initialize=xrange(len(disjunctions)))
    ans.INDEX = Set(dimen=len(disjunctions),
                    initialize=_squish_singletons(
                        itertools.product(*tuple(
                            xrange(len(d.disjuncts)) for d in disjunctions))))

    #
    # Form the individual disjuncts for the new basic step
    #
    ans.disjuncts = Disjunct(ans.INDEX)
    for idx in ans.INDEX:
        #
        # Each source disjunct will be copied (cloned) into its own
        # subblock
        #
        ans.disjuncts[idx].src = Block(ans.DISJUNCTIONS)
        for i in ans.DISJUNCTIONS:
            tmp = _pseudo_clone(
                disjunctions[i].disjuncts[idx[i] if isinstance(idx, tuple
                                                               ) else idx])
            for k, v in list(iteritems(tmp.component_map())):
                if k == 'indicator_var':
                    continue
                tmp.del_component(k)
                ans.disjuncts[idx].src[i].add_component(k, v)
        # Copy in the constraints corresponding to the improper disjunctions
        ans.disjuncts[idx].improper_constraints = ConstraintList()
        for constr in constraints:
            for indx in constr:
                ans.disjuncts[idx].improper_constraints.add(
                    (constr[indx].lower, constr[indx].body,
                     constr[indx].upper))
                constr[indx].deactivate()

    #
    # Link the new disjunct indicator_var's to the original
    # indicator_var's.  Since only one of the new disjuncts can be
    # active, each original indicator_var is constrained to equal the
    # sum of the corresponding new indicator_vars.
    #
    ans.indicator_links = ConstraintList()
    for i in ans.DISJUNCTIONS:
        for j in xrange(len(disjunctions[i].disjuncts)):
            ans.indicator_links.add(
                disjunctions[i].disjuncts[j].indicator_var == sum(
                    ans.disjuncts[idx].indicator_var for idx in ans.INDEX
                    if (idx[i] if isinstance(idx, tuple) else idx) == j))

    # Form the new disjunction
    ans.disjunction = Disjunction(expr=[ans.disjuncts[i] for i in ans.INDEX])

    #
    # Deactivate the old disjunctions / disjuncts
    #
    for i in ans.DISJUNCTIONS:
        disjunctions[i].deactivate()
        for d in disjunctions[i].disjuncts:
            d.deactivate()

    return ans
Code Example #18
    def solve(self, model, **kwds):
        """Solve the model.
        Warning: this solver is still in beta. Keyword arguments subject to
        change. Undocumented keyword arguments definitely subject to change.
        Warning: at this point in time, if you try to use PSC or GBD with
        anything other than IPOPT as the NLP solver, bad things will happen.
        This is because the suffixes are not in place to extract dual values
        from the variable bounds for any other solver.
        TODO: fix needed with the GBD implementation.
        Args:
            model (Block): a Pyomo model or block to be solved
        """
        config = self.CONFIG(kwds.pop('options', {}))
        config.set_value(kwds)

        # configuration confirmation
        if config.single_tree:
            config.iteration_limit = 1
            config.add_slack = False
            config.add_nogood_cuts = False
            config.mip_solver = 'cplex_persistent'
            config.logger.info(
                "Single tree implementation is activated. The defalt MIP solver is 'cplex_persistent'"
            )
        # if the slacks are fixed to zero, just don't add them
        if config.max_slack == 0.0:
            config.add_slack = False

        if config.strategy == "GOA":
            config.add_nogood_cuts = True
            config.add_slack = True
            config.use_mcpp = True
            config.integer_to_binary = True
            config.use_dual = False
            config.use_fbbt = True

        if config.nlp_solver == "baron":
            config.use_dual = False
        # if ecp tolerance is not provided use bound tolerance
        if config.ecp_tolerance is None:
            config.ecp_tolerance = config.bound_tolerance

        # if the objective function is a constant, the dual bound constraint is not added.
        obj = next(model.component_data_objects(ctype=Objective, active=True))
        if obj.expr.polynomial_degree() == 0:
            config.use_dual_bound = False

        solve_data = MindtPySolveData()
        solve_data.results = SolverResults()
        solve_data.timing = Container()
        solve_data.curr_int_sol = []
        solve_data.prev_int_sol = []

        if config.use_fbbt:
            fbbt(model)
            config.logger.info(
                "Using FBBT to tighten the variable bounds")

        solve_data.original_model = model
        solve_data.working_model = model.clone()
        if config.integer_to_binary:
            TransformationFactory('contrib.integer_to_binary'). \
                apply_to(solve_data.working_model)

        new_logging_level = logging.INFO if config.tee else None
        with time_code(solve_data.timing, 'total', is_main_timer=True), \
                lower_logger_level_to(config.logger, new_logging_level), \
                create_utility_block(solve_data.working_model, 'MindtPy_utils', solve_data):
            config.logger.info("---Starting MindtPy---")

            MindtPy = solve_data.working_model.MindtPy_utils
            setup_results_object(solve_data, config)
            process_objective(solve_data, config, use_mcpp=config.use_mcpp)

            # Save model initial values.
            solve_data.initial_var_values = list(
                v.value for v in MindtPy.variable_list)

            # Store the initial model state as the best solution found. If we
            # find no better solution, then we will restore from this copy.
            solve_data.best_solution_found = None
            solve_data.best_solution_found_time = None

            # Record solver name
            solve_data.results.solver.name = 'MindtPy' + str(config.strategy)

            # Validate the model to ensure that MindtPy is able to solve it.
            if not model_is_valid(solve_data, config):
                return

            # Create a model block in which to store the generated feasibility
            # slack constraints. Do not leave the constraints on by default.
            feas = MindtPy.MindtPy_feas = Block()
            feas.deactivate()
            feas.feas_constraints = ConstraintList(
                doc='Feasibility Problem Constraints')

            # Create a model block in which to store the generated linear
            # constraints. Do not leave the constraints on by default.
            lin = MindtPy.MindtPy_linear_cuts = Block()
            lin.deactivate()

            # Integer cuts exclude particular discrete decisions
            lin.integer_cuts = ConstraintList(doc='integer cuts')
            # Feasible integer cuts exclude discrete realizations that have
            # been explored via an NLP subproblem. Depending on model
            # characteristics, the user may wish to revisit NLP subproblems
            # (with a different initialization, for example). Therefore, these
            # cuts are not enabled by default.
            #
            # Note: these cuts will only exclude integer realizations that are
            # not already in the primary integer_cuts ConstraintList.
            lin.feasible_integer_cuts = ConstraintList(
                doc='explored integer cuts')
            lin.feasible_integer_cuts.deactivate()

            # Set up iteration counters
            solve_data.nlp_iter = 0
            solve_data.mip_iter = 0
            solve_data.mip_subiter = 0

            # set up bounds
            solve_data.LB = float('-inf')
            solve_data.UB = float('inf')
            solve_data.LB_progress = [solve_data.LB]
            solve_data.UB_progress = [solve_data.UB]
            if config.single_tree and config.add_nogood_cuts:
                solve_data.stored_bound = {}
            if config.strategy == 'GOA' and config.add_nogood_cuts:
                solve_data.num_no_good_cuts_added = {}

            # Set of NLP iterations for which cuts were generated
            lin.nlp_iters = Set(dimen=1)

            # Set of MIP iterations for which cuts were generated in ECP
            lin.mip_iters = Set(dimen=1)

            if config.feasibility_norm == 'L1' or config.feasibility_norm == 'L2':
                feas.nl_constraint_set = Set(
                    initialize=[
                        i
                        for i, constr in enumerate(MindtPy.constraint_list, 1)
                        if constr.body.polynomial_degree() not in (1, 0)
                    ],
                    doc="Integer index set over the nonlinear constraints."
                    "The set corresponds to the index of nonlinear constraint in constraint_set"
                )
                # Create slack variables for feasibility problem
                feas.slack_var = Var(feas.nl_constraint_set,
                                     domain=NonNegativeReals,
                                     initialize=1)
            else:
                feas.slack_var = Var(domain=NonNegativeReals, initialize=1)

            # Create slack variables for OA cuts
            if config.add_slack:
                lin.slack_vars = VarList(bounds=(0, config.max_slack),
                                         initialize=0,
                                         domain=NonNegativeReals)

            # Flag indicating whether the solution improved in the past
            # iteration or not
            solve_data.solution_improved = False

            if config.nlp_solver == 'ipopt':
                if not hasattr(solve_data.working_model, 'ipopt_zL_out'):
                    solve_data.working_model.ipopt_zL_out = Suffix(
                        direction=Suffix.IMPORT)
                if not hasattr(solve_data.working_model, 'ipopt_zU_out'):
                    solve_data.working_model.ipopt_zU_out = Suffix(
                        direction=Suffix.IMPORT)

            # Initialize the master problem
            with time_code(solve_data.timing, 'initialization'):
                MindtPy_initialize_master(solve_data, config)

            # Algorithm main loop
            with time_code(solve_data.timing, 'main loop'):
                MindtPy_iteration_loop(solve_data, config)

            if solve_data.best_solution_found is not None:
                # Update values in original model
                copy_var_list_values(from_list=solve_data.best_solution_found.
                                     MindtPy_utils.variable_list,
                                     to_list=MindtPy.variable_list,
                                     config=config)
                # MindtPy.objective_value.set_value(
                #     value(solve_data.working_objective_expr, exception=False))
                copy_var_list_values(
                    MindtPy.variable_list,
                    solve_data.original_model.component_data_objects(Var),
                    config)

            solve_data.results.problem.lower_bound = solve_data.LB
            solve_data.results.problem.upper_bound = solve_data.UB

        solve_data.results.solver.timing = solve_data.timing
        solve_data.results.solver.user_time = solve_data.timing.total
        solve_data.results.solver.wallclock_time = solve_data.timing.total

        solve_data.results.solver.iterations = solve_data.mip_iter
        solve_data.results.solver.best_solution_found_time = solve_data.best_solution_found_time

        if config.single_tree:
            solve_data.results.solver.num_nodes = solve_data.nlp_iter - \
                (1 if config.init_strategy == 'rNLP' else 0)

        return solve_data.results
Code Example #19
File: PyomoInterface.py Project: CanLi1/pyomo-1
    def transformForTrustRegion(self, model, eflist):
        # transform the model into a suitable form for the TRF method
        #
        # Arguments:
        # model : pyomo model containing ExternalFunctions
        # eflist : a list of the external functions that will be
        #   handled with TRF method rather than calls to compiled code

        efSet = set([id(x) for x in eflist])

        TRF = Block()

        # Get all variables
        seenVar = set()
        allVariables = []
        for var in model.component_data_objects(Var):
            if id(var) not in seenVar:
                seenVar.add(id(var))
                allVariables.append(var)

        # This assumes that an external function call is present, required!
        model.add_component(unique_component_name(model, 'tR'), TRF)
        TRF.y = VarList()
        TRF.x = VarList()
        TRF.conset = ConstraintList()
        TRF.external_fcns = []
        TRF.exfn_xvars = []

        # TODO: Copy constraints onto block so that transformation can be reversed.

        for con in model.component_data_objects(Constraint, active=True):
            con.set_value((con.lower, self.substituteEF(con.body, TRF,
                                                        efSet), con.upper))
        for obj in model.component_data_objects(Objective, active=True):
            obj.set_value(self.substituteEF(obj.expr, TRF, efSet))
            ## Assume only one active objective function here
            self.objective = obj

        if self.objective.sense == maximize:
            self.objective.expr = -1 * self.objective.expr
            self.objective.sense = minimize

        # xvars and zvars are lists of x and z variables as in the paper
        TRF.xvars = []
        TRF.zvars = []
        seenVar = set()
        for varss in TRF.exfn_xvars:
            for var in varss:
                if id(var) not in seenVar:
                    seenVar.add(id(var))
                    TRF.xvars.append(var)

        for var in allVariables:
            if id(var) not in seenVar:
                seenVar.add(id(var))
                TRF.zvars.append(var)

        # TODO: build dict for exfn_xvars
        # assume it is not bottleneck of the code
        self.exfn_xvars_ind = []
        for varss in TRF.exfn_xvars:
            listtmp = []
            for var in varss:
                for i in range(len(TRF.xvars)):
                    if (id(var) == id(TRF.xvars[i])):
                        listtmp.append(i)
                        break

            self.exfn_xvars_ind.append(listtmp)

        return TRF
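
This transformation is driven by the contrib trust-region solver rather than called directly. A hedged invocation sketch, assuming the solver is registered as 'trustregion' and that the model carries an ExternalFunction component m.ef handled via eflist:

from pyomo.environ import SolverFactory

# opt = SolverFactory('trustregion')
# opt.solve(m, [m.ef])  # eflist: the external functions to surrogate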
Code Example #20
def create_ef_instance(scenario_tree,
                       ef_instance_name="MASTER",
                       verbose_output=False,
                       generate_weighted_cvar=False,
                       cvar_weight=None,
                       risk_alpha=None,
                       cc_indicator_var_name=None,
                       cc_alpha=0.0):

    #
    # create the new and empty binding instance.
    #

    # scenario tree must be "linked" with a set of instances
    # to use this function
    scenario_instances = {}
    for scenario in scenario_tree.scenarios:
        if scenario._instance is None:
            raise ValueError("Cannot construct extensive form instance. "
                             "The scenario tree does not appear to be linked "
                             "to any Pyomo models. Missing model for scenario "
                             "with name: %s" % (scenario.name))
        scenario_instances[scenario.name] = scenario._instance

    binding_instance = ConcreteModel(name=ef_instance_name)
    root_node = scenario_tree.findRootNode()

    opt_sense = minimize \
                if (scenario_tree._scenarios[0]._instance_objective.is_minimizing()) \
                   else maximize

    #
    # validate cvar options, if specified.
    #
    cvar_excess_vardatas = []
    if generate_weighted_cvar:
        if (cvar_weight is None) or (cvar_weight < 0.0):
            raise RuntimeError(
                "Weight of CVaR term must be >= 0.0 - value supplied=" +
                str(cvar_weight))
        if (risk_alpha is None) or (risk_alpha <= 0.0) or (risk_alpha >= 1.0):
            raise RuntimeError(
                "CVaR risk alpha must be between 0 and 1, exclusive - value supplied="
                + str(risk_alpha))

        if verbose_output:
            print("Writing CVaR weighted objective")
            print("CVaR term weight=" + str(cvar_weight))
            print("CVaR alpha=" + str(risk_alpha))
            print("")

        # create the eta and excess variable on a per-scenario basis,
        # in addition to the constraint relating to the two.

        cvar_eta_variable_name = "CVAR_ETA_" + str(root_node._name)
        cvar_eta_variable = Var()
        binding_instance.add_component(cvar_eta_variable_name,
                                       cvar_eta_variable)

        excess_var_domain = NonNegativeReals if (opt_sense == minimize) else \
                            NonPositiveReals

        compute_excess_constraint = \
            binding_instance.COMPUTE_SCENARIO_EXCESS = \
                ConstraintList()

        for scenario in scenario_tree._scenarios:

            cvar_excess_variable_name = "CVAR_EXCESS_" + scenario._name
            cvar_excess_variable = Var(domain=excess_var_domain)
            binding_instance.add_component(cvar_excess_variable_name,
                                           cvar_excess_variable)

            compute_excess_expression = cvar_excess_variable
            compute_excess_expression -= scenario._instance_cost_expression
            compute_excess_expression += cvar_eta_variable
            if opt_sense == maximize:
                compute_excess_expression *= -1

            compute_excess_constraint.add(
                (0.0, compute_excess_expression, None))

            cvar_excess_vardatas.append(
                (cvar_excess_variable, scenario._probability))

    # the individual scenario instances are sub-blocks of the binding instance.
    for scenario in scenario_tree._scenarios:
        scenario_instance = scenario_instances[scenario._name]
        binding_instance.add_component(str(scenario._name), scenario_instance)
        # Now deactivate the scenario instance Objective since we are creating
        # a new master objective
        scenario._instance_objective.deactivate()

    # walk the scenario tree - create variables representing the
    # common values for all scenarios associated with that node, along
    # with equality constraints to enforce non-anticipativity.  also
    # create expected cost variables for each node, to be computed via
    # constraints/objectives defined in a subsequent pass. master
    # variables are created for all nodes but those in the last
    # stage. expected cost variables are, for no particularly good
    # reason other than easy coding, created for nodes in all stages.
    if verbose_output:
        print("Creating variables for master binding instance")

    _cmap = binding_instance.MASTER_CONSTRAINT_MAP = ComponentMap()
    for stage in scenario_tree._stages[:-1]:  # skip the leaf stage

        for tree_node in stage._tree_nodes:

            # create the master blending variable and constraints for this node
            master_blend_variable_name = \
                "MASTER_BLEND_VAR_"+str(tree_node._name)
            master_blend_constraint_name = \
                "MASTER_BLEND_CONSTRAINT_"+str(tree_node._name)

            # don't create master variables for derived
            # stage variables as they will not be used in
            # the problem, and their values would likely
            # never be consistent with what is stored on the
            # scenario variables
            master_variable_index = Set(
                initialize=sorted(tree_node._standard_variable_ids),
                ordered=True,
                name=master_blend_variable_name + "_index")

            binding_instance.add_component(
                master_blend_variable_name + "_index", master_variable_index)

            master_variable = Var(master_variable_index,
                                  name=master_blend_variable_name)

            binding_instance.add_component(master_blend_variable_name,
                                           master_variable)

            master_constraint = ConstraintList(
                name=master_blend_constraint_name)

            binding_instance.add_component(master_blend_constraint_name,
                                           master_constraint)

            tree_node_variable_datas = tree_node._variable_datas
            for variable_id in sorted(tree_node._standard_variable_ids):
                master_vardata = master_variable[variable_id]
                vardatas = tree_node_variable_datas[variable_id]
                # Don't blend fixed variables
                if not tree_node.is_variable_fixed(variable_id):
                    for scenario_vardata, scenario_probability in vardatas:
                        _cmap[scenario_vardata] = master_constraint.add(
                            (master_vardata - scenario_vardata, 0.0))

    if generate_weighted_cvar:

        cvar_cost_expression_name = "CVAR_COST_" + str(root_node._name)
        cvar_cost_expression = Expression(name=cvar_cost_expression_name)
        binding_instance.add_component(cvar_cost_expression_name,
                                       cvar_cost_expression)

    # create an expression to represent the expected cost at the root node
    binding_instance.EF_EXPECTED_COST = \
        Expression(initialize=sum(scenario._probability * \
                                  scenario._instance_cost_expression
                                  for scenario in scenario_tree._scenarios))

    opt_expression = \
        binding_instance.MASTER_OBJECTIVE_EXPRESSION = \
            Expression(initialize=binding_instance.EF_EXPECTED_COST)

    if generate_weighted_cvar:
        cvar_cost_expression_name = "CVAR_COST_" + str(root_node._name)
        cvar_cost_expression = \
            binding_instance.find_component(cvar_cost_expression_name)
        if cvar_weight == 0.0:
            # if the cvar weight is 0, then we're only
            # doing cvar - no mean.
            opt_expression.set_value(cvar_cost_expression)
        else:
            opt_expression.expr += cvar_weight * cvar_cost_expression

    binding_instance.MASTER = Objective(sense=opt_sense, expr=opt_expression)

    # CVaR requires the addition of a variable per scenario to
    # represent the cost excess, and a constraint to compute the cost
    # excess relative to eta.
    if generate_weighted_cvar:

        # add the constraint to compute the master CVaR variable value. iterate
        # over scenario instances to create the expected excess component first.
        cvar_cost_expression_name = "CVAR_COST_" + str(root_node._name)
        cvar_cost_expression = binding_instance.find_component(
            cvar_cost_expression_name)
        cvar_eta_variable_name = "CVAR_ETA_" + str(root_node._name)
        cvar_eta_variable = binding_instance.find_component(
            cvar_eta_variable_name)

        # CVaR term: eta + E[excess] / (1 - alpha)
        cost_expr = 0.0
        for scenario_excess_vardata, scenario_probability in cvar_excess_vardatas:
            cost_expr += (scenario_probability * scenario_excess_vardata)
        cost_expr /= (1.0 - risk_alpha)
        cost_expr += cvar_eta_variable

        cvar_cost_expression.set_value(cost_expr)

    if cc_indicator_var_name is not None:
        if verbose_output is True:
            print("Creating chance constraint for indicator variable= " +
                  cc_indicator_var_name)
            print("with alpha= " + str(cc_alpha))
        if not isVariableNameIndexed(cc_indicator_var_name):
            cc_expression = 0  #??????
            for scenario in scenario_tree._scenarios:
                scenario_instance = scenario_instances[scenario._name]
                scenario_probability = scenario._probability
                cc_var = scenario_instance.find_component(
                    cc_indicator_var_name)

                cc_expression += scenario_probability * cc_var

            def makeCCRule(expression):
                def CCrule(model):
                    return (1.0 - cc_alpha, expression, None)

                return CCrule

            cc_constraint_name = "cc_" + cc_indicator_var_name
            cc_constraint = Constraint(name=cc_constraint_name,
                                       rule=makeCCRule(cc_expression))
            binding_instance.add_component(cc_constraint_name, cc_constraint)
        else:
            print("multiple cc not yet supported.")
            variable_name, index_template = extractVariableNameAndIndex(
                cc_indicator_var_name)

            # verify that the root variable exists and grab it.
            # NOTE: we are using whatever scenario happens to be lying around... it might be better to use the reference
            variable = scenario_instance.find_component(variable_name)
            if variable is None:
                raise RuntimeError("Unknown variable=" + variable_name +
                                   " referenced as the CC indicator variable.")

            # extract all "real", i.e., fully specified, indices matching the index template.
            match_indices = extractComponentIndices(variable, index_template)

            # there is a possibility that no indices match the input template.
            # if so, let the user know about it.
            if len(match_indices) == 0:
                raise RuntimeError("No indices match template=" +
                                   str(index_template) + " for variable=" +
                                   variable_name)

            # add the suffix to all variable values identified.
            for index in match_indices:
                variable_value = variable[index]

                cc_expression = 0  #??????
                for scenario in scenario_tree._scenarios:
                    scenario_instance = scenario_instances[scenario._name]
                    scenario_probability = scenario._probability
                    cc_var = scenario_instance.find_component(
                        variable_name)[index]

                    cc_expression += scenario_probability * cc_var

                def makeCCRule(expression):
                    def CCrule(model):
                        return (1.0 - cc_alpha, expression, None)

                    return CCrule

                indexasname = ''
                for c in str(index):
                    if c not in ' ,':
                        indexasname += c
                cc_constraint_name = "cc_" + variable_name + "_" + indexasname

                cc_constraint = Constraint(name=cc_constraint_name,
                                           rule=makeCCRule(cc_expression))
                binding_instance.add_component(cc_constraint_name,
                                               cc_constraint)

    return binding_instance
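
A call sketch mirroring the signature above; scenario_tree is assumed to be a PySP ScenarioTree already linked to constructed scenario instances:

ef = create_ef_instance(scenario_tree,
                        ef_instance_name='MASTER',
                        generate_weighted_cvar=True,
                        cvar_weight=0.5,
                        risk_alpha=0.95,
                        verbose_output=True)
# ef is a ConcreteModel whose MASTER objective blends expected cost with
# the CVaR term weighted by cvar_weight.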
Code Example #21
    def _apply_to_impl(self, instance, config):
        vars_to_eliminate = config.vars_to_eliminate
        self.constraint_filter = config.constraint_filtering_callback
        self.do_integer_arithmetic = config.do_integer_arithmetic
        self.integer_tolerance = config.integer_tolerance
        self.zero_tolerance = config.zero_tolerance
        if vars_to_eliminate is None:
            raise RuntimeError(
                "The Fourier-Motzkin Elimination transformation "
                "requires the argument vars_to_eliminate, a "
                "list of Vars to be projected out of the model.")

        # make transformation block
        transBlockName = unique_component_name(
            instance, '_pyomo_contrib_fme_transformation')
        transBlock = Block()
        instance.add_component(transBlockName, transBlock)
        projected_constraints = transBlock.projected_constraints = \
                                ConstraintList()

        # collect all of the constraints
        # NOTE that we are ignoring deactivated constraints
        constraints = []
        ctypes_not_to_transform = set(
            (Block, Param, Objective, Set, SetOf, Expression, Suffix))
        for obj in instance.component_data_objects(
                descend_into=Block, sort=SortComponents.deterministic,
                active=True):
            if obj.ctype in ctypes_not_to_transform:
                continue
            elif obj.ctype is Constraint:
                cons_list = self._process_constraint(obj)
                constraints.extend(cons_list)
                obj.deactivate()  # the truth will be on our transformation block
            elif obj.ctype is Var:
                # variable bounds are constraints, but we only need them if this
                # is a variable we are projecting out
                if obj not in vars_to_eliminate:
                    continue
                if obj.lb is not None:
                    constraints.append({
                        'body': generate_standard_repn(obj),
                        'lower': value(obj.lb),
                        'map': ComponentMap([(obj, 1)])
                    })
                if obj.ub is not None:
                    constraints.append({
                        'body': generate_standard_repn(-obj),
                        'lower': -value(obj.ub),
                        'map': ComponentMap([(obj, -1)])
                    })
            else:
                raise RuntimeError(
                    "Found active component %s of type %s. The "
                    "Fourier-Motzkin Elimination transformation can only "
                    "handle purely algebraic models. That is, only "
                    "Sets, Params, Vars, Constraints, Expressions, Blocks, "
                    "and Objectives may be active on the model." %
                    (obj.name, obj.ctype))

        new_constraints = self._fourier_motzkin_elimination(
            constraints, vars_to_eliminate)

        # put the new constraints on the transformation block
        for cons in new_constraints:
            if self.constraint_filter is not None:
                try:
                    keep = self.constraint_filter(cons)
                except Exception:
                    logger.error("Problem calling constraint filter callback "
                                 "on constraint with right-hand side %s and "
                                 "body:\n%s" %
                                 (cons['lower'], cons['body'].to_expression()))
                    raise
                if not keep:
                    continue
            lhs = cons['body'].to_expression(sort=True)
            lower = cons['lower']
            assert type(lower) is int or type(lower) is float
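            # If (lhs >= lower) evaluates to a plain bool, every variable was
            # eliminated, so the constraint is trivially true or trivially false.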
            if type(lhs >= lower) is bool:
                if lhs >= lower:
                    continue
                else:
                    # This would actually make a lot of sense in this case...
                    #projected_constraints.add(Constraint.Infeasible)
                    raise RuntimeError("Fourier-Motzkin found the model is "
                                       "infeasible!")
            else:
                projected_constraints.add(lhs >= lower)
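
For context, a minimal usage sketch of this transformation; the registered
name 'contrib.fourier_motzkin_elimination' matches the pyomo.contrib.fme
plugin, and the toy model here is illustrative:

from pyomo.environ import (ConcreteModel, Constraint, NonNegativeReals,
                           TransformationFactory, Var)
import pyomo.contrib.fme  # some Pyomo versions need this import to register the transformation

m = ConcreteModel()
m.x = Var(domain=NonNegativeReals)
m.y = Var(domain=NonNegativeReals)
m.c1 = Constraint(expr=m.x + m.y >= 2)
m.c2 = Constraint(expr=m.y <= 1)
TransformationFactory('contrib.fourier_motzkin_elimination').apply_to(
    m, vars_to_eliminate=[m.y])
# The projection (here, x >= 1) now lives on
# m._pyomo_contrib_fme_transformation.projected_constraints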
Code example #22
File: MindtPy.py Project: ZedongPeng/pyomo
    def solve(self, model, **kwds):
        """Solve the model.

        Warning: this solver is still in beta. Keyword arguments subject to
        change. Undocumented keyword arguments definitely subject to change.

        Args:
            model (Block): a Pyomo model or block to be solved.
        """
        config = self.CONFIG(kwds.pop('options', {}))
        config.set_value(kwds)
        set_up_logger(config)
        check_config(config)

        solve_data = set_up_solve_data(model, config)

        if config.integer_to_binary:
            TransformationFactory('contrib.integer_to_binary'). \
                apply_to(solve_data.working_model)

        new_logging_level = logging.INFO if config.tee else None
        with time_code(solve_data.timing, 'total', is_main_timer=True), \
                lower_logger_level_to(config.logger, new_logging_level), \
                create_utility_block(solve_data.working_model, 'MindtPy_utils', solve_data):
            config.logger.info(
                '---------------------------------------------------------------------------------------------\n'
                '              Mixed-Integer Nonlinear Decomposition Toolbox in Pyomo (MindtPy)               \n'
                '---------------------------------------------------------------------------------------------\n'
                'For more information, please visit https://pyomo.readthedocs.io/en/stable/contributed_packages/mindtpy.html')

            MindtPy = solve_data.working_model.MindtPy_utils
            setup_results_object(solve_data, config)
            process_objective(solve_data, config,
                              move_linear_objective=(config.init_strategy == 'FP'
                                                     or config.add_regularization is not None),
                              use_mcpp=config.use_mcpp,
                              update_var_con_list=config.add_regularization is None
                              )
            # The epigraph constraint is very "flat" for branching rules, so
            # we want to use the original model for the main MIP.
            if MindtPy.objective_list[0].expr.polynomial_degree() in {1, 0} and config.add_regularization is not None:
                MindtPy.objective_list[0].activate()
                MindtPy.objective_constr.deactivate()
                MindtPy.objective.deactivate()

            # Save model initial values.
            solve_data.initial_var_values = list(
                v.value for v in MindtPy.variable_list)

            # Store the initial model state as the best solution found. If we
            # find no better solution, then we will restore from this copy.
            solve_data.best_solution_found = None
            solve_data.best_solution_found_time = None

            # Record solver name
            solve_data.results.solver.name = 'MindtPy' + str(config.strategy)

            # Validate the model to ensure that MindtPy is able to solve it.
            if not model_is_valid(solve_data, config):
                return

            # Create a model block in which to store the generated feasibility
            # slack constraints. Do not leave the constraints on by default.
            feas = MindtPy.feas_opt = Block()
            feas.deactivate()
            feas.feas_constraints = ConstraintList(
                doc='Feasibility Problem Constraints')

            # Create a model block in which to store the generated linear
            # constraints. Do not leave the constraints on by default.
            lin = MindtPy.cuts = Block()
            lin.deactivate()

            # no-good cuts exclude particular discrete decisions
            lin.no_good_cuts = ConstraintList(doc='no-good cuts')
            # Feasible no-good cuts exclude discrete realizations that have
            # been explored via an NLP subproblem. Depending on model
            # characteristics, the user may wish to revisit NLP subproblems
            # (with a different initialization, for example). Therefore, these
            # cuts are not enabled by default.
            #
            # Note: these cuts will only exclude integer realizations that are
            # not already in the primary no_good_cuts ConstraintList.
            lin.feasible_no_good_cuts = ConstraintList(
                doc='explored no-good cuts')
            lin.feasible_no_good_cuts.deactivate()

            if config.feasibility_norm == 'L1' or config.feasibility_norm == 'L2':
                feas.nl_constraint_set = RangeSet(len(MindtPy.nonlinear_constraint_list),
                                                  doc='Integer index set over the nonlinear constraints.')
                # Create slack variables for feasibility problem
                feas.slack_var = Var(feas.nl_constraint_set,
                                     domain=NonNegativeReals, initialize=1)
            else:
                feas.slack_var = Var(domain=NonNegativeReals, initialize=1)

            # Create slack variables for OA cuts
            if config.add_slack:
                lin.slack_vars = VarList(
                    bounds=(0, config.max_slack), initialize=0, domain=NonNegativeReals)

            # Initialize the main problem
            with time_code(solve_data.timing, 'initialization'):
                MindtPy_initialize_main(solve_data, config)

            # Algorithm main loop
            with time_code(solve_data.timing, 'main loop'):
                MindtPy_iteration_loop(solve_data, config)
            if solve_data.best_solution_found is not None:
                # Update values in original model
                copy_var_list_values(
                    from_list=solve_data.best_solution_found.MindtPy_utils.variable_list,
                    to_list=MindtPy.variable_list,
                    config=config)
                copy_var_list_values(
                    MindtPy.variable_list,
                    [i for i in solve_data.original_model.component_data_objects(
                        Var) if not i.fixed],
                    config)
                # exclude fixed variables here. This is consistent with the definition of variable_list in GDPopt.util

            solve_data.results.problem.lower_bound = solve_data.LB
            solve_data.results.problem.upper_bound = solve_data.UB

        solve_data.results.solver.timing = solve_data.timing
        solve_data.results.solver.user_time = solve_data.timing.total
        solve_data.results.solver.wallclock_time = solve_data.timing.total
        solve_data.results.solver.iterations = solve_data.mip_iter
        solve_data.results.solver.num_infeasible_nlp_subproblem = solve_data.nlp_infeasible_counter
        solve_data.results.solver.best_solution_found_time = solve_data.best_solution_found_time

        if config.single_tree:
            solve_data.results.solver.num_nodes = solve_data.nlp_iter - \
                (1 if config.init_strategy == 'rNLP' else 0)

        return solve_data.results
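
A minimal usage sketch for this interface, in the spirit of the MindtPy
documentation example (assumes glpk and ipopt are available as the MIP and
NLP subsolvers):

from pyomo.environ import (Binary, ConcreteModel, Constraint, Objective,
                           SolverFactory, Var, log, minimize)

model = ConcreteModel()
model.x = Var(bounds=(1.0, 10.0), initialize=5.0)
model.y = Var(within=Binary)
model.c1 = Constraint(expr=(model.x - 4.0)**2 - model.x <= 50.0 * (1 - model.y))
model.c2 = Constraint(expr=model.x * log(model.x) + 5.0 <= 50.0 * model.y)
model.objective = Objective(expr=model.x, sense=minimize)
# strategy='OA' selects outer approximation; see the strategies handled above
SolverFactory('mindtpy').solve(model, strategy='OA',
                               mip_solver='glpk', nlp_solver='ipopt')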
Code example #23
def create_submodel_kkt_block(instance, submodel, deterministic,
                              fixed_upper_vars):
    """
    Add optimality conditions for the submodel

    This assumes that the original model has the form:

        min c1*x + d1*y
            A3*x <= b3
            A1*x + B1*y <= b1
            min c2*x + d2*y + x'*Q*y
                A2*x + B2*y + x'*E2*y <= b2
                y >= 0

    NOTE THE VARIABLE BOUNDS!
    """
    fixed_vars = {id(v) for v in fixed_upper_vars}
    #
    # Populate the block with the linear constraints.
    # Note that we don't simply clone the current block.
    # We need to collect a single set of equations that
    # can be easily expressed.
    #
    d2 = {}
    B2 = {}
    vtmp = {}
    utmp = {}
    sids_set = set()
    sids_list = []
    #
    block = Block(concrete=True)
    # dual variables associated with constraints in the primal problem
    block.u = VarList()
    # dual variables associated with bounds in the primal problem
    block.v = VarList()
    block.c1 = ConstraintList()
    block.c2 = ComplementarityList()
    block.c3 = ComplementarityList()
    #
    # Collect submodel objective terms
    #
    # TODO: detect fixed variables
    #
    for odata in submodel.component_data_objects(Objective, active=True):
        if odata.sense == maximize:
            d_sense = -1
        else:
            d_sense = 1
        #
        # Iterate through the variables in the representation
        #
        o_terms = generate_standard_repn(odata.expr, compute_values=False)
        #
        # Linear terms
        #
        for i, var in enumerate(o_terms.linear_vars):
            if id(var) in fixed_vars:
                #
                # Skip fixed upper variables
                #
                continue
            #
            # Store the coefficient for the variable.  The coefficient is
            # negated if the objective is maximized.
            #
            id_ = id(var)
            d2[id_] = d_sense * o_terms.linear_coefs[i]
            if not id_ in sids_set:
                sids_set.add(id_)
                sids_list.append(id_)
        #
        # Quadratic terms
        #
        for i, var in enumerate(o_terms.quadratic_vars):
            if id(var[0]) in fixed_vars:
                if id(var[1]) in fixed_vars:
                    #
                    # Skip fixed upper variables
                    #
                    continue
                #
                # Add the linear term
                #
                id_ = id(var[1])
                d2[id_] = d2.get(
                    id_, 0) + d_sense * o_terms.quadratic_coefs[i] * var[0]
                if not id_ in sids_set:
                    sids_set.add(id_)
                    sids_list.append(id_)
            elif id(var[1]) in fixed_vars:
                #
                # Add the linear term
                #
                id_ = id(var[0])
                d2[id_] = d2.get(
                    id_, 0) + d_sense * o_terms.quadratic_coefs[i] * var[1]
                if not id_ in sids_set:
                    sids_set.add(id_)
                    sids_list.append(id_)
            else:
                raise RuntimeError(
                    "Cannot apply this transformation to a problem with "
                    "quadratic terms where both variables are in the lower "
                    "level.")
        #
        # Stop after the first objective
        #
        break
    #
    # Iterate through all lower level variables, adding dual variables
    # and complementarity slackness conditions for y bound constraints
    #
    for vcomponent in instance.component_objects(Var, active=True):
        for ndx in vcomponent:
            if id(vcomponent[ndx]) in fixed_vars:
                #
                # Skip fixed upper variables
                #
                continue
            #
            # For each index, get the bounds for the variable
            #
            lb, ub = vcomponent[ndx].bounds
            if not lb is None:
                #
                # Add the complementarity slackness condition for a lower bound
                #
                v = block.v.add()
                block.c3.add(complements(vcomponent[ndx] >= lb, v >= 0))
            else:
                v = None
            if not ub is None:
                #
                # Add the complementarity slackness condition for an upper bound
                #
                w = block.v.add()
                vtmp[id(vcomponent[ndx])] = w
                block.c3.add(complements(vcomponent[ndx] <= ub, w >= 0))
            else:
                w = None
            if not (v is None and w is None):
                #
                # Record the variables for which complementarity slackness conditions
                # were created.
                #
                id_ = id(vcomponent[ndx])
                vtmp[id_] = (v, w)
                if not id_ in sids_set:
                    sids_set.add(id_)
                    sids_list.append(id_)
    #
    # Iterate through all constraints, adding dual variables and
    # complementary slackness conditions (for inequality constraints)
    #
    for cdata in submodel.component_data_objects(Constraint, active=True):
        if cdata.equality:
            # Don't add a complementary slackness condition for an equality constraint
            u = block.u.add()
            utmp[id(cdata)] = (None, u)
        else:
            if not cdata.lower is None:
                #
                # Add the complementarity slackness condition for a greater-than inequality
                #
                u = block.u.add()
                block.c2.add(complements(-cdata.body <= -cdata.lower, u >= 0))
            else:
                u = None
            if not cdata.upper is None:
                #
                # Add the complementarity slackness condition for a less-than inequality
                #
                w = block.u.add()
                block.c2.add(complements(cdata.body <= cdata.upper, w >= 0))
            else:
                w = None
            if not (u is None and w is None):
                utmp[id(cdata)] = (u, w)
        #
        # Store the coefficients for the constraint variables that are not fixed
        #
        c_terms = generate_standard_repn(cdata.body, compute_values=False)
        #
        # Linear terms
        #
        for i, var in enumerate(c_terms.linear_vars):
            if id(var) in fixed_vars:
                continue
            id_ = id(var)
            B2.setdefault(id_, {}).setdefault(id(cdata),
                                              c_terms.linear_coefs[i])
            if not id_ in sids_set:
                sids_set.add(id_)
                sids_list.append(id_)
        #
        # Quadratic terms
        #
        for i, var in enumerate(c_terms.quadratic_vars):
            if id(var[0]) in fixed_vars:
                if id(var[1]) in fixed_vars:
                    continue
                id_ = id(var[1])
                if id_ in B2:
                    B2[id_][id(cdata)] = c_terms.quadratic_coefs[i] * var[0]
                else:
                    B2.setdefault(id_, {}).setdefault(
                        id(cdata), c_terms.quadratic_coefs[i] * var[0])
                if not id_ in sids_set:
                    sids_set.add(id_)
                    sids_list.append(id_)
            elif id(var[1]) in fixed_vars:
                id_ = id(var[0])
                if id_ in B2:
                    B2[id_][id(cdata)] = c_terms.quadratic_coefs[i] * var[1]
                else:
                    B2.setdefault(id_, {}).setdefault(
                        id(cdata), c_terms.quadratic_coefs[i] * var[1])
                if not id_ in sids_set:
                    sids_set.add(id_)
                    sids_list.append(id_)
            else:
                raise RuntimeError(
                    "Cannot apply this transformation to a problem with "
                    "quadratic terms where both variables are in the lower "
                    "level.")
    #
    # Generate stationarity equations
    #
    tmp__ = (None, None)
    for vid in sids_list:
        exp = d2.get(vid, 0)
        #
        lb_dual, ub_dual = vtmp.get(vid, tmp__)
        if vid in vtmp:
            if not lb_dual is None:
                exp -= lb_dual  # dual for variable lower bound
            if not ub_dual is None:
                exp += ub_dual  # dual for variable upper bound
        #
        B2_ = B2.get(vid, {})
        utmp_keys = list(utmp.keys())
        if deterministic:
            utmp_keys.sort(key=lambda x: utmp[x][0].local_name
                           if utmp[x][1] is None else utmp[x][1].local_name)
        for uid in utmp_keys:
            if uid in B2_:
                lb_dual, ub_dual = utmp[uid]
                if not lb_dual is None:
                    exp -= B2_[uid] * lb_dual
                if not ub_dual is None:
                    exp += B2_[uid] * ub_dual
        if type(exp) in six.integer_types or type(exp) is float:
            # TODO: Annotate the model as unbounded
            raise IOError("Unbounded variable without side constraints")
        block.c1.add(exp == 0)
    return block
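
As a sketch of what block.c1 ends up holding, in the docstring's notation:
for each lower-level variable y[j], the stationarity row assembled above is

    d2[j] - v_lb[j] + v_ub[j] + sum_i B2[i,j]*(u_ub[i] - u_lb[i]) == 0

where the v's are the bound duals collected in vtmp, the u's are the
constraint duals collected in utmp, and the signs follow the exp -= / exp +=
updates in the loop.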
Code example #24
    def test_handle_termination_condition(self):
        """Test the outer approximation decomposition algorithm."""
        model = SimpleMINLP()
        config = _get_MindtPy_config()
        solve_data = set_up_solve_data(model, config)
        with time_code(solve_data.timing, 'total', is_main_timer=True), \
                create_utility_block(solve_data.working_model, 'MindtPy_utils', solve_data):

            MindtPy = solve_data.working_model.MindtPy_utils
            setup_results_object(solve_data, config)
            process_objective(solve_data, config,
                              move_objective=(config.init_strategy == 'FP'
                                              or config.add_regularization is not None),
                              use_mcpp=config.use_mcpp,
                              update_var_con_list=config.add_regularization is None)
            feas = MindtPy.feas_opt = Block()
            feas.deactivate()
            feas.feas_constraints = ConstraintList(
                doc='Feasibility Problem Constraints')

            lin = MindtPy.cuts = Block()
            lin.deactivate()

            if config.feasibility_norm == 'L1' or config.feasibility_norm == 'L2':
                feas.nl_constraint_set = RangeSet(len(MindtPy.nonlinear_constraint_list),
                                                  doc='Integer index set over the nonlinear constraints.')
                # Create slack variables for feasibility problem
                feas.slack_var = Var(feas.nl_constraint_set,
                                     domain=NonNegativeReals, initialize=1)
            else:
                feas.slack_var = Var(domain=NonNegativeReals, initialize=1)

            # no-good cuts exclude particular discrete decisions
            lin.no_good_cuts = ConstraintList(doc='no-good cuts')

            fixed_nlp = solve_data.working_model.clone()
            TransformationFactory('core.fix_integer_vars').apply_to(fixed_nlp)

            MindtPy_initialize_main(solve_data, config)

            # test handle_subproblem_other_termination
            termination_condition = tc.maxIterations
            config.add_no_good_cuts = True
            handle_subproblem_other_termination(fixed_nlp, termination_condition,
                                                solve_data, config)
            self.assertEqual(
                len(solve_data.mip.MindtPy_utils.cuts.no_good_cuts), 1)

            # test handle_main_other_conditions
            main_mip, main_mip_results = solve_main(solve_data, config)
            main_mip_results.solver.termination_condition = tc.infeasible
            handle_main_other_conditions(
                solve_data.mip, main_mip_results, solve_data, config)
            self.assertIs(
                solve_data.results.solver.termination_condition, tc.feasible)

            main_mip_results.solver.termination_condition = tc.unbounded
            handle_main_other_conditions(
                solve_data.mip, main_mip_results, solve_data, config)
            self.assertIn(main_mip.MindtPy_utils.objective_bound,
                          main_mip.component_data_objects(ctype=Constraint))

            main_mip.MindtPy_utils.del_component('objective_bound')
            main_mip_results.solver.termination_condition = tc.infeasibleOrUnbounded
            handle_main_other_conditions(
                solve_data.mip, main_mip_results, solve_data, config)
            self.assertIn(main_mip.MindtPy_utils.objective_bound,
                          main_mip.component_data_objects(ctype=Constraint))

            main_mip_results.solver.termination_condition = tc.maxTimeLimit
            handle_main_other_conditions(
                solve_data.mip, main_mip_results, solve_data, config)
            self.assertIs(
                solve_data.results.solver.termination_condition, tc.maxTimeLimit)

            main_mip_results.solver.termination_condition = tc.other
            main_mip_results.solution.status = SolutionStatus.feasible
            handle_main_other_conditions(
                solve_data.mip, main_mip_results, solve_data, config)
            for v1, v2 in zip(main_mip.MindtPy_utils.variable_list, solve_data.working_model.MindtPy_utils.variable_list):
                self.assertEqual(v1.value, v2.value)

            # test handle_feasibility_subproblem_tc
            feas_subproblem = solve_data.working_model.clone()
            add_feas_slacks(feas_subproblem, config)
            MindtPy = feas_subproblem.MindtPy_utils
            MindtPy.feas_opt.activate()
            if config.feasibility_norm == 'L1':
                MindtPy.feas_obj = Objective(
                    expr=sum(s for s in MindtPy.feas_opt.slack_var[...]),
                    sense=minimize)
            elif config.feasibility_norm == 'L2':
                MindtPy.feas_obj = Objective(
                    expr=sum(s*s for s in MindtPy.feas_opt.slack_var[...]),
                    sense=minimize)
            else:
                MindtPy.feas_obj = Objective(
                    expr=MindtPy.feas_opt.slack_var,
                    sense=minimize)

            handle_feasibility_subproblem_tc(
                tc.optimal, MindtPy, solve_data, config)
            handle_feasibility_subproblem_tc(
                tc.infeasible, MindtPy, solve_data, config)
            self.assertIs(solve_data.should_terminate, True)
            self.assertIs(solve_data.results.solver.status, SolverStatus.error)

            solve_data.should_terminate = False
            solve_data.results.solver.status = None
            handle_feasibility_subproblem_tc(
                tc.maxIterations, MindtPy, solve_data, config)
            self.assertIs(solve_data.should_terminate, True)
            self.assertIs(solve_data.results.solver.status, SolverStatus.error)

            solve_data.should_terminate = False
            solve_data.results.solver.status = None
            handle_feasibility_subproblem_tc(
                tc.solverFailure, MindtPy, solve_data, config)
            self.assertIs(solve_data.should_terminate, True)
            self.assertIs(solve_data.results.solver.status, SolverStatus.error)

            # test NLP subproblem infeasible
            solve_data.working_model.Y[1].value = 0
            solve_data.working_model.Y[2].value = 0
            solve_data.working_model.Y[3].value = 0
            fixed_nlp, fixed_nlp_results = solve_subproblem(solve_data, config)
            solve_data.working_model.Y[1].value = None
            solve_data.working_model.Y[2].value = None
            solve_data.working_model.Y[3].value = None

            # test handle_nlp_subproblem_tc
            fixed_nlp_results.solver.termination_condition = tc.maxTimeLimit
            handle_nlp_subproblem_tc(
                fixed_nlp, fixed_nlp_results, solve_data, config)
            self.assertIs(solve_data.should_terminate, True)
            self.assertIs(
                solve_data.results.solver.termination_condition, tc.maxTimeLimit)

            fixed_nlp_results.solver.termination_condition = tc.maxEvaluations
            handle_nlp_subproblem_tc(
                fixed_nlp, fixed_nlp_results, solve_data, config)
            self.assertIs(solve_data.should_terminate, True)
            self.assertIs(
                solve_data.results.solver.termination_condition, tc.maxEvaluations)

            fixed_nlp_results.solver.termination_condition = tc.maxIterations
            handle_nlp_subproblem_tc(
                fixed_nlp, fixed_nlp_results, solve_data, config)
            self.assertIs(solve_data.should_terminate, True)
            self.assertIs(
                solve_data.results.solver.termination_condition, tc.maxEvaluations)

            # test handle_fp_main_tc
            config.init_strategy = 'FP'
            solve_data.fp_iter = 1
            init_rNLP(solve_data, config)
            feas_main, feas_main_results = solve_main(
                solve_data, config, fp=True)
            feas_main_results.solver.termination_condition = tc.optimal
            fp_should_terminate = handle_fp_main_tc(
                feas_main_results, solve_data, config)
            self.assertIs(fp_should_terminate, False)

            feas_main_results.solver.termination_condition = tc.maxTimeLimit
            fp_should_terminate = handle_fp_main_tc(
                feas_main_results, solve_data, config)
            self.assertIs(fp_should_terminate, True)
            self.assertIs(
                solve_data.results.solver.termination_condition, tc.maxTimeLimit)

            feas_main_results.solver.termination_condition = tc.infeasible
            fp_should_terminate = handle_fp_main_tc(
                feas_main_results, solve_data, config)
            self.assertIs(fp_should_terminate, True)

            feas_main_results.solver.termination_condition = tc.unbounded
            fp_should_terminate = handle_fp_main_tc(
                feas_main_results, solve_data, config)
            self.assertIs(fp_should_terminate, True)

            feas_main_results.solver.termination_condition = tc.other
            feas_main_results.solution.status = SolutionStatus.feasible
            fp_should_terminate = handle_fp_main_tc(
                feas_main_results, solve_data, config)
            self.assertIs(fp_should_terminate, False)

            feas_main_results.solver.termination_condition = tc.solverFailure
            fp_should_terminate = handle_fp_main_tc(
                feas_main_results, solve_data, config)
            self.assertIs(fp_should_terminate, True)

            # test generate_norm_constraint
            fp_nlp = solve_data.working_model.clone()
            config.fp_main_norm = 'L1'
            generate_norm_constraint(fp_nlp, solve_data, config)
            self.assertIsNotNone(fp_nlp.MindtPy_utils.find_component(
                'L1_norm_constraint'))

            config.fp_main_norm = 'L2'
            generate_norm_constraint(fp_nlp, solve_data, config)
            self.assertIsNotNone(fp_nlp.find_component('norm_constraint'))

            fp_nlp.del_component('norm_constraint')
            config.fp_main_norm = 'L_infinity'
            generate_norm_constraint(fp_nlp, solve_data, config)
            self.assertIsNotNone(fp_nlp.find_component('norm_constraint'))

            # test set_solver_options
            config.mip_solver = 'gams'
            config.threads = 1
            opt = SolverFactory(config.mip_solver)
            set_solver_options(opt, solve_data, config,
                               'mip', regularization=False)

            config.mip_solver = 'gurobi'
            config.mip_regularization_solver = 'gurobi'
            config.regularization_mip_threads = 1
            opt = SolverFactory(config.mip_solver)
            set_solver_options(opt, solve_data, config,
                               'mip', regularization=True)

            config.nlp_solver = 'gams'
            config.nlp_solver_args['solver'] = 'ipopt'
            set_solver_options(opt, solve_data, config,
                               'nlp', regularization=False)

            config.nlp_solver_args['solver'] = 'ipopth'
            set_solver_options(opt, solve_data, config,
                               'nlp', regularization=False)

            config.nlp_solver_args['solver'] = 'conopt'
            set_solver_options(opt, solve_data, config,
                               'nlp', regularization=False)

            config.nlp_solver_args['solver'] = 'msnlp'
            set_solver_options(opt, solve_data, config,
                               'nlp', regularization=False)

            config.nlp_solver_args['solver'] = 'baron'
            set_solver_options(opt, solve_data, config,
                               'nlp', regularization=False)

            # test algorithm_should_terminate
            solve_data.should_terminate = True
            solve_data.primal_bound = float('inf')
            self.assertIs(algorithm_should_terminate(
                solve_data, config, check_cycling=False), True)
            self.assertIs(
                solve_data.results.solver.termination_condition, tc.noSolution)

            solve_data.primal_bound = 100
            self.assertIs(algorithm_should_terminate(
                solve_data, config, check_cycling=False), True)
            self.assertIs(
                solve_data.results.solver.termination_condition, tc.feasible)

            solve_data.primal_bound_progress = [float('inf'), 5, 4, 3, 2, 1]
            solve_data.primal_bound_progress_time = [1, 2, 3, 4, 5, 6]
            solve_data.primal_bound = 1
            self.assertEqual(get_primal_integral(solve_data, config), 14.5)

            solve_data.dual_bound_progress = [float('-inf'), 1, 2, 3, 4, 5]
            solve_data.dual_bound_progress_time = [1, 2, 3, 4, 5, 6]
            solve_data.dual_bound = 5
            self.assertEqual(get_dual_integral(solve_data, config), 14.1)

            # test check_config
            config.add_regularization = 'level_L1'
            config.regularization_mip_threads = 0
            config.threads = 8
            check_config(config)
            self.assertEqual(config.regularization_mip_threads, 8)

            config.mip_solver = 'cplex'
            config.single_tree = True
            check_config(config)
            self.assertEqual(config.mip_solver, 'cplex_persistent')
            self.assertEqual(config.threads, 1)

            config.add_slack = True
            config.max_slack = 0.0
            check_config(config)
            self.assertEqual(config.add_slack, False)

            config.strategy = 'GOA'
            config.add_slack = True
            config.use_mcpp = False
            config.equality_relaxation = True
            config.use_fbbt = False
            config.add_no_good_cuts = False
            config.use_tabu_list = False
            check_config(config)
            self.assertTrue(config.use_mcpp)
            self.assertTrue(config.use_fbbt)
            self.assertFalse(config.add_slack)
            self.assertFalse(config.equality_relaxation)
            self.assertTrue(config.add_no_good_cuts)
            self.assertFalse(config.use_tabu_list)
            
            config.single_tree = False
            config.strategy = 'FP'
            config.init_strategy = 'rNLP'
            config.iteration_limit = 100
            config.add_no_good_cuts = False
            config.use_tabu_list = True
            check_config(config)
            self.assertEqual(config.init_strategy, 'FP')
            self.assertEqual(config.iteration_limit, 0)
            self.assertEqual(config.add_no_good_cuts, True)
            self.assertEqual(config.use_tabu_list, False)
Code example #25
File: cut_generation.py Project: matzech/pyomo
def add_outer_approximation_cuts(nlp_result, solve_data, config):
    """Add outer approximation cuts to the linear GDP model."""
    with time_code(solve_data.timing, 'OA cut generation'):
        m = solve_data.linear_GDP
        GDPopt = m.GDPopt_utils
        sign_adjust = -1 if solve_data.objective_sense == minimize else 1

        # copy values over
        for var, val in zip(GDPopt.variable_list, nlp_result.var_values):
            if val is not None and not var.fixed:
                var.value = val

        # TODO some kind of special handling if the dual is phenomenally small?
        config.logger.debug('Adding OA cuts.')

        counter = 0
        if not hasattr(GDPopt, 'jacobians'):
            GDPopt.jacobians = ComponentMap()
        for constr, dual_value in zip(GDPopt.constraint_list,
                                      nlp_result.dual_values):
            if dual_value is None or constr.body.polynomial_degree() in (1, 0):
                continue

            # Determine if the user pre-specified that OA cuts should not be
            # generated for the given constraint.
            parent_block = constr.parent_block()
            ignore_set = getattr(parent_block, 'GDPopt_ignore_OA', None)
            config.logger.debug('Ignore_set %s' % ignore_set)
            if (ignore_set and (constr in ignore_set
                                or constr.parent_component() in ignore_set)):
                config.logger.debug(
                    'OA cut addition for %s skipped because it is in '
                    'the ignore set.' % constr.name)
                continue

            config.logger.debug("Adding OA cut for %s with dual value %s" %
                                (constr.name, dual_value))

            # Cache jacobians
            jacobians = GDPopt.jacobians.get(constr, None)
            if jacobians is None:
                constr_vars = list(
                    identify_variables(constr.body, include_fixed=False))
                if len(constr_vars) >= 1000:
                    mode = differentiate.Modes.reverse_numeric
                else:
                    mode = differentiate.Modes.sympy

                jac_list = differentiate(constr.body,
                                         wrt_list=constr_vars,
                                         mode=mode)
                jacobians = ComponentMap(zip(constr_vars, jac_list))
                GDPopt.jacobians[constr] = jacobians

            # Create a block on which to put outer approximation cuts.
            oa_utils = parent_block.component('GDPopt_OA')
            if oa_utils is None:
                oa_utils = parent_block.GDPopt_OA = Block(
                    doc="Block holding outer approximation cuts "
                    "and associated data.")
                oa_utils.GDPopt_OA_cuts = ConstraintList()
                oa_utils.GDPopt_OA_slacks = VarList(bounds=(0,
                                                            config.max_slack),
                                                    domain=NonNegativeReals,
                                                    initialize=0)

            oa_cuts = oa_utils.GDPopt_OA_cuts
            slack_var = oa_utils.GDPopt_OA_slacks.add()
            rhs = value(constr.lower) if constr.has_lb() else value(
                constr.upper)
            try:
                new_oa_cut = (copysign(1, sign_adjust * dual_value) *
                              (value(constr.body) - rhs + sum(
                                  value(jacobians[var]) * (var - value(var))
                                  for var in jacobians)) - slack_var <= 0)
                if new_oa_cut.polynomial_degree() not in (1, 0):
                    for var in jacobians:
                        config.logger.debug(
                            '%s: %s' % (var.name, value(jacobians[var])))
                oa_cuts.add(expr=new_oa_cut)
                counter += 1
            except ZeroDivisionError:
                config.logger.warning(
                    "Zero division occured attempting to generate OA cut for constraint %s.\n"
                    "Skipping OA cut generation for this constraint." %
                    (constr.name, ))
                # Simply continue on to the next constraint.

        config.logger.info('Added %s OA cuts' % counter)
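
In symbols, new_oa_cut is the first-order outer approximation of the
nonlinear constraint at the NLP solution x*, relaxed by a slack variable and
sign-adjusted through the dual (a sketch, with rhs the active bound):

    +/- [ g(x*) - rhs + sum_v dg/dv(x*) * (v - v*) ] - slack <= 0,  slack >= 0

where the leading sign is copysign(1, sign_adjust * dual_value) and rhs is
constr.lower or constr.upper.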
Code example #26
File: interscenario.py Project: zypher22/pyomo
def get_modified_instance(ph, scenario_tree, scenario_or_bundle, **options):
    # Find the model
    if scenario_tree.contains_bundles():
        model = ph._bundle_binding_instance_map[scenario_or_bundle._name]
    else:
        model = ph._instances[scenario_or_bundle._name]
    b = model.component('_interscenario_plugin')
    if b is not None:
        return model

    #
    # We need to add the interscenario information to this model
    #
    model._interscenario_plugin = b = Block()

    # Save our options
    #
    b.epsilon = options.pop('epsilon')
    b.cut_scale = options.pop('cut_scale')
    b.allow_slack = options.pop('allow_slack')
    b.enable_rho = options.pop('enable_rho')
    b.enable_cuts = options.pop('enable_cuts')
    assert (len(options) == 0)

    # Information for generating cuts
    #
    b.cutlist = ConstraintList()
    b.abs_int_vars = VarList(within=NonNegativeIntegers)
    b.abs_binary_vars = VarList(within=Binary)

    # Note: the var_ids are on the ORIGINAL scenario models
    rootNode = scenario_tree.findRootNode()
    var_ids = list(iterkeys(rootNode._variable_datas))

    # Right now, this is hard-coded for 2-stage problems - so we only
    # need to worry about the variables from the root node.  These
    # variables should exist on all scenarios.  Set up a (trivial)
    # equality constraint for each variable:
    #    var == current_value{param} + separation_variable{var, fixed=0}
    b.STAGE1VAR = _S1V = Set(initialize=var_ids)
    b.separation_variables = _sep = Var(_S1V, dense=True)
    b.fixed_variable_values = _param = Param(_S1V, mutable=True, initialize=0)

    b.rho = weakref.ref(model.component('PHRHO_%s' % rootNode._name))
    b.weights = weakref.ref(model.component('PHWEIGHT_%s' % rootNode._name))

    if b.allow_slack:
        for idx in _sep:
            _sep[idx].setlb(-b.epsilon)
            _sep[idx].setub(b.epsilon)
    else:
        _sep.fix(0)

    _cuidBuffer = {}
    _src = b.local_stage1_varmap = {}
    for i in _S1V:
        # Note indexing: for each 1st stage var, pick an arbitrary
        # (first) scenario and return the variable (and not its
        # probability)
        _cuid = ComponentUID(rootNode._variable_datas[i][0][0], _cuidBuffer)
        _src[i] = weakref.ref(_cuid.find_component_on(model))
        #_base_src[i] = weakref.ref(_cuid.find_component_on(base_model))

    def _set_var_value(b, i):
        return _param[i] + _sep[i] - _src[i]() == 0

    b.fixed_variables_constraint = _con = Constraint(
        _S1V, rule=_set_var_value)

    #
    # TODO: When we get the duals of the first-stage variables, do we
    # want the dual WRT the original objective, or the dual WRT the
    # augmented objective?
    #
    # Move the objective to a standardized place so we can easily find it later
    if PYOMO_4_0:
        _orig_objective = list(x[2] for x in model.all_component_data(
            Objective, active=True, descend_into=True))
    else:
        _orig_objective = list(
            model.component_data_objects(Objective,
                                         active=True,
                                         descend_into=True))
    assert (len(_orig_objective) == 1)
    _orig_objective = _orig_objective[0]
    b.original_obj = weakref.ref(_orig_objective)

    # add (and deactivate) the objective for the infeasibility
    # separation problem.
    b.separation_obj = Objective(expr=sum(_sep[i]**2 for i in var_ids),
                                 sense=minimize)

    # Make sure we get dual information
    if 'dual' not in model:
        # Export and import floating point data
        model.dual = Suffix(direction=Suffix.IMPORT_EXPORT)
    #if 'rc' not in model:
    #    model.rc = Suffix(direction=Suffix.IMPORT_EXPORT)

    if FALLBACK_ON_BRUTE_FORCE_PREPROCESS:
        model.preprocess()
    else:
        _map = {}
        preprocess_block_constraints(b, idMap=_map)

    # Note: we wait to deactivate the objective until after we
    # preprocess so that the objective is correctly processed.
    b.separation_obj.deactivate()
    # (temporarily) deactivate the fixed stage-1 variables
    _con.deactivate()

    toc("InterScenario plugin: generated modified problem instance")
    return model
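
The "trivial" equality pattern set up above (each first-stage variable pinned
to a mutable Param through a normally fixed separation variable) in
isolation, as a sketch with illustrative names:

from pyomo.environ import ConcreteModel, Constraint, Param, RangeSet, Var

m = ConcreteModel()
m.S = RangeSet(3)
m.x = Var(m.S)
m.fixed_value = Param(m.S, mutable=True, initialize=0)
m.sep = Var(m.S, initialize=0)
m.sep.fix(0)  # only relaxed (within +/- epsilon) when slack is allowed

def _pin(m, i):
    # mirrors _set_var_value above: param + separation var - var == 0
    return m.fixed_value[i] + m.sep[i] - m.x[i] == 0
m.pin = Constraint(m.S, rule=_pin)
# updating m.fixed_value re-pins m.x without rebuilding the constraint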
Code example #27
File: adjustable.py Project: cog-imperial/romodel
    def _apply_to(self, instance):
        for c in chain(
                self.get_adjustable_components(instance),
                self.get_adjustable_components(instance, component=Objective)):
            # Collect adjustable vars and uncparams
            adjvar = collect_adjustable(c)
            if id(adjvar) not in self._adjvars:
                self._adjvars[id(adjvar)] = adjvar
            # Create variables for LDR coefficients
            for i in adjvar:
                for u in adjvar[i].uncparams:
                    parent = u.parent_component()
                    if (adjvar.name, parent.name) not in self._coef_dict:
                        coef = Var(adjvar.index_set(), parent.index_set())
                        coef_name = adjvar.name + '_' + parent.name + '_coef'
                        setattr(instance, coef_name, coef)
                        self._coef_dict[adjvar.name, parent.name] = coef

            # Create substitution map
            def coef(u):
                return self._coef_dict[adjvar.name, u.parent_component().name]

            def gen_index(u):
                if hasattr(u, 'index'):
                    yield u.index()
                else:
                    for i in u:
                        yield i

            sub_map = {
                id(adjvar[i]): sum(u.parent_component()[j] * coef(u)[i, j]
                                   for u in adjvar[i].uncparams
                                   for j in gen_index(u))
                for i in adjvar
            }
            self._expr_dict[adjvar.name] = sub_map
            # Replace AdjustableVar by LDR
            # Objectives
            if c.ctype is Objective:
                e_new = replace_expressions(c.expr, substitution_map=sub_map)
                c_new = Objective(expr=e_new, sense=c.sense)
                setattr(instance, c.name + '_ldr', c_new)
            # Constraints
            elif c.ctype is Constraint:
                e_new = replace_expressions(c.body, substitution_map=sub_map)

                if c.equality:
                    repn = self.generate_repn_param(instance, e_new)

                    c_new = ConstraintList()
                    setattr(instance, c.name + '_ldr', c_new)
                    # Check if repn.constant is an expression
                    cons = repn.constant
                    if cons.__class__ in nonpyomo_leaf_types:
                        if cons != 0:
                            raise ValueError("Can't reformulate constraint {} "
                                             "with numeric constant "
                                             "{}".format(c.name, cons))
                    elif cons.is_potentially_variable():
                        c_new.add(cons == 0)
                    else:
                        raise ValueError("Can't reformulate constraint {} with"
                                         " constant "
                                         "{}".format(c.name, cons))
                    # Add constraints for each uncparam
                    for coef in repn.linear_coefs:
                        c_new.add(coef == 0)
                    for coef in repn.quadratic_coefs:
                        c_new.add(coef == 0)
                else:

                    def c_rule(x):
                        return (c.lower, e_new, c.upper)

                    c_new = Constraint(rule=c_rule)
                    setattr(instance, c.name + '_ldr', c_new)

            c.deactivate()

        # Add constraints for bounds on AdjustableVar
        for name, sub_map in self._expr_dict.items():
            adjvar = instance.find_component(name)
            cl = ConstraintList()
            setattr(instance, adjvar.name + '_bounds', cl)
            for i in adjvar:
                if adjvar[i].has_lb():
                    cl.add(adjvar[i].lb <= sub_map[id(adjvar[i])])
                if adjvar[i].has_ub():
                    cl.add(adjvar[i].ub >= sub_map[id(adjvar[i])])
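
The substitution map above implements a linear decision rule (LDR): each
adjustable variable adjvar[i] is replaced by an affine function of the
uncertain parameters, here without a constant offset (a sketch in the code's
notation):

    adjvar[i]  ->  sum over uncparams u, indices j of  coef[i, j] * u[j]

Equality constraints then reduce to coefficient-matching conditions (the
c_new.add(coef == 0) loops), and the original bounds on adjvar are
re-imposed through the '_bounds' ConstraintLists.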
Code example #28
def add_affine_cuts(solve_data, config):
    """
    Adds affine cuts using MCPP; modifies the model to include affine cuts

    Parameters
    ----------
    solve_data: MindtPy Data Container
        data container that holds solve-instance data
    config: ConfigBlock
        contains the specific configurations for the algorithm
    """

    from math import isinf, isnan  # used below to screen invalid MC++ values

    m = solve_data.mip
    config.logger.info("Adding affine cuts")
    counter = 0

    for constr in m.MindtPy_utils.constraint_list:
        if constr.body.polynomial_degree() in (1, 0):
            continue

        vars_in_constr = list(identify_variables(constr.body))
        if any(var.value is None for var in vars_in_constr):
            continue  # a variable has no value; skip this constraint

        # mcpp stuff
        try:
            mc_eqn = mc(constr.body)
        except MCPP_Error as e:
            config.logger.debug("Skipping constraint %s due to MCPP error %s" %
                                (constr.name, str(e)))
            continue  # skip to the next constraint

        ccSlope = mc_eqn.subcc()
        cvSlope = mc_eqn.subcv()
        ccStart = mc_eqn.concave()
        cvStart = mc_eqn.convex()

        # Skip a cut when its slope or intercept is NaN or inf. (A direct
        # equality test against float('nan') is always False, so use
        # math.isnan/math.isinf.)
        concave_cut_valid = True
        convex_cut_valid = True
        for var in vars_in_constr:
            if not var.fixed:
                if isnan(ccSlope[var]) or isinf(ccSlope[var]):
                    concave_cut_valid = False
                if isnan(cvSlope[var]) or isinf(cvSlope[var]):
                    convex_cut_valid = False
        # Skip a cut when all of its slopes are zero.
        if not any(ccSlope.values()):
            concave_cut_valid = False
        if not any(cvSlope.values()):
            convex_cut_valid = False
        if isnan(ccStart) or isinf(ccStart):
            concave_cut_valid = False
        if isnan(cvStart) or isinf(cvStart):
            convex_cut_valid = False
        if not (concave_cut_valid or convex_cut_valid):
            continue

        ub_int = min(constr.upper,
                     mc_eqn.upper()) if constr.has_ub() else mc_eqn.upper()
        lb_int = max(constr.lower,
                     mc_eqn.lower()) if constr.has_lb() else mc_eqn.lower()

        parent_block = constr.parent_block()
        # Create a block on which to put outer approximation cuts.
        # TODO: create it at the beginning.
        aff_utils = parent_block.component('MindtPy_aff')
        if aff_utils is None:
            aff_utils = parent_block.MindtPy_aff = Block(
                doc="Block holding affine constraints")
            aff_utils.MindtPy_aff_cons = ConstraintList()
        aff_cuts = aff_utils.MindtPy_aff_cons
        if concave_cut_valid:
            concave_cut = sum(ccSlope[var] * (var - var.value)
                              for var in vars_in_constr
                              if not var.fixed) + ccStart >= lb_int
            aff_cuts.add(expr=concave_cut)
            counter += 1
        if convex_cut_valid:
            convex_cut = sum(cvSlope[var] * (var - var.value)
                             for var in vars_in_constr
                             if not var.fixed) + cvStart <= ub_int
            aff_cuts.add(expr=convex_cut)
            counter += 1

    config.logger.info("Added %s affine cuts" % counter)
Code example #29
File: LP_compiled.py Project: vova292/pyomo
    def _generate_model(self):
        self.model = ConcreteModel()
        model = self.model
        model._name = self.description

        model.s = RangeSet(1, 12)
        model.x = Var(model.s)
        model.x[1].setlb(-1)
        model.x[1].setub(1)
        model.x[2].setlb(-1)
        model.x[2].setub(1)
        model.obj = Objective(expr=sum(model.x[i] * ((-1)**(i + 1))
                                       for i in model.x.index_set()))
        model.c = ConstraintList()
        # to make the variable used in the constraint match the name
        model.c.add(Constraint.Skip)
        model.c.add(Constraint.Skip)
        model.c.add(model.x[3] >= -1.)
        model.c.add(model.x[4] <= 1.)
        model.c.add(model.x[5] == -1.)
        model.c.add(model.x[6] == -1.)
        model.c.add(model.x[7] == 1.)
        model.c.add(model.x[8] == 1.)
        model.c.add((-1., model.x[9], -1.))
        model.c.add((-1., model.x[10], -1.))
        model.c.add((1., model.x[11], 1.))
        model.c.add((1., model.x[12], 1.))
        cdata = model.c.add((0, 1, 3))
        assert cdata.lower == 0
        assert cdata.upper == 3
        assert cdata.body() == 1
        assert not cdata.equality
        cdata = model.c.add((0, 2, 3))
        assert cdata.lower == 0
        assert cdata.upper == 3
        assert cdata.body() == 2
        assert not cdata.equality
        cdata = model.c.add((0, 1, None))
        assert cdata.lower == 0
        assert cdata.upper is None
        assert cdata.body() == 1
        assert not cdata.equality
        cdata = model.c.add((None, 0, 1))
        assert cdata.lower is None
        assert cdata.upper == 1
        assert cdata.body() == 0
        assert not cdata.equality
        cdata = model.c.add((1, 1))
        assert cdata.lower == 1
        assert cdata.upper == 1
        assert cdata.body() == 1
        assert cdata.equality

        model.fixed_var = Var()
        model.fixed_var.fix(1.0)
        cdata = model.c.add((0, 1 + model.fixed_var, 3))
        cdata = model.c.add((0, 2 + model.fixed_var, 3))
        cdata = model.c.add((0, model.fixed_var, None))
        cdata = model.c.add((None, model.fixed_var, 1))
        cdata = model.c.add((model.fixed_var, 1))

        model.c_inactive = ConstraintList()
        # to make the variable used in the constraint match the name
        model.c_inactive.add(Constraint.Skip)
        model.c_inactive.add(Constraint.Skip)
        model.c_inactive.add(model.x[3] >= -2.)
        model.c_inactive.add(model.x[4] <= 2.)

        compile_block_linear_constraints(model, 'Amatrix')
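
For reference, the tuple constraint forms exercised above, in isolation (a
sketch):

from pyomo.environ import ConcreteModel, ConstraintList, Var

m = ConcreteModel()
m.v = Var()
m.cl = ConstraintList()
m.cl.add((0, m.v, 3))     # 3-tuple (lower, body, upper): ranged 0 <= v <= 3
m.cl.add((None, m.v, 1))  # open lower bound: v <= 1
m.cl.add((m.v, 1))        # 2-tuple (body, rhs): equality v == 1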
Code example #30
def MindtPy_initialize_master(solve_data, config):
    """
    Initializes the decomposition algorithm and creates the master MIP/MILP problem.

    This function initializes the decomposition problem, which includes generating the initial cuts required to
    build the master MIP/MILP

    Parameters
    ----------
    solve_data: MindtPy Data Container
        data container that holds solve-instance data
    config: ConfigBlock
        contains the specific configurations for the algorithm
    """
    # if single tree is activated, we need to add bounds for unbounded variables in nonlinear constraints to avoid unbounded master problem.
    if config.single_tree:
        var_bound_add(solve_data, config)

    m = solve_data.mip = solve_data.working_model.clone()
    MindtPy = m.MindtPy_utils
    if config.use_dual:
        m.dual.deactivate()

    if config.strategy == 'OA':
        calc_jacobians(solve_data, config)  # preload jacobians
        MindtPy.MindtPy_linear_cuts.oa_cuts = ConstraintList(
            doc='Outer approximation cuts')
    elif config.strategy == 'ECP':
        calc_jacobians(solve_data, config)  # preload jacobians
        MindtPy.MindtPy_linear_cuts.ecp_cuts = ConstraintList(
            doc='Extended Cutting Planes')
    # elif config.strategy == 'PSC':
    #     detect_nonlinear_vars(solve_data, config)
    #     MindtPy.MindtPy_linear_cuts.psc_cuts = ConstraintList(
    #         doc='Partial surrogate cuts')
    # elif config.strategy == 'GBD':
    #     MindtPy.MindtPy_linear_cuts.gbd_cuts = ConstraintList(
    #         doc='Generalized Benders cuts')

    # Set default initialization_strategy
    if config.init_strategy is None:
        if config.strategy in {'OA', 'GOA'}:
            config.init_strategy = 'rNLP'
        else:
            config.init_strategy = 'max_binary'

    config.logger.info(
        '{} is the initial strategy being used.'
        '\n'.format(
            config.init_strategy))
    # Do the initialization
    if config.init_strategy == 'rNLP':
        init_rNLP(solve_data, config)
    elif config.init_strategy == 'max_binary':
        init_max_binaries(solve_data, config)
    elif config.init_strategy == 'initial_binary':
        if config.strategy != 'ECP':
            fixed_nlp, fixed_nlp_result = solve_NLP_subproblem(
                solve_data, config)
            if fixed_nlp_result.solver.termination_condition in {tc.optimal, tc.locallyOptimal, tc.feasible}:
                handle_NLP_subproblem_optimal(fixed_nlp, solve_data, config)
            elif fixed_nlp_result.solver.termination_condition is tc.infeasible:
                handle_NLP_subproblem_infeasible(fixed_nlp, solve_data, config)
            else:
                handle_NLP_subproblem_other_termination(fixed_nlp, fixed_nlp_result.solver.termination_condition,
                                                        solve_data, config)