Example #1
def get_index_set_except(comp, *sets):
    """ 
    Function for getting indices of a component over a product of its
    indexing sets other than those specified. Indices for the specified 
    sets can be used to construct indices of the proper dimension for the 
    original component via the index_getter function.

    Args:
        comp : Component whose indexing sets are to be manipulated
        sets : Sets to omit from the set_except product

    Returns:
        A dictionary. Maps 'set_except' to a Pyomo Set or SetProduct
        containing comp's indexing sets, excluding those given in the
        sets argument. Maps 'index_getter' to a function that returns
        an index of the proper dimension for comp, given an element of
        set_except and a value for each excluded set. These values must
        be provided in the same order their Sets were provided in the
        sets argument.
    """
    try:
        total_s_dim = sum([s.dimen for s in sets])
    except TypeError:
        msg = ('get_index_set_except does not support sets with '
               'dimen == None, including those with inconsistent dimen')
        raise TypeError(msg)

    info = {}

    if not is_explicitly_indexed_by(comp, *sets):
        msg = (comp.name + ' is not indexed by at least one of ' +
               str([s.name for s in sets]))
        raise ValueError(msg)

    index_set = comp.index_set()
    if isinstance(index_set, SetProduct):
        projection_sets = list(index_set.subsets())
        counter = Counter([id(_) for _ in projection_sets])
        for s in sets:
            if counter[id(s)] != 1:
                msg = 'Cannot omit sets that appear multiple times'
                raise ValueError(msg)
        # Need to know the location of each set within comp's index_set
        # location will map:
        #     location in comp's subsets() -> location in input sets
        location = {}
        # location should be well defined even for higher dimension sets
        # because this maps between lists of sets, not lists of indices
        other_ind_sets = []
        for ind_loc, ind_set in enumerate(projection_sets):
            found_set = False
            for s_loc, s_set in enumerate(sets):
                if ind_set is s_set:
                    location[ind_loc] = s_loc
                    found_set = True
                    break
            if not found_set:
                other_ind_sets.append(ind_set)
    else:
        # If index_set is not a SetProduct, only one set must have been
        # provided, so len(sets) == 1
        # Location in sets and in comp's indexing set are the same.
        location = {0: 0}
        other_ind_sets = []

    if comp.dim() == total_s_dim:
        # comp indexed by all sets and having this dimension
        # is sufficient to know that comp is only indexed by
        # Sets in *sets

        # In this case, return the trivial set_except and index_getter.
        # set_except is a singleton list whose single (unused) element
        # stands in for the "incomplete index"; index_getter ignores it
        # and builds the full index from newvals alone.
        info['set_except'] = [None]
        # index_getter returns an index corresponding to the values passed to
        # it, re-ordered according to order of indexing sets in component.
        info['index_getter'] = (lambda incomplete_index, *newvals: newvals[0]
                                if len(newvals) <= 1 else tuple(
                                    [newvals[location[i]] for i in location]))
        return info

    # Now may assume other_ind_sets is nonempty.
    if len(other_ind_sets) == 1:
        set_except = other_ind_sets[0]
    elif len(other_ind_sets) >= 2:
        set_except = other_ind_sets[0].cross(*other_ind_sets[1:])
    else:
        # Should be unreachable: comp.dim() != total_s_dim guarantees at
        # least one remaining indexing set.
        raise ValueError('Expected comp to have at least one indexing set '
                         'not included in sets')

    index_getter = (lambda incomplete_index, *newvals: _complete_index(
        location, incomplete_index, *newvals))

    info['set_except'] = set_except
    info['index_getter'] = index_getter
    return info
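A minimal usage sketch, assuming a standard pyomo.environ import and that get_index_set_except is imported from its module; the model and component names below are hypothetical:

import pyomo.environ as pyo

m = pyo.ConcreteModel()
m.time = pyo.Set(initialize=[0, 1, 2])
m.space = pyo.Set(initialize=['a', 'b'])
m.v = pyo.Var(m.time, m.space)

# Exclude m.time: set_except then iterates over m.space only.
info = get_index_set_except(m.v, m.time)
for x in info['set_except']:
    # Rebuild a full (time, space) index by supplying a value for m.time.
    full_index = info['index_getter'](x, 0)
    assert full_index in m.v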
Example #2
def solve_strongly_connected_components(block, solver=None, solve_kwds=None):
    """ This function solves a square block of variables and equality
    constraints by solving strongly connected components individually.
    Strongly connected components (of the directed graph of constraints
    obtained from a perfect matching of variables and constraints) are
    the diagonal blocks in a block triangularization of the incidence
    matrix, so solving the strongly connected components in topological
    order is sufficient to solve the entire block.

    Arguments
    ---------
    block: Pyomo Block
        The Pyomo block whose variables and constraints will be solved
    solver: Pyomo solver object
        The solver object that will be used to solve strongly connected
        components containing more than one constraint. Must implement
        a solve method.
    solve_kwds: Dictionary
        Keyword arguments for the solver's solve method

    Returns
    -------
    List of results objects returned by each call to solve

    """
    if solve_kwds is None:
        solve_kwds = {}

    constraints = list(block.component_data_objects(Constraint, active=True))
    var_set = ComponentSet()
    variables = []
    for con in constraints:
        for var in identify_variables(con.expr, include_fixed=False):
            # Because we are solving, we do not want to include fixed variables
            if var not in var_set:
                variables.append(var)
                var_set.add(var)

    res_list = []
    for scc, inputs in generate_strongly_connected_components(
            constraints,
            variables,
    ):
        with TemporarySubsystemManager(to_fix=inputs):
            if len(scc.vars) == 1:
                calculate_variable_from_constraint(scc.vars[0], scc.cons[0])
            else:
                if solver is None:
                    # NOTE: Use local name to avoid slow generation of this
                    # error message if a user provides a large, non-decomposable
                    # block with no solver.
                    var_names = [var.local_name for var in scc.vars.values()]
                    con_names = [con.local_name for con in scc.cons.values()]
                    raise RuntimeError(
                        "An external solver is required if block has strongly\n"
                        "connected components of size greater than one (is not "
                        "a DAG).\nGot an SCC with components: \n%s\n%s" %
                        (var_names, con_names))
                results = solver.solve(scc, **solve_kwds)
                res_list.append(results)
    return res_list
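A hedged usage sketch: a small square system whose single 2x2 strongly connected component requires an external solver (assumes IPOPT is installed and that solve_strongly_connected_components is imported from its module):

import pyomo.environ as pyo

m = pyo.ConcreteModel()
m.x = pyo.Var(initialize=1.0)
m.y = pyo.Var(initialize=1.0)
m.c1 = pyo.Constraint(expr=m.x + m.y == 3)
m.c2 = pyo.Constraint(expr=m.x * m.y == 2)

# Both constraints couple x and y, so the whole system is one 2x2 SCC.
results = solve_strongly_connected_components(
    m, solver=pyo.SolverFactory('ipopt'))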
Example #3
    def __init__(self, **kwds):
        OptSolver.__init__(self, **kwds)

        self._pyomo_model = None
        """The pyomo model being solved."""

        self._solver_model = None
        """The python instance of the solver model (e.g., the gurobipy Model instance)."""

        self._symbol_map = SymbolMap()
        """A symbol map used to map between pyomo components and their names used with the solver."""

        self._labeler = None
        """The labeler for creating names for the solver model components."""

        self._pyomo_var_to_solver_var_map = ComponentMap()
        self._solver_var_to_pyomo_var_map = dict()
        """A dictionary mapping pyomo Var's to the solver variables."""

        self._pyomo_con_to_solver_con_map = dict()
        self._solver_con_to_pyomo_con_map = dict()
        """A dictionary mapping pyomo constraints to solver constraints."""

        self._vars_referenced_by_con = ComponentMap()
        """A dictionary mapping constraints to a ComponentSet containt the pyomo variables referenced by that
        constraint. This is primarily needed for the persistent solvers. When a constraint is deleted, we need
        to decrement the number of times those variables are referenced (see self._referenced_variables)."""

        self._vars_referenced_by_obj = ComponentSet()
        """A set containing the pyomo variables referenced by that the objective.
        This is primarily needed for the persistent solvers. When a the objective is deleted, we need
        to decrement the number of times those variables are referenced (see self._referenced_variables)."""

        self._objective = None
        """The pyomo Objective object currently being used with the solver."""

        self.results = None
        """A results object return from the solve method."""

        self._skip_trivial_constraints = False
        """A bool. If True, then any constraints with a constant body will not be added to the solver model.
        Be careful with this. If a trivial constraint is skipped then that constraint cannot be removed from
        a persistent solver (an error will be raised if a user tries to remove a non-existent constraint)."""

        self._output_fixed_variable_bounds = False
        """A bool. If False then an error will be raised if a fixed variable is used in one of the solver constraints.
        This is useful for catching bugs. Ordinarily a fixed variable should appear as a constant value in the
        solver constraints. If True, then the error will not be raised."""

        self._python_api_exists = False
        """A bool indicating whether or not the python api is available for the specified solver."""

        self._version = None
        """The version of the solver."""

        self._symbolic_solver_labels = False
        """A bool. If true then the solver components will be given names corresponding to the pyomo component names."""

        self._capabilites = Bunch()

        self._referenced_variables = ComponentMap()
        """dict: {var: count} where count is the number of constraints/objective referencing the var"""

        self._keepfiles = False
        """A bool. If True, then the solver log will be saved."""

        self._save_results = True
        """A bool. This is used for backwards compatability. If True, the solution will be loaded into the Solution
        object that gets placed on the SolverResults object. This way, users can do model.solutions.load_from(results)
        to load solutions into thier model. However, it is more efficient to bypass the Solution object and load
        the results directly from the solver object. If False, the solution will not be loaded into the Solution
        object."""

        self._integer_only_warmstarts = False
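A brief sketch of how the attributes above surface through a persistent solver's public API, assuming Gurobi and its Python bindings are installed; the model is hypothetical:

import pyomo.environ as pyo

m = pyo.ConcreteModel()
m.x = pyo.Var()
m.y = pyo.Var()
m.c = pyo.Constraint(expr=m.x + m.y <= 1)
m.obj = pyo.Objective(expr=m.x**2)

opt = pyo.SolverFactory('gurobi_persistent')
opt.set_instance(m)         # populates the var/con maps described above
opt.remove_constraint(m.c)  # decrements _referenced_variables counts for x, y
opt.add_constraint(m.c)     # re-adds the constraint and restores the counts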
Example #4
def identify_objective_functions(model, config):
    '''
    Determine the objective first- and second-stage costs based on the
    user-provided variable partition.
    :param model: deterministic model
    :param config: config block
    :return:
    '''

    m = model
    obj = list(model.component_data_objects(Objective, active=True))
    if len(obj) != 1:
        raise AttributeError("Deterministic model must have exactly 1 active objective!")
    if obj[0].sense != minimize:
        raise AttributeError("PyROS requires deterministic models to have an objective function with 'sense'=minimize. "
                             "Please specify your objective function as minimization.")
    first_stage_terms = []
    second_stage_terms = []

    first_stage_cost_expr = 0
    second_stage_cost_expr = 0
    const_obj_expr = 0

    if isinstance(obj[0].expr, Var):
        obj_to_parse = [obj[0].expr]
    else:
        obj_to_parse = obj[0].expr.args
    first_stage_variable_set = ComponentSet(model.util.first_stage_variables)
    second_stage_variable_set = ComponentSet(model.util.second_stage_variables)
    for term in obj_to_parse:
        vars_in_term = list(v for v in identify_variables(term))

        first_stage_vars_in_term = list(v for v in vars_in_term if
                                        v in first_stage_variable_set)
        second_stage_vars_in_term = list(v for v in vars_in_term if
                                         v not in first_stage_variable_set)
        # Checking "not in first_stage_variable_set" picks up both
        # second-stage variables and state variables.
        for v in first_stage_vars_in_term:
            if id(v) not in list(id(var) for var in first_stage_terms):
                first_stage_terms.append(v)
        for v in second_stage_vars_in_term:
            if id(v) not in list(id(var) for var in second_stage_terms):
                second_stage_terms.append(v)

        if first_stage_vars_in_term and second_stage_vars_in_term:
            second_stage_cost_expr += term
        elif first_stage_vars_in_term and not second_stage_vars_in_term:
            first_stage_cost_expr += term
        elif not first_stage_vars_in_term and second_stage_vars_in_term:
            second_stage_cost_expr += term
        elif not vars_in_term:
            const_obj_expr += term
    # Convention: add the constant objective term to the first-stage cost
    # iff it does not contain an uncertain param; otherwise it is treated
    # as second-stage cost.
    mutable_params_in_const_term = identify_mutable_parameters(expr=const_obj_expr)
    if any(q in ComponentSet(model.util.uncertain_params) for q in mutable_params_in_const_term):
        m.first_stage_objective = Expression(expr=first_stage_cost_expr)
        m.second_stage_objective = Expression(expr=second_stage_cost_expr + const_obj_expr)
    else:
        m.first_stage_objective = Expression(expr=first_stage_cost_expr + const_obj_expr)
        m.second_stage_objective = Expression(expr=second_stage_cost_expr)
    return
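A hedged usage sketch; the util block attributes follow the PyROS conventions this function expects, and config is passed as None because the body never uses it:

import pyomo.environ as pyo

m = pyo.ConcreteModel()
m.x = pyo.Var()  # first-stage variable
m.z = pyo.Var()  # second-stage variable
m.q = pyo.Param(initialize=1.0, mutable=True)  # uncertain parameter
m.obj = pyo.Objective(expr=m.x**2 + m.q * m.z)

m.util = pyo.Block()
m.util.first_stage_variables = [m.x]
m.util.second_stage_variables = [m.z]
m.util.uncertain_params = [m.q]

identify_objective_functions(m, config=None)
print(m.first_stage_objective.expr)   # x**2
print(m.second_stage_objective.expr)  # q*z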
Example #5
    def test_expression_replacement_ranged_inequality(self):
        model = make_model_with_ranged_inequalities()
        sens = SensitivityInterface(model, clone_model=False)
        sens._add_data_block()
        instance = sens.model_instance
        block = sens.block
        instance.x.fix()
        param_list = [instance.eta[1], instance.eta[2]]
        sens._add_sensitivity_data(param_list)

        orig_components = (
            list(instance.component_data_objects(Constraint, active=True)) +
            list(instance.component_data_objects(Objective, active=True)))
        orig_expr = [con.expr for con in orig_components]

        # These will be modified to account for expected replacements
        expected_variables = ComponentMap(
            (con, ComponentSet(identify_variables(con.expr)))
            for con in orig_components)
        expected_parameters = ComponentMap(
            (con, ComponentSet(identify_mutable_parameters(con.expr)))
            for con in orig_components)

        # As constructed by the `setup_sensitivity` method:
        variable_sub_map = dict(
            (id(param), var)
            for var, param, list_idx, _ in block._sens_data_list
            if param_list[list_idx].ctype is Param)
        # Sanity check
        self.assertEqual(len(variable_sub_map), 2)

        # Map each param to the var that should replace it
        param_var_map = ComponentMap(
            (param, var) for var, param, _, _ in block._sens_data_list)

        # Remove parameters we expect to replace and add vars
        # we expect to replace with.
        for con in orig_components:
            for param in param_var_map:
                if param in expected_parameters[con]:
                    expected_variables[con].add(param_var_map[param])
                    expected_parameters[con].remove(param)

        # We check that the new components (Constraints and Objectives) contain
        # the expected parameters and variables.
        replaced = sens._replace_parameters_in_constraints(variable_sub_map)
        # With ranged inequalities, we end up with more constraints than we
        # started with:
        self.assertEqual(len(block.constList), 3)

        for con in block.constList.values():
            self.assertTrue(con.active)
            param_set = ComponentSet(identify_mutable_parameters(con.expr))
            var_set = ComponentSet(identify_variables(con.expr))
            orig_con = replaced[con]
            self.assertIsNot(orig_con, con)
            # Note that for ranged inequalities, it is not valid to check
            # that the two sets are equal as a mutable parameter could be
            # contained in only one "sub-inequality"
            self.assertIsSubset(param_set, expected_parameters[orig_con])
            self.assertEqual(var_set, expected_variables[orig_con])

        self.assertIs(block.cost.ctype, Objective)
        obj = block.cost
        param_set = ComponentSet(identify_mutable_parameters(obj.expr))
        var_set = ComponentSet(identify_variables(obj.expr))
        orig_obj = replaced[obj]
        self.assertIsNot(orig_obj, obj)
        self.assertEqual(param_set, expected_parameters[orig_obj])
        self.assertEqual(var_set, expected_variables[orig_obj])

        # Original components were deactivated but otherwise not altered
        for con, expr in zip(orig_components, orig_expr):
            self.assertFalse(con.active)
            # assertIs(con.expr, expr) fails here, likely because con.expr
            # is reconstructed from the stored lower/body/upper on access,
            # so object identity is not preserved; compare strings instead.
            self.assertEqual(con.expr.to_string(), expr.to_string())
Example #6
 def test_init(self):
     cset = ComponentSet()
     cset = ComponentSet(self._components)
     with self.assertRaises(TypeError):
         cset = ComponentSet(*self._components)
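ComponentSet stores Pyomo components by object identity rather than by value, and (as the test above checks) its constructor takes a single iterable. A small sketch:

import pyomo.environ as pyo
from pyomo.common.collections import ComponentSet

m = pyo.ConcreteModel()
m.x = pyo.Var()
m.y = pyo.Var()

cset = ComponentSet([m.x])
assert m.x in cset      # membership is by identity
assert m.y not in cset  # a distinct component never collides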
Example #7
def categorize_dae_variables(dae_vars, time, inputs, measurements=None):
    t0 = time.first()
    t1 = time.get_finite_elements()[1]
    deriv_vars = []
    diff_vars = []
    input_vars = []
    alg_vars = []
    fixed_vars = []

    # TODO: give user ability to specify measurements and disturbances
    measured_vars = []

    if measurements is not None:
        updated_user_measurements = ComponentSet(measurements)
        user_measured_vars = []
    else:
        updated_user_measurements = ComponentSet()

    dae_map = ComponentMap([(v[t0], v) for v in dae_vars])
    t0_vardata = list(dae_map.keys())

    if inputs is None:
        inputs = []
    input_set = ComponentSet(inputs)
    updated_input_set = ComponentSet(inputs)

    for var0 in t0_vardata:
        if var0 in input_set:
            updated_input_set.remove(var0)
            time_slice = dae_map.pop(var0)
            input_vars.append(time_slice)

        if var0 in updated_user_measurements:
            updated_user_measurements.remove(var0)
            # Don't pop measured vars. They will be popped elsewhere.
            time_slice = dae_map[var0]
            user_measured_vars.append(time_slice)

        parent = var0.parent_component()
        if not isinstance(parent, DerivativeVar):
            continue
        if time not in ComponentSet(parent.get_continuousset_list()):
            continue
        index0 = var0.index()
        var1 = dae_map[var0][t1]
        index1 = var1.index()
        state = parent.get_state_var()

        if state[index1].fixed:
            # Assume state var is fixed everywhere, so derivative
            # 'isn't really' a derivative.
            # Should be safe to remove state from dae_map here
            state_slice = dae_map.pop(state[index0])
            fixed_vars.append(state_slice)
            continue
        if state[index0] in input_set:
            # If differential variable is an input, then this DerivativeVar
            # is 'not really a derivative'
            continue

        deriv_slice = dae_map.pop(var0)

        if var1.fixed:
            # Assume derivative has been fixed everywhere.
            # Add to list of fixed variables, and don't remove its state variable.
            fixed_vars.append(deriv_slice)
        elif var0.fixed:
            # In this case the derivative has been used as an initial condition.
            # Still want to include it in the list of derivatives.
            measured_vars.append(deriv_slice)
            state_slice = dae_map.pop(state[index0])
            if state[index0].fixed:
                measured_vars.append(state_slice)
            deriv_vars.append(deriv_slice)
            diff_vars.append(state_slice)
        else:
            # Neither is fixed. This should be the most common case.
            state_slice = dae_map.pop(state[index0])
            if state[index0].fixed:
                measured_vars.append(state_slice)
            deriv_vars.append(deriv_slice)
            diff_vars.append(state_slice)

    if updated_input_set:
        raise RuntimeError('Not all inputs could be found')
    assert len(deriv_vars) == len(diff_vars)

    for var0, time_slice in dae_map.items():
        var1 = time_slice[t1]
        # If the variable is still in the map of time-indexed vars,
        # it must either be fixed (effectively not a variable) or algebraic
        if var1.fixed:
            fixed_vars.append(time_slice)
        else:
            if var0.fixed:
                measured_vars.append(time_slice)
            alg_vars.append(time_slice)

    category_list_map = {
        VariableCategory.DERIVATIVE: deriv_vars,
        VariableCategory.DIFFERENTIAL: diff_vars,
        VariableCategory.ALGEBRAIC: alg_vars,
        VariableCategory.INPUT: input_vars,
        VariableCategory.FIXED: fixed_vars,
        VariableCategory.MEASUREMENT: measured_vars,
    }
    if measurements is not None:
        # If the user provided their own measurements,
        # override the inferred measurements. Assume the user
        # will modify the state of their variables appropriately.
        category_list_map[VariableCategory.MEASUREMENT] = user_measured_vars
    category_dict = {
        category: [
            Reference(ref.referent, ctype=ctype)
            for ref in category_list_map[category]
        ]
        for category, ctype in CATEGORY_TYPE_MAP.items()
    }
    return category_dict
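A hedged usage sketch, assuming pyomo.dae and the flattening utility commonly used to produce dae_vars (flatten_dae_components); the model below is hypothetical:

import pyomo.environ as pyo
import pyomo.dae as dae
from pyomo.dae.flatten import flatten_dae_components

m = pyo.ConcreteModel()
m.time = dae.ContinuousSet(bounds=(0, 1))
m.x = pyo.Var(m.time)
m.u = pyo.Var(m.time)
m.dxdt = dae.DerivativeVar(m.x, wrt=m.time)
pyo.TransformationFactory('dae.finite_difference').apply_to(
    m, nfe=2, scheme='BACKWARD')

scalar_vars, dae_vars = flatten_dae_components(m, m.time, pyo.Var)
t0 = m.time.first()
category_dict = categorize_dae_variables(dae_vars, m.time, inputs=[m.u[t0]])
# Expected: x is differential, dxdt is its derivative, u is an input.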
Example #8
    def solve(self, model, **kwds):
        config = self.CONFIG(kwds.pop('options', {}))
        config.set_value(kwds)
        return SolverFactory('gdpopt').solve(
            model,
            strategy='LBB',
            minlp_solver=config.solver,
            minlp_solver_args=config.solver_args,
            tee=config.tee,
            check_sat=config.check_sat,
            logger=config.logger,
            time_limit=config.time_limit)

        # NOTE: the return above delegates to GDPopt's LBB strategy, so the
        # legacy GDPbb implementation below is unreachable dead code,
        # retained only for reference.
        # Validate model to be used with gdpbb
        self.validate_model(model)
        # Set solver as an MINLP
        solve_data = GDPbbSolveData()
        solve_data.timing = Bunch()
        solve_data.original_model = model
        solve_data.results = SolverResults()

        old_logger_level = config.logger.getEffectiveLevel()
        with time_code(solve_data.timing, 'total', is_main_timer=True), \
                restore_logger_level(config.logger), \
                create_utility_block(model, 'GDPbb_utils', solve_data):
            if config.tee and old_logger_level > logging.INFO:
                # If the logger does not already include INFO, include it.
                config.logger.setLevel(logging.INFO)
            config.logger.info(
                "Starting GDPbb version %s using %s as subsolver" %
                (".".join(map(str, self.version())), config.solver))

            # Setup results
            solve_data.results.solver.name = 'GDPbb - %s' % (str(
                config.solver))
            setup_results_object(solve_data, config)

            # clone original model for root node of branch and bound
            root = solve_data.working_model = solve_data.original_model.clone()

            # get objective sense
            process_objective(solve_data, config)
            objectives = solve_data.original_model.component_data_objects(
                Objective, active=True)
            obj = next(objectives, None)
            solve_data.results.problem.sense = obj.sense
            # Sign convention so the min-heap orders by "better" objective
            # value regardless of sense (obj_sign is used throughout below).
            obj_sign = 1 if obj.sense == minimize else -1

            # set up lists to keep track of which disjunctions have been covered.

            # this list keeps track of the relaxed disjunctions
            root.GDPbb_utils.unenforced_disjunctions = list(
                disjunction
                for disjunction in root.GDPbb_utils.disjunction_list
                if disjunction.active)

            root.GDPbb_utils.deactivated_constraints = ComponentSet([
                constr
                for disjunction in root.GDPbb_utils.unenforced_disjunctions
                for disjunct in disjunction.disjuncts
                for constr in disjunct.component_data_objects(ctype=Constraint,
                                                              active=True)
                if constr.body.polynomial_degree() not in (1, 0)
            ])
            # Deactivate nonlinear constraints in unenforced disjunctions
            for constr in root.GDPbb_utils.deactivated_constraints:
                constr.deactivate()

            # Add the BigM suffix if it does not already exist. Used later during nonlinear constraint activation.
            if not hasattr(root, 'BigM'):
                root.BigM = Suffix()

            # Pre-screen that none of the disjunctions are already predetermined due to the disjuncts being fixed
            # to True/False values.
            # TODO this should also be done within the loop, but we aren't handling it right now.
            # This only affects efficiency, not correctness.
            root.GDPbb_utils.disjuncts_fixed_True = ComponentSet()
            # Only find top-level (non-nested) disjunctions
            for disjunction in root.component_data_objects(Disjunction,
                                                           active=True):
                fixed_true_disjuncts = [
                    disjunct for disjunct in disjunction.disjuncts
                    if disjunct.indicator_var.fixed
                    and disjunct.indicator_var.value == 1
                ]
                fixed_false_disjuncts = [
                    disjunct for disjunct in disjunction.disjuncts
                    if disjunct.indicator_var.fixed
                    and disjunct.indicator_var.value == 0
                ]
                for disjunct in fixed_false_disjuncts:
                    disjunct.deactivate()
                if len(fixed_false_disjuncts) == len(
                        disjunction.disjuncts) - 1:
                    # all but one disjunct in the disjunction is fixed to False. Remaining one must be true.
                    if not fixed_true_disjuncts:
                        fixed_true_disjuncts = [
                            disjunct for disjunct in disjunction.disjuncts
                            if disjunct not in fixed_false_disjuncts
                        ]
                # Reactivate the fixed-true disjuncts
                for disjunct in fixed_true_disjuncts:
                    newly_activated = ComponentSet()
                    for constr in disjunct.component_data_objects(Constraint):
                        if constr in root.GDPbb_utils.deactivated_constraints:
                            newly_activated.add(constr)
                            constr.activate()
                            # Set the big M value for the constraint
                            root.BigM[constr] = 1
                            # Note: we use a default big M value of 1
                            # because all non-selected disjuncts should be deactivated.
                            # Therefore, none of the big M transformed nonlinear constraints will need to be relaxed.
                            # The default M value should therefore be irrelevant.
                    root.GDPbb_utils.deactivated_constraints -= newly_activated
                    root.GDPbb_utils.disjuncts_fixed_True.add(disjunct)

                if fixed_true_disjuncts:
                    assert disjunction.xor, "GDPbb only handles disjunctions in which one term can be selected. " \
                        "%s violates this assumption." % (disjunction.name, )
                    root.GDPbb_utils.unenforced_disjunctions.remove(
                        disjunction)

            # Check satisfiability
            if config.check_sat and satisfiable(root, config.logger) is False:
                # Problem is not satisfiable. Problem is infeasible.
                obj_value = obj_sign * float('inf')
            else:
                # solve the root node
                config.logger.info("Solving the root node.")
                obj_value, result, var_values = self.subproblem_solve(
                    root, config)

            if obj_sign * obj_value == float('inf'):
                config.logger.info(
                    "Model was found to be infeasible at the root node. Elapsed %.2f seconds."
                    % get_main_elapsed_time(solve_data.timing))
                if solve_data.results.problem.sense == minimize:
                    solve_data.results.problem.lower_bound = float('inf')
                    solve_data.results.problem.upper_bound = None
                else:
                    solve_data.results.problem.lower_bound = None
                    solve_data.results.problem.upper_bound = float('-inf')
                solve_data.results.solver.timing = solve_data.timing
                solve_data.results.solver.iterations = 0
                solve_data.results.solver.termination_condition = tc.infeasible
                return solve_data.results

            # initialize minheap for Branch and Bound algorithm
            # Heap structure: (ordering tuple, model)
            # Ordering tuple: (objective value, disjunctions_left, -total_nodes_counter)
            #  - select solutions with lower objective value,
            #    then fewer disjunctions left to explore (depth first),
            #    then more recently encountered (tiebreaker)
            heap = []
            total_nodes_counter = 0
            disjunctions_left = len(root.GDPbb_utils.unenforced_disjunctions)
            heapq.heappush(heap,
                           ((obj_sign * obj_value, disjunctions_left,
                             -total_nodes_counter), root, result, var_values))

            # loop to branch through the tree
            while len(heap) > 0:
                # pop best model off of heap
                sort_tuple, incumbent_model, incumbent_results, incumbent_var_values = heapq.heappop(
                    heap)
                incumbent_obj_value, disjunctions_left, _ = sort_tuple

                config.logger.info(
                    "Exploring node with LB %.10g and %s inactive disjunctions."
                    % (incumbent_obj_value, disjunctions_left))

                # if all the originally active disjunctions are active, solve and
                # return solution
                if disjunctions_left == 0:
                    config.logger.info("Model solved.")
                    # Model is solved. Copy over solution values.
                    original_model = solve_data.original_model
                    for orig_var, val in zip(
                            original_model.GDPbb_utils.variable_list,
                            incumbent_var_values):
                        orig_var.value = val

                    solve_data.results.problem.lower_bound = incumbent_results.problem.lower_bound
                    solve_data.results.problem.upper_bound = incumbent_results.problem.upper_bound
                    solve_data.results.solver.timing = solve_data.timing
                    solve_data.results.solver.iterations = total_nodes_counter
                    solve_data.results.solver.termination_condition = incumbent_results.solver.termination_condition
                    return solve_data.results

                # Pick the next disjunction to branch on
                next_disjunction = incumbent_model.GDPbb_utils.unenforced_disjunctions[
                    0]
                config.logger.info("Branching on disjunction %s" %
                                   next_disjunction.name)
                assert next_disjunction.xor, "GDPbb only handles disjunctions in which one term can be selected. " \
                    "%s violates this assumption." % (next_disjunction.name, )

                new_nodes_counter = 0

                for i, disjunct in enumerate(next_disjunction.disjuncts):
                    # Create one branch for each of the disjuncts on the disjunction

                    if any(disj.indicator_var.fixed
                           and disj.indicator_var.value == 1
                           for disj in next_disjunction.disjuncts
                           if disj is not disjunct):
                        # If any other disjunct is fixed to 1 and an xor relationship applies,
                        # then this disjunct cannot be activated.
                        continue

                    # Check time limit
                    if get_main_elapsed_time(
                            solve_data.timing) >= config.time_limit:
                        if solve_data.results.problem.sense == minimize:
                            solve_data.results.problem.lower_bound = incumbent_obj_value
                            solve_data.results.problem.upper_bound = float(
                                'inf')
                        else:
                            solve_data.results.problem.lower_bound = float(
                                '-inf')
                            solve_data.results.problem.upper_bound = incumbent_obj_value
                        config.logger.info('GDPopt unable to converge bounds '
                                           'before time limit of {} seconds. '
                                           'Elapsed: {} seconds'.format(
                                               config.time_limit,
                                               get_main_elapsed_time(
                                                   solve_data.timing)))
                        config.logger.info(
                            'Final bound values: LB: {}  UB: {}'.format(
                                solve_data.results.problem.lower_bound,
                                solve_data.results.problem.upper_bound))
                        solve_data.results.solver.timing = solve_data.timing
                        solve_data.results.solver.iterations = total_nodes_counter
                        solve_data.results.solver.termination_condition = tc.maxTimeLimit
                        return solve_data.results

                    # Branch on the disjunct
                    child = incumbent_model.clone()
                    # TODO I am leaving the old branching system in place, but there should be
                    # something better, ideally that deals with nested disjunctions as well.
                    disjunction_to_branch = child.GDPbb_utils.unenforced_disjunctions.pop(
                        0)
                    child_disjunct = disjunction_to_branch.disjuncts[i]
                    child_disjunct.indicator_var.fix(1)
                    # Deactivate (and fix to 0) other disjuncts on the disjunction
                    for disj in disjunction_to_branch.disjuncts:
                        if disj is not child_disjunct:
                            disj.deactivate()
                    # Activate nonlinear constraints on the newly fixed child disjunct
                    newly_activated = ComponentSet()
                    for constr in child_disjunct.component_data_objects(
                            Constraint):
                        if constr in child.GDPbb_utils.deactivated_constraints:
                            newly_activated.add(constr)
                            constr.activate()
                            # Set the big M value for the constraint
                            child.BigM[constr] = 1
                            # Note: we use a default big M value of 1
                            # because all non-selected disjuncts should be deactivated.
                            # Therefore, none of the big M transformed nonlinear constraints will need to be relaxed.
                            # The default M value should therefore be irrelevant.
                    child.GDPbb_utils.deactivated_constraints -= newly_activated
                    child.GDPbb_utils.disjuncts_fixed_True.add(child_disjunct)

                    if disjunct in incumbent_model.GDPbb_utils.disjuncts_fixed_True:
                        # If the disjunct was already branched to True from a parent disjunct branching, just pass
                        # through the incumbent value without resolving. The solution should be the same as the parent.
                        total_nodes_counter += 1
                        ordering_tuple = (obj_sign * incumbent_obj_value,
                                          disjunctions_left - 1,
                                          -total_nodes_counter)
                        heapq.heappush(heap, (ordering_tuple, child, result,
                                              incumbent_var_values))
                        new_nodes_counter += 1
                        continue

                    if config.check_sat and satisfiable(
                            child, config.logger) is False:
                        # Problem is not satisfiable. Skip this disjunct.
                        continue

                    obj_value, result, var_values = self.subproblem_solve(
                        child, config)
                    total_nodes_counter += 1
                    ordering_tuple = (obj_sign * obj_value,
                                      disjunctions_left - 1,
                                      -total_nodes_counter)
                    heapq.heappush(heap,
                                   (ordering_tuple, child, result, var_values))
                    new_nodes_counter += 1

                config.logger.info(
                    "Added %s new nodes with %s relaxed disjunctions to the heap. Size now %s."
                    % (new_nodes_counter, disjunctions_left - 1, len(heap)))
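A hedged usage sketch matching the delegation at the top of this method, since GDPbb now hands off to GDPopt's LBB strategy; assumes pyomo.gdp and an available MINLP subsolver ('baron' below is illustrative):

import pyomo.environ as pyo
from pyomo.gdp import Disjunction

m = pyo.ConcreteModel()
m.x = pyo.Var(bounds=(0, 10))
m.d = Disjunction(expr=[[m.x <= 2], [m.x >= 8]])
m.obj = pyo.Objective(expr=m.x)

results = pyo.SolverFactory('gdpopt').solve(
    m, strategy='LBB', minlp_solver='baron')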
Example #9
 def clear_persistent_solvers(self):
     self._persistent_solvers = ComponentSet()
Example #10
    def test_dynamic_backward_disc_with_initial_conditions(self):
        nfe = 5
        m = make_dynamic_model(nfe=nfe, scheme="BACKWARD")
        time = m.time
        t0 = m.time.first()
        t1 = m.time.next(t0)

        m.flow_in.fix()
        m.height[t0].fix()

        constraints = list(m.component_data_objects(pyo.Constraint))
        self.assertEqual(
            len(list(generate_strongly_connected_components(constraints))),
            nfe + 2,
            # The "initial constraints" have two SCCs because they
            # decompose into the algebraic equation and differential
            # equation. This decomposition is because the discretization
            # equation is not present.
            #
            # This is actually quite troublesome for testing because
            # it means that the topological order of strongly connected
            # components is not unique (alternatively, the initial
            # conditions and rest of the model are independent, or the
            # bipartite graph of variables and equations is disconnected).
        )
        t_scc_map = {}
        for i, (block, inputs) in enumerate(
                generate_strongly_connected_components(constraints)):
            with TemporarySubsystemManager(to_fix=inputs):
                t = block.vars[0].index()
                t_scc_map[t] = i
                if t == t0:
                    continue
                else:
                    t_prev = m.time.prev(t)

                    con_set = ComponentSet(
                        [m.diff_eqn[t], m.flow_out_eqn[t], m.dhdt_disc_eq[t]])
                    var_set = ComponentSet(
                        [m.height[t], m.dhdt[t], m.flow_out[t]])
                    self.assertEqual(len(con_set), len(block.cons))
                    self.assertEqual(len(var_set), len(block.vars))
                    for var, con in zip(block.vars[:], block.cons[:]):
                        self.assertIn(var, var_set)
                        self.assertIn(con, con_set)
                        self.assertFalse(var.fixed)

                    other_var_set = (ComponentSet([m.height[t_prev]])
                                     if t != t1 else ComponentSet())
                    # At t1, "input var" height[t0] is fixed, so
                    # it is not included here.
                    self.assertEqual(len(inputs), len(other_var_set))
                    for var in block.input_vars[:]:
                        self.assertIn(var, other_var_set)
                        self.assertTrue(var.fixed)

        scc = -1
        for t in m.time:
            if t == t0:
                self.assertTrue(m.height[t].fixed)
            else:
                self.assertFalse(m.height[t].fixed)

                # Make sure "finite element blocks" in the SCC DAG are
                # in a valid topological order
                self.assertGreater(t_scc_map[t], scc)
                scc = t_scc_map[t]

            self.assertFalse(m.flow_out[t].fixed)
            self.assertFalse(m.dhdt[t].fixed)
            self.assertTrue(m.flow_in[t].fixed)
Example #11
    def _apply_to(self, model, detect_fixed_vars=True):
        """Apply the transformation to the given model."""
        # Generate the equality sets
        eq_var_map = _build_equality_set(model)

        # Detect and process fixed variables.
        if detect_fixed_vars:
            _fix_equality_fixed_variables(model)

        # Generate aggregation infrastructure
        model._var_aggregator_info = Block(
            doc="Holds information for the variable aggregation "
            "transformation system.")
        z = model._var_aggregator_info.z = VarList(doc="Aggregated variables.")
        # Map of the aggregate var to the equality set (ComponentSet)
        z_to_vars = model._var_aggregator_info.z_to_vars = ComponentMap()
        # Map of variables to their corresponding aggregate var
        var_to_z = model._var_aggregator_info.var_to_z = ComponentMap()
        processed_vars = ComponentSet()

        # TODO: This iteration is sorted by the variable name of the key in
        # order to preserve determinism. Unfortunately, var.name is an
        # expensive operation right now.
        for var, eq_set in sorted(eq_var_map.items(),
                                  key=lambda tup: tup[0].name):
            if var in processed_vars:
                continue  # Skip already-processed variables

            # This would be weird. The variable hasn't been processed, but is
            # in the map. Raise an exception.
            assert var_to_z.get(var, None) is None

            z_agg = z.add()
            z_to_vars[z_agg] = eq_set
            var_to_z.update(ComponentMap((v, z_agg) for v in eq_set))

            # Set the bounds of the aggregate variable based on the bounds of
            # the variables in its equality set.
            z_agg.setlb(max_if_not_None(v.lb for v in eq_set if v.has_lb()))
            z_agg.setub(min_if_not_None(v.ub for v in eq_set if v.has_ub()))

            # Set the fixed status of the aggregate var
            fixed_vars = [v for v in eq_set if v.fixed]
            if fixed_vars:
                # Check to make sure all the fixed values are the same.
                if any(var.value != fixed_vars[0].value
                       for var in fixed_vars[1:]):
                    raise ValueError(
                        "Aggregate variable for equality set is fixed to "
                        "multiple different values: %s" % (fixed_vars, ))
                z_agg.fix(fixed_vars[0].value)

                # Check that the fixed value lies within bounds.
                if z_agg.has_lb() and z_agg.value < value(z_agg.lb):
                    raise ValueError(
                        "Aggregate variable for equality set is fixed to "
                        "a value less than its lower bound: %s < LB %s" %
                        (z_agg.value, value(z_agg.lb)))
                if z_agg.has_ub() and z_agg.value > value(z_agg.ub):
                    raise ValueError(
                        "Aggregate variable for equality set is fixed to "
                        "a value greater than its upper bound: %s > UB %s" %
                        (z_agg.value, value(z_agg.ub)))
            else:
                # Set the value to be the average of the values within the
                # bounds only if the value is not already fixed.
                values_within_bounds = [
                    v.value for v in eq_set if (v.value is not None and (
                        not z_agg.has_lb() or v.value >= value(z_agg.lb)) and (
                            not z_agg.has_ub() or v.value <= value(z_agg.ub)))
                ]
                if values_within_bounds:
                    z_agg.set_value(sum(values_within_bounds) /
                                    len(values_within_bounds),
                                    skip_validation=True)

            processed_vars.update(eq_set)

        # Do the substitution
        substitution_map = {id(var): z_var for var, z_var in var_to_z.items()}
        visitor = ExpressionReplacementVisitor(
            substitute=substitution_map,
            descend_into_named_expressions=True,
            remove_named_expressions=False,
        )
        for constr in model.component_data_objects(ctype=Constraint,
                                                   active=True):
            orig_body = constr.body
            new_body = visitor.walk_expression(constr.body)
            if orig_body is not new_body:
                constr.set_value((constr.lower, new_body, constr.upper))

        for objective in model.component_data_objects(ctype=Objective,
                                                      active=True):
            orig_expr = objective.expr
            new_expr = visitor.walk_expression(objective.expr)
            if orig_expr is not new_expr:
                objective.set_value(new_expr)
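A hedged usage sketch of this transformation; in Pyomo it is typically registered under the name 'contrib.aggregate_vars' (that registration name is an assumption here):

import pyomo.environ as pyo

m = pyo.ConcreteModel()
m.x = pyo.Var(bounds=(0, 10), initialize=3.0)
m.y = pyo.Var(bounds=(2, 12), initialize=5.0)
m.c = pyo.Constraint(expr=m.x == m.y)  # x and y form one equality set
m.obj = pyo.Objective(expr=m.x + m.y)

pyo.TransformationFactory('contrib.aggregate_vars').apply_to(m)
z = m._var_aggregator_info.z
print(z[1].lb, z[1].ub)  # aggregate bounds tightened to 2, 10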
Example #12
    def test_gas_expansion(self):
        N = 5
        m = make_gas_expansion_model(N)
        m.rho[0].fix()
        m.F[0].fix()
        m.T[0].fix()

        constraints = list(m.component_data_objects(pyo.Constraint))
        self.assertEqual(
            len(list(generate_strongly_connected_components(constraints))),
            N + 1,
        )
        for i, (block, inputs) in enumerate(
                generate_strongly_connected_components(constraints)):
            with TemporarySubsystemManager(to_fix=inputs):
                if i == 0:
                    # P[0], ideal_gas[0]
                    self.assertEqual(len(block.vars), 1)
                    self.assertEqual(len(block.cons), 1)

                    var_set = ComponentSet([m.P[i]])
                    con_set = ComponentSet([m.ideal_gas[i]])
                    for var, con in zip(block.vars[:], block.cons[:]):
                        self.assertIn(var, var_set)
                        self.assertIn(con, con_set)

                    # Other variables are fixed; not included
                    self.assertEqual(len(block.input_vars), 0)

                elif i == 1:
                    # P[1], rho[1], F[1], T[1], etc.
                    self.assertEqual(len(block.vars), 4)
                    self.assertEqual(len(block.cons), 4)

                    var_set = ComponentSet([m.P[i], m.rho[i], m.F[i], m.T[i]])
                    con_set = ComponentSet(
                        [m.ideal_gas[i], m.mbal[i], m.ebal[i], m.expansion[i]])
                    for var, con in zip(block.vars[:], block.cons[:]):
                        self.assertIn(var, var_set)
                        self.assertIn(con, con_set)

                    # P[0] is in expansion[1]
                    other_var_set = ComponentSet([m.P[i - 1]])
                    self.assertEqual(len(block.input_vars), 1)
                    for var in block.input_vars[:]:
                        self.assertIn(var, other_var_set)

                else:
                    # P[i], rho[i], F[i], T[i], etc.
                    self.assertEqual(len(block.vars), 4)
                    self.assertEqual(len(block.cons), 4)

                    var_set = ComponentSet([m.P[i], m.rho[i], m.F[i], m.T[i]])
                    con_set = ComponentSet(
                        [m.ideal_gas[i], m.mbal[i], m.ebal[i], m.expansion[i]])
                    for var, con in zip(block.vars[:], block.cons[:]):
                        self.assertIn(var, var_set)
                        self.assertIn(con, con_set)

                    # P[i-1], rho[i-1], F[i-1], T[i-1], etc.
                    other_var_set = ComponentSet(
                        [m.P[i - 1], m.rho[i - 1], m.F[i - 1], m.T[i - 1]])
                    self.assertEqual(len(block.input_vars), 4)
                    for var in block.input_vars[:]:
                        self.assertIn(var, other_var_set)
Example #13
    def test_dynamic_backward_with_inputs(self):
        nfe = 5
        m = make_dynamic_model(nfe=nfe, scheme="BACKWARD")
        time = m.time
        t0 = m.time.first()
        t1 = m.time.next(t0)

        # Initial conditions are still fixed
        m.height[t0].fix()
        m.flow_out[t0].fix()
        m.dhdt[t0].fix()
        m.diff_eqn[t0].deactivate()
        m.flow_out_eqn[t0].deactivate()

        # Variables that we want in our SCCs:
        # Here we exclude "dynamic inputs" (flow_in) instead of fixing them
        variables = [
            var for var in m.component_data_objects(pyo.Var)
            if not var.fixed and var.parent_component() is not m.flow_in
        ]
        constraints = list(
            m.component_data_objects(pyo.Constraint, active=True))
        self.assertEqual(
            len(
                list(
                    generate_strongly_connected_components(
                        constraints,
                        variables,
                    ))),
            nfe,
        )

        # The result of the generator is the same as in the previous
        # test, but we are using the more general API
        for i, (block, inputs) in enumerate(
                generate_strongly_connected_components(
                    constraints,
                    variables,
                )):
            with TemporarySubsystemManager(to_fix=inputs):
                t = m.time[i + 2]
                t_prev = m.time.prev(t)

                con_set = ComponentSet(
                    [m.diff_eqn[t], m.flow_out_eqn[t], m.dhdt_disc_eq[t]])
                var_set = ComponentSet([m.height[t], m.dhdt[t], m.flow_out[t]])
                self.assertEqual(len(con_set), len(block.cons))
                self.assertEqual(len(var_set), len(block.vars))
                for var, con in zip(block.vars[:], block.cons[:]):
                    self.assertIn(var, var_set)
                    self.assertIn(con, con_set)
                    self.assertFalse(var.fixed)

                other_var_set = ComponentSet([m.flow_in[t]])
                if t != t1:
                    other_var_set.add(m.height[t_prev])
                    # At t1, "input var" height[t0] is fixed, so
                    # it is not included here.
                self.assertEqual(len(inputs), len(other_var_set))
                for var in block.input_vars[:]:
                    self.assertIn(var, other_var_set)
                    self.assertTrue(var.fixed)

        for t in time:
            if t == t0:
                self.assertTrue(m.height[t].fixed)
                self.assertTrue(m.flow_out[t].fixed)
                self.assertTrue(m.dhdt[t].fixed)
            else:
                self.assertFalse(m.height[t].fixed)
                self.assertFalse(m.flow_out[t].fixed)
                self.assertFalse(m.dhdt[t].fixed)
Example #14
    def test_dimen_none_replacement(self):
        m = self.model()

        # Single set of dimen None
        index_set = m.b0dn.index_set()
        index = ('c', 1, 10)
        location_set_map = get_location_set_map(index, index_set)
        sets = ComponentSet((m.d_none, ))
        pred_index = (Ellipsis, )
        new_index = replace_indices(index, location_set_map, sets)
        self.assertEqual(new_index, pred_index)

        sets = ComponentSet()
        pred_index = ('c', 1, 10)
        new_index = replace_indices(index, location_set_map, sets)
        self.assertEqual(new_index, pred_index)

        # One None-dimen, one one-dimen
        index_set = m.b1dn.index_set()
        index = (1, 'c', 1, 10)
        location_set_map = get_location_set_map(index, index_set)
        sets = ComponentSet((m.d_none, ))
        pred_index = (1, Ellipsis)
        new_index = replace_indices(index, location_set_map, sets)
        self.assertEqual(new_index, pred_index)

        sets = ComponentSet((m.d_none, m.time))
        pred_index = (slice(None), Ellipsis)
        new_index = replace_indices(index, location_set_map, sets)
        self.assertEqual(new_index, pred_index)

        # One None-dimen, one one-dimen, one two-dimen
        index_set = m.b1dnd2.index_set()
        index = (1, 'c', 1, 10, 'a', 1)
        location_set_map = get_location_set_map(index, index_set)
        sets = ComponentSet((m.d_none, m.time, m.d_2))
        pred_index = (slice(None), Ellipsis, slice(None), slice(None))
        new_index = replace_indices(index, location_set_map, sets)
        self.assertEqual(new_index, pred_index)

        sets = ComponentSet((m.time, m.d_2))
        pred_index = (slice(None), 'c', 1, 10, slice(None), slice(None))
        new_index = replace_indices(index, location_set_map, sets)
        self.assertEqual(new_index, pred_index)

        sets = ComponentSet((m.d_2, ))
        pred_index = (1, 'c', 1, 10, slice(None), slice(None))
        new_index = replace_indices(index, location_set_map, sets)
        self.assertEqual(new_index, pred_index)

        # One None-dimen, one two-dimen
        index_set = m.b2dn.index_set()
        index = (1, 0, 'd', 3)
        location_set_map = get_location_set_map(index, index_set)
        sets = ComponentSet()
        pred_index = (1, 0, 'd', 3)
        new_index = replace_indices(index, location_set_map, sets)
        self.assertEqual(new_index, pred_index)

        sets = ComponentSet((m.d_none, ))
        pred_index = (1, 0, Ellipsis)
        new_index = replace_indices(index, location_set_map, sets)
        self.assertEqual(new_index, pred_index)

        # One None-dimen, one two-dimen, two one-dimen
        index_set = m.b2dnd2.index_set()
        index = (1, 'c', 1, 10, 'b', 2, 0)
        location_set_map = get_location_set_map(index, index_set)
        sets = ComponentSet((m.d_none, m.space))
        pred_index = (1, Ellipsis, 'b', 2, slice(None))
        new_index = replace_indices(index, location_set_map, sets)
        self.assertEqual(new_index, pred_index)

        # None-dimen set comes first
        index_set = m.dnd2b1.index_set()
        index = ('d', 3, 'b', 2, 1)
        location_set_map = get_location_set_map(index, index_set)
        sets = ComponentSet((m.d_none, m.time))
        pred_index = (Ellipsis, 'b', 2, slice(None))
        new_index = replace_indices(index, location_set_map, sets)
        self.assertEqual(new_index, pred_index)

        # None-dimen, plus 2-d set shows up twice
        index_set = m.b3dn.index_set()
        index = (1, 'a', 1, 'd', 3, 0, 'b', 2)
        location_set_map = get_location_set_map(index, index_set)
        sets = ComponentSet((m.d_none, m.d_2))
        pred_index = (
            1,
            slice(None),
            slice(None),
            Ellipsis,
            0,
            slice(None),
            slice(None),
        )
        new_index = replace_indices(index, location_set_map, sets)
        self.assertEqual(new_index, pred_index)
Example #15
 def test_str(self):
     cset = ComponentSet()
     self.assertEqual(str(cset), "ComponentSet([])")
     cset.update(self._components)
     str(cset)
Example #16
class BaseRelaxationData(_BlockData):
    def __init__(self, component):
        _BlockData.__init__(self, component)
        self._persistent_solvers = ComponentSet()
        self._allow_changes = False
        self._relaxation_side = RelaxationSide.BOTH
        # List of ComponentMap. Each entry in the list specifies a point at
        # which an outer approximation cut should be built for convex/concave
        # constraints.
        self._oa_points = list()
        self._saved_oa_points = list()
        self._oa_stack_map = dict()
        self._cuts = None
        self._use_linear_relaxation = True
        self.large_eval_tol = math.inf

    def add_component(self, name, val):
        if self._allow_changes:
            _BlockData.add_component(self, name, val)
        else:
            raise RuntimeError(
                'Pyomo components cannot be added to objects of type {0}.'.
                format(type(self)))

    def _set_input(self,
                   relaxation_side=RelaxationSide.BOTH,
                   persistent_solvers=None,
                   use_linear_relaxation=True,
                   large_eval_tol=math.inf):
        self._oa_points = list()
        self._saved_oa_points = list()
        self._persistent_solvers = persistent_solvers
        if self._persistent_solvers is None:
            self._persistent_solvers = ComponentSet()
        if not isinstance(self._persistent_solvers, Iterable):
            self._persistent_solvers = ComponentSet([self._persistent_solvers])
        else:
            self._persistent_solvers = ComponentSet(self._persistent_solvers)
        self._relaxation_side = relaxation_side
        assert self._relaxation_side in RelaxationSide
        self.use_linear_relaxation = use_linear_relaxation
        self.large_eval_tol = large_eval_tol

    def get_aux_var(self):
        """
        All Coramin relaxations are relaxations of constraints of the form w <=/=/>= f(x). This method returns w

        Returns
        -------
        aux_var: pyomo.core.base.var._GeneralVarData
            The variable representing w in w = f(x) (which is the constraint being relaxed).
        """
        return self._aux_var

    def get_rhs_vars(self):
        raise NotImplementedError(
            'This method should be implemented by subclasses')

    def get_rhs_expr(self):
        raise NotImplementedError(
            'This method should be implemented by subclasses')

    @property
    def use_linear_relaxation(self):
        """
        If this is True, the relaxation will use a linear relaxation. If False, then a nonlinear relaxation may be
        used. Take w >= x**2 for example: with a nonlinear relaxation, the underestimator can be the quadratic
        function x**2 itself rather than linear cuts.

        Returns
        -------
        bool
        """
        return self._use_linear_relaxation

    @use_linear_relaxation.setter
    def use_linear_relaxation(self, val):
        if not val:
            raise ValueError(
                'Relaxations of type {0} do not support relaxations that '
                'are not linear.'.format(type(self)))
        # Only linear relaxations are supported by this base class, so the
        # stored flag can only ever be True here.
        self._use_linear_relaxation = True

    def remove_relaxation(self):
        """
        Remove any auto-created vars/constraints from the relaxation block
        """
        # this default implementation should work for most relaxations
        # it removes all vars and constraints on this block data object
        self._remove_from_persistent_solvers()
        comps = [pe.Block, pe.Constraint, pe.Var, pe.Set, pe.Param]
        for comp in comps:
            comps_to_del = list(
                self.component_objects([comp], descend_into=False))
            for _comp in comps_to_del:
                self.del_component(_comp)
        for comp in comps:
            comps_to_del = list(
                self.component_data_objects([comp], descend_into=False))
            for _comp in comps_to_del:
                self.del_component(_comp)

    def rebuild(self, build_nonlinear_constraint=False):
        """
        Remove any auto-created vars/constraints from the relaxation block and recreate it
        """
        self.clean_oa_points()
        self._allow_changes = True
        self.remove_relaxation()
        if build_nonlinear_constraint:
            if self.relaxation_side == RelaxationSide.BOTH:
                self.nonlinear_con = pe.Constraint(
                    expr=self.get_aux_var() == self.get_rhs_expr())
            elif self.relaxation_side == RelaxationSide.UNDER:
                self.nonlinear_con = pe.Constraint(
                    expr=self.get_aux_var() >= self.get_rhs_expr())
            else:
                self.nonlinear_con = pe.Constraint(
                    expr=self.get_aux_var() <= self.get_rhs_expr())
        else:
            self._build_relaxation()
            if self.use_linear_relaxation:
                val_mngr = _ValueManager()
                val_mngr.save_values(self.get_rhs_vars())
                for pt in self._oa_points:
                    _load_var_values(pt)
                    # check_violation has to be False because we are not loading the value of aux_var
                    self.add_cut(keep_cut=False,
                                 check_violation=False,
                                 add_cut_to_persistent_solvers=False)
                val_mngr.pop_values()
            else:
                if self.is_rhs_convex() and self.relaxation_side in {
                        RelaxationSide.BOTH, RelaxationSide.UNDER
                }:
                    self.nonlinear_underestimator = pe.Constraint(
                        expr=self.get_aux_var() >= self.get_rhs_expr())
                elif self.is_rhs_concave() and self.relaxation_side in {
                        RelaxationSide.BOTH, RelaxationSide.OVER
                }:
                    self.nonlinear_overestimator = pe.Constraint(
                        expr=self.get_aux_var() <= self.get_rhs_expr())
        self._add_to_persistent_solvers()
        self._allow_changes = False

    def _build_relaxation(self):
        """
        Build the auto-created vars/constraints that form the relaxation
        """
        raise NotImplementedError(
            'This should be implemented in the derived class.')

    def vars_with_bounds_in_relaxation(self):
        """
        This method returns a list of variables whose bounds appear in the constraints defining the relaxation.
        Take the McCormick relaxation of a bilinear term (w = x * y) for example. The McCormick relaxation is

        w >= xl * y + x * yl - xl * yl
        w >= xu * y + x * yu - xu * yu
        w <= xu * y + x * yl - xu * yl
        w <= x * yu + xl * y - xl * yu

        where xl and xu are the lower and upper bounds for x, respectively, and yl and yu are the lower and upper
        bounds for y, respectively. Because xl, xu, yl, and yu appear in the constraints, this method would return

        [x, y]

        As another example, take w >= x**2. A linear relaxation of this constraint just involves linear underestimators,
        which do not depend on the bounds of x or w. Therefore, this method would return an empty list.
        """
        raise NotImplementedError(
            'This method should be implemented in the derived class.')

    def _remove_from_persistent_solvers(self):
        for i in self._persistent_solvers:
            i.remove_block(block=self)

    def _add_to_persistent_solvers(self):
        for i in self._persistent_solvers:
            i.add_block(block=self)

    def add_persistent_solver(self, persistent_solver):
        self._persistent_solvers.add(persistent_solver)

    def remove_persistent_solver(self, persistent_solver):
        self._persistent_solvers.remove(persistent_solver)

    def clear_persistent_solvers(self):
        self._persistent_solvers = ComponentSet()

    def get_deviation(self):
        """
        All Coramin relaxations are relaxations of constraints of the form w <=/=/>= f(x). This method returns

        max{f(x) - w, 0} if relaxation_side is RelaxationSide.UNDER
        max{w - f(x), 0} if relaxation_side is RelaxationSide.OVER
        abs(w - f(x)) if relaxation_side is RelaxationSide.BOTH

        Returns
        -------
        float
        """
        dev = self.get_aux_var().value - pe.value(self.get_rhs_expr())
        if self.relaxation_side is RelaxationSide.BOTH:
            dev = abs(dev)
        elif self.relaxation_side is RelaxationSide.UNDER:
            dev = max(-dev, 0)
        else:
            dev = max(dev, 0)
        return dev

    def is_rhs_convex(self):
        """
        All Coramin relaxations are relaxations of constraints of the form w <=/=/>= f(x). This method returns True if f(x)
        is convex and False otherwise.

        Returns
        -------
        bool
        """
        raise NotImplementedError(
            'This method should be implemented in the derived class.')

    def is_rhs_concave(self):
        """
        All Coramin relaxations are relaxations of constraints of the form w <=/=/>= f(x). This method returns True if f(x)
        is concave and False otherwise.

        Returns
        -------
        bool
        """
        raise NotImplementedError(
            'This method should be implemented in the derived class.')

    @property
    def relaxation_side(self):
        return self._relaxation_side

    @relaxation_side.setter
    def relaxation_side(self, val):
        if val not in RelaxationSide:
            raise ValueError(
                '{0} is not a valid member of RelaxationSide'.format(val))
        self._relaxation_side = val

    def _get_pprint_string(self, relational_operator_string):
        return 'Relaxation for {0} {1} {2}'.format(self.get_aux_var().name,
                                                   relational_operator_string,
                                                   str(self.get_rhs_expr()))

    def pprint(self, ostream=None, verbose=False, prefix=""):
        if ostream is None:
            ostream = sys.stdout

        if self.relaxation_side == RelaxationSide.BOTH:
            relational_operator = '=='
        elif self.relaxation_side == RelaxationSide.UNDER:
            relational_operator = '>='
        elif self.relaxation_side == RelaxationSide.OVER:
            relational_operator = '<='
        else:
            raise ValueError('Unexpected relaxation side')
        ostream.write('{0}{1}: {2}\n'.format(
            prefix, self.name, self._get_pprint_string(relational_operator)))

        if verbose:
            super(BaseRelaxationData, self).pprint(ostream=ostream,
                                                   verbose=verbose,
                                                   prefix=(prefix + '  '))

    def add_oa_point(self, var_values=None):
        """
        Add a point at which an outer-approximation cut for a convex constraint should be added. This does not
        rebuild the relaxation. You must call rebuild() for the constraint to get added.

        Parameters
        ----------
        var_values: pe.ComponentMap
        """
        if var_values is None:
            var_values = pe.ComponentMap()
            for v in self.get_rhs_vars():
                var_values[v] = v.value
        else:
            var_values = pe.ComponentMap(var_values)
        self._oa_points.append(var_values)

    def push_oa_points(self, key=None):
        """
        Save the current list of OA points for later use through pop_oa_points().
        """
        to_save = [pe.ComponentMap(i) for i in self._oa_points]
        if key is not None:
            self._oa_stack_map[key] = to_save
        else:
            self._saved_oa_points.append(to_save)

    def clear_oa_points(self):
        """
        Delete any existing OA points.
        """
        self._oa_points = list()

    def pop_oa_points(self, key=None):
        """
        Use the most recently saved list of OA points
        """
        if key is None:
            self._oa_points = self._saved_oa_points.pop(-1)
        else:
            self._oa_points = self._oa_stack_map.pop(key)

    def add_cut(self,
                keep_cut=True,
                check_violation=False,
                feasibility_tol=1e-8,
                add_cut_to_persistent_solvers=True):
        """
        This function will add a linear cut to the relaxation. Cuts are only generated for the convex side of the
        constraint (if the constraint has a convex side). For example, if the relaxation is a PWXSquaredRelaxationData
        for y = x**2, then add_cut will add an underestimator at x.value (but, when check_violation is True, only if
        y.value < x.value**2). If the relaxation is a PWXSquaredRelaxationData for y <= x**2, then no cut will be
        added. If the relaxation is a PWMcCormickRelaxationData, then no cut will be added.

        Parameters
        ----------
        keep_cut: bool
            If keep_cut is True, then add_oa_point will also be called, so the cut will be reconstructed when
            rebuild is called. Be careful if the relaxation object is relaxing the nonconvex side of the
            constraint. If keep_cut is False, then the cut will be discarded when rebuild is called.
        check_violation: bool
            If True, then a cut is only added if the cut generated would cut off the current point (current values
            of the variables) by more than feasibility_tol.
        feasibility_tol: float
            Only used if check_violation is True
        add_cut_to_persistent_solvers: bool

        Returns
        -------
        new_con: pyomo.core.base.constraint.Constraint
        """
        if keep_cut:
            self.add_oa_point()

        cut_expr = None

        try:
            rhs_val = pe.value(self.get_rhs_expr())
            # Skip cut generation if f(x) evaluates to a very large magnitude
            if rhs_val >= self.large_eval_tol or rhs_val < -self.large_eval_tol:
                pass
            else:
                if self.is_rhs_convex():
                    if self.relaxation_side == RelaxationSide.UNDER or self.relaxation_side == RelaxationSide.BOTH:
                        if check_violation:
                            viol = self.get_aux_var().value - rhs_val
                            if viol < -feasibility_tol:
                                cut_expr = self.get_aux_var(
                                ) >= taylor_series_expansion(
                                    self.get_rhs_expr())
                        else:
                            cut_expr = self.get_aux_var(
                            ) >= taylor_series_expansion(self.get_rhs_expr())
                elif self.is_rhs_concave():
                    if self.relaxation_side == RelaxationSide.OVER or self.relaxation_side == RelaxationSide.BOTH:
                        if check_violation:
                            viol = self.get_aux_var().value - rhs_val
                            if viol > feasibility_tol:
                                cut_expr = self.get_aux_var(
                                ) <= taylor_series_expansion(
                                    self.get_rhs_expr())
                        else:
                            cut_expr = self.get_aux_var(
                            ) <= taylor_series_expansion(self.get_rhs_expr())
        except (OverflowError, ZeroDivisionError, ValueError):
            pass
        if cut_expr is not None:
            if not hasattr(self, '_cuts'):
                self._cuts = None
            if self._cuts is None:
                # Remove the placeholder attribute so the ConstraintList can
                # be added as a proper Pyomo component.
                del self._cuts
                self._allow_changes = True
                self._cuts = pe.ConstraintList()
                self._allow_changes = False
            new_con = self._cuts.add(cut_expr)
            if add_cut_to_persistent_solvers:
                for i in self._persistent_solvers:
                    i.add_constraint(new_con)
        else:
            new_con = None

        return new_con

    def clean_oa_points(self):
        # For each OA point, if the point is outside variable bounds, move the point to the variable bounds
        for pts in self._oa_points:
            for v, pt in pts.items():
                lb, ub = tuple(_get_bnds_list(v))
                if pt < lb:
                    pts[v] = lb
                if pt > ub:
                    pts[v] = ub
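add_cut above relies on taylor_series_expansion to build a first-order (outer-approximation) cut at the current variable values. A minimal standalone sketch of the same idea for w >= x**2, written against plain Pyomo with hypothetical names rather than the class above:

import pyomo.environ as pe

def squared_oa_cut(w, x):
    # Tangent (outer-approximation) cut for w >= x**2 at the current
    # value of x: w >= x0**2 + 2*x0*(x - x0).
    x0 = x.value
    return w >= x0**2 + 2 * x0 * (x - x0)

m = pe.ConcreteModel()
m.x = pe.Var(initialize=2.0)
m.w = pe.Var()
m.cuts = pe.ConstraintList()
m.cuts.add(squared_oa_cut(m.w, m.x))  # adds w >= 4 + 4*(x - 2)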
Exemplo n.º 17
0
    def test_isdisjoint(self):
        cset1 = ComponentSet()
        cset2 = ComponentSet()
        self.assertTrue(cset1.isdisjoint(cset2))
        self.assertTrue(cset2.isdisjoint(cset1))
        v = variable()
        cset1.add(v)
        self.assertTrue(cset1.isdisjoint(cset2))
        self.assertTrue(cset2.isdisjoint(cset1))
        cset2.add(v)
        self.assertFalse(cset1.isdisjoint(cset2))
        self.assertFalse(cset2.isdisjoint(cset1))
Exemplo n.º 18
0
    def _collect_ports(self, instance):
        self._name_buffer = {}
        # List of the ports in the order in which we found them
        # (this should be deterministic, provided that the user's model
        # is deterministic)
        port_list = []
        # ID of the next port group (set of matched ports)
        groupID = 0
        # port_groups starts out as a dict of {id(set): (groupID, set)}
        # If you sort by the groupID, then this will be deterministic.
        port_groups = dict()
        # map of port to the set of ports that must match it
        matched_ports = ComponentMap()

        for arc in instance.component_data_objects(**obj_iter_kwds):
            ports = ComponentSet(arc.ports)
            ref = None

            for p in arc.ports:
                if p in matched_ports:
                    if ref is None:
                        # The first port in this arc has
                        # already been seen. We will use that Set as
                        # the reference
                        ref = matched_ports[p]
                    elif ref is not matched_ports[p]:
                        # We already have a reference group; merge this
                        # new group into it.

                        # Optimization: this merge is linear in the size
                        # of the src set. If the reference set is
                        # smaller, save time by switching to a new
                        # reference set.
                        src = matched_ports[p]
                        if len(ref) < len(src):
                            ref, src = src, ref
                        ref.update(src)
                        for i in src:
                            matched_ports[i] = ref
                        del port_groups[id(src)]
                    # else: pass
                    #   The new group *is* the reference group;
                    #   there is nothing to do.
                else:
                    # The port has not been seen before.
                    port_list.append(p)
                    if ref is None:
                        # This is the first port in the arc:
                        # start a new reference set.
                        ref = ComponentSet()
                        port_groups[id(ref)] = (groupID, ref)
                        groupID += 1
                    # This port hasn't been seen. Record it.
                    ref.add(p)
                    matched_ports[p] = ref

        # Validate all port sets and expand the empty ones
        known_port_sets = {}
        for groupID, port_set in sorted(itervalues(port_groups)):
            known_port_sets[id(port_set)] \
                = self._validate_and_expand_port_set(port_set)

        return port_list, known_port_sets, matched_ports
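The group-merging logic above is a small union-find-style optimization: always fold the smaller set into the larger one so each merge is linear in the smaller group. A self-contained sketch of just that trick, using plain Python sets and hypothetical names:

def merge_groups(ref, src, groups):
    # groups maps id(group_set) -> group_set. Merging is linear in
    # len(src), so swap so that we always iterate over the smaller group.
    if len(ref) < len(src):
        ref, src = src, ref
    ref.update(src)
    del groups[id(src)]
    return ref

a, b = {1, 2, 3}, {4}
groups = {id(a): a, id(b): b}
ref = merge_groups(b, a, groups)  # returns a, now {1, 2, 3, 4}
assert ref is a and id(b) not in groups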
Exemplo n.º 19
0
    def test_type(self):
        cset = ComponentSet()
        self.assertTrue(isinstance(cset, collections.abc.Set))
        self.assertTrue(isinstance(cset, collections.abc.MutableSet))
        self.assertTrue(issubclass(type(cset), collections.abc.Set))
        self.assertTrue(issubclass(type(cset), collections.abc.MutableSet))
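For context on why these tests target ComponentSet rather than a built-in set: Pyomo modeling objects overload operators such as ==, so comparisons build relational expressions instead of returning booleans, and ComponentSet therefore stores and compares entries by object identity. A small illustration (import path as in recent Pyomo releases):

import pyomo.environ as pe
from pyomo.common.collections import ComponentSet

m = pe.ConcreteModel()
m.x = pe.Var()
m.y = pe.Var()

cset = ComponentSet([m.x])
print(m.x in cset)  # True -- membership is checked by object identity
print(m.y in cset)  # False
# By contrast, m.x == m.y builds a relational expression rather than
# returning a bool, which is why identity-based containers are used.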
Exemplo n.º 20
0
def flatten_components_along_sets(m, sets, ctype, indices=None):
    """
    This function iterates over components (recursively) contained
    in a block and partitions their data objects into components
    indexed only by the specified sets.

    Args:
        m : Block whose components (and their sub-components) will be
            partitioned
        sets : Possible indexing sets for the returned components
        ctype : Type of component to identify and partition
        indices : indices of sets to use when descending into subblocks

    Returns:
        tuple: The first entry is a list of tuples of Pyomo Sets. The
               second is a list of lists of components, each indexed by
               the corresponding sets in the first entry.
        
    """
    if indices is None:
        index_map = ComponentMap()
    elif type(indices) is ComponentMap:
        index_map = indices
    else:
        index_map = ComponentMap(zip(sets, indices))

    index_stack = []

    set_of_sets = ComponentSet(sets)
    # Using these two `OrderedDict`s is a workaround because I can't
    # reliably use tuples of components as keys in a `ComponentMap`.
    sets_dict = OrderedDict()
    comps_dict = OrderedDict()
    for index_sets, slice_ in generate_sliced_components(
            m, index_stack, m, set_of_sets, ctype, index_map):
        # Note that index_sets should always be a tuple, never a scalar.

        # TODO: Potentially re-order sets at this point.
        # In this way (time, space) would have the same key as (space, time).
        # Then we'd have to somehow "swap indexing sets" when we create
        # the reference below.
        key = tuple(id(c) for c in index_sets)
        if key not in sets_dict:
            if len(key) == 0:
                sets_dict[key] = (UnindexedComponent_set, )
            else:
                sets_dict[key] = index_sets
        if key not in comps_dict:
            comps_dict[key] = []
        if len(key) == 0:
            comps_dict[key].append(slice_)
        else:
            # If the user wants to change these flags, they can access the
            # slice via the `referent` attribute of each reference component.
            slice_.attribute_errors_generate_exceptions = False
            slice_.key_errors_generate_exceptions = False
            comps_dict[key].append(Reference(slice_))

    # list-of-tuples of Sets:
    sets_list = list(sets_dict.values())
    # list-of-lists of components:
    comps_list = list(comps_dict.values())
    # E.g. we return: (
    #          [(time, space), (time,)],
    #          [[some_component, ...], [other, ...]],
    #      )                            ^ These components are indexed by time
    #            ^ These components are indexed by time and space
    return sets_list, comps_list
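A usage sketch under assumed model names, partitioning the Var data on a toy model by whether it is indexed by m.time:

import pyomo.environ as pyo

m = pyo.ConcreteModel()
m.time = pyo.Set(initialize=[0, 1, 2])
m.x = pyo.Var(m.time)
m.y = pyo.Var()

sets_list, comps_list = flatten_components_along_sets(m, (m.time,), pyo.Var)
# Expect one entry for components indexed by time (a Reference to m.x)
# and one for components indexed by none of the given sets (m.y).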
Exemplo n.º 21
0
def coefficient_matching(model, constraint, uncertain_params, config):
    '''
    :param model: master problem model
    :param constraint: the constraint from the master problem model
    :param uncertain_params: the list of uncertain parameters
    :param config: the solver config; used to determine which uncertain parameters are effectively certain.
        First- and second-stage variables are taken from model.util (second-stage variables are treated as
        first-stage if decision_rule_order == 0).
    :return: (successful_matching, robust_infeasible). successful_matching is True if the coefficient matching
             was successful; robust_infeasible is True if the problem is proven robust infeasible due to
             constraints of the form 1 == 0
    '''
    # === Returned flags
    successful_matching = True
    robust_infeasible = False

    # === Efficiency for q_LB = q_UB
    actual_uncertain_params = []

    for i in range(len(uncertain_params)):
        if not is_certain_parameter(uncertain_param_index=i, config=config):
            actual_uncertain_params.append(uncertain_params[i])

    # === Add coefficient matching constraint list
    if not hasattr(model, "coefficient_matching_constraints"):
        model.coefficient_matching_constraints = ConstraintList()
    if not hasattr(model, "swapped_constraints"):
        model.swapped_constraints = ConstraintList()

    variables_in_constraint = ComponentSet(identify_variables(constraint.expr))
    params_in_constraint = ComponentSet(identify_mutable_parameters(constraint.expr))
    first_stage_variables = model.util.first_stage_variables
    second_stage_variables = model.util.second_stage_variables

    # === Determine if we need to do DR expression/ssv substitution to
    #     make h(x,z,q) == 0 into h(x,d,q) == 0 (which is just h(x,q) == 0)
    if all(v in ComponentSet(first_stage_variables) for v in variables_in_constraint) and \
            any(q in ComponentSet(actual_uncertain_params) for q in params_in_constraint):
        # h(x, q) == 0
        pass
    elif all(v in ComponentSet(first_stage_variables + second_stage_variables) for v in variables_in_constraint) and \
            any(q in ComponentSet(actual_uncertain_params) for q in params_in_constraint):
        constraint = substitute_ssv_in_dr_constraints(model=model, constraint=constraint)
        variables_in_constraint = ComponentSet(identify_variables(constraint.expr))
        params_in_constraint = ComponentSet(identify_mutable_parameters(constraint.expr))
    else:
        pass

    if all(v in ComponentSet(first_stage_variables) for v in variables_in_constraint) and \
            any(q in ComponentSet(actual_uncertain_params) for q in params_in_constraint):

        # Swap param objects for variable objects in this constraint
        model.param_set = []
        for i in range(len(list(variables_in_constraint))):
            # Initialize Params to non-zero value due to standard_repn bug
            model.add_component("p_%s" % i, Param(initialize=1, mutable=True))
            model.param_set.append(getattr(model, "p_%s" % i))

        model.variable_set = []
        for i in range(len(list(actual_uncertain_params))):
            model.add_component("x_%s" % i, Var(initialize=1))
            model.variable_set.append(getattr(model, "x_%s" % i))

        original_var_to_param_map = list(zip(list(variables_in_constraint), model.param_set))
        original_param_to_var_map = list(zip(list(actual_uncertain_params), model.variable_set))

        var_to_param_substitution_map_forward = {}
        # Maps the constraint's variables to their placeholder Params
        for var, param in original_var_to_param_map:
            var_to_param_substitution_map_forward[id(var)] = param

        param_to_var_substitution_map_forward = {}
        # Maps the uncertain Params to their placeholder Vars
        for param, var in original_param_to_var_map:
            param_to_var_substitution_map_forward[id(param)] = var

        var_to_param_substitution_map_reverse = {}
        # Reverses the forward var-to-param map: placeholder Params back to
        # the original variables
        for var, param in original_var_to_param_map:
            var_to_param_substitution_map_reverse[id(param)] = var

        param_to_var_substitution_map_reverse = {}
        # Reverses the forward param-to-var map: placeholder Vars back to
        # the uncertain Params
        for param, var in original_param_to_var_map:
            param_to_var_substitution_map_reverse[id(var)] = param

        model.swapped_constraints.add(
            replace_expressions(
                expr=replace_expressions(expr=constraint.lower,
                                         substitution_map=param_to_var_substitution_map_forward),
                substitution_map=var_to_param_substitution_map_forward) ==
            replace_expressions(
                expr=replace_expressions(expr=constraint.body,
                                         substitution_map=param_to_var_substitution_map_forward),
                substitution_map=var_to_param_substitution_map_forward))

        swapped = model.swapped_constraints[max(model.swapped_constraints.keys())]

        val = generate_standard_repn(swapped.body, compute_values=False)

        if val.constant is not None:
            if type(val.constant) not in native_types:
                temp_expr = replace_expressions(val.constant, substitution_map=var_to_param_substitution_map_reverse)
                if temp_expr.is_potentially_variable():
                    model.coefficient_matching_constraints.add(expr=temp_expr == 0)
                elif math.isclose(value(temp_expr), 0, rel_tol=COEFF_MATCH_REL_TOL, abs_tol=COEFF_MATCH_ABS_TOL):
                    pass
                else:
                    successful_matching = False
                    robust_infeasible = True
            elif math.isclose(value(val.constant), 0, rel_tol=COEFF_MATCH_REL_TOL, abs_tol=COEFF_MATCH_ABS_TOL):
                pass
            else:
                successful_matching = False
                robust_infeasible = True
        if val.linear_coefs is not None:
            for coeff in val.linear_coefs:
                if type(coeff) not in native_types:
                    temp_expr = replace_expressions(coeff, substitution_map=var_to_param_substitution_map_reverse)
                    if temp_expr.is_potentially_variable():
                        model.coefficient_matching_constraints.add(expr=temp_expr == 0)
                    elif math.isclose(value(temp_expr), 0, rel_tol=COEFF_MATCH_REL_TOL, abs_tol=COEFF_MATCH_ABS_TOL):
                        pass
                    else:
                        successful_matching = False
                        robust_infeasible = True
                elif math.isclose(value(coeff), 0, rel_tol=COEFF_MATCH_REL_TOL, abs_tol=COEFF_MATCH_ABS_TOL):
                    pass
                else:
                    successful_matching = False
                    robust_infeasible = True
        if val.quadratic_coefs:
            for coeff in val.quadratic_coefs:
                if type(coeff) not in native_types:
                    temp_expr = replace_expressions(coeff, substitution_map=var_to_param_substitution_map_reverse)
                    if temp_expr.is_potentially_variable():
                        model.coefficient_matching_constraints.add(expr=temp_expr == 0)
                    elif math.isclose(value(temp_expr), 0, rel_tol=COEFF_MATCH_REL_TOL, abs_tol=COEFF_MATCH_ABS_TOL):
                        pass
                    else:
                        successful_matching = False
                        robust_infeasible = True
                elif math.isclose(value(coeff), 0, rel_tol=COEFF_MATCH_REL_TOL, abs_tol=COEFF_MATCH_ABS_TOL):
                    pass
                else:
                    successful_matching = False
                    robust_infeasible = True
        if val.nonlinear_expr is not None:
            successful_matching = False
            robust_infeasible = False

        if successful_matching:
            model.util.h_x_q_constraints.add(constraint)

    for i in range(len(list(variables_in_constraint))):
        model.del_component("p_%s" % i)

    # The x_%s components were created from actual_uncertain_params above, so
    # use the same collection to remove them
    for i in range(len(list(actual_uncertain_params))):
        model.del_component("x_%s" % i)

    model.del_component("swapped_constraints")
    model.del_component("swapped_constraints_index")

    return successful_matching, robust_infeasible
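The core trick above is that generate_standard_repn(..., compute_values=False) exposes the constant, linear, and quadratic coefficients symbolically, so each one can be constrained to zero. A minimal sketch of that step alone, with hypothetical placeholders (a Var standing in for an uncertain parameter, mutable Params standing in for first-stage variables):

import pyomo.environ as pe
from pyomo.repn import generate_standard_repn

m = pe.ConcreteModel()
m.q = pe.Var()  # placeholder standing in for an uncertain parameter
m.x1 = pe.Param(initialize=1, mutable=True)  # placeholder for a variable
m.x2 = pe.Param(initialize=1, mutable=True)

# h(x, q) = x1*q + x2 == 0 must hold for every q, so matching requires
# x1 == 0 (the linear coefficient in q) and x2 == 0 (the constant term).
repn = generate_standard_repn(m.x1 * m.q + m.x2, compute_values=False)
print(repn.constant)      # x2
print(repn.linear_coefs)  # (x1,)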
Exemplo n.º 22
0
    def _apply_to_impl(self, instance, **kwds):
        if instance.ctype not in (Block, Disjunct):
            raise GDP_Error(
                "Transformation called on %s of type %s. 'instance' "
                "must be a ConcreteModel, Block, or Disjunct (in "
                "the case of nested disjunctions)." %
                (instance.name, instance.ctype))

        config = self.CONFIG(kwds.pop('options', {}))

        # We will let args override suffixes and estimate as a last
        # resort. More specific args/suffixes override ones anywhere in
        # the tree. Suffixes lower down in the tree override ones higher
        # up.
        if 'default_bigM' in kwds:
            deprecation_warning(
                "the 'default_bigM=' argument has been "
                "replaced by 'bigM='",
                version='5.4')
            config.bigM = kwds.pop('default_bigM')

        config.set_value(kwds)
        bigM = config.bigM
        self.assume_fixed_vars_permanent = config.assume_fixed_vars_permanent

        targets = config.targets
        if targets is None:
            targets = (instance, )
        else:
            # we need to preprocess targets to make sure that if there are any
            # disjunctions in targets that their disjuncts appear before them in
            # the list.
            targets = preprocess_targets(targets)

        # We need to check that all the targets are in fact on
        # instance. As we do this, we will use the dict below to cache
        # components we know to be in the tree rooted at instance.
        knownBlocks = {}
        for t in targets:
            # check that t is in fact a child of instance
            if not is_child_of(
                    parent=instance, child=t, knownBlocks=knownBlocks):
                raise GDP_Error(
                    "Target '%s' is not a component on instance '%s'!" %
                    (t.name, instance.name))
            elif t.ctype is Disjunction:
                if t.is_indexed():
                    self._transform_disjunction(t, bigM)
                else:
                    self._transform_disjunctionData(t, bigM, t.index())
            elif t.ctype in (Block, Disjunct):
                if t.is_indexed():
                    self._transform_block(t, bigM)
                else:
                    self._transform_blockData(t, bigM)
            else:
                raise GDP_Error(
                    "Target '%s' was not a Block, Disjunct, or Disjunction. "
                    "It was of type %s and can't be transformed." %
                    (t.name, type(t)))

        # issue warnings about anything that was in the bigM args dict that we
        # didn't use
        if bigM is not None:
            unused_args = ComponentSet(bigM.keys()) - \
                          ComponentSet(self.used_args.keys())
            if len(unused_args) > 0:
                warning_msg = ("Unused arguments in the bigM map! "
                               "These arguments were not used by the "
                               "transformation:\n")
                for component in unused_args:
                    if hasattr(component, 'name'):
                        warning_msg += "\t%s\n" % component.name
                    else:
                        warning_msg += "\t%s\n" % component
                logger.warning(warning_msg)
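For reference, a minimal sketch of how this transformation is typically invoked on a small GDP model (model names are assumptions):

import pyomo.environ as pe
from pyomo.gdp import Disjunction

m = pe.ConcreteModel()
m.x = pe.Var(bounds=(0, 10))
m.d = Disjunction(expr=[[m.x <= 2], [m.x >= 8]])

pe.TransformationFactory('gdp.bigm').apply_to(m, bigM=100)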
Exemplo n.º 23
0
def build_model_size_report(model):
    """Build a model size report object."""
    report = ModelSizeReport()
    activated_disjunctions = ComponentSet()
    activated_disjuncts = ComponentSet()
    fixed_true_disjuncts = ComponentSet()
    activated_constraints = ComponentSet()
    activated_vars = ComponentSet()
    new_containers = (model, )

    while new_containers:
        new_activated_disjunctions = ComponentSet()
        new_activated_disjuncts = ComponentSet()
        new_fixed_true_disjuncts = ComponentSet()
        new_activated_constraints = ComponentSet()

        for container in new_containers:
            (next_activated_disjunctions, next_fixed_true_disjuncts,
             next_activated_disjuncts, next_activated_constraints
             ) = _process_activated_container(container)
            new_activated_disjunctions.update(next_activated_disjunctions)
            new_activated_disjuncts.update(next_activated_disjuncts)
            new_fixed_true_disjuncts.update(next_fixed_true_disjuncts)
            new_activated_constraints.update(next_activated_constraints)

        new_containers = ((new_activated_disjuncts - activated_disjuncts) |
                          (new_fixed_true_disjuncts - fixed_true_disjuncts))

        activated_disjunctions.update(new_activated_disjunctions)
        activated_disjuncts.update(new_activated_disjuncts)
        fixed_true_disjuncts.update(new_fixed_true_disjuncts)
        activated_constraints.update(new_activated_constraints)

    activated_vars.update(
        var for constr in activated_constraints
        for var in EXPR.identify_variables(constr.body, include_fixed=False))
    activated_vars.update(disj.indicator_var for disj in activated_disjuncts)

    report.activated = Container()
    report.activated.variables = len(activated_vars)
    report.activated.binary_variables = sum(1 for v in activated_vars
                                            if v.is_binary())
    report.activated.integer_variables = sum(
        1 for v in activated_vars if v.is_integer() and not v.is_binary())
    report.activated.continuous_variables = sum(1 for v in activated_vars
                                                if v.is_continuous())
    report.activated.disjunctions = len(activated_disjunctions)
    report.activated.disjuncts = len(activated_disjuncts)
    report.activated.constraints = len(activated_constraints)
    report.activated.nonlinear_constraints = sum(
        1 for c in activated_constraints
        if c.body.polynomial_degree() not in (1, 0))

    report.overall = Container()
    block_like = (Block, Disjunct)
    all_vars = ComponentSet(
        model.component_data_objects(Var, descend_into=block_like))
    report.overall.variables = len(all_vars)
    report.overall.binary_variables = sum(1 for v in all_vars if v.is_binary())
    report.overall.integer_variables = sum(
        1 for v in all_vars if v.is_integer() and not v.is_binary())
    report.overall.continuous_variables = sum(1 for v in all_vars
                                              if v.is_continuous())
    report.overall.disjunctions = sum(1 for d in model.component_data_objects(
        Disjunction, descend_into=block_like))
    report.overall.disjuncts = sum(1 for d in model.component_data_objects(
        Disjunct, descend_into=block_like))
    report.overall.constraints = sum(1 for c in model.component_data_objects(
        Constraint, descend_into=block_like))
    report.overall.nonlinear_constraints = sum(
        1 for c in model.component_data_objects(Constraint,
                                                descend_into=block_like)
        if c.body.polynomial_degree() not in (1, 0))

    report.warning = Container()
    report.warning.unassociated_disjuncts = sum(
        1 for d in model.component_data_objects(Disjunct,
                                                descend_into=block_like)
        if not d.indicator_var.fixed and d not in activated_disjuncts)

    return report
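A usage sketch on a toy model (names are assumptions); build_model_size_report is available from pyomo.util.model_size in recent Pyomo releases:

import pyomo.environ as pe

m = pe.ConcreteModel()
m.x = pe.Var(domain=pe.Binary)
m.y = pe.Var()
m.c = pe.Constraint(expr=m.y >= 2 * m.x)

report = build_model_size_report(m)
print(report.activated.variables)         # 2 (both appear in the constraint)
print(report.activated.binary_variables)  # 1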
Exemplo n.º 24
0
    def test_incidence_matrix(self):
        N = 5
        model = make_gas_expansion_model(N)
        all_vars = list(model.component_data_objects(pyo.Var))
        all_cons = list(model.component_data_objects(pyo.Constraint))
        imat = get_numeric_incidence_matrix(all_vars, all_cons)
        n_var = 4 * (N + 1)
        n_con = 4 * N + 1
        self.assertEqual(imat.shape, (n_con, n_var))

        var_idx_map = ComponentMap((v, i) for i, v in enumerate(all_vars))
        con_idx_map = ComponentMap((c, i) for i, c in enumerate(all_cons))

        # Map constraints to the variables they contain.
        csr_map = ComponentMap()
        csr_map.update((model.mbal[i],
                        ComponentSet([
                            model.F[i],
                            model.F[i - 1],
                            model.rho[i],
                            model.rho[i - 1],
                        ])) for i in model.streams
                       if i != model.streams.first())
        csr_map.update((model.ebal[i],
                        ComponentSet([
                            model.F[i],
                            model.F[i - 1],
                            model.rho[i],
                            model.rho[i - 1],
                            model.T[i],
                            model.T[i - 1],
                        ])) for i in model.streams
                       if i != model.streams.first())
        csr_map.update((model.expansion[i],
                        ComponentSet([
                            model.rho[i],
                            model.rho[i - 1],
                            model.P[i],
                            model.P[i - 1],
                        ])) for i in model.streams
                       if i != model.streams.first())
        csr_map.update((model.ideal_gas[i],
                        ComponentSet([
                            model.P[i],
                            model.rho[i],
                            model.T[i],
                        ])) for i in model.streams)

        # Map constraint and variable indices to the values of the derivatives
        # Note that the derivative values calculated here depend on the model's
        # canonical form.
        deriv_lookup = {}
        m = model  # for convenience
        for s in model.streams:
            # Ideal gas:
            i = con_idx_map[model.ideal_gas[s]]
            j = var_idx_map[model.P[s]]
            deriv_lookup[i, j] = 1.0

            j = var_idx_map[model.rho[s]]
            deriv_lookup[i, j] = -model.R.value * model.T[s].value

            j = var_idx_map[model.T[s]]
            deriv_lookup[i, j] = -model.R.value * model.rho[s].value

            if s != model.streams.first():
                # Expansion:
                i = con_idx_map[model.expansion[s]]
                j = var_idx_map[model.P[s]]
                deriv_lookup[i, j] = 1 / model.P[s - 1].value

                j = var_idx_map[model.P[s - 1]]
                deriv_lookup[i, j] = -model.P[s].value / model.P[s - 1].value**2

                j = var_idx_map[model.rho[s]]
                deriv_lookup[i, j] = pyo.value(
                    -m.gamma * (m.rho[s] / m.rho[s - 1])**(m.gamma - 1) /
                    m.rho[s - 1])

                j = var_idx_map[model.rho[s - 1]]
                deriv_lookup[i, j] = pyo.value(
                    -m.gamma * (m.rho[s] / m.rho[s - 1])**(m.gamma - 1) *
                    (-m.rho[s] / m.rho[s - 1]**2))

                # Energy balance:
                i = con_idx_map[m.ebal[s]]
                j = var_idx_map[m.rho[s - 1]]
                deriv_lookup[i, j] = pyo.value(m.F[s - 1] * m.T[s - 1])

                j = var_idx_map[m.F[s - 1]]
                deriv_lookup[i, j] = pyo.value(m.rho[s - 1] * m.T[s - 1])

                j = var_idx_map[m.T[s - 1]]
                deriv_lookup[i, j] = pyo.value(m.F[s - 1] * m.rho[s - 1])

                j = var_idx_map[m.rho[s]]
                deriv_lookup[i, j] = pyo.value(-m.F[s] * m.T[s])

                j = var_idx_map[m.F[s]]
                deriv_lookup[i, j] = pyo.value(-m.rho[s] * m.T[s])

                j = var_idx_map[m.T[s]]
                deriv_lookup[i, j] = pyo.value(-m.F[s] * m.rho[s])

                # Mass balance:
                i = con_idx_map[m.mbal[s]]
                j = var_idx_map[m.rho[s - 1]]
                deriv_lookup[i, j] = pyo.value(m.F[s - 1])

                j = var_idx_map[m.F[s - 1]]
                deriv_lookup[i, j] = pyo.value(m.rho[s - 1])

                j = var_idx_map[m.rho[s]]
                deriv_lookup[i, j] = pyo.value(-m.F[s])

                j = var_idx_map[m.F[s]]
                deriv_lookup[i, j] = pyo.value(-m.rho[s])

        # Want to test that the columns have the rows we expect.
        for i, j, e in zip(imat.row, imat.col, imat.data):
            con = all_cons[i]
            var = all_vars[j]
            self.assertIn(var, csr_map[con])
            csr_map[con].remove(var)
            self.assertAlmostEqual(deriv_lookup[i, j], e, 8)
        # And no additional rows
        for con in csr_map:
            self.assertEqual(len(csr_map[con]), 0)
Exemplo n.º 25
0
def generate_strongly_connected_components(
    constraints,
    variables=None,
    include_fixed=False,
):
    """ Performs a block triangularization of the incidence matrix
    of the provided constraints and variables, and yields a block that
    contains the constraints and variables of each diagonal block
    (strongly connected component).

    Arguments
    ---------
    constraints: List of Pyomo constraint data objects
        Constraints used to generate strongly connected components.
    variables: List of Pyomo variable data objects
        Variables that may participate in strongly connected components.
        If not provided, all variables in the constraints will be used.
    include_fixed: Bool
        Indicates whether fixed variables will be included when
        identifying variables in constraints.

    Yields
    ------
    Blocks containing the variables and constraints of every strongly
    connected component, in a topological order, as well as the
    "input variables" for that block

    """
    if variables is None:
        var_set = ComponentSet()
        variables = []
        for con in constraints:
            for var in identify_variables(
                    con.expr,
                    include_fixed=include_fixed,
            ):
                if var not in var_set:
                    variables.append(var)
                    var_set.add(var)

    assert len(variables) == len(constraints)
    igraph = IncidenceGraphInterface()
    var_block_map, con_block_map = igraph.block_triangularize(
        variables=variables,
        constraints=constraints,
    )
    blocks = set(var_block_map.values())
    n_blocks = len(blocks)
    var_blocks = [[] for b in range(n_blocks)]
    con_blocks = [[] for b in range(n_blocks)]
    for var, b in var_block_map.items():
        var_blocks[b].append(var)
    for con, b in con_block_map.items():
        con_blocks[b].append(con)
    subsets = list(zip(con_blocks, var_blocks))
    for block, inputs in generate_subsystem_blocks(
            subsets,
            include_fixed=include_fixed,
    ):
        # TODO: How does len scale for reference-to-list?
        assert len(block.vars) == len(block.cons)
        yield (block, inputs)
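A usage sketch under assumed names: because the blocks are yielded in topological order, a square system can be solved one strongly connected component at a time. Here both components are 1x1, so each can be solved directly with calculate_variable_from_constraint:

import pyomo.environ as pe
from pyomo.util.calc_var_value import calculate_variable_from_constraint

m = pe.ConcreteModel()
m.x = pe.Var(initialize=0)
m.y = pe.Var(initialize=0)
m.c1 = pe.Constraint(expr=m.x == 2)
m.c2 = pe.Constraint(expr=m.y == m.x + 1)

for block, inputs in generate_strongly_connected_components([m.c1, m.c2]):
    # Each diagonal block in this toy system is 1x1
    calculate_variable_from_constraint(block.vars[0], block.cons[0])
print(m.x.value, m.y.value)  # 2.0 3.0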
Exemplo n.º 26
0
    def _transformBlock(self, block, currentds):

        self._fe = {}
        for ds in block.component_objects(ContinuousSet):
            if currentds is None or currentds == ds.name or currentds is ds:
                if 'scheme' in ds.get_discretization_info():
                    raise DAE_Error(
                        "Attempting to discretize ContinuousSet "
                        "'%s' after it has already been discretized. " %
                        ds.name)
                generate_finite_elements(ds, self._nfe[currentds])
                if not ds.get_changed():
                    if len(ds) - 1 > self._nfe[currentds]:
                        logger.warning(
                            "More finite elements were found in "
                            "ContinuousSet '%s' than the number of "
                            "finite elements specified in apply. The "
                            "larger number of finite elements will be "
                            "used." % ds.name)

                self._nfe[ds.name] = len(ds) - 1
                self._fe[ds.name] = list(ds)
                # Adding discretization information to the ContinuousSet
                # object itself so that it can be accessed outside of the
                # discretization object
                disc_info = ds.get_discretization_info()
                disc_info['nfe'] = self._nfe[ds.name]
                disc_info['scheme'] = self._scheme_name + ' Difference'

        # Maybe check to see if any of the ContinuousSets have been changed,
        # if they haven't then the model components need not be updated
        # or even iterated through
        expand_components(block)

        for d in block.component_objects(DerivativeVar, descend_into=True):
            dsets = d.get_continuousset_list()
            for i in ComponentSet(dsets):
                if currentds is None or i.name == currentds:
                    oldexpr = d.get_derivative_expression()
                    loc = d.get_state_var()._contset[i]
                    count = dsets.count(i)
                    if count >= 3:
                        raise DAE_Error(
                            "Error discretizing '%s' with respect to '%s'. "
                            "Current implementation only allows for taking the"
                            " first or second derivative with respect to "
                            "a particular ContinuousSet" % (d.name, i.name))
                    scheme = self._scheme[count - 1]
                    newexpr = create_partial_expression(
                        scheme, oldexpr, i, loc)
                    d.set_derivative_expression(newexpr)

            # Reclassify DerivativeVar if all indexing ContinuousSets have
            # been discretized. Add discretization equations to the
            # DerivativeVar's parent block.
            if d.is_fully_discretized():
                add_discretization_equations(d.parent_block(), d)
                d.parent_block().reclassify_component_type(d, Var)

                # Keep track of any reclassified DerivativeVar components so
                # that the Simulator can easily identify them if the model
                # is simulated after discretization
                # TODO: Update the discretization transformations to use
                # a Block to add things to the model and store discretization
                # information. Using a list for now because the simulator
                # does not yet support models containing active Blocks
                reclassified_list = getattr(
                    block, '_pyomo_dae_reclassified_derivativevars', None)
                if reclassified_list is None:
                    block._pyomo_dae_reclassified_derivativevars = list()
                    reclassified_list = \
                        block._pyomo_dae_reclassified_derivativevars

                reclassified_list.append(d)

        # Reclassify Integrals if all ContinuousSets have been discretized
        if block_fully_discretized(block):

            if block.contains_component(Integral):
                for i in block.component_objects(Integral, descend_into=True):
                    i.reconstruct()
                    i.parent_block().reclassify_component_type(i, Expression)
                # If a model contains integrals they are most likely to
                # appear in the objective function which will need to be
                # reconstructed after the model is discretized.
                for k in block.component_objects(Objective, descend_into=True):
                    # TODO: check this, reconstruct might not work
                    k.reconstruct()
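For reference, a sketch of how this discretization is typically applied (names are assumptions):

import pyomo.environ as pe
from pyomo.dae import ContinuousSet, DerivativeVar

m = pe.ConcreteModel()
m.t = ContinuousSet(bounds=(0, 1))
m.x = pe.Var(m.t)
m.dxdt = DerivativeVar(m.x, wrt=m.t)

pe.TransformationFactory('dae.finite_difference').apply_to(
    m, nfe=10, wrt=m.t, scheme='BACKWARD')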
Exemplo n.º 27
0
    def _transform_disjunctionData(self, obj, index, transBlock=None):
        if not obj.active:
            return
        # Hull reformulation doesn't work if this is an OR constraint. So if
        # xor is false, give up
        if not obj.xor:
            raise GDP_Error("Cannot do hull reformulation for "
                            "Disjunction '%s' with OR constraint.  "
                            "Must be an XOR!" % obj.name)

        if transBlock is None:
            # It's possible that we have already created a transformation block
            # for another disjunctionData from this same container. If that's
            # the case, let's use the same transformation block. (Else it will
            # be really confusing that the XOR constraint goes to that old block
            # but we create a new one here.)
            if obj.parent_component()._algebraic_constraint is not None:
                transBlock = obj.parent_component()._algebraic_constraint().\
                             parent_block()
            else:
                transBlock = self._add_transformation_block(obj.parent_block())

        parent_component = obj.parent_component()

        orConstraint = self._add_xor_constraint(parent_component, transBlock)
        disaggregationConstraint = transBlock.disaggregationConstraints
        disaggregationConstraintMap = transBlock._disaggregationConstraintMap

        # Just because it's unlikely this is what someone meant to do...
        if len(obj.disjuncts) == 0:
            raise GDP_Error(
                "Disjunction '%s' is empty. This is "
                "likely indicative of a modeling error." %
                obj.getname(fully_qualified=True, name_buffer=NAME_BUFFER))

        # We first go through and collect all the variables that we
        # are going to disaggregate.
        varOrder_set = ComponentSet()
        varOrder = []
        varsByDisjunct = ComponentMap()
        localVarsByDisjunct = ComponentMap()
        include_fixed_vars = not self._config.assume_fixed_vars_permanent
        for disjunct in obj.disjuncts:
            disjunctVars = varsByDisjunct[disjunct] = ComponentSet()
            for cons in disjunct.component_data_objects(
                    Constraint,
                    active=True,
                    sort=SortComponents.deterministic,
                    descend_into=Block):
                # [ESJ 02/14/2020] By default, we disaggregate fixed variables
                # on the philosophy that fixing is not a promise for the future
                # and we are mathematically wrong if we don't transform these
                # correctly and someone later unfixes them and keeps playing
                # with their transformed model. However, the user may have set
                # assume_fixed_vars_permanent to True in which case we will skip
                # them
                for var in EXPR.identify_variables(
                        cons.body, include_fixed=include_fixed_vars):
                    # Note the use of a list so that we will
                    # eventually disaggregate the vars in a
                    # deterministic order (the order that we found
                    # them)
                    disjunctVars.add(var)
                    if var not in varOrder_set:
                        varOrder.append(var)
                        varOrder_set.add(var)

            # check for LocalVars Suffix
            localVarsByDisjunct = self._get_local_var_suffixes(
                disjunct, localVarsByDisjunct)

        # We will disaggregate all variables which are not explicitly declared
        # as being local. Note however, that we do declare our own disaggregated
        # variables as local, so they will not be re-disaggregated.
        varSet = []
        # Note that variables are local with respect to a Disjunct. We deal with
        # them here to do some error checking (if something is obviously not
        # local since it is used in multiple Disjuncts in this Disjunction) and
        # also to get a deterministic order in which to process them when we
        # transform the Disjuncts: Values of localVarsByDisjunct are
        # ComponentSets, so we need this for determinism (we iterate through the
        # localVars of a Disjunct later)
        localVars = ComponentMap()
        for var in varOrder:
            disjuncts = [d for d in varsByDisjunct if var in varsByDisjunct[d]]
            # clearly not local if used in more than one disjunct
            if len(disjuncts) > 1:
                if self._generate_debug_messages:
                    logger.debug("Assuming '%s' is not a local var since it is"
                                 "used in multiple disjuncts." %
                                 var.getname(fully_qualified=True,
                                             name_buffer=NAME_BUFFER))
                varSet.append(var)
            # disjuncts is a list of length 1
            elif localVarsByDisjunct.get(disjuncts[0]) is not None:
                if var in localVarsByDisjunct[disjuncts[0]]:
                    localVars_thisDisjunct = localVars.get(disjuncts[0])
                    if localVars_thisDisjunct is not None:
                        localVars[disjuncts[0]].append(var)
                    else:
                        localVars[disjuncts[0]] = [var]
                else:
                    # It's not local to this Disjunct
                    varSet.append(var)
            else:
                # We don't even have any local vars for this Disjunct.
                varSet.append(var)

        # Now that we know who we need to disaggregate, we will do it
        # while we also transform the disjuncts.
        or_expr = 0
        for disjunct in obj.disjuncts:
            or_expr += disjunct.indicator_var
            self._transform_disjunct(disjunct, transBlock, varSet,
                                     localVars.get(disjunct, []))
        orConstraint.add(index, (or_expr, 1))
        # map the DisjunctionData to its XOR constraint to mark it as
        # transformed
        obj._algebraic_constraint = weakref_ref(orConstraint[index])

        # add the reaggregation constraints
        for i, var in enumerate(varSet):
            disaggregatedExpr = 0
            for disjunct in obj.disjuncts:
                if disjunct._transformation_block is None:
                    # Because we called _transform_disjunct in the loop above,
                    # we know that if this isn't transformed it is because it
                    # was cleanly deactivated, and we can just skip it.
                    continue

                disaggregatedVar = disjunct._transformation_block().\
                                   _disaggregatedVarMap['disaggregatedVar'][var]
                disaggregatedExpr += disaggregatedVar

            disaggregationConstraint.add((i, index), var == disaggregatedExpr)
            # and update the map so that we can find this later. We index by
            # variable and the particular disjunction because there is a
            # different one for each disjunction
            disaggregationConstraintMap.setdefault(var, ComponentMap())[
                obj] = disaggregationConstraint[(i, index)]

        # deactivate for the writers
        obj.deactivate()
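
For context, here is a hedged, self-contained sketch (not part of the transformation source; the LocalVars Suffix semantics follow the Pyomo GDP documentation but may vary across versions) of the user-side model this code operates on: a variable used in both disjuncts gets disaggregated, while a variable declared local is skipped, and applying the transformation adds the XOR and reaggregation constraints built above.

from pyomo.environ import (ConcreteModel, Constraint, Suffix,
                           TransformationFactory, Var)
from pyomo.gdp import Disjunct, Disjunction

m = ConcreteModel()
m.x = Var(bounds=(0, 10))       # used in both disjuncts -> disaggregated
m.d1 = Disjunct()
m.d1.y = Var(bounds=(0, 5))     # only ever used inside d1
m.d1.c = Constraint(expr=m.x + m.d1.y <= 8)
# declare y local so it is not disaggregated
m.d1.LocalVars = Suffix(direction=Suffix.LOCAL)
m.d1.LocalVars[m.d1] = [m.d1.y]
m.d2 = Disjunct()
m.d2.c = Constraint(expr=m.x >= 3)
m.disjunction = Disjunction(expr=[m.d1, m.d2])

# 'gdp.hull' is the registered name in current Pyomo releases (older
# releases used 'gdp.chull'); after this call, m.x has one disaggregated
# copy per disjunct, tied together by a reaggregation constraint of the
# form m.x == x_d1 + x_d2 (the copy names here are illustrative)
TransformationFactory('gdp.hull').apply_to(m)
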
def assert_categorization(model):
    init_input_set = ComponentSet([model.mixer.S_inlet.flow_vol[0],
                                   model.mixer.E_inlet.flow_vol[0]])

    init_deriv_list = []
    init_diff_list = []
    init_fixed_list = [
                       model.mixer.E_inlet.temperature[0],
                       model.mixer.S_inlet.temperature[0],
                       model.cstr.control_volume.energy_holdup[0, 'aq']]

    init_ic_list = [model.cstr.control_volume.volume[0]]

    init_alg_list = [
        model.cstr.control_volume.volume[0],
        model.cstr.outlet.flow_vol[0],
        model.cstr.outlet.temperature[0],
        model.cstr.inlet.flow_vol[0],
        model.cstr.inlet.temperature[0],
        model.mixer.outlet.flow_vol[0],
        model.mixer.outlet.temperature[0],
        model.cstr.control_volume.energy_accumulation[0, 'aq'],
        ]

    for j in model.properties.component_list:
        init_deriv_list.append(
                model.cstr.control_volume.material_accumulation[0, 'aq', j])
        init_diff_list.append(
                model.cstr.control_volume.material_holdup[0, 'aq', j])
        
        init_fixed_list.append(model.mixer.E_inlet.conc_mol[0, j])
        init_fixed_list.append(model.mixer.S_inlet.conc_mol[0, j])

        init_alg_list.extend([
            model.cstr.control_volume.properties_out[0].flow_mol_comp[j],
            model.cstr.inlet.conc_mol[0, j],
            model.cstr.control_volume.properties_in[0].flow_mol_comp[j],
            model.cstr.control_volume.rate_reaction_generation[0, 'aq', j],
            model.mixer.mixed_state[0].flow_mol_comp[j],
            model.mixer.E_inlet_state[0].flow_mol_comp[j],
            model.mixer.S_inlet_state[0].flow_mol_comp[j]
            ])
        if j != 'Solvent':
            init_alg_list.append(model.cstr.outlet.conc_mol[0, j])
            init_alg_list.append(model.mixer.outlet.conc_mol[0, j])
        else:
            init_fixed_list.append(model.cstr.outlet.conc_mol[0, j])
            init_fixed_list.append(model.mixer.outlet.conc_mol[0, j])

        if j != 'Solvent':
            init_ic_list.append(
                    model.cstr.control_volume.material_holdup[0, 'aq', j])

    for r in model.reactions.rate_reaction_idx:
        init_alg_list.extend([
            model.cstr.control_volume.reactions[0].reaction_coef[r],
            model.cstr.control_volume.reactions[0].reaction_rate[r],
            model.cstr.control_volume.rate_reaction_extent[0, r]
            ])

    init_deriv_set = ComponentSet(init_deriv_list)
    init_diff_set = ComponentSet(init_diff_list)
    init_fixed_set = ComponentSet(init_fixed_list)
    init_ic_set = ComponentSet(init_ic_list)
    init_alg_set = ComponentSet(init_alg_list)
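    # ComponentSet hashes on component identity, so the membership checks
    # below compare the modeling objects themselves, not their values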

    assert model._NMPC_NAMESPACE.input_vars.n_vars == len(init_input_set)
    for v in model._NMPC_NAMESPACE.input_vars:
        assert v[0] in init_input_set

    assert model._NMPC_NAMESPACE.deriv_vars.n_vars == len(init_deriv_set)
    for v in model._NMPC_NAMESPACE.deriv_vars:
        assert v[0] in init_deriv_set

    assert len(model._NMPC_NAMESPACE.diff_vars) == len(init_diff_set)
    for v in model._NMPC_NAMESPACE.diff_vars:
        assert v[0] in init_diff_set

    assert len(model._NMPC_NAMESPACE.fixed_vars) == len(init_fixed_set)
    for v in model._NMPC_NAMESPACE.fixed_vars:
        assert v[0] in init_fixed_set

    assert len(model._NMPC_NAMESPACE.alg_vars) == len(init_alg_set)
    for v in model._NMPC_NAMESPACE.alg_vars:
        assert v[0] in init_alg_set

    assert len(model._NMPC_NAMESPACE.ic_vars) == len(init_ic_set)
    for v in model._NMPC_NAMESPACE.ic_vars:
        assert v[0] in init_ic_set

    assert len(model._NMPC_NAMESPACE.scalar_vars) == 0

    for var in model._NMPC_NAMESPACE.deriv_vars:
        assert len(var) == len(model._NMPC_NAMESPACE.get_time())
        assert var.index_set() is model._NMPC_NAMESPACE.get_time()
    for var in model._NMPC_NAMESPACE.alg_vars:
        assert len(var) == len(model._NMPC_NAMESPACE.get_time())
        assert var.index_set() is model._NMPC_NAMESPACE.get_time()
Example No. 29
    def _fourier_motzkin_elimination(self, constraints, vars_to_eliminate):
        """Performs FME on the constraint list in the argument
        (which is assumed to be all >= constraints and stored in the
        dictionary representation), projecting out each of the variables in
        vars_to_eliminate"""

        # We only need to eliminate variables that actually appear in
        # this set of constraints... Revise our list.
        vars_that_appear = []
        vars_that_appear_set = ComponentSet()
        for cons in constraints:
            std_repn = cons['body']
            if not std_repn.is_linear():
                # as long as none of vars_that_appear are in the nonlinear part,
                # we are actually okay.
                nonlinear_vars = ComponentSet(
                    v for two_tuple in std_repn.quadratic_vars
                    for v in two_tuple)
                nonlinear_vars.update(std_repn.nonlinear_vars)
                for var in nonlinear_vars:
                    if var in vars_to_eliminate:
                        raise RuntimeError(
                            "Variable %s appears in a nonlinear "
                            "constraint. The Fourier-Motzkin "
                            "Elimination transformation can only "
                            "be used to eliminate variables "
                            "which only appear linearly." % var.name)
            for var in std_repn.linear_vars:
                if var in vars_to_eliminate:
                    if var not in vars_that_appear_set:
                        vars_that_appear.append(var)
                        vars_that_appear_set.add(var)

        # the elimination loop actually begins here (iterative,
        # one variable projected out per pass)
        total = len(vars_that_appear)
        iteration = 1
        while vars_that_appear:
            # first var we will project out
            the_var = vars_that_appear.pop()
            logger.warning("Projecting out var %s of %s" % (iteration, total))
            if self.verbose:
                logger.info("Projecting out %s" % the_var.getname(
                    fully_qualified=True, name_buffer=NAME_BUFFER))
                logger.info("New constraints are:")

            # We 'reorganize' the constraints by sorting on the sign of
            # the_var's coefficient: this tells us whether we have
            # the_var <= other stuff or vice versa.
            leq_list = []
            geq_list = []
            waiting_list = []

            coefs = []
            for cons in constraints:
                leaving_var_coef = cons['map'].get(the_var)
                if leaving_var_coef is None or leaving_var_coef == 0:
                    waiting_list.append(cons)
                    if self.verbose:
                        logger.info(
                            "\t%s <= %s" %
                            (cons['lower'], cons['body'].to_expression()))
                    continue

                # we know the constraint is a >= constraint, using that
                # assumption below.
                # NOTE: neither of the scalar multiplications below flip the
                # constraint. So we are sure to have only geq constraints
                # forever, which is exactly what we want.
                if not self.do_integer_arithmetic:
                    if leaving_var_coef < 0:
                        leq_list.append(
                            self._nonneg_scalar_multiply_linear_constraint(
                                cons, -1.0 / leaving_var_coef))
                    else:
                        geq_list.append(
                            self._nonneg_scalar_multiply_linear_constraint(
                                cons, 1.0 / leaving_var_coef))
                else:
                    coefs.append(
                        self._as_integer(
                            leaving_var_coef,
                            self._get_noninteger_coef_error_message,
                            (the_var.name, leaving_var_coef)))
            if self.do_integer_arithmetic and len(coefs) > 0:
                least_common_mult = lcm(coefs)
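                # Scale each constraint that mentions the_var so the
                # magnitude of its coefficient becomes exactly the lcm;
                # multiplying by the positive integer to_lcm preserves the
                # >= sense and keeps every coefficient integral.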
                for cons in constraints:
                    leaving_var_coef = cons['map'].get(the_var)
                    if leaving_var_coef is None or leaving_var_coef == 0:
                        continue
                    to_lcm = least_common_mult // abs(int(leaving_var_coef))
                    if leaving_var_coef < 0:
                        leq_list.append(
                            self._nonneg_scalar_multiply_linear_constraint(
                                cons, to_lcm))
                    else:
                        geq_list.append(
                            self._nonneg_scalar_multiply_linear_constraint(
                                cons, to_lcm))

            constraints = waiting_list
            for leq in leq_list:
                for geq in geq_list:
                    constraints.append(self._add_linear_constraints(leq, geq))
                    if self.verbose:
                        cons = constraints[-1]
                        logger.info(
                            "\t%s <= %s" %
                            (cons['lower'], cons['body'].to_expression()))

            iteration += 1

        return constraints
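
To make one projection step concrete, here is a self-contained toy sketch (illustrative names, not this transformation's API) built on the same idea: constraints stored as {var: coef} maps with a constant lower bound, all read as sum(coef * var) >= lower.

def eliminate(constraints, var):
    """Project `var` out of a list of >= constraints given as
    (coef_map, lower) pairs."""
    leq, geq, waiting = [], [], []
    for coefs, lower in constraints:
        a = coefs.get(var, 0)
        if a == 0:
            waiting.append((coefs, lower))
            continue
        # dividing by |a| never flips a >= constraint; afterwards the
        # coefficient of `var` is exactly +1 or -1
        scaled = ({v: c / abs(a) for v, c in coefs.items()}, lower / abs(a))
        (geq if a > 0 else leq).append(scaled)
    # each (leq, geq) pair sums to a new constraint in which `var` cancels
    for cl, ll in leq:
        for cg, lg in geq:
            combo = {v: cl.get(v, 0) + cg.get(v, 0)
                     for v in set(cl) | set(cg) if v != var}
            waiting.append((combo, ll + lg))
    return waiting

# Eliminate y from:  x + y >= 2  and  -y >= -3 (i.e. y <= 3).
# Expected projection: x >= -1.
print(eliminate([({'x': 1, 'y': 1}, 2), ({'y': -1}, -3)], 'y'))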
Example No. 30
    def solve(self, model, first_stage_variables, second_stage_variables,
              uncertain_params, uncertainty_set, local_solver, global_solver,
              **kwds):
        """Solve the model.

        Parameters
        ----------
        model: ConcreteModel
            A ``ConcreteModel`` object representing the deterministic
            model, cast as a minimization problem.
        first_stage_variables: List[Var]
            The list of ``Var`` objects referenced in ``model``
            representing the design variables.
        second_stage_variables: List[Var]
            The list of ``Var`` objects referenced in ``model``
            representing the control variables.
        uncertain_params: List[Param]
            The list of ``Param`` objects referenced in ``model``
            representing the uncertain parameters.  MUST be ``mutable``.
            Assumes entries are provided in consistent order with the
            entries of 'nominal_uncertain_param_vals' input.
        uncertainty_set: UncertaintySet
            ``UncertaintySet`` object representing the uncertainty space
            that the final solutions will be robust against.
        local_solver: Solver
            ``Solver`` object to utilize as the primary local NLP solver.
        global_solver: Solver
            ``Solver`` object to utilize as the primary global NLP solver.

        """

        # === Add the explicit arguments to the config
        config = self.CONFIG(kwds.pop('options', {}))
        config.first_stage_variables = first_stage_variables
        config.second_stage_variables = second_stage_variables
        config.uncertain_params = uncertain_params
        config.uncertainty_set = uncertainty_set
        config.local_solver = local_solver
        config.global_solver = global_solver

        dev_options = kwds.pop('dev_options', {})
        config.set_value(kwds)
        config.set_value(dev_options)

        # === Validate kwarg inputs
        validate_kwarg_inputs(model, config)

        # === Validate ability of grcs RO solver to handle this model
        if not model_is_valid(model):
            raise AttributeError(
                "This model structure is not currently handled by the ROSolver."
            )

        # === Define nominal point if not specified
        if len(config.nominal_uncertain_param_vals) == 0:
            config.nominal_uncertain_param_vals = list(
                p.value for p in config.uncertain_params)
        elif len(config.nominal_uncertain_param_vals) != len(
                config.uncertain_params):
            raise AttributeError(
                "The nominal_uncertain_param_vals list must be the same length"
                "as the uncertain_params list")

        # === Create data containers
        model_data = ROSolveResults()
        model_data.timing = Bunch()

        # === Set up logger for logging results
        with time_code(model_data.timing, 'total', is_main_timer=True):
            config.progress_logger.setLevel(logging.INFO)

            # === PREAMBLE
            output_logger(config=config,
                          preamble=True,
                          version=str(self.version()))

            # === DISCLAIMER
            output_logger(config=config, disclaimer=True)

            # === A block to hold list-type data to make cloning easy
            util = Block(concrete=True)
            util.first_stage_variables = config.first_stage_variables
            util.second_stage_variables = config.second_stage_variables
            util.uncertain_params = config.uncertain_params

            model_data.util_block = unique_component_name(model, 'util')
            model.add_component(model_data.util_block, util)
            # Note:  model.component(model_data.util_block) is util

            # === Validate the uncertainty set here; this requires the util
            #     block for Cardinality and FactorModel sets
            validate_uncertainty_set(config=config)

            # === Deactivate objective on model
            for o in model.component_data_objects(Objective):
                o.deactivate()

            # === Cloning with the objective deactivated triggers a logger
            #     warning here
            model_data.original_model = model
            # === For keeping track of variables after cloning
            cname = unique_component_name(model_data.original_model,
                                          'tmp_var_list')
            src_vars = list(
                model_data.original_model.component_data_objects(Var))
            setattr(model_data.original_model, cname, src_vars)
            model_data.working_model = model_data.original_model.clone()

            # === Add objective expressions
            identify_objective_functions(model_data.working_model, config)

            # === Put model in standard form
            transform_to_standard_form(model_data.working_model)

            # === Add decision rule information
            add_decision_rule_variables(model_data, config)
            add_decision_rule_constraints(model_data, config)

            # === Move bounds on control variables to explicit ineq constraints
            wm_util = model_data.working_model

            # === Assuming all other Var objects in the model are state variables
            fsv = ComponentSet(
                model_data.working_model.util.first_stage_variables)
            ssv = ComponentSet(
                model_data.working_model.util.second_stage_variables)
            sv = ComponentSet()
            model_data.working_model.util.state_vars = []
            for v in model_data.working_model.component_data_objects(Var):
                if v not in fsv and v not in ssv and v not in sv:
                    model_data.working_model.util.state_vars.append(v)
                    sv.add(v)

            # Bounds on second-stage variables and state variables are
            # separation objectives; they are brought in this way as
            # explicit constraints
            for c in model_data.working_model.util.second_stage_variables:
                turn_bounds_to_constraints(c, wm_util, config)

            for c in model_data.working_model.util.state_vars:
                turn_bounds_to_constraints(c, wm_util, config)

            # === Make control_variable_bounds array
            wm_util.ssv_bounds = []
            for c in model_data.working_model.component_data_objects(
                    Constraint, descend_into=True):
                if "bound_con" in c.name:
                    wm_util.ssv_bounds.append(c)

            # === Solve and load solution into model
            pyros_soln, final_iter_separation_solns = ROSolver_iterative_solve(
                model_data, config)

            return_soln = ROSolveResults()
            if pyros_soln is not None and final_iter_separation_solns is not None:
                if config.load_solution and \
                        (pyros_soln.pyros_termination_condition is pyrosTerminationCondition.robust_optimal or
                         pyros_soln.pyros_termination_condition is pyrosTerminationCondition.robust_feasible):
                    load_final_solution(model_data, pyros_soln.master_soln,
                                        config)

                # === Return time info
                model_data.total_cpu_time = get_main_elapsed_time(
                    model_data.timing)
                iterations = pyros_soln.total_iters + 1

                # === Return config to user
                return_soln.config = config
                # Report the negative of the objective value if the original
                # objective was a maximization, since the algorithm works
                # with the minimization form
                if next(model.component_data_objects(
                        Objective)).sense == maximize:
                    negation = -1
                else:
                    negation = 1
                if config.objective_focus == ObjectiveType.nominal:
                    return_soln.final_objective_value = negation * value(
                        pyros_soln.master_soln.master_model.obj)
                elif config.objective_focus == ObjectiveType.worst_case:
                    return_soln.final_objective_value = negation * value(
                        pyros_soln.master_soln.master_model.zeta)
                return_soln.pyros_termination_condition = pyros_soln.pyros_termination_condition

                return_soln.time = model_data.total_cpu_time
                return_soln.iterations = iterations

                # === Remove util block
                model.del_component(model_data.util_block)

                del pyros_soln.util_block
                del pyros_soln.working_model
            else:
                return_soln.pyros_termination_condition = pyrosTerminationCondition.robust_infeasible
                return_soln.final_objective_value = None
                return_soln.time = get_main_elapsed_time(model_data.timing)
                return_soln.iterations = 0
        return return_soln
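
A hedged usage sketch (solver availability and the 'pyros' factory name are assumptions about the installed Pyomo; adapt as needed):

from pyomo.environ import (ConcreteModel, Objective, Param, SolverFactory,
                           Var)
from pyomo.contrib.pyros import BoxSet

m = ConcreteModel()
m.x = Var(bounds=(0, 10))                  # design (first-stage) variable
m.z = Var(bounds=(0, 10))                  # control (second-stage) variable
m.q = Param(initialize=1.0, mutable=True)  # uncertain param: MUST be mutable
m.obj = Objective(expr=(m.x - m.q) ** 2 + m.z)

results = SolverFactory('pyros').solve(
    model=m,
    first_stage_variables=[m.x],
    second_stage_variables=[m.z],
    uncertain_params=[m.q],
    uncertainty_set=BoxSet(bounds=[(0.5, 1.5)]),
    local_solver=SolverFactory('ipopt'),
    global_solver=SolverFactory('baron'),
)
print(results.pyros_termination_condition)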