Example #1
    def test_constructor(self):
        # VectorSeries is constructed from an OrderedDict with data
        # series (lists) as values. Keys are meant to be identifiers
        # of Pyomo model components, but could technically be anything.
        # To illustrate their intended use, tests use CUIDs as keys.
        m = make_model()
        data = OrderedDict([(pyo.ComponentUID(_slice.referent),
                             [_slice[t].value for t in m.time])
                            for _slice in m.v1_refs])

        name = 'v1'
        tol = 0.1
        history = VectorSeries(data,
                               time=list(m.time),
                               name=name,
                               tolerance=tol)
        assert history.name == name
        assert type(history.time) is TimeList
        assert history.time == m.time
        assert history.time.tolerance == tol

        for _slice in m.v1_refs:
            assert pyo.ComponentUID(_slice.referent) in history

        empty_series = VectorSeries()
        assert empty_series == OrderedDict()
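A note on the pattern above: ComponentUID instances hash and compare by the component path they encode, which is what makes them usable as OrderedDict keys here. A minimal standalone sketch (toy model, not from the test suite):

import pyomo.environ as pyo
from collections import OrderedDict

m = pyo.ConcreteModel()
m.time = pyo.Set(initialize=[0, 1, 2])
m.v = pyo.Var(m.time, initialize=0.0)

# A CUID built from the component equals (and hashes like) one parsed
# from its string form, so either can be used to look up the key.
key = pyo.ComponentUID(m.v)
data = OrderedDict([(key, [m.v[t].value for t in m.time])])
assert pyo.ComponentUID(str(key)) == key
assert pyo.ComponentUID("v") in data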
Example #2
    def test_consistent(self):
        m = make_model()
        data = OrderedDict([(pyo.ComponentUID(_slice.referent),
                             [_slice[t].value for t in m.time])
                            for _slice in m.v3_refs])
        name = 'v3'
        tol = 0.1
        history = VectorSeries(data,
                               time=list(m.time),
                               name=name,
                               tolerance=tol)

        # These vals are consistent because this is how m.v3
        # was initialized in make_model.
        vals = [3.0 * m.time.last() for _ in m.space]
        bad_vals = [m.time.last() for _ in m.space]
        assert not history.consistent([])
        assert not history.consistent(bad_vals)
        assert history.consistent(vals)

        assert VectorSeries().consistent([])
        assert not VectorSeries().consistent([1])

        empty_data = OrderedDict([(pyo.ComponentUID(_slice.referent), [])
                                  for _slice in m.v1_refs])
        empty_series = VectorSeries(empty_data)
        vals = [1 for _ in m.space]
        assert not empty_series.consistent([])
        assert empty_series.consistent(vals)
Example #3
File: plot.py Project: IDAES/publications
def plot_time_indexed_variable(var):
    time = list(var.index_set())
    data = [var[t].value for t in time]
    fig = plt.figure()
    ax = fig.add_subplot()
    ax.plot(time, data)
    if var.is_reference():
        cuid = pyo.ComponentUID(var.referent)
    else:
        cuid = pyo.ComponentUID(var)
    ax.set_title(str(cuid))
    return fig, ax
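A usage sketch for plot_time_indexed_variable (assuming the module's pyomo and matplotlib imports; the model here is a toy of my own): passing a Reference exercises the is_reference() branch, so the title becomes the CUID of the slice, e.g. "flow[*,A]".

import pyomo.environ as pyo

m = pyo.ConcreteModel()
m.time = pyo.Set(initialize=[0.0, 1.0, 2.0])
m.comp = pyo.Set(initialize=["A", "B"])
m.flow = pyo.Var(m.time, m.comp, initialize=lambda m, t, j: t)

# The Reference fixes the non-time index; its index set is m.time.
ref = pyo.Reference(m.flow[:, "A"])
fig, ax = plot_time_indexed_variable(ref)
fig.savefig("flow_A.png")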
Example #4
File: model.py Project: IDAES/publications
def get_data_from_steady_model(m, time):
    assert len(time) == 1
    t0 = next(iter(time))
    scalar_vars, dae_vars = flatten_dae_components(m, time, pyo.Var)
    scalar_data = {
        str(pyo.ComponentUID(var)): var.value
        for var in scalar_vars
    }
    dae_data = {
        str(pyo.ComponentUID(var.referent)): var[t0].value
        for var in dae_vars
    }
    return scalar_data, dae_data
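Because the keys are plain strings, the returned data can be reloaded into a structurally identical model later. A hedged round-trip sketch (toy model of my own; find_component resolves wildcard CUID strings to References, the same behavior Example #23 below relies on):

import pyomo.environ as pyo

def make():
    m = pyo.ConcreteModel()
    m.time = pyo.Set(initialize=[0.0])
    m.v = pyo.Var(m.time)
    return m

m1 = make()
m1.v[0.0].set_value(3.5)
ref = pyo.Reference(m1.v[:])
dae_data = {str(pyo.ComponentUID(ref.referent)): m1.v[0.0].value}

m2 = make()
t0 = next(iter(m2.time))
for name, val in dae_data.items():
    # "v[*]" contains a wildcard, so find_component returns a
    # time-indexed Reference on the new model.
    m2.find_component(name)[t0].set_value(val)
assert m2.v[0.0].value == 3.5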
Example #5
File: loa.py Project: Robbybp/IDAES-CLC
def init_LOA(m, iterlim=5, add_min_flows=True):
    """Performs initialization of the LOA algorithm by solving set covering NLP
    subproblems so that OA linearizations exist for all the major process
    units.

    This work is based upon prototyping done by Eloy Fernandez at CMU.

    Args:
        m (FlowsheetModel): flowsheet
        iterlim (int, optional): iteration limit for initialization
        add_min_flows (bool, optional): passed through to the NLP
            subproblem solves

    Returns:
        Boolean: True if set covering successful. False otherwise.
    """
    m.switchable_units = frozenset(
        pe.ComponentUID(unit.equip_exists) for unit in itervalues(m.units)
        if hasattr(unit, 'equip_exists'))
    m.covered_units = set()
    m.not_covered_units = set(m.switchable_units)
    iter_count = 1
    while m.not_covered_units and iter_count <= iterlim:
        # print('Covered: ', m.covered_units)
        # print('Not covered: ', m.not_covered_units)
        if not m.solve_set_cover_MIP():
            print('Set covering MIP infeasible. Overall problem infeasible.')
            return False
        # m.solvers.local_NLP.outlev = 3  # use for troubleshooting CONOPT
        if m.solve_local_NLP(add_min_flows=add_min_flows):
            print('Solved initialization NLP. Adding OA cuts.')
            m.add_oa_cut()
            active_units = frozenset(
                pe.ComponentUID(unit.equip_exists)
                for unit in itervalues(m.units)
                if hasattr(unit, 'equip_exists')
                and abs(pe.value(unit.equip_exists) - 1.0) <= 1E-6)
            m.covered_units.update(active_units)
            m.not_covered_units.difference_update(active_units)
        else:
            # TODO could be infeasible due to bad NLP subproblem
            # initialization. Need to make this more robust
            print('Unable to solve NLP.')
        m.add_integer_cut(tmp=True)
        iter_count += 1
    if m.not_covered_units:
        # Iteration limit was hit without a full covering of all major units.
        print('Iteration limit reached for set covering initialization.')
        return False
    return True
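One reason the sets above hold CUIDs rather than the variables themselves: CUIDs are hashable, support ordinary set algebra, and can be resolved later on a clone or rebuilt copy of the model. A toy illustration (not the flowsheet API above):

import pyomo.environ as pe

m = pe.ConcreteModel()
m.units = pe.Block([1, 2, 3])
for b in m.units.values():
    b.equip_exists = pe.Var(domain=pe.Binary, initialize=0)

switchable = frozenset(
    pe.ComponentUID(b.equip_exists) for b in m.units.values())
covered = {pe.ComponentUID(m.units[1].equip_exists)}
not_covered = set(switchable) - covered

# The same CUIDs resolve on a clone, where the component objects differ.
clone = m.clone()
for cuid in not_covered:
    cuid.find_component_on(clone).fix(0)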
Example #6
def initialize_dynamic_from_steady(m_dyn, m_steady, flattened=None):
    """Initialize a dynamic model from a steady-state model by matching
    time-indexed variables via the string CUIDs of their referents.
    Derivative variables, which have no steady-state counterpart, are
    initialized to zero.
    """
    time = m_dyn.fs.time
    time_steady = m_steady.fs.time
    t_steady = time_steady.first()

    TIMER.start("get derivs")
    diff_deriv_disc_list = list(
        generate_diff_deriv_disc_components_along_set(m_dyn, time))
    derivs = [var for _, var, _ in diff_deriv_disc_list]
    TIMER.stop("get derivs")

    TIMER.start("flatten dynamic")
    if flattened is None:
        scalar_vars, dae_vars = flatten_dae_components(m_dyn, time, pyo.Var)
    else:
        scalar_vars, dae_vars = flattened
    TIMER.stop("flatten dynamic")
    TIMER.start("flatten steady")
    steady_scalar_vars, steady_dae_vars = flatten_dae_components(
        m_steady, time_steady, pyo.Var)
    TIMER.stop("flatten steady")

    deriv_cuid_set = set(str(pyo.ComponentUID(var.referent)) for var in derivs)
    scalar_value_map = dict(
        (str(pyo.ComponentUID(var)), var.value) for var in steady_scalar_vars)
    dae_value_map = dict(
        (str(pyo.ComponentUID(var.referent)), var[t_steady].value)
        for var in steady_dae_vars)

    TIMER.start("set values")
    for var in scalar_vars:
        cuid = str(pyo.ComponentUID(var))
        var.set_value(scalar_value_map[cuid])
    for var in dae_vars:
        cuid = str(pyo.ComponentUID(var.referent))
        if cuid in dae_value_map:
            for t in time:
                var[t].set_value(dae_value_map[cuid])
        else:
            assert cuid in deriv_cuid_set
            # TODO: Better way of initializing derivatives
            for t in time:
                var[t].set_value(0.0)
    TIMER.stop("set values")

    return scalar_vars, dae_vars
Example #7
def get_max_values_from_steady(m):
    time = m.fs.time
    assert len(time) == 1
    t0 = next(iter(time))
    gas_length = m.fs.MB.gas_phase.length_domain
    solid_length = m.fs.MB.solid_phase.length_domain
    sets = (time, gas_length, solid_length)
    sets_list, comps_list = flatten_components_along_sets(m, sets, pyo.Var)
    for sets, comps in zip(sets_list, comps_list):
        if len(sets) == 2 and sets[0] is time and sets[1] is gas_length:
            gas_comps = comps
        elif len(sets) == 2 and sets[0] is time and sets[1] is solid_length:
            solid_comps = comps
    variables = gas_comps + solid_comps
    max_values = ComponentMap(
        (
            var,
            max(abs(data.value) for data in var.values()
                if data.value is not None),
        )
        for var in variables
    )

    # Maps time indexed vars to their max values over space
    max_values_time = {}
    for var in variables:
        for x in gas_length:
            sliced = slice_component_along_sets(var[t0, x], (time,))
            max_values_time[str(pyo.ComponentUID(sliced))] = max_values[var]

    return max_values_time
Example #8
File: plot.py Project: IDAES/publications
def step_time_indexed_variable(var):
    time = list(var.index_set())
    data = [var[t].value for t in time]
    # We actually don't care about the first input value.
    # I think this is only true for an implicit discretization.
    time = time[1:]
    data = data[1:]
    fig = plt.figure()
    ax = fig.add_subplot()
    ax.step(time, data, where="pre")
    if var.is_reference():
        cuid = pyo.ComponentUID(var.referent)
    else:
        cuid = pyo.ComponentUID(var)
    ax.set_title(str(cuid))
    return fig, ax
Example #9
    def test_cuid_of_slice(self):
        m = pyo.ConcreteModel()
        m.s1 = pyo.Set(initialize=["a", "b"])
        m.s2 = pyo.Set(initialize=["c", "d"])
        m.b = pyo.Block(m.s1)
        for i in m.s1:
            m.b[i].v = pyo.Var(m.s2)
        slice_ = slice_component_along_sets(m.b["a"].v["c"],
                                            ComponentSet((m.s1, )))
        cuid = pyo.ComponentUID(slice_)
        self.assertEqual(str(cuid), "b[*].v[c]")

        slice_ = slice_component_along_sets(m.b["a"].v[("c", )],
                                            ComponentSet((m.s1, )))
        cuid = pyo.ComponentUID(slice_)
        self.assertEqual(str(cuid), "b[*].v[c]")
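A follow-on sketch (not part of the original test, reusing the model built above): parsing the string back gives an equal CUID, and resolving a CUID whose wildcard precedes the last component should, as I understand Pyomo's API, return a Reference over every matching member.

cuid = pyo.ComponentUID("b[*].v[c]")
ref = cuid.find_component_on(m)
assert sorted(str(pyo.ComponentUID(v)) for v in ref.values()) == [
    "b[a].v[c]", "b[b].v[c]"]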
Example #10
 def fix_flows(self):
     for var in self._get_flow_vars():
         if not var.fixed:
             self._tmp_fixed.add(pe.ComponentUID(var))
             var.fix(0)
Example #11
    def test_append(self):
        m = make_model()
        data = OrderedDict([(pyo.ComponentUID(_slice.referent),
                             [_slice[t].value for t in m.time])
                            for _slice in m.v3_refs])
        name = 'v3'
        tol = 0.1
        history = VectorSeries(data,
                               time=list(m.time),
                               name=name,
                               tolerance=tol)
        new_t = 11
        with pytest.raises(ValueError, match='inconsistent dimension'):
            history.append(new_t, [1])
        new_vals = [3 * new_t for _ in m.space]
        history.append(new_t, new_vals)
        last = [series[-1] for series in history.values()]
        assert last == new_vals
        assert history.time[-1] == new_t

        with pytest.raises(ValueError, match='Appended time values must be'):
            history.append(new_t, new_vals)
Example #12
    def test_extend(self):
        m = make_model()
        midpoint = len(m.time) // 2
        tlist = list(m.time)
        time_1 = tlist[0:midpoint]
        time_2 = tlist[midpoint:]

        data_1 = OrderedDict([(pyo.ComponentUID(_slice.referent),
                               [_slice[t].value for t in time_1])
                              for _slice in m.v1_refs])
        data_2 = OrderedDict([(pyo.ComponentUID(_slice.referent),
                               [_slice[t].value for t in time_2])
                              for _slice in m.v1_refs])
        tol = 0.1
        history_1 = VectorSeries(data_1, time_1, tolerance=tol)
        history_2 = VectorSeries(data_2, time_2, tolerance=tol)
        history_1.extend(history_2.time, history_2)

        vals = [1.0 * t for t in m.time]
        for series in history_1.values():
            assert series == vals
        assert history_1.time == m.time

        new_time = [10, 11, 13]
        new_vals = [1 * t for t in new_time]
        new_data = [new_vals for _ in history_1.values()]
        history_1.extend(new_time, new_data)

        time = list(m.time) + new_time
        time = list(OrderedDict.fromkeys(time))  # No duplicates
        vals = [1 * t for t in time]
        for series in history_1.values():
            assert series == vals
        assert history_1.time == time

        new_time = [13, 14, 15]
        new_vals = [2 * t for t in new_time]
        new_data = [new_vals for _ in history_1.values()]
        with pytest.raises(ValueError, match='data was not consistent'):
            history_1.extend(new_time, new_data)

        with pytest.raises(ValueError, match='inconsistent dimension'):
            # Assuming that history_2.dim() != 1 ...
            history_2.extend([], [[]])
Example #13
def propagate_var_fix(block, tmp=False):
    """Propagates variable fixing for equalities of type x = y. If x is fixed
    and y is not fixed, then this function will fix y to the value of x.

    If temporary, stores the variable UIDs in a set '_tmp_propagate_fixed'
    attached to the block.

    Args:
        block (TYPE): The block for which to find variables to propagate fixing
        tmp (bool, optional): Whether the variable fixing will be temporary

    Returns:
        None

    Raises:
        RuntimeError: if two fixed variables x = y have different values.
    """
    from pyomo.core.base.expr import _SumExpression
    from pyomo.core.base.var import _GeneralVarData
    if not hasattr(block, '_tmp_propagate_fixed'):
        block._tmp_propagate_fixed = set()
    var_map = {}
    fixed_vars = set()
    eq_var_map = {}
    for constr in block.component_data_objects(ctype=pe.Constraint, active=True, descend_into=True):
        if constr.lower == 0 and constr.upper == 0 \
                and isinstance(constr.body, _SumExpression) \
                and len(constr.body._args) == 2 \
                and 1 in constr.body._coef \
                and -1 in constr.body._coef \
                and isinstance(constr.body._args[0], _GeneralVarData) \
                and isinstance(constr.body._args[1], _GeneralVarData) \
                and constr.body._const == 0:
            v1 = constr.body._args[0]
            v2 = constr.body._args[1]
            if v1.fixed:
                fixed_vars.add(id(v1))
            if v2.fixed:
                fixed_vars.add(id(v2))
            var_map.update({id(v1): v1, id(v2): v2})
            set1 = eq_var_map.get(id(v1), set([id(v1)]))
            set2 = eq_var_map.get(id(v2), set([id(v2)]))
            union = set1.union(set2)
            for vID in union:
                eq_var_map[vID] = union
    processed = set()
    for varID in fixed_vars:
        if varID not in processed:
            var_val = pe.value(var_map[varID])
            eq_set = eq_var_map[varID]
            for v in eq_set:
                if var_map[v].fixed and pe.value(var_map[v]) != var_val:
                    raise RuntimeError(
                        'Variables {} and {} have conflicting fixed '
                        'values of {} and {}, but are linked by equality '
                        'constraints.'.format(
                            var_map[varID].name, var_map[v].name,
                            var_val, pe.value(var_map[v])))
                elif not var_map[v].fixed:
                    var_map[v].fix(var_val)
                    if tmp:
                        block._tmp_propagate_fixed.add(
                            pe.ComponentUID(var_map[v]))
            processed |= eq_set
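The '_tmp_propagate_fixed' set pairs naturally with a restore step. A minimal sketch of that cleanup (hypothetical helper name; the original repo presumably has its own), assuming the stored CUIDs are rooted at the block they are resolved on:

def restore_propagated_fixed(block):
    # Unfix everything propagate_var_fix fixed temporarily, resolving
    # the stored CUIDs back into variables on this block.
    for cuid in getattr(block, '_tmp_propagate_fixed', set()):
        cuid.find_component_on(block).unfix()
    block._tmp_propagate_fixed = set()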
Example #14
    def propagate_solution(self, augmented_model, original_model):

        for avar in augmented_model.component_objects(ctype=aml.Var,
                                                      descend_into=True):
            cuid = aml.ComponentUID(avar)
            original_v = cuid.find_component_on(original_model)
            for k in avar:
                original_v[k].value = aml.value(avar[k])
Example #15
 def set_min_flows(self):
     min_flow = 1E-8
     for var in self._get_flow_vars():
         if not var.fixed and lb(var) == 0 and ub(var) >= min_flow:
             self._tmp_min_flow.add(pe.ComponentUID(var))
             var.setlb(min_flow)
     for unit in itervalues(self.units):
         unit.set_min_flows()
Example #16
File: spopt.py Project: mpi-sppy/mpi-sppy
    def avg_min_max(self, compstr):
        """ Can be used to track convergence progress.

        Args:
            compstr (str): 
                The name of the Pyomo component. Should not be indexed.

        Returns:
            tuple: 
                Tuple containing

                avg (float): 
                    Average across all scenarios.
                min (float):
                    Minimum across all scenarios.
                max (float):
                    Maximum across all scenarios.

        Note:
            WARNING: Does an Allreduce.
            Not user-friendly. If you give a bad compstr, it will just crash.
        """
        firsttime = True
        localavg = np.zeros(1, dtype='d')
        localmin = np.zeros(1, dtype='d')
        localmax = np.zeros(1, dtype='d')
        globalavg = np.zeros(1, dtype='d')
        globalmin = np.zeros(1, dtype='d')
        globalmax = np.zeros(1, dtype='d')

        v_cuid = pyo.ComponentUID(compstr)

        for k, s in self.local_scenarios.items():

            compv = pyo.value(v_cuid.find_component_on(s))

            ###compv = pyo.value(getattr(s, compstr))
            localavg[0] += s._mpisppy_probability * compv
            if compv < localmin[0] or firsttime:
                localmin[0] = compv
            if compv > localmax[0] or firsttime:
                localmax[0] = compv
            firsttime = False

        self.comms["ROOT"].Allreduce([localavg, MPI.DOUBLE],
                                     [globalavg, MPI.DOUBLE],
                                     op=MPI.SUM)
        self.comms["ROOT"].Allreduce([localmin, MPI.DOUBLE],
                                     [globalmin, MPI.DOUBLE],
                                     op=MPI.MIN)
        self.comms["ROOT"].Allreduce([localmax, MPI.DOUBLE],
                                     [globalmax, MPI.DOUBLE],
                                     op=MPI.MAX)
        return (float(globalavg[0]), float(globalmin[0]), float(globalmax[0]))
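The reduction pattern above in isolation: a runnable mpi4py sketch (toy per-rank values, no mpi-sppy) of the same SUM/MIN/MAX Allreduce trio. Run it under mpirun with several ranks.

import numpy as np
from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()

# Toy per-rank scalar standing in for the probability-weighted value.
local = np.array([float(rank + 1)], dtype='d')
gsum = np.zeros(1, dtype='d')
gmin = np.zeros(1, dtype='d')
gmax = np.zeros(1, dtype='d')
comm.Allreduce([local, MPI.DOUBLE], [gsum, MPI.DOUBLE], op=MPI.SUM)
comm.Allreduce([local, MPI.DOUBLE], [gmin, MPI.DOUBLE], op=MPI.MIN)
comm.Allreduce([local, MPI.DOUBLE], [gmax, MPI.DOUBLE], op=MPI.MAX)
if rank == 0:
    print(gsum[0], gmin[0], gmax[0])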
Example #17
    def test_with_tuple_of_sets(self):
        m = pyo.ConcreteModel()
        m.s1 = pyo.Set(initialize=[1, 2, 3])
        m.s2 = pyo.Set(initialize=[1, 2, 3])
        m.v = pyo.Var(m.s1, m.s2)
        sets = (m.s1, )
        slice_ = slice_component_along_sets(m.v[1, 2], sets)

        # These tests are essentially the same, but we run the CUID test
        # first as it gives a nicer error message.
        self.assertEqual(str(pyo.ComponentUID(slice_)), "v[*,2]")
        self.assertEqual(slice_, m.v[:, 2])
Example #18
 def test_len(self):
     m = make_model()
     data = OrderedDict([(pyo.ComponentUID(_slice.referent),
                          [_slice[t].value for t in m.time])
                         for _slice in m.v3_refs])
     name = 'v3'
     tol = 0.1
     history = VectorSeries(data,
                            time=list(m.time),
                            name=name,
                            tolerance=tol)
     assert len(history) == len(m.time)
     assert len(VectorSeries()) == 0
Example #19
def compose_two_stage_stochastic_model(models, complicating_vars):

    if not isinstance(models, dict):
        raise RuntimeError("Model must be a dictionary")
    if not isinstance(complicating_vars, dict):
        raise RuntimeError("complicating_vars must be a dictionary")
    if len(complicating_vars) != len(models):
        raise RuntimeError(
            "Each scenario must have a list of complicated variables")

    counter = 0
    nz = -1
    for k, v in complicating_vars.items():
        if counter == 0:
            nz = len(v)
        else:
            assert len(v) == nz, (
                'all models must have same number of complicating variables')
        counter += 1

    model = pyo.ConcreteModel()
    model.z = pyo.Var(range(nz))
    model.scenario_names = sorted(models.keys())

    obj = 0.0

    for i, j in enumerate(model.scenario_names):
        instance = models[j].clone()
        model.add_component("{}_linking".format(j), pyo.ConstraintList())
        model.add_component("{}".format(j), instance)
        linking = getattr(model, "{}_linking".format(j))
        x = complicating_vars[j]

        for k, var in enumerate(x):
            if var.is_indexed():
                raise RuntimeError(
                    'indexed complicating variables not supported')
            vid = pyo.ComponentUID(var)
            vv = vid.find_component_on(instance)
            linking.add(vv == model.z[k])

        # gets objective
        objectives = instance.component_map(pyo.Objective, active=True)
        if len(objectives) > 1:
            raise RuntimeError('Multiple objectives not supported')
        instance_obj = list(objectives.values())[0]
        obj += instance_obj.expr
        instance_obj.deactivate()
    model.obj = pyo.Objective(expr=obj)
    return model
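A usage sketch (toy scenario models of my own). Note the complicating-variable lists must reference the original models, since the composer resolves their CUIDs on the clones:

import pyomo.environ as pyo

def build_scenario(demand):
    m = pyo.ConcreteModel()
    m.x = pyo.Var(bounds=(0, 10))   # first-stage decision
    m.y = pyo.Var(bounds=(0, 10))   # recourse decision
    m.balance = pyo.Constraint(expr=m.x + m.y >= demand)
    m.obj = pyo.Objective(expr=2.0 * m.x + 5.0 * m.y)
    return m

models = {"low": build_scenario(3.0), "high": build_scenario(8.0)}
# One list of first-stage (complicating) variables per scenario:
complicating_vars = {name: [m.x] for name, m in models.items()}
stoch = compose_two_stage_stochastic_model(models, complicating_vars)
# stoch.z[0] is the shared copy of x, linked to each scenario clone.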
Example #20
    def deactivate_trivial_constraints(self):
        """Find and deactivates trivial constraints: those that are const =
        const, or const < const. Verifies that they evaluate to True first.

        Stores deactivated constraint UIDs in the set
        '_tmp_trivial_deactivated'

        Returns:
            None
        """
        if not hasattr(self, '_tmp_trivial_deactivated'):
            self._tmp_trivial_deactivated = set()
        for condata in self.component_data_objects(ctype=pe.Constraint,
                                                   active=True,
                                                   descend_into=True):
            # if the constraint is trivial, deactivate it.
            if condata.body.is_fixed():
                if not (condata.upper is None
                        or condata.upper.is_fixed()) or not (
                            condata.lower is None or condata.lower.is_fixed()):
                    # that's weird, why would these not be fixed?
                    raise NotImplementedError(
                        'Non-fixed upper or lower bound on constraint.')
                if pe.value(condata.body) != pe.value(
                        condata.upper) or pe.value(condata.body) != pe.value(
                            condata.lower):
                    # inconsistent or inequality. Check if inequality.
                    if condata.upper is None:
                        # Make sure that body >= lower
                        if pe.value(condata.body) < pe.value(condata.lower):
                            raise ValueError('Infeasible constraint: ' +
                                             condata.name)
                    elif condata.lower is None:
                        # Make sure that body <= upper
                        if pe.value(condata.body) > pe.value(condata.upper):
                            raise ValueError('Infeasible constraint: ' +
                                             condata.name)
                    else:
                        raise NotImplementedError('Infeasible constraint: ' +
                                                  condata.name)
                # otherwise, it's fine
                condata.deactivate()
                self._tmp_trivial_deactivated.add(pe.ComponentUID(condata))
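A minimal illustration (toy model of my own) of what the method treats as trivial: once x is fixed, the constraint body is fixed, its value agrees with both bounds, and the constraint can be deactivated safely.

import pyomo.environ as pe

m = pe.ConcreteModel()
m.x = pe.Var()
m.c = pe.Constraint(expr=m.x == 1.0)
m.x.fix(1.0)
assert m.c.body.is_fixed()
assert pe.value(m.c.body) == pe.value(m.c.lower) == pe.value(m.c.upper)
m.c.deactivate()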
Example #21
    def test_dim(self):
        # This test also covers consistent_dimension and
        # validate_dimension.
        m = make_model()
        data = OrderedDict([(pyo.ComponentUID(_slice.referent),
                             [_slice[t].value for t in m.time])
                            for _slice in m.v2a_refs + m.v2b_refs])
        name = 'v2'
        tol = 0.1
        history = VectorSeries(data,
                               time=list(m.time),
                               name=name,
                               tolerance=tol)
        assert history.dim() == len(m.space * m.comp)
        assert VectorSeries().dim() == 0

        vals = [1 for _ in m.space * m.comp]
        assert history.consistent_dimension(vals)
        assert not history.consistent_dimension([1])
        assert history.validate_dimension(vals) == vals
        with pytest.raises(ValueError, match='inconsistent dimension'):
            history.validate_dimension([1])
Example #22
model.pprint()

compare_solutions = True
if compare_solutions:
    # compare the solution of the original model with a clone of the
    # original that has a backmapped solution from the scaled model

    # solve the original (unscaled) model
    original_model = model.clone()
    pe.SolverFactory('glpk').solve(original_model)

    # create and solve the scaled model
    scaling_tx = pe.TransformationFactory('core.scale_model')
    scaled_model = scaling_tx.create_using(model)
    pe.SolverFactory('glpk').solve(scaled_model)

    # propagate the solution from the scaled model back to a clone of the original model
    backmapped_unscaled_model = model.clone()
    scaling_tx.propagate_solution(scaled_model, backmapped_unscaled_model)

    # compare the variable values
    print('\n\n')
    print('%s\t%12s           %18s' % ('Var', 'Orig.', 'Scaled -> Backmapped'))
    print('=====================================================')
    for v in original_model.component_data_objects(ctype=pe.Var,
                                                   descend_into=True):
        cuid = pe.ComponentUID(v)
        bv = cuid.find_component_on(backmapped_unscaled_model)
        print('%s\t%.16f\t%.16f' % (v.local_name, pe.value(v), pe.value(bv)))
    print('=====================================================')
Example #23
File: model.py Project: IDAES/publications
def get_model_for_dynamic_optimization(
    parameter_perturbation=None,
    model_params=None,
    sample_points=None,
    initial_conditions=None,
    setpoint=None,
    ic_scalar_data=None,
    ic_dae_data=None,
    setpoint_data=None,
    objective_weights=None,
    objective_states=None,
    flatten_out=None,
):
    if model_params is None:
        model_params = {}
    # This is only necessary because horizon is used elsewhere in this
    # script.
    # TODO: horizon should not be used elsewhere in this script
    horizon = model_params.pop("horizon", 300.0)
    with TIMER.context("make dynamic"):
        flattened_vars = [None, None]
        m, var_cat, con_cat = make_square_dynamic_model(
            # TODO: Bounds?
            horizon=horizon,
            **model_params,

            # HACK: This list will be modified to hold
            # scalar_vars and dae_vars so I can re-use them below
            flatten_out=flattened_vars,
        )
    time = m.fs.time
    t0 = time.first()
    if sample_points is None:
        sample_points = [
            time.at(i) for i in range(1, len(time) + 1) if not (i - 1) % 4
        ]

    # Set initial conditions
    if ic_scalar_data:
        # TODO: This should probably go in initialization?
        # Why is this necessary? What scalar variables are we actually
        # setting here? Probably all of them...
        set_values(m, ic_scalar_data)
    if ic_dae_data:
        # initial_conditions don't have to be only the variables that
        # are fixed... we don't touch the structure of the model, just
        # set values.
        set_values_at_time(m, t0, ic_dae_data)

    # NOTE: flattened_vars comes from hack above
    dynamic_scalar_vars, dynamic_dae_vars = flattened_vars
    if flatten_out is not None:
        flatten_out[0] = dynamic_scalar_vars
        flatten_out[1] = dynamic_dae_vars
    # We rely on this call to set time-varying "parameter" vars
    copy_values_from_time(dynamic_dae_vars, time, t0, include_fixed=True)

    if setpoint_data:
        # FIXME: This branch does not work as expected
        setpoint = setpoint_data
    else:
        raise RuntimeError("setpoint_data is required")

    if objective_states is None:
        objective_states = [
            str(pyo.ComponentUID(var.referent))
            for var in var_cat[VC.DIFFERENTIAL]
        ]
    # Add setpoint to dynamic model
    tracking_cost = get_tracking_cost_expressions(objective_states,
                                                  time,
                                                  setpoint,
                                                  weights=objective_weights)
    m.tracking_cost = tracking_cost
    m.tracking_objective = pyo.Objective(expr=sum(m.tracking_cost[t]
                                                  for t in time
                                                  if t != time.first()))

    # Add piecewise constant constraints and unfix inputs
    inputs = [
        "fs.MB.gas_phase.properties[*,0.0].flow_mol",
        "fs.MB.solid_phase.properties[*,1.0].flow_mass",
    ]
    piecewise_constant_constraint = get_piecewise_constant_constraints(
        inputs, time, sample_points)
    m.piecewise_constant_constraint = piecewise_constant_constraint
    for name in inputs:
        var = m.find_component(name)
        for t in time:
            if t != t0:
                var[t].unfix()

    # Set disturbance values in dynamic model
    if parameter_perturbation is None:
        parameter_perturbation = {}
    load_inputs_into_model(m, time, parameter_perturbation)

    return m
Example #24
File: model.py Project: IDAES/publications
def get_nmpc_plant_model(
    parameter_perturbation=None,
    model_params=None,
    setpoint=None,
    ic_scalar_data=None,
    ic_dae_data=None,
    setpoint_data=None,
    objective_weights=None,
    objective_states=None,
    flatten_out=None,
):
    if model_params is None:
        model_params = {}
    # This is only necessary because horizon is used elsewhere in this
    # script.
    # TODO: horizon should not be used elsewhere in this script
    horizon = model_params.pop("horizon", 300.0)
    with TIMER.context("make dynamic"):
        flattened_vars = [None, None]
        m, var_cat, con_cat = make_square_dynamic_model(
            # TODO: Bounds?
            horizon=horizon,
            **model_params,

            # HACK: This list will be modified to hold
            # scalar_vars and dae_vars so I can re-use them below
            flatten_out=flattened_vars,
        )
    time = m.fs.time
    t0 = time.first()

    # Set initial conditions
    if ic_scalar_data:
        # TODO: This should probably go in initialization?
        # Why is this necessary? What scalar variables are we actually
        # setting here? Probably all of them...
        set_values(m, ic_scalar_data)
    if ic_dae_data:
        # initial_conditions don't have to be only the variables that
        # are fixed... we don't touch the structure of the model, just
        # set values.
        set_values_at_time(m, t0, ic_dae_data)

    # NOTE: flattened_vars comes from hack above
    dynamic_scalar_vars, dynamic_dae_vars = flattened_vars
    if flatten_out is not None:
        flatten_out[0] = dynamic_scalar_vars
        flatten_out[1] = dynamic_dae_vars
    # We rely on this call to set time-varying "parameter" vars
    copy_values_from_time(dynamic_dae_vars, time, t0, include_fixed=True)

    if setpoint_data:
        # FIXME: This branch does not work as expected
        setpoint = setpoint_data
    else:
        raise RuntimeError("setpoint_data is required")

    if objective_states is None:
        objective_states = [
            str(pyo.ComponentUID(var.referent))
            for var in var_cat[VC.DIFFERENTIAL]
        ]
    # Add setpoint to dynamic model
    tracking_cost = get_tracking_cost_expressions(objective_states,
                                                  time,
                                                  setpoint,
                                                  weights=objective_weights)
    m.tracking_cost = tracking_cost

    # Set disturbance values in dynamic model
    if parameter_perturbation is None:
        parameter_perturbation = {}
    load_inputs_into_model(m, time, parameter_perturbation, time_tol=None)

    return m
Example #25
    def test_get_data_from_sample(self):
        blk = self.make_block()
        time = blk.time
        ts = blk.sample_points[2]

        n_diff = len(blk.DIFFERENTIAL_SET)
        n_input = len(blk.INPUT_SET)
        n_alg = len(blk.ALGEBRAIC_SET)

        for (i, t), var in blk.vectors.differential.items():
            var.set_value(i*t)
        for (i, t), var in blk.vectors.input.items():
            var.set_value((n_diff+i)*t)
        for (i, t), var in blk.vectors.algebraic.items():
            var.set_value((n_diff+n_input+i)*t)

        # Test default variables without including t0
        data = blk.get_data_from_sample(ts)
        # By default extract values from differential and input variables
        data_time = list(blk.generate_time_in_sample(ts))

        assert len(data) == n_diff + n_input
        for var in blk.component_objects((DiffVar, InputVar)):
            _slice = var.referent
            cuid = pyo.ComponentUID(_slice)
            assert cuid in data

        for i, b in blk.DIFFERENTIAL_BLOCK.items():
            cuid = pyo.ComponentUID(b.var.referent)
            values = list(b.var[t].value for t in data_time)
            assert values == data[cuid]
        for i, b in blk.INPUT_BLOCK.items():
            cuid = pyo.ComponentUID(b.var.referent)
            values = list(b.var[t].value for t in data_time)
            assert values == data[cuid]

        # Test default variables, including the data point at t0
        data = blk.get_data_from_sample(ts, include_t0=True)
        data_time = list(blk.generate_time_in_sample(ts, include_t0=True))
        assert len(data) == n_diff + n_input
        for var in blk.component_objects((DiffVar, InputVar)):
            _slice = var.referent
            cuid = pyo.ComponentUID(_slice)
            assert cuid in data
        for i, b in blk.DIFFERENTIAL_BLOCK.items():
            cuid = pyo.ComponentUID(b.var.referent)
            values = list(b.var[t].value for t in data_time)
            assert values == data[cuid]
        for i, b in blk.INPUT_BLOCK.items():
            cuid = pyo.ComponentUID(b.var.referent)
            values = list(b.var[t].value for t in data_time)
            assert values == data[cuid]

        data = blk.get_data_from_sample(ts,
                variables=(VariableCategory.ALGEBRAIC,))
        data_time = list(blk.generate_time_in_sample(ts))

        assert len(data) == n_alg
        for var in blk.component_objects(AlgVar):
            cuid = pyo.ComponentUID(var.referent)
            assert cuid in data

        for i, b in blk.ALGEBRAIC_BLOCK.items():
            cuid = pyo.ComponentUID(b.var.referent)
            values = list(b.var[t].value for t in data_time)
            assert values == data[cuid]
Example #26
    def _apply_to(self, model, **kwds):

        complicating_vars = kwds.pop('complicating_vars', None)
        z_estimates = kwds.pop('z_estimates', None)
        w_estimates = kwds.pop('w_estimates', None)
        rho = kwds.pop('rho', 1.0)

        if complicating_vars is None:
            raise RuntimeError('need to pass list of complicating variables')

        assert isinstance(complicating_vars, list)

        cloned_vars = []
        original_vars = []
        for v in complicating_vars:
            vid = aml.ComponentUID(v)
            vv = vid.find_component_on(model)
            if v.is_indexed():
                raise RuntimeError('Indexed variables not supported')
            else:
                cloned_vars.append(vv)
                original_vars.append(v)

        nz = len(cloned_vars)
        z_vals = np.zeros(nz)
        if z_estimates is not None:
            assert len(z_estimates) == nz
            z_vals = z_estimates

        w_vals = np.zeros(nz)
        if w_estimates is not None:
            assert len(w_estimates) == nz
            w_vals = w_estimates

        model._z = aml.Param(range(nz), initialize=0.0, mutable=True)
        model._w = aml.Param(range(nz), initialize=0.0, mutable=True)
        for i in range(nz):
            model._z[i].value = z_vals[i]
            model._w[i].value = w_vals[i]

        model._rho = aml.Param(initialize=rho, mutable=True)

        # defines objective
        objectives = model.component_map(aml.Objective, active=True)
        if len(objectives) > 1:
            raise RuntimeError('Multiple objectives not supported')
        obj = list(objectives.values())[0]

        def rule_linkin_exprs(m, i):
            return cloned_vars[i] - m._z[i]

        # store non-anticipativity expression
        model._linking_residuals = aml.Expression(range(nz),
                                                  rule=rule_linkin_exprs)

        dual_term = 0.0
        penalty_term = 0.0
        for zid in range(nz):
            dual_term += (model._linking_residuals[zid]) * model._w[zid]
            penalty_term += (model._linking_residuals[zid])**2

        # multiplier terms in objective
        model._dual_obj_term = aml.Expression(expr=dual_term)
        # penalty term
        model._penalty_obj_term = aml.Expression(expr=0.5 * model._rho *
                                                 penalty_term)

        model._aug_obj = aml.Objective(expr=obj.expr + model._dual_obj_term +
                                       model._penalty_obj_term)

        obj.deactivate()
Example #27
    def test_linear_scaling(self):
        model = pyo.ConcreteModel()
        model.x = pyo.Var([1, 2, 3], bounds=(-10, 10), initialize=5.0)
        model.z = pyo.Var(bounds=(10, 20))
        model.obj = pyo.Objective(expr=model.z + model.x[1])

        # test scaling of duals as well
        model.dual = pyo.Suffix(direction=pyo.Suffix.IMPORT)
        model.rc = pyo.Suffix(direction=pyo.Suffix.IMPORT)

        def con_rule(m, i):
            if i == 1:
                return m.x[1] + 2*m.x[2] + 1*m.x[3] == 4.0
            if i == 2:
                return m.x[1] + 2*m.x[2] + 2*m.x[3] == 5.0
            if i == 3:
                return m.x[1] + 3.0*m.x[2] + 1*m.x[3] == 5.0
        model.con = pyo.Constraint([1,2,3], rule=con_rule)
        model.zcon = pyo.Constraint(expr=model.z >= model.x[2])

        x_scale = 0.5
        obj_scale = 2.0
        z_scale = -10.0
        con_scale1 = 0.5
        con_scale2 = 2.0
        con_scale3 = -5.0
        zcon_scale = -3.0

        unscaled_model = model.clone()
        unscaled_model.scaling_factor = pyo.Suffix(direction=pyo.Suffix.EXPORT)
        unscaled_model.scaling_factor[unscaled_model.obj] = obj_scale
        unscaled_model.scaling_factor[unscaled_model.x] = x_scale
        unscaled_model.scaling_factor[unscaled_model.z] = z_scale
        unscaled_model.scaling_factor[unscaled_model.con[1]] = con_scale1 
        unscaled_model.scaling_factor[unscaled_model.con[2]] = con_scale2
        unscaled_model.scaling_factor[unscaled_model.con[3]] = con_scale3
        unscaled_model.scaling_factor[unscaled_model.zcon] = zcon_scale

        scaled_model = pyo.TransformationFactory('core.scale_model').create_using(unscaled_model)

        # print('*** unscaled ***')
        # unscaled_model.pprint()
        # print('*** scaled ***')
        # scaled_model.pprint()

        glpk_solver = pyo.SolverFactory('glpk')
        if isinstance(glpk_solver, UnknownSolver) or \
           (not glpk_solver.available()):
            raise unittest.SkipTest("glpk solver not available")

        glpk_solver.solve(unscaled_model)
        glpk_solver.solve(scaled_model)

        # check vars
        self.assertAlmostEqual(pyo.value(unscaled_model.x[1]), pyo.value(scaled_model.scaled_x[1])/x_scale, 4)
        self.assertAlmostEqual(pyo.value(unscaled_model.x[2]), pyo.value(scaled_model.scaled_x[2])/x_scale, 4)
        self.assertAlmostEqual(pyo.value(unscaled_model.x[3]), pyo.value(scaled_model.scaled_x[3])/x_scale, 4)
        self.assertAlmostEqual(pyo.value(unscaled_model.z), pyo.value(scaled_model.scaled_z)/z_scale, 4)
        # check var lb
        self.assertAlmostEqual(pyo.value(unscaled_model.x[1].lb), pyo.value(scaled_model.scaled_x[1].lb)/x_scale, 4)
        self.assertAlmostEqual(pyo.value(unscaled_model.x[2].lb), pyo.value(scaled_model.scaled_x[2].lb)/x_scale, 4)
        self.assertAlmostEqual(pyo.value(unscaled_model.x[3].lb), pyo.value(scaled_model.scaled_x[3].lb)/x_scale, 4)
        # note: z_scale is negative, therefore, the inequality directions swap
        self.assertAlmostEqual(pyo.value(unscaled_model.z.lb), pyo.value(scaled_model.scaled_z.ub)/z_scale, 4)
        # check var ub
        self.assertAlmostEqual(pyo.value(unscaled_model.x[1].ub), pyo.value(scaled_model.scaled_x[1].ub)/x_scale, 4)
        self.assertAlmostEqual(pyo.value(unscaled_model.x[2].ub), pyo.value(scaled_model.scaled_x[2].ub)/x_scale, 4)
        self.assertAlmostEqual(pyo.value(unscaled_model.x[3].ub), pyo.value(scaled_model.scaled_x[3].ub)/x_scale, 4)
        # note: z_scale is negative, therefore, the inequality directions swap
        self.assertAlmostEqual(pyo.value(unscaled_model.z.ub), pyo.value(scaled_model.scaled_z.lb)/z_scale, 4)
        # check var multipliers (rc)
        self.assertAlmostEqual(pyo.value(unscaled_model.rc[unscaled_model.x[1]]), pyo.value(scaled_model.rc[scaled_model.scaled_x[1]])*x_scale/obj_scale, 4)
        self.assertAlmostEqual(pyo.value(unscaled_model.rc[unscaled_model.x[2]]), pyo.value(scaled_model.rc[scaled_model.scaled_x[2]])*x_scale/obj_scale, 4)
        self.assertAlmostEqual(pyo.value(unscaled_model.rc[unscaled_model.x[3]]), pyo.value(scaled_model.rc[scaled_model.scaled_x[3]])*x_scale/obj_scale, 4)
        self.assertAlmostEqual(pyo.value(unscaled_model.rc[unscaled_model.z]), pyo.value(scaled_model.rc[scaled_model.scaled_z])*z_scale/obj_scale, 4)
        # check constraint multipliers
        self.assertAlmostEqual(pyo.value(unscaled_model.dual[unscaled_model.con[1]]),pyo.value(scaled_model.dual[scaled_model.scaled_con[1]])*con_scale1/obj_scale, 4)
        self.assertAlmostEqual(pyo.value(unscaled_model.dual[unscaled_model.con[2]]),pyo.value(scaled_model.dual[scaled_model.scaled_con[2]])*con_scale2/obj_scale, 4)
        self.assertAlmostEqual(pyo.value(unscaled_model.dual[unscaled_model.con[3]]),pyo.value(scaled_model.dual[scaled_model.scaled_con[3]])*con_scale3/obj_scale, 4)

        # put the solution from the scaled back into the original
        pyo.TransformationFactory('core.scale_model').propagate_solution(scaled_model, model)

        # compare var values and rc with the unscaled soln
        for vm in model.component_objects(ctype=pyo.Var, descend_into=True):
            cuid = pyo.ComponentUID(vm)
            vum = cuid.find_component_on(unscaled_model)
            self.assertEqual((vm in model.rc), (vum in unscaled_model.rc)) 
            if vm in model.rc:
                self.assertAlmostEqual(pyo.value(model.rc[vm]), pyo.value(unscaled_model.rc[vum]), 4)
            for k in vm:
                vmk = vm[k]
                vumk = vum[k]
                self.assertAlmostEqual(pyo.value(vmk), pyo.value(vumk), 4)
                self.assertEqual((vmk in model.rc), (vumk in unscaled_model.rc)) 
                if vmk in model.rc:
                    self.assertAlmostEqual(pyo.value(model.rc[vmk]), pyo.value(unscaled_model.rc[vumk]), 4)

        # compare constraint duals and value
        for model_con in model.component_objects(ctype=pyo.Constraint, descend_into=True):
            cuid = pyo.ComponentUID(model_con)
            unscaled_model_con = cuid.find_component_on(unscaled_model)
            self.assertEqual((model_con in model.dual), (unscaled_model_con in unscaled_model.dual))
            if model_con in model.dual:
                self.assertAlmostEqual(pyo.value(model.dual[model_con]), pyo.value(unscaled_model.dual[unscaled_model_con]), 4)
            for k in model_con:
                mk = model_con[k]
                umk = unscaled_model_con[k]
                self.assertEqual((mk in model.dual), (umk in unscaled_model.dual)) 
                if mk in model.dual:
                    self.assertAlmostEqual(pyo.value(model.dual[mk]), pyo.value(unscaled_model.dual[umk]), 4)
Example #28
def main():
    """Run the dynamic optimization demo: compute steady-state initial
    conditions and a setpoint, build and initialize the dynamic model,
    solve, and plot the results.
    """
    nxfe = 10
    # NOTE: Default inputs: (128.2, 591.4)
    ic_inputs = {
        #"fs.MB.gas_phase.properties[*,0.0].flow_mol": 120.0,
        #"fs.MB.solid_phase.properties[*,1.0].flow_mass": 550.0,
    }
    ic_model_params = {"nxfe": nxfe}
    m_ic = get_steady_state_model(
        ic_inputs,
        solve_kwds={"tee": True},
        model_params=ic_model_params,
    )
    time = m_ic.fs.time
    scalar_data, dae_data = get_data_from_steady_model(m_ic, time)
    # TODO: get steady state data (scalar and dae both necessary)
    x0 = 0.0
    x1 = 1.0

    # These (as well as nxfe above) are the parameters for a small model
    # that I'm using to test NMPC (defaults are 900, 15, 60, with nxfe=10)
    horizon = 1800
    tfe_width = 60
    sample_width = 120
    sample_points = [
        # Calculate sample points first with integer arithmetic
        # to avoid roundoff error
        float(sample_width*i) for i in range(0, horizon//sample_width + 1)
    ]
    horizon = float(horizon)
    tfe_width = float(tfe_width)
    model_params = {
        "horizon": horizon,
        "tfe_width": tfe_width,
        "ntcp": 1,
        "nxfe": nxfe,
    }

    # These are approximately the default values:
    #disturbance_dict = {"CO2": 0.03, "H2O": 0.0, "CH4": 0.97}
    disturbance_dict = {"CO2": 0.5, "H2O": 0.0, "CH4": 0.5}
    disturbance = dict(
        (
            "fs.MB.gas_phase.properties[*,%s].mole_frac_comp[%s]" % (x0, j),
            {(0.0, horizon): val},
        )
        for j, val in disturbance_dict.items()
    )

    # Create solver here as it is needed to solve for the setpoint
    solver = pyo.SolverFactory("ipopt")
    solver.options["linear_solver"] = "ma57"
    solver.options["max_cpu_time"] = 1500

    #
    # Get setpoint data
    #
    sp_inputs = get_inputs_at_time(disturbance, horizon)
    #sp_inputs.update({
    #    "fs.MB.gas_phase.properties[*,0.0].flow_mol": 272.8,
    #    "fs.MB.solid_phase.properties[*,1.0].flow_mass": 591.4,
    #})
    sp_model_params = {"nxfe": nxfe}
    m_sp = get_steady_state_model(
        sp_inputs,
        solve_kwds={"tee": True},
        model_params=sp_model_params,
    )
    time = m_sp.fs.time
    space = m_sp.fs.MB.gas_phase.length_domain
    t0 = time.first()
    # Solve optimization problem for setpoint
    sp_objective_states = [
        "fs.MB.solid_phase.reactions[*,%s].OC_conv" % x0,
    ]
    sp_target = {
        "fs.MB.solid_phase.reactions[*,%s].OC_conv" % x0: 0.95,
    }
    m_sp.fs.MB.gas_inlet.flow_mol[:].unfix()
    m_sp.setpoint_expr = get_tracking_cost_expressions(
        sp_objective_states, time, sp_target
    )
    m_sp.objective = pyo.Objective(expr=m_sp.setpoint_expr[t0])
    solver.solve(m_sp, tee=True)
    scalar_vars, dae_vars = flatten_dae_components(m_sp, time, pyo.Var)
    setpoint = {
        str(pyo.ComponentUID(var.referent)): var[t0].value
        for var in dae_vars
    }
    ###

    max_data = get_max_values_from_steady(m_sp)
    variance_data = get_variance_of_time_slices(m_sp, time, space)
    #weight_data = None
    weight_data = {
        name: 1.0/s if s != 0 else 1.0 for name, s in variance_data.items()
        #name: 1/w if w != 0 else 1.0 for name, w in max_data.items()
        # Note: 1/w**2 does not converge with states in objective...
    }
    objective_states = get_state_variable_names(space)

    flattened_vars = [None, None]
    m = get_model_for_dynamic_optimization(
        sample_points=sample_points,
        parameter_perturbation=disturbance,
        model_params=model_params,
        ic_scalar_data=scalar_data,
        ic_dae_data=dae_data,
        setpoint_data=setpoint,
        objective_weights=weight_data,
        objective_states=objective_states,

        # this argument is a huge hack to get the flattened
        # vars without having to do a bit more work.
        flatten_out=flattened_vars,
    )
    add_constraints_for_missing_variables(m)
    time = m.fs.time
    t0 = time.first()
    scalar_vars, dae_vars = flattened_vars
    initialize_dynamic(m, dae_vars)

    # Should we initialize to setpoint inputs?
    #sp_input_dict = {
    #    "fs.MB.gas_phase.properties[*,0.0].flow_mol": {(t0, horizon): 250.0},
    #    "fs.MB.solid_phase.properties[*,1.0].flow_mass": {(t0, horizon): 591.4},
    #}
    #load_inputs_into_model(m, time, sp_input_dict)

    # TODO: Should I set inlet flow rates to their target values for
    # this simulation?
    input_vardata = (
        [m.fs.MB.gas_inlet.flow_mol[t] for t in time if t != t0]
        + [m.fs.MB.solid_inlet.flow_mass[t] for t in time if t != t0]
    )
    with TemporarySubsystemManager(
            to_fix=input_vardata,
            to_deactivate=[m.piecewise_constant_constraint],
            ):
        print("Initializing by time element...")
        with TIMER.context("elem-init"):
            initialize_by_time_element(m, time, solver)

    m.fs.MB.solid_phase.reactions[:,0.0].OC_conv.setlb(0.89)

    print("Starting dynamic optimization solve...")
    with TIMER.context("solve dynamic"):
        solver.solve(m, tee=True)

    extra_states = [
        pyo.Reference(m.fs.MB.solid_phase.reactions[:,0.0].OC_conv),
    ]
    plot_outlet_states_over_time(m, show=False, extra_states=extra_states)
    inputs = [
        "fs.MB.gas_phase.properties[*,0.0].flow_mol",
        "fs.MB.solid_phase.properties[*,1.0].flow_mass",
    ]
    plot_inputs_over_time(m, inputs, show=False)
    print(m.tracking_cost.name)
    for t in m.fs.time:
        print(t, pyo.value(m.tracking_cost[t]))
    print()
Example #29
 def deactivate_nonlinear_constraints(self):
     for constr in self.get_nonlinear_constraints(active=True):
         constr.deactivate()
         self._tmp_nonlinear_deactivated.add(pe.ComponentUID(constr))
Example #30
def main(plot_switch=False):

    # This tests the same model constructed in the test_nmpc_constructor_1 file
    m_controller = make_model(horizon=3, ntfe=30, ntcp=2, bounds=True)
    sample_time = 0.5
    m_plant = make_model(horizon=sample_time, ntfe=5, ntcp=2)
    time_plant = m_plant.fs.time

    solve_consistent_initial_conditions(m_plant, time_plant, solver)

    #####
    # Flatten and categorize controller model
    #####
    model = m_controller
    time = model.fs.time
    t0 = time.first()
    t1 = time[2]
    scalar_vars, dae_vars = flatten_dae_components(
        model,
        time,
        pyo.Var,
    )
    scalar_cons, dae_cons = flatten_dae_components(
        model,
        time,
        pyo.Constraint,
    )
    inputs = [
        model.fs.mixer.S_inlet.flow_vol,
        model.fs.mixer.E_inlet.flow_vol,
    ]
    measurements = [
        pyo.Reference(model.fs.cstr.outlet.conc_mol[:, 'C']),
        pyo.Reference(model.fs.cstr.outlet.conc_mol[:, 'E']),
        pyo.Reference(model.fs.cstr.outlet.conc_mol[:, 'S']),
        pyo.Reference(model.fs.cstr.outlet.conc_mol[:, 'P']),
        model.fs.cstr.outlet.temperature,
    ]
    model.fs.cstr.control_volume.material_holdup[:, 'aq', 'Solvent'].fix()
    model.fs.cstr.total_flow_balance.deactivate()
    var_partition, con_partition = categorize_dae_variables_and_constraints(
        model,
        dae_vars,
        dae_cons,
        time,
        input_vars=inputs,
    )
    controller = ControllerBlock(
        model=model,
        time=time,
        measurements=measurements,
        category_dict={None: var_partition},
    )
    controller.construct()

    solve_consistent_initial_conditions(m_controller, time, solver)
    controller.initialize_to_initial_conditions()

    m_controller._dummy_obj = pyo.Objective(expr=0)
    nlp = PyomoNLP(m_controller)
    igraph = IncidenceGraphInterface(nlp)
    m_controller.del_component(m_controller._dummy_obj)
    diff_vars = [var[t1] for var in var_partition[VC.DIFFERENTIAL]]
    alg_vars = [var[t1] for var in var_partition[VC.ALGEBRAIC]]
    deriv_vars = [var[t1] for var in var_partition[VC.DERIVATIVE]]
    diff_eqns = [con[t1] for con in con_partition[CC.DIFFERENTIAL]]
    alg_eqns = [con[t1] for con in con_partition[CC.ALGEBRAIC]]

    # Assemble and factorize "derivative Jacobian"
    dfdz = nlp.extract_submatrix_jacobian(diff_vars, diff_eqns)
    dfdy = nlp.extract_submatrix_jacobian(alg_vars, diff_eqns)
    dgdz = nlp.extract_submatrix_jacobian(diff_vars, alg_eqns)
    dgdy = nlp.extract_submatrix_jacobian(alg_vars, alg_eqns)
    dfdzdot = nlp.extract_submatrix_jacobian(deriv_vars, diff_eqns)
    fact = sps.linalg.splu(dgdy.tocsc())
    dydz = fact.solve(dgdz.toarray())
    deriv_jac = dfdz - dfdy.dot(dydz)
    fact = sps.linalg.splu(dfdzdot.tocsc())
    dzdotdz = -fact.solve(deriv_jac)

    # Use some heuristic on the eigenvalues of the derivative Jacobian
    # to identify fast states.
    w, V = np.linalg.eig(dzdotdz)
    w_max = np.max(np.abs(w))
    fast_modes, = np.where(np.abs(w) > w_max / 2)
    fast_states = []
    for idx in fast_modes:
        evec = V[:, idx]
        _fast_states, _ = np.where(np.abs(evec) > 0.5)
        fast_states.extend(_fast_states)
    fast_states = set(fast_states)

    # Store components necessary for model reduction in a model-
    # independent form.
    fast_state_derivs = [
        pyo.ComponentUID(var_partition[VC.DERIVATIVE][idx].referent,
                         context=model) for idx in fast_states
    ]
    fast_state_diffs = [
        pyo.ComponentUID(var_partition[VC.DIFFERENTIAL][idx].referent,
                         context=model) for idx in fast_states
    ]
    fast_state_discs = [
        pyo.ComponentUID(con_partition[CC.DISCRETIZATION][idx].referent,
                         context=model) for idx in fast_states
    ]

    # Perform pseudo-steady state model reduction on the fast states
    # and re-categorize
    for cuid in fast_state_derivs:
        var = cuid.find_component_on(m_controller)
        var.fix(0.0)
    for cuid in fast_state_diffs:
        var = cuid.find_component_on(m_controller)
        var[t0].unfix()
    for cuid in fast_state_discs:
        con = cuid.find_component_on(m_controller)
        con.deactivate()

    var_partition, con_partition = categorize_dae_variables_and_constraints(
        model,
        dae_vars,
        dae_cons,
        time,
        input_vars=inputs,
    )
    controller.del_component(model)

    # Re-construct controller block with new categorization
    measurements = [
        pyo.Reference(model.fs.cstr.outlet.conc_mol[:, 'C']),
        pyo.Reference(model.fs.cstr.outlet.conc_mol[:, 'E']),
        pyo.Reference(model.fs.cstr.outlet.conc_mol[:, 'S']),
        pyo.Reference(model.fs.cstr.outlet.conc_mol[:, 'P']),
    ]
    controller = ControllerBlock(
        model=model,
        time=time,
        measurements=measurements,
        category_dict={None: var_partition},
    )
    controller.construct()

    #####
    # Construct dynamic block for plant
    #####
    model = m_plant
    time = model.fs.time
    t0 = time.first()
    t1 = time[2]
    scalar_vars, dae_vars = flatten_dae_components(
        model,
        time,
        pyo.Var,
    )
    scalar_cons, dae_cons = flatten_dae_components(
        model,
        time,
        pyo.Constraint,
    )
    inputs = [
        model.fs.mixer.S_inlet.flow_vol,
        model.fs.mixer.E_inlet.flow_vol,
    ]
    measurements = [
        pyo.Reference(model.fs.cstr.outlet.conc_mol[:, 'C']),
        pyo.Reference(model.fs.cstr.outlet.conc_mol[:, 'E']),
        pyo.Reference(model.fs.cstr.outlet.conc_mol[:, 'S']),
        pyo.Reference(model.fs.cstr.outlet.conc_mol[:, 'P']),
    ]
    model.fs.cstr.control_volume.material_holdup[:, 'aq', 'Solvent'].fix()
    model.fs.cstr.total_flow_balance.deactivate()

    var_partition, con_partition = categorize_dae_variables_and_constraints(
        model,
        dae_vars,
        dae_cons,
        time,
        input_vars=inputs,
    )
    plant = DynamicBlock(
        model=model,
        time=time,
        measurements=measurements,
        category_dict={None: var_partition},
    )
    plant.construct()

    p_t0 = plant.time.first()
    c_t0 = controller.time.first()
    p_ts = plant.sample_points[1]
    c_ts = controller.sample_points[1]

    controller.set_sample_time(sample_time)
    plant.set_sample_time(sample_time)

    # We now perform the "RTO" calculation: Find the optimal steady state
    # to achieve the following setpoint
    setpoint = [
        (controller.mod.fs.cstr.outlet.conc_mol[0, 'P'], 0.4),
        #(controller.mod.fs.cstr.outlet.conc_mol[0, 'S'], 0.01),
        (controller.mod.fs.cstr.outlet.conc_mol[0, 'S'], 0.1),
        (controller.mod.fs.cstr.control_volume.energy_holdup[0, 'aq'], 300),
        (controller.mod.fs.mixer.E_inlet.flow_vol[0], 0.1),
        (controller.mod.fs.mixer.S_inlet.flow_vol[0], 2.0),
        (controller.mod.fs.cstr.volume[0], 1.0),
    ]
    setpoint_weights = [
        (controller.mod.fs.cstr.outlet.conc_mol[0, 'P'], 1.),
        (controller.mod.fs.cstr.outlet.conc_mol[0, 'S'], 1.),
        (controller.mod.fs.cstr.control_volume.energy_holdup[0, 'aq'], 1.),
        (controller.mod.fs.mixer.E_inlet.flow_vol[0], 1.),
        (controller.mod.fs.mixer.S_inlet.flow_vol[0], 1.),
        (controller.mod.fs.cstr.volume[0], 1.),
    ]

    # Some of the "differential variables" that have been fixed in the
    # model file are different from the measurements listed above. We
    # unfix them here so the RTO solve is not overconstrained.
    # (The RTO solve will only automatically unfix inputs and measurements.)
    controller.mod.fs.cstr.control_volume.material_holdup[0, ...].unfix()
    controller.mod.fs.cstr.control_volume.energy_holdup[0, ...].unfix()
    #controller.mod.fs.cstr.volume[0].unfix()
    controller.mod.fs.cstr.control_volume.material_holdup[0, 'aq',
                                                          'Solvent'].fix()

    controller.add_setpoint_objective(setpoint, setpoint_weights)
    controller.solve_setpoint(solver)

    # Now we are ready to construct the tracking NMPC problem
    tracking_weights = [
        *((v, 1.) for v in controller.vectors.differential[:, 0]),
        *((v, 1.) for v in controller.vectors.input[:, 0]),
    ]

    controller.add_tracking_objective(tracking_weights)

    controller.constrain_control_inputs_piecewise_constant()

    controller.initialize_to_initial_conditions()

    # Solve the first control problem
    controller.vectors.input[...].unfix()
    controller.vectors.input[:, 0].fix()
    solver.solve(controller, tee=True)

    # For a proper NMPC simulation, we must have noise.
    # We do this by treating inputs and measurements as Gaussian random
    # variables with the following variances (and bounds).
    cstr = controller.mod.fs.cstr
    variance = [
        (cstr.outlet.conc_mol[0.0, 'S'], 0.01),
        (cstr.outlet.conc_mol[0.0, 'E'], 0.005),
        (cstr.outlet.conc_mol[0.0, 'C'], 0.01),
        (cstr.outlet.conc_mol[0.0, 'P'], 0.005),
        (cstr.outlet.temperature[0.0], 1.),
        (cstr.volume[0.0], 0.05),
    ]
    controller.set_variance(variance)
    measurement_variance = [
        v.variance for v in controller.MEASUREMENT_BLOCK[:].var
    ]
    measurement_noise_bounds = [(0.0, var[c_t0].ub)
                                for var in controller.MEASUREMENT_BLOCK[:].var]

    mx = plant.mod.fs.mixer
    variance = [
        (mx.S_inlet_state[0.0].flow_vol, 0.02),
        (mx.E_inlet_state[0.0].flow_vol, 0.001),
    ]
    plant.set_variance(variance)
    input_variance = [v.variance for v in plant.INPUT_BLOCK[:].var]
    input_noise_bounds = [(0.0, var[p_t0].ub)
                          for var in plant.INPUT_BLOCK[:].var]

    random.seed(100)

    # Extract inputs from controller and inject them into plant
    inputs = controller.generate_inputs_at_time(c_ts)
    plant.inject_inputs(inputs)

    # This "initialization" really simulates the plant with the new inputs.
    plant.vectors.input[:, :].fix()
    plant.initialize_by_solving_elements(solver)
    plant.vectors.input[:, :].fix()
    solver.solve(plant, tee=True)

    for i in range(1, 11):
        print('\nENTERING NMPC LOOP ITERATION %s\n' % i)
        measured = plant.generate_measurements_at_time(p_ts)
        plant.advance_one_sample()
        plant.initialize_to_initial_conditions()
        measured = apply_noise_with_bounds(
            measured,
            measurement_variance,
            random.gauss,
            measurement_noise_bounds,
        )

        controller.advance_one_sample()
        controller.load_measurements(measured)

        solver.solve(controller, tee=True)

        inputs = controller.generate_inputs_at_time(c_ts)
        inputs = apply_noise_with_bounds(
            inputs,
            input_variance,
            random.gauss,
            input_noise_bounds,
        )
        plant.inject_inputs(inputs)

        plant.initialize_by_solving_elements(solver)
        solver.solve(plant)

    import pdb
    pdb.set_trace()