Code example #1
File: expand_arcs.py  Project: neel1104/pyomo
class ExpandArcs(Transformation):
    alias('network.expand_arcs',
          doc="Expand all Arcs in the model to simple constraints")

    def _apply_to(self, instance, **kwds):
        if __debug__ and logger.isEnabledFor(logging.DEBUG):
            logger.debug("Calling ArcExpander")

        # We need to collect all ports so that we know every port each one
        # is related to; this lets us expand empty ports
        port_list, known_port_sets, matched_ports = \
            self._collect_ports(instance)

        self._add_blocks(instance)

        for port in port_list:
            # iterate over ref so that the index set is the same
            # for all occurrences of this member in related ports
            # and so we iterate over members deterministically
            ref = known_port_sets[id(matched_ports[port])]
            for k, v in sorted(iteritems(ref)):
                rule, kwds = port._rules[k]
                if v[1] >= 0:
                    index_set = v[0].index_set()
                else:
                    index_set = UnindexedComponent_set
                rule(port, k, index_set, **kwds)

        for arc in instance.component_objects(**obj_iter_kwds):
            arc.deactivate()

    def _collect_ports(self, instance):
        self._name_buffer = {}
        # List of the ports in the order in which we found them
        # (this should be deterministic, provided that the user's model
        # is deterministic)
        port_list = []
        # ID of the next port group (set of matched ports)
        groupID = 0
        # port_groups starts out as a dict of {id(set): (groupID, set)}
        # If you sort by the groupID, then this will be deterministic.
        port_groups = dict()
        # map of port to the set of ports that must match it
        matched_ports = ComponentMap()

        for arc in instance.component_data_objects(**obj_iter_kwds):
            ports = ComponentSet(arc.ports)
            ref = None

            for p in arc.ports:
                if p in matched_ports:
                    if ref is None:
                        # The first port in this arc has
                        # already been seen. We will use that Set as
                        # the reference
                        ref = matched_ports[p]
                    elif ref is not matched_ports[p]:
                        # We already have a reference group; merge this
                        # new group into it.

                        # Optimization: this merge is linear in the size
                        # of the src set. If the reference set is
                        # smaller, save time by switching to a new
                        # reference set.
                        src = matched_ports[p]
                        if len(ref) < len(src):
                            ref, src = src, ref
                        ref.update(src)
                        for i in src:
                            matched_ports[i] = ref
                        del port_groups[id(src)]
                    # else: pass
                    #   The new group *is* the reference group;
                    #   there is nothing to do.
                else:
                    # The port has not been seen before.
                    port_list.append(p)
                    if ref is None:
                        # This is the first port in the arc:
                        # start a new reference set.
                        ref = ComponentSet()
                        port_groups[id(ref)] = (groupID, ref)
                        groupID += 1
                    # This port hasn't been seen. Record it.
                    ref.add(p)
                    matched_ports[p] = ref

        # Validate all port sets and expand the empty ones
        known_port_sets = {}
        for groupID, port_set in sorted(itervalues(port_groups)):
            known_port_sets[id(port_set)] \
                = self._validate_and_expand_port_set(port_set)

        return port_list, known_port_sets, matched_ports

    def _validate_and_expand_port_set(self, ports):
        ref = {}
        # First, go through the ports and get the superset of all fields
        for p in ports:
            for k, v in iteritems(p.vars):
                if k in ref:
                    # We have already seen this var
                    continue
                if v is None:
                    # This is an implicit var
                    continue
                # OK: New var, so add it to the reference list
                _len = (-1 if not v.is_indexed() else len(v))
                ref[k] = (v, _len, p, p.rule_for(k))

        if not ref:
            logger.warning("Cannot identify a reference port: no ports "
                           "in the port set have assigned variables:\n\t(%s)" %
                           ', '.join(sorted(p.name for p in ports)))
            return ref

        # Now make sure that ports match
        empty_or_partial = []
        for p in ports:
            p_is_partial = False
            if not p.vars:
                # This is an empty port and should be defined with
                # "auto" vars
                empty_or_partial.append(p)
                continue

            for k, v in iteritems(ref):
                if k not in p.vars:
                    raise ValueError(
                        "Port mismatch: Port '%s' missing variable "
                        "'%s' (appearing in reference port '%s')" %
                        (p.name, k, v[2].name))
                _v = p.vars[k]
                if _v is None:
                    if not p_is_partial:
                        empty_or_partial.append(p)
                        p_is_partial = True
                    continue
                _len = (-1 if not _v.is_indexed() else len(_v))
                if (_len >= 0) ^ (v[1] >= 0):
                    raise ValueError(
                        "Port mismatch: Port variable '%s' mixing "
                        "indexed and non-indexed targets on ports '%s' "
                        "and '%s'" % (k, v[2].name, p.name))
                if _len >= 0 and _len != v[1]:
                    raise ValueError(
                        "Port mismatch: Port variable '%s' index "
                        "mismatch (%s elements in reference port '%s', "
                        "but %s elements in port '%s')" %
                        (k, v[1], v[2].name, _len, p.name))
                if v[1] >= 0 and len(v[0].index_set() ^ _v.index_set()):
                    raise ValueError(
                        "Port mismatch: Port variable '%s' has "
                        "mismatched indices on ports '%s' and '%s'" %
                        (k, v[2].name, p.name))
                if p.rule_for(k) is not v[3]:
                    raise ValueError("Port mismatch: Port variable '%s' has "
                                     "different rules on ports '%s' and '%s'" %
                                     (k, v[2].name, p.name))

        # as we are adding things to the model, sort by key so that
        # the order things are added is deterministic
        sorted_refs = sorted(iteritems(ref))
        if len(empty_or_partial) > 1:
            # This is expensive (names aren't cheap), but does result in
            # a deterministic ordering
            empty_or_partial.sort(key=lambda x: x.getname(
                fully_qualified=True, name_buffer=self._name_buffer))

        # Fill in any empty ports
        for p in empty_or_partial:
            block = p.parent_block()
            for k, v in sorted_refs:
                if k in p.vars and p.vars[k] is not None:
                    continue

                vname = unique_component_name(
                    block, '%s_auto_%s' %
                    (p.getname(fully_qualified=True,
                               name_buffer=self._name_buffer), k))

                new_var = replicate_var(v[0], vname, block)

                # add this new variable to the port so that it has a rule
                p.add(new_var, k, rule=v[3])

        return ref

    def _add_blocks(self, instance):
        # iterate over component_objects so we can make indexed blocks
        for arc in instance.component_objects(**obj_iter_kwds):
            blk = Block(arc.index_set())
            bname = unique_component_name(arc.parent_block(),
                                          "%s_expanded" % arc.local_name)
            arc.parent_block().add_component(bname, blk)
            arc._expanded_block = blk
            if arc.is_indexed():
                for i in arc:
                    arc[i]._expanded_block = blk[i]
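For reference, this transformation is normally invoked through Pyomo's TransformationFactory rather than instantiated directly. A minimal, hypothetical usage sketch (assuming a recent Pyomo with pyomo.network; the model and names are ours, not from the snippet above):

from pyomo.environ import ConcreteModel, TransformationFactory, Var
from pyomo.network import Arc, Port

m = ConcreteModel()
m.x = Var()
m.y = Var()
m.p1 = Port(initialize={'flow': m.x})
m.p2 = Port(initialize={'flow': m.y})
m.a = Arc(source=m.p1, destination=m.p2)

# Expands m.a into equality constraints linking the matching port
# members (here m.x == m.y) and deactivates the Arc itself
TransformationFactory('network.expand_arcs').apply_to(m)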
Code example #2
File: finitedifference.py  Project: sgjkkx/pyomo
class Finite_Difference_Transformation(Transformation):
    """
    Transformation that applies finite difference methods to
    DAE, ODE, or PDE models.
    """
    alias('dae.finite_difference', doc="Discretizes a DAE model using "\
          "a finite difference method transforming the model into an NLP.")


    def __init__(self):
        super(Finite_Difference_Transformation, self).__init__()
        self._nfe = {}
        self.all_schemes = {
            'BACKWARD' : (_backward_transform, _backward_transform_order2),
            'CENTRAL' : (_central_transform, _central_transform_order2),
            'FORWARD' : (_forward_transform, _forward_transform_order2),
            }

    def _setup(self, instance):
        instance = instance.clone()
        instance.construct()
        return instance

    def _apply_to(self, instance, **kwds):
        """
        Applies the transformation to a modeling instance

        Keyword Arguments:
        nfe           The desired number of finite element points to be
                      included in the discretization.
        wrt           Indicates which ContinuousSet the transformation
                      should be applied to. If this keyword argument is not
                      specified then the same scheme will be applied to all
                      ContinuousSets.
        scheme        Indicates which finite difference method to apply.
                      Options are BACKWARD, CENTRAL, or FORWARD. The default
                      scheme is the backward difference method.
        """

        options = kwds.pop('options', {})

        tmpnfe = kwds.pop('nfe',10)
        tmpds = kwds.pop('wrt',None)
        tmpscheme = kwds.pop('scheme','BACKWARD')
        self._scheme_name = tmpscheme.upper()

        if tmpds is not None:
            if tmpds.type() is not ContinuousSet:
                raise TypeError("The component specified using the 'wrt' keyword "\
                     "must be a differential set")
            elif 'scheme' in tmpds.get_discretization_info():
                raise ValueError("The discretization scheme '%s' has already been applied "\
                     "to the ContinuousSet '%s'" %(tmpds.get_discretization_info()['scheme'],tmpds.name))

        if None in self._nfe:
            raise ValueError("A general discretization scheme has already been applied to "\
                    "to every differential set in the model. If you would like to specify a "\
                    "specific discretization scheme for one of the differential sets you must discretize "\
                    "each differential set individually. If you would like to apply a different "\
                    "discretization scheme to all differential sets you must declare a new Implicit_"\
                    "Euler object")

        if len(self._nfe) == 0 and tmpds is None:
            # Same discretization on all differentialsets
            self._nfe[None] = tmpnfe
            currentds = None
        else:
            self._nfe[tmpds.name] = tmpnfe
            currentds = tmpds.name

        self._scheme = self.all_schemes.get(self._scheme_name,None)
        if self._scheme is None:
            raise ValueError("Unknown finite difference scheme '%s' specified using the "\
                     "'scheme' keyword. Valid schemes are 'BACKWARD', 'CENTRAL', and "\
                     "'FORWARD'" %(tmpscheme))

        for block in instance.block_data_objects(active=True):
            self._transformBlock(block,currentds)

        return instance

    def _transformBlock(self, block, currentds):

        self._fe = {}
        for ds in itervalues(block.component_map(ContinuousSet)):
            if currentds is None or currentds == ds.name:
                generate_finite_elements(ds,self._nfe[currentds])
                if not ds.get_changed():
                    if len(ds)-1 > self._nfe[currentds]:
                        print("***WARNING: More finite elements were found in ContinuousSet "\
                            "'%s' than the number of finite elements specified in apply. "\
                            "The larger number of finite elements will be used." %(ds.name))

                self._nfe[ds.name] = len(ds) - 1
                self._fe[ds.name] = sorted(ds)
                # Adding discretization information to the differentialset
                # object itself so that it can be accessed outside of the
                # discretization object
                disc_info = ds.get_discretization_info()
                disc_info['nfe'] = self._nfe[ds.name]
                disc_info['scheme'] = self._scheme_name + ' Difference'

        # Maybe check to see if any of the ContinuousSets have been changed,
        # if they haven't then the model components need not be updated
        # or even iterated through

        for c in itervalues(block.component_map()):
            update_contset_indexed_component(c)

        for d in itervalues(block.component_map(DerivativeVar)):
            dsets = d.get_continuousset_list()
            for i in set(dsets):
                if currentds is None or i.name == currentds:
                    oldexpr = d.get_derivative_expression()
                    loc = d.get_state_var()._contset[i]
                    count = dsets.count(i)
                    if count >= 3:
                        raise DAE_Error(
                            "Error discretizing '%s' with respect to '%s'. Current implementation "\
                            "only allows for taking the first or second derivative with respect to "\
                            "a particular ContinuousSet" %(d.name,i.name))
                    scheme = self._scheme[count-1]
                    newexpr = create_partial_expression(scheme,oldexpr,i,loc)
                    d.set_derivative_expression(newexpr)

            # Reclassify DerivativeVar if all indexing ContinuousSets have been discretized
            if d.is_fully_discretized():
                add_discretization_equations(block,d)
                block.reclassify_component_type(d,Var)

        # Reclassify Integrals if all ContinuousSets have been discretized
        if block_fully_discretized(block):

            if block.contains_component(Integral):
                for i in itervalues(block.component_map(Integral)):
                    i.reconstruct()
                    block.reclassify_component_type(i,Expression)
                # If a model contains integrals they are most likely to appear in the objective
                # function which will need to be reconstructed after the model is discretized.
                for k in itervalues(block.component_map(Objective)):
                    k.reconstruct()
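For reference, a minimal usage sketch of this transformation through TransformationFactory, following the documented pyomo.dae workflow (the model and names are illustrative):

from pyomo.environ import ConcreteModel, TransformationFactory, Var
from pyomo.dae import ContinuousSet, DerivativeVar

m = ConcreteModel()
m.t = ContinuousSet(bounds=(0, 10))
m.z = Var(m.t)
m.dzdt = DerivativeVar(m.z, wrt=m.t)

# Discretize m.t into 20 finite elements using backward differences
# (implicit Euler); afterwards the model is an ordinary NLP
TransformationFactory('dae.finite_difference').apply_to(
    m, nfe=20, wrt=m.t, scheme='BACKWARD')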
Code example #3
class Collocation_Discretization_Transformation(Transformation):

    alias('dae.collocation', doc="Discretizes a DAE model using "
          "orthogonal collocation over finite elements transforming "
          "the model into an NLP.")

    def __init__(self):
        super(Collocation_Discretization_Transformation, self).__init__()
        self._ncp = {}
        self._nfe = {}
        self._adot = {}
        self._adotdot = {}
        self._afinal = {}
        self._tau = {}
        self._reduced_cp = {}
        self.all_schemes = {
            'LAGRANGE-RADAU': (_lagrange_radau_transform,
                               _lagrange_radau_transform_order2),
            'LAGRANGE-LEGENDRE': (_lagrange_legendre_transform,
                                  _lagrange_legendre_transform_order2)}

    def _get_radau_constants(self, currentds):
        """
        This function sets the Radau collocation points and 'a' values depending
        on how many collocation points have been specified and whether or not
        the user has numpy
        """
        if not numpy_available:
            if self._ncp[currentds] > 10:
                raise ValueError("Numpy was not found so the maximum number "
                                 "of collocation points is 10")
            from pyomo.dae.utilities import (radau_tau_dict, radau_adot_dict,
                                             radau_adotdot_dict)
            self._tau[currentds] = radau_tau_dict[self._ncp[currentds]]
            self._adot[currentds] = radau_adot_dict[self._ncp[currentds]]
            self._adotdot[currentds] = radau_adotdot_dict[self._ncp[currentds]]
            self._afinal[currentds] = None
        else:
            alpha = 1
            beta = 0
            k = self._ncp[currentds] - 1
            cp = sorted(list(calc_cp(alpha, beta, k)))
            cp.insert(0, 0.0)
            cp.append(1.0)
            adot = calc_adot(cp, 1)
            adotdot = calc_adot(cp, 2)

            self._tau[currentds] = cp
            self._adot[currentds] = adot
            self._adotdot[currentds] = adotdot
            self._afinal[currentds] = None

    def _get_legendre_constants(self, currentds):
        """
        This function sets the Legendre collocation points and 'a' values
        depending on how many collocation points have been specified and
        whether or not the user has numpy
        """
        if not numpy_available:
            if self._ncp[currentds] > 10:
                raise ValueError("Numpy was not found so the maximum number "
                                 "of collocation points is 10")
            from pyomo.dae.utilities import (legendre_tau_dict,
                                             legendre_adot_dict,
                                             legendre_adotdot_dict,
                                             legendre_afinal_dict)
            self._tau[currentds] = legendre_tau_dict[self._ncp[currentds]]
            self._adot[currentds] = legendre_adot_dict[self._ncp[currentds]]
            self._adotdot[currentds] = \
                legendre_adotdot_dict[self._ncp[currentds]]
            self._afinal[currentds] = \
                legendre_afinal_dict[self._ncp[currentds]]
        else:
            alpha = 0
            beta = 0
            k = self._ncp[currentds]
            cp = sorted(list(calc_cp(alpha, beta, k)))
            cp.insert(0, 0.0)
            adot = calc_adot(cp, 1)
            adotdot = calc_adot(cp, 2)
            afinal = calc_afinal(cp)

            self._tau[currentds] = cp
            self._adot[currentds] = adot
            self._adotdot[currentds] = adotdot
            self._afinal[currentds] = afinal

    def _apply_to(self, instance, **kwds):
        """
        Applies specified collocation transformation to a modeling instance

        Keyword Arguments:
        nfe           The desired number of finite element points to be
                      included in the discretization.
        ncp           The desired number of collocation points over each
                      finite element.
        wrt           Indicates which ContinuousSet the transformation
                      should be applied to. If this keyword argument is not
                      specified then the same scheme will be applied to all
                      ContinuousSets.
        scheme        Indicates which collocation scheme to apply.
                      Options are 'LAGRANGE-RADAU' and 'LAGRANGE-LEGENDRE'.
                      The default scheme is Lagrange polynomials with Radau
                      roots.
        """

        tmpnfe = kwds.pop('nfe', 10)
        tmpncp = kwds.pop('ncp', 3)
        tmpds = kwds.pop('wrt', None)
        tmpscheme = kwds.pop('scheme', 'LAGRANGE-RADAU')
        self._scheme_name = tmpscheme.upper()

        if tmpds is not None:
            if tmpds.type() is not ContinuousSet:
                raise TypeError("The component specified using the 'wrt' "
                                "keyword must be a continuous set")
            elif 'scheme' in tmpds.get_discretization_info():
                raise ValueError("The discretization scheme '%s' has already "
                                 "been applied to the ContinuousSet '%s'"
                                 % (tmpds.get_discretization_info()['scheme'],
                                    tmpds.name))

        if tmpnfe <= 0:
            raise ValueError(
                "The number of finite elements must be at least 1")
        if tmpncp <= 0:
            raise ValueError(
                "The number of collocation points must be at least 1")

        if None in self._nfe:
            raise ValueError(
                "A general discretization scheme has already been applied to "
                "to every ContinuousSet in the model. If you would like to "
                "specify a specific discretization scheme for one of the "
                "ContinuousSets you must discretize each ContinuousSet "
                "separately.")

        if len(self._nfe) == 0 and tmpds is None:
            # Same discretization on all ContinuousSets
            self._nfe[None] = tmpnfe
            self._ncp[None] = tmpncp
            currentds = None
        else:
            self._nfe[tmpds.name] = tmpnfe
            self._ncp[tmpds.name] = tmpncp
            currentds = tmpds.name

        self._scheme = self.all_schemes.get(self._scheme_name, None)
        if self._scheme is None:
            raise ValueError("Unknown collocation scheme '%s' specified using "
                             "the 'scheme' keyword. Valid schemes are "
                             "'LAGRANGE-RADAU' and 'LAGRANGE-LEGENDRE'"
                              % tmpscheme)

        if self._scheme_name == 'LAGRANGE-RADAU':
            self._get_radau_constants(currentds)
        elif self._scheme_name == 'LAGRANGE-LEGENDRE':
            self._get_legendre_constants(currentds)

        self._transformBlock(instance, currentds)

        return instance

    def _transformBlock(self, block, currentds):

        self._fe = {}
        for ds in block.component_objects(ContinuousSet, descend_into=True):
            if currentds is None or currentds == ds.name:
                generate_finite_elements(ds, self._nfe[currentds])
                if not ds.get_changed():
                    if len(ds) - 1 > self._nfe[currentds]:
                        logger.warn("More finite elements were found in "
                                    "ContinuousSet '%s' than the number of "
                                    "finite elements specified in apply. The "
                                    "larger number of finite elements will be "
                                    "used." % ds.name)

                self._nfe[ds.name] = len(ds) - 1
                self._fe[ds.name] = sorted(ds)
                generate_colloc_points(ds, self._tau[currentds])
                # Adding discretization information to the continuousset
                # object itself so that it can be accessed outside of the
                # discretization object
                disc_info = ds.get_discretization_info()
                disc_info['nfe'] = self._nfe[ds.name]
                disc_info['ncp'] = self._ncp[currentds]
                disc_info['tau_points'] = self._tau[currentds]
                disc_info['adot'] = self._adot[currentds]
                disc_info['adotdot'] = self._adotdot[currentds]
                disc_info['afinal'] = self._afinal[currentds]
                disc_info['scheme'] = self._scheme_name

        expand_components(block)

        for d in block.component_objects(DerivativeVar, descend_into=True):
            dsets = d.get_continuousset_list()
            for i in set(dsets):
                if currentds is None or i.name == currentds:
                    oldexpr = d.get_derivative_expression()
                    loc = d.get_state_var()._contset[i]
                    count = dsets.count(i)
                    if count >= 3:
                        raise DAE_Error(
                            "Error discretizing '%s' with respect to '%s'. "
                            "Current implementation only allows for taking the"
                            " first or second derivative with respect to a "
                            "particular ContinuousSet" % (d.name, i.name))
                    scheme = self._scheme[count - 1]

                    newexpr = create_partial_expression(scheme, oldexpr, i,
                                                        loc)
                    d.set_derivative_expression(newexpr)
                    if self._scheme_name == 'LAGRANGE-LEGENDRE':
                        # Add continuity equations to DerivativeVar's parent
                        #  block
                        add_continuity_equations(d.parent_block(), d, i, loc)

            # Reclassify DerivativeVar if all indexing ContinuousSets have
            # been discretized. Add discretization equations to the
            # DerivativeVar's parent block.
            if d.is_fully_discretized():
                add_discretization_equations(d.parent_block(), d)
                d.parent_block().reclassify_component_type(d, Var)

                # Keep track of any reclassified DerivativeVar components so
                # that the Simulator can easily identify them if the model
                # is simulated after discretization
                # TODO: Update the discretization transformations to use
                # a Block to add things to the model and store discretization
                # information. Using a list for now because the simulator
                # does not yet support models containing active Blocks
                reclassified_list = getattr(block,
                                            '_pyomo_dae_reclassified_derivativevars',
                                            None)
                if reclassified_list is None:
                    block._pyomo_dae_reclassified_derivativevars = list()
                    reclassified_list = \
                        block._pyomo_dae_reclassified_derivativevars

                reclassified_list.append(d)

        # Reclassify Integrals if all ContinuousSets have been discretized
        if block_fully_discretized(block):

            if block.contains_component(Integral):
                for i in block.component_objects(Integral, descend_into=True):
                    i.reconstruct()
                    i.parent_block().reclassify_component_type(i, Expression)
                # If a model contains integrals they are most likely to appear
                # in the objective function which will need to be reconstructed
                # after the model is discretized.
                for k in block.component_objects(Objective, descend_into=True):
                    # TODO: check this, reconstruct might not work
                    k.reconstruct()

    def _get_idx(self, l, t, n, i, k):
        """
        This function returns the appropriate index for the ContinuousSet
        and the derivative variables. It's needed because the collocation
        constraints are indexed by finite element and collocation point,
        whereas a ContinuousSet contains a list of all the discretization
        points and is not separated into finite elements and collocation
        points.
        """

        tmp = t.index(t._fe[i])
        tik = t[tmp + k]
        if n is None:
            return tik
        else:
            tmpn = n
            if not isinstance(n, tuple):
                tmpn = (n,)
            return tmpn[0:l] + (tik,) + tmpn[l:]

    def reduce_collocation_points(self, instance, var=None, ncp=None,
                                  contset=None):
        """
        This method will add additional constraints to a model to reduce the
        number of free collocation points (degrees of freedom) for a particular
        variable.

        Parameters
        ----------
        instance : Pyomo model
            The discretized Pyomo model to add constraints to

        var : ``pyomo.environ.Var``
            The Pyomo variable for which the degrees of freedom will be reduced

        ncp : int
            The new number of free collocation points for `var`. Must be
            less than the number of collocation points used in discretizing
            the model.

        contset : ``pyomo.dae.ContinuousSet``
            The :py:class:`ContinuousSet<pyomo.dae.ContinuousSet>` that was
            discretized and for which the `var` will have a reduced number
            of degrees of freedom

        """
        if contset is None:
            raise TypeError("A continuous set must be specified using the "
                            "keyword 'contset'")
        if contset.type() is not ContinuousSet:
            raise TypeError("The component specified using the 'contset' "
                            "keyword must be a ContinuousSet")
        ds = contset

        if len(self._ncp) == 0:
            raise RuntimeError("This method should only be called after using "
                               "the apply() method to discretize the model")
        elif None in self._ncp:
            tot_ncp = self._ncp[None]
        elif ds.name in self._ncp:
            tot_ncp = self._ncp[ds.name]
        else:
            raise ValueError("ContinuousSet '%s' has not been discretized, "
                             "please call the apply_to() method with this "
                             "ContinuousSet to discretize it before calling "
                             "this method" % ds.name)

        if var is None:
            raise TypeError("A variable must be specified")
        if var.type() is not Var:
            raise TypeError("The component specified using the 'var' keyword "
                            "must be a variable")

        if ncp is None:
            raise TypeError(
                "The number of collocation points must be specified")
        if ncp <= 0:
            raise ValueError(
                "The number of collocation points must be at least 1")
        if ncp > tot_ncp:
            raise ValueError("The number of collocation points used to "
                             "interpolate an individual variable must be less "
                             "than the number used to discretize the original "
                             "model")
        if ncp == tot_ncp:
            # Nothing to be done
            return instance

        # Check to see if the continuousset is an indexing set of the variable
        if var.dim() == 0:
            raise IndexError("ContinuousSet '%s' is not an indexing set of"
                             " the variable '%s'" % (ds.name, var.name))
        elif var.dim() == 1:
            if ds not in var._index:
                raise IndexError("ContinuousSet '%s' is not an indexing set of"
                                 " the variable '%s'" % (ds.name, var.name))
        elif ds not in var._implicit_subsets:
            raise IndexError("ContinuousSet '%s' is not an indexing set of the"
                             " variable '%s'" % (ds.name, var.name))

        if var.name in self._reduced_cp:
            temp = self._reduced_cp[var.name]
            if ds.name in temp:
                raise RuntimeError("Variable '%s' has already been constrained"
                                   " to a reduced number of collocation points"
                                   " over ContinuousSet '%s'.")
            else:
                temp[ds.name] = ncp
        else:
            self._reduced_cp[var.name] = {ds.name: ncp}

        # TODO: Use unique_component_name for this
        list_name = var.local_name + "_interpolation_constraints"

        instance.add_component(list_name, ConstraintList())
        conlist = instance.find_component(list_name)

        t = sorted(ds)
        fe = ds._fe
        info = get_index_information(var, ds)
        tmpidx = info['non_ds']
        idx = info['index function']

        # Iterate over non_ds indices
        for n in tmpidx:
            # Iterate over finite elements
            for i in xrange(0, len(fe) - 1):
                # Iterate over collocation points
                for k in xrange(1, tot_ncp - ncp + 1):
                    if ncp == 1:
                        # Constant over each finite element
                        conlist.add(var[idx(n, i, k)] ==
                                    var[idx(n, i, tot_ncp)])
                    else:
                        tmp = t.index(fe[i])
                        tmp2 = t.index(fe[i + 1])
                        ti = t[tmp + k]
                        tfit = t[tmp2 - ncp + 1:tmp2 + 1]
                        coeff = self._interpolation_coeffs(ti, tfit)
                        conlist.add(var[idx(n, i, k)] ==
                                    sum(var[idx(n, i, j)] * next(coeff)
                                        for j in xrange(tot_ncp - ncp + 1,
                                                        tot_ncp + 1)))

        return instance

    def _interpolation_coeffs(self, ti, tfit):
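        # For each interpolation point i in tfit, yield the Lagrange basis
        # coefficient l_i(ti) = prod over j != i of (ti - j) / (i - j)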

        for i in tfit:
            l = 1
            for j in tfit:
                if i != j:
                    l = l * (ti - j) / (i - j)
            yield l
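For reference, a usage sketch for this transformation, including the reduce_collocation_points helper, following the documented pyomo.dae workflow (the model, the control variable m.u, and all names are illustrative):

from pyomo.environ import ConcreteModel, TransformationFactory, Var
from pyomo.dae import ContinuousSet, DerivativeVar

m = ConcreteModel()
m.t = ContinuousSet(bounds=(0, 1))
m.x = Var(m.t)
m.u = Var(m.t)
m.dxdt = DerivativeVar(m.x, wrt=m.t)

# Radau collocation: 10 finite elements, 3 collocation points each
discretizer = TransformationFactory('dae.collocation')
discretizer.apply_to(m, nfe=10, ncp=3, scheme='LAGRANGE-RADAU')

# Restrict the control profile m.u to one free value per finite
# element (piecewise constant) via interpolation constraints
discretizer.reduce_collocation_points(m, var=m.u, ncp=1, contset=m.t)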
Code example #4
class ExpandConnectors(Transformation):
    alias('core.expand_connectors',
          doc="Expand all connectors in the model to simple constraints")

    def _apply_to(self, instance, **kwds):
        if __debug__ and logger.isEnabledFor(logging.DEBUG):  #pragma:nocover
            logger.debug("Calling ConnectorExpander")

        connectorsFound = False
        for c in instance.component_data_objects(Connector):
            connectorsFound = True
            break
        if not connectorsFound:
            return

        if __debug__ and logger.isEnabledFor(logging.DEBUG):  #pragma:nocover
            logger.debug("   Connectors found!")

        #
        # At this point, there are connectors in the model, so we must
        # look for constraints that involve connectors and expand them.
        #
        connector_types = set([SimpleConnector, _ConnectorData])
        constraint_list = []
        connector_list = []
        matched_connectors = {}
        found = dict()
        for constraint in instance.component_data_objects(Constraint):
            for c in expr.identify_variables(
                    constraint.body, include_potentially_variable=True):
                if c.__class__ in connector_types:
                    found[id(c)] = c
            if not found:
                continue

            # Note that it is important to copy the set of found
            # connectors, since the matching routine below will
            # manipulate sets in place.
            found_this_constraint = dict(found)
            constraint_list.append((constraint, found_this_constraint))

            # Find all the connectors that are used in the constraint,
            # so we know which connectors to validate against each
            # other.  Note that the validation must be transitive (that
            # is, if con1 has a & b and con2 has b & c, then a, b, and c
            # must all validate against each other).
            for cId, c in iteritems(found_this_constraint):
                if cId in matched_connectors:
                    oldSet = matched_connectors[cId]
                    found.update(oldSet)
                    for _cId in oldSet:
                        matched_connectors[_cId] = found
                else:
                    connector_list.append(c)
                matched_connectors[cId] = found

            # Reset found back to empty (this is more efficient as the
            # bulk of the constraints in the model will not have
            # connectors - so if we did this at the top of the loop, we
            # would spend a lot of time clearing empty sets)
            found = {}

        # Validate all connector sets and expand the empty ones
        known_conn_sets = {}
        for connector in connector_list:
            conn_set = matched_connectors[id(connector)]
            if id(conn_set) in known_conn_sets:
                continue
            known_conn_sets[id(conn_set)] \
                = self._validate_and_expand_connector_set(conn_set)

        # Expand each constraint
        for constraint, conn_set in constraint_list:
            cList = ConstraintList()
            constraint.parent_block().add_component(
                '%s.expanded' % (constraint.local_name, ), cList)
            connId = next(iterkeys(conn_set))
            ref = known_conn_sets[id(matched_connectors[connId])]
            for k, v in sorted(iteritems(ref)):
                if v[1] >= 0:
                    _iter = v[0]
                else:
                    _iter = (v[0], )
                for idx in _iter:
                    substitution = {}
                    for c in itervalues(conn_set):
                        if v[1] >= 0:
                            new_v = c.vars[k][idx]
                        elif k in c.aggregators:
                            new_v = c.vars[k].add()
                        else:
                            new_v = c.vars[k]
                        substitution[id(c)] = new_v
                    cList.add((constraint.lower,
                               expr.clone_expression(constraint.body,
                                                     substitution),
                               constraint.upper))
            constraint.deactivate()

        # Now, go back and implement VarList aggregators
        for conn in connector_list:
            block = conn.parent_block()
            for var, aggregator in iteritems(conn.aggregators):
                c = Constraint(expr=aggregator(block, conn.vars[var]))
                block.add_component('%s.%s.aggregate' % (conn.local_name, var),
                                    c)

    def _validate_and_expand_connector_set(self, connectors):
        ref = {}
        # First, go through the connectors and get the superset of all fields
        for c in itervalues(connectors):
            for k, v in iteritems(c.vars):
                if k in ref:
                    # We have already seen this var
                    continue
                if v is None:
                    # This is an implicit var
                    continue
                # OK: New var, so add it to the reference list
                _len = (
                    #-3 if v is None else
                    -2 if k in c.aggregators else
                    -1 if not hasattr(v, 'is_indexed') or not v.is_indexed()
                    else len(v))
                ref[k] = (v, _len, c)

        if not ref:
            logger.warning(
                "Cannot identify a reference connector: no connectors "
                "in the connector set have assigned variables:\n\t(%s)" %
                (', '.join(sorted(c.name for c in connectors)), ))
            return ref

        # Now make sure that connectors match
        empty_or_partial = []
        for c in itervalues(connectors):
            c_is_partial = False
            if not c.vars:
                # This is an empty connector and should be defined with
                # "auto" vars
                empty_or_partial.append(c)
                continue

            for k, v in iteritems(ref):
                if k not in c.vars:
                    raise ValueError(
                        "Connector mismatch: Connector '%s' missing variable "
                        "'%s' (appearing in reference connector '%s')" %
                        (c.name, k, v[2].name))
                _v = c.vars[k]
                if _v is None:
                    if not c_is_partial:
                        empty_or_partial.append(c)
                        c_is_partial = True
                    continue
                _len = (-3 if _v is None else -2 if k in c.aggregators else
                        -1 if not hasattr(_v, 'is_indexed')
                        or not _v.is_indexed() else len(_v))
                if (_len >= 0) ^ (v[1] >= 0):
                    raise ValueError(
                        "Connector mismatch: Connector variable '%s' mixing "
                        "indexed and non-indexed targets on connectors '%s' "
                        "and '%s'" % (k, v[2].name, c.name))
                if _len >= 0 and _len != v[1]:
                    raise ValueError(
                        "Connector mismatch: Connector variable '%s' index "
                        "mismatch (%s elements in reference connector '%s', "
                        "but %s elements in connector '%s')" %
                        (k, v[1], v[2].name, _len, c.name))
                if v[1] >= 0 and len(v[0].index_set() ^ _v.index_set()):
                    raise ValueError(
                        "Connector mismatch: Connector variable '%s' has "
                        "mismatched indices on connectors '%s' and '%s'" %
                        (k, v[2].name, c.name))

        # as we are adding things to the model, sort by key so that
        # the order things are added is deterministic
        sorted_refs = sorted(iteritems(ref))
        if len(empty_or_partial) > 1:
            # This is expensive (names aren't cheap), but does result in
            # a deterministic ordering
            empty_or_partial.sort(key=lambda x: x.name)

        # Fill in any empty connectors
        for c in empty_or_partial:
            block = c.parent_block()
            for k, v in sorted_refs:
                if k in c.vars and c.vars[k] is not None:
                    continue

                if v[1] >= 0:
                    idx = (v[0].index_set(), )
                else:
                    idx = ()
                var_args = {}
                try:
                    var_args['domain'] = v[0].domain
                except AttributeError:
                    pass
                try:
                    var_args['bounds'] = v[0].bounds
                except AttributeError:
                    pass
                new_var = Var(*idx, **var_args)
                block.add_component('%s.auto.%s' % (c.local_name, k), new_var)
                if idx:
                    for i in idx[0]:
                        new_var[i].domain = v[0][i].domain
                        new_var[i].setlb(v[0][i].lb)
                        new_var[i].setub(v[0][i].ub)
                c.vars[k] = new_var

        return ref
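Connector is the since-deprecated predecessor of pyomo.network.Port, and the transformation above is driven the same way through TransformationFactory; a minimal sketch assuming the legacy Connector API (the model and names are ours):

from pyomo.environ import (ConcreteModel, Connector, Constraint,
                           TransformationFactory, Var)

m = ConcreteModel()
m.x = Var()
m.y = Var()
m.c1 = Connector()
m.c1.add(m.x, 'v')
m.c2 = Connector()
m.c2.add(m.y, 'v')

# Connectors are joined by equating them in an ordinary constraint;
# the transformation replaces it with per-variable equalities
m.link = Constraint(expr=m.c1 == m.c2)
TransformationFactory('core.expand_connectors').apply_to(m)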
Code example #5
File: colloc.py  Project: coopercenter/temoatools
class Collocation_Discretization_Transformation(Transformation):

    alias('dae.collocation', doc="TODO")

    def __init__(self):
        super(Collocation_Discretization_Transformation, self).__init__()
        self._ncp = {}
        self._nfe = {}
        self._adot = {}
        self._adotdot = {}
        self._afinal = {}
        self._tau = {}
        self._reduced_cp = {}
        self.all_schemes = {
            'LAGRANGE-RADAU':
            (_lagrange_radau_transform, _lagrange_radau_transform_order2),
            'LAGRANGE-LEGENDRE': (_lagrange_legendre_transform,
                                  _lagrange_legendre_transform_order2),
            'HERMITE-CUBIC':
            _hermite_cubic_transform,
        }

    def _setup(self, instance):
        instance = instance.clone()
        instance.construct()
        return instance

    def _get_radau_constants(self, currentds):
        """
        This function sets the Radau collocation points and 'a' values depending
        on how many collocation points have been specified and whether or not
        the user has numpy
        """
        if not numpy_available:
            if self._ncp[currentds] > 10:
                raise ValueError("Numpy was not found so the maximum number of "\
                    "collocation points is 10")
            self._tau[currentds] = radau_tau_dict[self._ncp[currentds]]
            self._adot[currentds] = radau_adot_dict[self._ncp[currentds]]
            self._adotdot[currentds] = radau_adotdot_dict[self._ncp[currentds]]
            self._afinal[currentds] = None
        else:
            alpha = 1
            beta = 0
            k = self._ncp[currentds] - 1
            cp = sorted(list(calc_cp(alpha, beta, k)))
            cp.insert(0, 0.0)
            cp.append(1.0)
            adot = calc_adot(cp, 1)
            adotdot = calc_adot(cp, 2)

            self._tau[currentds] = cp
            self._adot[currentds] = adot
            self._adotdot[currentds] = adotdot
            self._afinal[currentds] = None

    def _get_legendre_constants(self, currentds):
        """
        This function sets the Legendre collocation points and 'a' values depending
        on how many collocation points have been specified and whether or not
        the user has numpy
        """
        if not numpy_available:
            if self._ncp[currentds] > 10:
                raise ValueError("Numpy was not found so the maximum number of "\
                    "collocation points is 10")
            self._tau[currentds] = legendre_tau_dict[self._ncp[currentds]]
            self._adot[currentds] = legendre_adot_dict[self._ncp[currentds]]
            self._adotdot[currentds] = legendre_adotdot_dict[
                self._ncp[currentds]]
            self._afinal[currentds] = legendre_afinal_dict[
                self._ncp[currentds]]
        else:
            alpha = 0
            beta = 0
            k = self._ncp[currentds]
            cp = sorted(list(calc_cp(alpha, beta, k)))
            cp.insert(0, 0.0)
            adot = calc_adot(cp, 1)
            adotdot = calc_adot(cp, 2)
            afinal = calc_afinal(cp)

            self._tau[currentds] = cp
            self._adot[currentds] = adot
            self._adotdot[currentds] = adotdot
            self._afinal[currentds] = afinal

    def _get_hermite_constants(self, currentds):
        # TODO: finish this
        raise DAE_Error("Not Implemented")

    def _apply_to(self, instance, **kwds):
        """
        Applies specified collocation transformation to a modeling instance

        Keyword Arguments:
        nfe           The desired number of finite element points to be 
                      included in the discretization.
        ncp           The desired number of collocation points over each 
                      finite element.
        wrt           Indicates which ContinuousSet the transformation 
                      should be applied to. If this keyword argument is not
                      specified then the same scheme will be applied to all
                      ContinuousSets.
        scheme        Indicates which collocation scheme to apply.
                      Options are LAGRANGE-RADAU, LAGRANGE-LEGENDRE, or 
                      HERMITE-CUBIC. The default scheme is Lagrange polynomials
                      with Radau roots.
        """

        options = kwds.pop('options', {})

        tmpnfe = kwds.pop('nfe', 10)
        tmpncp = kwds.pop('ncp', 3)
        tmpds = kwds.pop('wrt', None)
        tmpscheme = kwds.pop('scheme', 'LAGRANGE-RADAU')
        self._scheme_name = tmpscheme.upper()

        if tmpds is not None:
            if tmpds.type() is not ContinuousSet:
                raise TypeError("The component specified using the 'wrt' keyword "\
                     "must be a differential set")
            elif 'scheme' in tmpds.get_discretization_info():
                raise ValueError("The discretization scheme '%s' has already been applied "\
                     "to the ContinuousSet '%s'"%s(tmpds.get_discretization_info()['scheme'],tmpds.cname(True)))

        if tmpnfe <= 0:
            raise ValueError(
                "The number of finite elements must be at least 1")
        if tmpncp <= 0:
            raise ValueError(
                "The number of collocation points must be at least 1")

        if None in self._nfe:
            raise ValueError("A general discretization scheme has already been applied to "\
                    "to every differential set in the model. If you would like to specify a "\
                    "specific discretization scheme for one of the differential sets you must discretize "\
                    "each differential set individually. If you would like to apply a different "\
                    "discretization scheme to all differential sets you must declare a new Collocation"\
                    "_Discretization object")

        if len(self._nfe) == 0 and tmpds is None:
            # Same discretization on all differentialsets
            self._nfe[None] = tmpnfe
            self._ncp[None] = tmpncp
            currentds = None
        else:
            self._nfe[tmpds.name] = tmpnfe
            self._ncp[tmpds.name] = tmpncp
            currentds = tmpds.cname(True)

        self._scheme = self.all_schemes.get(self._scheme_name, None)
        if self._scheme is None:
            raise ValueError("Unknown collocation scheme '%s' specified using the "\
                     "'scheme' keyword. Valid schemes are 'LAGRANGE-RADAU', 'LAGRANGE-LEGENDRE'"\
                     ", and 'HERMITE-CUBIC'" %(tmpscheme))

        if self._scheme_name == 'LAGRANGE-RADAU':
            self._get_radau_constants(currentds)
        elif self._scheme_name == 'LAGRANGE-LEGENDRE':
            self._get_legendre_constants(currentds)

        for block in instance.block_data_objects(active=True):
            self._transformBlock(block, currentds)

        return instance

    def _transformBlock(self, block, currentds):

        self._fe = {}
        for ds in block.component_map(ContinuousSet).itervalues():
            if currentds is None or currentds == ds.cname(True):
                generate_finite_elements(ds, self._nfe[currentds])
                if not ds.get_changed():
                    if len(ds) - 1 > self._nfe[currentds]:
                        print("***WARNING: More finite elements were found in differentialset "\
                            "'%s' than the number of finite elements specified in apply. "\
                              "The larger number of finite elements will be used." % (ds.cname(True),))

                self._nfe[ds.cname(True)] = len(ds) - 1
                self._fe[ds.cname(True)] = sorted(ds)
                generate_colloc_points(ds, self._tau[currentds])
                # Adding discretization information to the differentialset object itself
                # so that it can be accessed outside of the discretization object
                disc_info = ds.get_discretization_info()
                disc_info['nfe'] = self._nfe[ds.cname(True)]
                disc_info['ncp'] = self._ncp[currentds]
                disc_info['tau_points'] = self._tau[currentds]
                disc_info['adot'] = self._adot[currentds]
                disc_info['adotdot'] = self._adotdot[currentds]
                disc_info['afinal'] = self._afinal[currentds]
                disc_info['scheme'] = self._scheme_name

        for c in block.component_map().itervalues():
            update_contset_indexed_component(c)

        for d in block.component_map(DerivativeVar).itervalues():
            dsets = d.get_continuousset_list()
            for i in set(dsets):
                if currentds is None or i.cname(True) == currentds:
                    oldexpr = d.get_derivative_expression()
                    loc = d.get_state_var()._contset[i]
                    count = dsets.count(i)
                    if count >= 3:
                        raise DAE_Error(
                            "Error discretizing '%s' with respect to '%s'. Current implementation "\
                            "only allows for taking the first or second derivative with respect to "\
                            "a particular ContinuousSet" %s(d.cname(True),i.cname(True)))
                    scheme = self._scheme[count - 1]
                    # print i.name, scheme.__name__
                    newexpr = create_partial_expression(
                        scheme, oldexpr, i, loc)
                    d.set_derivative_expression(newexpr)
                    if self._scheme_name == 'LAGRANGE-LEGENDRE':
                        add_continuity_equations(block, d, i, loc)

            # Reclassify DerivativeVar if all indexing ContinuousSets have been discretized
            if d.is_fully_discretized():
                add_discretization_equations(block, d)
                block.reclassify_component_type(d, Var)

        # Reclassify Integrals if all ContinuousSets have been discretized
        if block_fully_discretized(block):

            if block.contains_component(Integral):
                for i in block.component_map(Integral).itervalues():
                    i.reconstruct()
                    block.reclassify_component_type(i, Expression)
                # If a model contains integrals they are most likely to appear in the objective
                # function which will need to be reconstructed after the model is discretized.
                for k in block.component_map(Objective).itervalues():
                    k.reconstruct()

    def _get_idx(self, l, t, n, i, k):
        """
        This function returns the appropriate index for the differential
        and the derivative variables. It's needed because the collocation 
        constraints are indexed by finite element and collocation point,
        whereas a differentialset contains a list of all the discretization
        points and is not separated into finite elements and collocation
        points.
        """

        tmp = t.index(t._fe[i])
        tik = t[tmp + k]
        if n is None:
            return tik
        else:
            tmpn = n
            if not isinstance(n, tuple):
                tmpn = (n, )
            return tmpn[0:l] + (tik, ) + tmpn[l:]

    def reduce_collocation_points(self,
                                  instance,
                                  var=None,
                                  ncp=None,
                                  contset=None):
        """
        This method will add additional constraints to a model if some
        of the Variables are specified as having fewer collocation points
        than the default
        """
        if contset is None:
            raise TypeError(
                "A continuous set must be specified using the keyword 'contset'"
            )
        if contset.type() is not ContinuousSet:
            raise TypeError("The component specified using the 'contset' keyword "\
                "must be a differential set")
        ds = instance.find_component(contset.cname(True))
        if ds is None:
            raise ValueError("ContinuousSet '%s' is not a valid component of the discretized "\
                "model instance" %(contset.cname(True)))

        if len(self._ncp) == 0:
            raise RuntimeError("This method should only be called after using the apply() method "\
                "to discretize the model")
        elif None in self._ncp:
            tot_ncp = self._ncp[None]
        elif ds.cname(True) in self._ncp:
            tot_ncp = self._ncp[ds.cname(True)]
        else:
            raise ValueError("ContinuousSet '%s' has not been discretized yet, please call "\
                "the apply() method with this ContinuousSet to discretize it before calling "\
                "this method" %s(ds.cname(True)))

        if var is None:
            raise TypeError("A variable must be specified")
        if var.type() is not Var:
            raise TypeError("The component specified using the 'var' keyword "\
                "must be a variable")
        tmpvar = instance.find_component(var.cname(True))
        if tmpvar is None:
            raise ValueError("Variable '%s' is not a valid component of the discretized "\
                "model instance" %(var.cname(True)))

        var = tmpvar

        if ncp is None:
            raise TypeError(
                "The number of collocation points must be specified")
        if ncp <= 0:
            raise ValueError(
                "The number of collocation points must be at least 1")
        if ncp > tot_ncp:
            raise ValueError("The number of collocation points used to interpolate "\
                "an individual variable cannot exceed the number used to discretize "\
                "the original model")
        if ncp == tot_ncp:
            # Nothing to be done
            return instance

        # Check to see if the ContinuousSet is an indexing set of the variable
        if var.dim() == 1:
            if ds not in var._index:
                raise IndexError("ContinuousSet '%s' is not an indexing set of the variable '%s'"\
                    % (ds.name,var.cname(True)))
        elif ds not in var._index_set:
            raise IndexError("ContinuousSet '%s' is not an indexing set of the variable '%s'"\
                % (ds.name,var.name))

        if var.cname(True) in self._reduced_cp:
            temp = self._reduced_cp[var.cname(True)]
            if ds.cname(True) in temp:
                raise RuntimeError("Variable '%s' has already been constrained to a reduced "\
                    "number of collocation points over ContinuousSet '%s'."\
                    % (var.cname(True), ds.cname(True)))
            else:
                temp[ds.cname(True)] = ncp
        else:
            self._reduced_cp[var.cname(True)] = {ds.cname(True): ncp}

        list_name = var.name + "_interpolation_constraints"

        instance.add_component(list_name, ConstraintList())
        conlist = instance.find_component(list_name)

        t = sorted(ds)
        fe = ds._fe
        info = get_index_information(var, ds)
        tmpidx = info['non_ds']
        idx = info['index function']

        # Iterate over non_ds indices
        for n in tmpidx:
            # Iterate over finite elements
            for i in xrange(0, len(fe) - 1):
                # Iterate over collocation points
                for k in xrange(1, tot_ncp - ncp + 1):
                    if ncp == 1:
                        # Constant over each finite element
                        conlist.add(
                            var[idx(n, i, k)] == var[idx(n, i, tot_ncp)])
                    else:
                        tmp = t.index(fe[i])
                        tmp2 = t.index(fe[i + 1])
                        ti = t[tmp + k]
                        tfit = t[tmp2 - ncp + 1:tmp2 + 1]
                        coeff = self._interpolation_coeffs(ti, tfit)
                        conlist.add(var[idx(n, i, k)] == sum(
                            var[idx(n, i, j)] * next(coeff)
                            for j in xrange(tot_ncp - ncp + 1, tot_ncp + 1)))

        return instance
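
    # A minimal usage sketch (hypothetical model m with ContinuousSet m.t
    # and Var m.u), assuming this transformation was registered as
    # 'dae.collocation':
    #   discretizer = TransformationFactory('dae.collocation')
    #   discretizer.apply_to(m, wrt=m.t, nfe=20, ncp=4)
    #   discretizer.reduce_collocation_points(m, var=m.u, ncp=1, contset=m.t)
    # constrains m.u to a single interpolation point (i.e. a constant value)
    # per finite element.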

    def _interpolation_coeffs(self, ti, tfit):
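        # Yield, for each fitting point in tfit, the Lagrange basis
        # polynomial built on tfit, evaluated at the point ti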

        for i in tfit:
            l = 1
            for j in tfit:
                if i != j:
                    l = l * (ti - j) / (i - j)
            yield l
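
A quick way to sanity-check the coefficients produced by
_interpolation_coeffs is to evaluate the same Lagrange basis standalone;
at any point ti the basis values must sum to 1 (a minimal sketch,
independent of Pyomo, with made-up points):

def lagrange_coeffs(ti, tfit):
    # Lagrange basis polynomial for each fitting point, evaluated at ti
    for i in tfit:
        l = 1.0
        for j in tfit:
            if i != j:
                l = l * (ti - j) / (i - j)
        yield l

coeffs = list(lagrange_coeffs(0.3, [0.0, 0.5, 1.0]))
assert abs(sum(coeffs) - 1.0) < 1e-12  # partition of unity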
Code example #6
class Finite_Difference_Transformation(Transformation):
    """
    Transformation that applies finite difference methods to
    DAE, ODE, or PDE models.
    """
    alias('dae.finite_difference',
          doc="Discretizes a DAE model using "
          "a finite difference method transforming the model into an NLP.")

    def __init__(self):
        super(Finite_Difference_Transformation, self).__init__()
        self._nfe = {}
        self.all_schemes = {
            'BACKWARD': (_backward_transform, _backward_transform_order2),
            'CENTRAL': (_central_transform, _central_transform_order2),
            'FORWARD': (_forward_transform, _forward_transform_order2)
        }
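
        # Each entry maps a scheme name to its (first-order, second-order)
        # transformation functions; e.g. the BACKWARD entry applies
        #   du/dt at t_k  ~=  (u(t_k) - u(t_{k-1})) / (t_k - t_{k-1})
        # to first derivatives.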

    def _apply_to(self, instance, **kwds):
        """
        Applies the transformation to a modeling instance

        Keyword Arguments:
        nfe           The desired number of finite element points to be
                      included in the discretization.
        wrt           Indicates which ContinuousSet the transformation
                      should be applied to. If this keyword argument is not
                      specified then the same scheme will be applied to all
                      ContinuousSets.
        scheme        Indicates which finite difference method to apply.
                      Options are BACKWARD, CENTRAL, or FORWARD. The default
                      scheme is the backward difference method.
        """

        tmpnfe = kwds.pop('nfe', 10)
        tmpds = kwds.pop('wrt', None)
        tmpscheme = kwds.pop('scheme', 'BACKWARD')
        self._scheme_name = tmpscheme.upper()

        if tmpds is not None:
            if tmpds.type() is not ContinuousSet:
                raise TypeError("The component specified using the 'wrt' "
                                "keyword must be a continuous set")
            elif 'scheme' in tmpds.get_discretization_info():
                raise ValueError(
                    "The discretization scheme '%s' has already "
                    "been applied to the ContinuousSet '%s'" %
                    (tmpds.get_discretization_info()['scheme'], tmpds.name))

        if tmpnfe < 1:
            raise ValueError(
                "The number of finite elements must be at least 1")

        if None in self._nfe:
            raise ValueError(
                "A general discretization scheme has already been applied "
                "to every continuous set in the model. If you would like to "
                "apply a different discretization scheme to each continuous "
                "set, you must declare a new transformation object")

        if len(self._nfe) == 0 and tmpds is None:
            # Same discretization on all ContinuousSets
            self._nfe[None] = tmpnfe
            currentds = None
        else:
            self._nfe[tmpds.name] = tmpnfe
            currentds = tmpds.name

        self._scheme = self.all_schemes.get(self._scheme_name, None)
        if self._scheme is None:
            raise ValueError("Unknown finite difference scheme '%s' specified "
                             "using the 'scheme' keyword. Valid schemes are "
                             "'BACKWARD', 'CENTRAL', and 'FORWARD'" %
                             tmpscheme)

        self._transformBlock(instance, currentds)

        return instance
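
    # A minimal usage sketch (hypothetical model m with ContinuousSet m.t):
    #   TransformationFactory('dae.finite_difference').apply_to(
    #       m, nfe=20, wrt=m.t, scheme='BACKWARD')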

    def _transformBlock(self, block, currentds):

        self._fe = {}
        for ds in block.component_objects(ContinuousSet):
            if currentds is None or currentds == ds.name or currentds is ds:
                generate_finite_elements(ds, self._nfe[currentds])
                if not ds.get_changed():
                    if len(ds) - 1 > self._nfe[currentds]:
                        logger.warn("More finite elements were found in "
                                    "ContinuousSet '%s' than the number of "
                                    "finite elements specified in apply. The "
                                    "larger number of finite elements will be "
                                    "used." % ds.name)

                self._nfe[ds.name] = len(ds) - 1
                self._fe[ds.name] = sorted(ds)
                # Adding discretization information to the ContinuousSet
                # object itself so that it can be accessed outside of the
                # discretization object
                disc_info = ds.get_discretization_info()
                disc_info['nfe'] = self._nfe[ds.name]
                disc_info['scheme'] = self._scheme_name + ' Difference'

        # Maybe check to see if any of the ContinuousSets have been changed;
        # if they haven't, then the model components need not be updated
        # or even iterated through
        expand_components(block)

        for d in block.component_objects(DerivativeVar, descend_into=True):
            dsets = d.get_continuousset_list()
            for i in set(dsets):
                if currentds is None or i.name == currentds:
                    oldexpr = d.get_derivative_expression()
                    loc = d.get_state_var()._contset[i]
                    count = dsets.count(i)
                    if count >= 3:
                        raise DAE_Error(
                            "Error discretizing '%s' with respect to '%s'. "
                            "Current implementation only allows for taking the"
                            " first or second derivative with respect to "
                            "a particular ContinuousSet" % (d.name, i.name))
                    scheme = self._scheme[count - 1]
                    newexpr = create_partial_expression(
                        scheme, oldexpr, i, loc)
                    d.set_derivative_expression(newexpr)

            # Reclassify DerivativeVar if all indexing ContinuousSets have
            # been discretized. Add discretization equations to the
            # DerivativeVar's parent block.
            if d.is_fully_discretized():
                add_discretization_equations(d.parent_block(), d)
                d.parent_block().reclassify_component_type(d, Var)

                # Keep track of any reclassified DerivativeVar components so
                # that the Simulator can easily identify them if the model
                # is simulated after discretization
                # TODO: Update the discretization transformations to use
                # a Block to add things to the model and store discretization
                # information. Using a list for now because the simulator
                # does not yet support models containing active Blocks
                reclassified_list = getattr(
                    block, '_pyomo_dae_reclassified_derivativevars', None)
                if reclassified_list is None:
                    block._pyomo_dae_reclassified_derivativevars = list()
                    reclassified_list = \
                        block._pyomo_dae_reclassified_derivativevars

                reclassified_list.append(d)

        # Reclassify Integrals if all ContinuousSets have been discretized
        if block_fully_discretized(block):

            if block.contains_component(Integral):
                for i in block.component_objects(Integral, descend_into=True):
                    i.reconstruct()
                    i.parent_block().reclassify_component_type(i, Expression)
                # If a model contains integrals, they are most likely to
                # appear in the objective function, which will need to be
                # reconstructed after the model is discretized.
                for k in block.component_objects(Objective, descend_into=True):
                    # TODO: check this, reconstruct might not work
                    k.reconstruct()
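
The backward scheme wired into all_schemes replaces each derivative with a
first-order difference quotient. A standalone sketch of the same rule on a
known profile (not part of the Pyomo code above; the grid and tolerance are
made up):

import math

h = 0.01
t = [i * h for i in range(101)]            # uniform grid on [0, 1]
u = [math.sin(tk) for tk in t]             # state profile u(t) = sin(t)
# backward difference at every point after the first
dudt = [(u[k] - u[k - 1]) / h for k in range(1, len(t))]
# first-order accurate: the truncation error is O(h)
err = max(abs(d - math.cos(tk)) for d, tk in zip(dudt, t[1:]))
assert err < h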
Code example #7
File: expand_connectors.py  Project: xfLee/pyomo
class ExpandConnectors(Transformation):
    alias('core.expand_connectors',
          doc="Expand all connectors in the model to simple constraints")

    def _apply_to(self, instance, **kwds):
        if __debug__ and logger.isEnabledFor(logging.DEBUG):  #pragma:nocover
            logger.debug("Calling ConnectorExpander")

        connectorsFound = False
        for c in instance.component_data_objects(Connector):
            connectorsFound = True
            break
        if not connectorsFound:
            return

        if __debug__ and logger.isEnabledFor(logging.DEBUG):  #pragma:nocover
            logger.debug("   Connectors found!")

        self._name_buffer = {}

        #
        # At this point, there are connectors in the model, so we must
        # look for constraints that involve connectors and expand them.
        #
        # List of the connectors in the order in which we found them
        # (this should be deterministic, provided that the user's model
        # is deterministic)
        connector_list = []
        # list of constraints with connectors: tuple(constraint, connector_set)
        # (this should be deterministic, provided that the user's model
        # is deterministic)
        constraint_list = []
        # ID of the next connector group (set of matched connectors)
        groupID = 0
        # connector_groups starts out as a dict of {id(set): (groupID, set)}
        # If you sort by the groupID, then this will be deterministic.
        connector_groups = dict()
        # map of connector to the set of connectors that must match it
        matched_connectors = ComponentMap()
        # The set of connectors found in the current constraint
        found = ComponentSet()

        connector_types = set([SimpleConnector, _ConnectorData])
        for constraint in instance.component_data_objects(
                Constraint, sort=SortComponents.deterministic):
            ref = None
            for c in EXPR.identify_components(constraint.body,
                                              connector_types):
                found.add(c)
                if c in matched_connectors:
                    if ref is None:
                        # The first connector in this constraint has
                        # already been seen.  We will use that Set as
                        # the reference
                        ref = matched_connectors[c]
                    elif ref is not matched_connectors[c]:
                        # We already have a reference group; merge this
                        # new group into it.
                        #
                        # Optimization: this merge is linear in the size
                        # of the src set.  If the reference set is
                        # smaller, save time by switching to a new
                        # reference set.
                        src = matched_connectors[c]
                        if len(ref) < len(src):
                            ref, src = src, ref
                        ref.update(src)
                        for _ in src:
                            matched_connectors[_] = ref
                        del connector_groups[id(src)]
                    # else: pass
                    #   The new group *is* the reference group;
                    #   there is nothing to do.
                else:
                    # The connector has not been seen before.
                    connector_list.append(c)
                    if ref is None:
                        # This is the first connector in the constraint:
                        # start a new reference set.
                        ref = ComponentSet()
                        connector_groups[id(ref)] = (groupID, ref)
                        groupID += 1
                    # This connector hasn't been seen.  Record it.
                    ref.add(c)
                    matched_connectors[c] = ref
            if ref is not None:
                constraint_list.append((constraint, found))
                found = ComponentSet()

        # Validate all connector sets and expand the empty ones
        known_conn_sets = {}
        for groupID, conn_set in sorted(itervalues(connector_groups)):
            known_conn_sets[id(conn_set)] \
                = self._validate_and_expand_connector_set(conn_set)

        # Expand each constraint
        for constraint, conn_set in constraint_list:
            cList = ConstraintList()
            constraint.parent_block().add_component(
                '%s.expanded' % (constraint.getname(
                    fully_qualified=False, name_buffer=self._name_buffer), ),
                cList)
            connId = next(iter(conn_set))
            ref = known_conn_sets[id(matched_connectors[connId])]
            for k, v in sorted(iteritems(ref)):
                if v[1] >= 0:
                    _iter = v[0]
                else:
                    _iter = (v[0], )
                for idx in _iter:
                    substitution = {}
                    for c in conn_set:
                        if v[1] >= 0:
                            new_v = c.vars[k][idx]
                        elif k in c.aggregators:
                            new_v = c.vars[k].add()
                        else:
                            new_v = c.vars[k]
                        substitution[id(c)] = new_v
                    cList.add((constraint.lower,
                               EXPR.clone_expression(constraint.body,
                                                     substitution),
                               constraint.upper))
            constraint.deactivate()

        # Now, go back and implement VarList aggregators
        for conn in connector_list:
            block = conn.parent_block()
            for var, aggregator in iteritems(conn.aggregators):
                c = Constraint(expr=aggregator(block, conn.vars[var]))
                block.add_component(
                    '%s.%s.aggregate' %
                    (conn.getname(fully_qualified=True,
                                  name_buffer=self._name_buffer), var), c)

    def _validate_and_expand_connector_set(self, connectors):
        ref = {}
        # First, go through the connectors and get the superset of all fields
        for c in connectors:
            for k, v in iteritems(c.vars):
                if k in ref:
                    # We have already seen this var
                    continue
                if v is None:
                    # This is an implicit var
                    continue
                # OK: New var, so add it to the reference list
                _len = (
                    #-3 if v is None else
                    -2 if k in c.aggregators else
                    -1 if not hasattr(v, 'is_indexed') or not v.is_indexed()
                    else len(v))
                ref[k] = (v, _len, c)

        if not ref:
            logger.warning(
                "Cannot identify a reference connector: no connectors "
                "in the connector set have assigned variables:\n\t(%s)" %
                (', '.join(sorted(c.name for c in connectors)), ))
            return ref

        # Now make sure that connectors match
        empty_or_partial = []
        for c in connectors:
            c_is_partial = False
            if not c.vars:
                # This is an empty connector and should be defined with
                # "auto" vars
                empty_or_partial.append(c)
                continue

            for k, v in iteritems(ref):
                if k not in c.vars:
                    raise ValueError(
                        "Connector mismatch: Connector '%s' missing variable "
                        "'%s' (appearing in reference connector '%s')" %
                        (c.name, k, v[2].name))
                _v = c.vars[k]
                if _v is None:
                    if not c_is_partial:
                        empty_or_partial.append(c)
                        c_is_partial = True
                    continue
                _len = (-3 if _v is None else -2 if k in c.aggregators else
                        -1 if not hasattr(_v, 'is_indexed')
                        or not _v.is_indexed() else len(_v))
                if (_len >= 0) ^ (v[1] >= 0):
                    raise ValueError(
                        "Connector mismatch: Connector variable '%s' mixing "
                        "indexed and non-indexed targets on connectors '%s' "
                        "and '%s'" % (k, v[2].name, c.name))
                if _len >= 0 and _len != v[1]:
                    raise ValueError(
                        "Connector mismatch: Connector variable '%s' index "
                        "mismatch (%s elements in reference connector '%s', "
                        "but %s elements in connector '%s')" %
                        (k, v[1], v[2].name, _len, c.name))
                if v[1] >= 0 and len(v[0].index_set() ^ _v.index_set()):
                    raise ValueError(
                        "Connector mismatch: Connector variable '%s' has "
                        "mismatched indices on connectors '%s' and '%s'" %
                        (k, v[2].name, c.name))

        # as we are adding things to the model, sort by key so that
        # the order things are added is deterministic
        sorted_refs = sorted(iteritems(ref))
        if len(empty_or_partial) > 1:
            # This is expensive (names aren't cheap), but does result in
            # a deterministic ordering
            empty_or_partial.sort(key=lambda x: x.getname(
                fully_qualified=True, name_buffer=self._name_buffer))

        # Fill in any empty connectors
        for c in empty_or_partial:
            block = c.parent_block()
            for k, v in sorted_refs:
                if k in c.vars and c.vars[k] is not None:
                    continue

                if v[1] >= 0:
                    idx = (v[0].index_set(), )
                else:
                    idx = ()
                var_args = {}
                try:
                    var_args['domain'] = v[0].domain
                except AttributeError:
                    pass
                try:
                    var_args['bounds'] = v[0].bounds
                except AttributeError:
                    pass
                new_var = Var(*idx, **var_args)
                block.add_component(
                    '%s.auto.%s' %
                    (c.getname(fully_qualified=True,
                               name_buffer=self._name_buffer), k), new_var)
                if idx:
                    for i in idx[0]:
                        new_var[i].domain = v[0][i].domain
                        new_var[i].setlb(v[0][i].lb)
                        new_var[i].setub(v[0][i].ub)
                c.vars[k] = new_var

        return ref
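
The group-merging loop above (shared with the Arc/port expansion) is a
union-find over ComponentSets with a size-based swap, so each merge is
linear in the smaller set. A minimal standalone sketch of the same pattern
using plain Python sets (items 'a' through 'd' are made up):

matched = {}   # item -> the set of items it must match
groups = {}    # id(set) -> set; mirrors connector_groups minus the groupID

def merge(items):
    ref = None
    for x in items:
        if x in matched:
            if ref is None:
                ref = matched[x]         # reuse the first known group
            elif ref is not matched[x]:
                src = matched[x]
                if len(ref) < len(src):  # merge the smaller into the larger
                    ref, src = src, ref
                ref.update(src)
                for y in src:
                    matched[y] = ref
                del groups[id(src)]
        else:
            if ref is None:
                ref = set()
                groups[id(ref)] = ref
            ref.add(x)
            matched[x] = ref

merge(['a', 'b'])
merge(['c', 'd'])
merge(['b', 'c'])   # links the two groups into one
assert matched['a'] is matched['d'] and len(groups) == 1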