def set_state(self, X):
        """Take the given state vector and set its values into the
        correct state variables.

        X: 1D array-like
            Flattened state vector; its total length must equal the sum
            of the flattened sizes of all states from ``list_states()``.

        Raises RuntimeError for VariableTree states (unsupported) and
        ValueError when the vector size does not match.
        """
        unused = len(X)
        idx = 0

        for name in self.list_states():
            val = getattr(self, name)
            flatval = flattened_value(name, val)
            size = len(flatval)
            newval = X[idx:idx + size]
            unused -= size
            idx += size
            try:
                iter(val)
            except TypeError:
                # Non-iterable: the state is a scalar (or an unsupported tree).
                if has_interface(val, IVariableTree):
                    raise RuntimeError("VariableTree states are not supported yet.")
                else:
                    if len(newval) != 1:
                        # Bug fix: the original message was truncated mid-sentence
                        # and no exception type was passed to raise_exception.
                        self.raise_exception("Trying to set a scalar value '%s' "
                                             "with an array of size %d" %
                                             (name, len(newval)), ValueError)
                    setattr(self, name, newval[0])
            else:
                # Bug fix: the original assigned 'flatval' (the state's own
                # current value) back to the state, making this a no-op.
                # Assign the new slice instead; copy so the state does not
                # alias a view into X.
                setattr(self, name, newval.copy())

        if unused != 0:
            self.raise_exception("State vector size does not match flattened size of state variables.",
                                 ValueError)
    def get_residuals(self):
        """Return a vector of residual values."""

        resids = []
        if self._run_explicit == False:
            for name in self.list_residuals():
                resids.extend(flattened_value(name, getattr(self, name)))
        return np.array(resids)
Exemplo n.º 3
0
 def get_flattened_value(self, path):
     """Return the named value, which may include
     an array index, as a flattened array of floats.  If
     the value is not flattenable into an array of floats,
     raise a TypeError.
     """
     # The index portion of 'path' is resolved by get_val_and_index;
     # the returned index itself is unused here (was a dead local).
     val, _ = get_val_and_index(self, path)
     return flattened_value(path, val)
Exemplo n.º 4
0
    def get_dependents(self, fixed_point=False):
        """Returns a list of current values of the dependents. This includes
        both constraints and severed sources.

        fixed_point: bool
            Set to True if we are doing fixed-point iteration instead of a more
            general solve. In such a case, we need to swap the order of the
            constraints to match the parameter order. We also may need to swap
            signs on the constraints.
        """

        deps = self._parent.eval_eq_constraints(self.scope)

        # Reorder (and possibly sign-flip) the constraints so they line up
        # with the parameter ordering for fixed-point iteration.
        if fixed_point:
            newdeps = zeros(len(deps))
            eqcons = self._parent.get_eq_constraints()
            # Idiom fixes: enumerate() replaces the hand-rolled old_j/new_j
            # counters, and the unused 'con_targets' lookup was dropped.
            for old_j, value in enumerate(eqcons.itervalues()):
                for new_j, params in enumerate(self._parent.list_param_group_targets()):
                    if params[0] == value.rhs.text:
                        newdeps[new_j] = deps[old_j]
                    elif params[0] == value.lhs.text:
                        # Constraint written the other way around: flip sign.
                        newdeps[new_j] = -deps[old_j]
            deps = newdeps

        # Severed edges contribute (source - target) differences as
        # additional residuals.
        sev_deps = []
        for src, target in self._severed_edges:

            # Grouped targets arrive as sequences; use the first entry.
            if not isinstance(target, str):
                target = target[0]

            target = from_PA_var(target)
            src = from_PA_var(src)
            src_val = self.scope.get(src)
            targ_val = self.scope.get(target)
            res = flattened_value(src, src_val) - flattened_value(target, targ_val)

            sev_deps.extend(res)

        return hstack((deps, sev_deps))
Exemplo n.º 5
0
    def get_dependents(self):
        """Returns a list of current values of the dependents. This includes
        both constraints and severed sources.
        """

        # Equality-constraint residuals come straight from the parent driver.
        constraint_vals = self._parent.eval_eq_constraints(self.scope)

        # Each severed edge adds (source - target) as an extra residual.
        severed_vals = []
        for src, target in self._severed_edges:

            if not isinstance(target, str):
                # Grouped targets are sequences; take the first entry.
                target = target[0]

            src_name = from_PA_var(src)
            targ_name = from_PA_var(target)
            diff = flattened_value(src_name, self.scope.get(src_name)) \
                   - flattened_value(targ_name, self.scope.get(targ_name))
            severed_vals.extend(diff)

        return hstack((constraint_vals, severed_vals))
Exemplo n.º 6
0
    def get_inputs(self, x):
        """Return matrix of flattened values from input edges."""

        for srcs in self.inputs:

            # Parameter groups: normalize a bare name into a one-item list.
            if isinstance(srcs, basestring):
                srcs = [srcs]

            for src in srcs:
                flat = flattened_value(src, self.scope.get(src))
                start, end = self.in_bounds[src]
                if isinstance(flat, ndarray):
                    # Copy so later scope changes don't alias into x.
                    x[start:end] = flat.copy()
                else:
                    x[start:end] = flat
Exemplo n.º 7
0
    def get_independents(self):
        """Returns a list of current values of the independents. This
        includes both parameters and severed targets.
        """

        param_vals = self._parent.eval_parameters(self.scope)

        severed_vals = []
        for _, target in self._severed_edges:

            # Grouped targets come in as sequences; use the first entry.
            if not isinstance(target, str):
                target = target[0]

            name = from_PA_var(target)
            severed_vals.extend(flattened_value(name, self.scope.get(name)))

        return hstack((param_vals, severed_vals))
Exemplo n.º 8
0
 def apply_inputs(self, scope):
     """Take the values of all of the inputs in this case and apply them
     to the specified scope.
     """
     scope._case_uuid = self.uuid
     for name, value in self._inputs.items():
         expr = self._exprs.get(name)
         if expr:
             # An expression handles its own assignment into the scope.
             expr.set(value, scope)
         else:
             scope.set(name, value)
             # FIXME: this extra setting of the vector is messy...
             system = getattr(scope, '_system', None)
             if system is not None:
                 uvec = system.vec.get('u')
                 if uvec and name in uvec:
                     uvec[name][:] = flattened_value(name, value)
Exemplo n.º 9
0
    def get_outputs(self, x):
        """Return matrix of flattened values from output edges."""

        for src in self.outputs:

            # Speedhack: getting an indexed var in OpenMDAO is slow, so
            # fetch the base variable and apply the index ourselves.
            if '[' in src:
                basekey, _, index = src.partition('[')
                base = self.scope.get(basekey)
                # Bug fix: exec() cannot bind a function local in Python 3,
                # so 'src_val' would be undefined; eval() returns the value
                # directly.  'index' still carries the trailing ']' from
                # partition, completing the expression.
                # NOTE(review): eval on a var path is as (un)safe as the
                # original exec -- these paths are internally generated.
                src_val = eval("base[%s" % index)
            else:
                src_val = self.scope.get(src)

            src_val = flattened_value(src, src_val)
            i1, i2 = self.out_bounds[src]
            if isinstance(src_val, ndarray):
                x[i1:i2] = src_val.copy()
            else:
                x[i1:i2] = src_val
Exemplo n.º 10
0
 def get_state(self):
     """Return the current flattened state vector."""
     # Concatenate the flattened value of every state, in list_states() order.
     flat = [x
             for name in self.list_states()
             for x in flattened_value(name, getattr(self, name))]
     return np.array(flat)
Exemplo n.º 11
0
    def _matvecFWD(self, arg):
        '''Callback function for performing the matrix vector product of the
        state-to-residual Jacobian with an incoming vector arg.'''

        result = np.zeros(len(arg))

        state_names = self.list_states()
        resid_names = self.list_residuals()
        inputs = {}
        outputs = {}

        # Slice arg into per-variable pieces: states first, then residuals.
        offset = 0
        for name in state_names:
            width = len(flattened_value(name, getattr(self, name)))
            inputs[name] = arg[offset:offset + width].copy()
            offset += width

        for name in resid_names:
            width = len(flattened_value(name, getattr(self, name)))
            inputs[name] = arg[offset:offset + width].copy()
            outputs[name] = arg[offset:offset + width].copy()
            offset += width

        applyJ(self, inputs, outputs, [], self._shape_cache, J=self._cache_J)

        # Assemble the result vector with the same layout as arg.
        offset = 0

        # Each state input adds an identity equation.
        for name in state_names:
            width = len(flattened_value(name, getattr(self, name)))
            result[offset:offset + width] = arg[offset:offset + width].copy()
            offset += width

        for name in resid_names:
            width = len(flattened_value(name, getattr(self, name)))
            result[offset:offset + width] = outputs[name]
            offset += width

        return result
Exemplo n.º 12
0
 def get_state(self):
     """Return the current flattened state vector."""
     values = []
     # Walk the states in declaration order, flattening each in turn.
     for state_name in self.list_states():
         state_val = getattr(self, state_name)
         values.extend(flattened_value(state_name, state_val))
     return np.array(values)
Exemplo n.º 13
0
    def _matvecFWD(self, arg):
        '''Callback function for performing the matrix vector product of the
        state-to-residual Jacobian with an incoming vector arg.'''

        def flat_size(name):
            # Flattened width of one state/residual variable.
            return len(flattened_value(name, getattr(self, name)))

        result = np.zeros(len(arg))

        states = self.list_states()
        residuals = self.list_residuals()
        inputs = {}
        outputs = {}

        # Unpack arg: state slices first, then residual slices.
        pos = 0
        for name in states:
            n = flat_size(name)
            inputs[name] = arg[pos:pos + n].copy()
            pos += n

        for name in residuals:
            n = flat_size(name)
            inputs[name] = arg[pos:pos + n].copy()
            outputs[name] = arg[pos:pos + n].copy()
            pos += n

        applyJ(self, inputs, outputs, [], self._shape_cache, J=self._cache_J)

        # Repack into the result vector using the same layout.
        pos = 0

        # Each state input contributes an identity block.
        for name in states:
            n = flat_size(name)
            result[pos:pos + n] = arg[pos:pos + n].copy()
            pos += n

        for name in residuals:
            n = flat_size(name)
            result[pos:pos + n] = outputs[name]
            pos += n

        return result
Exemplo n.º 14
0
 def get_residuals(self):
     """Return a vector of residual values."""
     # Flatten every residual variable and concatenate, preserving order.
     flat = [v
             for name in self.list_residuals()
             for v in flattened_value(name, getattr(self, name))]
     return np.array(flat)
Exemplo n.º 15
0
    def get_dependents(self, fixed_point=False):
        """Returns a list of current values of the dependents. This includes
        both constraints and severed sources.

        fixed_point: bool
            Set to True if we are doing fixed-point iteration instead of a more
            general solve. In such a case, we need to swap the order of the
            constraints to match the parameter order. We also may need to swap
            signs on the constraints.
        """

        parent = self.parent
        deps = array(parent.eval_eq_constraints(self.scope))

        # Reorder for fixed point.
        if fixed_point is True:
            # Map each constraint side's expression text to its (offset,
            # size) within the flattened deps vector.
            rhs_map = {}
            lhs_map = {}
            offset = 0
            for con in parent.get_eq_constraints().itervalues():
                rhs_map[con.rhs.text] = (offset, con.size)
                lhs_map[con.lhs.text] = (offset, con.size)
                offset += con.size

            perm = empty(len(deps), dtype="int")
            sign = empty(len(deps), dtype="int")
            pos = 0
            for params in parent.list_param_group_targets():
                # Match against the rhs first; a match on the lhs means the
                # constraint is written the other way around, so flip sign.
                try:
                    start, size = rhs_map[params[0]]
                    perm[pos:pos + size] = start + arange(0, size, dtype="int")
                    sign[pos:pos + size] = ones((size, ))
                    pos += size
                except KeyError:
                    try:
                        start, size = lhs_map[params[0]]
                        perm[pos:pos + size] = start + arange(0, size, dtype="int")
                        sign[pos:pos + size] = -1 * ones(size)
                        pos += size
                    except KeyError:
                        pass  # TODO: need to throw an error here. Why was there a param that didn't show up in the constraint

            # Apply the new ordering and sign convention.
            deps = deps[perm] * sign

        # Severed edges contribute (source - target) as extra residuals.
        sev_deps = []
        for src, target in self._severed_edges:

            if not isinstance(target, str):
                target = target[0]

            target = from_PA_var(target)
            src = from_PA_var(src)
            res = flattened_value(src, self.scope.get(src)) \
                - flattened_value(target, self.scope.get(target))

            sev_deps.extend(res)

        return hstack((deps, sev_deps))
Exemplo n.º 16
0
    def get_dependents(self, fixed_point=False):
        """Returns a list of current values of the dependents. This includes
        both constraints and severed sources.

        fixed_point: bool
            Set to True if we are doing fixed-point iteration instead of a more
            general solve. In such a case, we need to swap the order of the
            constraints to match the parameter order. We also may need to swap
            signs on the constraints.
        """

        parent = self.parent
        deps = array(parent.eval_eq_constraints(self.scope))
        # Reorder for fixed point
        if fixed_point is True:
            eqcons = parent.get_eq_constraints()

            # Map each side's expression text to its (start offset, size)
            # within the flattened deps vector, so each parameter can find
            # its matching constraint block below.
            rhs = {}
            lhs = {}
            i = 0
            for value in eqcons.itervalues():
                #make a mapping of position of each constraint
                rhs[value.rhs.text] = (i, value.size)
                lhs[value.lhs.text] = (i, value.size)
                i += value.size

            # new_dep_index permutes deps into parameter order;
            # new_dep_sign flips entries whose constraint was written with
            # the parameter on the lhs (opposite direction).
            new_dep_index = empty(len(deps), dtype="int")
            new_dep_sign = empty(len(deps), dtype="int")
            k = 0
            for params in parent.list_param_group_targets():
                #for each param, grab the right map value and set the sign convention
                try:
                    j, size = rhs[params[0]]
                    new_dep_index[k:k+size] = j+arange(0, size, dtype="int")
                    new_dep_sign[k:k+size] = ones((size,))
                    k += size
                except KeyError: #wasn't in the rhs dict, try the lhs
                    try:
                        j, size = lhs[params[0]]
                        new_dep_index[k:k+size] = j+arange(0, size, dtype="int")
                        new_dep_sign[k:k+size] = -1*ones(size)
                        k += size
                    except KeyError:
                        pass #TODO: need to throw an error here. Why was there a param that didn't show up in the constraint

            #reset the deps array to the new order and sign
            deps = deps[new_dep_index]*new_dep_sign


        # Severed edges contribute (source - target) differences as
        # additional residuals.
        sev_deps = []
        for src, target in self._severed_edges:

            # NOTE(review): non-string targets look like grouped-target
            # sequences; only the first entry is used -- confirm upstream.
            if not isinstance(target, str):
                target = target[0]

            target = from_PA_var(target)
            src = from_PA_var(src)
            src_val = self.scope.get(src)
            targ_val = self.scope.get(target)
            res = flattened_value(src, src_val) - flattened_value(target, targ_val)

            sev_deps.extend(res)

        return hstack((deps, sev_deps))