Code example #1
    def _jax_linearize(self):
        """
        Compute the jacobian using jax.

        This updates self._jacobian.
        """
        func = self._apply_nonlinear_func
        # argnums specifies which position args are to be differentiated
        inames = list(func.get_input_names())
        argnums = [i for i, m in enumerate(func._inputs.values()) if 'is_option' not in m]
        if len(argnums) == len(inames):
            argnums = None  # speedup if there are no static args
        osize = len(self._outputs)
        isize = len(self._inputs) + osize
        invals = list(self._ordered_func_invals(self._inputs, self._outputs))
        coloring = self._coloring_info['coloring']

        if self._mode == 'rev':  # use reverse mode to compute derivs
            outvals = tuple(self._outputs.values())
            tangents = self._get_tangents(outvals, 'rev', coloring)
            if coloring is not None:
                j = [np.asarray(a).reshape((a.shape[0], shape_to_len(a.shape[1:])))
                     for a in jac_reverse(self._apply_nonlinear_func_jax, argnums,
                                          tangents)(*invals)]
                j = coloring.expand_jac(np.hstack(self._reorder_col_chunks(j)), 'rev')
            else:
                j = []
                for a in jac_reverse(self._apply_nonlinear_func_jax, argnums, tangents)(*invals):
                    a = np.asarray(a)
                    if a.ndim < 2:
                        a = a.reshape((a.size, 1))
                    else:
                        a = a.reshape((a.shape[0], shape_to_len(a.shape[1:])))
                    j.append(a)
                j = np.hstack(self._reorder_col_chunks(j)).reshape((osize, isize))
        else:
            if coloring is not None:
                tangents = self._get_tangents(invals, 'fwd', coloring, argnums,
                                              trans=self._get_jac2func_inds(self._inputs,
                                                                            self._outputs))
                j = [np.asarray(a).reshape((shape_to_len(a.shape[:-1]), a.shape[-1]))
                     for a in jac_forward(self._apply_nonlinear_func_jax, argnums,
                                          tangents)(*invals)]
                j = coloring.expand_jac(np.vstack(j), 'fwd')
            else:
                tangents = self._get_tangents(invals, 'fwd', coloring, argnums)
                j = []
                for a in jac_forward(self._apply_nonlinear_func_jax, argnums, tangents)(*invals):
                    a = np.asarray(a)
                    if a.ndim < 2:
                        a = a.reshape((1, a.size))
                    else:
                        a = a.reshape((shape_to_len(a.shape[:-1]), a.shape[-1]))
                    j.append(a)
                j = self._reorder_cols(np.vstack(j).reshape((osize, isize)))

        self._jacobian.set_dense_jac(self, j)
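
A note on the helper every example on this page shares: the body of `shape_to_len` is not shown here. A minimal stand-in, assuming it simply returns the number of elements implied by a shape tuple (passing None through for an unknown shape), would be:

    import numpy as np

    def shape_to_len(shape):
        # hypothetical stand-in for OpenMDAO's helper: the product of the
        # dimensions; the empty tuple () is a scalar shape with one element
        if shape is None:
            return None
        return int(np.prod(shape))

    assert shape_to_len((3, 4)) == 12
    assert shape_to_len(()) == 1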
Code example #2
    def _get_shapes(self, shape, dist_shape):
        if shape is None:
            return None, None

        shape = shape2tuple(shape)
        if self._flat_src:
            shape = (shape_to_len(shape), )

        if dist_shape is None:
            return shape, shape

        dist_shape = shape2tuple(dist_shape)
        if self._flat_src:
            dist_shape = (shape_to_len(dist_shape), )

        return shape, dist_shape
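
`shape2tuple` is likewise not shown on this page. A rough stand-in, assuming it normalizes an int or sequence shape into a plain tuple, is:

    def shape2tuple(shape):
        # hypothetical stand-in: an int becomes a 1-tuple, sequences become tuples
        if isinstance(shape, int):
            return (shape,)
        return tuple(shape)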
Code example #3
    def _get_jac2func_inds(self, inputs, outputs):
        """
        Return a translation array from jac column indices into function input ordering.

        Parameters
        ----------
        inputs : Vector
            The input vector.
        outputs : Vector
            The output vector (contains the states).

        Returns
        -------
        ndarray
            Index translation array
        """
        if self._jac2func_inds is None:
            inds = np.arange(len(outputs) + len(inputs), dtype=INT_DTYPE)
            indict = {}
            start = end = 0
            for n, meta in self._apply_nonlinear_func._inputs.items():
                if 'is_option' not in meta:
                    end += shape_to_len(meta['shape'])
                    indict[n] = inds[start:end]
                    start = end

            inds = [indict[n] for n in chain(outputs, inputs)]
            self._jac2func_inds = np.concatenate(inds)

        return self._jac2func_inds
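
The pattern above, slicing one arange into per-variable chunks and concatenating the chunks in a different order, is a cheap way to build a permutation array. A standalone illustration (the names and sizes are made up):

    import numpy as np

    sizes = {'a': 2, 'b': 3, 'c': 1}            # variable sizes, function order
    inds = np.arange(sum(sizes.values()))
    chunks, start = {}, 0
    for name, sz in sizes.items():
        chunks[name] = inds[start:start + sz]
        start += sz

    # reorder into jacobian column order, e.g. outputs ('c') then inputs
    print(np.concatenate([chunks[n] for n in ('c', 'a', 'b')]))   # [5 0 1 2 3 4]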
Code example #4
    def as_array(self, copy=False, flat=True):
        """
        Return an index array into a flat array.

        Parameters
        ----------
        copy : bool
            If True, make sure the array returned is a copy.
        flat : bool
            If True, return a flat array.

        Returns
        -------
        ndarray
            The index array into a flat array.
        """
        if self._src_shape is None:
            raise ValueError(
                "Can't determine extent of array because source shape is not known."
            )

        idxs = np.arange(shape_to_len(self._src_shape),
                         dtype=np.int32).reshape(self._src_shape)

        if flat:
            return idxs[self()].ravel()
        else:
            return idxs[self()]
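
The `arange(...).reshape(src_shape)` trick converts any index expression into the equivalent indices of the flattened source, e.g.:

    import numpy as np

    src_shape = (3, 4)
    idxs = np.arange(np.prod(src_shape)).reshape(src_shape)
    print(idxs[1:, 2:].ravel())   # flat positions of the sub-block: [ 6  7 10 11]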
Code example #5
    def __call__(self, idx, src_shape=None, flat_src=False):
        """
        Return an Indexer instance based on the passed indices/slices.

        Parameters
        ----------
        idx : int, ndarray, slice, or tuple
            Some sort of index/indices/slice.
        src_shape : tuple or None
            Source shape if known.
        flat_src : bool
            If True, indices are into a flat source.

        Returns
        -------
        Indexer
            The Indexer instance we created based on the args.
        """
        if idx is ...:
            idxer = EllipsisIndexer((idx, ), flat_src=flat_src)
        elif isinstance(idx, int):
            idxer = IntIndexer(idx, flat_src=flat_src)
        elif isinstance(idx, slice):
            idxer = SliceIndexer(idx, flat_src=flat_src)

        elif isinstance(idx, tuple):
            multi = len(idx) > 1
            for i in idx:
                if i is ...:
                    multi = len(
                        idx
                    ) > 2  # ... doesn't count toward limit of dimensions
                    idxer = EllipsisIndexer(idx, flat_src=flat_src)
                    break
            else:
                idxer = MultiIndexer(idx, flat_src=flat_src)
            if flat_src and multi:
                raise RuntimeError(
                    "Can't use a multdimensional index into a flat source.")
        else:
            arr = np.atleast_1d(idx)
            if arr.ndim == 1:
                idxer = ArrayIndexer(arr, flat_src=flat_src)
            else:
                issue_warning(
                    "Using a non-tuple sequence for multidimensional indexing is "
                    "deprecated; use `arr[tuple(seq)]` instead of `arr[seq]`. In the "
                    "future this will be interpreted as an array index, "
                    "`arr[np.array(seq)]`, which will result either in an error or a "
                    "different result.")
                idxer = MultiIndexer(tuple(idx), flat_src=flat_src)

        if src_shape is not None:
            if flat_src:
                src_shape = (shape_to_len(src_shape), )
            idxer.set_src_shape(src_shape)

        return idxer
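
The deprecation warning mirrors NumPy's own: a tuple indexes one element per axis, while an array selects along the first axis, so the two readings of a plain sequence diverge. For example:

    import numpy as np

    a = np.arange(12).reshape(3, 4)
    print(a[(1, 2)])             # tuple index: the element a[1, 2] -> 6
    print(a[np.array([1, 2])])   # array index: rows 1 and 2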
Code example #6
    def indexed_src_size(self):
        """
        Return the size of the result if the index were applied to the source.

        Returns
        -------
        int
            Size of flattened indices.
        """
        return shape_to_len(self.indexed_src_shape)
Code example #7
    def add_output(self, name, val=1.0, units=None):
        """
        Add an independent variable to this component.

        This should never be called by a user, as it skips all checks.

        Parameters
        ----------
        name : str
            Name of the variable in this component's namespace.
        val : float or list or tuple or ndarray
            The initial value of the variable being added in user-defined units. Default is 1.0.
        units : str or None
            Units in which the output variables will be provided to the component during execution.
            Default is None, which means it has no units.

        Returns
        -------
        dict
            Metadata for added variable.

        """
        # Add the output quickly.
        # We don't need to check for errors because we get the value straight from a
        # source, and ivc metadata is minimal.
        value, shape = ensure_compatible(name, val, None)
        metadata = {
            'val': value,
            'shape': shape,
            'size': shape_to_len(shape),
            'units': units,
            'res_units': None,
            'desc': '',
            'distributed': False,
            'tags': set(),
            'ref': 1.0,
            'ref0': 0.0,
            'res_ref': 1.0,
            'lower': None,
            'upper': None,
            'shape_by_conn': False,
            'copy_shape': None
        }

        self._static_var_rel2meta[name] = metadata
        self._static_var_rel_names['output'].append(name)
        self._var_added(name)
        return metadata
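
A hedged sketch of the public API this fast path backs:

    import openmdao.api as om

    ivc = om.IndepVarComp()
    ivc.add_output('x', val=[1.0, 2.0], units='m')   # metadata built as above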
Code example #8
 def _check_bounds(self):
     """
     Check that indices are within the bounds of the source shape.
     """
     # a slice with start or stop outside of the source range is allowed in numpy arrays
     # and just results in an empty array, but in OpenMDAO that behavior would probably be
     # unintended, so for now make it an error.
     if self._src_shape is not None:
         start = self._slice.start
         stop = self._slice.stop
         sz = shape_to_len(self._dist_shape)
         if (start is not None and (start >= sz or start < -sz)
                 or (stop is not None and (stop > sz or stop < -sz))):
             raise IndexError(
                 f"{self._slice} is out of bounds of the source shape "
                 f"{self._dist_shape}.")
Code example #9
 def _check_bounds(self):
     """
     Check that indices are within the bounds of the source shape.
     """
     if self._src_shape is not None and self._arr.size > 0:
         src_size = shape_to_len(self._dist_shape)
         amax = np.max(self._arr)
         ob = None
          if amax >= src_size or -amax > src_size:
             ob = amax
         if ob is None:
             amin = np.min(self._arr)
             if amin < 0 and -amin > src_size:
                 ob = amin
         if ob is not None:
             raise IndexError(
                 f"index {ob} is out of bounds for source dimension of size "
                 f"{src_size}.")
Code example #10
    def indexed_src_shape(self):
        """
        Return the shape of the result if the indices were applied to a source array.

        Returns
        -------
        tuple
            The shape of the result.
        """
        s = self.shaped_instance()
        if s is None:
            raise RuntimeError(
                f"Can't get indexed_src_shape of {self} because source shape "
                "is unknown.")
        if self._flat_src:
            return resolve_shape(shape_to_len(self._src_shape))[self.flat()]
        else:
            return resolve_shape(self._src_shape)[self()]
Code example #11
    def compute_partials(self, inputs, partials):
        """
        Compute sub-jacobian parts. The model is assumed to be in an unscaled state.

        Parameters
        ----------
        inputs : Vector
            Unscaled, dimensional input variables read via inputs[key].
        partials : Jacobian
            Sub-jac components written to partials[output_name, input_name].
        """
        vec_size = self.options['vec_size']

        if vec_size > 1:
            flat_inputs = self._vec_to_array_vectorized(inputs)
        else:
            flat_inputs = self._vec_to_array(inputs)

        for out_name, out_shape in self._surrogate_output_names:
            surrogate = self._metadata(out_name).get('surrogate')
            if vec_size > 1:
                out_size = shape_to_len(out_shape)
                for j in range(vec_size):
                    flat_input = flat_inputs[j]
                    if overrides_method('linearize', surrogate,
                                        SurrogateModel):
                        derivs = surrogate.linearize(flat_input)
                        idx = 0
                        for in_name, sz in self._surrogate_input_names:
                            j1 = j * out_size * sz
                            j2 = j1 + out_size * sz
                            partials[out_name, in_name][j1:j2] = \
                                derivs[:, idx:idx + sz].flat
                            idx += sz

            else:
                if overrides_method('linearize', surrogate, SurrogateModel):
                    sjac = surrogate.linearize(flat_inputs)

                    idx = 0
                    for in_name, sz in self._surrogate_input_names:
                        partials[(out_name, in_name)] = sjac[:, idx:idx + sz]
                        idx += sz
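
In the vectorized branch, each (output, input) sub-jacobian is stored flat, one `out_size * sz` block per vectorized copy, so `j1:j2` selects copy `j`'s block. The offsets, illustrated with made-up sizes:

    out_size, sz, vec_size = 2, 3, 4
    for j in range(vec_size):
        j1 = j * out_size * sz
        print(j, (j1, j1 + out_size * sz))   # (0, 6), (6, 12), (12, 18), (18, 24)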
Code example #12
    def as_array(self, copy=False, flat=True):
        """
        Return an index array into a flat array.

        Parameters
        ----------
        copy : bool
            If True, make sure the array returned is a copy.
        flat : bool
            If True, return a flat array.

        Returns
        -------
        ndarray
            The index array.
        """
        if len(self._src_shape) == 1:
            # Case 1: the source shape is 1-D (flat), so flat and nonflat
            # requests coincide; return a 1-D arange slice
            slc = self._slice
            if slc.stop is None and slc.step < 0:  # special case - neg step down to -1
                return np.arange(self._src_shape[0], dtype=int)[slc]
            else:
                # use maxsize here since a shaped slice always has positive int start and stop
                return np.arange(*slc.indices(sys.maxsize), dtype=int)
        else:
            src_size = shape_to_len(self._src_shape)
            arr = np.arange(src_size, dtype=int).reshape(
                self._src_shape)[self._slice].ravel()
            if flat:
                # Case 2: Requested flattened indices of multidimensional array
                # Return indices into a flattened src.
                return arr
            else:
                # Case 3: Requested non-flat indices of multidimensional array
                # This is never called within OpenMDAO
                return np.unravel_index(arr, shape=self._src_shape)
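
`slice.indices(sys.maxsize)` is what lets `np.arange(*...)` consume a slice directly: it resolves start/stop/step into concrete non-negative values. For instance:

    import sys

    print(slice(2, 9, 3).indices(sys.maxsize))   # (2, 9, 3)
    print(slice(None, None, 2).indices(10))      # (0, 10, 2)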
Code example #13
    def _train(self):
        """
        Train the metamodel, if necessary, using the provided training data.
        """
        missing_training_data = []
        num_sample = None
        for name, _ in chain(self._surrogate_input_names,
                             self._surrogate_output_names):
            train_name = f'train_{name}'
            val = self.options[train_name]
            if val is None:
                missing_training_data.append(train_name)
                continue

            if num_sample is None:
                num_sample = len(val)
            elif len(val) != num_sample:
                raise RuntimeError(
                    f"{self.msginfo}: Each variable must have the same number "
                    f"of training points. Expected {num_sample} but found "
                    f"{len(val)} points for '{name}'.")

        if len(missing_training_data) > 0:
            raise RuntimeError(
                f"{self.msginfo}: The following training data sets must be "
                f"provided as options: {missing_training_data}")

        inputs = np.zeros((num_sample, self._input_size))
        self._training_input = inputs

        # Assemble input data.
        idx = 0
        for name, sz in self._surrogate_input_names:
            val = self.options[f'train_{name}']
            if isinstance(val[0], float):
                inputs[:, idx] = val
                idx += 1
            else:
                for row_idx, v in enumerate(val):
                    v = np.asarray(v)
                    inputs[row_idx, idx:idx + sz] = v.flat
                idx += sz

        # Assemble output data and train each output.
        for name, shape in self._surrogate_output_names:
            output_size = shape_to_len(shape)

            outputs = np.zeros((num_sample, output_size))
            self._training_output[name] = outputs

            val = self.options[f'train_{name}']

            if isinstance(val[0], float):
                outputs[:, 0] = val
            else:
                for row_idx, v in enumerate(val):
                    v = np.asarray(v)
                    outputs[row_idx, :] = v.flat

            surrogate = self._metadata(name).get('surrogate')
            if surrogate is None:
                raise RuntimeError(
                    f"{self.msginfo}: No surrogate specified for output '{name}'"
                )
            else:
                surrogate.train(self._training_input,
                                self._training_output[name])

        self.train = False
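
A hedged sketch of how this training path is reached from user code: training data goes in per-variable options and `_train` fires on the first execution:

    import numpy as np
    import openmdao.api as om

    mm = om.MetaModelUnStructuredComp()
    mm.add_input('x', 0.0, training_data=np.linspace(0.0, 10.0, 25))
    mm.add_output('y', 0.0, training_data=np.sin(np.linspace(0.0, 10.0, 25)),
                  surrogate=om.KrigingSurrogate())

    prob = om.Problem()
    prob.model.add_subsystem('mm', mm)
    prob.setup()
    prob.set_val('mm.x', 3.5)
    prob.run_model()   # _train() runs here because self.train starts out True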
Code example #14
    def _setup_partials(self):
        """
        Process all partials and approximations that the user declared.

        Metamodel needs to declare its partials after inputs and outputs are known.
        """
        super()._setup_partials()

        vec_size = self.options['vec_size']
        if vec_size > 1:
            vec_arange = np.arange(vec_size)

            # Sparse specification of partials for vectorized models.
            for wrt, n_wrt in self._surrogate_input_names:
                for of, shape_of in self._surrogate_output_names:
                    n_of = shape_to_len(shape_of)
                    rows = np.repeat(np.arange(n_of), n_wrt)
                    cols = np.tile(np.arange(n_wrt), n_of)
                    repeat = np.repeat(vec_arange, len(rows))
                    rows = np.tile(rows, vec_size) + repeat * n_of
                    cols = np.tile(cols, vec_size) + repeat * n_wrt

                    dct = {
                        'rows': rows,
                        'cols': cols,
                        'dependent': True,
                    }
                    self._declare_partials(of=of, wrt=wrt, dct=dct)
        else:
            dct = {
                'val': None,
                'dependent': True,
            }
            # Dense specification of partials for non-vectorized models.
            self._declare_partials(
                of=tuple([name[0] for name in self._surrogate_output_names]),
                wrt=tuple([name[0] for name in self._surrogate_input_names]),
                dct=dct)

        # Support for user declaring fd partials in a child class and assigning new defaults.
        # We want a warning for all partials that were not explicitly declared.
        declared_partials = set([
            key for key, dct in self._subjacs_info.items()
            if 'method' in dct and dct['method']
        ])

        # Gather undeclared fd partials on surrogates that don't support analytic derivatives.
        # While we do this, declare the missing ones.
        non_declared_partials = []
        for of, _ in self._surrogate_output_names:
            surrogate = self._metadata(of).get('surrogate')
            if surrogate and not overrides_method('linearize', surrogate,
                                                  SurrogateModel):
                wrt_list = [name[0] for name in self._surrogate_input_names]
                self._approx_partials(of=of, wrt=wrt_list, method='fd')

                for wrt in wrt_list:
                    abs_key = rel_key2abs_key(self, (of, wrt))
                    if abs_key not in declared_partials:
                        non_declared_partials.append(abs_key)

        if non_declared_partials:
            self._get_approx_scheme('fd')

            msg = "Because the MetaModelUnStructuredComp '{}' uses a surrogate " \
                  "which does not define a linearize method,\nOpenMDAO will use " \
                  "finite differences to compute derivatives. Some of the derivatives " \
                  "will be computed\nusing default finite difference " \
                  "options because they were not explicitly declared.\n".format(self.name)
            msg += "The derivatives computed using the defaults are:\n"
            for abs_key in non_declared_partials:
                msg += "    {}, {}\n".format(*abs_key)
            issue_warning(msg, category=DerivativesWarning)
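
The `repeat`/`tile` construction yields a block-diagonal sparsity pattern: one dense `n_of` x `n_wrt` block per vectorized copy. A small standalone check:

    import numpy as np

    n_of, n_wrt, vec_size = 2, 3, 2
    rows = np.repeat(np.arange(n_of), n_wrt)          # [0 0 0 1 1 1]
    cols = np.tile(np.arange(n_wrt), n_of)            # [0 1 2 0 1 2]
    repeat = np.repeat(np.arange(vec_size), len(rows))
    print(np.tile(rows, vec_size) + repeat * n_of)    # [0 0 0 1 1 1 2 2 2 3 3 3]
    print(np.tile(cols, vec_size) + repeat * n_wrt)   # [0 1 2 0 1 2 3 4 5 3 4 5]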
Code example #15
    def _jax_linearize(self):
        """
        Compute the jacobian using jax.

        This updates self._jacobian.
        """
        inames = list(self._compute.get_input_names())
        # argnums specifies which position args are to be differentiated
        argnums = [i for i, m in enumerate(self._compute._inputs.values()) if 'is_option' not in m]
        # keep this around for use locally even if we pass None as argnums to jax
        argidxs = argnums
        if len(argnums) == len(inames):
            argnums = None  # speedup if there are no static args
        osize = len(self._outputs)
        isize = len(self._inputs)
        invals = list(self._func_values(self._inputs))
        coloring = self._coloring_info['coloring']
        func = self._compute_jax

        if self._mode == 'rev':  # use reverse mode to compute derivs
            outvals = tuple(self._outputs.values())
            tangents = self._get_tangents(outvals, 'rev', coloring)
            if coloring is None:
                j = np.empty((osize, isize), dtype=float)
                cstart = cend = 0
                for i, a in zip(argidxs, jac_reverse(func, argnums, tangents)(*invals)):
                    if isinstance(invals[i], np.ndarray):
                        cend += invals[i].size
                    else:  # must be a scalar
                        cend += 1
                    a = np.asarray(a)
                    if a.ndim < 2:
                        j[:, cstart:cend] = a.reshape((a.size, 1))
                    else:
                        j[:, cstart:cend] = a.reshape((a.shape[0], cend - cstart))
                    cstart = cend
            else:
                j = [np.asarray(a).reshape((a.shape[0], shape_to_len(a.shape[1:])))
                     for a in jac_reverse(func, argnums, tangents)(*invals)]
                j = coloring.expand_jac(np.hstack(j), 'rev')
        else:
            tangents = self._get_tangents(invals, 'fwd', coloring, argnums)
            if coloring is None:
                j = np.empty((osize, isize), dtype=float)
                start = end = 0
                for a in jac_forward(func, argnums, tangents)(*invals):
                    a = np.asarray(a)
                    if a.ndim < 2:
                        a = a.reshape((1, a.size))
                    else:
                        a = a.reshape((shape_to_len(a.shape[:-1]), a.shape[-1]))
                    end += a.shape[0]
                    if osize == 1:
                        j[0, start:end] = a
                    else:
                        j[start:end, :] = a
                    start = end
            else:
                j = [np.asarray(a).reshape((shape_to_len(a.shape[:-1]), a.shape[-1]))
                     for a in jac_forward(func, argnums, tangents)(*invals)]
                j = coloring.expand_jac(np.vstack(j), 'fwd')

        self._jacobian.set_dense_jac(self, j)
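
`jac_forward` and `jac_reverse` are OpenMDAO wrappers; conceptually they play the role of `jax.jacfwd`/`jax.jacrev` (forward mode builds the jacobian column by column via JVPs, reverse mode row by row via VJPs), with `argnums` to skip static args and colored tangents layered on top. Stripped of those extras, the core idea is:

    import jax
    import jax.numpy as jnp

    def f(x, y):
        return jnp.sin(x) * y

    x, y = jnp.array([0.1, 0.2]), jnp.array([2.0, 3.0])
    Jf = jax.jacfwd(f, argnums=(0, 1))(x, y)   # one JVP per input column
    Jr = jax.jacrev(f, argnums=(0, 1))(x, y)   # one VJP per output row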
Code example #16
def ensure_compatible(name, value, shape=None, indices=None):
    """
    Make value compatible with the specified shape or the shape of indices.

    Parameters
    ----------
    name : str
        The name of the value.
    value : float or list or tuple or ndarray or Iterable
        The value of a variable.
    shape : int or tuple or list or None
        The expected or desired shape of the value.
    indices : Indexer or None
        The indices into a source variable.

    Returns
    -------
    ndarray
        The value in a shape compatible with the specified shape and/or indices.
    tuple
        The resulting shape of the value.

    Raises
    ------
    ValueError
        If value cannot be made to conform to shape or if shape and indices
        are incompatible.
    """
    if isinstance(value, Iterable):
        value = np.asarray(value)

    # if shape is not given, infer from value (if not scalar) or indices
    if shape is not None:
        if isinstance(shape, Integral):
            shape = (shape,)
        elif isinstance(shape, list):
            shape = tuple(shape)
    elif not np.isscalar(value):
        shape = np.atleast_1d(value).shape

    if indices is not None:
        if not indices._flat_src and shape is None:
            raise RuntimeError(f"src_indices for '{name}' is not flat, so its input "
                               "shape must be provided.")
        try:
            indshape = indices.indexed_src_shape
        except (RuntimeError, ValueError, TypeError):
            pass  # use shape provided or shape of value and check vs. shape of indices later
        else:
            if shape is not None and shape_to_len(indshape) != shape_to_len(shape):
                raise ValueError(f"Shape of indices {indshape} does not match shape of {shape} for"
                                 f" '{name}'.")
            if shape is None:
                shape = indshape

    if shape is None:
        # shape is not determined, assume the shape of value was intended
        value = np.atleast_1d(value)
        shape = value.shape
    else:
        # shape is determined, if value is scalar assign it to array of shape
        # otherwise make sure value is an array of the determined shape
        if np.ndim(value) == 0 or value.shape == (1,):
            value = np.full(shape, value)
        else:
            value = np.atleast_1d(value).astype(np.float64)
            if value.shape != shape:
                raise ValueError(f"Incompatible shape for '{name}': Expected {shape} but got "
                                 f"{value.shape}.")

    return value, shape
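
A few calls exercising the branches above (assuming the module-level imports: numpy as np, `Iterable` from collections.abc, `Integral` from numbers):

    val, shape = ensure_compatible('x', 5.0, shape=(2, 3))
    # val is a (2, 3) array filled with 5.0; shape is (2, 3)

    val, shape = ensure_compatible('y', [1.0, 2.0, 3.0])
    # shape is (3,), inferred from the value

    # ensure_compatible('z', [1.0, 2.0], shape=(3,))   # ValueError: incompatible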
Code example #17
    def add_var(self, name, val=1.0, shape=None, units=None, desc='', axis=0):
        """
        Add an input variable to be demuxed, and all associated output variables.

        Parameters
        ----------
        name : str
            Name of the variable in this component's namespace.
        val : float or list or tuple or ndarray or Iterable
            The initial value of the variable being added in user-defined units.
            Default is 1.0.
        shape : int or tuple or list or None
            Shape of this variable, only required if val is not an array. Default is None.
        units : str or None
            Units in which this input variable will be provided to the component
            during execution. Default is None, which means it is unitless.
        desc : str
            Description of the variable.
        axis : int
            The axis along which the elements will be selected.  Note the axis must have length
            vec_size, otherwise a RuntimeError is raised at setup.
        """
        self._vars[name] = {
            'val': val,
            'shape': shape,
            'units': units,
            'desc': desc,
            'axis': axis
        }

        opts = self.options
        vec_size = opts['vec_size']

        options = self._vars[name]
        kwgs = dict(options)
        shape = options['shape']
        size = shape_to_len(shape)
        axis = kwgs.pop('axis')

        if axis >= len(shape):
            raise RuntimeError("{}: Invalid axis ({}) for variable '{}' of "
                               "shape {}".format(self.msginfo, axis, name,
                                                 shape))

        if shape[axis] != vec_size:
            raise RuntimeError(
                "{}: Variable '{}' cannot be demuxed along axis {}. Axis size "
                "is {} but vec_size is {}.".format(self.msginfo, name, axis,
                                                   shape[axis], vec_size))

        self.add_input(name, **kwgs)

        template = np.reshape(np.arange(size), shape)

        self._output_names[name] = []

        out_shape = list(shape)
        out_shape.pop(axis)
        if len(out_shape) == 0:
            out_shape = [1]

        for i in range(vec_size):
            out_name = '{0}_{1}'.format(name, i)
            self._output_names[name].append(out_name)
            self.add_output(name=out_name,
                            val=options['val'],
                            shape=out_shape,
                            units=options['units'],
                            desc=options['desc'])

            rs = np.arange(shape_to_len(out_shape))
            cs = np.atleast_1d(np.take(template, indices=i,
                                       axis=axis)).flatten()

            self.declare_partials(of=out_name,
                                  wrt=name,
                                  rows=rs,
                                  cols=cs,
                                  val=1.0)
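
A hedged usage sketch, assuming om.DemuxComp is the public component built on this add_var:

    import numpy as np
    import openmdao.api as om

    p = om.Problem()
    demux = p.model.add_subsystem('demux', om.DemuxComp(vec_size=3))
    demux.add_var('pos', shape=(3, 2), axis=0, units='m')
    p.setup()
    p.set_val('demux.pos', np.arange(6.0).reshape((3, 2)))
    p.run_model()
    print(p.get_val('demux.pos_0'))   # [0. 1.]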
Code example #18
    def add_balance(self,
                    name,
                    eq_units=None,
                    lhs_name=None,
                    rhs_name=None,
                    rhs_val=0.0,
                    use_mult=False,
                    mult_name=None,
                    mult_val=1.0,
                    normalize=True,
                    val=None,
                    **kwargs):
        """
        Add a new state variable and associated equation to be balanced.

        This will create new inputs `lhs:name`, `rhs:name`, and `mult:name` that will
        define the left and right sides of the equation to be balanced, and a
        multiplier for the left-hand-side.

        Parameters
        ----------
        name : str
            The name of the state variable to be created.
        eq_units : str or None
            Units for the left-hand-side and right-hand-side of the equation to be balanced.
        lhs_name : str or None
            Optional name for the LHS variable associated with the implicit state variable.  If
            None, the default will be used:  'lhs:{name}'.
        rhs_name : str or None
            Optional name for the RHS variable associated with the implicit state variable.  If
            None, the default will be used:  'rhs:{name}'.
        rhs_val : int, float, or np.array
            Default value for the RHS.  Must be compatible with the shape (optionally)
            given by the val or shape option in kwargs.
        use_mult : bool
            Specifies whether the LHS multiplier is to be used.  If True, then an additional
            input `mult_name` is created, with the default value given by `mult_val`, that
            multiplies lhs.  Default is False.
        mult_name : str or None
            Optional name for the LHS multiplier variable associated with the implicit state
            variable. If None, the default will be used: 'mult:{name}'.
        mult_val : int, float, or np.array
            Default value for the LHS multiplier.  Must be compatible with the shape (optionally)
            given by the val or shape option in kwargs.
        normalize : bool
            Specifies whether or not the resulting residual should be normalized by a quadratic
            function of the RHS.
        val : float, int, or np.ndarray
            Set initial value for the state.
        **kwargs : dict
            Additional arguments to be passed for the creation of the implicit state variable.
            (see `add_output` method).
        """
        options = {
            'kwargs': kwargs,
            'eq_units': eq_units,
            'lhs_name': lhs_name,
            'rhs_name': rhs_name,
            'rhs_val': rhs_val,
            'use_mult': use_mult,
            'mult_name': mult_name,
            'mult_val': mult_val,
            'normalize': normalize
        }

        self._state_vars[name] = options

        if val is None:
            # If user doesn't specify initial guess for val, we can size problem from initial
            # rhs_val.
            if 'shape' not in kwargs and np.ndim(rhs_val) > 0:
                kwargs['shape'] = rhs_val.shape

        else:
            options['kwargs']['val'] = val

        meta = self.add_output(name, **options['kwargs'])

        shape = meta['shape']

        for s in ('lhs', 'rhs', 'mult'):
            if options['{0}_name'.format(s)] is None:
                options['{0}_name'.format(s)] = '{0}:{1}'.format(s, name)

        self.add_input(options['lhs_name'],
                       val=np.ones(shape),
                       units=options['eq_units'])

        self.add_input(options['rhs_name'],
                       val=options['rhs_val'] * np.ones(shape),
                       units=options['eq_units'])

        if options['use_mult']:
            self.add_input(options['mult_name'],
                           val=options['mult_val'] * np.ones(shape),
                           units=None)

        ar = np.arange(shape_to_len(shape))
        self.declare_partials(of=name,
                              wrt=options['lhs_name'],
                              rows=ar,
                              cols=ar,
                              val=1.0)
        self.declare_partials(of=name,
                              wrt=options['rhs_name'],
                              rows=ar,
                              cols=ar,
                              val=1.0)

        if options['use_mult']:
            self.declare_partials(of=name,
                                  wrt=options['mult_name'],
                                  rows=ar,
                                  cols=ar,
                                  val=1.0)
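
A hedged sketch of the resulting implicit loop: feed `lhs:x` from elsewhere in the model and let a Newton solver drive the state until lhs equals rhs (here, solving x**2 = 4):

    import openmdao.api as om

    prob = om.Problem()
    bal = om.BalanceComp()
    bal.add_balance('x', rhs_val=4.0)

    prob.model.add_subsystem('exec', om.ExecComp('y = x**2'))
    prob.model.add_subsystem('balance', bal)
    prob.model.connect('balance.x', 'exec.x')
    prob.model.connect('exec.y', 'balance.lhs:x')

    prob.model.nonlinear_solver = om.NewtonSolver(solve_subsystems=False)
    prob.model.linear_solver = om.DirectSolver()

    prob.setup()
    prob.set_val('balance.x', 1.0)     # initial guess
    prob.run_model()
    print(prob.get_val('balance.x'))   # ~[2.]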
Code example #19
    def _train(self):
        """
        Override MetaModelUnStructured _train method to take into account multi-fidelity input data.
        """
        if self._nfi == 1:
            # shortcut: fall back to the base class behaviour immediately
            super()._train()
            return

        num_sample = self._nfi * [None]
        for name_root, _ in chain(self._surrogate_input_names, self._surrogate_output_names):
            for fi in range(self._nfi):
                name = _get_name_fi(name_root, fi)
                val = self.options['train_' + name]
                if num_sample[fi] is None:
                    num_sample[fi] = len(val)
                elif len(val) != num_sample[fi]:
                    msg = f"{self.msginfo}: Each variable must have the same number " \
                          f"of training points. Expected {num_sample[fi]} but found {len(val)} " \
                          f"points for '{name}'."
                    raise RuntimeError(msg)

        inputs = [np.zeros((num_sample[fi], self._input_sizes[fi]))
                  for fi in range(self._nfi)]

        # add training data for each input
        idx = self._nfi * [0]
        for name_root, sz in self._surrogate_input_names:
            for fi in range(self._nfi):
                name = _get_name_fi(name_root, fi)
                val = self.options['train_' + name]
                if isinstance(val[0], float):
                    inputs[fi][:, idx[fi]] = val
                    idx[fi] += 1
                else:
                    for row_idx, v in enumerate(val):
                        v = np.asarray(v)
                        inputs[fi][row_idx, idx[fi]:idx[fi] + sz] = v.flat

        # add training data for each output
        outputs = self._nfi * [None]
        for name_root, shape in self._surrogate_output_names:
            output_size = shape_to_len(shape)
            for fi in range(self._nfi):
                name_fi = _get_name_fi(name_root, fi)
                outputs[fi] = np.zeros((num_sample[fi], output_size))

                val = self.options['train_' + name_fi]

                if isinstance(val[0], float):
                    outputs[fi][:, 0] = val
                else:
                    for row_idx, v in enumerate(val):
                        v = np.asarray(v)
                        outputs[fi][row_idx, :] = v.flat

            self._training_output[name_root] = []
            self._training_output[name_root].extend(outputs)

            surrogate = self._metadata(name_root).get('surrogate')
            if surrogate is None:
                msg = f"{self.msginfo}: No surrogate specified for output '{name_root}'"
                raise RuntimeError(msg)
            else:
                surrogate.train_multifi(inputs, self._training_output[name_root])

        self._training_input = inputs
        self.train = False
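
A hedged sketch of how the per-fidelity training options referenced above get populated; the `_fi2` suffix for the second fidelity level follows the naming used in OpenMDAO's docs:

    import numpy as np
    import openmdao.api as om

    mm = om.MultiFiMetaModelUnStructuredComp(nfi=2)
    mm.add_input('x', 0.0)
    mm.add_output('y', 0.0, surrogate=om.MultiFiCoKrigingSurrogate())

    mm.options['train_x'] = np.array([0.0, 0.4, 1.0])         # high fidelity
    mm.options['train_y'] = np.array([0.0, 0.3, 1.1])
    mm.options['train_x_fi2'] = np.linspace(0.0, 1.0, 11)     # low fidelity
    mm.options['train_y_fi2'] = np.sin(np.linspace(0.0, 1.0, 11))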
Code example #20
    def add_eq_output(self,
                      name,
                      eq_units=None,
                      lhs_name=None,
                      rhs_name=None,
                      rhs_val=0.0,
                      use_mult=False,
                      mult_name=None,
                      mult_val=1.0,
                      normalize=True,
                      add_constraint=False,
                      ref=None,
                      ref0=None,
                      adder=None,
                      scaler=None,
                      **kwargs):
        """
        Add a new output variable computed via the difference equation.

        This will create new inputs `lhs:name`, `rhs:name`, and `mult:name` that will
        define the left and right sides of the difference equation, and a
        multiplier for the left-hand-side.

        Parameters
        ----------
        name : str
            The name of the output variable to be created.
        eq_units : str or None
            Units for the left-hand-side and right-hand-side of the difference equation.
        lhs_name : str or None
            Optional name for the LHS variable associated with the difference equation.  If
            None, the default will be used:  'lhs:{name}'.
        rhs_name : str or None
            Optional name for the RHS variable associated with the difference equation.  If
            None, the default will be used:  'rhs:{name}'.
        rhs_val : int, float, or np.array
            Default value for the RHS.  Must be compatible with the shape (optionally)
            given by the val or shape option in kwargs.
        use_mult : bool
            Specifies whether the LHS multiplier is to be used.  If True, then an additional
            input `mult_name` is created, with the default value given by `mult_val`, that
            multiplies lhs.  Default is False.
        mult_name : str or None
            Optional name for the LHS multiplier variable associated with the output
            variable. If None, the default will be used: 'mult:{name}'.
        mult_val : int, float, or np.array
            Default value for the LHS multiplier.  Must be compatible with the shape (optionally)
            given by the val or shape option in kwargs.
        normalize : bool
            Specifies whether or not the resulting output should be normalized by a quadratic
            function of the RHS. When this option is True, the user-provided ref/ref0 scaler/adder
            options below are typically unnecessary.
        add_constraint : bool
            Specifies whether to add an equality constraint.
        ref : float or ndarray, optional
            Value of response variable that scales to 1.0 in the driver. This option is only
            meaningful when add_constraint=True.
        ref0 : float or ndarray, optional
            Value of response variable that scales to 0.0 in the driver. This option is only
            meaningful when add_constraint=True.
        adder : float or ndarray, optional
            Value to add to the model value to get the scaled value for the driver. adder
            is first in precedence. This option is only meaningful when add_constraint=True.
        scaler : float or ndarray, optional
            Value to multiply the model value to get the scaled value for the driver. scaler
            is second in precedence. This option is only meaningful when add_constraint=True.
        **kwargs : dict
            Additional arguments to be passed for the creation of the output variable.
            (see `add_output` method).
        """
        self._output_vars[name] = options = {
            'kwargs': kwargs,
            'eq_units': eq_units,
            'lhs_name': lhs_name,
            'rhs_name': rhs_name,
            'rhs_val': rhs_val,
            'use_mult': use_mult,
            'mult_name': mult_name,
            'mult_val': mult_val,
            'normalize': normalize,
            'add_constraint': add_constraint,
            'ref': ref,
            'ref0': ref0,
            'adder': adder,
            'scaler': scaler
        }

        meta = self.add_output(name, **options['kwargs'])

        shape = meta['shape']

        for s in ('lhs', 'rhs', 'mult'):
            if options['{0}_name'.format(s)] is None:
                options['{0}_name'.format(s)] = '{0}:{1}'.format(s, name)

        self.add_input(options['lhs_name'],
                       val=np.ones(shape),
                       units=options['eq_units'])

        self.add_input(options['rhs_name'],
                       val=options['rhs_val'] * np.ones(shape),
                       units=options['eq_units'])

        if options['use_mult']:
            self.add_input(options['mult_name'],
                           val=options['mult_val'] * np.ones(shape),
                           units=None)

        ar = np.arange(shape_to_len(shape))
        self.declare_partials(of=name,
                              wrt=options['lhs_name'],
                              rows=ar,
                              cols=ar,
                              val=1.0)
        self.declare_partials(of=name,
                              wrt=options['rhs_name'],
                              rows=ar,
                              cols=ar,
                              val=1.0)

        if options['use_mult']:
            self.declare_partials(of=name,
                                  wrt=options['mult_name'],
                                  rows=ar,
                                  cols=ar,
                                  val=1.0)

        if options['add_constraint']:
            self.add_constraint(name,
                                equals=0.,
                                ref0=options['ref0'],
                                ref=options['ref'],
                                adder=options['adder'],
                                scaler=options['scaler'])
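
A hedged sketch, assuming om.EQConstraintComp is the component exposing add_eq_output; with `normalize=False` and no multiplier, the output is simply lhs - rhs:

    import openmdao.api as om

    prob = om.Problem()
    eq = om.EQConstraintComp()
    eq.add_eq_output('resid', add_constraint=True, normalize=False)
    prob.model.add_subsystem('eq', eq)
    prob.setup()
    prob.set_val('eq.lhs:resid', 3.0)
    prob.set_val('eq.rhs:resid', 5.0)
    prob.run_model()
    print(prob.get_val('eq.resid'))   # [-2.]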
Code example #21
File: indepvarcomp.py  Project: zenshuo100/OpenMDAO
    def add_output(self,
                   name,
                   val=1.0,
                   shape=None,
                   units=None,
                   res_units=None,
                   desc='',
                   lower=None,
                   upper=None,
                   ref=None,
                   ref0=None,
                   res_ref=None,
                   tags=None,
                   shape_by_conn=False,
                   copy_shape=None):
        """
        Add an independent variable to this component.

        This should never be called by a user, as it skips all checks.

        Parameters
        ----------
        name : str
            Name of the variable in this component's namespace.
        val : float or list or tuple or ndarray
            The initial value of the variable being added in user-defined units. Default is 1.0.
        shape : int or tuple or list or None
            Shape of this variable, only required if val is not an array.
            Default is None.
        units : str or None
            Units in which the output variables will be provided to the component during execution.
            Default is None, which means it has no units.
        res_units : None
            This argument is deprecated because it was unused.
        desc : str
            Description of the variable.
        lower : None
            This argument is deprecated because it was unused.
        upper : None
            This argument is deprecated because it was unused.
        ref : None
            This argument is deprecated because it was unused.
        ref0 : None
            This argument is deprecated because it was unused.
        res_ref : None
            This argument is deprecated because it was unused.
        tags : str or list of strs
            User defined tags that can be used to filter what gets listed when calling
            list_outputs.
        shape_by_conn : bool
            If True, shape this output to match its connected input(s).
        copy_shape : str or None
            If a str, that str is the name of a variable. Shape this output to match that of
            the named variable.
        """
        # Add the output quickly.
        # We don't need to check for errors because we get the value straight from a
        # source, and ivc metadata is minimal.
        value, shape = ensure_compatible(name, val, None)
        metadata = {
            'value': value,
            'shape': shape,
            'size': shape_to_len(shape),
            'units': units,
            'res_units': None,
            'desc': '',
            'distributed': False,
            'tags': set(),
            'ref': 1.0,
            'ref0': 0.0,
            'res_ref': 1.0,
            'lower': None,
            'upper': None,
            'shape_by_conn': False,
            'copy_shape': None
        }

        self._static_var_rel2meta[name] = metadata
        self._static_var_rel_names['output'].append(name)
        self._var_added(name)
Code example #22
    def add_var(self, name, val=1.0, shape=None, units=None, desc='', axis=0):
        """
        Add an output variable to be muxed, and all associated input variables.

        Parameters
        ----------
        name : str
            Name of the variable in this component's namespace.
        val : float or list or tuple or ndarray or Iterable
            The initial value of the variable being added in user-defined units.
            Default is 1.0.
        shape : int or tuple or list or None
            Shape of the input variables to be muxed, only required if val is not an array.
            Default is None.
        units : str or None
            Units in which this input variable will be provided to the component
            during execution. Default is None, which means it is unitless.
        desc : str
            Description of the variable.
        axis : int
            The axis along which the elements will be stacked.  Note that N-dimensional inputs
            cannot be stacked along an axis greater than N.
        """
        self._vars[name] = {
            'val': val,
            'shape': shape,
            'units': units,
            'desc': desc,
            'axis': axis
        }

        opts = self.options
        vec_size = opts['vec_size']

        options = self._vars[name]

        kwgs = dict(options)
        in_shape = np.asarray(options['val']).shape \
            if options['shape'] is None else options['shape']
        in_size = shape_to_len(in_shape)
        out_shape = list(in_shape)
        out_shape.insert(options['axis'], vec_size)
        kwgs.pop('shape')
        ax = kwgs.pop('axis')

        in_dimension = len(in_shape)

        if ax > in_dimension:
            raise ValueError(
                '{3}: Cannot mux {0}D inputs for {2} along an axis greater '
                'than {0} (got {1}).'.format(in_dimension, ax, name, self.msginfo))

        self.add_output(name=name,
                        val=options['val'],
                        shape=out_shape,
                        units=options['units'],
                        desc=options['desc'])

        self._input_names[name] = []

        for i in range(vec_size):
            in_name = '{0}_{1}'.format(name, i)
            self._input_names[name].append(in_name)

            self.add_input(name=in_name, shape=in_shape, **kwgs)

            in_templates = [
                np.zeros(in_shape, dtype=int) for _ in range(vec_size)
            ]

            rs = []
            cs = []

            for j in range(in_size):
                in_templates[i].flat[:] = 0
                in_templates[i].flat[j] = 1
                temp_out = np.stack(in_templates, axis=ax)
                cs.append(j)
                rs.append(int(np.nonzero(temp_out.ravel())[0]))

            self.declare_partials(of=name,
                                  wrt=in_name,
                                  rows=rs,
                                  cols=cs,
                                  val=1.0)
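
A hedged usage sketch, assuming om.MuxComp is the public component built on this add_var:

    import openmdao.api as om

    p = om.Problem()
    mux = p.model.add_subsystem('mux', om.MuxComp(vec_size=3))
    mux.add_var('r', shape=(2,), axis=0, units='m')
    p.setup()
    p.set_val('mux.r_0', [0.0, 1.0])
    p.set_val('mux.r_1', [2.0, 3.0])
    p.set_val('mux.r_2', [4.0, 5.0])
    p.run_model()
    print(p.get_val('mux.r'))   # [[0. 1.] [2. 3.] [4. 5.]]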
Code example #23
    def _evaluate_spline(self, values):
        """
        Interpolate at all fixed output coordinates given the new table values.

        This method is called from OpenMDAO, and is not meant for standalone use.

        Parameters
        ----------
        values : ndarray(n_nodes x n_points)
            The data on the regular grid in n dimensions.

        Returns
        -------
        ndarray
            Value of interpolant at all sample points.
        """
        xi = self.x_interp
        self.values = values

        table = self.table
        if table._vectorized:

            if table._name == 'bsplines':
                # bsplines is fully vectorized.
                table.values = values
                result, _, derivs_val, _ = table.evaluate_vectorized(xi)

            else:
                # Scipy implementation vectorized over lookups, but not over multiple table values.
                interp = self._interp
                n_nodes, _ = values.shape
                nx = shape_to_len(xi.shape)

                result = np.empty((n_nodes, nx), dtype=values.dtype)
                derivs_val = None

                for j in range(n_nodes):

                    table = interp(self.grid, values[j, :], interp,
                                   **self._interp_options)
                    table._compute_d_dvalues = False
                    table._compute_d_dx = False

                    result[j, :], _, _, _ = table.evaluate_vectorized(
                        xi.reshape((nx, 1)))

        else:
            interp = self._interp
            n_nodes, _ = values.shape
            nx = shape_to_len(xi.shape)
            result = np.empty((n_nodes, nx), dtype=values.dtype)
            derivs_val = None

            # TODO: it might be possible to vectorize over n_nodes.
            for j in range(n_nodes):

                table = interp(self.grid, values[j, :], interp,
                               **self._interp_options)
                table._compute_d_dvalues = True
                table._compute_d_dx = False

                for k in range(nx):
                    x_pt = np.atleast_2d(xi[k])
                    val, _, d_values, _ = table.evaluate(x_pt)
                    result[j, k] = val
                    if d_values is not None:
                        if derivs_val is None:
                            dv_shape = [n_nodes, nx]
                            dv_shape.extend(values.shape[1:])
                            derivs_val = np.zeros(dv_shape, dtype=values.dtype)
                        in_slice = table._full_slice
                        full_slice = [slice(j, j + 1), slice(k, k + 1)]
                        full_slice.extend(in_slice)
                        shape = derivs_val[tuple(full_slice)].shape
                        derivs_val[tuple(full_slice)] = d_values.reshape(shape)

        # Cache derivatives
        self._d_dvalues = derivs_val

        self.table = table
        return result
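
A hedged sketch of the public component that drives this method: om.SplineComp evaluates every row of a control-point table at fixed interpolation points:

    import numpy as np
    import openmdao.api as om

    x_cp = np.linspace(0.0, 1.0, 6)
    x_interp = np.linspace(0.0, 1.0, 20)

    prob = om.Problem()
    comp = om.SplineComp(method='akima', x_cp_val=x_cp,
                         x_interp_val=x_interp, vec_size=3)
    comp.add_spline(y_cp_name='y_cp', y_interp_name='y_interp')
    prob.model.add_subsystem('spline', comp)
    prob.setup()
    prob.set_val('spline.y_cp', np.sin(np.outer(np.arange(1.0, 4.0), x_cp)))
    prob.run_model()
    print(prob.get_val('spline.y_interp').shape)   # (3, 20)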