Example #1
    def _check_partials_meta(self, abs_key, meta):
        """
        Check a given partial derivative and metadata for the correct shapes.

        Parameters
        ----------
        abs_key : tuple(str, str)
            The of/wrt pair (given absolute names) defining the partial derivative.
        meta : dict
            Metadata dictionary from declare_partials.
        """
        if meta['dependent']:
            out_size = np.prod(self._var_abs2meta['output'][abs_key[0]]['shape'])
            if abs_key[1] in self._var_abs2meta['input']:
                in_size = self._var_abs2meta['input'][abs_key[1]]['size']
            else:  # assume output (or get a KeyError)
                in_size = self._var_abs2meta['output'][abs_key[1]]['size']

            if in_size == 0 and self.comm.rank != 0:  # 'inactive' component
                return

            rows = meta['rows']
            cols = meta['cols']
            if not (rows is None or rows.size == 0):
                if rows.min() < 0:
                    of, wrt = abs_key2rel_key(self, abs_key)
                    msg = '{}: d({})/d({}): row indices must be non-negative'
                    raise ValueError(msg.format(self.pathname, of, wrt))
                if cols.min() < 0:
                    of, wrt = abs_key2rel_key(self, abs_key)
                    msg = '{}: d({})/d({}): col indices must be non-negative'
                    raise ValueError(msg.format(self.pathname, of, wrt))
                if rows.max() >= out_size or cols.max() >= in_size:
                    of, wrt = abs_key2rel_key(self, abs_key)
                    msg = '{}: d({})/d({}): Expected {}x{} but declared at least {}x{}'
                    raise ValueError(msg.format(
                        self.pathname, of, wrt,
                        out_size, in_size,
                        rows.max() + 1, cols.max() + 1))
            elif meta['value'] is not None:
                val = meta['value']
                val_shape = val.shape
                if len(val_shape) == 1:
                    val_out, val_in = val_shape[0], 1
                else:
                    val_out, val_in = val.shape
                if val_out > out_size or val_in > in_size:
                    of, wrt = abs_key2rel_key(self, abs_key)
                    msg = '{}: d({})/d({}): Expected {}x{} but val is {}x{}'
                    raise ValueError(msg.format(
                        self.pathname, of, wrt,
                        out_size, in_size,
                        val_out, val_in))
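
For reference, a self-contained NumPy sketch of the index checks performed above (the helper name and toy sizes are illustrative, not taken from the source):

import numpy as np

def check_sparse_indices(rows, cols, out_size, in_size):
    # Mirrors the checks above: indices must be non-negative and must fit
    # inside the declared out_size x in_size sub-Jacobian.
    rows = np.asarray(rows)
    cols = np.asarray(cols)
    if rows.size > 0:
        if rows.min() < 0 or cols.min() < 0:
            raise ValueError('row/col indices must be non-negative')
        if rows.max() >= out_size or cols.max() >= in_size:
            raise ValueError('Expected {}x{} but declared at least {}x{}'.format(
                out_size, in_size, rows.max() + 1, cols.max() + 1))

check_sparse_indices([0, 1, 2], [0, 1, 2], out_size=3, in_size=3)   # passes silently
try:
    check_sparse_indices([0, 3], [0, 1], out_size=3, in_size=3)     # row index 3 is out of range
except ValueError as err:
    print(err)
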
Example #2
    def _check_partials_meta(self, abs_key, val, shape):
        """
        Check a given partial derivative and metadata for the correct shapes.

        Parameters
        ----------
        abs_key : tuple(str, str)
            The of/wrt pair (given absolute names) defining the partial derivative.
        val : ndarray
            Subjac value.
        shape : tuple
            Expected shape of val.
        """
        out_size, in_size = shape

        if in_size == 0 and self.comm.rank != 0:  # 'inactive' component
            return

        if val is not None:
            val_shape = val.shape
            if len(val_shape) == 1:
                val_out, val_in = val_shape[0], 1
            else:
                val_out, val_in = val.shape
            if val_out > out_size or val_in > in_size:
                of, wrt = abs_key2rel_key(self, abs_key)
                msg = '{}: d({})/d({}): Expected {}x{} but val is {}x{}'
                raise ValueError(
                    msg.format(self.pathname, of, wrt, out_size, in_size,
                               val_out, val_in))
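
A minimal standalone sketch of the value-shape comparison above, assuming only NumPy; the helper name is hypothetical:

import numpy as np

def val_fits(val, expected_shape):
    # A 1-D val is interpreted as a single column, exactly as in the method above.
    out_size, in_size = expected_shape
    if val.ndim == 1:
        val_out, val_in = val.shape[0], 1
    else:
        val_out, val_in = val.shape
    return val_out <= out_size and val_in <= in_size

print(val_fits(np.zeros(3), (3, 1)))   # True: treated as a 3x1 column
print(val_fits(np.zeros(3), (2, 2)))   # False: 3 rows exceed the expected 2
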
Example #3
    def _check_partials_meta(self, abs_key, val, shape):
        """
        Check a given partial derivative and metadata for the correct shapes.

        Parameters
        ----------
        abs_key : tuple(str, str)
            The of/wrt pair (given absolute names) defining the partial derivative.
        val : ndarray
            Subjac value.
        shape : tuple
            Expected shape of val.
        """
        out_size, in_size = shape

        if in_size == 0 and self.comm.rank != 0:  # 'inactive' component
            return

        if val is not None:
            val_shape = val.shape
            if len(val_shape) == 1:
                val_out, val_in = val_shape[0], 1
            else:
                val_out, val_in = val.shape
            if val_out > out_size or val_in > in_size:
                of, wrt = abs_key2rel_key(self, abs_key)
                msg = '{}: d({})/d({}): Expected {}x{} but val is {}x{}'
                raise ValueError(msg.format(self.pathname, of, wrt, out_size, in_size,
                                            val_out, val_in))
Example #4
    def compute_approximations(self, system, jac=None, deriv_type='partial'):
        """
        Execute the system to compute the approximate sub-Jacobians.

        Parameters
        ----------
        system : System
            System on which the execution is run.
        jac : None or dict-like
            If None, update system with the approximated sub-Jacobians. Otherwise, store the
            approximations in the given dict-like object.
        deriv_type : str
            One of 'total' or 'partial', indicating if total or partial derivatives are
            being approximated.
        """
        if jac is None:
            jac = system._jacobian

        if deriv_type == 'total':
            current_vec = system._outputs
        elif deriv_type == 'partial':
            current_vec = system._residuals
        else:
            raise ValueError('deriv_type must be one of "total" or "partial"')

        # Turn on complex step.
        system._inputs._vector_info._under_complex_step = True

        # create a scratch array
        out_tmp = system._outputs.get_data()
        results_clone = current_vec._clone(True)

        # To support driver src_indices, we need to override some checks in Jacobian, but do it
        # selectively.
        uses_src_indices = (system._owns_approx_of_idx or system._owns_approx_wrt_idx) and \
            not isinstance(jac, dict)

        for key, approximations in groupby(self._exec_list, self._key_fun):
            # groupby (along with this key function) will group all 'of's that have the same wrt and
            # step size.
            wrt, form, delta = key
            if form == 'reverse':
                delta *= -1.0
            fact = 1.0 / delta
            if deriv_type == 'total':
                # Sign difference between output and resids
                fact = -fact

            if wrt in system._owns_approx_wrt_idx:
                in_idx = system._owns_approx_wrt_idx[wrt]
                in_size = len(in_idx)
            else:
                if wrt in system._var_abs2meta:
                    in_size = system._var_abs2meta[wrt]['size']

                in_idx = range(in_size)

            outputs = []

            # Note: If access to `approximations` is required again in the future, we will need to
            # throw it in a list first. The groupby iterator only works once.
            for approx_tuple in approximations:
                of = approx_tuple[0]
                # TODO: Sparse derivatives
                if of in system._owns_approx_of_idx:
                    out_idx = system._owns_approx_of_idx[of]
                    out_size = len(out_idx)
                else:
                    out_size = system._var_abs2meta[of]['size']

                outputs.append((of, np.zeros((out_size, in_size))))

            for i_count, idx in enumerate(in_idx):
                # Run the Finite Difference
                input_delta = [(wrt, idx, delta)]
                result = self._run_point_complex(system, input_delta, out_tmp,
                                                 results_clone, deriv_type)

                for of, subjac in outputs:
                    if of in system._owns_approx_of_idx:
                        out_idx = system._owns_approx_of_idx[of]
                        subjac[:, i_count] = result._imag_views_flat[of][
                            out_idx] * fact
                    else:
                        subjac[:, i_count] = result._imag_views_flat[of] * fact

            for of, subjac in outputs:
                rel_key = abs_key2rel_key(system, (of, wrt))
                if uses_src_indices:
                    jac._override_checks = True
                jac[rel_key] = subjac
                if uses_src_indices:
                    jac._override_checks = False

        # Turn off complex step.
        system._inputs._vector_info._under_complex_step = False
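
The complex-step idea that the _run_point_complex call relies on, shown on a scalar function as a hedged standalone sketch (the function and step names are illustrative):

import numpy as np

def complex_step_derivative(f, x, h=1e-30):
    # df/dx ~= Im(f(x + i*h)) / h; there is no subtraction of nearly equal numbers,
    # so the step can be tiny and the result is accurate to machine precision.
    return np.imag(f(x + 1j * h)) / h

print(complex_step_derivative(np.sin, 1.0))   # ~0.5403..., i.e. cos(1.0)
print(np.cos(1.0))
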
Example #5
    def compute_approximations(self, system, jac=None, deriv_type='partial'):
        """
        Execute the system to compute the approximate sub-Jacobians.

        Parameters
        ----------
        system : System
            System on which the execution is run.

        jac : None or dict-like
            If None, update system with the approximated sub-Jacobians. Otherwise, store the
            approximations in the given dict-like object.

        deriv_type : str
            One of 'total' or 'partial', indicating if total or partial derivatives are
            being approximated.
        """
        if jac is None:
            jac = system._jacobian

        if deriv_type == 'total':
            current_vec = system._outputs
        elif deriv_type == 'partial':
            current_vec = system._residuals
        else:
            raise ValueError('deriv_type must be one of "total" or "partial"')

        result = system._outputs._clone(True)
        result_array = result.get_data()
        out_tmp = current_vec.get_data()
        in_tmp = system._inputs.get_data()

        for key, approximations in groupby(self._exec_list, self._key_fun):
            # groupby (along with this key function) will group all 'of's that have the same wrt and
            # step size.
            wrt, form, order, step, step_calc = key

            # FD forms are written as a collection of changes to inputs (deltas) and the associated
            # coefficients (coeffs). Since we do not need to (re)evaluate the current step, its
            # coefficient is stored separately (current_coeff). For example,
            # f'(x) = (f(x+h) - f(x))/h + O(h) = 1/h * f(x+h) + (-1/h) * f(x) + O(h)
            # would be stored as deltas = [h], coeffs = [1/h], and current_coeff = -1/h.
            # A fourth-order accurate central approximation for the first derivative would be stored
            # as deltas = [-2, -1, 1, 2] * h, coeffs = [1/12, -2/3, 2/3, -1/12] * 1/h,
            # current_coeff = 0.
            fd_form = _generate_fd_coeff(form, order)

            if step_calc == 'rel':
                if wrt in system._outputs._views_flat:
                    scale = np.linalg.norm(system._outputs._views_flat[wrt])
                else:
                    scale = np.linalg.norm(system._inputs._views_flat[wrt])
                step *= scale

            deltas = fd_form.deltas * step
            coeffs = fd_form.coeffs / step
            current_coeff = fd_form.current_coeff / step

            if wrt in system._owns_approx_wrt_idx:
                in_idx = system._owns_approx_wrt_idx[wrt]
                in_size = len(in_idx)
            else:
                if wrt in system._var_abs2meta['input']:
                    in_size = system._var_abs2meta['input'][wrt]['size']
                elif wrt in system._var_abs2meta['output']:
                    in_size = system._var_abs2meta['output'][wrt]['size']

                in_idx = range(in_size)

            result.set_vec(system._outputs)

            outputs = []

            # Note: If access to `approximations` is required again in the future, we will need to
            # throw it in a list first. The groupby iterator only works once.
            for approx_tuple in approximations:
                of = approx_tuple[0]
                # TODO: Sparse derivatives
                if of in system._owns_approx_of_idx:
                    out_idx = system._owns_approx_of_idx[of]
                    out_size = len(out_idx)
                else:
                    out_size = system._var_abs2meta['output'][of]['size']
                outputs.append((of, np.zeros((out_size, in_size))))

            for i_count, idx in enumerate(in_idx):
                if current_coeff:
                    result.set_vec(current_vec)
                    result *= current_coeff
                else:
                    result.set_const(0.)

                # Run the Finite Difference
                for delta, coeff in zip(deltas, coeffs):
                    input_delta = [(wrt, idx, delta)]
                    self._run_point(system, input_delta, out_tmp, in_tmp,
                                    result_array, deriv_type)
                    result_array *= coeff
                    result.iadd_data(result_array)

                if deriv_type == 'total':
                    # Sign difference between output and resids. This arises from the definitions
                    # in the unified derivatives equations.
                    # For ExplicitComponent: resid = output(n-1) - output(n)
                    # so dresid/d* = - doutput/d*
                    result *= -1.0

                for of, subjac in outputs:

                    if of in system._owns_approx_of_idx:
                        out_idx = system._owns_approx_of_idx[of]
                        subjac[:, i_count] = result._views_flat[of][out_idx]
                    else:
                        subjac[:, i_count] = result._views_flat[of]

            for of, subjac in outputs:
                rel_key = abs_key2rel_key(system, (of, wrt))
                jac[rel_key] = subjac
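
A small sketch of the deltas/coeffs/current_coeff encoding described in the comment above, applied to a scalar function; the helper is hypothetical, not the API used above:

import numpy as np

def fd_approx(f, x, deltas, coeffs, current_coeff):
    # f'(x) ~= current_coeff * f(x) + sum_k coeffs[k] * f(x + deltas[k])
    total = current_coeff * f(x)
    for delta, coeff in zip(deltas, coeffs):
        total += coeff * f(x + delta)
    return total

h = 1e-6
# forward difference: f'(x) ~= (f(x+h) - f(x)) / h
fwd = fd_approx(np.exp, 1.0, deltas=[h], coeffs=[1.0 / h], current_coeff=-1.0 / h)
# four-point central stencil from the comment above (the current point is unused)
cen = fd_approx(np.exp, 1.0,
                deltas=np.array([-2.0, -1.0, 1.0, 2.0]) * h,
                coeffs=np.array([1.0 / 12.0, -2.0 / 3.0, 2.0 / 3.0, -1.0 / 12.0]) / h,
                current_coeff=0.0)
print(fwd, cen, np.exp(1.0))   # both approximate e = 2.71828...
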
Example #6
    def _declare_partials(self,
                          of,
                          wrt,
                          dependent=True,
                          rows=None,
                          cols=None,
                          val=None):
        """
        Store subjacobian metadata for later use.

        Parameters
        ----------
        of : str or list of str
            The name of the residual(s) that derivatives are being computed for.
            May also contain a glob pattern.
        wrt : str or list of str
            The name of the variables that derivatives are taken with respect to.
            This can contain the name of any input or output variable.
            May also contain a glob pattern.
        dependent : bool(True)
            If False, specifies no dependence between the output(s) and the
            input(s). This is only necessary in the case of a sparse global
            jacobian, because if 'dependent=False' is not specified and
            declare_partials is not called for a given pair, then a dense
            matrix of zeros will be allocated in the sparse global jacobian
            for that pair.  In the case of a dense global jacobian it doesn't
            matter because the space for a dense subjac will always be
            allocated for every pair.
        rows : ndarray of int or None
            Row indices for each nonzero entry.  For sparse subjacobians only.
        cols : ndarray of int or None
            Column indices for each nonzero entry.  For sparse subjacobians only.
        val : float or ndarray of float or scipy.sparse
            Value of subjacobian.  If rows and cols are not None, this will
            contain the values found at each (row, col) location in the subjac.
        """
        is_scalar = isscalar(val)

        if dependent:
            if rows is None:
                if val is not None and not is_scalar and not issparse(val):
                    val = atleast_2d(val)
                    val = val.astype(promote_types(val.dtype, float),
                                     copy=False)
                rows_max = cols_max = 0
            else:  # sparse list format
                rows = np.array(rows, dtype=INT_DTYPE, copy=False)
                cols = np.array(cols, dtype=INT_DTYPE, copy=False)

                if rows.shape != cols.shape:
                    raise ValueError('rows and cols must have the same shape,'
                                     ' rows: {}, cols: {}'.format(
                                         rows.shape, cols.shape))

                if is_scalar:
                    val = np.full(rows.size, val, dtype=float)
                    is_scalar = False
                elif val is not None:
                    # np.promote_types will choose the smallest dtype that can contain
                    # both arguments
                    val = atleast_1d(val)
                    safe_dtype = promote_types(val.dtype, float)
                    val = val.astype(safe_dtype, copy=False)

                    if rows.shape != val.shape:
                        raise ValueError(
                            'If rows and cols are specified, val must be a scalar or '
                            'have the same shape, val: {}, '
                            'rows/cols: {}'.format(val.shape, rows.shape))
                else:
                    val = np.zeros_like(rows, dtype=float)

                if rows.size > 0:
                    if rows.min() < 0:
                        # of, wrt = abs_key2rel_key(self, abs_key)
                        msg = '{}: d({})/d({}): row indices must be non-negative'
                        raise ValueError(msg.format(self.pathname, of, wrt))
                    if cols.min() < 0:
                        # of, wrt = abs_key2rel_key(self, abs_key)
                        msg = '{}: d({})/d({}): col indices must be non-negative'
                        raise ValueError(msg.format(self.pathname, of, wrt))
                    rows_max = rows.max()
                    cols_max = cols.max()
                else:
                    rows_max = cols_max = 0

        pattern_matches = self._find_partial_matches(of, wrt)
        abs2meta = self._var_abs2meta

        is_array = isinstance(val, ndarray)

        for of_bundle, wrt_bundle in product(*pattern_matches):
            of_pattern, of_matches = of_bundle
            wrt_pattern, wrt_matches = wrt_bundle
            if not of_matches:
                raise ValueError(
                    'No matches were found for of="{}"'.format(of_pattern))
            if not wrt_matches:
                raise ValueError(
                    'No matches were found for wrt="{}"'.format(wrt_pattern))

            for rel_key in product(of_matches, wrt_matches):
                abs_key = rel_key2abs_key(self, rel_key)
                if not dependent:
                    if abs_key in self._subjacs_info:
                        del self._subjacs_info[abs_key]
                    continue

                if abs_key in self._subjacs_info:
                    meta = self._subjacs_info[abs_key]
                else:
                    meta = SUBJAC_META_DEFAULTS.copy()

                meta['rows'] = rows
                meta['cols'] = cols
                meta['dependent'] = dependent
                meta['shape'] = shape = (abs2meta[abs_key[0]]['size'],
                                         abs2meta[abs_key[1]]['size'])

                if val is None:
                    # we can only get here if rows is None  (we're not sparse list format)
                    meta['value'] = np.zeros(shape)
                elif is_array:
                    if rows is None and val.shape != shape and val.size == shape[
                            0] * shape[1]:
                        meta['value'] = val = val.copy().reshape(shape)
                    else:
                        meta['value'] = val.copy()
                elif is_scalar:
                    meta['value'] = np.full(shape, val, dtype=float)
                else:
                    meta['value'] = val

                if rows_max >= shape[0] or cols_max >= shape[1]:
                    of, wrt = abs_key2rel_key(self, abs_key)
                    msg = '{}: d({})/d({}): Expected {}x{} but declared at least {}x{}'
                    raise ValueError(
                        msg.format(self.pathname, of, wrt, shape[0], shape[1],
                                   rows_max + 1, cols_max + 1))

                self._check_partials_meta(
                    abs_key, meta['value'], shape if rows is None else
                    (rows.shape[0], 1))
                self._subjacs_info[abs_key] = meta
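
A hedged usage sketch of what the sparse list format above encodes: rows, cols, and val have matching lengths and give one value per nonzero entry of the sub-Jacobian (the numbers below are illustrative):

import numpy as np
from scipy.sparse import coo_matrix

# One (row, col, val) triple per nonzero entry of the sub-Jacobian.
rows = np.array([0, 1, 2])
cols = np.array([0, 1, 2])
val = np.array([2.0, 2.0, 2.0])

dense = coo_matrix((val, (rows, cols)), shape=(3, 3)).toarray()
print(dense)   # 3x3 matrix with 2.0 on the diagonal
# Passing a scalar val instead would be broadcast to every (row, col) location,
# matching the np.full(rows.size, val) branch above.
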
Example #7
    def compute_approximations(self, system, jac=None, total=False):
        """
        Execute the system to compute the approximate sub-Jacobians.

        Parameters
        ----------
        system : System
            System on which the execution is run.
        jac : None or dict-like
            If None, update system with the approximated sub-Jacobians. Otherwise, store the
            approximations in the given dict-like object.
        total : bool
            If True total derivatives are being approximated, else partials.
        """
        if len(self._exec_list) == 0:
            return

        if jac is None:
            jac = system._jacobian

        if total:
            current_vec = system._outputs
        else:
            current_vec = system._residuals

        result = system._outputs._clone(True)
        result_array = result._data.copy()
        out_tmp = current_vec._data.copy()
        in_tmp = system._inputs._data.copy()

        # To support driver src_indices, we need to override some checks in Jacobian, but do it
        # selectively.
        uses_src_indices = (system._owns_approx_of_idx or system._owns_approx_wrt_idx) and \
            not isinstance(jac, dict)

        num_par_fd = system.options['num_par_fd']
        use_parallel_fd = num_par_fd > 1 and (system._full_comm is not None
                                              and system._full_comm.size > 1)
        is_parallel = use_parallel_fd or system.comm.size > 1

        results = defaultdict(list)
        iproc = system.comm.rank
        owns = system._owning_rank
        mycomm = system._full_comm if use_parallel_fd else system.comm

        fd_count = 0
        approx_groups = self._get_approx_groups(system)
        for wrt, deltas, coeffs, current_coeff, in_idx, in_size, outputs in approx_groups:

            for i_count, idx in enumerate(in_idx):
                if fd_count % num_par_fd == system._par_fd_id:
                    if current_coeff:
                        result._data[:] = current_vec._data
                        result._data *= current_coeff
                    else:
                        result._data[:] = 0.

                    # Run the Finite Difference
                    for delta, coeff in zip(deltas, coeffs):
                        self._run_point(system, wrt, idx, delta, out_tmp,
                                        in_tmp, result_array, total)
                        result_array *= coeff
                        result._data += result_array

                    if is_parallel:
                        for of, _, out_idx in outputs:
                            if owns[of] == iproc:
                                results[(of, wrt)].append(
                                    (i_count,
                                     result._views_flat[of][out_idx].copy()))
                    else:
                        for of, subjac, out_idx in outputs:
                            subjac[:,
                                   i_count] = result._views_flat[of][out_idx]

                fd_count += 1

        if is_parallel:
            results = _gather_jac_results(mycomm, results)

        for wrt, _, _, _, _, _, outputs in approx_groups:
            for of, subjac, _ in outputs:
                key = (of, wrt)
                if is_parallel:
                    for i, result in results[key]:
                        subjac[:, i] = result

                rel_key = abs_key2rel_key(system, key)

                if uses_src_indices:
                    jac._override_checks = True
                    jac[rel_key] = subjac
                    jac._override_checks = False
                else:
                    jac[rel_key] = subjac
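
A minimal sketch of the round-robin assignment above (the variable values are hypothetical): each process evaluates only the finite-difference points whose running counter matches its par_fd_id modulo num_par_fd, and the resulting columns are gathered afterwards.

num_par_fd = 3     # number of parallel FD buckets (illustrative value)
total_points = 8   # total number of perturbation points across all wrt variables

for par_fd_id in range(num_par_fd):
    # The points this process would evaluate, mirroring the
    # `fd_count % num_par_fd == system._par_fd_id` test above.
    mine = [fd_count for fd_count in range(total_points)
            if fd_count % num_par_fd == par_fd_id]
    print(par_fd_id, mine)
# prints: 0 [0, 3, 6] / 1 [1, 4, 7] / 2 [2, 5]
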
Example #8
    def compute_approximations(self, system, jac, total=False):
        """
        Execute the system to compute the approximate sub-Jacobians.

        Parameters
        ----------
        system : System
            System on which the execution is run.
        jac : dict-like
            Approximations are stored in the given dict-like object.
        total : bool
            If True total derivatives are being approximated, else partials.
        """
        if len(self._exec_list) == 0:
            return

        if total:
            current_vec = system._outputs
        else:
            current_vec = system._residuals

        # Clean vector for results
        results_clone = current_vec._clone(True)

        # Turn on complex step.
        system._set_complex_step_mode(True)
        results_clone.set_complex_step_mode(True)

        # To support driver src_indices, we need to override some checks in Jacobian, but do it
        # selectively.
        uses_src_indices = (system._owns_approx_of_idx or system._owns_approx_wrt_idx) and \
            not isinstance(jac, dict)

        num_par_fd = system.options['num_par_fd']
        use_parallel_fd = num_par_fd > 1 and (system._full_comm is not None
                                              and system._full_comm.size > 1)
        is_parallel = use_parallel_fd or system.comm.size > 1

        results = defaultdict(list)
        iproc = system.comm.rank
        owns = system._owning_rank
        mycomm = system._full_comm if use_parallel_fd else system.comm

        fd_count = 0
        approx_groups = self._get_approx_groups(system)
        for tup in approx_groups:
            wrt, delta, fact, in_idx, in_size, outputs = tup
            for i_count, idx in enumerate(in_idx):
                if fd_count % num_par_fd == system._par_fd_id:
                    # Run the Finite Difference
                    result = self._run_point_complex(system, wrt, idx, delta,
                                                     results_clone, total)

                    if is_parallel:
                        for of, _, out_idx in outputs:
                            if owns[of] == iproc:
                                results[(of, wrt)].append(
                                    (i_count, result._views_flat[of]
                                     [out_idx].imag.copy()))
                    else:
                        for of, subjac, out_idx in outputs:
                            subjac[:, i_count] = result._views_flat[of][
                                out_idx].imag

                fd_count += 1

        if is_parallel:
            results = _gather_jac_results(mycomm, results)

        for wrt, _, fact, _, _, outputs in approx_groups:
            for of, subjac, _ in outputs:
                key = (of, wrt)
                if is_parallel:
                    for i, result in results[key]:
                        subjac[:, i] = result

                subjac *= fact
                rel_key = abs_key2rel_key(system, key)
                if uses_src_indices:
                    jac._override_checks = True
                    jac[rel_key] = subjac
                    jac._override_checks = False
                else:
                    jac[rel_key] = subjac

        # Turn off complex step.
        system._set_complex_step_mode(False)
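
The per-rank results dict above maps each (of, wrt) pair to (column index, column data) tuples; after gathering, those columns are written back into the dense sub-Jacobian. A small serial sketch of that merge step (no MPI, names illustrative):

import numpy as np
from collections import defaultdict

out_size, in_size = 2, 3
results = defaultdict(list)
# Columns computed (possibly on different ranks) for a single (of, wrt) pair.
results[('y', 'x')].append((0, np.array([1.0, 0.0])))
results[('y', 'x')].append((2, np.array([0.0, 5.0])))

subjac = np.zeros((out_size, in_size))
for i, col in results[('y', 'x')]:
    subjac[:, i] = col
print(subjac)   # column 1 stays zero because no rank produced it in this toy case
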
Example #9
    def _declare_partials(self, of, wrt, dependent=True, rows=None, cols=None, val=None):
        """
        Store subjacobian metadata for later use.

        Parameters
        ----------
        of : str or list of str
            The name of the residual(s) that derivatives are being computed for.
            May also contain a glob pattern.
        wrt : str or list of str
            The name of the variables that derivatives are taken with respect to.
            This can contain the name of any input or output variable.
            May also contain a glob pattern.
        dependent : bool(True)
            If False, specifies no dependence between the output(s) and the
            input(s). This is only necessary in the case of a sparse global
            jacobian, because if 'dependent=False' is not specified and
            declare_partials is not called for a given pair, then a dense
            matrix of zeros will be allocated in the sparse global jacobian
            for that pair.  In the case of a dense global jacobian it doesn't
            matter because the space for a dense subjac will always be
            allocated for every pair.
        rows : ndarray of int or None
            Row indices for each nonzero entry.  For sparse subjacobians only.
        cols : ndarray of int or None
            Column indices for each nonzero entry.  For sparse subjacobians only.
        val : float or ndarray of float or scipy.sparse
            Value of subjacobian.  If rows and cols are not None, this will
            contain the values found at each (row, col) location in the subjac.
        """
        is_scalar = isscalar(val)

        if dependent:
            if rows is None:
                if val is not None and not is_scalar and not issparse(val):
                    val = atleast_2d(val)
                    val = val.astype(promote_types(val.dtype, float), copy=False)
                rows_max = cols_max = 0
            else:  # sparse list format
                rows = np.array(rows, dtype=INT_DTYPE, copy=False)
                cols = np.array(cols, dtype=INT_DTYPE, copy=False)

                if rows.shape != cols.shape:
                    raise ValueError('rows and cols must have the same shape,'
                                     ' rows: {}, cols: {}'.format(rows.shape, cols.shape))

                if is_scalar:
                    val = np.full(rows.size, val, dtype=float)
                    is_scalar = False
                elif val is not None:
                    # np.promote_types will choose the smallest dtype that can contain
                    # both arguments
                    val = atleast_1d(val)
                    safe_dtype = promote_types(val.dtype, float)
                    val = val.astype(safe_dtype, copy=False)

                    if rows.shape != val.shape:
                        raise ValueError('If rows and cols are specified, val must be a scalar or '
                                         'have the same shape, val: {}, '
                                         'rows/cols: {}'.format(val.shape, rows.shape))
                else:
                    val = np.zeros_like(rows, dtype=float)

                if rows.size > 0:
                    if rows.min() < 0:
                        msg = '{}: d({})/d({}): row indices must be non-negative'
                        raise ValueError(msg.format(self.pathname, of, wrt))
                    if cols.min() < 0:
                        msg = '{}: d({})/d({}): col indices must be non-negative'
                        raise ValueError(msg.format(self.pathname, of, wrt))
                    rows_max = rows.max()
                    cols_max = cols.max()
                else:
                    rows_max = cols_max = 0

        pattern_matches = self._find_partial_matches(of, wrt)
        abs2meta = self._var_abs2meta

        is_array = isinstance(val, ndarray)

        for of_bundle, wrt_bundle in product(*pattern_matches):
            of_pattern, of_matches = of_bundle
            wrt_pattern, wrt_matches = wrt_bundle
            if not of_matches:
                raise ValueError('No matches were found for of="{}"'.format(of_pattern))
            if not wrt_matches:
                raise ValueError('No matches were found for wrt="{}"'.format(wrt_pattern))

            for rel_key in product(of_matches, wrt_matches):
                abs_key = rel_key2abs_key(self, rel_key)
                if not dependent:
                    if abs_key in self._subjacs_info:
                        del self._subjacs_info[abs_key]
                    continue

                if abs_key in self._subjacs_info:
                    meta = self._subjacs_info[abs_key]
                else:
                    meta = SUBJAC_META_DEFAULTS.copy()

                meta['rows'] = rows
                meta['cols'] = cols
                meta['dependent'] = dependent
                meta['shape'] = shape = (abs2meta[abs_key[0]]['size'], abs2meta[abs_key[1]]['size'])

                if val is None:
                    # we can only get here if rows is None  (we're not sparse list format)
                    meta['value'] = np.zeros(shape)
                elif is_array:
                    if rows is None and val.shape != shape and val.size == shape[0] * shape[1]:
                        meta['value'] = val = val.copy().reshape(shape)
                    else:
                        meta['value'] = val.copy()
                elif is_scalar:
                    meta['value'] = np.full(shape, val, dtype=float)
                else:
                    meta['value'] = val

                if rows_max >= shape[0] or cols_max >= shape[1]:
                    of, wrt = abs_key2rel_key(self, abs_key)
                    msg = '{}: d({})/d({}): Expected {}x{} but declared at least {}x{}'
                    raise ValueError(msg.format(self.pathname, of, wrt, shape[0], shape[1],
                                                rows_max + 1, cols_max + 1))

                self._check_partials_meta(abs_key, meta['value'],
                                          shape if rows is None else (rows.shape[0], 1))
                self._subjacs_info[abs_key] = meta
Example #10
    def compute_approximations(self, system, jac=None, deriv_type='partial'):
        """
        Execute the system to compute the approximate sub-Jacobians.

        Parameters
        ----------
        system : System
            System on which the execution is run.
        jac : None or dict-like
            If None, update system with the approximated sub-Jacobians. Otherwise, store the
            approximations in the given dict-like object.
        deriv_type : str
            One of 'total' or 'partial', indicating if total or partial derivatives are
            being approximated.
        """
        if len(self._exec_list) == 0:
            return

        if jac is None:
            jac = system._jacobian

        if deriv_type == 'total':
            current_vec = system._outputs
        else:
            current_vec = system._residuals

        result = system._outputs._clone(True)
        result_array = result._data.copy()
        out_tmp = current_vec._data.copy()
        in_tmp = system._inputs._data.copy()

        # To support driver src_indices, we need to override some checks in Jacobian, but do it
        # selectively.
        uses_src_indices = (system._owns_approx_of_idx or system._owns_approx_wrt_idx) and \
            not isinstance(jac, dict)

        for key, approximations in groupby(self._exec_list, self._key_fun):
            # groupby (along with this key function) will group all 'of's that have the same wrt and
            # step size.
            wrt, form, order, step, step_calc = key

            # FD forms are written as a collection of changes to inputs (deltas) and the associated
            # coefficients (coeffs). Since we do not need to (re)evaluate the current step, its
            # coefficient is stored separately (current_coeff). For example,
            # f'(x) = (f(x+h) - f(x))/h + O(h) = 1/h * f(x+h) + (-1/h) * f(x) + O(h)
            # would be stored as deltas = [h], coeffs = [1/h], and current_coeff = -1/h.
            # A fourth-order accurate central approximation for the first derivative would be stored
            # as deltas = [-2, -1, 1, 2] * h, coeffs = [1/12, -2/3, 2/3, -1/12] * 1/h,
            # current_coeff = 0.
            fd_form = _generate_fd_coeff(form, order)

            if step_calc == 'rel':
                if wrt in system._outputs._views_flat:
                    scale = np.linalg.norm(system._outputs._views_flat[wrt])
                else:
                    scale = np.linalg.norm(system._inputs._views_flat[wrt])
                step *= scale

            deltas = fd_form.deltas * step
            coeffs = fd_form.coeffs / step
            current_coeff = fd_form.current_coeff / step

            if wrt in system._owns_approx_wrt_idx:
                in_idx = system._owns_approx_wrt_idx[wrt]
                in_size = len(in_idx)
            else:
                in_size = system._var_allprocs_abs2meta[wrt]['size']
                in_idx = range(in_size)

            result._data[:] = system._outputs._data

            outputs = []

            # Note: If access to `approximations` is required again in the future, we will need to
            # throw it in a list first. The groupby iterator only works once.
            for approx_tuple in approximations:
                of = approx_tuple[0]
                # TODO: Sparse derivatives
                if of in system._owns_approx_of_idx:
                    out_idx = system._owns_approx_of_idx[of]
                    out_size = len(out_idx)
                else:
                    out_size = system._var_allprocs_abs2meta[of]['size']
                outputs.append((of, np.zeros((out_size, in_size))))

            for i_count, idx in enumerate(in_idx):
                if current_coeff:
                    result._data[:] = current_vec._data
                    result._data *= current_coeff
                else:
                    result._data[:] = 0.

                # Run the Finite Difference
                for delta, coeff in zip(deltas, coeffs):
                    input_delta = [(wrt, idx, delta)]
                    self._run_point(system, input_delta, out_tmp, in_tmp,
                                    result_array, deriv_type)
                    result_array *= coeff
                    result._data += result_array

                for of, subjac in outputs:
                    if of in system._owns_approx_of_idx:
                        out_idx = system._owns_approx_of_idx[of]
                        subjac[:, i_count] = result._views_flat[of][out_idx]
                    else:
                        subjac[:, i_count] = result._views_flat[of]

            for of, subjac in outputs:
                rel_key = abs_key2rel_key(system, (of, wrt))
                if uses_src_indices:
                    jac._override_checks = True
                    jac[rel_key] = subjac
                    jac._override_checks = False
                else:
                    jac[rel_key] = subjac
Example #11
    def compute_approximations(self, system, jac=None, deriv_type='partial'):
        """
        Execute the system to compute the approximate sub-Jacobians.

        Parameters
        ----------
        system : System
            System on which the execution is run.
        jac : None or dict-like
            If None, update system with the approximated sub-Jacobians. Otherwise, store the
            approximations in the given dict-like object.
        deriv_type : str
            One of 'total' or 'partial', indicating if total or partial derivatives are
            being approximated.
        """
        if jac is None:
            jac = system._jacobian

        if deriv_type == 'total':
            current_vec = system._outputs
        elif deriv_type == 'partial':
            current_vec = system._residuals
        else:
            raise ValueError('deriv_type must be one of "total" or "partial"')

        result = system._outputs._clone(True)
        result_array = result.get_data()
        out_tmp = current_vec.get_data()
        in_tmp = system._inputs.get_data()

        for key, approximations in groupby(self._exec_list, self._key_fun):
            # groupby (along with this key function) will group all 'of's that have the same wrt and
            # step size.
            wrt, form, order, step, step_calc = key

            # FD forms are written as a collection of changes to inputs (deltas) and the associated
            # coefficients (coeffs). Since we do not need to (re)evaluate the current step, its
            # coefficient is stored separately (current_coeff). For example,
            # f'(x) = (f(x+h) - f(x))/h + O(h) = 1/h * f(x+h) + (-1/h) * f(x) + O(h)
            # would be stored as deltas = [h], coeffs = [1/h], and current_coeff = -1/h.
            # A fourth-order accurate central approximation for the first derivative would be stored
            # as deltas = [-2, -1, 1, 2] * h, coeffs = [1/12, -2/3, 2/3, -1/12] * 1/h,
            # current_coeff = 0.
            fd_form = _generate_fd_coeff(form, order)

            if step_calc == 'rel':
                if wrt in system._outputs._views_flat:
                    scale = np.linalg.norm(system._outputs._views_flat[wrt])
                else:
                    scale = np.linalg.norm(system._inputs._views_flat[wrt])
                step *= scale

            deltas = fd_form.deltas * step
            coeffs = fd_form.coeffs / step
            current_coeff = fd_form.current_coeff / step

            if wrt in system._owns_approx_wrt_idx:
                in_idx = system._owns_approx_wrt_idx[wrt]
                in_size = len(in_idx)
            else:
                in_size = system._var_abs2meta[wrt]['size']
                in_idx = range(in_size)

            result.set_vec(system._outputs)

            outputs = []

            # Note: If access to `approximations` is required again in the future, we will need to
            # throw it in a list first. The groupby iterator only works once.
            for approx_tuple in approximations:
                of = approx_tuple[0]
                # TODO: Sparse derivatives
                if of in system._owns_approx_of_idx:
                    out_idx = system._owns_approx_of_idx[of]
                    out_size = len(out_idx)
                else:
                    out_size = system._var_abs2meta[of]['size']
                outputs.append((of, np.zeros((out_size, in_size))))

            for i_count, idx in enumerate(in_idx):
                if current_coeff:
                    result.set_vec(current_vec)
                    result *= current_coeff
                else:
                    result.set_const(0.)

                # Run the Finite Difference
                for delta, coeff in zip(deltas, coeffs):
                    input_delta = [(wrt, idx, delta)]
                    self._run_point(system, input_delta, out_tmp, in_tmp, result_array, deriv_type)
                    result_array *= coeff
                    result.iadd_data(result_array)

                if deriv_type == 'total':
                    # Sign difference between output and resids. This arises from the definitions
                    # in the unified derivatives equations.
                    # For ExplicitComponent: resid = output(n-1) - output(n)
                    # so dresid/d* = - doutput/d*
                    result *= -1.0

                for of, subjac in outputs:

                    if of in system._owns_approx_of_idx:
                        out_idx = system._owns_approx_of_idx[of]
                        subjac[:, i_count] = result._views_flat[of][out_idx]
                    else:
                        subjac[:, i_count] = result._views_flat[of]

            for of, subjac in outputs:
                rel_key = abs_key2rel_key(system, (of, wrt))
                jac[rel_key] = subjac