def _approx_partials(self, of, wrt, method='fd', **kwargs):
    """
    Mark the given derivative pairs as approximated rather than analytic.

    Parameters
    ----------
    of : str or list of str
        Residual name(s) whose derivatives are requested.  May also contain
        a glob pattern.
    wrt : str or list of str
        Variable name(s) the derivatives are taken with respect to.  Any
        input or output variable name (or glob pattern) is accepted.
    method : str
        The type of approximation that should be used. Valid options include:
            - 'fd': Finite Difference
    **kwargs : dict
        Additional options controlling the behavior of the approximation.
    """
    subjacs = self._subjacs_info

    for (of_pat, of_hits), (wrt_pat, wrt_hits) in product(*self._find_partial_matches(of, wrt)):
        # A pattern that matched nothing is almost certainly a user typo,
        # so report it loudly instead of silently skipping it.
        if not of_hits:
            raise ValueError('No matches were found for of="{}"'.format(of_pat))
        if not wrt_hits:
            raise ValueError('No matches were found for wrt="{}"'.format(wrt_pat))

        for rel_key in product(of_hits, wrt_hits):
            abs_key = rel_key2abs_key(self, rel_key)
            # Update any existing metadata in place; otherwise start from the
            # framework defaults.
            meta = subjacs.get(abs_key)
            if meta is None:
                meta = SUBJAC_META_DEFAULTS.copy()
            meta['method'] = method
            meta.update(kwargs)
            subjacs[abs_key] = meta
def _approx_partials(self, of, wrt, method='fd', **kwargs):
    """
    Inform the framework that the specified derivatives are to be approximated.

    Parameters
    ----------
    of : str or list of str
        The name of the residual(s) that derivatives are being computed for.
        May also contain a glob pattern.
    wrt : str or list of str
        The name of the variables that derivatives are taken with respect to.
        This can contain the name of any input or output variable.
        May also contain a glob pattern.
    method : str
        The type of approximation that should be used. Valid options include:
            - 'fd': Finite Difference
    **kwargs : dict
        Keyword arguments for controlling the behavior of the approximation.
    """
    # Expand any glob patterns in 'of'/'wrt' into concrete variable names.
    pattern_matches = self._find_partial_matches(of, wrt)
    for of_bundle, wrt_bundle in product(*pattern_matches):
        of_pattern, of_matches = of_bundle
        wrt_pattern, wrt_matches = wrt_bundle
        # An unmatched pattern is reported as an error rather than ignored.
        if not of_matches:
            raise ValueError('No matches were found for of="{}"'.format(of_pattern))
        if not wrt_matches:
            raise ValueError('No matches were found for wrt="{}"'.format(wrt_pattern))
        info = self._subjacs_info
        for rel_key in product(of_matches, wrt_matches):
            abs_key = rel_key2abs_key(self, rel_key)
            # Reuse existing subjac metadata if present; otherwise start
            # from the framework defaults.
            if abs_key in info:
                meta = info[abs_key]
            else:
                meta = SUBJAC_META_DEFAULTS.copy()
            meta['method'] = method
            meta.update(kwargs)
            info[abs_key] = meta
def _setup_partials(self, recurse=True):
    """
    Process all partials and approximations that the user declared.

    Metamodel needs to declare its partials after inputs and outputs are known.

    Parameters
    ----------
    recurse : bool
        Whether to call this method in subsystems.
    """
    super(MetaModelUnStructuredComp, self)._setup_partials()
    vec_size = self.options['vec_size']
    if vec_size > 1:
        # Sparse specification of partials for vectorized models.
        for wrt, n_wrt in self._surrogate_input_names:
            for of, shape_of in self._surrogate_output_names:
                n_of = np.prod(shape_of)
                # Dense sub-block pattern for a single point of the vector.
                rows = np.repeat(np.arange(n_of), n_wrt)
                cols = np.tile(np.arange(n_wrt), n_of)
                nnz = len(rows)
                # Replicate the sub-block once per point, offsetting each
                # copy to its point's row/col start (block-diagonal layout).
                rows = np.tile(rows, vec_size) + np.repeat(np.arange(vec_size), nnz) * n_of
                cols = np.tile(cols, vec_size) + np.repeat(np.arange(vec_size), nnz) * n_wrt
                self._declare_partials(of=of, wrt=wrt, rows=rows, cols=cols)
    else:
        # Dense specification of partials for non-vectorized models.
        self._declare_partials(
            of=[name[0] for name in self._surrogate_output_names],
            wrt=[name[0] for name in self._surrogate_input_names])
    # warn the user that if they don't explicitly set options for fd,
    # the defaults will be used
    # get a list of approximated partials
    declared_partials = set()
    for of, wrt, method, fd_options in self._approximated_partials:
        pattern_matches = self._find_partial_matches(of, wrt)
        for of_bundle, wrt_bundle in product(*pattern_matches):
            of_pattern, of_matches = of_bundle
            wrt_pattern, wrt_matches = wrt_bundle
            for rel_key in product(of_matches, wrt_matches):
                abs_key = rel_key2abs_key(self, rel_key)
                declared_partials.add(abs_key)
    non_declared_partials = []
    for of, n_of in self._surrogate_output_names:
        has_derivs = False
        surrogate = self._metadata(of).get('surrogate')
        if surrogate:
            # Surrogates that override linearize provide analytic derivatives.
            has_derivs = overrides_method('linearize', surrogate, SurrogateModel)
        if not has_derivs:
            # This output will be finite-differenced; record any pairs the
            # user did not explicitly declare so we can warn about them.
            for wrt, n_wrt in self._surrogate_input_names:
                abs_key = rel_key2abs_key(self, (of, wrt))
                if abs_key not in declared_partials:
                    non_declared_partials.append(abs_key)
    if non_declared_partials:
        msg = "Because the MetaModelUnStructuredComp '{}' uses a surrogate " \
              "which does not define a linearize method,\nOpenMDAO will use " \
              "finite differences to compute derivatives. Some of the derivatives " \
              "will be computed\nusing default finite difference " \
              "options because they were not explicitly declared.\n".format(self.name)
        msg += "The derivatives computed using the defaults are:\n"
        for abs_key in non_declared_partials:
            msg += " {}, {}\n".format(*abs_key)
        simple_warning(msg, RuntimeWarning)
    for out_name, out_shape in self._surrogate_output_names:
        surrogate = self._metadata(out_name).get('surrogate')
        if surrogate and not overrides_method('linearize', surrogate, SurrogateModel):
            # Approximate all partials of this output via finite difference.
            self._approx_partials(
                of=out_name,
                wrt=[name[0] for name in self._surrogate_input_names],
                method='fd')
            # Make sure an fd scheme instance exists for this component.
            if "fd" not in self._approx_schemes:
                self._approx_schemes['fd'] = FiniteDifference()
def _declare_partials(self, of, wrt, dependent=True, rows=None, cols=None, val=None):
    """
    Store subjacobian metadata for later use.

    Parameters
    ----------
    of : str or list of str
        The name of the residual(s) that derivatives are being computed for.
        May also contain a glob pattern.
    wrt : str or list of str
        The name of the variables that derivatives are taken with respect to.
        This can contain the name of any input or output variable.
        May also contain a glob pattern.
    dependent : bool(True)
        If False, specifies no dependence between the output(s) and the input(s). This is only
        necessary in the case of a sparse global jacobian, because if 'dependent=False' is not
        specified and declare_partials is not called for a given pair, then a dense matrix of
        zeros will be allocated in the sparse global jacobian for that pair. In the case of a
        dense global jacobian it doesn't matter because the space for a dense subjac will
        always be allocated for every pair.
    rows : ndarray of int or None
        Row indices for each nonzero entry. For sparse subjacobians only.
    cols : ndarray of int or None
        Column indices for each nonzero entry. For sparse subjacobians only.
    val : float or ndarray of float or scipy.sparse
        Value of subjacobian. If rows and cols are not None, this will contain the values
        found at each (row, col) location in the subjac.
    """
    if dependent and val is not None and not issparse(val):
        val = np.atleast_1d(val)
        # np.promote_types will choose the smallest dtype that can contain both arguments
        safe_dtype = np.promote_types(val.dtype, float)
        val = val.astype(safe_dtype, copy=False)
    if dependent and rows is not None:
        rows = np.array(rows, dtype=int, copy=False)
        cols = np.array(cols, dtype=int, copy=False)
        # rows and cols describe the same set of nonzeros, so their shapes
        # must agree exactly.
        if rows.shape != cols.shape:
            raise ValueError('rows and cols must have the same shape,'
                             ' rows: {}, cols: {}'.format(rows.shape, cols.shape))
        if val is not None and val.shape != (1,) and rows.shape != val.shape:
            raise ValueError('If rows and cols are specified, val must be a scalar or have the '
                             'same shape, val: {}, rows/cols: {}'.format(val.shape, rows.shape))
        if val is None:
            # Sparse-list subjac with no value given: default to zeros.
            val = np.zeros_like(rows, dtype=float)
    pattern_matches = self._find_partial_matches(of, wrt)
    multiple_items = False
    for of_bundle, wrt_bundle in product(*pattern_matches):
        of_pattern, of_matches = of_bundle
        wrt_pattern, wrt_matches = wrt_bundle
        if not of_matches:
            raise ValueError('No matches were found for of="{}"'.format(of_pattern))
        if not wrt_matches:
            raise ValueError('No matches were found for wrt="{}"'.format(wrt_pattern))
        # Copy 'val' whenever it will be stored under more than one key, so
        # the subjacs don't alias a single shared array.
        make_copies = (multiple_items or len(of_matches) > 1 or len(wrt_matches) > 1)
        # Setting this to true means that future loop iterations (i.e. if there are multiple
        # items in either of or wrt) will make copies.
        multiple_items = True
        for rel_key in product(of_matches, wrt_matches):
            abs_key = rel_key2abs_key(self, rel_key)
            if not dependent:
                # Declaring independence removes any existing subjac entry.
                if abs_key in self._subjacs_info:
                    del self._subjacs_info[abs_key]
                continue
            if abs_key in self._subjacs_info:
                meta = self._subjacs_info[abs_key]
            else:
                meta = SUBJAC_META_DEFAULTS.copy()
            meta['rows'] = rows
            meta['cols'] = cols
            meta['value'] = deepcopy(val) if make_copies else val
            meta['dependent'] = dependent
            self._check_partials_meta(abs_key, meta)
            self._subjacs_info[abs_key] = meta
def _setup_partials(self):
    """
    Process all partials and approximations that the user declared.

    Metamodel needs to declare its partials after inputs and outputs are known.
    """
    super()._setup_partials()

    vec_size = self.options['vec_size']
    in_pairs = self._surrogate_input_names
    out_pairs = self._surrogate_output_names

    if vec_size > 1:
        batch = np.arange(vec_size)

        # Vectorized model: the points are independent, so each (of, wrt)
        # jacobian is block diagonal.  Build the dense pattern for a single
        # point and replicate it with a per-point row/col offset.
        for wrt_name, wrt_size in in_pairs:
            for of_name, of_shape in out_pairs:
                of_size = np.prod(of_shape)
                base_rows = np.repeat(np.arange(of_size), wrt_size)
                base_cols = np.tile(np.arange(wrt_size), of_size)
                shift = np.repeat(batch, len(base_rows))
                self._declare_partials(of=of_name, wrt=wrt_name, dct={
                    'rows': np.tile(base_rows, vec_size) + shift * of_size,
                    'cols': np.tile(base_cols, vec_size) + shift * wrt_size,
                    'dependent': True,
                })
    else:
        # Non-vectorized model: a single dense declaration covers every pair.
        self._declare_partials(
            of=tuple(name for name, _ in out_pairs),
            wrt=tuple(name for name, _ in in_pairs),
            dct={'value': None, 'dependent': True})

    # Support for user declaring fd partials in a child class and assigning new defaults.
    # We want a warning for all partials that were not explicitly declared.
    # Capture this BEFORE approximating below, so freshly added 'method'
    # entries don't mask undeclared pairs.
    declared_partials = {key for key, meta in self._subjacs_info.items()
                         if 'method' in meta and meta['method']}

    # Gather undeclared fd partials on surrogates that don't support analytic
    # derivatives.  While we do this, declare the missing ones.
    undeclared = []
    for of_name, _ in out_pairs:
        surrogate = self._metadata(of_name).get('surrogate')
        if surrogate and not overrides_method('linearize', surrogate, SurrogateModel):
            wrt_names = [name for name, _ in in_pairs]
            self._approx_partials(of=of_name, wrt=wrt_names, method='fd')
            for wrt_name in wrt_names:
                abs_key = rel_key2abs_key(self, (of_name, wrt_name))
                if abs_key not in declared_partials:
                    undeclared.append(abs_key)

    if undeclared:
        self._get_approx_scheme('fd')
        msg = ("Because the MetaModelUnStructuredComp '{}' uses a surrogate "
               "which does not define a linearize method,\nOpenMDAO will use "
               "finite differences to compute derivatives. Some of the derivatives "
               "will be computed\nusing default finite difference "
               "options because they were not explicitly declared.\n".format(self.name))
        msg += "The derivatives computed using the defaults are:\n"
        for abs_key in undeclared:
            msg += " {}, {}\n".format(*abs_key)
        issue_warning(msg, category=DerivativesWarning)
def _setup_partials(self, recurse=True):
    """
    Process all partials and approximations that the user declared.

    Metamodel needs to declare its partials after inputs and outputs are known.

    Parameters
    ----------
    recurse : bool
        Whether to call this method in subsystems.
    """
    super(MetaModelUnStructuredComp, self)._setup_partials()
    vec_size = self.options['vec_size']
    if vec_size > 1:
        vec_arange = np.arange(vec_size)
        # Sparse specification of partials for vectorized models.
        for wrt, n_wrt in self._surrogate_input_names:
            for of, shape_of in self._surrogate_output_names:
                n_of = np.prod(shape_of)
                # Dense sub-block pattern for a single point of the vector.
                rows = np.repeat(np.arange(n_of), n_wrt)
                cols = np.tile(np.arange(n_wrt), n_of)
                # Replicate the sub-block once per point, offsetting each
                # copy to its point's row/col start (block-diagonal layout).
                repeat = np.repeat(vec_arange, len(rows))
                rows = np.tile(rows, vec_size) + repeat * n_of
                cols = np.tile(cols, vec_size) + repeat * n_wrt
                dct = {
                    'rows': rows,
                    'cols': cols,
                    'dependent': True,
                }
                self._declare_partials(of=of, wrt=wrt, dct=dct)
    else:
        dct = {
            'value': None,
            'dependent': True,
        }
        # Dense specification of partials for non-vectorized models.
        self._declare_partials(
            of=tuple([name[0] for name in self._surrogate_output_names]),
            wrt=tuple([name[0] for name in self._surrogate_input_names]),
            dct=dct)
    # warn the user that if they don't explicitly set options for fd,
    # the defaults will be used
    # get a list of approximated partials
    # NOTE: this snapshot is taken before _approx_partials() below adds
    # 'method' entries, so only user-declared partials are counted.
    declared_partials = set([
        key for key, dct in iteritems(self._subjacs_info)
        if 'method' in dct and dct['method']
    ])
    non_declared_partials = []
    for of, n_of in self._surrogate_output_names:
        has_derivs = False
        surrogate = self._metadata(of).get('surrogate')
        if surrogate:
            # Surrogates that override linearize provide analytic derivatives.
            has_derivs = overrides_method('linearize', surrogate, SurrogateModel)
        if not has_derivs:
            # This output will be finite-differenced; record any pairs the
            # user did not explicitly declare so we can warn about them.
            for wrt, n_wrt in self._surrogate_input_names:
                abs_key = rel_key2abs_key(self, (of, wrt))
                if abs_key not in declared_partials:
                    non_declared_partials.append(abs_key)
    if non_declared_partials:
        msg = "Because the MetaModelUnStructuredComp '{}' uses a surrogate " \
              "which does not define a linearize method,\nOpenMDAO will use " \
              "finite differences to compute derivatives. Some of the derivatives " \
              "will be computed\nusing default finite difference " \
              "options because they were not explicitly declared.\n".format(self.name)
        msg += "The derivatives computed using the defaults are:\n"
        for abs_key in non_declared_partials:
            msg += " {}, {}\n".format(*abs_key)
        simple_warning(msg, RuntimeWarning)
    for out_name, out_shape in self._surrogate_output_names:
        surrogate = self._metadata(out_name).get('surrogate')
        if surrogate and not overrides_method('linearize', surrogate, SurrogateModel):
            # Approximate all partials of this output via finite difference
            # and make sure the fd scheme instance exists.
            self._approx_partials(
                of=out_name,
                wrt=[name[0] for name in self._surrogate_input_names],
                method='fd')
            self._get_approx_scheme('fd')
def _declare_partials(self, of, wrt, dependent=True, rows=None, cols=None, val=None):
    """
    Store subjacobian metadata for later use.

    Parameters
    ----------
    of : str or list of str
        The name of the residual(s) that derivatives are being computed for.
        May also contain a glob pattern.
    wrt : str or list of str
        The name of the variables that derivatives are taken with respect to.
        This can contain the name of any input or output variable.
        May also contain a glob pattern.
    dependent : bool(True)
        If False, specifies no dependence between the output(s) and the input(s). This is only
        necessary in the case of a sparse global jacobian, because if 'dependent=False' is not
        specified and declare_partials is not called for a given pair, then a dense matrix of
        zeros will be allocated in the sparse global jacobian for that pair. In the case of a
        dense global jacobian it doesn't matter because the space for a dense subjac will
        always be allocated for every pair.
    rows : ndarray of int or None
        Row indices for each nonzero entry. For sparse subjacobians only.
    cols : ndarray of int or None
        Column indices for each nonzero entry. For sparse subjacobians only.
    val : float or ndarray of float or scipy.sparse
        Value of subjacobian. If rows and cols are not None, this will contain the values
        found at each (row, col) location in the subjac.
    """
    is_scalar = isscalar(val)
    if dependent:
        if rows is None:
            # Dense (or scipy.sparse) declaration: promote array values to
            # at least float so later in-place math doesn't truncate.
            if val is not None and not is_scalar and not issparse(val):
                val = atleast_2d(val)
                val = val.astype(promote_types(val.dtype, float), copy=False)
            rows_max = cols_max = 0
        else:  # sparse list format
            rows = np.array(rows, dtype=INT_DTYPE, copy=False)
            cols = np.array(cols, dtype=INT_DTYPE, copy=False)
            # rows and cols describe the same set of nonzeros, so their
            # shapes must agree exactly.
            if rows.shape != cols.shape:
                raise ValueError('rows and cols must have the same shape,'
                                 ' rows: {}, cols: {}'.format(rows.shape, cols.shape))
            if is_scalar:
                # Broadcast the scalar over every nonzero entry.
                val = np.full(rows.size, val, dtype=float)
                is_scalar = False
            elif val is not None:
                # np.promote_types will choose the smallest dtype that can contain
                # both arguments
                val = atleast_1d(val)
                safe_dtype = promote_types(val.dtype, float)
                val = val.astype(safe_dtype, copy=False)
                if rows.shape != val.shape:
                    raise ValueError('If rows and cols are specified, val must be a scalar or '
                                     'have the same shape, val: {}, '
                                     'rows/cols: {}'.format(val.shape, rows.shape))
            else:
                # No value supplied: default every nonzero to zero.
                val = np.zeros_like(rows, dtype=float)
            if rows.size > 0:
                if rows.min() < 0:
                    msg = '{}: d({})/d({}): row indices must be non-negative'
                    raise ValueError(msg.format(self.pathname, of, wrt))
                if cols.min() < 0:
                    msg = '{}: d({})/d({}): col indices must be non-negative'
                    raise ValueError(msg.format(self.pathname, of, wrt))
                # Track the extent of the declared pattern so each matched
                # subjac can be checked against its actual shape below.
                rows_max = rows.max()
                cols_max = cols.max()
            else:
                rows_max = cols_max = 0
    pattern_matches = self._find_partial_matches(of, wrt)
    abs2meta = self._var_abs2meta
    is_array = isinstance(val, ndarray)
    for of_bundle, wrt_bundle in product(*pattern_matches):
        of_pattern, of_matches = of_bundle
        wrt_pattern, wrt_matches = wrt_bundle
        if not of_matches:
            raise ValueError('No matches were found for of="{}"'.format(of_pattern))
        if not wrt_matches:
            raise ValueError('No matches were found for wrt="{}"'.format(wrt_pattern))
        for rel_key in product(of_matches, wrt_matches):
            abs_key = rel_key2abs_key(self, rel_key)
            if not dependent:
                # Declaring independence removes any existing subjac entry.
                if abs_key in self._subjacs_info:
                    del self._subjacs_info[abs_key]
                continue
            if abs_key in self._subjacs_info:
                meta = self._subjacs_info[abs_key]
            else:
                meta = SUBJAC_META_DEFAULTS.copy()
            meta['rows'] = rows
            meta['cols'] = cols
            meta['dependent'] = dependent
            meta['shape'] = shape = (abs2meta[abs_key[0]]['size'], abs2meta[abs_key[1]]['size'])
            if val is None:
                # we can only get here if rows is None (we're not sparse list format)
                meta['value'] = np.zeros(shape)
            elif is_array:
                if rows is None and val.shape != shape and val.size == shape[0] * shape[1]:
                    # Same number of entries but wrong layout: reshape once
                    # and reuse the reshaped array for later matches.
                    meta['value'] = val = val.copy().reshape(shape)
                else:
                    meta['value'] = val.copy()
            elif is_scalar:
                meta['value'] = np.full(shape, val, dtype=float)
            else:
                # scipy.sparse value: stored as-is.
                meta['value'] = val
            if rows_max >= shape[0] or cols_max >= shape[1]:
                of, wrt = abs_key2rel_key(self, abs_key)
                msg = '{}: d({})/d({}): Expected {}x{} but declared at least {}x{}'
                raise ValueError(msg.format(self.pathname, of, wrt, shape[0], shape[1],
                                            rows_max + 1, cols_max + 1))
            self._check_partials_meta(abs_key, meta['value'],
                                      shape if rows is None else (rows.shape[0], 1))
            self._subjacs_info[abs_key] = meta
def _declare_partials(self, of, wrt, dependent=True, rows=None, cols=None, val=None):
    """
    Store subjacobian metadata for later use.

    Parameters
    ----------
    of : str or list of str
        The name of the residual(s) that derivatives are being computed for.
        May also contain a glob pattern.
    wrt : str or list of str
        The name of the variables that derivatives are taken with respect to.
        This can contain the name of any input or output variable.
        May also contain a glob pattern.
    dependent : bool(True)
        If False, specifies no dependence between the output(s) and the input(s). This is only
        necessary in the case of a sparse global jacobian, because if 'dependent=False' is not
        specified and declare_partials is not called for a given pair, then a dense matrix of
        zeros will be allocated in the sparse global jacobian for that pair. In the case of a
        dense global jacobian it doesn't matter because the space for a dense subjac will
        always be allocated for every pair.
    rows : ndarray of int or None
        Row indices for each nonzero entry. For sparse subjacobians only.
    cols : ndarray of int or None
        Column indices for each nonzero entry. For sparse subjacobians only.
    val : float or ndarray of float or scipy.sparse
        Value of subjacobian. If rows and cols are not None, this will contain the values
        found at each (row, col) location in the subjac.
    """
    is_scalar = isscalar(val)
    if dependent:
        if rows is None:
            # Dense/scipy.sparse declaration path: promote any array value
            # to at least float precision up front.
            if val is not None and not is_scalar and not issparse(val):
                val = atleast_2d(val)
                val = val.astype(promote_types(val.dtype, float), copy=False)
            rows_max = cols_max = 0
        else:  # sparse list format
            rows = np.array(rows, dtype=INT_DTYPE, copy=False)
            cols = np.array(cols, dtype=INT_DTYPE, copy=False)
            # The two index arrays enumerate the same nonzeros and must
            # therefore be the same shape.
            if rows.shape != cols.shape:
                raise ValueError('rows and cols must have the same shape,'
                                 ' rows: {}, cols: {}'.format(rows.shape, cols.shape))
            if is_scalar:
                # Expand a scalar value over all nonzero entries.
                val = np.full(rows.size, val, dtype=float)
                is_scalar = False
            elif val is not None:
                # np.promote_types will choose the smallest dtype that can contain
                # both arguments
                val = atleast_1d(val)
                safe_dtype = promote_types(val.dtype, float)
                val = val.astype(safe_dtype, copy=False)
                if rows.shape != val.shape:
                    raise ValueError('If rows and cols are specified, val must be a scalar or '
                                     'have the same shape, val: {}, '
                                     'rows/cols: {}'.format(val.shape, rows.shape))
            else:
                # No value given for the sparse list: default to zeros.
                val = np.zeros_like(rows, dtype=float)
            if rows.size > 0:
                # Validate indices and remember the pattern's extent for the
                # per-subjac shape check performed in the loop below.
                if rows.min() < 0:
                    msg = '{}: d({})/d({}): row indices must be non-negative'
                    raise ValueError(msg.format(self.pathname, of, wrt))
                if cols.min() < 0:
                    msg = '{}: d({})/d({}): col indices must be non-negative'
                    raise ValueError(msg.format(self.pathname, of, wrt))
                rows_max = rows.max()
                cols_max = cols.max()
            else:
                rows_max = cols_max = 0
    pattern_matches = self._find_partial_matches(of, wrt)
    abs2meta = self._var_abs2meta
    is_array = isinstance(val, ndarray)
    for of_bundle, wrt_bundle in product(*pattern_matches):
        of_pattern, of_matches = of_bundle
        wrt_pattern, wrt_matches = wrt_bundle
        if not of_matches:
            raise ValueError('No matches were found for of="{}"'.format(of_pattern))
        if not wrt_matches:
            raise ValueError('No matches were found for wrt="{}"'.format(wrt_pattern))
        for rel_key in product(of_matches, wrt_matches):
            abs_key = rel_key2abs_key(self, rel_key)
            if not dependent:
                # 'dependent=False' erases any previously stored subjac.
                if abs_key in self._subjacs_info:
                    del self._subjacs_info[abs_key]
                continue
            if abs_key in self._subjacs_info:
                meta = self._subjacs_info[abs_key]
            else:
                meta = SUBJAC_META_DEFAULTS.copy()
            meta['rows'] = rows
            meta['cols'] = cols
            meta['dependent'] = dependent
            meta['shape'] = shape = (abs2meta[abs_key[0]]['size'], abs2meta[abs_key[1]]['size'])
            if val is None:
                # we can only get here if rows is None (we're not sparse list format)
                meta['value'] = np.zeros(shape)
            elif is_array:
                if rows is None and val.shape != shape and val.size == shape[0] * shape[1]:
                    # Right element count, wrong layout: reshape once and
                    # keep the reshaped array for the remaining matches.
                    meta['value'] = val = val.copy().reshape(shape)
                else:
                    meta['value'] = val.copy()
            elif is_scalar:
                meta['value'] = np.full(shape, val, dtype=float)
            else:
                # scipy.sparse matrix: stored without copying.
                meta['value'] = val
            if rows_max >= shape[0] or cols_max >= shape[1]:
                of, wrt = abs_key2rel_key(self, abs_key)
                msg = '{}: d({})/d({}): Expected {}x{} but declared at least {}x{}'
                raise ValueError(msg.format(self.pathname, of, wrt, shape[0], shape[1],
                                            rows_max + 1, cols_max + 1))
            self._check_partials_meta(abs_key, meta['value'],
                                      shape if rows is None else (rows.shape[0], 1))
            self._subjacs_info[abs_key] = meta
def _setup_partials(self, recurse=True):
    """
    Process all partials and approximations that the user declared.

    Metamodel needs to declare its partials after inputs and outputs are known.

    Parameters
    ----------
    recurse : bool
        Whether to call this method in subsystems.
    """
    super(MetaModelUnStructuredComp, self)._setup_partials()

    vec_size = self.options['vec_size']

    if vec_size > 1:
        batch_idx = np.arange(vec_size)
        # Vectorized model: each point couples only with itself, so declare
        # a block-diagonal sparsity pattern built from one point's dense
        # sub-block replicated with a per-point offset.
        for wrt_name, wrt_size in self._surrogate_input_names:
            for of_name, of_shape in self._surrogate_output_names:
                of_size = np.prod(of_shape)
                sub_rows = np.repeat(np.arange(of_size), wrt_size)
                sub_cols = np.tile(np.arange(wrt_size), of_size)
                offset = np.repeat(batch_idx, len(sub_rows))
                self._declare_partials(
                    of=of_name, wrt=wrt_name,
                    rows=np.tile(sub_rows, vec_size) + offset * of_size,
                    cols=np.tile(sub_cols, vec_size) + offset * wrt_size)
    else:
        # Non-vectorized model: one dense declaration over every pair.
        self._declare_partials(
            of=[name for name, _ in self._surrogate_output_names],
            wrt=[name for name, _ in self._surrogate_input_names])

    # Collect the set of partials the user explicitly asked to approximate,
    # so we can warn below about the ones that will silently fall back to
    # the default finite difference options.
    declared_partials = set()
    for of, wrt, method, fd_options in self._approximated_partials:
        for (_, of_matches), (_, wrt_matches) in product(*self._find_partial_matches(of, wrt)):
            for rel_key in product(of_matches, wrt_matches):
                declared_partials.add(rel_key2abs_key(self, rel_key))

    non_declared_partials = []
    for of_name, _ in self._surrogate_output_names:
        surrogate = self._metadata(of_name).get('surrogate')
        # A surrogate that overrides linearize supplies analytic derivatives.
        has_derivs = bool(surrogate) and overrides_method('linearize', surrogate,
                                                          SurrogateModel)
        if not has_derivs:
            for wrt_name, _ in self._surrogate_input_names:
                abs_key = rel_key2abs_key(self, (of_name, wrt_name))
                if abs_key not in declared_partials:
                    non_declared_partials.append(abs_key)

    if non_declared_partials:
        msg = ("Because the MetaModelUnStructuredComp '{}' uses a surrogate "
               "which does not define a linearize method,\nOpenMDAO will use "
               "finite differences to compute derivatives. Some of the derivatives "
               "will be computed\nusing default finite difference "
               "options because they were not explicitly declared.\n".format(self.name))
        msg += "The derivatives computed using the defaults are:\n"
        for abs_key in non_declared_partials:
            msg += " {}, {}\n".format(*abs_key)
        simple_warning(msg, RuntimeWarning)

    for out_name, out_shape in self._surrogate_output_names:
        surrogate = self._metadata(out_name).get('surrogate')
        if surrogate and not overrides_method('linearize', surrogate, SurrogateModel):
            # Fall back to finite difference for all of this output's partials.
            self._approx_partials(
                of=out_name,
                wrt=[name for name, _ in self._surrogate_input_names],
                method='fd')
            if "fd" not in self._approx_schemes:
                self._approx_schemes['fd'] = FiniteDifference()