def __init__(self): self.name = '' self.pathname = '' self._params_dict = OrderedDict() self._unknowns_dict = OrderedDict() # specify which variables are promoted up to the parent. Wildcards # are allowed. self._promotes = () self.comm = None # create placeholders for all of the vectors self.unknowns = _PlaceholderVecWrapper('unknowns') self.resids = _PlaceholderVecWrapper('resids') self.params = _PlaceholderVecWrapper('params') self.dunknowns = _PlaceholderVecWrapper('dunknowns') self.dresids = _PlaceholderVecWrapper('dresids') self.dparams = _PlaceholderVecWrapper('dparams') # dicts of vectors used for parallel solution of multiple RHS self.dumat = {} self.dpmat = {} self.drmat = {} opt = self.fd_options = OptionsDictionary() opt.add_option('force_fd', False, desc="Set to True to finite difference this system.") opt.add_option('form', 'forward', values=['forward', 'backward', 'central', 'complex_step'], desc="Finite difference mode. (forward, backward, central) " "You can also set to 'complex_step' to peform the complex " "step method if your components support it.") opt.add_option("step_size", 1.0e-6, desc="Default finite difference stepsize") opt.add_option("step_type", 'absolute', values=['absolute', 'relative'], desc='Set to absolute, relative') self._relevance = None self._impl_factory = None
def __init__(self, pathname='', comm=None): self.pathname = pathname self.comm = comm self.vec = None self._vardict = OrderedDict() self._slices = OrderedDict() # add a flat attribute that will have access method consistent # with non-flat access (__getitem__) self.flat = _flat_dict(self._vardict) # Automatic unit conversion in target vectors self.deriv_units = False self.adj_accumulate_mode = False
class System(object): """ Base class for systems in OpenMDAO. When building models, user should inherit from `Group` or `Component`""" def __init__(self): self.name = '' self.pathname = '' self._params_dict = OrderedDict() self._unknowns_dict = OrderedDict() # specify which variables are promoted up to the parent. Wildcards # are allowed. self._promotes = () self.comm = None # create placeholders for all of the vectors self.unknowns = _PlaceholderVecWrapper('unknowns') self.resids = _PlaceholderVecWrapper('resids') self.params = _PlaceholderVecWrapper('params') self.dunknowns = _PlaceholderVecWrapper('dunknowns') self.dresids = _PlaceholderVecWrapper('dresids') self.dparams = _PlaceholderVecWrapper('dparams') # dicts of vectors used for parallel solution of multiple RHS self.dumat = {} self.dpmat = {} self.drmat = {} opt = self.fd_options = OptionsDictionary() opt.add_option('force_fd', False, desc="Set to True to finite difference this system.") opt.add_option('form', 'forward', values=['forward', 'backward', 'central', 'complex_step'], desc="Finite difference mode. (forward, backward, central) " "You can also set to 'complex_step' to peform the complex " "step method if your components support it.") opt.add_option("step_size", 1.0e-6, desc="Default finite difference stepsize") opt.add_option("step_type", 'absolute', values=['absolute', 'relative'], desc='Set to absolute, relative') self._relevance = None self._impl_factory = None def __getitem__(self, name): """ Return the variable of the given name from this system. Args ---- name : str The name of the variable. Returns ------- value The unflattened value of the given variable. """ msg = "Variable '%s' must be accessed from a containing Group" raise RuntimeError(msg % name) def _promoted(self, name): """Determine if the given variable name is being promoted from this `System`. Args ---- name : str The name of a variable, relative to this `System`. Returns ------- bool True if the named variable is being promoted from this `System`. Raises ------ TypeError if the promoted variable specifications are not in a valid format """ if isinstance(self._promotes, string_types): raise TypeError("'%s' promotes must be specified as a list, " "tuple or other iterator of strings, but '%s' was specified" % (self.name, self._promotes)) for prom in self._promotes: if fnmatch(name, prom): for meta in chain(self._params_dict.values(), self._unknowns_dict.values()): if name == meta.get('promoted_name'): return True return False def check_setup(self, out_stream=sys.stdout): """Write a report to the given stream indicating any potential problems found with the current configuration of this ``System``. Args ---- out_stream : a file-like object, optional Stream where report will be written. """ pass def _check_promotes(self): """Check that the `System`s promotes are valid. Raise an Exception if there are any promotes that do not match at least one variable in the `System`. Raises ------ TypeError if the promoted variable specifications are not in a valid format RuntimeError if a promoted variable specification does not match any variables """ if isinstance(self._promotes, string_types): raise TypeError("'%s' promotes must be specified as a list, " "tuple or other iterator of strings, but '%s' was specified" % (self.name, self._promotes)) for prom in self._promotes: found = False for name, meta in chain(self._params_dict.items(), self._unknowns_dict.items()): if fnmatch(meta.get('promoted_name', name), prom): found = True if not found: msg = "'%s' promotes '%s' but has no variables matching that specification" raise RuntimeError(msg % (self.name, prom)) def subsystems(self, local=False, recurse=False, include_self=False): """ Returns an iterator over subsystems. For `System`, this is an empty list. Args ---- local : bool, optional If True, only return those `Components` that are local. Default is False. recurse : bool, optional If True, return all `Components` in the system tree, subject to the value of the local arg. Default is False. typ : type, optional If a class is specified here, only those subsystems that are instances of that type will be returned. Default type is `System`. include_self : bool, optional If True, yield self before iterating over subsystems, assuming type of self is appropriate. Default is False. Returns ------- iterator Iterator over subsystems. """ if include_self: yield self def _setup_paths(self, parent_path): """Set the absolute pathname of each `System` in the tree. Parameter --------- parent_path : str The pathname of the parent `System`, which is to be prepended to the name of this child `System`. """ if parent_path: self.pathname = '.'.join((parent_path, self.name)) else: self.pathname = self.name def clear_dparams(self): """ Zeros out the dparams (dp) vector.""" for parallel_set in self._relevance.vars_of_interest(): for name in parallel_set: if name in self.dpmat: self.dpmat[name].vec[:] = 0.0 self.dpmat[None].vec[:] = 0.0 # Recurse to clear all dparams vectors. for system in self.subsystems(local=True): system.clear_dparams() def solve_linear(self, dumat, drmat, vois, mode=None): """ Single linear solution applied to whatever input is sitting in the rhs vector. Args ---- dumat : dict of `VecWrappers` In forward mode, each `VecWrapper` contains the incoming vector for the states. There is one vector per quantity of interest for this problem. In reverse mode, it contains the outgoing vector for the states. (du) drmat : `dict of VecWrappers` `VecWrapper` containing either the outgoing result in forward mode or the incoming vector in reverse mode. There is one vector per quantity of interest for this problem. (dr) vois : list of strings List of all quantities of interest to key into the mats. mode : string Derivative mode, can be 'fwd' or 'rev', but generally should be called without mode so that the user can set the mode in this system's ln_solver.options. """ pass def is_active(self): """ Returns ------- bool If running under MPI, returns True if this `System` has a valid communicator. Always returns True if not running under MPI. """ return MPI is None or self.comm != MPI.COMM_NULL def get_req_procs(self): """ Returns ------- tuple A tuple of the form (min_procs, max_procs), indicating the min and max processors usable by this `System`. """ return (1, 1) def _setup_communicators(self, comm): """ Assign communicator to this `System` and all of its subsystems. Args ---- comm : an MPI communicator (real or fake) The communicator being offered by the parent system. """ self.comm = comm def _set_vars_as_remote(self): """ Set 'remote' attribute in metadata of all variables for this subsystem. """ for meta in self._params_dict.values(): meta['remote'] = True for meta in self._unknowns_dict.values(): meta['remote'] = True def fd_jacobian(self, params, unknowns, resids, step_size=None, form=None, step_type=None, total_derivs=False, fd_params=None, fd_unknowns=None): """Finite difference across all unknowns in this system w.r.t. all incoming params. Args ---- params : `VecWrapper` `VecWrapper` containing parameters. (p) unknowns : `VecWrapper` `VecWrapper` containing outputs and states. (u) resids : `VecWrapper` `VecWrapper` containing residuals. (r) step_size : float, optional Override all other specifications of finite difference step size. form : float, optional Override all other specifications of form. Can be forward, backward, or central. step_type : float, optional Override all other specifications of step_type. Can be absolute or relative. total_derivs : bool, optional Set to true to calculate total derivatives. Otherwise, partial derivatives are returned. fd_params : list of strings, optional List of parameter name strings with respect to which derivatives are desired. This is used by problem to limit the derivatives that are taken. fd_unknowns : list of strings, optional List of output or state name strings for derivatives to be calculated. This is used by problem to limit the derivatives that are taken. Returns ------- dict Dictionary whose keys are tuples of the form ('unknown', 'param') and whose values are ndarrays containing the derivative for that tuple pair. """ # Params and Unknowns that we provide at this level. if fd_params is None: fd_params = self._get_fd_params() if fd_unknowns is None: fd_unknowns = self._get_fd_unknowns() # Function call arguments have precedence over the system dict. step_size = self.fd_options.get('step_size', step_size) form = self.fd_options.get('form', form) step_type = self.fd_options.get('step_type', step_type) jac = {} cache2 = None # Prepare for calculating partial derivatives or total derivatives if total_derivs == False: run_model = self.apply_nonlinear cache1 = resids.vec.copy() resultvec = resids states = [name for name, meta in self.unknowns.items() if meta.get('state')] else: run_model = self.solve_nonlinear cache1 = unknowns.vec.copy() resultvec = unknowns states = [] # Compute gradient for this param or state. for p_name in chain(fd_params, states): # If our input is connected to a Paramcomp, then we need to twiddle # the unknowns vector instead of the params vector. param_src = self.connections.get(p_name) if param_src is not None: # Have to convert to promoted name to key into unknowns if param_src not in self.unknowns: param_src = self.unknowns.get_promoted_varname(param_src) target_input = unknowns.flat[param_src] else: # Cases where the paramcomp is somewhere above us. if p_name in states: inputs = unknowns else: inputs = params target_input = inputs.flat[p_name] mydict = {} for val in self._params_dict.values(): if val['promoted_name'] == p_name: mydict = val break # Local settings for this var trump all fdstep = mydict.get('fd_step_size', step_size) fdtype = mydict.get('fd_step_type', step_type) fdform = mydict.get('fd_form', form) # Size our Inputs p_size = np.size(target_input) # Size our Outputs for u_name in fd_unknowns: u_size = np.size(unknowns[u_name]) jac[u_name, p_name] = np.zeros((u_size, p_size)) # Finite Difference each index in array for idx in range(p_size): # Relative or Absolute step size if fdtype == 'relative': step = target_input[idx] * fdstep if step < fdstep: step = fdstep else: step = fdstep if fdform == 'forward': target_input[idx] += step run_model(params, unknowns, resids) target_input[idx] -= step # delta resid is delta unknown resultvec.vec[:] -= cache1 resultvec.vec[:] *= (1.0/step) elif fdform == 'backward': target_input[idx] -= step run_model(params, unknowns, resids) target_input[idx] += step # delta resid is delta unknown resultvec.vec[:] -= cache1 resultvec.vec[:] *= (-1.0/step) elif fdform == 'central': target_input[idx] += step run_model(params, unknowns, resids) cache2 = resultvec.vec.copy() target_input[idx] -= step resultvec.vec[:] = cache1 target_input[idx] -= step run_model(params, unknowns, resids) # central difference formula resultvec.vec[:] -= cache2 resultvec.vec[:] *= (-0.5/step) target_input[idx] += step for u_name in fd_unknowns: jac[u_name, p_name][:, idx] = resultvec.flat[u_name] # Restore old residual resultvec.vec[:] = cache1 return jac def _apply_linear_jac(self, params, unknowns, dparams, dunknowns, dresids, mode): """ See apply_linear. This method allows the framework to override any derivative specification in any `Component` or `Group` to perform finite difference.""" if not self._jacobian_cache: msg = ("No derivatives defined for Component '{name}'") msg = msg.format(name=self.name) raise ValueError(msg) for key, J in iteritems(self._jacobian_cache): unknown, param = key # States are never in dparams. if param in dparams: arg_vec = dparams elif param in dunknowns: arg_vec = dunknowns else: continue if unknown not in dresids: continue result = dresids[unknown] # Vectors are flipped during adjoint if mode == 'fwd': dresids[unknown] += J.dot(arg_vec[param].flat).reshape(result.shape) else: arg_vec[param] += J.T.dot(result.flat).reshape(arg_vec[param].shape) def _create_views(self, top_unknowns, parent, my_params, var_of_interest=None): """ A manager of the data transfer of a possibly distributed collection of variables. The variables are based on views into an existing `VecWrapper`. Args ---- top_unknowns : `VecWrapper` The `Problem` level unknowns `VecWrapper`. parent : `System` The `System` which provides the `VecWrapper` on which to create views. my_params : list List of pathnames for parameters that this `Group` is responsible for propagating. relevance : `Relevance` Object containing relevance info for each variable of interest. var_of_interest : str The name of a variable of interest. Returns ------- `VecTuple` A namedtuple of six (6) `VecWrappers`: unknowns, dunknowns, resids, dresids, params, dparams. """ comm = self.comm unknowns_dict = self._unknowns_dict params_dict = self._params_dict voi = var_of_interest relevance = self._relevance # map promoted name in parent to corresponding promoted name in this view umap = _get_relname_map(parent.unknowns, unknowns_dict, self.pathname) if voi is None: self.unknowns = parent.unknowns.get_view(self.pathname, comm, umap, relevance, voi) self.resids = parent.resids.get_view(self.pathname, comm, umap, relevance, voi) self.params = parent._impl_factory.create_tgt_vecwrapper(self.pathname, comm) self.params.setup(parent.params, params_dict, top_unknowns, my_params, self.connections, store_byobjs=True) self.dumat[voi] = parent.dumat[voi].get_view(self.pathname, comm, umap, relevance, voi) self.drmat[voi] = parent.drmat[voi].get_view(self.pathname, comm, umap, relevance, voi) self.dpmat[voi] = parent._impl_factory.create_tgt_vecwrapper(self.pathname, comm) self.dpmat[voi].setup(parent.dpmat[voi], params_dict, top_unknowns, my_params, self.connections, relevant_vars=relevance[voi]) #def get_combined_jac(self, J): #""" #Take a J dict that's distributed, i.e., has different values across #different MPI processes, and return a dict that contains all of the #values from all of the processes. If values are duplicated, use the #value from the lowest rank process. Note that J has a nested dict #structure. #Args #---- #J : `dict` #Distributed Jacobian #Returns #------- #`dict` #Local gathered Jacobian #""" #comm = self.comm #if not self.is_active(): #return J #myrank = comm.rank #tups = [] ## Gather a list of local tuples for J. #for output, dct in J.items(): #for param, value in dct.items(): ## Params are already only on this process. We need to add ## only outputs of components that are on this process. #sub = getattr(self, output.partition('.')[0]) #if sub.is_active() and value is not None and value.size > 0: #tups.append((output, param)) #dist_tups = comm.gather(tups, root=0) #tupdict = {} #if myrank == 0: #for rank, tups in enumerate(dist_tups): #for tup in tups: #if not tup in tupdict: #tupdict[tup] = rank ##get rid of tups from the root proc before bcast #for tup, rank in tupdict.items(): #if rank == 0: #del tupdict[tup] #tupdict = comm.bcast(tupdict, root=0) #if myrank == 0: #for (param, output), rank in tupdict.items(): #J[param][output] = comm.recv(source=rank, tag=0) #else: #for (param, output), rank in tupdict.items(): #if rank == myrank: #comm.send(J[param][output], dest=0, tag=0) ## FIXME: rework some of this using knowledge of local_var_sizes in order ## to avoid any unnecessary data passing ## return the combined dict #return comm.bcast(J, root=0) def _get_var_pathname(self, name): if self.pathname: return '.'.join((self.pathname, name)) return name
class VecWrapper(object): """ A dict-like container of a collection of variables. Args ---- pathname : str, optional the pathname of the containing `System` comm : an MPI communicator (real or fake) a communicator that can be used for distributed operations when running under MPI. If not running under MPI, it is ignored Attributes ---------- idx_arr_type : dtype, optional A dtype indicating how index arrays are to be represented. The value 'i' indicates an numpy integer array, other implementations, e.g., petsc, will define this differently. """ idx_arr_type = 'i' def __init__(self, pathname='', comm=None): self.pathname = pathname self.comm = comm self.vec = None self._vardict = OrderedDict() self._slices = OrderedDict() # add a flat attribute that will have access method consistent # with non-flat access (__getitem__) self.flat = _flat_dict(self._vardict) # Automatic unit conversion in target vectors self.deriv_units = False self.adj_accumulate_mode = False def metadata(self, name): """ Returns the metadata for the named variable. Args ---- name : str Name of variable to get the metadata for. Returns ------- dict The metadata dict for the named variable. Raises ------- KeyError If the named variable is not in this vector. """ try: return self._vardict[name] except KeyError as error: msg = "Variable '{name}' does not exist".format(name=name) raise KeyError(msg) def _setup_prom_map(self): """ Sets up the internal dict that maps absolute name to promoted name. """ self._to_prom_name = {} for prom_name, meta in self.items(): self._to_prom_name[meta['pathname']] = prom_name def __getitem__(self, name): """ Retrieve unflattened value of named var. Args ---- name : str Name of variable to get the value for. Returns ------- The unflattened value of the named variable. """ meta = self.metadata(name) if meta.get('pass_by_obj'): return meta['val'].val unitconv = meta.get('unit_conv') shape = meta.get('shape') # For dparam vector, getitem is disabled in adjoint mode. if self.adj_accumulate_mode == True: return numpy.zeros((shape)) # Convert units elif unitconv: scale, offset = unitconv # Gradient is just the scale if self.deriv_units: offset = 0.0 # if shape is 1, it's a float if shape == 1: return scale*(meta['val'][0] + offset) else: return scale*(meta['val'].reshape(shape) + offset) else: # if shape is 1, it's a float if shape == 1: return meta['val'][0] else: return meta['val'].reshape(shape) def __setitem__(self, name, value): """ Set the value of the named variable. Args ---- name : str Name of variable to get the value for. value : The unflattened value of the named variable. """ meta = self.metadata(name) if meta.get('pass_by_obj'): meta['val'].val = value return unitconv = meta.get('unit_conv') # For dparam vector in adjoint mode, assignement behaves as +=. if self.adj_accumulate_mode is True: if self.deriv_units and unitconv: scale, offset = unitconv if isinstance(value, numpy.ndarray): meta['val'][:] += scale*value.flat[:] else: meta['val'][0] += scale*value else: if isinstance(value, numpy.ndarray): meta['val'][:] += value.flat[:] else: meta['val'][0] += value # Convert Units else: if self.deriv_units and unitconv: scale, offset = unitconv if isinstance(value, numpy.ndarray): meta['val'][:] = scale*value.flat[:] else: meta['val'][0] = scale*value else: if isinstance(value, numpy.ndarray): meta['val'][:] = value.flat[:] else: meta['val'][0] = value def __len__(self): """ Returns ------- The number of keys (variables) in this vector. """ return len(self._vardict) def __contains__(self, key): """ Returns ------- A boolean indicating if the given key (variable name) is in this vector. """ return key in self._vardict def __iter__(self): """ Returns ------- A dictionary iterator over the items in _vardict. """ return self._vardict.__iter__() def keys(self): """ Returns ------- list or KeyView (python 3) the keys (variable names) in this vector. """ return self._vardict.keys() def items(self): """ Returns ------- iterator Iterator returning the name and metadata dict for each variable. """ return iteritems(self._vardict) def values(self): """ Returns ------- iterator Iterator returning a metadata dict for each variable. """ for meta in self._vardict.values(): yield meta def get_local_idxs(self, name, idx_dict): """ Returns all of the indices for the named variable in this vector. Args ---- name : str Name of variable to get the indices for. Returns ------- size The size of the named variable. ndarray Index array containing all local indices for the named variable. """ # TODO: add support for returning slice objects meta = self._vardict[name] if meta.get('pass_by_obj'): raise RuntimeError("No vector indices can be provided " "for 'pass by object' variable '%s'" % name) if name not in self._slices: return meta['size'], self.make_idx_array(0, 0) start, end = self._slices[name] if name in idx_dict: idxs = self.to_idx_array(idx_dict[name]) + start if idxs.size > (end-start) or max(idxs) >= end: raise RuntimeError("Indices of interest specified for '%s'" "are too large" % name) return idxs.size, idxs else: return meta['size'], self.make_idx_array(start, end) def norm(self): """ Calculates the norm of this vector. Returns ------- float Norm of our internal vector. """ return norm(self.vec) def get_view(self, sys_pathname, comm, varmap, relevance, var_of_interest): """ Return a new `VecWrapper` that is a view into this one. Args ---- sys_pathname : str pathname of the system for which the view is being created. comm : an MPI communicator (real or fake) A communicator that is used in the creation of the view. varmap : dict Mapping of variable names in the old `VecWrapper` to the names they will have in the new `VecWrapper`. Returns ------- `VecWrapper` A new `VecWrapper` that is a view into this one. """ view = self.__class__(sys_pathname, comm) view_size = 0 start = -1 for name, meta in self.items(): if name in varmap: view._vardict[varmap[name]] = self._vardict[name] if not meta.get('pass_by_obj') and not meta.get('remote'): pstart, pend = self._slices[name] if start == -1: start = pstart end = pend else: assert pstart == end, \ "%s not contiguous in block containing %s" % \ (name, varmap.keys()) end = pend view._slices[varmap[name]] = (view_size, view_size + meta['size']) view_size += meta['size'] if start == -1: # no items found view.vec = self.vec[0:0] else: view.vec = self.vec[start:end] view._setup_prom_map() return view def make_idx_array(self, start, end): """ Return an index vector of the right int type for the current implementation. Args ---- start : int The starting index. end : int The ending index. Returns ------- ndarray of idx_arr_type index array containing all indices from start up to but not including end """ return numpy.arange(start, end, dtype=self.idx_arr_type) def to_idx_array(self, indices): """ Given some iterator of indices, return an index array of the right int type for the current implementation. Args ---- indices : iterator of ints An iterator of indices. Returns ------- ndarray of idx_arr_type Index array containing all of the given indices. """ return numpy.array(indices, dtype=self.idx_arr_type) def merge_idxs(self, src_idxs, tgt_idxs): """ Return source and target index arrays, built up from smaller index arrays and combined in order of ascending source index (to allow us to convert src indices to a slice in some cases). Args ---- src_idxs : array Source indices. tgt_idxs : array Target indices. Returns ------- ndarray of idx_arr_type Index array containing all of the merged indices. """ assert(len(src_idxs) == len(tgt_idxs)) # filter out any zero length idx array entries src_idxs = [i for i in src_idxs if len(i)] tgt_idxs = [i for i in tgt_idxs if len(i)] if len(src_idxs) == 0: return self.make_idx_array(0, 0), self.make_idx_array(0, 0) src_tups = list(enumerate(src_idxs)) src_sorted = sorted(src_tups, key=lambda x: x[1].min()) new_src = [idxs for i, idxs in src_sorted] new_tgt = [tgt_idxs[i] for i, _ in src_sorted] return idx_merge(new_src), idx_merge(new_tgt) def get_promoted_varname(self, abs_name): """ Returns the relative pathname for the given absolute variable pathname. Args ---- abs_name : str Absolute pathname of a variable. Returns ------- rel_name : str Relative name mapped to the given absolute pathname. """ try: return self._to_prom_name[abs_name] except KeyError: raise KeyError("Relative name not found for variable '%s'" % abs_name) def get_states(self): """ Returns ------- list A list of names of state variables. """ return [n for n, meta in self.items() if meta.get('state')] def get_vecvars(self): """ Returns ------- A list of names of variables found in our 'vec' array. """ return [(n, meta) for n, meta in self.items() if not meta.get('pass_by_obj')] def get_byobjs(self): """ Returns ------- list A list of names of variables that are passed by object rather than through scattering entries from one array to another. """ return [(n, meta) for n, meta in self.items() if meta.get('pass_by_obj')] def _scoped_abs_name(self, name): """ Args ---- name : str The absolute pathname of a variable. Returns ------- str The given name as seen from the 'scope' of the `System` that contains this `VecWrapper`. """ if self.pathname: start = len(self.pathname)+1 else: start = 0 return name[start:] def dump(self, out_stream=sys.stdout): """ Args ---- out_stream : file_like Where to send human readable output. Default is sys.stdout. Set to None to return a str. """ if out_stream is None: out_stream = cStringIO() return_str = True else: return_str = False lens = [len(n) for n in self.keys()] nwid = max(lens) if lens else 10 vlens = [len(repr(self[v])) for v in self.keys()] vwid = max(vlens) if vlens else 1 if len(self.get_vecvars()) != len(self.keys()): # we have some pass by obj defwid = 8 else: defwid = 1 slens = [len('[{0[0]}:{0[1]}]'.format(self._slices[v])) for v in self.keys() if v in self._slices]+[defwid] swid = max(slens) for v, meta in self.items(): if meta.get('pass_by_obj') or meta.get('remote'): continue if v in self._slices: uslice = '[{0[0]}:{0[1]}]'.format(self._slices[v]) else: uslice = '' template = "{0:<{nwid}} {1:<{swid}} {2:>{vwid}}\n" out_stream.write(template.format(v, uslice, repr(self[v]), nwid=nwid, swid=swid, vwid=vwid)) for v, meta in self.items(): if meta.get('pass_by_obj') and not meta.get('remote'): template = "{0:<{nwid}} {1:<{swid}} {2}\n" out_stream.write(template.format(v, '(by obj)', repr(self[v]), nwid=nwid, swid=swid)) if return_str: return out_stream.getvalue() def _set_adjoint_mode(self, mode=False): """ Turn on or off adjoint accumlate mode.""" self.adj_accumulate_mode = mode