def test_flat_shaped_src_inds(self):
    src = np.arange(24).reshape((8, 3))  # 2D source
    ind = indexer([[1, 3, 5, 4], [22, -4, 11, 3]], flat=True)
    assert_equal(ind(), [[1, 3, 5, 4], [22, -4, 11, 3]])
    ind.set_src_shape(src.shape)
    assert_equal(ind.shaped_array(), np.array([1, 3, 5, 4, 22, 20, 11, 3]))
    assert_equal(ind.as_array(), np.array([1, 3, 5, 4, 22, -4, 11, 3]))
    assert_equal(ind.as_array(flat=False), np.array([[1, 3, 5, 4], [22, -4, 11, 3]]))
    assert_equal(ind.shape, (2, 4))
    assert_equal(ind.src_ndim, 1)
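For reference, the `shaped_array()` result above resolves the negative flat index -4 to 20 once the source size (24) is known. A minimal plain-numpy sketch of that wrap-around behavior (an illustration only, not the Indexer implementation):

import numpy as np

inds = np.array([1, 3, 5, 4, 22, -4, 11, 3])
src_size = 24                      # np.arange(24).reshape((8, 3)) has 24 elements
shaped = np.where(inds < 0, inds + src_size, inds)
print(shaped)                      # [ 1  3  5  4 22 20 11  3]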
def test_nonflat_2D_neg_src_inds(self):
    # Test our special format, where the src inds are an array of a given shape
    # containing tuples that index into each dimension of the source.
    src = np.arange(24).reshape((8, 3))  # 2D source
    ind = indexer([((1, 2), (3, 2)), ((5, 0), (4, -2))], new_style=False)
    assert_equal(ind(), (np.array([1, 3, 5, 4]), np.array([2, 2, 0, -2])))
    ind.set_src_shape(src.shape)
    assert_equal(ind.shaped_array(), np.array([5, 11, 15, 13]))
    assert_equal(ind.as_array(), np.array([5, 11, 15, 13]))
    assert_equal(ind.as_array(flat=False), np.array([[5, 11], [15, 13]]))
    assert_equal(ind.shape, (2, 2))
    assert_equal(ind.src_ndim, 2)
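The old-style tuple format above maps directly onto plain NumPy fancy indexing: the first element of each (row, col) tuple becomes a row index and the second a column index. A minimal sketch of that equivalence (illustration only, not the Indexer internals):

import numpy as np

src = np.arange(24).reshape((8, 3))
rows = np.array([1, 3, 5, 4])      # first element of each (row, col) tuple
cols = np.array([2, 2, 0, -2])     # second element; negatives count from the end
print(src[rows, cols])             # [ 5 11 15 13], matching shaped_array() above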
def set_var(self, name, val, idxs=_full_slice, flat=False):
    """
    Set the array view corresponding to the named variable, with optional indexing.

    Parameters
    ----------
    name : str
        The name of the variable.
    val : float or ndarray
        Scalar or array to set data array to.
    idxs : int or slice or tuple of ints and/or slices
        The locations where the data array should be updated.
    flat : bool
        If True, set into flattened variable.
    """
    abs_name = self._name2abs_name(name)
    if abs_name is None:
        raise KeyError(f"{self._system().msginfo}: Variable name '{name}' not found.")
    if self.read_only:
        raise ValueError(f"{self._system().msginfo}: Attempt to set value of '{name}' in "
                         f"{self._kind} vector when it is read only.")

    if not isinstance(idxs, Indexer):
        idxs = indexer(idxs, flat=flat)

    if flat:
        if isinstance(val, float):
            self._views_flat[abs_name][idxs()] = val
        else:
            self._views_flat[abs_name][idxs.flat()] = np.asarray(val).flat
    else:
        value = np.asarray(val)
        try:
            self._views[abs_name][idxs()] = value
        except Exception as err:
            try:
                value = value.reshape(self._views[abs_name][idxs()].shape)
            except Exception:
                raise ValueError(f"{self._system().msginfo}: Failed to set value of "
                                 f"'{name}': {str(err)}.")
            self._views[abs_name][idxs()] = value
def set_var(self, name, val, idxs=_full_slice, flat=False, var_name=None):
    """
    Set the array view corresponding to the named variable, with optional indexing.

    Parameters
    ----------
    name : str
        The name of the variable.
    val : float or ndarray
        Scalar or array to set data array to.
    idxs : int or slice or tuple of ints and/or slices
        The locations where the data array should be updated.
    flat : bool
        If True, set into flattened variable.
    var_name : str or None
        If specified, the variable name to use when reporting errors. This is useful
        when setting an AutoIVC value that the user only knows by a connected input name.
    """
    abs_name = self._name2abs_name(name)
    if abs_name is None:
        raise KeyError(f"{self._system().msginfo}: Variable name "
                       f"'{var_name if var_name else name}' not found.")
    if self.read_only:
        raise ValueError(f"{self._system().msginfo}: Attempt to set value of "
                         f"'{var_name if var_name else name}' in "
                         f"{self._kind} vector when it is read only.")

    if idxs is _full_slice:
        if flat:
            idxs = _flat_full_indexer
        else:
            idxs = _full_indexer
    elif not isinstance(idxs, Indexer):
        idxs = indexer(idxs, flat_src=flat)

    if flat:
        if isinstance(val, float):
            self._views_flat[abs_name][idxs.flat()] = val
        else:
            self._views_flat[abs_name][idxs.flat()] = np.asarray(val).flat
    else:
        value = np.asarray(val)
        view = self._views[abs_name]
        try:
            if view.shape:
                view[idxs()] = value
            else:
                # The view is a scalar, so we can't update it without breaking its
                # connection to the underlying array. Instead, set the value into
                # the array using the flat view, which is an array of size 1.
                self._views_flat[abs_name][0] = value
        except Exception as err:
            try:
                value = value.reshape(view[idxs()].shape)
            except Exception:
                raise ValueError(f"{self._system().msginfo}: Failed to set value of "
                                 f"'{var_name if var_name else name}': {str(err)}.")
            view[idxs()] = value
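The try/except fallback above handles a value whose size matches the indexed view but whose shape does not: the first assignment fails to broadcast, so the value is reshaped to the view's shape and assigned again. A minimal standalone sketch of that behavior (hypothetical names, not the Vector internals):

import numpy as np

view = np.zeros((2, 3))            # stands in for the indexed variable view
value = np.arange(6)               # right size (6), wrong shape: (6,) vs (2, 3)

try:
    view[:] = value                # raises ValueError: shapes cannot broadcast
except ValueError:
    view[:] = value.reshape(view.shape)   # the fallback path succeeds

print(view)                        # [[0. 1. 2.]
                                   #  [3. 4. 5.]]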
"""Define the base Vector and Transfer classes.""" from copy import deepcopy import os import weakref import numpy as np from numpy import isscalar from openmdao.utils.name_maps import prom_name2abs_name, rel_name2abs_name from openmdao.utils.indexer import Indexer, indexer _full_slice = slice(None) _flat_full_indexer = indexer(_full_slice, flat_src=True) _full_indexer = indexer(_full_slice, flat_src=False) _type_map = {'input': 'input', 'output': 'output', 'residual': 'output'} class Vector(object): """ Base Vector class. This class is instantiated for inputs, outputs, and residuals. It provides a dictionary interface and an arithmetic operations interface. Implementations: - <DefaultVector> - <PETScVector> Parameters ----------
def _setup_driver(self, problem):
    """
    Prepare the driver for execution.

    This is the final thing to run during setup.

    Parameters
    ----------
    problem : <Problem>
        Pointer to the containing problem.
    """
    self._problem = weakref.ref(problem)
    model = problem.model

    self._total_jac = None

    self._has_scaling = (
        np.any([r['total_scaler'] is not None for r in self._responses.values()]) or
        np.any([dv['total_scaler'] is not None for dv in self._designvars.values()])
    )

    # Determine if any design variables are discrete.
    self._designvars_discrete = [name for name, meta in self._designvars.items()
                                 if meta['ivc_source'] in model._discrete_outputs]
    if not self.supports['integer_design_vars'] and len(self._designvars_discrete) > 0:
        msg = "Discrete design variables are not supported by this driver: "
        msg += ', '.join(self._designvars_discrete)
        raise RuntimeError(msg)

    self._remote_dvs = remote_dv_dict = {}
    self._remote_cons = remote_con_dict = {}
    self._dist_driver_vars = dist_dict = {}
    self._remote_objs = remote_obj_dict = {}

    # Only allow distributed design variables on drivers that support them.
    if self.supports['distributed_design_vars'] is False:
        dist_vars = []
        abs2meta_in = model._var_allprocs_abs2meta['input']
        discrete_in = model._var_allprocs_discrete['input']

        for dv, meta in self._designvars.items():
            # For auto_ivcs, we need to check the distributed metadata on the target instead.
            if meta['ivc_source'].startswith('_auto_ivc.'):
                for abs_name in model._var_allprocs_prom2abs_list['input'][dv]:
                    if abs_name in discrete_in:
                        # Discrete vars aren't distributed.
                        break
                    if abs2meta_in[abs_name]['distributed']:
                        dist_vars.append(dv)
                        break
            elif meta['distributed']:
                dist_vars.append(dv)

        if dist_vars:
            dstr = ', '.join(dist_vars)
            msg = ("Distributed design variables are not supported by this driver, but the "
                   f"following variables are distributed: [{dstr}]")
            raise RuntimeError(msg)

    # Now determine if later we'll need to allgather cons, objs, or desvars.
    if model.comm.size > 1 and model._subsystems_allprocs:
        con_set = set()
        obj_set = set()
        dv_set = set()

        src_design_vars = _prom2ivc_src_dict(self._designvars)
        responses = _prom2ivc_src_dict(self._responses)

        local_out_vars = set(model._outputs._abs_iter())
        remote_dvs = set(src_design_vars) - local_out_vars
        remote_cons = set(_prom2ivc_src_name_iter(self._cons)) - local_out_vars
        remote_objs = set(_prom2ivc_src_name_iter(self._objs)) - local_out_vars

        all_remote_vois = model.comm.allgather((remote_dvs, remote_cons, remote_objs))
        for rem_dvs, rem_cons, rem_objs in all_remote_vois:
            con_set.update(rem_cons)
            obj_set.update(rem_objs)
            dv_set.update(rem_dvs)

        # If we have remote VOIs, pick an owning rank for each and use that
        # to bcast to others later.
        owning_ranks = model._owning_rank
        sizes = model._var_sizes['output']
        rank = model.comm.rank
        nprocs = model.comm.size

        for i, (vname, meta) in enumerate(model._var_allprocs_abs2meta['output'].items()):
            if vname in responses:
                indices = responses[vname].get('indices')
            elif vname in src_design_vars:
                indices = src_design_vars[vname].get('indices')
            else:
                continue

            if meta['distributed']:
                dist_sizes = sizes[:, i]
                tot_size = np.sum(dist_sizes)

                # Determine which indices are on our proc.
                offsets = sizes2offsets(dist_sizes)

                if indices is not None:
                    indices = indices.shaped_array()
                    true_sizes = np.zeros(nprocs, dtype=INT_DTYPE)
                    for irank in range(nprocs):
                        dist_inds = indices[np.logical_and(indices >= offsets[irank],
                                                           indices < (offsets[irank] +
                                                                      dist_sizes[irank]))]
                        true_sizes[irank] = dist_inds.size
                        if irank == rank:
                            local_indices = dist_inds - offsets[rank]
                            distrib_indices = dist_inds

                    ind = indexer(local_indices, src_shape=(tot_size,), flat_src=True)
                    dist_dict[vname] = (ind, true_sizes, distrib_indices)
                else:
                    dist_dict[vname] = (_full_slice, dist_sizes,
                                        slice(offsets[rank], offsets[rank] + dist_sizes[rank]))
            else:
                owner = owning_ranks[vname]
                sz = sizes[owner, i]

                if vname in dv_set:
                    remote_dv_dict[vname] = (owner, sz)
                if vname in con_set:
                    remote_con_dict[vname] = (owner, sz)
                if vname in obj_set:
                    remote_obj_dict[vname] = (owner, sz)

    self._remote_responses = self._remote_cons.copy()
    self._remote_responses.update(self._remote_objs)

    # Set up simultaneous deriv coloring.
    if coloring_mod._use_total_sparsity:
        # Reset the coloring.
        if self._coloring_info['dynamic'] or self._coloring_info['static'] is not None:
            self._coloring_info['coloring'] = None

        coloring = self._get_static_coloring()
        if coloring is not None and self.supports['simultaneous_derivatives']:
            if model._owns_approx_jac:
                coloring._check_config_partial(model)
            else:
                coloring._check_config_total(self)

            self._setup_simul_coloring()
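The per-rank index split above relies on offsets that, under the usual convention, are the exclusive cumulative sum of the per-rank sizes (assumed here to be what sizes2offsets returns). A minimal sketch with illustrative values, not the driver internals:

import numpy as np

dist_sizes = np.array([4, 3, 5])                 # hypothetical per-rank sizes
offsets = np.zeros_like(dist_sizes)
offsets[1:] = np.cumsum(dist_sizes)[:-1]         # [0, 4, 7]

indices = np.array([1, 5, 6, 10])                # global flat indices
for irank, (off, sz) in enumerate(zip(offsets, dist_sizes)):
    mine = indices[np.logical_and(indices >= off, indices < off + sz)]
    print(irank, mine - off)                     # local indices owned by each rank
    # rank 0 owns [1], rank 1 owns [1, 2], rank 2 owns [3]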