def get_values(self, var, units=None):
    """
    Return the simulation results for the given variable.

    If units are given, the stored values are converted to those units.
    """
    if units is not None and not valid_units(units):
        raise ValueError('{0} is not a valid set of units.'.format(units))

    var_in_phase = True

    if var == 'time':
        var_type = 'indep'
    elif var in self.outputs['states']:
        var_type = 'states'
    elif var in self.outputs['controls']:
        var_type = 'controls'
    elif var in self.outputs['design_parameters']:
        var_type = 'design_parameters'
    elif var in self.outputs['input_parameters']:
        var_type = 'input_parameters'
    elif var in self.outputs['control_rates']:
        var_type = 'control_rates'
    elif var in self.outputs['ode']:
        var_type = 'ode'
    else:
        var_in_phase = False

    if not var_in_phase:
        raise ValueError('Variable "{0}" not found in phase '
                         'simulation results.'.format(var))

    output = convert_units(self.outputs[var_type][var]['value'],
                           self.outputs[var_type][var]['units'],
                           units)

    return output
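# --- Illustrative sketch ---
# The method above validates the requested unit string with valid_units() and then
# delegates the conversion to convert_units().  A standalone look at that core step,
# assuming OpenMDAO's unit utilities are importable:
import numpy as np
from openmdao.utils.units import convert_units, valid_units

assert valid_units('km')                  # 'km' is a recognized unit string
x_m = np.array([0.0, 500.0, 1000.0])      # values stored in meters
x_km = convert_units(x_m, 'm', 'km')      # -> array([0. , 0.5, 1. ])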
def check_option(option, value):
    """
    Check option for validity.

    Parameters
    ----------
    option : str
        The name of the option
    value : any
        The value of the option

    Raises
    ------
    ValueError
    """
    if option == 'units' and value is not None and not valid_units(value):
        raise ValueError("The units '%s' are invalid." % value)
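# --- Usage sketch (illustrative; relies on the check_option defined above) ---
from openmdao.utils.units import valid_units  # the validator used by check_option

check_option('units', 'ft')      # a valid unit string: no exception
check_option('color', 'red')     # options other than 'units' are not checked
try:
    check_option('units', 'bogus')
except ValueError as err:
    print(err)                   # The units 'bogus' are invalid.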
def add_output(self, name, val=1.0, shape=None, units=None, res_units=None, desc='',
               lower=None, upper=None, ref=1.0, ref0=0.0, res_ref=1.0, var_set=0):
    """
    Add an output variable to the component.

    Parameters
    ----------
    name : str
        name of the variable in this component's namespace.
    val : float or list or tuple or ndarray
        The initial value of the variable being added in user-defined units.
        Default is 1.0.
    shape : int or tuple or list or None
        Shape of this variable, only required if val is not an array.
        Default is None.
    units : str or None
        Units in which the output variables will be provided to the component
        during execution. Default is None, which means it has no units.
    res_units : str or None
        Units in which the residuals of this output will be given to the user
        when requested. Default is None, which means it has no units.
    desc : str
        description of the variable.
    lower : float or list or tuple or ndarray or Iterable or None
        lower bound(s) in user-defined units. It can be (1) a float, (2) an array_like
        consistent with the shape arg (if given), or (3) an array_like matching the
        shape of val, if val is array_like. A value of None means this output has no
        lower bound. Default is None.
    upper : float or list or tuple or ndarray or Iterable or None
        upper bound(s) in user-defined units. It can be (1) a float, (2) an array_like
        consistent with the shape arg (if given), or (3) an array_like matching the
        shape of val, if val is array_like. A value of None means this output has no
        upper bound. Default is None.
    ref : float or ndarray
        Scaling parameter. The value in the user-defined units of this output variable
        when the scaled value is 1. Default is 1.
    ref0 : float or ndarray
        Scaling parameter. The value in the user-defined units of this output variable
        when the scaled value is 0. Default is 0.
    res_ref : float or ndarray
        Scaling parameter. The value in the user-defined res_units of this output's
        residual when the scaled value is 1. Default is 1.
    var_set : hashable object
        For advanced users only. ID or color for this variable, relevant for
        reconfigurability. Default is 0.

    Returns
    -------
    dict
        metadata for added variable
    """
    if units == 'unitless':
        warn_deprecation("Output '%s' has units='unitless' but 'unitless' "
                         "has been deprecated. Use "
                         "units=None instead.  Note that connecting a "
                         "unitless variable to one with units is no longer "
                         "an error, but will issue a warning instead." % name)
        units = None

    # First, type check all arguments
    if not isinstance(name, str):
        raise TypeError('The name argument should be a string')
    if not np.isscalar(val) and not isinstance(val, (list, tuple, np.ndarray, Iterable)):
        raise TypeError('The val argument should be a float, list, tuple, or ndarray')
    if not np.isscalar(ref) and not isinstance(ref, (list, tuple, np.ndarray, Iterable)):
        raise TypeError('The ref argument should be a float, list, tuple, or ndarray')
    if not np.isscalar(ref0) and not isinstance(ref0, (list, tuple, np.ndarray, Iterable)):
        raise TypeError('The ref0 argument should be a float, list, tuple, or ndarray')
    if not np.isscalar(res_ref) and not isinstance(res_ref, (list, tuple, np.ndarray, Iterable)):
        raise TypeError('The res_ref argument should be a float, list, tuple, or ndarray')
    if shape is not None and not isinstance(shape, (int, tuple, list, np.integer)):
        raise TypeError("The shape argument should be an int, tuple, or list but "
                        "a '%s' was given" % type(shape))
    if units is not None and not isinstance(units, str):
        raise TypeError('The units argument should be a str or None')
    if res_units is not None and not isinstance(res_units, str):
        raise TypeError('The res_units argument should be a str or None')

    # Check that units are valid
    if units is not None and not valid_units(units):
        raise ValueError("The units '%s' are invalid" % units)

    metadata = {}

    # value, shape: based on args, making sure they are compatible
    metadata['value'], metadata['shape'] = ensure_compatible(name, val, shape)
    metadata['size'] = np.prod(metadata['shape'])

    # units, res_units: taken as is
    metadata['units'] = units
    metadata['res_units'] = res_units

    # desc: taken as is
    metadata['desc'] = desc

    if lower is not None:
        lower = ensure_compatible(name, lower, metadata['shape'])[0]
    if upper is not None:
        upper = ensure_compatible(name, upper, metadata['shape'])[0]
    metadata['lower'] = lower
    metadata['upper'] = upper

    # All refs: check the shape if necessary
    for item, item_name in zip([ref, ref0, res_ref], ['ref', 'ref0', 'res_ref']):
        if not np.isscalar(item):
            if np.atleast_1d(item).shape != metadata['shape']:
                raise ValueError('The %s argument has the wrong shape' % item_name)

    if np.isscalar(ref):
        self._has_output_scaling |= ref != 1.0
    else:
        self._has_output_scaling |= np.any(ref != 1.0)

    if np.isscalar(ref0):
        self._has_output_scaling |= ref0 != 0.0
    else:
        self._has_output_scaling |= np.any(ref0)

    if np.isscalar(res_ref):
        self._has_resid_scaling |= res_ref != 1.0
    else:
        self._has_resid_scaling |= np.any(res_ref != 1.0)

    ref = format_as_float_or_array('ref', ref, flatten=True)
    ref0 = format_as_float_or_array('ref0', ref0, flatten=True)
    res_ref = format_as_float_or_array('res_ref', res_ref, flatten=True)

    # ref, ref0, res_ref: taken as is
    metadata['ref'] = ref
    metadata['ref0'] = ref0
    metadata['res_ref'] = res_ref

    # var_set: taken as is
    metadata['var_set'] = var_set

    metadata['distributed'] = self.distributed

    # We may not know the pathname yet, so we have to use name for now, instead of abs_name.
    if self._static_mode:
        var_rel2data_io = self._static_var_rel2data_io
        var_rel_names = self._static_var_rel_names
    else:
        var_rel2data_io = self._var_rel2data_io
        var_rel_names = self._var_rel_names

    # Disallow dupes
    if name in var_rel2data_io:
        msg = "Variable name '{}' already exists.".format(name)
        raise ValueError(msg)

    var_rel2data_io[name] = {
        'prom': name,
        'rel': name,
        'my_idx': len(self._var_rel_names['output']),
        'type': 'output',
        'metadata': metadata}

    var_rel_names['output'].append(name)

    return metadata
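# --- Usage sketch (assumes OpenMDAO is installed; the component name is hypothetical) ---
import openmdao.api as om

class KineticEnergyComp(om.ExplicitComponent):
    """Illustrates the add_output metadata handled above: units, bounds, and scaling."""

    def setup(self):
        self.add_input('m', val=1.0, units='kg')
        self.add_input('v', val=1.0, units='m/s')
        # lower is broadcast against the output shape; a non-default ref sets the
        # _has_output_scaling flag tracked in the code above.
        self.add_output('ke', val=1.0, units='J', lower=0.0, ref=1.0e3)

    def compute(self, inputs, outputs):
        outputs['ke'] = 0.5 * inputs['m'] * inputs['v'] ** 2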
def add_input(self, name, val=1.0, shape=None, src_indices=None, flat_src_indices=None,
              units=None, desc='', var_set=0):
    """
    Add an input variable to the component.

    Parameters
    ----------
    name : str
        name of the variable in this component's namespace.
    val : float or list or tuple or ndarray or Iterable
        The initial value of the variable being added in user-defined units.
        Default is 1.0.
    shape : int or tuple or list or None
        Shape of this variable, only required if src_indices not provided and
        val is not an array. Default is None.
    src_indices : int or list of ints or tuple of ints or int ndarray or Iterable or None
        The global indices of the source variable to transfer data from.
        A value of None implies this input depends on all entries of source.
        Default is None. The shapes of the target and src_indices must match,
        and form of the entries within is determined by the value of 'flat_src_indices'.
    flat_src_indices : bool
        If True, each entry of src_indices is assumed to be an index into the
        flattened source.  Otherwise each entry must be a tuple or list of size equal
        to the number of dimensions of the source.
    units : str or None
        Units in which this input variable will be provided to the component
        during execution. Default is None, which means it is unitless.
    desc : str
        description of the variable
    var_set : hashable object
        For advanced users only. ID or color for this variable, relevant for
        reconfigurability. Default is 0.

    Returns
    -------
    dict
        metadata for added variable
    """
    if units == 'unitless':
        warn_deprecation("Input '%s' has units='unitless' but 'unitless' "
                         "has been deprecated. Use "
                         "units=None instead.  Note that connecting a "
                         "unitless variable to one with units is no longer "
                         "an error, but will issue a warning instead." % name)
        units = None

    # First, type check all arguments
    if not isinstance(name, str):
        raise TypeError('The name argument should be a string')
    if not np.isscalar(val) and not isinstance(val, (list, tuple, np.ndarray, Iterable)):
        raise TypeError('The val argument should be a float, list, tuple, ndarray or Iterable')
    if shape is not None and not isinstance(shape, (int, tuple, list, np.integer)):
        raise TypeError("The shape argument should be an int, tuple, or list but "
                        "a '%s' was given" % type(shape))
    if src_indices is not None and not isinstance(src_indices, (int, list, tuple,
                                                                np.ndarray, Iterable)):
        raise TypeError('The src_indices argument should be an int, list, '
                        'tuple, ndarray or Iterable')
    if units is not None and not isinstance(units, str):
        raise TypeError('The units argument should be a str or None')

    # Check that units are valid
    if units is not None and not valid_units(units):
        raise ValueError("The units '%s' are invalid" % units)

    metadata = {}

    # value, shape: based on args, making sure they are compatible
    metadata['value'], metadata['shape'] = ensure_compatible(name, val, shape, src_indices)
    metadata['size'] = np.prod(metadata['shape'])

    # src_indices: None or ndarray
    if src_indices is None:
        metadata['src_indices'] = None
    else:
        metadata['src_indices'] = np.atleast_1d(src_indices)
    metadata['flat_src_indices'] = flat_src_indices

    # units: taken as is
    metadata['units'] = units

    # desc: taken as is
    metadata['desc'] = desc

    # var_set: taken as is
    metadata['var_set'] = var_set

    # We may not know the pathname yet, so we have to use name for now, instead of abs_name.
    if self._static_mode:
        var_rel2data_io = self._static_var_rel2data_io
        var_rel_names = self._static_var_rel_names
    else:
        var_rel2data_io = self._var_rel2data_io
        var_rel_names = self._var_rel_names

    # Disallow dupes
    if name in var_rel2data_io:
        msg = "Variable name '{}' already exists.".format(name)
        raise ValueError(msg)

    var_rel2data_io[name] = {
        'prom': name,
        'rel': name,
        'my_idx': len(self._var_rel_names['input']),
        'type': 'input',
        'metadata': metadata}

    var_rel_names['input'].append(name)

    return metadata
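# --- Usage sketch (assumes OpenMDAO is installed; names are hypothetical) ---
import openmdao.api as om

class SpeedComp(om.ExplicitComponent):
    """Illustrates add_input with units: values always arrive in the declared units."""

    def setup(self):
        self.add_input('distance', val=1.0, units='km')
        self.add_input('time', val=1.0, units='h')
        self.add_output('speed', val=1.0, units='km/h')

    def compute(self, inputs, outputs):
        outputs['speed'] = inputs['distance'] / inputs['time']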
def get_values(self, var, nodes=None, units=None):
    """
    Retrieve the values of the given variable at the given subset of nodes.

    Parameters
    ----------
    var : str
        The variable whose values are to be returned.  This may be
        the name 'time', the name of a state, control, or parameter,
        or the path to a variable in the ODEFunction of the phase.
    nodes : str
        The name of the node subset.
    units : str
        The units in which the values should be expressed.  Must be compatible
        with the corresponding units inside the phase.

    Returns
    -------
    ndarray
        An array of the values at the requested node subset.  The node
        index is the first dimension of the ndarray.
    """
    if nodes is None:
        nodes = 'all'

    gd = self.grid_data

    var_type = self._classify_var(var)

    op = dict(self.list_outputs(explicit=True, values=True, units=True, shape=True,
                                out_stream=None))

    if units is not None:
        if not valid_units(units):
            raise ValueError('Units {0} is not a valid units identifier'.format(units))

    var_prefix = '{0}.'.format(self.pathname) if self.pathname else ''

    path_map = {'time': 'time.{0}',
                'state': 'indep_states.states:{0}',
                'indep_control': 'control_interp_comp.control_values:{0}',
                'input_control': 'control_interp_comp.control_values:{0}',
                'design_parameter': 'design_params.design_parameters:{0}',
                'input_parameter': 'input_params.input_parameters:{0}_out',
                'control_rate': 'control_interp_comp.control_rates:{0}',
                'control_rate2': 'control_interp_comp.control_rates:{0}',
                'ode': 'rhs_all.{0}'}

    if var_type == 'state':
        var_path = var_prefix + path_map[var_type].format(var)
        output_units = op[var_path]['units']
        output_value = convert_units(op[var_path]['value'][gd.input_maps['state_input_to_disc'], ...],
                                     output_units, units)

    elif var_type in ('input_control', 'indep_control'):
        var_path = var_prefix + path_map[var_type].format(var)
        output_units = op[var_path]['units']
        vals = op[var_path]['value']
        output_value = convert_units(vals, output_units, units)

    elif var_type in ('design_parameter', 'input_parameter', 'traj_design_parameter',
                      'traj_input_parameter'):
        var_path = var_prefix + path_map[var_type].format(var)
        output_units = op[var_path]['units']
        output_value = convert_units(op[var_path]['value'], output_units, units)
        output_value = np.repeat(output_value, gd.num_nodes, axis=0)

    elif var_type == 'ode':
        rhs_all_outputs = dict(self.rhs_all.list_outputs(out_stream=None, values=True,
                                                         shape=True, units=True))
        prom2abs_all = self.rhs_all._var_allprocs_prom2abs_list
        abs_path_all = prom2abs_all['output'][var][0]
        output_value = rhs_all_outputs[abs_path_all]['value']
        output_units = rhs_all_outputs[abs_path_all]['units']
        output_value = convert_units(output_value, output_units, units)

    else:
        var_path = var_prefix + path_map[var_type].format(var)
        output_units = op[var_path]['units']
        output_value = convert_units(op[var_path]['value'], output_units, units)

    # Always return a column vector
    if len(output_value.shape) == 1:
        output_value = np.reshape(output_value, (gd.num_nodes, 1))

    return output_value[gd.subset_node_indices[nodes], ...]
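# --- Illustrative sketch (standalone; the prefix and variable name are hypothetical) ---
# The path_map above builds the full system path of the output that stores a variable;
# that path is then used as a key into the dict produced by list_outputs():
path_map = {'state': 'indep_states.states:{0}'}
var_prefix = 'traj.phase0.'                            # would come from self.pathname
var_path = var_prefix + path_map['state'].format('x')
# var_path == 'traj.phase0.indep_states.states:x'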
def __init__(self, exprs, vectorize=False, units=None, **kwargs):
    r"""
    Create a <Component> using only an expression string.

    Given a list of assignment statements, this component creates
    input and output variables at construction time.  All variables
    appearing on the left-hand side of an assignment are outputs,
    and the rest are inputs.  Each variable is assumed to be of
    type float unless the initial value for that variable is supplied
    in \*\*kwargs.  Derivatives are calculated using complex step.

    The following functions are available for use in expressions:

    =========================  ====================================
    Function                   Description
    =========================  ====================================
    abs(x)                     Absolute value of x
    acos(x)                    Inverse cosine of x
    acosh(x)                   Inverse hyperbolic cosine of x
    arange(start, stop, step)  Array creation
    arccos(x)                  Inverse cosine of x
    arccosh(x)                 Inverse hyperbolic cosine of x
    arcsin(x)                  Inverse sine of x
    arcsinh(x)                 Inverse hyperbolic sine of x
    arctan(x)                  Inverse tangent of x
    asin(x)                    Inverse sine of x
    asinh(x)                   Inverse hyperbolic sine of x
    atan(x)                    Inverse tangent of x
    cos(x)                     Cosine of x
    cosh(x)                    Hyperbolic cosine of x
    dot(x, y)                  Dot-product of x and y
    e                          Euler's number
    erf(x)                     Error function
    erfc(x)                    Complementary error function
    exp(x)                     Exponential function
    expm1(x)                   exp(x) - 1
    factorial(x)               Factorial of all numbers in x
    fmax(x, y)                 Element-wise maximum of x and y
    fmin(x, y)                 Element-wise minimum of x and y
    inner(x, y)                Inner product of arrays x and y
    isinf(x)                   Element-wise detection of np.inf
    isnan(x)                   Element-wise detection of np.nan
    kron(x, y)                 Kronecker product of arrays x and y
    linspace(x, y, N)          Numpy linear spaced array creation
    log(x)                     Natural logarithm of x
    log10(x)                   Base-10 logarithm of x
    log1p(x)                   log(1+x)
    matmul(x, y)               Matrix multiplication of x and y
    maximum(x, y)              Element-wise maximum of x and y
    minimum(x, y)              Element-wise minimum of x and y
    ones(N)                    Create an array of ones
    outer(x, y)                Outer product of x and y
    pi                         Pi
    power(x, y)                Element-wise x**y
    prod(x)                    The product of all elements in x
    sin(x)                     Sine of x
    sinh(x)                    Hyperbolic sine of x
    sum(x)                     The sum of all elements in x
    tan(x)                     Tangent of x
    tanh(x)                    Hyperbolic tangent of x
    tensordot(x, y)            Tensor dot product of x and y
    zeros(N)                   Create an array of zeros
    =========================  ====================================

    Parameters
    ----------
    exprs : str, tuple of str or list of str
        An assignment statement or iter of them. These express how the
        outputs are calculated based on the inputs.  In addition to
        standard Python operators, a subset of numpy and scipy functions
        is supported.
    vectorize : bool
        If True, treat all array/array partials as diagonal if both arrays have size > 1.
        All arrays with size > 1 must have the same flattened size or an exception will
        be raised.
    units : str or None
        Units to be assigned to all variables in this component.
        Default is None, which means units are provided for variables individually.
    **kwargs : dict of named args
        Initial values of variables can be set by setting a named
        arg with the var name.  If the value is a dict it is assumed
        to contain metadata.  To set the initial value in addition to
        other metadata, assign the initial value to the 'value' entry
        of the dict.

    Notes
    -----
    If a variable has an initial value that is anything other than 1.0,
    either because it has a different type than float or just because its
    initial value is != 1.0, you must use a keyword arg to set the initial
    value.  For example, let's say we have an ExecComp that takes an array
    'x' as input and outputs a float variable 'y' which is the sum of the
    entries in 'x'.

    .. code-block:: python

        import numpy
        from openmdao.api import ExecComp

        excomp = ExecComp('y=sum(x)', x=numpy.ones(10, dtype=float))

    In this example, 'y' would be assumed to be the default type of float
    and would be given the default initial value of 1.0, while 'x' would be
    initialized with a size 10 float array of ones.

    If you want to assign certain metadata for 'x' in addition to its
    initial value, you can do it as follows:

    .. code-block:: python

        excomp = ExecComp('y=sum(x)',
                          x={'value': numpy.ones(10, dtype=float),
                             'units': 'ft'})
    """
    super(ExecComp, self).__init__()

    # Check that units arg is valid
    if units is not None:
        if not isinstance(units, str):
            raise TypeError('The units argument should be a str or None.')
        if not valid_units(units):
            raise ValueError("The units '%s' are invalid." % units)

    # if complex step is used for derivatives, this is the stepsize
    self.complex_stepsize = 1.e-40

    if isinstance(exprs, string_types):
        exprs = [exprs]

    self._exprs = exprs[:]
    self._codes = None
    self._kwargs = kwargs
    self._vectorize = vectorize
    self._units = units
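# --- Usage sketch (assumes OpenMDAO is installed) ---
import openmdao.api as om

# With units='m', every variable in the expression (x, y, z) is created in meters;
# the string is validated with valid_units() exactly as in the constructor above.
p = om.Problem()
p.model.add_subsystem('sum_comp', om.ExecComp('z = x + y', units='m'))
p.setup()
p['sum_comp.x'] = 3.0
p['sum_comp.y'] = 500.0
p.run_model()
print(p['sum_comp.z'])   # [503.]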
def get_values(self, var, phases=None, units=None, flat=False):
    """
    Return the values of the given variable from the given phases, if provided.

    If the variable is not present in one or more phases, it will be returned as
    numpy.nan at each time step in those phases.

    Parameters
    ----------
    var : str
        The variable whose values are to be returned.
    phases : Sequence or None
        The phases from which the values are desired.  If None, include all Phases.
    units : str or None
        The units in which the values are desired.
    flat : bool
        If False, return the values in a dictionary keyed by phase name.  If True,
        return a single array incorporating values from all phases.

    Returns
    -------
    dict or np.array
        If flat=False, a dictionary of the values of the variable in each phase will be
        returned, keyed by Phase name.  If the values are not present in a subset of the
        phases, return numpy.nan at each time point in those phases.

    Raises
    ------
    KeyError
        If the given variable is not found in any phase, a KeyError is raised.
    """
    if units is not None and not valid_units(units):
        raise ValueError('{0} is not a valid set of units.'.format(units))

    phases = self.get_phase_names() if phases is None else phases

    return_vals = dict([(phase_name, {}) for phase_name in phases])

    var_in_traj = False

    times = {}
    time_units = None

    for phase_name in phases:
        var_in_phase = True

        # Gather times for the purposes of flattening the returned values
        # Note the adjustment to the last time, for the purposes of sorting only
        if time_units is None:
            time_units = self.outputs['phases'][phase_name]['indep']['time']['units']
        times[phase_name] = convert_units(
            self.outputs['phases'][phase_name]['indep']['time']['value'],
            self.outputs['phases'][phase_name]['indep']['time']['units'],
            time_units)
        times[phase_name][-1, ...] -= 1.0E-15

        if var == 'time':
            var_type = 'indep'
        elif var in self.outputs['phases'][phase_name]['states']:
            var_type = 'states'
        elif var in self.outputs['phases'][phase_name]['controls']:
            var_type = 'controls'
        elif var in self.outputs['phases'][phase_name]['design_parameters']:
            var_type = 'design_parameters'
        elif var in self.outputs['phases'][phase_name]['input_parameters']:
            var_type = 'input_parameters'
        elif var.endswith('_rate') \
                and var[:-5] in self.outputs['phases'][phase_name]['controls']:
            var_type = 'control_rates'
        elif var.endswith('_rate2') \
                and var[:-6] in self.outputs['phases'][phase_name]['controls']:
            var_type = 'control_rates'
        elif var in self.outputs['phases'][phase_name]['ode']:
            var_type = 'ode'
        else:
            var_in_phase = False

        if var_in_phase:
            var_in_traj = True
            output = convert_units(
                self.outputs['phases'][phase_name][var_type][var]['value'],
                self.outputs['phases'][phase_name][var_type][var]['units'],
                units)
        else:
            # Variable not in this phase; fill with NaN at each time point.
            indep_var = list(self.outputs['phases'][phase_name]['indep'].keys())[0]
            n = len(self.outputs['phases'][phase_name]['indep'][indep_var]['value'])
            output = np.empty(n)
            output[:] = np.nan

        return_vals[phase_name] = output

    if not var_in_traj:
        raise KeyError('Variable "{0}" not found in trajectory '
                       'simulation results.'.format(var))

    if flat:
        time_array = np.concatenate([times[pname] for pname in phases])
        sort_idxs = np.argsort(time_array, axis=0).ravel()
        return_vals = np.concatenate([return_vals[pname] for pname in phases])[sort_idxs, ...]

    return return_vals
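# --- Usage sketch (illustrative only; 'traj_out' and 'r' are hypothetical names) ---
# Assuming the method above belongs to a trajectory simulation-results object:
#
#     r_by_phase = traj_out.get_values('r', units='km')         # dict keyed by phase name
#     r_flat = traj_out.get_values('r', units='km', flat=True)  # one time-sorted array
#
# Phases that do not contain 'r' are filled with numpy.nan; if no phase contains it,
# a KeyError is raised.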
def add_output(self, name, val=1.0, shape=None, units=None, res_units=None, desc='',
               lower=None, upper=None, ref=1.0, ref0=0.0, res_ref=1.0):
    """
    Add an output variable to the component.

    Parameters
    ----------
    name : str
        name of the variable in this component's namespace.
    val : float or list or tuple or ndarray
        The initial value of the variable being added in user-defined units.
        Default is 1.0.
    shape : int or tuple or list or None
        Shape of this variable, only required if val is not an array.
        Default is None.
    units : str or None
        Units in which the output variables will be provided to the component
        during execution. Default is None, which means it has no units.
    res_units : str or None
        Units in which the residuals of this output will be given to the user
        when requested. Default is None, which means it has no units.
    desc : str
        description of the variable.
    lower : float or list or tuple or ndarray or Iterable or None
        lower bound(s) in user-defined units. It can be (1) a float, (2) an array_like
        consistent with the shape arg (if given), or (3) an array_like matching the
        shape of val, if val is array_like. A value of None means this output has no
        lower bound. Default is None.
    upper : float or list or tuple or ndarray or Iterable or None
        upper bound(s) in user-defined units. It can be (1) a float, (2) an array_like
        consistent with the shape arg (if given), or (3) an array_like matching the
        shape of val, if val is array_like. A value of None means this output has no
        upper bound. Default is None.
    ref : float or ndarray
        Scaling parameter. The value in the user-defined units of this output variable
        when the scaled value is 1. Default is 1.
    ref0 : float or ndarray
        Scaling parameter. The value in the user-defined units of this output variable
        when the scaled value is 0. Default is 0.
    res_ref : float or ndarray
        Scaling parameter. The value in the user-defined res_units of this output's
        residual when the scaled value is 1. Default is 1.

    Returns
    -------
    dict
        metadata for added variable
    """
    if units == 'unitless':
        warn_deprecation("Output '%s' has units='unitless' but 'unitless' "
                         "has been deprecated. Use "
                         "units=None instead.  Note that connecting a "
                         "unitless variable to one with units is no longer "
                         "an error, but will issue a warning instead." % name)
        units = None

    # First, type check all arguments
    if not isinstance(name, str):
        raise TypeError('The name argument should be a string')
    if not _valid_var_name(name):
        raise NameError("'%s' is not a valid output name." % name)
    if not isscalar(val) and not isinstance(val, (list, tuple, ndarray, Iterable)):
        msg = 'The val argument should be a float, list, tuple, ndarray or Iterable'
        raise TypeError(msg)
    if not isscalar(ref) and not isinstance(ref, (list, tuple, ndarray, Iterable)):
        msg = 'The ref argument should be a float, list, tuple, ndarray or Iterable'
        raise TypeError(msg)
    if not isscalar(ref0) and not isinstance(ref0, (list, tuple, ndarray, Iterable)):
        msg = 'The ref0 argument should be a float, list, tuple, ndarray or Iterable'
        raise TypeError(msg)
    if not isscalar(res_ref) and not isinstance(res_ref, (list, tuple, ndarray, Iterable)):
        msg = 'The res_ref argument should be a float, list, tuple, ndarray or Iterable'
        raise TypeError(msg)
    if shape is not None and not isinstance(shape, (int, tuple, list, np.integer)):
        raise TypeError("The shape argument should be an int, tuple, or list but "
                        "a '%s' was given" % type(shape))
    if units is not None and not isinstance(units, str):
        raise TypeError('The units argument should be a str or None')
    if res_units is not None and not isinstance(res_units, str):
        raise TypeError('The res_units argument should be a str or None')

    # Check that units are valid
    if units is not None and not valid_units(units):
        raise ValueError("The units '%s' are invalid" % units)

    metadata = {}

    # value, shape: based on args, making sure they are compatible
    metadata['value'], metadata['shape'], _ = ensure_compatible(name, val, shape)
    metadata['size'] = np.prod(metadata['shape'])

    # units, res_units: taken as is
    metadata['units'] = units
    metadata['res_units'] = res_units

    # desc: taken as is
    metadata['desc'] = desc

    if lower is not None:
        lower = ensure_compatible(name, lower, metadata['shape'])[0]
    if upper is not None:
        upper = ensure_compatible(name, upper, metadata['shape'])[0]
    metadata['lower'] = lower
    metadata['upper'] = upper

    # All refs: check the shape if necessary
    for item, item_name in zip([ref, ref0, res_ref], ['ref', 'ref0', 'res_ref']):
        if not isscalar(item):
            it = atleast_1d(item)
            if it.shape != metadata['shape']:
                raise ValueError("'{}': When adding output '{}', expected shape {} but got "
                                 "shape {} for argument '{}'.".format(self.name, name,
                                                                      metadata['shape'],
                                                                      it.shape, item_name))

    if isscalar(ref):
        self._has_output_scaling |= ref != 1.0
    else:
        self._has_output_scaling |= np.any(ref != 1.0)

    if isscalar(ref0):
        self._has_output_scaling |= ref0 != 0.0
    else:
        self._has_output_scaling |= np.any(ref0)

    if isscalar(res_ref):
        self._has_resid_scaling |= res_ref != 1.0
    else:
        self._has_resid_scaling |= np.any(res_ref != 1.0)

    ref = format_as_float_or_array('ref', ref, flatten=True)
    ref0 = format_as_float_or_array('ref0', ref0, flatten=True)
    res_ref = format_as_float_or_array('res_ref', res_ref, flatten=True)

    metadata['ref'] = ref
    metadata['ref0'] = ref0
    metadata['res_ref'] = res_ref

    metadata['distributed'] = self.options['distributed']

    # We may not know the pathname yet, so we have to use name for now, instead of abs_name.
    if self._static_mode:
        var_rel2meta = self._static_var_rel2meta
        var_rel_names = self._static_var_rel_names
    else:
        var_rel2meta = self._var_rel2meta
        var_rel_names = self._var_rel_names

    # Disallow dupes
    if name in var_rel2meta:
        msg = "Variable name '{}' already exists.".format(name)
        raise ValueError(msg)

    var_rel2meta[name] = metadata
    var_rel_names['output'].append(name)

    return metadata
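# --- Usage sketch (assumes OpenMDAO is installed; names are hypothetical) ---
import numpy as np
import openmdao.api as om

class VecComp(om.ExplicitComponent):
    """Illustrates an array-valued ref: its shape must match the output, as checked above."""

    def setup(self):
        self.add_input('x', val=np.zeros(3))
        # Each of the three entries gets its own scaling reference; a mismatched
        # shape (e.g. ref=np.ones(2)) would raise the ValueError shown above.
        self.add_output('y', val=np.zeros(3), ref=np.array([1.0, 10.0, 100.0]))

    def compute(self, inputs, outputs):
        outputs['y'] = 2.0 * inputs['x']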
def add_input(self, name, val=1.0, shape=None, src_indices=None, flat_src_indices=None,
              units=None, desc=''):
    """
    Add an input variable to the component.

    Parameters
    ----------
    name : str
        name of the variable in this component's namespace.
    val : float or list or tuple or ndarray or Iterable
        The initial value of the variable being added in user-defined units.
        Default is 1.0.
    shape : int or tuple or list or None
        Shape of this variable, only required if src_indices not provided and
        val is not an array. Default is None.
    src_indices : int or list of ints or tuple of ints or int ndarray or Iterable or None
        The global indices of the source variable to transfer data from.
        A value of None implies this input depends on all entries of source.
        Default is None. The shapes of the target and src_indices must match,
        and form of the entries within is determined by the value of 'flat_src_indices'.
    flat_src_indices : bool
        If True, each entry of src_indices is assumed to be an index into the
        flattened source.  Otherwise each entry must be a tuple or list of size equal
        to the number of dimensions of the source.
    units : str or None
        Units in which this input variable will be provided to the component
        during execution. Default is None, which means it is unitless.
    desc : str
        description of the variable

    Returns
    -------
    dict
        metadata for added variable
    """
    if units == 'unitless':
        warn_deprecation("Input '%s' has units='unitless' but 'unitless' "
                         "has been deprecated. Use "
                         "units=None instead.  Note that connecting a "
                         "unitless variable to one with units is no longer "
                         "an error, but will issue a warning instead." % name)
        units = None

    # First, type check all arguments
    if not isinstance(name, str):
        raise TypeError('The name argument should be a string')
    if not _valid_var_name(name):
        raise NameError("'%s' is not a valid input name." % name)
    if not isscalar(val) and not isinstance(val, (list, tuple, ndarray, Iterable)):
        raise TypeError('The val argument should be a float, list, tuple, ndarray or Iterable')
    if shape is not None and not isinstance(shape, (int, tuple, list, np.integer)):
        raise TypeError("The shape argument should be an int, tuple, or list but "
                        "a '%s' was given" % type(shape))
    if src_indices is not None and not isinstance(src_indices, (int, list, tuple,
                                                                ndarray, Iterable)):
        raise TypeError('The src_indices argument should be an int, list, '
                        'tuple, ndarray or Iterable')
    if units is not None and not isinstance(units, str):
        raise TypeError('The units argument should be a str or None')

    # Check that units are valid
    if units is not None and not valid_units(units):
        raise ValueError("The units '%s' are invalid" % units)

    metadata = {}

    # value, shape: based on args, making sure they are compatible
    metadata['value'], metadata['shape'], src_indices = ensure_compatible(name, val, shape,
                                                                          src_indices)
    metadata['size'] = np.prod(metadata['shape'])

    # src_indices: None or ndarray
    if src_indices is None:
        metadata['src_indices'] = None
    else:
        metadata['src_indices'] = np.asarray(src_indices, dtype=INT_DTYPE)
    metadata['flat_src_indices'] = flat_src_indices

    metadata['units'] = units
    metadata['desc'] = desc
    metadata['distributed'] = self.options['distributed']

    # We may not know the pathname yet, so we have to use name for now, instead of abs_name.
    if self._static_mode:
        var_rel2meta = self._static_var_rel2meta
        var_rel_names = self._static_var_rel_names
    else:
        var_rel2meta = self._var_rel2meta
        var_rel_names = self._var_rel_names

    # Disallow dupes
    if name in var_rel2meta:
        msg = "Variable name '{}' already exists.".format(name)
        raise ValueError(msg)

    var_rel2meta[name] = metadata
    var_rel_names['input'].append(name)

    return metadata
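# --- Usage sketch (assumes OpenMDAO is installed; names are hypothetical) ---
import numpy as np
import openmdao.api as om

class HeadComp(om.ExplicitComponent):
    """Illustrates src_indices: this input pulls only the first two entries of its source."""

    def setup(self):
        # src_indices is stored as an integer ndarray in the metadata built above
        self.add_input('x_head', shape=(2,), src_indices=np.array([0, 1]),
                       flat_src_indices=True)
        self.add_output('total', val=0.0)

    def compute(self, inputs, outputs):
        outputs['total'] = np.sum(inputs['x_head'])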
def get_values(self, var, nodes=None, units=None):
    """
    Retrieve the values of the given variable at the given subset of nodes.

    Parameters
    ----------
    var : str
        The variable whose values are to be returned.  This may be
        the name 'time', the name of a state, control, or parameter,
        or the path to a variable in the ODEFunction of the phase.
    nodes : str
        The name of the node subset or None (default).
    units : str
        The units in which the values should be expressed.  Must be compatible
        with the corresponding units inside the phase.

    Returns
    -------
    ndarray
        An array of the values at the requested node subset.  The node
        index is the first dimension of the ndarray.
    """
    if nodes is None:
        nodes = 'all'

    gd = self.grid_data
    disc_node_idxs = gd.subset_node_indices['state_disc']
    col_node_idxs = gd.subset_node_indices['col']

    var_type = self._classify_var(var)

    op = dict(self.list_outputs(explicit=True, values=True, units=True, shape=True,
                                out_stream=None))

    if units is not None:
        if not valid_units(units):
            raise ValueError('Units {0} is not a valid units identifier'.format(units))

    var_prefix = '{0}.'.format(self.pathname) if self.pathname else ''

    path_map = {'time': 'time.{0}',
                'state': ('indep_states.states:{0}', 'state_interp.state_col:{0}'),
                'indep_control': 'control_interp_comp.control_values:{0}',
                'input_control': 'control_interp_comp.control_values:{0}',
                'design_parameter': 'design_params.design_parameters:{0}',
                'input_parameter': 'input_params.input_parameters:{0}_out',
                'control_rate': 'control_interp_comp.control_rates:{0}',
                'control_rate2': 'control_interp_comp.control_rates:{0}',
                'ode': ('rhs_disc.{0}', 'rhs_col.{0}')}

    if var_type == 'state':
        # State and RHS values need to be interleaved since disc and col values are not
        # available from the same output
        disc_path_fmt, col_path_fmt = path_map[var_type]
        disc_path = var_prefix + disc_path_fmt.format(var)
        col_path = var_prefix + col_path_fmt.format(var)

        state_shape = op[disc_path]['shape'][1:]
        disc_units = op[disc_path]['units']
        disc_vals = op[disc_path]['value']
        col_units = op[col_path]['units']
        col_vals = op[col_path]['value']

        # If units is None, use the units from the IndepVarComp
        if units is None:
            units = disc_units

        output_value = np.zeros((gd.num_nodes,) + state_shape)
        output_value[disc_node_idxs, ...] = \
            convert_units(disc_vals[gd.input_maps['state_input_to_disc'], ...],
                          disc_units, units)
        output_value[col_node_idxs, ...] = convert_units(col_vals, col_units, units)

    elif var_type in ('indep_control', 'input_control'):
        var_path = var_prefix + path_map[var_type].format(var)
        output_units = op[var_path]['units']
        vals = op[var_path]['value']
        output_value = convert_units(vals, output_units, units)

    elif var_type in ('design_parameter', 'input_parameter', 'traj_design_parameter',
                      'traj_input_parameter'):
        var_path = var_prefix + path_map[var_type].format(var)
        output_units = op[var_path]['units']
        output_value = convert_units(op[var_path]['value'], output_units, units)
        output_value = np.repeat(output_value, gd.num_nodes, axis=0)

    elif var_type == 'ode':
        rhs_disc_outputs = dict(self.rhs_disc.list_outputs(out_stream=None, values=True,
                                                           shape=True, units=True))
        rhs_col_outputs = dict(self.rhs_col.list_outputs(out_stream=None, values=True,
                                                         shape=True, units=True))

        prom2abs_disc = self.rhs_disc._var_allprocs_prom2abs_list
        prom2abs_col = self.rhs_col._var_allprocs_prom2abs_list

        # Is var in prom2abs_disc['output']?
        abs_path_disc = prom2abs_disc['output'][var][0]
        abs_path_col = prom2abs_col['output'][var][0]

        shape = rhs_disc_outputs[abs_path_disc]['shape'][1:]
        disc_units = rhs_disc_outputs[abs_path_disc]['units']
        col_units = rhs_col_outputs[abs_path_col]['units']

        output_value = np.zeros((gd.num_nodes,) + shape)

        disc_vals = rhs_disc_outputs[abs_path_disc]['value']
        col_vals = rhs_col_outputs[abs_path_col]['value']

        output_value[disc_node_idxs, ...] = convert_units(disc_vals, disc_units, units)
        output_value[col_node_idxs, ...] = convert_units(col_vals, col_units, units)

    else:
        var_path = var_prefix + path_map[var_type].format(var)
        output_units = op[var_path]['units']
        output_value = convert_units(op[var_path]['value'], output_units, units)

    # Always return a column vector
    if len(output_value.shape) == 1:
        output_value = np.reshape(output_value, (gd.num_nodes, 1))

    return output_value[gd.subset_node_indices[nodes], ...]
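# --- Usage sketch (illustrative only; assumes a dymos-style phase that has already run) ---
#
#     x_all  = phase.get_values('x')                                # all nodes, native units
#     x_disc = phase.get_values('x', nodes='state_disc', units='m')
#
# For states and ODE outputs, discretization and collocation values are interleaved
# into a single (num_nodes, ...) array before the requested node subset is sliced out.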