def __ufunc_add(a, b):
    """Operator that implements element-wise addition with Gyrus operator(s).

    If either operand is a Gyrus fold, then it is replaced with its underlying array
    of Gyrus operators to follow NumPy's broadcasting rules for np.add.

    If both operands are Gyrus operators, then they are added together by Nengo
    transform(s) that add every dimension, element-wise.

    If one of the operands is like an array, then it is expected to be either 0D
    (scalar) or 1D (list). In the 0D case, the scalar is provided by a Nengo node,
    and broadcasted through Nengo transforms according to the shape of the other
    operand. In the 1D case, the list is expected to have the same number of elements
    as the size_out of each element in the other operand, such that the same vector
    can be added to every vector that is produced by the other operand.
    """
    args = _broadcast_folds(a, b)
    if args is not None:
        return asoperator(np.add(*args))
    elif isinstance(a, Operator) and isinstance(b, Operator):
        return reduce_transform([a, b], trs=[1, 1], axis=0)
    elif isinstance(b, Operator):
        a, b = b, a  # At least one of the two operands has to be an Operator.
    assert isinstance(a, Operator)
    assert not isinstance(b, Operator)  # due to first if statement
    if is_array_like(b):
        # This can be generalized to handle a wider variety of cases, but, similar to
        # __ufunc_multiply, we are keeping the behaviour as unambiguous as possible
        # for now.
        b = np.asarray(b)
        if np.all(b == 0):
            return a
        elif b.ndim == 0:
            # Creates a single scalar Node and then transforms it according to the
            # size_out of each element in a.
            b = broadcast_scalar(b, size_out=a.size_out)
        elif b.ndim == 1:
            # Repeats the same Node for every element in a.
            if np.any(a.size_out != len(b)):
                raise TypeError(
                    f"add size mismatch for a (operator) + b (array): "
                    f"a.size_out ({a.size_out}) must match len(b) ({len(b)})"
                )
            b = asoperator(np.broadcast_to(stimulus(b), shape=a.shape))
        else:
            return NotImplemented
        assert isinstance(b, Operator)
        return np.add(a, b)
    return NotImplemented
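
# A minimal usage sketch of the addition semantics documented above, assuming that
# ``stimulus`` (used in this module) returns an Operator whose ``size_out`` equals
# the length of the given array, and that ``+`` dispatches to ``__ufunc_add``:
#
#     x = stimulus([1.0, 2.0, 3.0])        # Operator with size_out == 3
#     y = x + 2.0                          # 0D case: broadcast via broadcast_scalar
#     z = x + [1.0, 0.0, -1.0]             # 1D case: len(b) must equal x.size_out
#     w = x + stimulus([4.0, 5.0, 6.0])    # Operator + Operator: summed by transforms
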
def __init__(self, input_ops, trs):
    input_ops = asoperator(input_ops)
    if not isinstance(input_ops, Fold):
        raise TypeError(f"input_ops ({input_ops}) must be a Fold")
    trs = tuple(trs)
    if len(input_ops) != len(trs):
        raise ValueError(
            f"number of input operators ({len(input_ops)}) must equal the number "
            f"of transforms ({len(trs)})"
        )
    if input_ops.ndim != 1:
        raise ValueError(
            f"expected a Fold with only a single axis, but input_ops has shape: "
            f"{input_ops.shape}"
        )
    input_ops, trs = self._combine_transforms(input_ops, trs)

    # Infer input and output dimensionality of each transform.
    size_in = input_ops.size_out
    assert size_in.ndim == 1  # since input_ops.ndim == 1
    size_out = np.asarray(
        [
            get_transform_size_out(tr, size_in)
            for tr, size_in in zip(trs, input_ops.size_out)
        ]
    )
    if len(size_out) == 0 or np.any(size_out[0] != size_out[1:]):
        raise ValueError(f"input_ops must have all the same size, got: {size_out}")
    self._size_out = size_out[0]
    self._trs = trs

    super().__init__(input_ops)
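
# Hedged example of the contract enforced above: a Transforms operator pairs each
# element of a 1D Fold with one transform and sums the results (see reduce_transform
# below), so, assuming ``x`` and ``y`` are Operators with the same ``size_out``,
#
#     Transforms([x, y], trs=[1, -1])
#
# checks that both transforms yield the same output size and computes ``x - y``.
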
def broadcast_scalar(scalar, size_out):
    """Operator that creates a scalar Node and transforms it to match some shape.

    The parameter ``size_out`` is expected to be in the format of ``op.size_out``
    where ``op`` is an Operator (such as a Fold).
    """
    scalar = np.asarray(scalar)
    if not scalar.ndim == 0:
        raise TypeError(f"expected scalar, but got array with shape: {scalar.shape}")
    input_op = stimulus(scalar)

    def _project(_size_out):
        return input_op.transform(np.ones((_size_out, 1)))

    # This is vectorized across the elements of op.size_out. If op is just a basic
    # Operator then this trivially reduces to a single transform. If op is a Fold
    # then the elements of size_out correspond to the size_out of each of its
    # operators and a separate transform (from the same stimulus object) is created
    # for each element. For every repeated value of size_out, the same transform will
    # be recreated. An optimizer could reuse the transform if it makes sense to (it
    # might not always make sense to do so; for example, if a scalar is being
    # communicated to multiple different processing cores, then it saves communication
    # to do the transform after communicating the scalar, as opposed to doing it
    # before and communicating a high-dimensional vector to each core).
    return asoperator(
        np.vectorize(_project, otypes=[Transforms])(_size_out=size_out)
    )
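
# Illustrative sketch (not part of the public API): for an element with
# ``size_out == 3``, ``_project`` builds the transform ``np.ones((3, 1))``, so a
# scalar stimulus ``s`` is expanded to the vector ``[s, s, s]``. For a Fold whose
# ``size_out`` is ``[2, 3]``, two transforms are created from the same scalar Node:
#
#     broadcast_scalar(0.5, size_out=np.asarray([2, 3]))
#     # -> Fold of two Transforms operators outputting [0.5, 0.5] and [0.5, 0.5, 0.5]
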
def reduce_transform(input_ops, trs, axis=-1):
    """Operator that sums together transforms applied to each output along an axis.

    This reduces the dimensionality of the Fold by one by applying ``Transforms``
    along the chosen axis.
    """
    return asoperator(
        np.apply_along_axis(
            func1d=lambda arr: Transforms(arr, trs=trs), axis=axis, arr=input_ops
        )
    )
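
# Hedged sketch of a typical use: for a 1D Fold of operators ``[x, y]`` with equal
# ``size_out``, summing them with unit weights,
#
#     reduce_transform([x, y], trs=[1, 1], axis=0)
#
# produces a single Transforms operator computing ``x + y`` element-wise, which is
# exactly how ``__ufunc_add`` above combines two operators.
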
def bundle(input_ops, axis=-1):
    """Operator that joins all of the outputs along a given axis into a single output.

    This reduces the dimensionality of the Fold by one by applying ``Bundle1D`` along
    the chosen axis. When each element has a ``size_out`` of 1, this is the inverse
    of unbundle (when called with the same axis).
    """
    # This, together with Bundle1D, is a reasonably compact example of how to define
    # a custom operator without using @gyrus.vectorize. Instead of np.apply_along_axis,
    # many custom operators might apply np.vectorize.
    return asoperator(
        np.apply_along_axis(func1d=Bundle1D, axis=axis, arr=input_ops)
    )
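
# Hedged sketch: given a 1D Fold ``f`` of three operators that each output a single
# dimension (``size_out == 1``), bundling along the last axis yields one operator
# whose output is the concatenated 3-dimensional vector:
#
#     u = bundle(f)
#     # u.size_out == 3; per the docstring above, bundle inverts unbundle (defined
#     # elsewhere) when both are called with the same axis.
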
def __ufunc_multiply(a, b):
    """Operator that implements element-wise multiplication with Gyrus operator(s).

    If either operand is a Gyrus fold, then it is replaced with its underlying array
    of Gyrus operators to follow NumPy's broadcasting rules for np.multiply.

    If both operands are Gyrus operators, then they are multiplied together by using
    ``gyrus.multiply``, which vectorizes an element-wise product network across both
    operands.

    If one of the operands is like an array, then it is expected to be either 0D
    (scalar) or 1D (list). In either case, the operand becomes a Nengo transform that
    is applied to each Gyrus operator to scale its outputs element-wise.
    """
    # gyrus.multiply (used here and defined elsewhere) is a bit different from the
    # ufunc defined here, np.multiply. The former only supports multiplying two
    # operators. The latter delegates to the former (in the same way as __mul__), but
    # also delegates to transform to handle a wider variety of types. In particular,
    # if one of the two operands is not an operator, then it will be used as a
    # transform on the other operand. To make the semantics of this unambiguous, in a
    # similar manner to __ufunc_add, only 0D or 1D transforms are currently supported,
    # such that the transform is doing an element-wise multiplication on each element.
    args = _broadcast_folds(a, b)
    if args is not None:
        return asoperator(np.multiply(*args))
    elif isinstance(a, Operator) and isinstance(b, Operator):
        return multiply(a, b)
    elif isinstance(b, Operator):
        a, b = b, a  # At least one of the two operands has to be an Operator.
    assert isinstance(a, Operator)
    assert not isinstance(b, Operator)  # due to first if statement
    if is_array_like(b):
        b = np.asarray(b)
        # Scalars (b.ndim == 0) are fine, as is, as they naturally work with
        # nengo.Connection. 1D arrays also work, as is, so long as their length is
        # equal to the number of input dimensions in Nengo, which will also be the
        # number of output dimensions.
        if np.all(b == 1):
            return a
        elif b.ndim == 1 and np.any(a.size_out != len(b)):
            raise TypeError(
                f"multiply size mismatch for a (operator) * b (array): "
                f"a.size_out ({a.size_out}) must match len(b) ({len(b)})"
            )
        elif b.ndim >= 2:
            return NotImplemented
        return transform(a, tr=b)
    return NotImplemented
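
# Minimal sketch of the multiplication semantics above (``stimulus`` as used in this
# module; shapes chosen purely for illustration):
#
#     x = stimulus([1.0, 2.0])
#     y = stimulus([3.0, 4.0])
#     p = x * y           # Operator * Operator -> multiply(x, y) product network
#     q = x * 2.0         # 0D array-like -> transform(x, tr=2.0)
#     r = x * [2.0, 0.5]  # 1D array-like -> element-wise transform; len must be 2
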
def configure(input_op, reset=False, **config):
    """Operator that applies configuration settings to all downstream operators.

    Applying the configure operator to any Operator or Fold results in a new Operator
    or Fold that contains the given keyword arguments as configuration. Moreover, the
    configuration is propagated downstream and picked up by applicable operators.

    Currently, configuration is only picked up by operators that have been decorated
    by ``@gyrus.vectorize``, and is restricted to keyword arguments that have been
    explicitly whitelisted as ``configurable`` by the decorator. Another limitation
    is that if any Gyrus operators are created within a generate call (for example,
    considering the ``integrand`` option to the integrator) then those operators are
    by default disconnected from the rest of the graph (in the case of the integrator,
    ``Stimulus(x)`` creates a disjoint root).

    Configurations obey the following natural rules for precedence:

    1. Keyword arguments explicitly provided to the operator have highest precedence.
    2. An operator's own configuration takes precedence over those of its input
       operators. However, if ``reset=True`` then the input operators are essentially
       ignored, and the configuration is 'reset' to what it would be if configuring a
       root in the graph.
    3. The input operators take precedence in left-to-right order.
    4. And so on, recursively (upstream).

    Since operators are immutable and form a directed acyclic graph (DAG), these
    rules are unambiguous, and give a consistent configuration setting for every
    operator in the DAG.
    """
    return asoperator(
        np.vectorize(Configure, otypes=[Configure])(
            input_op=input_op, reset=reset, **config
        )
    )
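
# Hedged usage sketch of the precedence rules above. The ``n_neurons`` keyword is an
# illustrative assumption; which keywords are actually configurable depends on what
# each operator whitelists via ``@gyrus.vectorize``:
#
#     y = configure(x, n_neurons=250)   # operators downstream of y see n_neurons=250
#     z = configure(y, n_neurons=100)   # z's own configuration wins over y's (rule 2)
#     w = configure(y, reset=True)      # ignores upstream configuration entirely
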
def transform(input_op, tr):
    """Operator that applies a single transform to every output."""
    return asoperator(
        np.vectorize(
            pyfunc=lambda elem: Transforms([elem], trs=[tr]), otypes=[Transforms]
        )(input_op)
    )
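
# Sketch: ``transform`` applies the same Nengo transform to every element of an
# Operator or Fold, e.g. scaling each output by 2 (assuming ``x`` is an Operator):
#
#     y = transform(x, tr=2)  # what ``x * 2`` resolves to via __ufunc_multiply above
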
def wrapper(*args, **kwargs):
    # Invoke the underlying vectorized function and coerce its array of outputs
    # into a Gyrus Operator (or Fold).
    array = vectorized(*args, **kwargs)
    op = asoperator(array)
    return op