def __getitem__(self, item): """Index or slice into array.""" if self.sparse: raise SignalError("Attempting to create a view of a sparse Signal") if item is Ellipsis or (isinstance(item, slice) and item == slice(None)): return self if not isinstance(item, tuple): item = (item,) if not all(npext.is_integer(i) or isinstance(i, slice) for i in item): raise SignalError("Can only index or slice into signals") if all(npext.is_integer(i) for i in item): # turn one index into slice to get a view from numpy item = item[:-1] + (slice(item[-1], item[-1] + 1),) view = self._initial_value[item] offset = npext.array_offset(view) - npext.array_offset(self._initial_value) return Signal( view, name=f"{self.name}[{item}]", base=self.base, offset=offset, readonly=self.readonly, )
def coerce_ndarray(self, instance, ndarray):  # noqa: C901
    if isinstance(ndarray, np.ndarray):
        ndarray = ndarray.view()
    else:
        try:
            ndarray = np.array(ndarray, dtype=np.float64)
        except (ValueError, TypeError):
            raise ValidationError(
                "Must be a float NumPy array (got type %r)"
                % type(ndarray).__name__, attr=self.name, obj=instance)

    if self.readonly:
        ndarray.setflags(write=False)

    if self.shape is None:
        return ndarray

    if '...' in self.shape:
        # Convert '...' to the appropriate number of '*'s
        nfixed = len(self.shape) - 1
        n = ndarray.ndim - nfixed
        if n < 0:
            raise ValidationError("ndarray must be at least %dD (got %dD)"
                                  % (nfixed, ndarray.ndim),
                                  attr=self.name, obj=instance)

        i = self.shape.index('...')
        shape = list(self.shape[:i]) + (['*'] * n)
        if i < len(self.shape) - 1:
            shape.extend(self.shape[i+1:])
    else:
        shape = self.shape

    if ndarray.ndim != len(shape):
        raise ValidationError("ndarray must be %dD (got %dD)"
                              % (len(shape), ndarray.ndim),
                              attr=self.name, obj=instance)

    for i, attr in enumerate(shape):
        assert is_integer(attr) or isinstance(attr, str), (
            "shape can only be an int or str representing an attribute")
        if attr == '*':
            continue

        desired = attr if is_integer(attr) else getattr(instance, attr)

        if not is_integer(desired):
            raise ValidationError(
                "%s not yet initialized; cannot determine if shape is "
                "correct. Consider using a distribution instead." % attr,
                attr=self.name, obj=instance)

        if ndarray.shape[i] != desired:
            raise ValidationError("shape[%d] should be %d (got %d)"
                                  % (i, desired, ndarray.shape[i]),
                                  attr=self.name, obj=instance)

    return ndarray
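
# A minimal standalone sketch of the '...' handling above: the ellipsis entry
# expands into as many '*' wildcards as the array has axes beyond the fixed
# ones. The shapes below are hypothetical examples, not values from the class.
spec = (3, "...", 2)   # declared shape: 3 x any-number-of-axes x 2
ndim = 5               # dimensionality of the array being validated

nfixed = len(spec) - 1           # fixed entries besides '...'
n = ndim - nfixed                # axes the '...' must absorb

i = spec.index("...")
shape = list(spec[:i]) + ["*"] * n
if i < len(spec) - 1:
    shape.extend(spec[i + 1:])

assert shape == [3, "*", "*", "*", 2]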
def lookup(cls, dims, x): if not is_integer(dims) or dims < 1: raise ValidationError("must be an integer >= 1", attr="dims") table = cls.load_table() if dims in table: xp, yp = table[dims] else: known_dims = np.array(list(table)) i = np.searchsorted(known_dims, dims) assert i > 0 if i >= len(known_dims): # dims is larger than any dimension we have, so use the largest xp, yp = table[known_dims[-1]] else: # take average of two curves dims0, dims1 = known_dims[i - 1], known_dims[i] xp0, yp0 = table[dims0] xp1, yp1 = table[dims1] assert dims0 < dims < dims1 ratio0 = (dims1 - dims) / (dims1 - dims0) ratio1 = 1 - ratio0 xp = (ratio0 * xp0 + ratio1 * xp1) if len(xp0) == len(xp1) else xp0 yp = ratio0 * np.interp(xp, xp0, yp0) + ratio1 * np.interp( xp, xp1, yp1) return np.interp(x, xp, yp)
def __init__(self, obj, key=slice(None)):
    self.obj = obj

    # Node.size_in != size_out, so one of these can be invalid
    # NumPy <= 1.8 raises a ValueError instead of an IndexError.
    try:
        self.size_in = np.arange(self.obj.size_in)[key].size
    except (IndexError, ValueError):
        self.size_in = None
    try:
        self.size_out = np.arange(self.obj.size_out)[key].size
    except (IndexError, ValueError):
        self.size_out = None
    if self.size_in is None and self.size_out is None:
        raise IndexError("Invalid slice '%s' of %s" % (key, self.obj))

    if is_integer(key):
        # single slices of the form [i] should be cast into
        # slice objects for convenience
        if key == -1:
            # special case because slice(-1, 0) gives the empty list
            self.slice = slice(key, None)
        else:
            self.slice = slice(key, key+1)
    else:
        self.slice = key
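
# A minimal standalone check of why the key == -1 branch above exists: the
# naive rewrite i -> slice(i, i + 1) selects nothing for the last element.
# Plain NumPy only; the array is illustrative.
import numpy as np

x = np.arange(5)

assert x[slice(2, 3)].size == 1        # i -> slice(i, i + 1) works for i >= 0
assert x[slice(-1, 0)].size == 0       # but slice(-1, 0) is empty
assert x[slice(-1, None)].size == 1    # hence the slice(key, None) special case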
def __init__(
    self,
    dimensions,
    randomize=True,
    unitary=False,
    max_similarity=0.1,
    include_pairs=False,
    rng=None,
):
    if not is_integer(dimensions) or dimensions < 1:
        raise ValidationError("dimensions must be a positive integer",
                              attr="dimensions", obj=self)
    self.dimensions = dimensions
    self.randomize = randomize
    self.unitary = unitary
    self.max_similarity = max_similarity
    self.pointers = {}
    self.keys = []
    self.key_pairs = None
    self.vectors = np.zeros((0, dimensions), dtype=rc.float_dtype)
    self.vector_pairs = None
    self._include_pairs = None
    self.include_pairs = include_pairs
    self._identity = None
    self.rng = rng
    self.readonly = False
    self.parent = None
def __init__(self, obj, key=slice(None)):
    self.obj = obj

    # Node.size_in != size_out, so one of these can be invalid
    # NumPy <= 1.8 raises a ValueError instead of an IndexError.
    try:
        self.size_in = np.arange(self.obj.size_in)[key].size
    except (IndexError, ValueError):
        self.size_in = None
    try:
        self.size_out = np.arange(self.obj.size_out)[key].size
    except (IndexError, ValueError):
        self.size_out = None
    if self.size_in is None and self.size_out is None:
        raise IndexError("Invalid slice '%s' of %s" % (key, self.obj))

    if is_integer(key):
        # single slices of the form [i] should be cast into
        # slice objects for convenience
        if key == -1:
            # special case because slice(-1, 0) gives the empty list
            self.slice = slice(key, None)
        else:
            self.slice = slice(key, key + 1)
    else:
        self.slice = key
def add_spikes(self, ti, spike_idxs, permanent=False):
    assert is_integer(ti)
    ti = int(ti)
    assert ti > 0, "Spike times must be >= 1 (got %d)" % ti
    assert ti not in self.spikes
    self.spikes[ti] = spike_idxs
    if permanent:
        self.permanent.add(ti)
def __getitem__(self, item): """Index or slice into array""" if item is Ellipsis or ( isinstance(item, slice) and item == slice(None)): return self if not isinstance(item, tuple): item = (item,) if not all(npext.is_integer(i) or isinstance(i, slice) for i in item): raise SignalError("Can only index or slice into signals") if all(npext.is_integer(i) for i in item): # turn one index into slice to get a view from numpy item = item[:-1] + (slice(item[-1], item[-1]+1),) view = self._initial_value[item] offset = (npext.array_offset(view) - npext.array_offset(self._initial_value)) return Signal(view, name="%s[%s]" % (self.name, item), base=self.base, offset=offset)
def coerce(self, instance, value): value = super().coerce(instance, value) for i, v in enumerate(value): if not is_integer(v): raise ValidationError("Element %d must be an int (got type %r)" % (i, type(v).__name__), attr=self.name, obj=instance) if self.low is not None and v < self.low: raise ValidationError( "Element %d must be >= %d (got %d)" % (i, self.low, v), attr=self.name, obj=instance) return value
def __getitem__(self, item): """Index or slice into array""" if item is Ellipsis or (isinstance(item, slice) and item == slice(None)): return self if not isinstance(item, tuple): item = (item, ) if not all(npext.is_integer(i) or isinstance(i, slice) for i in item): raise SignalError("Can only index or slice into signals") if all(npext.is_integer(i) for i in item): # turn one index into slice to get a view from numpy item = item[:-1] + (slice(item[-1], item[-1] + 1), ) view = self._initial_value[item] offset = (npext.array_offset(view) - npext.array_offset(self._initial_value)) return Signal(view, name="%s[%s]" % (self.name, item), base=self.base, offset=offset)
def coerce(self, instance, value): value = super().coerce(instance, value) for i, v in enumerate(value): if not is_integer(v): raise ValidationError( "Element %d must be an int (got type %r)" % (i, type(v).__name__), attr=self.name, obj=instance) if self.low is not None and v < self.low: raise ValidationError("Element %d must be >= %d (got %d)" % (i, self.low, v), attr=self.name, obj=instance) return value
def coerce(self, instance, value):
    value = super().coerce(instance, value)
    if value is not None:
        for i, v in enumerate(value):
            if not is_integer(v):
                raise ValidationError(
                    f"Element {i} must be an int (got type {type(v).__name__})",
                    attr=self.name,
                    obj=instance,
                )
            if self.low is not None and v < self.low:
                raise ValidationError(
                    f"Element {i} must be >= {self.low} (got {v})",
                    attr=self.name,
                    obj=instance,
                )
    return value
def __init__(self, dimensions, randomize=True, unitary=False,
             max_similarity=0.1, include_pairs=False, rng=None):
    if not is_integer(dimensions) or dimensions < 1:
        raise ValidationError("dimensions must be a positive integer",
                              attr='dimensions', obj=self)
    self.dimensions = dimensions
    self.randomize = randomize
    self.unitary = unitary
    self.max_similarity = max_similarity
    self.pointers = {}
    self.keys = []
    self.key_pairs = None
    self.vectors = np.zeros((0, dimensions), dtype=float)
    self.vector_pairs = None
    self._include_pairs = None
    self.include_pairs = include_pairs
    self._identity = None
    self.rng = rng
    self.readonly = False
    self.parent = None
def __init__(self, data, rng=None):
    if is_integer(data):
        if data < 1:
            raise ValidationError(
                "Number of dimensions must be a positive int",
                attr='data', obj=self)
        self.randomize(data, rng=rng)
    else:
        try:
            len(data)
        except Exception:
            raise ValidationError(
                "Must specify either the data or the length for a "
                "SemanticPointer.", attr='data', obj=self)
        self.v = np.array(data, dtype=float)
        if len(self.v.shape) != 1:
            raise ValidationError("'data' must be a vector", 'data', self)
def __init__(self, shape, ensemble_shape_or_transform):
    self.ensemble_shape = ensemble_shape_or_transform
    if isinstance(ensemble_shape_or_transform, nengo.Convolution):
        self.ensemble_shape = ensemble_shape_or_transform.output_shape.shape
    self.shape = shape

    for attr in ["ensemble_shape", "shape"]:
        shape = getattr(self, attr)
        if not isinstance(shape, tuple):
            raise ValidationError("Must be a tuple", attr=attr)
        if any(not is_integer(el) for el in shape):
            raise ValidationError("All elements must be an int", attr=attr)

    if len(self.shape) != len(self.ensemble_shape):
        raise ValidationError(
            "`shape` and `ensemble_shape` must be the same length",
            attr="shape")

    # Store numpy array versions of these shapes for easier manipulation
    self._ens_shape = np.asarray(self.ensemble_shape)
    self._shape = np.asarray(self.shape)

    n_splits = np.ceil(self._ens_shape / self._shape).astype(int)
    self.n_splits = np.prod(n_splits)
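
# A minimal standalone sketch of the split count computed at the end of the
# constructor above: ceiling division per axis gives the number of blocks of
# size `shape` needed to tile `ensemble_shape`. The values are hypothetical.
import numpy as np

ens_shape = np.asarray((6, 10))    # full ensemble shape
block_shape = np.asarray((4, 4))   # per-split shape

n_splits_per_axis = np.ceil(ens_shape / block_shape).astype(int)
n_splits = np.prod(n_splits_per_axis)

assert tuple(n_splits_per_axis) == (2, 3) and n_splits == 6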
def size_out(self):
    # Note: This can be relatively expensive for operators that dynamically
    # create other operators inside of their generate methods (or depend on
    # operators that do). All such dependencies will be generated too, as will
    # any that they might generate in their generate methods. The result of
    # this property is therefore cached, but that is still less than ideal
    # as each size_out calculation can recreate some portion of the graph.
    with nengo.Network(add_to_container=False):
        input_nodes = []
        for input_op in self.input_ops:
            input_op_size_out = input_op.size_out
            if not is_integer(input_op_size_out):
                raise ValueError(
                    f"default size_out property only works if all input_ops "
                    f"produce an integer size_out, but got: {input_op_size_out}"
                )
            input_nodes.append(nengo.Node(np.zeros(input_op_size_out)))

        output = self.generate(*input_nodes)

        if not hasattr(output, "size_out"):
            # Expected to be a Nengo node or a sliced object view of a Nengo
            # node.
            raise ValueError(
                f"default size_out property only works if the output from "
                f"generate defines size_out, but got: {output}")

        return output.size_out
def build_connection(model, conn): """Builds a `.Connection` object into a model. A brief summary of what happens in the connection build process, in order: 1. Solve for decoders. 2. Combine transform matrix with decoders to get weights. 3. Add operators for computing the function or multiplying neural activity by weights. 4. Call build function for the synapse. 5. Call build function for the learning rule. 6. Add operator for applying learning rule delta to weights. Some of these steps may be altered or omitted depending on the parameters of the connection, in particular the pre and post types. Parameters ---------- model : Model The model to build into. conn : Connection The connection to build. Notes ----- Sets ``model.params[conn]`` to a `.BuiltConnection` instance. """ # Create random number generator rng = np.random.RandomState(model.seeds[conn]) # Get input and output connections from pre and post def get_prepost_signal(is_pre): target = conn.pre_obj if is_pre else conn.post_obj key = "out" if is_pre else "in" if target not in model.sig: raise BuildError("Building %s: the %r object %s is not in the " "model, or has a size of zero." % (conn, "pre" if is_pre else "post", target)) signal = model.sig[target].get(key, None) if signal is None or signal.size == 0: raise BuildError( "Building %s: the %r object %s has a %r size of zero." % (conn, "pre" if is_pre else "post", target, key)) return signal model.sig[conn]["in"] = get_prepost_signal(is_pre=True) model.sig[conn]["out"] = get_prepost_signal(is_pre=False) decoders = None encoders = None eval_points = None solver_info = None post_slice = conn.post_slice # Figure out the signal going across this connection in_signal = model.sig[conn]["in"] if isinstance(conn.pre_obj, Node) or (isinstance(conn.pre_obj, Ensemble) and isinstance(conn.pre_obj.neuron_type, Direct)): # Node or Decoded connection in directmode sliced_in = slice_signal(model, in_signal, conn.pre_slice) if conn.function is None: in_signal = sliced_in elif isinstance(conn.function, np.ndarray): raise BuildError("Cannot use function points in direct connection") else: in_signal = Signal(shape=conn.size_mid, name="%s.func" % conn) model.add_op(SimPyFunc(in_signal, conn.function, None, sliced_in)) elif isinstance(conn.pre_obj, Ensemble): # Normal decoded connection eval_points, decoders, solver_info = model.build( conn.solver, conn, rng) if isinstance(conn.post_obj, Ensemble) and conn.solver.weights: model.sig[conn]["out"] = model.sig[conn.post_obj.neurons]["in"] encoders = model.params[conn.post_obj].scaled_encoders.T encoders = encoders[conn.post_slice] # post slice already applied to encoders (either here or in # `build_decoders`), so don't apply later post_slice = None else: in_signal = slice_signal(model, in_signal, conn.pre_slice) # Build transform if conn.solver.weights and not conn.solver.compositional: # special case for non-compositional weight solvers, where # the solver is solving for the full weight matrix. so we don't # need to combine decoders/transform/encoders. 
        weighted, weights = model.build(Dense(decoders.shape, init=decoders),
                                        in_signal, rng=rng)
    else:
        weighted, weights = model.build(conn.transform, in_signal,
                                        decoders=decoders, encoders=encoders,
                                        rng=rng)

    model.sig[conn]["weights"] = weights

    # Build synapse
    if conn.synapse is not None:
        weighted = model.build(conn.synapse, weighted, mode="update")

    # Store the weighted-filtered output in case we want to probe it
    model.sig[conn]["weighted"] = weighted

    if isinstance(conn.post_obj, Neurons):
        # Apply neuron gains (we don't need to do this if we're connecting to
        # an Ensemble, because the gains are rolled into the encoders)
        gains = Signal(
            model.params[conn.post_obj.ensemble].gain[post_slice],
            name="%s.gains" % conn,
        )

        if is_integer(post_slice) or isinstance(post_slice, slice):
            sliced_out = model.sig[conn]["out"][post_slice]
        else:
            # advanced indexing not supported on Signals, so we need to set up
            # an intermediate signal and use a Copy op to perform the indexing
            sliced_out = Signal(shape=gains.shape, name="%s.sliced_out" % conn)
            model.add_op(Reset(sliced_out))
            model.add_op(
                Copy(sliced_out, model.sig[conn]["out"],
                     dst_slice=post_slice, inc=True))

        model.add_op(
            ElementwiseInc(gains, weighted, sliced_out,
                           tag="%s.gains_elementwiseinc" % conn))
    else:
        # Copy to the proper slice
        model.add_op(
            Copy(
                weighted,
                model.sig[conn]["out"],
                dst_slice=post_slice,
                inc=True,
                tag="%s" % conn,
            ))

    # Build learning rules
    if conn.learning_rule is not None:
        # TODO: provide a general way for transforms to expose learnable params
        if not isinstance(conn.transform, (Dense, NoTransform)):
            raise NotImplementedError(
                "Learning on connections with %s transforms is not supported"
                % (type(conn.transform).__name__,))

        rule = conn.learning_rule
        rule = [rule] if not is_iterable(rule) else rule
        targets = []
        for r in rule.values() if isinstance(rule, dict) else rule:
            model.build(r)
            targets.append(r.modifies)

        if "encoders" in targets:
            encoder_sig = model.sig[conn.post_obj]["encoders"]
            encoder_sig.readonly = False
        if "decoders" in targets or "weights" in targets:
            if weights.ndim < 2:
                raise BuildError(
                    "'transform' must be a 2-dimensional array for learning")
            model.sig[conn]["weights"].readonly = False

    model.params[conn] = BuiltConnection(
        eval_points=eval_points,
        solver_info=solver_info,
        transform=conn.transform,
        weights=getattr(weights, "initial_value", None),
    )
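
# For context, a minimal user-level model that exercises this build path: a
# decoded, function-computing connection between two ensembles, built when the
# simulator is constructed. This uses only the standard public Nengo API; the
# network itself is an arbitrary example.
import numpy as np
import nengo

with nengo.Network(seed=0) as net:
    stim = nengo.Node(lambda t: np.sin(t))
    a = nengo.Ensemble(n_neurons=50, dimensions=1)
    b = nengo.Ensemble(n_neurons=50, dimensions=1)

    nengo.Connection(stim, a)
    # Decoded connection: the builder solves for decoders approximating the
    # squaring function, then combines them with the connection's transform.
    nengo.Connection(a, b, function=lambda x: x ** 2, synapse=0.01)

with nengo.Simulator(net) as sim:   # build_connection runs for each connection
    sim.run(0.1)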
def coerce_defaults(self):
    if self.shape is None:
        return True
    return all(is_integer(dim) or dim in ('...', '*') for dim in self.shape)
def coerce_ndarray(self, instance, ndarray):  # noqa: C901
    if isinstance(ndarray, np.ndarray):
        ndarray = ndarray.view()
    else:
        try:
            ndarray = np.array(ndarray, dtype=self.dtype)
        except (ValueError, TypeError):
            raise ValidationError(
                "Must be a %s NumPy array (got type %r)"
                % (self.dtype, type(ndarray).__name__),
                attr=self.name,
                obj=instance,
            )

    if self.readonly:
        ndarray.setflags(write=False)

    if self.shape is None:
        return ndarray

    if "..." in self.shape:
        # Convert '...' to the appropriate number of '*'s
        nfixed = len(self.shape) - 1
        n = ndarray.ndim - nfixed
        if n < 0:
            raise ValidationError(
                "ndarray must be at least %dD (got %dD)"
                % (nfixed, ndarray.ndim),
                attr=self.name,
                obj=instance,
            )

        i = self.shape.index("...")
        shape = list(self.shape[:i]) + (["*"] * n)
        if i < len(self.shape) - 1:
            shape.extend(self.shape[i + 1:])
    else:
        shape = self.shape

    if ndarray.ndim != len(shape):
        raise ValidationError(
            "ndarray must be %dD (got %dD)" % (len(shape), ndarray.ndim),
            attr=self.name,
            obj=instance,
        )

    for i, attr in enumerate(shape):
        assert is_integer(attr) or isinstance(
            attr, str
        ), "shape can only be an int or str representing an attribute"
        if attr == "*":
            continue

        desired = attr if is_integer(attr) else getattr(instance, attr)

        if not is_integer(desired):
            raise ValidationError(
                "%s not yet initialized; cannot determine if shape is "
                "correct. Consider using a distribution instead." % attr,
                attr=self.name,
                obj=instance,
            )

        if ndarray.shape[i] != desired:
            raise ValidationError(
                "shape[%d] should be %d (got %d)"
                % (i, desired, ndarray.shape[i]),
                attr=self.name,
                obj=instance,
            )

    return ndarray
def coerce_defaults(self): if self.shape is None: return True return all( is_integer(dim) or dim in ("...", "*") for dim in self.shape)