class Oja(LearningRuleType): """Oja learning rule. Modifies connection weights according to the Hebbian Oja rule, which augments typically Hebbian coactivity with a "forgetting" term that is proportional to the weight of the connection and the square of the postsynaptic activity. Notes ----- The Oja rule is dependent on pre and post neural activities, not decoded values, and so is not affected by changes in the size of pre and post ensembles. However, if you are decoding from the post ensemble, the Oja rule will have an increased effect on larger post ensembles because more connection weights are changing. In these cases, it may be advantageous to scale the learning rate on the Oja rule by ``1 / post.n_neurons``. Parameters ---------- learning_rate : float, optional A scalar indicating the rate at which weights will be adjusted. pre_synapse : `.Synapse`, optional Synapse model used to filter the pre-synaptic activities. post_synapse : `.Synapse`, optional Synapse model used to filter the post-synaptic activities. If None, ``post_synapse`` will be the same as ``pre_synapse``. beta : float, optional A scalar weight on the forgetting term. Attributes ---------- beta : float A scalar weight on the forgetting term. learning_rate : float A scalar indicating the rate at which weights will be adjusted. post_synapse : `.Synapse` Synapse model used to filter the post-synaptic activities. pre_synapse : `.Synapse` Synapse model used to filter the pre-synaptic activities. """ modifies = "weights" probeable = ("pre_filtered", "post_filtered", "delta") learning_rate = NumberParam("learning_rate", low=0, readonly=True, default=1e-6) pre_synapse = SynapseParam("pre_synapse", default=Lowpass(tau=0.005), readonly=True) post_synapse = SynapseParam("post_synapse", default=None, readonly=True) beta = NumberParam("beta", low=0, readonly=True, default=1.0) pre_tau = _deprecated_tau("pre_tau", "pre_synapse") post_tau = _deprecated_tau("post_tau", "post_synapse") def __init__( self, learning_rate=Default, pre_synapse=Default, post_synapse=Default, beta=Default, pre_tau=Unconfigurable, post_tau=Unconfigurable, ): super().__init__(learning_rate, size_in=0) self.beta = beta if pre_tau is Unconfigurable: self.pre_synapse = pre_synapse else: self.pre_tau = pre_tau if post_tau is Unconfigurable: self.post_synapse = (self.pre_synapse if post_synapse is Default else post_synapse) else: self.post_tau = post_tau @property def _argdefaults(self): return ( ("learning_rate", Oja.learning_rate.default), ("pre_synapse", Oja.pre_synapse.default), ("post_synapse", self.pre_synapse), ("beta", Oja.beta.default), )
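# Hedged usage sketch (not from the source above): attaching the Oja rule to a
# neuron-to-neuron connection so that full connection weights exist to modify.
# The ensemble sizes, initial weights, and the 1 / post.n_neurons learning-rate
# scaling suggested in the Notes are illustrative assumptions.
import numpy as np
import nengo

with nengo.Network() as net:
    pre = nengo.Ensemble(30, dimensions=1)
    post = nengo.Ensemble(20, dimensions=1)
    conn = nengo.Connection(
        pre.neurons,
        post.neurons,
        transform=1e-3 * np.ones((post.n_neurons, pre.n_neurons)),
        learning_rule_type=nengo.Oja(learning_rate=1e-6 / post.n_neurons),
    )
    # "delta" is listed in `probeable`, so the per-step weight change can be probed
    delta_probe = nengo.Probe(conn.learning_rule, "delta")

with nengo.Simulator(net) as sim:
    sim.run(0.1)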
class Connection(NengoObject): """Connects two objects together. Almost any Nengo object can act as the pre or post side of a connection. Additionally, you can use Python slice syntax to access only some of the dimensions of the pre or post object. For example, if ``node`` has ``size_out=2`` and ``ensemble`` has ``size_in=1``, we could not create the following connection:: nengo.Connection(node, ensemble) But, we could create either of these two connections:: nengo.Connection(node[0], ensemble) nengo.Connection(node[1], ensemble) Parameters ---------- pre : Ensemble or Neurons or Node The source Nengo object for the connection. post : Ensemble or Neurons or Node or Probe The destination object for the connection. label : string A descriptive label for the connection. dimensions : int The number of output dimensions of the pre object, including `function`, but not including `transform`. synapse : float, optional Post-synaptic time constant (PSTC) to use for filtering. transform : (post_size, pre_size) array_like, optional Linear transform mapping the pre output to the post input. This transform is in terms of the sliced size; if either pre or post is a slice, the transform must be of shape (len(post_slice), len(pre_slice)). solver : Solver Instance of a Solver class to compute decoders or weights (see `nengo.solvers`). If `solver.weights` is True, a full connection weight matrix is computed instead of decoders. function : callable, optional Function to compute using the pre population (pre must be Ensemble). eval_points : (n_eval_points, pre_size) array_like or int, optional Points at which to evaluate `function` when computing decoders, spanning the interval (-pre.radius, pre.radius) in each dimension. scale_eval_points : bool Indicates whether the eval_points should be scaled by the radius of the pre Ensemble. Defaults to True. learning_rule_type : instance or list or dict of LearningRuleType, optional Methods of modifying the connection weights during simulation. Attributes ---------- dimensions : int The number of output dimensions of the pre object, including `function`, but before applying the `transform`. function : callable The given function. function_size : int The output dimensionality of the given function. Defaults to 0. is_decoded : bool True if and only if the connection is decoded. This will not occur when `solver.weights` is True or both `pre` and `post` are `Neurons`. label : str A human-readable connection label for debugging and visualization. Incorporates the labels of the pre and post objects. learning_rule : LearningRule or collection of LearningRule The LearningRule objects corresponding to `learning_rule_type`, and in the same format. Use these to probe the learning rules. learning_rule_type : instance or list or dict of LearningRuleType, optional The learning rule types. post : Ensemble or Neurons or Node or Probe The given post object. pre : Ensemble or Neurons or Node The given pre object. transform : (post_size, pre_size) array_like Linear transform mapping the pre output to the post input. seed : int The seed used for random number generation. 
""" pre = PrePostParam('pre', nonzero_size_out=True) post = PrePostParam('post', nonzero_size_in=True) synapse = SynapseParam('synapse', default=Lowpass(0.005)) transform = TransformParam('transform', default=np.array(1.0)) solver = ConnectionSolverParam('solver', default=LstsqL2()) learning_rule_type = ConnectionLearningRuleTypeParam('learning_rule_type', default=None, optional=True) function_info = ConnectionFunctionParam('function', default=None, optional=True) eval_points = EvalPointsParam('eval_points', default=None, optional=True, sample_shape=('*', 'size_in')) scale_eval_points = BoolParam('scale_eval_points', default=True) modulatory = ObsoleteParam( 'modulatory', "Modulatory connections have been removed. " "Connect to a learning rule instead.", since="v2.1.0", url="https://github.com/nengo/nengo/issues/632#issuecomment-71663849") def __init__(self, pre, post, synapse=Default, transform=Default, solver=Default, learning_rule_type=Default, function=Default, eval_points=Default, scale_eval_points=Default, label=Default, seed=Default, modulatory=Unconfigurable): super(Connection, self).__init__(label=label, seed=seed) self.pre = pre self.post = post self.synapse = synapse self.transform = transform self.scale_eval_points = scale_eval_points self.eval_points = eval_points # Must be set before function self.function_info = function # Must be set after transform self.solver = solver # Must be set before learning rule self.learning_rule_type = learning_rule_type # set after transform self.modulatory = modulatory @property def function(self): return self.function_info.function @function.setter def function(self, function): self.function_info = function @property def probeable(self): return ['output', 'input', 'weights'] @property def pre_obj(self): return self.pre.obj if isinstance(self.pre, ObjView) else self.pre @property def pre_slice(self): return self.pre.slice if isinstance(self.pre, ObjView) else slice(None) @property def post_obj(self): return self.post.obj if isinstance(self.post, ObjView) else self.post @property def post_slice(self): return (self.post.slice if isinstance(self.post, ObjView) else slice(None)) @property def size_in(self): """Output size of sliced `pre`; input size of the function.""" return self.pre.size_out @property def size_mid(self): """Output size of the function; input size of the transform. If the function is None, then `size_in == size_mid`. 
""" size = self.function_info.size return self.size_in if size is None else size @property def size_out(self): """Output size of the transform; input size to the sliced post.""" return self.post.size_in @property def _str(self): if self.label is not None: return self.label return "from %s to %s%s" % (self.pre, self.post, " computing '%s'" % self.function.__name__ if self.function is not None else "") def __str__(self): return "<Connection %s>" % self._str def __repr__(self): return "<Connection at 0x%x %s>" % (id(self), self._str) @property def learning_rule(self): if self.learning_rule_type is not None and self._learning_rule is None: types = self.learning_rule_type if isinstance(types, dict): self._learning_rule = types.__class__() # dict of same type for k, v in iteritems(types): self._learning_rule[k] = LearningRule(self, v) elif is_iterable(types): self._learning_rule = [LearningRule(self, v) for v in types] elif isinstance(types, LearningRuleType): self._learning_rule = LearningRule(self, types) else: raise ValidationError("Invalid type %r" % types.__class__.__name__, attr='learning_rule_type', obj=self) return self._learning_rule @property def is_decoded(self): return not (self.solver.weights or (isinstance(self.pre_obj, Neurons) and isinstance(self.post_obj, Neurons)))
class Connection(NengoObject): """Connects two objects together. The connection between the two object is unidirectional, transmitting information from the first argument, ``pre``, to the second argument, ``post``. Almost any Nengo object can act as the pre or post side of a connection. Additionally, you can use Python slice syntax to access only some of the dimensions of the pre or post object. For example, if ``node`` has ``size_out=2`` and ``ensemble`` has ``size_in=1``: .. testcode:: with nengo.Network() as net: node = nengo.Node(np.zeros(2)) ensemble = nengo.Ensemble(10, 1) We could not create the following connection: .. testcode:: with net: nengo.Connection(node, ensemble) .. testoutput:: :hide: Traceback (most recent call last): ... nengo.exceptions.ValidationError: init: Shape of initial value () does not \ match expected shape (1, 2) But, we could create either of these two connections: .. testcode:: with net: nengo.Connection(node[0], ensemble) nengo.Connection(node[1], ensemble) Parameters ---------- pre : Ensemble or Neurons or Node The source Nengo object for the connection. post : Ensemble or Neurons or Node or LearningRule The destination object for the connection. synapse : Synapse or None, optional Synapse model to use for filtering (see `~nengo.synapses.Synapse`). If *None*, no synapse will be used and information will be transmitted without any delay (if supported by the backend---some backends may introduce a single time step delay). Note that at least one connection must have a synapse that is not *None* if components are connected in a cycle. Furthermore, a synaptic filter with a zero time constant is different from a *None* synapse as a synaptic filter will always add a delay of at least one time step. function : callable or (n_eval_points, size_mid) array_like, optional Function to compute across the connection. Note that ``pre`` must be an ensemble to apply a function across the connection. If an array is passed, the function is implicitly defined by the points in the array and the provided ``eval_points``, which have a one-to-one correspondence. transform : (size_out, size_mid) array_like, optional Linear transform mapping the pre output to the post input. This transform is in terms of the sliced size; if either pre or post is a slice, the transform must be shaped according to the sliced dimensionality. Additionally, the function is applied before the transform, so if a function is computed across the connection, the transform must be of shape ``(size_out, size_mid)``. solver : Solver, optional Solver instance to compute decoders or weights (see `~nengo.solvers.Solver`). If ``solver.weights`` is True, a full connection weight matrix is computed instead of decoders. learning_rule_type : LearningRuleType or iterable of LearningRuleType, optional Modifies the decoders or connection weights during simulation. eval_points : (n_eval_points, size_in) array_like or int, optional Points at which to evaluate ``function`` when computing decoders, spanning the interval (-pre.radius, pre.radius) in each dimension. If None, will use the eval_points associated with ``pre``. scale_eval_points : bool, optional Indicates whether the evaluation points should be scaled by the radius of the pre Ensemble. label : str, optional A descriptive label for the connection. seed : int, optional The seed used for random number generation. Attributes ---------- function : callable The given function. function_size : int The output dimensionality of the given function. 
If no function is specified, function_size will be 0. label : str A human-readable connection label for debugging and visualization. If not overridden, incorporates the labels of the pre and post objects. learning_rule_type : instance or list or dict of LearningRuleType, optional The learning rule types. post : Ensemble or Neurons or Node or Probe or ObjView The given post object. post_obj : Ensemble or Neurons or Node or Probe The underlying post object, even if ``post`` is an ``ObjView``. post_slice : slice or list or None The slice associated with ``post`` if it is an ObjView, or None. pre : Ensemble or Neurons or Node or ObjView The given pre object. pre_obj : Ensemble or Neurons or Node The underlying pre object, even if ``post`` is an ``ObjView``. pre_slice : slice or list or None The slice associated with ``pre`` if it is an ObjView, or None. seed : int The seed used for random number generation. solver : Solver The Solver instance that will be used to compute decoders or weights (see ``nengo.solvers``). synapse : Synapse The Synapse model used for filtering across the connection (see ``nengo.synapses``). transform : (size_out, size_mid) array_like Linear transform mapping the pre function output to the post input. Properties ---------- learning_rule : LearningRule or iterable of LearningRule Connectable learning rule object(s) associated with this connection. size_in : int The number of output dimensions of the pre object. Also the input size of the function, if one is specified. size_mid : int The number of output dimensions of the function, if specified. If the function is not specified, then ``size_in == size_mid``. size_out : int The number of input dimensions of the post object. Also the number of output dimensions of the transform. """ probeable = ("output", "input", "weights") pre = PrePostParam("pre", nonzero_size_out=True) post = PrePostParam("post", nonzero_size_in=True) synapse = SynapseParam("synapse", default=Lowpass(tau=0.005)) function_info = ConnectionFunctionParam("function", default=None, optional=True) transform = ConnectionTransformParam("transform", default=None, optional=True) solver = ConnectionSolverParam("solver", default=LstsqL2()) learning_rule_type = ConnectionLearningRuleTypeParam( "learning_rule_type", default=None, optional=True ) eval_points = EvalPointsParam( "eval_points", default=None, optional=True, sample_shape=("*", "size_in") ) scale_eval_points = BoolParam("scale_eval_points", default=True) _param_init_order = [ "pre", "post", "synapse", "eval_points", "function_info", "transform", "solver", "learning_rule_type", ] def __init__( self, pre, post, synapse=Default, function=Default, transform=Default, solver=Default, learning_rule_type=Default, eval_points=Default, scale_eval_points=Default, label=Default, seed=Default, ): super().__init__(label=label, seed=seed) self.pre = pre self.post = post self.synapse = synapse self.eval_points = eval_points # Must be set before function self.scale_eval_points = scale_eval_points self.function_info = function self.transform = transform # Must be set after function self.solver = solver # Must be set before learning rule self.learning_rule_type = learning_rule_type # set after transform def __str__(self): return self._str(include_id=False) def __repr__(self): return self._str(include_id=True) def _str(self, include_id): desc = "<Connection " if include_id: desc += "at 0x%x " % id(self) if self.label is None: desc += "from %s to %s%s" % ( self.pre, self.post, ( "" if self.function is None else " computing 
'%s'" % (function_name(self.function)) ), ) else: desc += self.label desc += ">" return desc @property def function(self): return self.function_info.function @function.setter def function(self, function): self.function_info = function @property def has_weights(self): return not isinstance(self.transform, NoTransform) or ( isinstance(self.pre_obj, Ensemble) and not isinstance(self.pre_obj.neuron_type, Direct) ) @property def is_decoded(self): warnings.warn( "is_decoded is deprecated; directly check the pre/post objects for the " "properties of interest instead", DeprecationWarning, ) return not ( self.solver.weights or ( isinstance(self.pre_obj, Neurons) and isinstance(self.post_obj, Neurons) ) ) @property def _to_neurons(self): return isinstance(self.post_obj, Neurons) or ( isinstance(self.pre_obj, Ensemble) and isinstance(self.post_obj, Ensemble) and self.solver.weights ) @property def _label(self): if self.label is not None: return self.label return "from %s to %s%s" % ( self.pre, self.post, " computing '%s'" % function_name(self.function) if self.function is not None else "", ) @property def learning_rule(self): """(LearningRule or iterable) Connectable learning rule object(s).""" if self.learning_rule_type is None: return None types = self.learning_rule_type if isinstance(types, dict): learning_rule = type(types)() # dict of same type for k, v in types.items(): learning_rule[k] = LearningRule(self, v) elif is_iterable(types): learning_rule = [LearningRule(self, v) for v in types] elif isinstance(types, LearningRuleType): learning_rule = LearningRule(self, types) else: raise ValidationError( "Invalid type %r" % type(types).__name__, attr="learning_rule_type", obj=self, ) return learning_rule @property def post_obj(self): return self.post.obj if isinstance(self.post, ObjView) else self.post @property def post_slice(self): return self.post.slice if isinstance(self.post, ObjView) else slice(None) @property def pre_obj(self): return self.pre.obj if isinstance(self.pre, ObjView) else self.pre @property def pre_slice(self): return self.pre.slice if isinstance(self.pre, ObjView) else slice(None) @property def size_in(self): """(int) The number of output dimensions of the pre object. Also the input size of the function, if one is specified. """ return self.pre.size_out @property def size_mid(self): """(int) The number of output dimensions of the function, if specified. If the function is not specified, then ``size_in == size_mid``. """ size = self.function_info.size return self.size_in if size is None else size @property def size_out(self): """(int) The number of input dimensions of the post object. Also the number of output dimensions of the transform. """ return self.post.size_in
class BCM(LearningRuleType): """Bienenstock-Cooper-Munroe learning rule. Modifies connection weights as a function of the presynaptic activity and the difference between the postsynaptic activity and the average postsynaptic activity. Notes ----- The BCM rule is dependent on pre and post neural activities, not decoded values, and so is not affected by changes in the size of pre and post ensembles. However, if you are decoding from the post ensemble, the BCM rule will have an increased effect on larger post ensembles because more connection weights are changing. In these cases, it may be advantageous to scale the learning rate on the BCM rule by ``1 / post.n_neurons``. Parameters ---------- learning_rate : float, optional A scalar indicating the rate at which weights will be adjusted. pre_synapse : `.Synapse`, optional Synapse model used to filter the pre-synaptic activities. post_synapse : `.Synapse`, optional Synapse model used to filter the post-synaptic activities. If None, ``post_synapse`` will be the same as ``pre_synapse``. theta_synapse : `.Synapse`, optional Synapse model used to filter the theta signal. Attributes ---------- learning_rate : float A scalar indicating the rate at which weights will be adjusted. post_synapse : `.Synapse` Synapse model used to filter the post-synaptic activities. pre_synapse : `.Synapse` Synapse model used to filter the pre-synaptic activities. theta_synapse : `.Synapse` Synapse model used to filter the theta signal. """ modifies = "weights" probeable = ("theta", "pre_filtered", "post_filtered", "delta") learning_rate = NumberParam("learning_rate", low=0, readonly=True, default=1e-9) pre_synapse = SynapseParam("pre_synapse", default=Lowpass(tau=0.005), readonly=True) post_synapse = SynapseParam("post_synapse", default=None, readonly=True) theta_synapse = SynapseParam("theta_synapse", default=Lowpass(tau=1.0), readonly=True) pre_tau = _deprecated_tau("pre_tau", "pre_synapse") post_tau = _deprecated_tau("post_tau", "post_synapse") theta_tau = _deprecated_tau("theta_tau", "theta_synapse") def __init__( self, learning_rate=Default, pre_synapse=Default, post_synapse=Default, theta_synapse=Default, pre_tau=Unconfigurable, post_tau=Unconfigurable, theta_tau=Unconfigurable, ): super().__init__(learning_rate, size_in=0) if pre_tau is Unconfigurable: self.pre_synapse = pre_synapse else: self.pre_tau = pre_tau if post_tau is Unconfigurable: self.post_synapse = (self.pre_synapse if post_synapse is Default else post_synapse) else: self.post_tau = post_tau if theta_tau is Unconfigurable: self.theta_synapse = theta_synapse else: self.theta_tau = theta_tau @property def _argdefaults(self): return ( ("learning_rate", BCM.learning_rate.default), ("pre_synapse", BCM.pre_synapse.default), ("post_synapse", self.pre_synapse), ("theta_synapse", BCM.theta_synapse.default), )
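# Hedged sketch (not from the source above) of attaching the BCM rule to a
# neuron-to-neuron connection; the sizes, initial weights, and learning-rate
# scaling by post.n_neurons follow the Notes but are otherwise arbitrary.
import numpy as np
import nengo

with nengo.Network() as net:
    pre = nengo.Ensemble(25, dimensions=1)
    post = nengo.Ensemble(25, dimensions=1)
    conn = nengo.Connection(
        pre.neurons,
        post.neurons,
        transform=1e-4 * np.ones((post.n_neurons, pre.n_neurons)),
        learning_rule_type=nengo.BCM(learning_rate=1e-9 / post.n_neurons),
    )
    # "theta" (the running postsynaptic average) is probeable per the docstring
    theta_probe = nengo.Probe(conn.learning_rule, "theta")

with nengo.Simulator(net) as sim:
    sim.run(0.1)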
class Connection(NengoObject): """Connects two objects together. TODO: Document slice syntax here and in the transform parameter. Parameters ---------- pre : Ensemble or Neurons or Node The source Nengo object for the connection. post : Ensemble or Neurons or Node or Probe The destination object for the connection. label : string A descriptive label for the connection. dimensions : int The number of output dimensions of the pre object, including `function`, but not including `transform`. synapse : float, optional Post-synaptic time constant (PSTC) to use for filtering. transform : (post_size, pre_size) array_like, optional Linear transform mapping the pre output to the post input. solver : Solver Instance of a Solver class to compute decoders or weights (see `nengo.decoders`). If solver.weights is True, a full connection weight matrix is computed instead of decoders. function : callable, optional Function to compute using the pre population (pre must be Ensemble). modulatory : bool, optional Specifies whether the connection is modulatory (does not physically connect to post, for use by learning rules), or not (default). eval_points : (n_eval_points, pre_size) array_like or int, optional Points at which to evaluate `function` when computing decoders, spanning the interval (-pre.radius, pre.radius) in each dimension. learning_rule : LearningRule or list of LearningRule, optional Methods of modifying the connection weights during simulation. Attributes ---------- dimensions : int The number of output dimensions of the pre object, including `function`, but before applying the `transform`. function : callable The given function. function_size : int The output dimensionality of the given function. Defaults to 0. label : str A human-readable connection label for debugging and visualization. Incorporates the labels of the pre and post objects. learning_rule : list of LearningRule The given learning rules. If given a single LearningRule, this will be a list with a single element. post : Ensemble or Neurons or Node or Probe The given post object. pre : Ensemble or Neurons or Node The given pre object. transform : (post_size, pre_size) array_like Linear transform mapping the pre output to the post input. modulatory : bool Whether the output of this signal is to act as an error signal for a learning rule. seed : int The seed used for random number generation. 
""" pre = NengoObjectParam(nonzero_size_out=True) post = NengoObjectParam(nonzero_size_in=True) synapse = SynapseParam(default=Lowpass(0.005)) transform = TransformParam(default=np.array(1.0)) solver = ConnectionSolverParam(default=LstsqL2()) function_info = ConnectionFunctionParam(default=None, optional=True) modulatory = BoolParam(default=False) learning_rule = ConnectionLearningRuleParam(default=None, optional=True) eval_points = EvalPointsParam(default=None, optional=True, shape=('*', 'size_in')) seed = IntParam(default=None, optional=True) probeable = ListParam(default=['signal']) def __init__(self, pre, post, synapse=Default, transform=Default, solver=Default, learning_rule=Default, function=Default, modulatory=Default, eval_points=Default, seed=Default): self.pre = pre self.post = post self.probeable = Default self.solver = solver # Must be set before learning rule self.learning_rule = learning_rule self.modulatory = modulatory self.synapse = synapse self.transform = transform self.eval_points = eval_points # Must be set before function self.function_info = function # Must be set after transform @property def function(self): return self.function_info.function @function.setter def function(self, function): self.function_info = function @property def pre_obj(self): return self.pre.obj if isinstance(self.pre, ObjView) else self.pre @property def pre_slice(self): return self.pre.slice if isinstance(self.pre, ObjView) else slice(None) @property def post_obj(self): return self.post.obj if isinstance(self.post, ObjView) else self.post @property def post_slice(self): return (self.post.slice if isinstance(self.post, ObjView) else slice(None)) @property def size_in(self): """Output size of sliced `pre`; input size of the function.""" return self.pre.size_out @property def size_mid(self): """Output size of the function; input size of the transform. If the function is None, then `size_in == size_mid`. """ size = self.function_info.size return self.size_in if size is None else size @property def size_out(self): """Output size of the transform; input size to the sliced post.""" return self.post.size_in @property def label(self): label = "%s->%s" % (self.pre.label, self.post.label) if self.function is not None: return "%s:%s" % (label, self.function.__name__) return label
class Test:
    sp = SynapseParam("sp", default=Lowpass(0.1))
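# Sketch of what the Test class above exercises (mirrors nengo's SynapseParam
# tests): an unset parameter falls back to its default synapse, and bare
# numbers assigned to it are coerced into Lowpass synapses.
from nengo.synapses import Lowpass

inst = Test()
assert isinstance(inst.sp, Lowpass) and inst.sp.tau == 0.1  # default applies

inst.sp = 0.05  # numbers are converted to Lowpass(tau=0.05)
assert isinstance(inst.sp, Lowpass) and inst.sp.tau == 0.05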
class Connection(NengoObject): """Connects two objects together. Almost any Nengo object can act as the pre or post side of a connection. Additionally, you can use Python slice syntax to access only some of the dimensions of the pre or post object. For example, if ``node`` has ``size_out=2`` and ``ensemble`` has ``size_in=1``, we could not create the following connection:: nengo.Connection(node, ensemble) But, we could create either of these two connections:: nengo.Connection(node[0], ensemble) nengo.Connection(node[1], ensemble) Parameters ---------- pre : Ensemble or Neurons or Node The source Nengo object for the connection. post : Ensemble or Neurons or Node or Probe The destination object for the connection. label : string A descriptive label for the connection. dimensions : int The number of output dimensions of the pre object, including `function`, but not including `transform`. synapse : float, optional Post-synaptic time constant (PSTC) to use for filtering. transform : (post_size, pre_size) array_like, optional Linear transform mapping the pre output to the post input. This transform is in terms of the sliced size; if either pre or post is a slice, the transform must be of shape (len(post_slice), len(pre_slice)). solver : Solver Instance of a Solver class to compute decoders or weights (see `nengo.solvers`). If solver.weights is True, a full connection weight matrix is computed instead of decoders. function : callable, optional Function to compute using the pre population (pre must be Ensemble). modulatory : bool, optional Specifies whether the connection is modulatory (does not physically connect to post, for use by learning rules), or not (default). eval_points : (n_eval_points, pre_size) array_like or int, optional Points at which to evaluate `function` when computing decoders, spanning the interval (-pre.radius, pre.radius) in each dimension. scale_eval_points : bool Indicates whether the eval_points should be scaled by the radius of the pre Ensemble. Defaults to True. learning_rule_type : instance or list or dict of LearningRuleType, optional Methods of modifying the connection weights during simulation. Attributes ---------- dimensions : int The number of output dimensions of the pre object, including `function`, but before applying the `transform`. function : callable The given function. function_size : int The output dimensionality of the given function. Defaults to 0. label : str A human-readable connection label for debugging and visualization. Incorporates the labels of the pre and post objects. learning_rule : LearningRule or collection of LearningRule The LearningRule objects corresponding to `learning_rule_type`, and in the same format. Use these to probe the learning rules. learning_rule_type : instance or list or dict of LearningRuleType, optional The learning rule types. post : Ensemble or Neurons or Node or Probe The given post object. pre : Ensemble or Neurons or Node The given pre object. transform : (post_size, pre_size) array_like Linear transform mapping the pre output to the post input. modulatory : bool Whether the output of this signal is to act as an error signal for a learning rule. seed : int The seed used for random number generation. 
""" pre = NengoObjectParam(nonzero_size_out=True) post = NengoObjectParam(nonzero_size_in=True) synapse = SynapseParam(default=Lowpass(0.005)) transform = TransformParam(default=np.array(1)) solver = ConnectionSolverParam(default=LstsqL2()) function_info = ConnectionFunctionParam(default=None, optional=True) modulatory = BoolParam(default=False) learning_rule_type = ConnectionLearningRuleTypeParam(default=None, optional=True) eval_points = EvalPointsParam(default=None, optional=True, sample_shape=('*', 'size_in')) scale_eval_points = BoolParam(default=True) seed = IntParam(default=None, optional=True) def __init__(self, pre, post, synapse=Default, transform=Default, solver=Default, learning_rule_type=Default, function=Default, modulatory=Default, eval_points=Default, scale_eval_points=Default, seed=Default): self.pre = pre self.post = post self.solver = solver # Must be set before learning rule self.learning_rule_type = learning_rule_type self.modulatory = modulatory self.synapse = synapse self.transform = transform self.scale_eval_points = scale_eval_points self.eval_points = eval_points # Must be set before function self.function_info = function # Must be set after transform @property def function(self): return self.function_info.function @function.setter def function(self, function): self.function_info = function @property def probeable(self): probeables = ["output", "input", "transform"] if isinstance(self.pre, Ensemble): probeables += ["decoders"] return probeables @property def pre_obj(self): return self.pre.obj if isinstance(self.pre, ObjView) else self.pre @property def pre_slice(self): return self.pre.slice if isinstance(self.pre, ObjView) else slice(None) @property def post_obj(self): return self.post.obj if isinstance(self.post, ObjView) else self.post @property def post_slice(self): return (self.post.slice if isinstance(self.post, ObjView) else slice(None)) @property def size_in(self): """Output size of sliced `pre`; input size of the function.""" return self.pre.size_out @property def size_mid(self): """Output size of the function; input size of the transform. If the function is None, then `size_in == size_mid`. """ size = self.function_info.size return self.size_in if size is None else size @property def size_out(self): """Output size of the transform; input size to the sliced post.""" return self.post.size_in @property def _label(self): return "from %s to %s%s" % (self.pre, self.post, " computing '%s'" % self.function.__name__ if self.function is not None else "") def __str__(self): return "<Connection %s>" % self._label def __repr__(self): return "<Connection at 0x%x %s>" % (id(self), self._label) @property def learning_rule(self): if self.learning_rule_type is not None and self._learning_rule is None: types = self.learning_rule_type if isinstance(types, dict): self._learning_rule = types.__class__() # dict of same type for k, v in iteritems(types): self._learning_rule[k] = LearningRule(self, v) elif is_iterable(types): self._learning_rule = [LearningRule(self, v) for v in types] elif isinstance(types, LearningRuleType): self._learning_rule = LearningRule(self, types) else: raise ValueError("Invalid type for `learning_rule_type`: %s" % (types.__class__.__name__)) return self._learning_rule
) nengo.Connection(err, conn.learning_rule["pes"]) # Case 3: neurons -> ens conn = nengo.Connection( ens1.neurons, ens2, transform=np.ones((1, ens1.n_neurons)), learning_rule_type={"pes": nengo.PES()}, ) nengo.Connection(err, conn.learning_rule["pes"]) with Simulator(net) as sim: sim.run(0.01) @pytest.mark.parametrize("pre_synapse", [0, Lowpass(tau=0.05), Alpha(tau=0.005)]) def test_pes_synapse(Simulator, seed, pre_synapse, allclose): rule = PES(pre_synapse=pre_synapse) with nengo.Network(seed=seed) as model: stim = nengo.Node(output=WhiteSignal(0.5, high=10)) x = nengo.Ensemble(100, 1) nengo.Connection(stim, x, synapse=None) conn = nengo.Connection(x, x, learning_rule_type=rule) p_neurons = nengo.Probe(x.neurons, synapse=pre_synapse) p_pes = nengo.Probe(conn.learning_rule, "activities") with Simulator(model) as sim: sim.run(0.5)
class Connection(NengoObject): """Connects two objects together. The connection between the two object is unidirectional, transmitting information from the first argument, ``pre``, to the second argument, ``post``. Almost any Nengo object can act as the pre or post side of a connection. Additionally, you can use Python slice syntax to access only some of the dimensions of the pre or post object. For example, if ``node`` has ``size_out=2`` and ``ensemble`` has ``size_in=1``, we could not create the following connection:: nengo.Connection(node, ensemble) But, we could create either of these two connections:: nengo.Connection(node[0], ensemble) nengo.Connection(node[1], ensemble) Parameters ---------- pre : Ensemble or Neurons or Node The source Nengo object for the connection. post : Ensemble or Neurons or Node or Probe The destination object for the connection. synapse : Synapse or None, optional Synapse model to use for filtering (see `~nengo.synapses.Synapse`). If *None*, no synapse will be used and information will be transmitted without any delay (if supported by the backend---some backends may introduce a single time step delay). Note that at least one connection must have a synapse that is not *None* if components are connected in a cycle. Furthermore, a synaptic filter with a zero time constant is different from a *None* synapse as a synaptic filter will always add a delay of at least one time step. function : callable or (n_eval_points, size_mid) array_like, optional Function to compute across the connection. Note that ``pre`` must be an ensemble to apply a function across the connection. If an array is passed, the function is implicitly defined by the points in the array and the provided ``eval_points``, which have a one-to-one correspondence. transform : (size_out, size_mid) array_like, optional Linear transform mapping the pre output to the post input. This transform is in terms of the sliced size; if either pre or post is a slice, the transform must be shaped according to the sliced dimensionality. Additionally, the function is applied before the transform, so if a function is computed across the connection, the transform must be of shape ``(size_out, size_mid)``. solver : Solver, optional Solver instance to compute decoders or weights (see `~nengo.solvers.Solver`). If ``solver.weights`` is True, a full connection weight matrix is computed instead of decoders. learning_rule_type : LearningRuleType or iterable of LearningRuleType, \ optional Modifies the decoders or connection weights during simulation. eval_points : (n_eval_points, size_in) array_like or int, optional Points at which to evaluate ``function`` when computing decoders, spanning the interval (-pre.radius, pre.radius) in each dimension. If None, will use the eval_points associated with ``pre``. scale_eval_points : bool, optional Indicates whether the evaluation points should be scaled by the radius of the pre Ensemble. label : str, optional A descriptive label for the connection. seed : int, optional The seed used for random number generation. Attributes ---------- is_decoded : bool True if and only if the connection is decoded. This will not occur when ``solver.weights`` is True or both pre and post are `~nengo.ensemble.Neurons`. function : callable The given function. function_size : int The output dimensionality of the given function. If no function is specified, function_size will be 0. label : str A human-readable connection label for debugging and visualization. 
If not overridden, incorporates the labels of the pre and post objects. learning_rule_type : instance or list or dict of LearningRuleType, optional The learning rule types. post : Ensemble or Neurons or Node or Probe or ObjView The given post object. post_obj : Ensemble or Neurons or Node or Probe The underlying post object, even if ``post`` is an ``ObjView``. post_slice : slice or list or None The slice associated with ``post`` if it is an ObjView, or None. pre : Ensemble or Neurons or Node or ObjView The given pre object. pre_obj : Ensemble or Neurons or Node The underlying pre object, even if ``post`` is an ``ObjView``. pre_slice : slice or list or None The slice associated with ``pre`` if it is an ObjView, or None. seed : int The seed used for random number generation. solver : Solver The Solver instance that will be used to compute decoders or weights (see ``nengo.solvers``). synapse : Synapse The Synapse model used for filtering across the connection (see ``nengo.synapses``). transform : (size_out, size_mid) array_like Linear transform mapping the pre function output to the post input. Properties ---------- size_in : int The number of output dimensions of the pre object. Also the input size of the function, if one is specified. size_mid : int The number of output dimensions of the function, if specified. If the function is not specified, then ``size_in == size_mid``. size_out : int The number of input dimensions of the post object. Also the number of output dimensions of the transform. """ probeable = ('output', 'input', 'weights') pre = PrePostParam('pre', nonzero_size_out=True) post = PrePostParam('post', nonzero_size_in=True) synapse = SynapseParam('synapse', default=Lowpass(tau=0.005)) function_info = ConnectionFunctionParam('function', default=None, optional=True) transform = ConnectionTransformParam('transform', default=1.0) solver = ConnectionSolverParam('solver', default=LstsqL2()) learning_rule_type = ConnectionLearningRuleTypeParam('learning_rule_type', default=None, optional=True) eval_points = EvalPointsParam('eval_points', default=None, optional=True, sample_shape=('*', 'size_in')) scale_eval_points = BoolParam('scale_eval_points', default=True) modulatory = ObsoleteParam( 'modulatory', "Modulatory connections have been removed. 
" "Connect to a learning rule instead.", since="v2.1.0", url="https://github.com/nengo/nengo/issues/632#issuecomment-71663849") _param_init_order = [ 'pre', 'post', 'synapse', 'eval_points', 'function_info', 'transform', 'solver', 'learning_rule_type' ] def __init__(self, pre, post, synapse=Default, function=Default, transform=Default, solver=Default, learning_rule_type=Default, eval_points=Default, scale_eval_points=Default, label=Default, seed=Default, modulatory=Unconfigurable): super().__init__(label=label, seed=seed) self.pre = pre self.post = post self.synapse = synapse self.eval_points = eval_points # Must be set before function self.scale_eval_points = scale_eval_points self.function_info = function self.transform = transform # Must be set after function self.solver = solver # Must be set before learning rule self.learning_rule_type = learning_rule_type # set after transform self.modulatory = modulatory def __str__(self): return "<Connection %s>" % self._str def __repr__(self): return "<Connection at 0x%x %s>" % (id(self), self._str) @property def _str(self): if self.label is not None: return self.label desc = "" if self.function is None else " computing '%s'" % ( function_name(self.function)) return "from %s to %s%s" % (self.pre, self.post, desc) @property def function(self): return self.function_info.function @function.setter def function(self, function): self.function_info = function @property def is_decoded(self): return not (self.solver.weights or (isinstance(self.pre_obj, Neurons) and isinstance(self.post_obj, Neurons))) @property def _label(self): if self.label is not None: return self.label return "from %s to %s%s" % (self.pre, self.post, " computing '%s'" % function_name(self.function) if self.function is not None else "") @property def learning_rule(self): """(LearningRule or iterable) Connectable learning rule object(s).""" if self.learning_rule_type is None: return None types = self.learning_rule_type if isinstance(types, dict): learning_rule = type(types)() # dict of same type for k, v in types.items(): learning_rule[k] = LearningRule(self, v) elif is_iterable(types): learning_rule = [LearningRule(self, v) for v in types] elif isinstance(types, LearningRuleType): learning_rule = LearningRule(self, types) else: raise ValidationError("Invalid type %r" % type(types).__name__, attr='learning_rule_type', obj=self) return learning_rule @property def post_obj(self): return self.post.obj if isinstance(self.post, ObjView) else self.post @property def post_slice(self): return (self.post.slice if isinstance(self.post, ObjView) else slice(None)) @property def pre_obj(self): return self.pre.obj if isinstance(self.pre, ObjView) else self.pre @property def pre_slice(self): return self.pre.slice if isinstance(self.pre, ObjView) else slice(None) @property def size_in(self): """(int) The number of output dimensions of the pre object. Also the input size of the function, if one is specified. """ return self.pre.size_out @property def size_mid(self): """(int) The number of output dimensions of the function, if specified. If the function is not specified, then ``size_in == size_mid``. """ size = self.function_info.size return self.size_in if size is None else size @property def size_out(self): """(int) The number of input dimensions of the post object. Also the number of output dimensions of the transform. """ return self.post.size_in
transform=np.ones((1, 3)), solver=nengo.solvers.LstsqL2(weights=True), learning_rule_type={"pes": nengo.PES()}) nengo.Connection(err, conn.learning_rule["pes"]) # Case 3: neurons -> ens conn = nengo.Connection(ens1.neurons, ens2, transform=np.ones((1, ens1.n_neurons)), learning_rule_type={"pes": nengo.PES()}) nengo.Connection(err, conn.learning_rule["pes"]) with Simulator(net) as sim: sim.run(0.01) @pytest.mark.parametrize('pre_synapse', [ 0, Lowpass(tau=0.05), Alpha(tau=0.005)]) def test_pes_synapse(Simulator, seed, pre_synapse): rule = PES(pre_synapse=pre_synapse) with nengo.Network(seed=seed) as model: stim = nengo.Node(output=WhiteSignal(0.5, high=10)) x = nengo.Ensemble(100, 1) nengo.Connection(stim, x, synapse=None) conn = nengo.Connection(x, x, learning_rule_type=rule) p_neurons = nengo.Probe(x.neurons, synapse=pre_synapse) p_pes = nengo.Probe(conn.learning_rule, 'activities') with Simulator(model) as sim: sim.run(0.5)
def plot_angles(x, label=None):
    filt = Lowpass(10, default_dt=n_per_batch)
    y = filt.filtfilt(x) if len(x) > 0 else []
    batch_inds = n_per_batch * np.arange(len(x))
    plt.plot(batch_inds, y, label=label)
def plot_batches(x, label=None, color=None):
    filt = Lowpass(10, default_dt=n_per_batch)
    y = filt.filtfilt(x) if len(x) > 0 else []
    batch_inds = n_per_batch * np.arange(len(x))
    plt.semilogy(batch_inds, y, label=label, color=color)
def build_pes(model, pes, rule): """Builds a `.PES` object into a model. Calls synapse build functions to filter the pre activities, and adds several operators to implement the PES learning rule. Unlike other learning rules, there is no corresponding `.Operator` subclass for the PES rule. Instead, the rule is implemented with generic operators like `.ElementwiseInc` and `.DotInc`. Generic operators are used because they are more likely to be implemented on other backends like Nengo OCL. Parameters ---------- model : Model The model to build into. pes : PES Learning rule type to build. rule : LearningRule The learning rule object corresponding to the neuron type. Notes ----- Does not modify ``model.params[]`` and can therefore be called more than once with the same `.PES` instance. """ conn = rule.connection # Create input error signal error = Signal(np.zeros(rule.size_in), name="PES:error") model.add_op(Reset(error)) model.sig[rule]['in'] = error # error connection will attach here acts = model.build(Lowpass(pes.pre_tau), model.sig[conn.pre_obj]['out']) # Compute the correction, i.e. the scaled negative error correction = Signal(np.zeros(error.shape), name="PES:correction") model.add_op(Reset(correction)) # correction = -learning_rate * (dt / n_neurons) * error n_neurons = (conn.pre_obj.n_neurons if isinstance(conn.pre_obj, Ensemble) else conn.pre_obj.size_in) lr_sig = Signal(-pes.learning_rate * model.dt / n_neurons, name="PES:learning_rate") model.add_op(ElementwiseInc(lr_sig, error, correction, tag="PES:correct")) if not conn.is_decoded: post = get_post_ens(conn) weights = model.sig[conn]['weights'] encoders = model.sig[post]['encoders'] # encoded = dot(encoders, correction) encoded = Signal(np.zeros(weights.shape[0]), name="PES:encoded") model.add_op(Reset(encoded)) model.add_op(DotInc(encoders, correction, encoded, tag="PES:encode")) local_error = encoded elif isinstance(conn.pre_obj, (Ensemble, Neurons)): local_error = correction else: raise BuildError("'pre' object '%s' not suitable for PES learning" % (conn.pre_obj)) # delta = local_error * activities model.add_op(Reset(model.sig[rule]['delta'])) model.add_op( ElementwiseInc(local_error.column(), acts.row(), model.sig[rule]['delta'], tag="PES:Inc Delta")) # expose these for probes model.sig[rule]['error'] = error model.sig[rule]['correction'] = correction model.sig[rule]['activities'] = acts
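# Hedged sketch of probing the signals that ``build_pes`` exposes at the end;
# the network itself is a minimal PES example with an arbitrary zero error
# signal connected to the learning rule.
import nengo

with nengo.Network() as net:
    pre = nengo.Ensemble(30, dimensions=1)
    post = nengo.Ensemble(30, dimensions=1)
    error = nengo.Node([0.0])
    conn = nengo.Connection(pre, post, learning_rule_type=nengo.PES())
    nengo.Connection(error, conn.learning_rule)
    p_error = nengo.Probe(conn.learning_rule, "error")
    p_acts = nengo.Probe(conn.learning_rule, "activities")
    # model.sig[rule]['correction'] is also exposed above and can be probed as
    # "correction" in versions that list it as probeable

with nengo.Simulator(net) as sim:
    sim.run(0.01)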
class Thalamus(Network): """Inhibits non-selected actions. The thalamus is intended to work in tandem with a `.BasalGanglia` module. It converts basal ganglia output into a signal with (approximately) 1 for the selected action and 0 elsewhere. In order to suppress low responses and strengthen high responses, a constant bias is added to each dimension (i.e., action), and dimensions mutually inhibit each other. Additionally, the ensemble representing each dimension is created with positive encoders and can be assigned positive x-intercepts to threshold low responses. Parameters ---------- action_count : int Number of actions to select among (one ensemble is created per action). neurons_action : int, optional (Default: 50) Number of neurons per action to represent the selection. threshold_action : float, optional (Default: 0.2) Minimum value for action representation. mutual_inhibit : float, optional (Default: 1.0) Strength of inhibition between actions. route_inhibit : float, optional (Default: 3.0) Strength of inhibition for unchosen actions. synapse_inhibit : float, optional (Default: 0.008) Synaptic filter to apply for inhibition between actions. synapse_bg : float, optional (Default: 0.008) Synaptic filter for connection between basal ganglia and thalamus. synapse_direct : float, optional (Default: 0.01) Synaptic filter for direct outputs. neurons_channel_dim : int, optional (Default: 50) Number of neurons per routing channel dimension. synapse_channel : float, optional (Default: 0.01) Synaptic filter for channel inputs and outputs. neurons_gate : int, optional (Default: 40) Number of neurons per gate. threshold_gate : float, optional (Default: 0.3) Minimum value for gating neurons. synapse_to_gate : float, optional (Default: 0.002) Synaptic filter for controlling a gate. kwargs : dict Passed through to `nengo_spa.Network`. Attributes ---------- actions : nengo.networks.EnsembleArray Each ensemble represents one dimension (action). bias : nengo.Node The constant bias injected in each *actions* ensemble. input : nengo.Node Input to the *actions* ensembles. output : nengo.Node Output from the *actions* ensembles. """ neurons_action = IntParam('neurons_action', default=50) threshold_action = NumberParam('threshold_action', default=0.2) mutual_inhibit = NumberParam('mutual_inhibit', default=1.) route_inhibit = NumberParam('route_inhibit', default=3.) 
synapse_inhibit = SynapseParam('synapse_inhibit', default=Lowpass(0.008)) synapse_bg = SynapseParam('synapse_bg', default=Lowpass(0.008)) neurons_channel_dim = IntParam('neurons_channel_dim', default=50) synapse_channel = SynapseParam('synapse_channel', default=Lowpass(0.01)) neurons_gate = IntParam('neurons_gate', default=40) threshold_gate = NumberParam('threshold_gate', default=0.3) synapse_to_gate = SynapseParam('synapse_to_gate', default=Lowpass(0.002)) def __init__(self, action_count, neurons_action=Default, threshold_action=Default, mutual_inhibit=Default, route_inhibit=Default, synapse_inhibit=Default, synapse_bg=Default, neurons_channel_dim=Default, synapse_channel=Default, neurons_gate=Default, threshold_gate=Default, synapse_to_gate=Default, **kwargs): kwargs.setdefault('label', "Thalamus") super(Thalamus, self).__init__(**kwargs) self.action_count = action_count self.neurons_action = neurons_action self.mutual_inhibit = mutual_inhibit self.route_inhibit = route_inhibit self.synapse_inhibit = synapse_inhibit self.threshold_action = threshold_action self.neurons_channel_dim = neurons_channel_dim self.synapse_channel = synapse_channel self.neurons_gate = neurons_gate self.threshold_gate = threshold_gate self.synapse_to_gate = synapse_to_gate self.synapse_bg = synapse_bg self.gates = {} # gating ensembles per action (created as needed) self.channels = [] # channels to pass data between networks self.gate_in_connections = {} self.gate_out_connections = {} self.channel_out_connections = [] self.fixed_connections = {} self.bg_connection = None with self: self.actions = nengo.networks.EnsembleArray( self.neurons_action, self.action_count, intercepts=nengo.dists.Uniform(self.threshold_action, 1), encoders=nengo.dists.Choice([[1.0]]), label="actions") nengo.Connection(self.actions.output, self.actions.input, transform=(np.eye(self.action_count) - 1) * self.mutual_inhibit) self.bias = nengo.Node([1], label="thalamus bias") nengo.Connection(self.bias, self.actions.input, transform=np.ones((self.action_count, 1))) self.input = self.actions.input self.output = self.actions.output def construct_gate(self, index, bias, label=None): """Construct a gate ensemble. The gate neurons have no activity when the action is selected, but are active when the action is not selected. This makes the gate useful for inhibiting ensembles that should only be active when this action is active. Parameters ---------- index : int Index to identify the gate. bias : :class:`nengo.Network` Node providing a bias input of 1. label : str, optional Label for the gate. Returns ------- nengo.Ensemble The constructed gate. """ if label is None: label = 'gate[%d]' % index intercepts = Uniform(self.threshold_gate, 1) self.gates[index] = gate = nengo.Ensemble(self.neurons_gate, dimensions=1, intercepts=intercepts, label=label, encoders=[[1]] * self.neurons_gate) nengo.Connection(bias, gate, synapse=None) self.gate_in_connections[index] = nengo.Connection( self.actions.ensembles[index], self.gates[index], synapse=self.synapse_to_gate, transform=-1) return self.gates[index] def construct_channel(self, sink, type_, label=None): """Construct a channel. Channels are an additional neural population in-between a source population and a target population. This allows inhibiting the channel without affecting the source and thus is useful in routing information. Parameters ---------- sink : nengo.base.NengoObject Sink/target that the channel feeds into. type_ : nengo_spa.types.Type Type of the data transmitted through the channel. 
label : str, optional Label for the channel. Returns ------- :class:`nengo.networks.EnsembleArray` The constructed channel. """ if label is None: label = 'channel' if type_ == TScalar: channel = dynamic.ScalarRealization() else: channel = dynamic.StateRealization(vocab=type_.vocab) self.channels.append(channel) self.channel_out_connections.append( nengo.Connection(channel.output, sink, synapse=self.synapse_channel)) return channel def connect_bg(self, bg): """Connect a basal ganglia network to this thalamus.""" self.bg_connection = nengo.Connection(bg.output, self.input, synapse=self.synapse_bg) def connect_gate(self, index, channel): """Connect a gate to a channel for information routing. Parameters ---------- index : int Index of the gate to connect. channel : nengo.networks.EnsembleArray Channel to inhibit with the gate. """ if isinstance(channel, Scalar): target = channel.scalar.neurons elif isinstance(channel, State): target = channel.state_ensembles.add_neuron_input() else: raise NotImplementedError() inhibit = ([[-self.route_inhibit]] * (target.size_in)) self.gate_out_connections[index] = nengo.Connection( self.gates[index], target, transform=inhibit, synapse=self.synapse_inhibit) def connect_fixed(self, index, target, transform): """Create connection to route fixed value. Parameters ---------- index : int Index of the action to connect. target : nengo.base.NengoObject Target of the connection. transform : array-like Transform to apply to apply to the connection. """ self.fixed_connections[index] = self.connect( self.actions.ensembles[index], target, transform) def connect(self, source, target, transform): """Create connection. The connection will use the thalamus's *synapse_channel*. Parameters ---------- source : nengo.base.NengoObject Source object. target : nengo.base.NengoObject Target object. transform : array-like Transform to apply to the connection. """ return nengo.Connection(source, target, transform=transform, synapse=self.synapse_channel)
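# Hedged usage sketch of the Thalamus network defined above (assumes the
# nengo_spa context it comes from). A plain Node stands in for basal ganglia
# output: roughly 0 for the selected action and negative elsewhere, so the
# thalamus output approaches 1 for the winner and 0 for the rest.
import nengo

with nengo.Network() as net:
    bg_output = nengo.Node([0.0, -1.0, -1.0])  # stand-in for BasalGanglia output
    thalamus = Thalamus(action_count=3)
    nengo.Connection(bg_output, thalamus.input, synapse=None)
    action_probe = nengo.Probe(thalamus.output, synapse=0.01)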
def test_mergeable(): # anything is mergeable with an empty list assert mergeable(None, []) # ops with different numbers of sets/incs/reads/updates are not mergeable assert not mergeable(DummyOp(sets=[DummySignal()]), [DummyOp()]) assert not mergeable(DummyOp(incs=[DummySignal()]), [DummyOp()]) assert not mergeable(DummyOp(reads=[DummySignal()]), [DummyOp()]) assert not mergeable(DummyOp(updates=[DummySignal()]), [DummyOp()]) assert mergeable(DummyOp(sets=[DummySignal()]), [DummyOp(sets=[DummySignal()])]) # check matching dtypes assert not mergeable(DummyOp(sets=[DummySignal(dtype=np.float32)]), [DummyOp(sets=[DummySignal(dtype=np.float64)])]) # shape mismatch assert not mergeable(DummyOp(sets=[DummySignal(shape=(1, 2))]), [DummyOp(sets=[DummySignal(shape=(1, 3))])]) # display shape mismatch assert not mergeable( DummyOp(sets=[DummySignal(base_shape=(2, 2), shape=(4, 1))]), [DummyOp(sets=[DummySignal(base_shape=(2, 2), shape=(1, 4))])]) # first dimension mismatch assert mergeable(DummyOp(sets=[DummySignal(shape=(3, 2))]), [DummyOp(sets=[DummySignal(shape=(4, 2))])]) # Copy (inc must match) assert mergeable(Copy(DummySignal(), DummySignal(), inc=True), [Copy(DummySignal(), DummySignal(), inc=True)]) assert not mergeable(Copy(DummySignal(), DummySignal(), inc=True), [Copy(DummySignal(), DummySignal(), inc=False)]) # elementwise (first dimension must match) assert mergeable( ElementwiseInc(DummySignal(), DummySignal(), DummySignal()), [ElementwiseInc(DummySignal(), DummySignal(), DummySignal())]) assert mergeable( ElementwiseInc(DummySignal(shape=(1,)), DummySignal(), DummySignal()), [ElementwiseInc(DummySignal(shape=()), DummySignal(), DummySignal())]) assert not mergeable( ElementwiseInc(DummySignal(shape=(3,)), DummySignal(), DummySignal()), [ElementwiseInc(DummySignal(shape=(2,)), DummySignal(), DummySignal())]) # simpyfunc (t input must match) time = DummySignal() assert mergeable(SimPyFunc(None, None, time, None), [SimPyFunc(None, None, time, None)]) assert mergeable(SimPyFunc(None, None, None, DummySignal()), [SimPyFunc(None, None, None, DummySignal())]) assert not mergeable(SimPyFunc(None, None, DummySignal(), None), [SimPyFunc(None, None, None, DummySignal())]) # simneurons # check matching TF_NEURON_IMPL assert mergeable(SimNeurons(LIF(), DummySignal(), DummySignal()), [SimNeurons(LIF(), DummySignal(), DummySignal())]) assert not mergeable(SimNeurons(LIF(), DummySignal(), DummySignal()), [SimNeurons(LIFRate(), DummySignal(), DummySignal())]) # check custom with non-custom implementation assert not mergeable(SimNeurons(LIF(), DummySignal(), DummySignal()), [SimNeurons(Izhikevich(), DummySignal(), DummySignal())]) # check non-custom matching assert not mergeable( SimNeurons(Izhikevich(), DummySignal(), DummySignal()), [SimNeurons(AdaptiveLIF(), DummySignal(), DummySignal())]) assert not mergeable( SimNeurons(Izhikevich(), DummySignal(), DummySignal(), states=[DummySignal(dtype=np.float32)]), [SimNeurons(Izhikevich(), DummySignal(), DummySignal(), states=[DummySignal(dtype=np.int32)])]) assert mergeable( SimNeurons(Izhikevich(), DummySignal(), DummySignal(), states=[DummySignal(shape=(3,))]), [SimNeurons(Izhikevich(), DummySignal(), DummySignal(), states=[DummySignal(shape=(2,))])]) assert not mergeable( SimNeurons(Izhikevich(), DummySignal(), DummySignal(), states=[DummySignal(shape=(2, 1))]), [SimNeurons(Izhikevich(), DummySignal(), DummySignal(), states=[DummySignal(shape=(2, 2))])]) # simprocess # mode must match assert not mergeable( SimProcess(Lowpass(0), None, None, 
DummySignal(), mode="inc"), [SimProcess(Lowpass(0), None, None, DummySignal(), mode="set")]) # check matching TF_PROCESS_IMPL # note: we only have one item in TF_PROCESS_IMPL at the moment, so no # such thing as a mismatch assert mergeable(SimProcess(Lowpass(0), None, None, DummySignal()), [SimProcess(Lowpass(0), None, None, DummySignal())]) # check custom vs. non-custom assert not mergeable(SimProcess(Lowpass(0), None, None, DummySignal()), [SimProcess(Alpha(0), None, None, DummySignal())]) # check non-custom matching assert mergeable(SimProcess(Triangle(0), None, None, DummySignal()), [SimProcess(Alpha(0), None, None, DummySignal())]) # simtensornode a = SimTensorNode(None, DummySignal(), None, DummySignal()) assert not mergeable(a, [a]) # learning rules a = SimBCM(DummySignal((4,)), DummySignal(), DummySignal(), DummySignal(), DummySignal()) b = SimBCM(DummySignal((5,)), DummySignal(), DummySignal(), DummySignal(), DummySignal()) assert not mergeable(a, [b])
def test_mergeable(): # anything is mergeable with an empty list assert mergeable(None, []) # ops with different numbers of sets/incs/reads/updates are not mergeable assert not mergeable(dummies.Op(sets=[dummies.Signal()]), [dummies.Op()]) assert not mergeable(dummies.Op(incs=[dummies.Signal()]), [dummies.Op()]) assert not mergeable(dummies.Op(reads=[dummies.Signal()]), [dummies.Op()]) assert not mergeable(dummies.Op(updates=[dummies.Signal()]), [dummies.Op()]) assert mergeable(dummies.Op(sets=[dummies.Signal()]), [dummies.Op(sets=[dummies.Signal()])]) # check matching dtypes assert not mergeable(dummies.Op(sets=[dummies.Signal(dtype=np.float32)]), [dummies.Op(sets=[dummies.Signal(dtype=np.float64)])]) # shape mismatch assert not mergeable(dummies.Op(sets=[dummies.Signal(shape=(1, 2))]), [dummies.Op(sets=[dummies.Signal(shape=(1, 3))])]) # display shape mismatch assert not mergeable( dummies.Op(sets=[dummies.Signal(base_shape=(2, 2), shape=(4, 1))]), [dummies.Op(sets=[dummies.Signal(base_shape=(2, 2), shape=(1, 4))])]) # first dimension mismatch assert mergeable(dummies.Op(sets=[dummies.Signal(shape=(3, 2))]), [dummies.Op(sets=[dummies.Signal(shape=(4, 2))])]) # Copy (inc must match) assert mergeable(Copy(dummies.Signal(), dummies.Signal(), inc=True), [Copy(dummies.Signal(), dummies.Signal(), inc=True)]) assert not mergeable(Copy(dummies.Signal(), dummies.Signal(), inc=True), [Copy(dummies.Signal(), dummies.Signal(), inc=False)]) # elementwise (first dimension must match) assert mergeable( ElementwiseInc(dummies.Signal(), dummies.Signal(), dummies.Signal()), [ElementwiseInc(dummies.Signal(), dummies.Signal(), dummies.Signal())]) assert mergeable( ElementwiseInc(dummies.Signal(shape=(1,)), dummies.Signal(), dummies.Signal()), [ElementwiseInc(dummies.Signal(shape=()), dummies.Signal(), dummies.Signal())]) assert not mergeable( ElementwiseInc(dummies.Signal(shape=(3,)), dummies.Signal(), dummies.Signal()), [ElementwiseInc(dummies.Signal(shape=(2,)), dummies.Signal(), dummies.Signal())]) # simpyfunc (t input must match) time = dummies.Signal() assert mergeable(SimPyFunc(None, None, time, None), [SimPyFunc(None, None, time, None)]) assert mergeable(SimPyFunc(None, None, None, dummies.Signal()), [SimPyFunc(None, None, None, dummies.Signal())]) assert not mergeable(SimPyFunc(None, None, dummies.Signal(), None), [SimPyFunc(None, None, None, dummies.Signal())]) # simneurons # check matching TF_NEURON_IMPL assert mergeable(SimNeurons(LIF(), dummies.Signal(), dummies.Signal()), [SimNeurons(LIF(), dummies.Signal(), dummies.Signal())]) assert not mergeable(SimNeurons(LIF(), dummies.Signal(), dummies.Signal()), [SimNeurons(LIFRate(), dummies.Signal(), dummies.Signal())]) # check custom with non-custom implementation assert not mergeable(SimNeurons(LIF(), dummies.Signal(), dummies.Signal()), [SimNeurons(Izhikevich(), dummies.Signal(), dummies.Signal())]) # check non-custom matching assert not mergeable( SimNeurons(Izhikevich(), dummies.Signal(), dummies.Signal()), [SimNeurons(AdaptiveLIF(), dummies.Signal(), dummies.Signal())]) assert not mergeable( SimNeurons(Izhikevich(), dummies.Signal(), dummies.Signal(), states=[dummies.Signal(dtype=np.float32)]), [SimNeurons(Izhikevich(), dummies.Signal(), dummies.Signal(), states=[dummies.Signal(dtype=np.int32)])]) assert mergeable( SimNeurons(Izhikevich(), dummies.Signal(), dummies.Signal(), states=[dummies.Signal(shape=(3,))]), [SimNeurons(Izhikevich(), dummies.Signal(), dummies.Signal(), states=[dummies.Signal(shape=(2,))])]) assert not mergeable( 
SimNeurons(Izhikevich(), dummies.Signal(), dummies.Signal(), states=[dummies.Signal(shape=(2, 1))]), [SimNeurons(Izhikevich(), dummies.Signal(), dummies.Signal(), states=[dummies.Signal(shape=(2, 2))])]) # simprocess # mode must match assert not mergeable( SimProcess(Lowpass(0), None, dummies.Signal(), dummies.Signal(), mode="inc"), [SimProcess(Lowpass(0), None, dummies.Signal(), dummies.Signal(), mode="set")]) # check that two lowpass processes match assert mergeable(SimProcess(Lowpass(0), None, None, dummies.Signal()), [SimProcess(Lowpass(0), None, None, dummies.Signal())]) # check that lowpass and general linear filter don't match assert not mergeable(SimProcess(Lowpass(0), None, None, dummies.Signal()), [SimProcess(Alpha(0), None, None, dummies.Signal())]) # check that two general linear filters do match assert mergeable( SimProcess(Alpha(0.1), dummies.Signal(), None, dummies.Signal()), [SimProcess(LinearFilter([1], [1, 1, 1]), dummies.Signal(), None, dummies.Signal())]) # check custom and non-custom don't match assert not mergeable(SimProcess(Triangle(0), None, None, dummies.Signal()), [SimProcess(Alpha(0), None, None, dummies.Signal())]) # check non-custom matching assert mergeable(SimProcess(Triangle(0), None, None, dummies.Signal()), [SimProcess(Triangle(0), None, None, dummies.Signal())]) # simtensornode a = SimTensorNode(None, dummies.Signal(), None, dummies.Signal()) assert not mergeable(a, [a]) # learning rules a = SimBCM(dummies.Signal((4,)), dummies.Signal(), dummies.Signal(), dummies.Signal(), dummies.Signal()) b = SimBCM(dummies.Signal((5,)), dummies.Signal(), dummies.Signal(), dummies.Signal(), dummies.Signal()) assert not mergeable(a, [b])
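Taken together, these tests encode the core mergeability rules: an operator can join a merge group only if it has the same numbers of sets/incs/reads/updates, corresponding signals agree in dtype and in every dimension except the first (merged operators are stacked along the first axis), and operator-specific conditions hold (matching ``inc`` for ``Copy``, matching ``mode`` and process implementation for ``SimProcess``, matching neuron implementation for ``SimNeurons``). The snippet below is a rough, self-contained illustration of the signal-compatibility part of that rule using plain numpy arrays; it is not the simulator's actual implementation, which operates on its own Signal and operator classes.

import numpy as np

def signals_compatible(a, b):
    """Toy merge check for a pair of array-like signals: dtypes must match and
    shapes must agree past the first axis, since merges stack along axis 0."""
    return a.dtype == b.dtype and a.shape[1:] == b.shape[1:]

assert signals_compatible(np.zeros((3, 2)), np.zeros((4, 2)))      # first dim may differ
assert not signals_compatible(np.zeros((1, 2)), np.zeros((1, 3)))  # trailing dims must match
assert not signals_compatible(np.zeros(2, np.float32),
                              np.zeros(2, np.float64))             # dtypes must match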
def __init__(self): self.seed = -1 self.set_seed(self.seed) self.learn_init_trfm_max = 0.15 self.learn_init_trfm_bias = 0.05 self.learn_learning_rate = 1e-4 self.learn_init_transforms = [] self.pstc = Lowpass(0.005) self.n_neurons_ens = 50 self.n_neurons_cconv = 150 self.n_neurons_mb = 50 self.n_neurons_am = 50 self.max_rates = Uniform(100, 200) self.neuron_type = nengo.LIF() self.sim_dt = 0.001 self.ps_mb_gain_scale = 2.0 self.ps_use_am_mb = True self.ps_action_am_threshold = 0.2 self.enc_mb_acc_radius_scale = 2.5 self.enc_pos_cleanup_mode = 2 self.mb_rehearsalbuf_input_scale = 1.0 # 1.75 self.mb_decaybuf_input_scale = 1.5 # 1.75 self.mb_decay_val = 0.975 self.mb_fdbk_val = 1.3 self.mb_config = {'mem_synapse': Lowpass(0.08), 'difference_gain': 6, 'gate_gain': 5} self.mb_gate_scale = 1.0 # 1.2 self.trans_cconv_radius = 2 self.trans_ave_scale = 0.3 self.dcconv_radius = 2 self.dcconv_item_in_scale = 0.75 # 0.5 self.dec_am_min_thresh = 0.30 self.dec_am_min_diff = 0.1 self.dec_fr_min_thresh = self.dec_am_min_thresh * 1.2 # 0.3 self.dec_fr_item_in_scale = 0.65 # 1.0 self.dec_fr_to_am_scale = 0.25 self.mtr_ramp_synapse = 0.05 self.mtr_ramp_reset_hold_transform = 0.1 # 0.945 self.mtr_ramp_scale = 2 self.mtr_est_digit_response_time = 1.0 / self.mtr_ramp_scale + 0.5 self.mtr_kp = 65 self.mtr_kv1 = np.sqrt(8) self.mtr_kv2 = np.sqrt(18) - self.mtr_kv1 self.mtr_arm_type = 'three_link' self.mtr_arm_rest_x_bias = -0.3 self.mtr_arm_rest_y_bias = 2.5 self.mtr_tgt_threshold = 0.075 self._backend = 'ref' self.data_dir = '' self.probe_data_filename = 'probe_data.npz'
class Test(object): sp = SynapseParam('sp', default=Lowpass(0.1))
) data = ( np.zeros((mini_size, n_steps, signal_d)) + np.arange(mini_size)[:, None, None] ) sim.run_steps(n_steps, data={inp: data}) for i in range(mini_size): filt = synapse.filt(np.ones((n_steps, signal_d)) * i, y0=0) assert np.allclose(sim.data[p0][i, 1:], filt[:-1]) assert np.allclose(sim.data[p1][i, 1:], filt[:-1]) @pytest.mark.parametrize("synapse", (Lowpass(0.1), LinearFilter([1], [0.1, 1]))) def test_weight_filter(Simulator, synapse): if not isinstance(synapse, Lowpass): pytest.xfail(reason="Multidimensional LinearFilter not implemented") with nengo.Network() as net: a = nengo.Node([0, 0]) b = nengo.Node(size_in=2) c = nengo.Connection(a, b, transform=np.ones((2, 2))) p = nengo.Probe(c, "weights", synapse=synapse) with Simulator(net) as sim: sim.run_steps(10) assert np.allclose(sim.data[p][1:], synapse.filt(np.ones((9, 2, 2)), y0=0))
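For reference (not part of the test suite), nengo synapses expose an offline ``filt`` method that applies the same discrete filter to an array of samples along the time axis; the assertions above compare probe data shifted by one step against ``filt`` output, which presumably reflects a one-timestep offset between when the probe is recorded and when the filter state is advanced in this backend. A minimal standalone check of the offline filter, with arbitrary values:

import numpy as np
import nengo

# Offline filtering of a constant signal with a lowpass synapse;
# y0=0 starts the filter state at zero, matching the assertions above.
synapse = nengo.Lowpass(0.1)
x = np.ones((100, 2))
y = synapse.filt(x, y0=0)
assert y.shape == x.shape
assert np.all(y[1:] >= y[:-1])  # a lowpass of a constant input rises monotonically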