def test_learning_rule_types():
    """Verify constructor arguments and repr output for each learning rule type."""
    # PES
    check_init_args(PES, ["learning_rate", "pre_synapse"])
    check_repr(PES(learning_rate=0.1, pre_synapse=Lowpass(tau=0.2)))
    assert repr(PES()) == "PES()"
    expected = "PES(learning_rate=0.1, pre_synapse=Lowpass(tau=0.2))"
    assert repr(PES(learning_rate=0.1, pre_synapse=0.2)) == expected

    # BCM
    check_init_args(
        BCM, ["learning_rate", "pre_synapse", "post_synapse", "theta_synapse"]
    )
    check_repr(
        BCM(learning_rate=0.1, pre_synapse=0.2, post_synapse=0.3, theta_synapse=0.4)
    )
    assert repr(BCM()) == "BCM()"
    expected = (
        "BCM(learning_rate=0.1, pre_synapse=Lowpass(tau=0.2), "
        "post_synapse=Lowpass(tau=0.3), theta_synapse=Lowpass(tau=0.4))"
    )
    assert (
        repr(
            BCM(
                learning_rate=0.1,
                pre_synapse=0.2,
                post_synapse=0.3,
                theta_synapse=0.4,
            )
        )
        == expected
    )

    # Oja
    check_init_args(Oja, ["learning_rate", "pre_synapse", "post_synapse", "beta"])
    check_repr(
        Oja(
            learning_rate=0.1,
            pre_synapse=Lowpass(tau=0.2),
            post_synapse=Lowpass(tau=0.3),
            beta=0.4,
        )
    )
    assert repr(Oja()) == "Oja()"
    expected = (
        "Oja(learning_rate=0.1, pre_synapse=Lowpass(tau=0.2), "
        "post_synapse=Lowpass(tau=0.3), beta=0.4)"
    )
    assert (
        repr(Oja(learning_rate=0.1, pre_synapse=0.2, post_synapse=0.3, beta=0.4))
        == expected
    )

    # Voja
    check_init_args(Voja, ["learning_rate", "post_synapse"])
    check_repr(Voja(learning_rate=0.1, post_synapse=Lowpass(tau=0.2)))
    assert repr(Voja()) == "Voja()"
    expected = "Voja(learning_rate=0.1, post_synapse=Lowpass(tau=0.2))"
    assert repr(Voja(learning_rate=0.1, post_synapse=0.2)) == expected

    # RLS
    check_init_args(RLS, ["learning_rate", "pre_synapse"])
    check_repr(RLS(2.4e-3))
    check_repr(RLS(learning_rate=0.1, pre_synapse=Alpha(tau=0.2)))
    assert repr(RLS()) == "RLS()"
    expected = "RLS(learning_rate=0.1, pre_synapse=Lowpass(tau=0.2))"
    assert repr(RLS(learning_rate=0.1, pre_synapse=0.2)) == expected
def test_synapses():
    """Verify constructor arguments and repr output for each synapse type."""
    # LinearFilter
    check_init_args(LinearFilter, ["num", "den", "analog", "method"])
    check_repr(LinearFilter([1, 2], [3, 4]))
    check_repr(LinearFilter([1, 2], [3, 4], analog=False))
    expected = "LinearFilter(num=array([1.]), den=array([0.03, 1. ]))"
    assert repr(LinearFilter([1], [0.03, 1])) == expected

    # Lowpass
    check_init_args(Lowpass, ["tau"])
    check_repr(Lowpass(0.3))
    assert repr(Lowpass(0.01)) == "Lowpass(tau=0.01)"

    # Alpha
    check_init_args(Alpha, ["tau"])
    check_repr(Alpha(0.3))
    assert repr(Alpha(0.02)) == "Alpha(tau=0.02)"

    # Triangle
    check_init_args(Triangle, ["t"])
    check_repr(Triangle(0.3))
    assert repr(Triangle(0.03)) == "Triangle(t=0.03)"
def merge_synapses(self, syn1, syn2):
    """Return an equivalent synapse for the two provided synapses.

    If either synapse is ``None`` the other is returned unchanged;
    otherwise both must be ``Lowpass`` synapses, and a single ``Lowpass``
    with the summed time constants is returned (an approximation, hence
    the warning).
    """
    # Guard clauses: a missing synapse contributes nothing to the merge.
    if syn1 is None:
        return syn2
    if syn2 is None:
        return syn1

    # Only Lowpass/Lowpass combinations are supported.
    assert isinstance(syn1, Lowpass) and isinstance(syn2, Lowpass)
    warnings.warn(
        "Combining two Lowpass synapses, this may change the "
        "behaviour of the network (set `remove_passthrough=False` "
        "to avoid this).")
    return Lowpass(syn1.tau + syn2.tau)
def build_pes(model, pes, rule):
    """
    Builds a `nengo.PES` object into a model.

    Parameters
    ----------
    model : Model
        The model to build into.
    pes : PES
        Learning rule type to build.
    rule : LearningRule
        The learning rule object corresponding to the neuron type.

    Notes
    -----
    Does not modify ``model.params[]`` and can therefore be called more than
    once with the same `nengo.PES` instance.
    """

    conn = rule.connection

    # Create input error signal; reset to zero each timestep so the incoming
    # error connection accumulates fresh values.
    error = Signal(np.zeros(rule.size_in), name="PES:error")
    model.add_op(Reset(error))
    model.sig[rule]['in'] = error  # error connection will attach here

    if LooseVersion(nengo_version) < "2.7.1":
        # older nengo exposes `pre_tau` instead of `pre_synapse`; build the
        # equivalent Lowpass filter on the presynaptic activities by hand
        acts = model.build(
            Lowpass(pes.pre_tau), model.sig[conn.pre_obj]["out"])
    else:
        # filtered presynaptic activities
        acts = model.build(pes.pre_synapse, model.sig[conn.pre_obj]["out"])

    if not conn.is_decoded:
        # multiply error by post encoders to get a per-neuron error
        post = get_post_ens(conn)
        encoders = model.sig[post]["encoders"]

        if conn.post_obj is not conn.post:
            # in order to avoid slicing encoders along an axis > 0, we pad
            # `error` out to the full base dimensionality and then do the
            # dotinc with the full encoder matrix
            padded_error = Signal(np.zeros(encoders.shape[1]))
            model.add_op(Copy(error, padded_error, dst_slice=conn.post_slice))
        else:
            padded_error = error

        # error = dot(encoders, error)
        local_error = Signal(np.zeros(post.n_neurons), name="PES:encoded")
        model.add_op(Reset(local_error))
        model.add_op(DotInc(encoders, padded_error, local_error,
                            tag="PES:encode"))
    else:
        # decoded connection: the raw error signal is used directly
        local_error = error

    # NOTE(review): appended directly to model.operators rather than via
    # model.add_op — presumably intentional (e.g. to bypass add_op's extra
    # processing for this custom operator); confirm before changing.
    model.operators.append(SimPES(acts, local_error,
                                  model.sig[rule]["delta"],
                                  pes.learning_rate))

    # expose these for probes
    model.sig[rule]["error"] = error
    model.sig[rule]["activities"] = acts