Example #1
def test_linearfilter_combine(rng):
    nt = 3000
    tau0, tau1 = 0.01, 0.02
    u = rng.normal(size=(nt, 10))
    x = LinearFilter([1], [tau0 * tau1, tau0 + tau1, 1]).filt(u, y0=0)
    y = Lowpass(tau0).combine(Lowpass(tau1)).filt(u, y0=0)
    assert np.allclose(x, y)
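
A quick check on the coefficients used above: cascading two first-order lowpass filters multiplies their denominators, (tau0*s + 1)(tau1*s + 1). A minimal numpy sketch, with the same hypothetical time constants:

import numpy as np

tau0, tau1 = 0.01, 0.02
# polynomial product of the two first-order denominators gives the
# second-order denominator passed to LinearFilter above
den = np.polymul([tau0, 1], [tau1, 1])
assert np.allclose(den, [tau0 * tau1, tau0 + tau1, 1])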
Example #2
def test_decoders(Simulator, plt, seed, allclose):
    dt = 1e-3
    tau = 0.01

    t, x, yhat = run_synapse(Simulator, seed, Lowpass(tau), dt=dt, n_neurons=100)

    y = Lowpass(tau).filt(x, dt=dt, y0=0)
    assert signals_allclose(t, y, yhat, delay=dt, plt=plt, allclose=allclose)
Example #3
def test_lowpass(Simulator, plt, seed):
    dt = 1e-3
    tau = 0.03

    t, x, yhat = run_synapse(Simulator, seed, Lowpass(tau), dt=dt)
    y = Lowpass(tau).filt(x, dt=dt, y0=0)

    assert allclose(t, y, yhat, delay=dt, plt=plt)
Example #4
def build_delta_rule(model, delta_rule, rule):
    conn = rule.connection

    # Create input error signal
    error = Signal(np.zeros(rule.size_in), name="DeltaRule:error")
    model.add_op(Reset(error))
    model.sig[rule]["in"] = error  # error connection will attach here

    # Multiply by post_fn output if necessary
    post_fn = delta_rule.post_fn.function
    post_tau = delta_rule.post_tau
    post_target = delta_rule.post_target
    if post_fn is not None:
        post_sig = model.sig[conn.post_obj][post_target]
        post_synapse = Lowpass(post_tau) if post_tau is not None else None
        post_input = (post_sig if post_synapse is None else model.build(
            post_synapse, post_sig))

        post = Signal(np.zeros(post_input.shape), name="DeltaRule:post")
        model.add_op(
            SimPyFunc(post,
                      post_fn,
                      t=None,
                      x=post_input,
                      tag="DeltaRule:post_fn"))
        model.sig[rule]["post"] = post

        error0 = error
        error = Signal(np.zeros(rule.size_in), name="DeltaRule:post_error")
        model.add_op(Reset(error))
        model.add_op(ElementwiseInc(error0, post, error))

    # Compute: correction = -learning_rate * dt * error
    correction = Signal(np.zeros(error.shape), name="DeltaRule:correction")
    model.add_op(Reset(correction))
    lr_sig = Signal(-delta_rule.learning_rate * model.dt,
                    name="DeltaRule:learning_rate")
    model.add_op(DotInc(lr_sig, error, correction, tag="DeltaRule:correct"))

    # delta_ij = correction_i * pre_j
    pre_synapse = Lowpass(delta_rule.pre_tau)
    pre = model.build(pre_synapse, model.sig[conn.pre_obj]["out"])

    model.add_op(Reset(model.sig[rule]["delta"]))
    model.add_op(
        ElementwiseInc(
            correction.reshape((-1, 1)),
            pre.reshape((1, -1)),
            model.sig[rule]["delta"],
            tag="DeltaRule:Inc Delta",
        ))

    # expose these for probes
    model.sig[rule]["error"] = error
    model.sig[rule]["correction"] = correction
    model.sig[rule]["pre"] = pre
Example #5
def test_linearfilter_combine(rng, allclose):
    nt = 3000
    tau0, tau1 = 0.01, 0.02
    u = rng.normal(size=(nt, 10))
    x = LinearFilter([1], [tau0 * tau1, tau0 + tau1, 1]).filt(u, y0=0)
    y = Lowpass(tau0).combine(Lowpass(tau1)).filt(u, y0=0)
    assert allclose(x, y)

    with pytest.raises(ValidationError, match="other LinearFilters"):
        Lowpass(0.1).combine(Triangle(0.01))

    with pytest.raises(ValidationError, match="analog and digital"):
        Lowpass(0.1).combine(LinearFilter([1], [1], analog=False))
Example #6
def test_multiple_get_probe_output():
    n_steps = 15
    n_axons = 3

    model = Model()

    # n_axons controls number of input spikes and thus amount of overflow
    input = SpikeInput(n_axons)
    for t in np.arange(1, n_steps + 1):
        input.add_spikes(t, np.arange(n_axons))  # send spikes to all axons
    model.add_input(input)

    block = LoihiBlock(1)
    block.compartment.configure_relu()
    block.compartment.configure_filter(0.1)
    model.add_block(block)

    synapse = Synapse(n_axons)
    synapse.set_weights(np.ones((n_axons, 1)))
    block.add_synapse(synapse)

    axon = Axon(n_axons)
    axon.target = synapse
    input.add_axon(axon)

    probe_u = LoihiProbe(target=block, key="current", synapse=Lowpass(0.005))
    model.add_probe(probe_u)
    probe_v = LoihiProbe(target=block, key="voltage", synapse=Lowpass(0.005))
    model.add_probe(probe_v)
    probe_s = LoihiProbe(target=block, key="spiked", synapse=Lowpass(0.005))
    model.add_probe(probe_s)

    discretize_model(model)

    # must set these after `discretize` to specify discretized values
    block.compartment.vmin = -(2**22) + 1
    block.compartment.vth[:] = VTH_MAX

    with EmulatorInterface(model) as emu:
        emu.run_steps(n_steps)
        first_u = emu.get_probe_output(probe_u)
        first_v = emu.get_probe_output(probe_v)
        first_s = emu.get_probe_output(probe_s)
        second_u = emu.get_probe_output(probe_u)
        second_v = emu.get_probe_output(probe_v)
        second_s = emu.get_probe_output(probe_s)

    assert np.all(first_u == second_u)
    assert np.all(first_v == second_v)
    assert np.all(first_s == second_s)
Example #7
def __init__(self, synapse=Lowpass(tau=0.005), synapse_kwargs={},
             dist=Gaussian(mean=0, std=1), scale=True, seed=None):
    super(FilteredNoise, self).__init__(seed=seed)
    self.synapse = synapse
    self.synapse_kwargs = synapse_kwargs
    self.dist = dist
    self.scale = scale
Example #8
class RMSP(LearningRuleType):
    """Hebbian synaptic plasticity learning rule.  Modifies connection weights
    according to the presynaptic and postsynaptic firing rates and the
    target firing rate.

    """
    modifies = 'weights'
    probeable = ('pre_filtered', 'post_filtered', 'delta')
    
    learning_rate = NumberParam("learning_rate", low=0, readonly=True, default=1e-6)
    pre_synapse = SynapseParam("pre_synapse", default=Lowpass(tau=0.005), readonly=True)
    post_synapse = SynapseParam("post_synapse", default=None, readonly=True)

    jit = BoolParam("jit", default=True, readonly=True)

    def __init__(self,
                 learning_rate=Default,
                 pre_synapse=Default,
                 post_synapse=Default,
                 jit=Default):
        super().__init__(learning_rate, size_in=1)
        self.pre_synapse = pre_synapse
        self.post_synapse = (
            self.pre_synapse if post_synapse is Default else post_synapse
        )
        self.jit = jit

    @property
    def _argreprs(self):
        return _remove_default_post_synapse(super()._argreprs, self.pre_synapse)
Example #9
def test_filtfilt(plt, rng, allclose):
    dt = 1e-3
    tend = 3.0
    t = dt * np.arange(tend / dt)
    nt = len(t)
    synapse = Lowpass(0.03)

    u = rng.normal(size=nt)
    x = synapse.filt(u, dt=dt)  # forward pass
    x = synapse.filt(x[::-1], y0=x[-1], dt=dt)[::-1]  # backward pass over the reversed signal
    y = synapse.filtfilt(u, dt=dt)  # zero-phase filtfilt should match the two manual passes

    plt.plot(t, x)
    plt.plot(t, y, "--")

    assert allclose(x, y)
Example #10
def test_filtfilt(plt, rng):
    dt = 1e-3
    tend = 3.
    t = dt * np.arange(tend / dt)
    nt = len(t)

    tau = 0.03

    u = rng.normal(size=nt)
    x = Lowpass(tau).filt(u, dt=dt)
    x = Lowpass(tau).filt(x[::-1], y0=x[-1], dt=dt)[::-1]
    y = Lowpass(tau).filtfilt(u, dt=dt)

    plt.plot(t, x)
    plt.plot(t, y, '--')

    assert np.allclose(x, y)
Example #11
    def __init__(self, learning_rate=1.0, pre_synapse=Lowpass(tau=0.005)):
        if version_info >= (2, 4, 1):
            # https://github.com/nengo/nengo/pull/1310
            super(RLS, self).__init__(learning_rate, size_in='post_state')
        else:  # pragma: no cover
            self.error_type = 'decoded'
            super(RLS, self).__init__(learning_rate)

        self.pre_synapse = pre_synapse
Example #12
def __init__(self,
             synapse=Lowpass(tau=0.005),
             dist=Gaussian(mean=0, std=1),
             scale=True,
             **kwargs):
    super().__init__(default_size_in=0, **kwargs)
    self.synapse = synapse
    self.dist = dist
    self.scale = scale
Example #13
def test_decoders(Simulator, plt, seed):
    dt = 1e-3
    tau = 0.01

    t, x, yhat = run_synapse(
        Simulator, seed, Lowpass(tau), dt=dt, n_neurons=100)

    y = filt(x, tau, dt=dt)
    assert allclose(t, y, yhat, delay=dt, plt=plt)
Example #14
def build_voja(model, voja, rule):
    """Builds a `.Voja` object into a model.

    Calls synapse build functions to filter the post activities,
    and adds a `.SimVoja` operator to the model to calculate the delta.

    Parameters
    ----------
    model : Model
        The model to build into.
    voja : Voja
        Learning rule type to build.
    rule : LearningRule
        The learning rule object corresponding to the neuron type.

    Notes
    -----
    Does not modify ``model.params[]`` and can therefore be called
    more than once with the same `.Voja` instance.
    """

    conn = rule.connection

    # Filtered post activity
    post = conn.post_obj
    if voja.post_tau is not None:
        post_filtered = model.build(Lowpass(voja.post_tau),
                                    model.sig[post]['out'])
    else:
        post_filtered = model.sig[post]['out']

    # Learning signal, defaults to 1 in case no connection is made
    # and multiplied by the learning_rate * dt
    learning = Signal(np.zeros(rule.size_in), name="Voja:learning")
    assert rule.size_in == 1
    model.add_op(Reset(learning, value=1.0))
    model.sig[rule]['in'] = learning  # optional connection will attach here

    scaled_encoders = model.sig[post]['encoders']
    # The gain and radius are folded into the encoders during the ensemble
    # build process, so we need to make sure that the deltas are proportional
    # to this scaling factor
    encoder_scale = model.params[post].gain / post.radius
    assert post_filtered.shape == encoder_scale.shape

    model.add_op(
        SimVoja(pre_decoded=model.sig[conn]['out'],
                post_filtered=post_filtered,
                scaled_encoders=scaled_encoders,
                delta=model.sig[rule]['delta'],
                scale=encoder_scale,
                learning_signal=learning,
                learning_rate=voja.learning_rate))

    model.sig[rule]['scaled_encoders'] = scaled_encoders
    model.sig[rule]['post_filtered'] = post_filtered
Example #15
class Voja(LearningRuleType):
    """Vector Oja learning rule.

    Modifies an ensemble's encoders to be selective to its inputs.

    A connection to the learning rule will provide a scalar weight for the
    learning rate, minus 1. For instance, 0 is normal learning, -1 is no
    learning, and less than -1 causes anti-learning or "forgetting".

    Parameters
    ----------
    post_tau : float, optional
        Filter constant on activities of neurons in post population.
    learning_rate : float, optional
        A scalar indicating the rate at which encoders will be adjusted.
    post_synapse : `.Synapse`, optional
        Synapse model used to filter the post-synaptic activities.

    Attributes
    ----------
    learning_rate : float
        A scalar indicating the rate at which encoders will be adjusted.
    post_synapse : `.Synapse`
        Synapse model used to filter the post-synaptic activities.
    """

    modifies = "encoders"
    probeable = ("post_filtered", "scaled_encoders", "delta")

    learning_rate = NumberParam("learning_rate",
                                low=0,
                                readonly=True,
                                default=1e-2)
    post_synapse = SynapseParam("post_synapse",
                                default=Lowpass(tau=0.005),
                                readonly=True)

    post_tau = _deprecated_tau("post_tau", "post_synapse")

    def __init__(self,
                 learning_rate=Default,
                 post_synapse=Default,
                 post_tau=Unconfigurable):
        super().__init__(learning_rate, size_in=1)

        if post_tau is Unconfigurable:
            self.post_synapse = post_synapse
        else:
            self.post_tau = post_tau

    @property
    def _argdefaults(self):
        return (
            ("learning_rate", Voja.learning_rate.default),
            ("post_synapse", Voja.post_synapse.default),
        )
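A hedged usage sketch of the learning-signal semantics described in the docstring; the ensemble sizes and the 0/-1 schedule are made up for illustration:

import nengo

with nengo.Network():
    pre = nengo.Ensemble(50, dimensions=2)
    post = nengo.Ensemble(50, dimensions=2)
    conn = nengo.Connection(pre, post, learning_rule_type=nengo.Voja())

    # 0 leaves learning at the normal rate; -1 switches it off (see docstring)
    control = nengo.Node(lambda t: 0 if t < 0.5 else -1)
    nengo.Connection(control, conn.learning_rule, synapse=None)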
Example #16
def build_oja(model, oja, rule):
    conn = rule.connection
    pre_activities = model.sig[get_pre_ens(conn).neurons]['out']
    post_activities = model.sig[get_post_ens(conn).neurons]['out']
    pre_filtered = model.build(Lowpass(oja.pre_tau), pre_activities)
    post_filtered = model.build(Lowpass(oja.post_tau), post_activities)

    model.add_op(SimOja(pre_filtered,
                        post_filtered,
                        model.sig[conn]['weights'],
                        model.sig[rule]['delta'],
                        learning_rate=oja.learning_rate,
                        beta=oja.beta))

    # expose these for probes
    model.sig[rule]['pre_filtered'] = pre_filtered
    model.sig[rule]['post_filtered'] = post_filtered

    model.params[rule] = None  # no build-time info to return
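
Written out in plain numpy, SimOja combines a Hebbian term with a forgetting term scaled by beta; a sketch of one step, assuming delta = alpha * (outer(post, pre) - beta * post**2 * weights) with alpha = learning_rate * dt and made-up sizes:

import numpy as np

pre = np.random.rand(40)           # pre_filtered
post = np.random.rand(30)          # post_filtered
weights = np.random.randn(30, 40)
alpha, beta = 1e-6 * 1e-3, 1.0     # learning_rate * dt, forgetting factor

delta = alpha * (np.outer(post, pre) - beta * post[:, None] ** 2 * weights)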
Example #17
def filtered_signal(model, owner, sig, synapse):
    # Note: we add a filter here even if synapse < dt,
    # in order to avoid cycles in the op graph. If the filter
    # is explicitly set to None (e.g. for a passthrough node)
    # then cycles can still occur.
    if is_number(synapse):
        synapse = Lowpass(synapse)
    assert isinstance(synapse, Synapse)
    model.build(synapse, owner, sig)
    return model.sig[owner]['synapse_out']
Example #18
def test_order_signals_lowpass():
    # test that lowpass outputs are ordered as reads

    inputs = [dummies.Signal(label=str(i)) for i in range(10)]
    time = dummies.Signal()
    plan = [
        tuple(SimProcess(Lowpass(0.1), inputs[i], inputs[i + 1], time,
                         mode="update") for i in range(0, 4, 2)),
        tuple(SimProcess(Lowpass(0.1), inputs[i], inputs[i + 1], time,
                         mode="update") for i in range(5, 9, 2))]
    sigs, new_plan = order_signals(plan)

    assert contiguous(inputs[1:5:2], sigs)
    assert contiguous(inputs[6:10:2], sigs)

    assert ordered(new_plan[0], sigs, block=1)
    assert ordered(new_plan[0], sigs, block=2)
    assert ordered(new_plan[1], sigs, block=1)
    assert ordered(new_plan[1], sigs, block=2)
Example #19
def build_bcm(model, bcm, rule):
    conn = rule.connection
    pre_activities = model.sig[get_pre_ens(conn).neurons]['out']
    pre_filtered = model.build(Lowpass(bcm.pre_tau), pre_activities)
    post_activities = model.sig[get_post_ens(conn).neurons]['out']
    post_filtered = model.build(Lowpass(bcm.post_tau), post_activities)
    theta = model.build(Lowpass(bcm.theta_tau), post_filtered)

    model.add_op(SimBCM(pre_filtered,
                        post_filtered,
                        theta,
                        model.sig[rule]['delta'],
                        learning_rate=bcm.learning_rate))

    # expose these for probes
    model.sig[rule]['theta'] = theta
    model.sig[rule]['pre_filtered'] = pre_filtered
    model.sig[rule]['post_filtered'] = post_filtered

    model.params[rule] = None  # no build-time info to return
Example #20
class mPES(LearningRuleType):
    modifies = "weights"
    probeable = ("error", "activities", "delta", "pos_memristors",
                 "neg_memristors")

    pre_synapse = SynapseParam("pre_synapse",
                               default=Lowpass(tau=0.005),
                               readonly=True)
    r_max = NumberParam("r_max", readonly=True, default=2.3e8)
    r_min = NumberParam("r_min", readonly=True, default=200)
    exponent = NumberParam("exponent", readonly=True, default=-0.146)
    gain = NumberParam("gain", readonly=True, default=1e3)
    voltage = NumberParam("voltage", readonly=True, default=1e-1)
    initial_state = DictParam("initial_state", optional=True)

    def __init__(self,
                 pre_synapse=Default,
                 r_max=Default,
                 r_min=Default,
                 exponent=Default,
                 noisy=False,
                 gain=Default,
                 voltage=Default,
                 initial_state=None,
                 seed=None):
        super().__init__(size_in="post_state")

        self.pre_synapse = pre_synapse
        self.r_max = r_max
        self.r_min = r_min
        self.exponent = exponent
        if not noisy:
            self.noise_percentage = np.zeros(4)
        elif isinstance(noisy, (float, int)):
            self.noise_percentage = np.full(4, noisy)
        elif isinstance(noisy, list) and len(noisy) == 4:
            self.noise_percentage = noisy
        else:
            raise ValueError(
                f"noisy must be a float, an int, or a list of length 4, "
                f"not {type(noisy)}"
            )
        self.gain = gain
        self.voltage = voltage
        self.seed = seed
        self.initial_state = {} if initial_state is None else initial_state

    @property
    def _argdefaults(self):
        return (
            ("pre_synapse", mPES.pre_synapse.default),
            ("r_max", mPES.r_max.default),
            ("r_min", mPES.r_min.default),
            ("exponent", mPES.exponent.default),
        )
Example #21
class PES(LearningRuleType):
    """Prescribed Error Sensitivity learning rule.

    Modifies a connection's decoders to minimize an error signal provided
    through a connection to the connection's learning rule.

    Parameters
    ----------
    learning_rate : float, optional
        A scalar indicating the rate at which weights will be adjusted.
    pre_synapse : `.Synapse`, optional
        Synapse model used to filter the pre-synaptic activities.

    Attributes
    ----------
    learning_rate : float
        A scalar indicating the rate at which weights will be adjusted.
    pre_synapse : `.Synapse`
        Synapse model used to filter the pre-synaptic activities.
    """

    modifies = "decoders"
    probeable = ("error", "activities", "delta")

    learning_rate = NumberParam("learning_rate",
                                low=0,
                                readonly=True,
                                default=1e-4)
    pre_synapse = SynapseParam("pre_synapse",
                               default=Lowpass(tau=0.005),
                               readonly=True)

    pre_tau = _deprecated_tau("pre_tau", "pre_synapse")

    def __init__(self,
                 learning_rate=Default,
                 pre_synapse=Default,
                 pre_tau=Unconfigurable):
        super().__init__(learning_rate, size_in="post_state")
        if learning_rate is not Default and learning_rate >= 1.0:
            warnings.warn("This learning rate is very high, and can result "
                          "in floating point errors from too much current.")

        if pre_tau is Unconfigurable:
            self.pre_synapse = pre_synapse
        else:
            self.pre_tau = pre_tau

    @property
    def _argdefaults(self):
        return (
            ("learning_rate", PES.learning_rate.default),
            ("pre_synapse", PES.pre_synapse.default),
        )
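For reference, a minimal sketch of the usual PES wiring: an error population computes post minus target and feeds the connection's learning rule. All sizes and rates here are hypothetical:

import numpy as np
import nengo

with nengo.Network():
    stim = nengo.Node(lambda t: np.sin(2 * np.pi * t))
    pre = nengo.Ensemble(60, dimensions=1)
    post = nengo.Ensemble(60, dimensions=1)
    error = nengo.Ensemble(60, dimensions=1)

    nengo.Connection(stim, pre)
    conn = nengo.Connection(
        pre, post, learning_rule_type=nengo.PES(learning_rate=1e-4))

    # error = actual - target; this connection attaches to the rule's 'in' signal
    nengo.Connection(post, error)
    nengo.Connection(stim, error, transform=-1)
    nengo.Connection(error, conn.learning_rule)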
Example #22
def __init__(self,
             synapse=Lowpass(tau=0.005),
             dist=Gaussian(mean=0, std=1),
             scale=True,
             synapse_kwargs=None,
             **kwargs):
    super(FilteredNoise, self).__init__(default_size_in=0, **kwargs)
    self.synapse = synapse
    self.synapse_kwargs = {} if synapse_kwargs is None else synapse_kwargs
    self.dist = dist
    self.scale = scale
Example #23
    def set_tau(self, val):
        # ``old_attr`` and ``new_attr`` are closed over from the enclosing
        # ``_deprecated_tau`` factory (used above for the ``pre_tau``-style
        # shims); this setter backs the generated property.
        if val is Unconfigurable:
            return

        since = "v2.8.0"
        url = "https://github.com/nengo/nengo/pull/1095"
        msg = ("%s has been deprecated, use %s instead (since %s).\n"
               "For more information, please visit %s" % (
                   old_attr, new_attr, since, url))
        warnings.warn(msg, DeprecationWarning)

        setattr(self, new_attr, None if val is None else Lowpass(val))
Example #24
def build_pes(model, pes, rule):
    conn = rule.connection

    # Create input error signal
    error = Signal(np.zeros(rule.size_in), name="PES:error")
    model.add_op(Reset(error))
    model.sig[rule]['in'] = error  # error connection will attach here

    acts = model.build(Lowpass(pes.pre_tau), model.sig[conn.pre_obj]['out'])
    acts_view = acts.reshape((1, acts.size))

    # Compute the correction, i.e. the scaled negative error
    correction = Signal(np.zeros(error.shape), name="PES:correction")
    local_error = correction.reshape((error.size, 1))
    model.add_op(Reset(correction))

    # correction = -learning_rate * (dt / n_neurons) * error
    n_neurons = (conn.pre_obj.n_neurons if isinstance(conn.pre_obj, Ensemble)
                 else conn.pre_obj.size_in)
    lr_sig = Signal(-pes.learning_rate * model.dt / n_neurons,
                    name="PES:learning_rate")
    model.add_op(DotInc(lr_sig, error, correction, tag="PES:correct"))

    if conn.solver.weights or (isinstance(conn.pre_obj, Neurons)
                               and isinstance(conn.post_obj, Neurons)):
        post = get_post_ens(conn)
        weights = model.sig[conn]['weights']
        encoders = model.sig[post]['encoders']

        # encoded = dot(encoders, correction)
        encoded = Signal(np.zeros(weights.shape[0]), name="PES:encoded")
        model.add_op(Reset(encoded))
        model.add_op(DotInc(encoders, correction, encoded, tag="PES:encode"))
        local_error = encoded.reshape((encoded.size, 1))
    elif not isinstance(conn.pre_obj, (Ensemble, Neurons)):
        raise ValueError("'pre' object '%s' not suitable for PES learning" %
                         (conn.pre_obj))

    # delta = local_error * activities
    model.add_op(Reset(model.sig[rule]['delta']))
    model.add_op(
        ElementwiseInc(local_error,
                       acts_view,
                       model.sig[rule]['delta'],
                       tag="PES:Inc Delta"))

    # expose these for probes
    model.sig[rule]['error'] = error
    model.sig[rule]['correction'] = correction
    model.sig[rule]['activities'] = acts

    model.params[rule] = None  # no build-time info to return
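
Stripped of signals and operators, the decoded (non-weight-solver) branch above is a scaled outer product. A plain-numpy sketch of one step with hypothetical sizes:

import numpy as np

n_neurons, dims = 50, 1
dt, learning_rate = 1e-3, 1e-4
acts = np.random.rand(n_neurons)   # filtered pre activities
error = np.random.randn(dims)      # signal attached at sig[rule]['in']

correction = -learning_rate * dt / n_neurons * error
delta = correction.reshape((dims, 1)) * acts.reshape((1, n_neurons))
assert delta.shape == (dims, n_neurons)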
Example #25
def build_bcm(model, bcm, rule):
    """Builds a `.BCM` object into a model.

    Calls synapse build functions to filter the pre and post activities,
    and adds a `.SimBCM` operator to the model to calculate the delta.

    Parameters
    ----------
    model : Model
        The model to build into.
    bcm : BCM
        Learning rule type to build.
    rule : LearningRule
        The learning rule object corresponding to the neuron type.

    Notes
    -----
    Does not modify ``model.params[]`` and can therefore be called
    more than once with the same `.BCM` instance.
    """

    conn = rule.connection
    pre_activities = model.sig[get_pre_ens(conn).neurons]['out']
    pre_filtered = model.build(Lowpass(bcm.pre_tau), pre_activities)
    post_activities = model.sig[get_post_ens(conn).neurons]['out']
    post_filtered = model.build(Lowpass(bcm.post_tau), post_activities)
    theta = model.build(Lowpass(bcm.theta_tau), post_filtered)

    model.add_op(
        SimBCM(pre_filtered,
               post_filtered,
               theta,
               model.sig[rule]['delta'],
               learning_rate=bcm.learning_rate))

    # expose these for probes
    model.sig[rule]['theta'] = theta
    model.sig[rule]['pre_filtered'] = pre_filtered
    model.sig[rule]['post_filtered'] = post_filtered
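
In plain numpy, the SimBCM update is the classic BCM rule: the post rate relative to the slow threshold theta gates Hebbian growth. A sketch assuming delta = learning_rate * dt * post * (post - theta) outer pre, with made-up sizes:

import numpy as np

pre = np.random.rand(40)     # pre_filtered
post = np.random.rand(30)    # post_filtered
theta = np.random.rand(30)   # slow lowpass of post_filtered
alpha = 1e-9 * 1e-3          # learning_rate * dt

delta = np.outer(alpha * post * (post - theta), pre)  # shape (30, 40)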
Example #26
class FilteredNoise(Process):
    """Filtered white noise process.

    This process takes white noise and filters it using the provided synapse.

    Parameters
    ----------
    synapse : Synapse, optional
        The synapse to use to filter the noise. Default: Lowpass(tau=0.005)
    synapse_kwargs : dict, optional
        Arguments to pass to `synapse.make_step`.
    dist : Distribution, optional
        The distribution used to generate the white noise.
        Default: Gaussian(mean=0, std=1)
    scale : bool, optional
        Whether to scale the white noise for integration, making the output
        signal invariant to `dt`. Defaults to True.
    """

    synapse = LinearFilterParam()
    dist = DistributionParam()
    scale = BoolParam()

    def __init__(self, synapse=None, synapse_kwargs={}, dist=None, scale=True):
        super(FilteredNoise, self).__init__()
        self.synapse = Lowpass(tau=0.005) if synapse is None else synapse
        self.synapse_kwargs = synapse_kwargs
        self.dist = Gaussian(mean=0, std=1) if dist is None else dist
        self.scale = scale

    def make_step(self, size_in, size_out, dt, rng):
        assert size_in == 0

        dist = self.dist
        scale = self.scale
        alpha = 1.0 / np.sqrt(dt)  # scale white noise so the filtered output is dt-invariant
        output = np.zeros(size_out)
        filter_step = self.synapse.make_step(dt, output, **self.synapse_kwargs)

        # separate RNG for simulation for step order independence
        sim_rng = np.random.RandomState(rng.randint(npext.maxint))

        def step(t):
            x = dist.sample(n=1, d=size_out, rng=sim_rng)[0]
            if scale:
                x *= alpha
            filter_step(x)
            return output

        return step
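
With a current nengo install, the process above can drive a Node directly; a hedged usage sketch (the Alpha synapse and sizes are arbitrary choices):

import nengo
from nengo.processes import FilteredNoise
from nengo.synapses import Alpha

with nengo.Network():
    # two independent channels of alpha-filtered white noise
    noise = nengo.Node(FilteredNoise(synapse=Alpha(0.1)), size_out=2)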
Example #28
def test_tau_deprecation(LearningRule):
    params = [("pre_tau", "pre_synapse"), ("post_tau", "post_synapse"),
              ("theta_tau", "theta_synapse")]
    kwargs = {}
    for i, (p0, p1) in enumerate(params):
        if hasattr(LearningRule, p0):
            kwargs[p0] = i

    with pytest.warns(DeprecationWarning):
        l_rule = LearningRule(learning_rate=0, **kwargs)

    for i, (p0, p1) in enumerate(params):
        if hasattr(LearningRule, p0):
            assert getattr(l_rule, p0) == i
            assert getattr(l_rule, p1) == Lowpass(i)
Example #29
def test_lti_lowpass(rng, plt):
    dt = 1e-3
    tend = 3.
    t = dt * np.arange(tend / dt)
    nt = len(t)

    tau = 1e-2
    lti = LinearFilter([1], [tau, 1])

    u = rng.normal(size=(nt, 10))
    x = Lowpass(tau).filt(u, dt=dt)
    y = lti.filt(u, dt=dt)

    plt.plot(t, x[:, 0], label="Lowpass")
    plt.plot(t, y[:, 0], label="LTI")
    plt.legend(loc="best")

    assert np.allclose(x, y)
Example #30
def test_argreprs():
    def check_init_args(cls, args):
        assert getfullargspec(cls.__init__).args[1:] == args

    def check_repr(obj):
        assert eval(repr(obj)) == obj

    check_init_args(LinearFilter, ['num', 'den', 'analog'])
    check_repr(LinearFilter([1, 2], [3, 4]))
    check_repr(LinearFilter([1, 2], [3, 4], analog=False))

    check_init_args(Lowpass, ['tau'])
    check_repr(Lowpass(0.3))

    check_init_args(Alpha, ['tau'])
    check_repr(Alpha(0.3))

    check_init_args(Triangle, ['t'])
    check_repr(Triangle(0.3))
Example #31
def test_argreprs():
    def check_init_args(cls, args):
        assert getfullargspec(cls.__init__).args[1:] == args

    def check_repr(obj):
        assert eval(repr(obj)) == obj

    check_init_args(LinearFilter, ["num", "den", "analog", "method"])
    check_repr(LinearFilter([1, 2], [3, 4]))
    check_repr(LinearFilter([1, 2], [3, 4], analog=False))

    check_init_args(Lowpass, ["tau"])
    check_repr(Lowpass(0.3))

    check_init_args(Alpha, ["tau"])
    check_repr(Alpha(0.3))

    check_init_args(Triangle, ["t"])
    check_repr(Triangle(0.3))
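
The eval(repr(obj)) == obj checks pass because synapse objects compare by parameter values rather than identity, as Example #28 also relies on:

from nengo.synapses import Lowpass

assert Lowpass(0.3) == Lowpass(0.3)  # equal parameters, equal objects
assert Lowpass(0.3) != Lowpass(0.2)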
Example #32
def __init__(self, synapse=None, synapse_kwargs={}, dist=None, scale=True):
    super(FilteredNoise, self).__init__()
    self.synapse = Lowpass(tau=0.005) if synapse is None else synapse
    self.synapse_kwargs = synapse_kwargs
    self.dist = Gaussian(mean=0, std=1) if dist is None else dist
    self.scale = scale