Example #1
def test_op_constant(dtype, diff, sess):
    ops = (SimNeurons(LIF(tau_rc=1), Signal(np.zeros(10)), None),
           SimNeurons(LIF(tau_rc=2 if diff else 1), Signal(np.zeros(10)),
                      None))

    signals = SignalDict(tf.float32, 1)
    const = signals.op_constant(
        [op.neurons for op in ops], [op.J.shape[0] for op in ops],
        "tau_rc", dtype)
    const1 = signals.op_constant(
        [op.neurons for op in ops], [op.J.shape[0] for op in ops],
        "tau_rc", dtype, ndims=1)
    const3 = signals.op_constant(
        [op.neurons for op in ops], [op.J.shape[0] for op in ops],
        "tau_rc", dtype, ndims=3)

    assert const.dtype.base_dtype == dtype

    sess.run(tf.variables_initializer(tf.get_collection("constants")),
             feed_dict=signals.constant_phs)
    x, x1, x3 = sess.run([const, const1, const3])

    if diff:
        assert np.array_equal(x, [[1]] * 10 + [[2]] * 10)
        assert np.array_equal(x[:, 0], x1)
        assert np.array_equal(x, x3[..., 0])
    else:
        assert np.array_equal(x, 1.0)
        assert np.array_equal(x, x1)
        assert np.array_equal(x, x3)
Example #2
def test_frozen():
    """Test attributes inherited from FrozenObject"""
    a = LIF()
    b = LIF()
    c = LIF(tau_rc=0.3)
    d = Izhikevich()

    assert hash(a) == hash(a)
    assert hash(b) == hash(b)
    assert hash(c) == hash(c)
    assert hash(d) == hash(d)

    assert a == b
    assert hash(a) == hash(b)
    assert a != c
    assert hash(a) != hash(c)  # not guaranteed, but highly likely
    assert b != c
    assert hash(b) != hash(c)  # not guaranteed, but highly likely
    assert a != d
    assert hash(a) != hash(d)  # not guaranteed, but highly likely

    with pytest.raises(ValueError):
        a.tau_rc = 0.3
    with pytest.raises(ValueError):
        d.coupling = 8
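Note: the equality and hashing behavior asserted above comes from FrozenObject deriving both from the constructor parameters alone. A minimal standalone sketch of that idea, using a frozen dataclass as an analogy rather than Nengo's actual implementation:

import dataclasses

@dataclasses.dataclass(frozen=True)
class FrozenNeuron:
    # stand-in for an immutable, parameter-defined neuron type
    tau_rc: float = 0.02
    tau_ref: float = 0.002

a, b, c = FrozenNeuron(), FrozenNeuron(), FrozenNeuron(tau_rc=0.3)
assert a == b and hash(a) == hash(b)   # equal parameters: equal objects, equal hashes
assert a != c                          # different parameters: unequal objects
try:
    a.tau_rc = 0.3                     # rejected after construction
except dataclasses.FrozenInstanceError:
    pass                               # Nengo raises ValueError instead, as tested above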
Example #3
def test_op_constant(dtype, diff):
    ops = (
        SimNeurons(LIF(tau_rc=1), Signal(np.zeros(10)), None),
        SimNeurons(LIF(tau_rc=2 if diff else 1), Signal(np.zeros(10)), None),
    )

    signals = SignalDict(tf.float32, 1)
    const = signals.op_constant(
        [op.neurons for op in ops], [op.J.shape[0] for op in ops], "tau_rc", dtype
    )
    const1 = signals.op_constant(
        [op.neurons for op in ops],
        [op.J.shape[0] for op in ops],
        "tau_rc",
        dtype,
        shape=(-1,),
    )
    const3 = signals.op_constant(
        [op.neurons for op in ops],
        [op.J.shape[0] for op in ops],
        "tau_rc",
        dtype,
        shape=(1, -1, 1),
    )

    assert const.dtype.base_dtype == dtype

    if diff:
        assert np.array_equal(const, [[1.0] * 10 + [2.0] * 10])
        assert np.array_equal(const[0], const1)
        assert np.array_equal(const, const3[..., 0])
    else:
        assert np.array_equal(const, 1.0)
        assert np.array_equal(const, const1)
        assert np.array_equal(const, const3)
Example #5
def test_lif():
    """Test that the dynamic lif model approximately matches the rates."""

    n_neurons = 40
    tau_rc = 0.02
    tau_ref = 0.002
    lif = LIF(tau_rc=tau_rc, tau_ref=tau_ref)

    voltage = Signal(
        np.zeros(n_neurons), name="%s.voltage" % lif)

    ref_time = Signal(
        np.zeros(n_neurons), name="%s.refractory_time" % lif)

    J = Signal(np.zeros(n_neurons), 'J')
    output = Signal(np.zeros(n_neurons), 'output')

    op = SimNeurons(
        neurons=lif, J=J, output=output, states=[voltage, ref_time])

    input = np.arange(-2, 2, .1)
    input_func = SimPyFunc(J, lambda: input, False, None)

    probes = [SignalProbe(output)]

    sim = TestSimulator([op, input_func], probes)
    sim.run(1.0)

    spikes = sim.data[probes[0]] / (1.0 / sim.dt)
    sim_rates = spikes.sum(axis=0)

    math_rates = lif.rates(
        input, gain=np.ones(n_neurons), bias=np.zeros(n_neurons))

    assert np.allclose(np.squeeze(sim_rates), math_rates, atol=1, rtol=0.02)
Example #6
    def step_math(self, dt, J, output, voltage, ref, resources, calcium):
        """Implement the u and x parameters """
        x = resources
        u = calcium
        LIF.step_math(self, dt, J, output, voltage, ref)

        # calculate changes in u and x
        dx = dt * ((1 - x) / self.tau_x - u * x * output)
        du = dt * ((0.2 - u) / self.tau_u + 0.2 * (1 - u) * output)

        x += dx
        u += du
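In the step above, output is a Nengo-style spike train whose nonzero entries equal 1/dt, so each spike depletes the resources x by u * x and bumps the utilization u toward 1, while both relax back to their baselines with time constants tau_x and tau_u. A standalone NumPy sketch of just those two updates, with assumed values for the time constants, driven by a fixed 50 Hz spike train:

import numpy as np

dt = 1e-3
tau_x, tau_u = 0.1, 0.1          # assumed values for self.tau_x / self.tau_u
steps = 1000
x = np.ones(1)                   # available resources, start full
u = np.full(1, 0.2)              # utilization ("calcium"), baseline 0.2
spikes = np.zeros(steps)
spikes[::20] = 1.0 / dt          # 50 Hz spike train with Nengo's 1/dt amplitude

for t in range(steps):
    output = spikes[t:t + 1]
    dx = dt * ((1 - x) / tau_x - u * x * output)
    du = dt * ((0.2 - u) / tau_u + 0.2 * (1 - u) * output)
    x += dx
    u += du

print(x, u)                      # x has depressed below 1, u has risen above 0.2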
Example #7
def test_lif_min_voltage(Simulator, plt, min_voltage, seed):
    model = nengo.Network(seed=seed)
    with model:
        stim = nengo.Node(lambda t: np.sin(t * 4 * np.pi))
        ens = nengo.Ensemble(n_neurons=10,
                             dimensions=1,
                             neuron_type=LIF(min_voltage=min_voltage))
        nengo.Connection(stim, ens, synapse=None)
        p_val = nengo.Probe(ens, synapse=0.01)
        p_voltage = nengo.Probe(ens.neurons, "voltage")

    with Simulator(model) as sim:
        sim.run(0.5)

    plt.subplot(2, 1, 1)
    plt.plot(sim.trange(), sim.data[p_val])
    plt.ylabel("Decoded value")
    plt.subplot(2, 1, 2)
    plt.plot(sim.trange(), sim.data[p_voltage])
    plt.ylabel("Voltage")

    if min_voltage < -10:
        assert np.min(sim.data[p_voltage]) < -10
    else:
        assert np.min(sim.data[p_voltage]) == min_voltage
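The min_voltage argument comes from pytest parametrization that is not shown in this excerpt. For illustration only (the values used in the actual Nengo test suite may differ), a parametrization along these lines would exercise both branches of the final assertion, one effectively unbounded case and two clamped cases:

import numpy as np
import pytest

@pytest.mark.parametrize("min_voltage", [-np.inf, -1, 0])   # hypothetical values
def test_lif_min_voltage(Simulator, plt, min_voltage, seed):
    ...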
Example #8
def test_lif(Simulator, plt, rng, dt, allclose):
    """Test that the dynamic model approximately matches the rates."""
    n = 5000
    x = 0.5
    encoders = np.ones((n, 1))
    max_rates = rng.uniform(low=10, high=200, size=n)
    intercepts = rng.uniform(low=-1, high=1, size=n)

    m = nengo.Network()
    with m:
        ins = nengo.Node(x)
        ens = nengo.Ensemble(
            n,
            dimensions=1,
            neuron_type=LIF(),
            encoders=encoders,
            max_rates=max_rates,
            intercepts=intercepts,
        )
        nengo.Connection(ins,
                         ens.neurons,
                         transform=np.ones((n, 1)),
                         synapse=None)
        spike_probe = nengo.Probe(ens.neurons)
        voltage_probe = nengo.Probe(ens.neurons, "voltage")
        ref_probe = nengo.Probe(ens.neurons, "refractory_time")

    t_final = 1.0
    with Simulator(m, dt=dt) as sim:
        sim.run(t_final)

    i = 3
    plt.subplot(311)
    plt.plot(sim.trange(), sim.data[spike_probe][:, :i])
    plt.subplot(312)
    plt.plot(sim.trange(), sim.data[voltage_probe][:, :i])
    plt.subplot(313)
    plt.plot(sim.trange(), sim.data[ref_probe][:, :i])
    plt.ylim([-dt, ens.neuron_type.tau_ref + dt])

    # check rates against analytic rates
    math_rates = ens.neuron_type.rates(
        x, *ens.neuron_type.gain_bias(max_rates, intercepts))
    spikes = sim.data[spike_probe]
    sim_rates = (spikes > 0).sum(0) / t_final
    logging.info("ME = %f", (sim_rates - math_rates).mean())
    logging.info("RMSE = %f",
                 rms(sim_rates - math_rates) / (rms(math_rates) + 1e-20))
    assert np.sum(
        math_rates > 0) > 0.5 * n, "At least 50% of neurons must fire"
    assert allclose(sim_rates, math_rates, atol=1, rtol=0.02)

    # if voltage and ref time are non-constant, the probe is doing something
    assert np.abs(np.diff(sim.data[voltage_probe])).sum() > 1
    assert np.abs(np.diff(sim.data[ref_probe])).sum() > 1

    # compute spike counts after each timestep
    actual_counts = (spikes > 0).cumsum(axis=0)
    expected_counts = np.outer(sim.trange(), math_rates)
    assert (abs(actual_counts - expected_counts) < 1).all()
Example #9
def test_neurontypeparam():
    """NeuronTypeParam must be a neuron type."""
    class Test:
        ntp = NeuronTypeParam("ntp", default=None)

    inst = Test()
    inst.ntp = LIF()
    assert isinstance(inst.ntp, LIF)
    with pytest.raises(ValueError):
        inst.ntp = "a"
Example #10
def test_lif_zero_tau_ref(Simulator, allclose):
    """If we set tau_ref=0, we should be able to reach firing rate 1/dt."""
    dt = 1e-3
    max_rate = 1.0 / dt
    with nengo.Network() as m:
        ens = nengo.Ensemble(
            1, 1, encoders=[[1]], max_rates=[max_rate], neuron_type=LIF(tau_ref=0)
        )
        nengo.Connection(nengo.Node(output=10), ens)
        p = nengo.Probe(ens.neurons)
    with Simulator(m) as sim:
        sim.run(0.02)
    assert allclose(sim.data[p][1:], max_rate)
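The reasoning behind this test: the steady-state LIF rate is 1 / (tau_ref + tau_rc * ln(1 + 1 / (J - 1))) for input current J > 1, so with tau_ref = 0 the analytic rate grows without bound as J increases, while a discrete-time simulator can emit at most one spike per timestep, i.e. 1/dt. A small sketch of that closed form (essentially what lif.rates computes in the other examples):

import numpy as np

def lif_rate(J, tau_rc=0.02, tau_ref=0.0):
    """Closed-form steady-state LIF firing rate; zero for J <= 1."""
    J = np.asarray(J, dtype=float)
    rates = np.zeros_like(J)
    above = J > 1
    rates[above] = 1.0 / (tau_ref + tau_rc * np.log1p(1.0 / (J[above] - 1)))
    return rates

print(lif_rate([1.5, 10.0, 1000.0]))                  # keeps growing with J when tau_ref == 0
print(lif_rate([1.5, 10.0, 1000.0], tau_ref=0.002))   # saturates near 1 / tau_ref = 500 Hz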
Example #11
def test_lif_builtin(rng):
    """Test that the dynamic model approximately matches the rates."""
    dt = 1e-3
    t_final = 1.0

    N = 10
    lif = LIF()
    gain, bias = lif.gain_bias(
        rng.uniform(80, 100, size=N), rng.uniform(-1, 1, size=N))

    x = np.arange(-2, 2, .1)
    J = gain * x[:, None] + bias

    voltage = np.zeros_like(J)
    reftime = np.zeros_like(J)

    spikes = np.zeros((int(t_final / dt),) + J.shape)
    for i, spikes_i in enumerate(spikes):
        lif.step_math(dt, J, spikes_i, voltage, reftime)

    math_rates = lif.rates(x, gain, bias)
    sim_rates = spikes.mean(0)
    assert np.allclose(sim_rates, math_rates, atol=1, rtol=0.02)
Example #12
def test_probeable():
    def check_neuron_type(neuron_type, expected):
        assert neuron_type.probeable == expected
        ens = nengo.Ensemble(10, 1, neuron_type=neuron_type)
        assert ens.neurons.probeable == expected + ("input", )

    with nengo.Network():
        check_neuron_type(Direct(), ("output", ))
        check_neuron_type(RectifiedLinear(), ("output", ))
        check_neuron_type(SpikingRectifiedLinear(), ("output", "voltage"))
        check_neuron_type(Sigmoid(), ("output", ))
        check_neuron_type(Tanh(), ("output", ))
        check_neuron_type(LIFRate(), ("output", ))
        check_neuron_type(LIF(), ("output", "voltage", "refractory_time"))
        check_neuron_type(AdaptiveLIFRate(), ("output", "adaptation"))
        check_neuron_type(
            AdaptiveLIF(),
            ("output", "voltage", "refractory_time", "adaptation"))
        check_neuron_type(Izhikevich(), ("output", "voltage", "recovery"))
        check_neuron_type(RegularSpiking(LIFRate()),
                          ("output", "rate_out", "voltage"))
        check_neuron_type(StochasticSpiking(AdaptiveLIFRate()),
                          ("output", "rate_out", "adaptation"))
        check_neuron_type(PoissonSpiking(LIFRate()), ("output", "rate_out"))
Example #13
def test_lif_builtin(rng, allclose):
    """Test that the dynamic model approximately matches the rates."""
    dt = 1e-3
    t_final = 1.0

    N = 10
    lif = LIF()
    gain, bias = lif.gain_bias(rng.uniform(80, 100, size=N), rng.uniform(-1, 1, size=N))

    x = np.arange(-2, 2, 0.1)
    J = gain * x[:, None] + bias

    voltage = np.zeros_like(J)
    reftime = np.zeros_like(J)

    spikes = np.zeros((int(t_final / dt),) + J.shape)
    for spikes_i in spikes:
        lif.step(dt, J, spikes_i, voltage, reftime)

    math_rates = lif.rates(x, gain, bias)
    sim_rates = spikes.mean(0)
    assert allclose(sim_rates, math_rates, atol=1, rtol=0.02)
Example #14
class Ensemble(NengoObject):
    """A group of neurons that collectively represent a vector.

    Parameters
    ----------
    n_neurons : int
        The number of neurons.
    dimensions : int
        The number of representational dimensions.
    radius : int, optional
        The representational radius of the ensemble.
    encoders : Distribution or ndarray (`n_neurons`, `dimensions`), optional
        The encoders, used to transform from representational space
        to neuron space. Each row is a neuron's encoder, each column is a
        representational dimension.
    intercepts : Distribution or ndarray (`n_neurons`), optional
        The point along each neuron's encoder where its activity is zero. If
        e is the neuron's encoder, then the activity will be zero when
        dot(x, e) <= c, where c is the given intercept.
    max_rates : Distribution or ndarray (`n_neurons`), optional
        The activity of each neuron when dot(x, e) = 1, where e is the neuron's
        encoder.
    eval_points : Distribution or ndarray (`n_eval_points`, `dims`), optional
        The evaluation points used for decoder solving, spanning the interval
        (-radius, radius) in each dimension, or a distribution from which to
        choose evaluation points. Default: ``UniformHypersphere``.
    n_eval_points : int, optional
        The number of evaluation points to be drawn from the `eval_points`
        distribution. If None (the default), then a heuristic is used to
        determine the number of evaluation points.
    neuron_type : Neurons, optional
        The model that simulates all neurons in the ensemble.
    noise : StochasticProcess, optional
        Random noise injected directly into each neuron in the ensemble
        as current. A sample is drawn for each individual neuron on
        every simulation step.
    seed : int, optional
        The seed used for random number generation.
    label : str, optional
        A name for the ensemble. Used for debugging and visualization.
    """

    n_neurons = IntParam(default=None, low=1)
    dimensions = IntParam(default=None, low=1)
    radius = NumberParam(default=1, low=1e-10)
    neuron_type = NeuronTypeParam(default=LIF())
    encoders = DistributionParam(default=UniformHypersphere(surface=True),
                                 sample_shape=('n_neurons', 'dimensions'))
    intercepts = DistributionParam(default=Uniform(-1.0, 1.0),
                                   optional=True,
                                   sample_shape=('n_neurons', ))
    max_rates = DistributionParam(default=Uniform(200, 400),
                                  optional=True,
                                  sample_shape=('n_neurons', ))
    n_eval_points = IntParam(default=None, optional=True)
    eval_points = DistributionParam(default=UniformHypersphere(),
                                    sample_shape=('*', 'dimensions'))
    bias = DistributionParam(default=None,
                             optional=True,
                             sample_shape=('n_neurons', ))
    gain = DistributionParam(default=None,
                             optional=True,
                             sample_shape=('n_neurons', ))
    noise = StochasticProcessParam(default=None, optional=True)
    seed = IntParam(default=None, optional=True)
    label = StringParam(default=None, optional=True)

    def __init__(self,
                 n_neurons,
                 dimensions,
                 radius=Default,
                 encoders=Default,
                 intercepts=Default,
                 max_rates=Default,
                 eval_points=Default,
                 n_eval_points=Default,
                 neuron_type=Default,
                 gain=Default,
                 bias=Default,
                 noise=Default,
                 seed=Default,
                 label=Default):

        self.n_neurons = n_neurons
        self.dimensions = dimensions
        self.radius = radius
        self.encoders = encoders
        self.intercepts = intercepts
        self.max_rates = max_rates
        self.label = label
        self.n_eval_points = n_eval_points
        self.eval_points = eval_points
        self.bias = bias
        self.gain = gain
        self.neuron_type = neuron_type
        self.noise = noise
        self.seed = seed
        self._neurons = Neurons(self)

    def __getitem__(self, key):
        return ObjView(self, key)

    def __len__(self):
        return self.dimensions

    @property
    def neurons(self):
        return self._neurons

    @neurons.setter
    def neurons(self, dummy):
        raise AttributeError("neurons cannot be overwritten.")

    @property
    def probeable(self):
        return ["decoded_output", "input"]

    @property
    def size_in(self):
        return self.dimensions

    @property
    def size_out(self):
        return self.dimensions
Example #15
def test_argreprs():
    """Test repr() for each neuron type."""
    def check_init_args(cls, args):
        assert getfullargspec(cls.__init__).args[1:] == args

    def check_repr(obj):
        assert eval(repr(obj)) == obj

    check_init_args(Direct, [])
    check_repr(Direct())

    check_init_args(RectifiedLinear, ["amplitude"])
    check_repr(RectifiedLinear())
    check_repr(RectifiedLinear(amplitude=2))

    check_init_args(SpikingRectifiedLinear, ["amplitude"])
    check_repr(SpikingRectifiedLinear())
    check_repr(SpikingRectifiedLinear(amplitude=2))

    check_init_args(Sigmoid, ["tau_ref"])
    check_repr(Sigmoid())
    check_repr(Sigmoid(tau_ref=0.1))

    check_init_args(LIFRate, ["tau_rc", "tau_ref", "amplitude"])
    check_repr(LIFRate())
    check_repr(LIFRate(tau_rc=0.1))
    check_repr(LIFRate(tau_ref=0.1))
    check_repr(LIFRate(amplitude=2))
    check_repr(LIFRate(tau_rc=0.05, tau_ref=0.02))
    check_repr(LIFRate(tau_rc=0.05, amplitude=2))
    check_repr(LIFRate(tau_ref=0.02, amplitude=2))
    check_repr(LIFRate(tau_rc=0.05, tau_ref=0.02, amplitude=2))

    check_init_args(LIF, ["tau_rc", "tau_ref", "min_voltage", "amplitude"])
    check_repr(LIF())
    check_repr(LIF(tau_rc=0.1))
    check_repr(LIF(tau_ref=0.1))
    check_repr(LIF(amplitude=2))
    check_repr(LIF(min_voltage=-0.5))
    check_repr(LIF(tau_rc=0.05, tau_ref=0.02))
    check_repr(LIF(tau_rc=0.05, amplitude=2))
    check_repr(LIF(tau_ref=0.02, amplitude=2))
    check_repr(LIF(tau_rc=0.05, tau_ref=0.02, amplitude=2))
    check_repr(LIF(tau_rc=0.05, tau_ref=0.02, min_voltage=-0.5, amplitude=2))

    check_init_args(AdaptiveLIFRate,
                    ["tau_n", "inc_n", "tau_rc", "tau_ref", "amplitude"])
    check_repr(AdaptiveLIFRate())
    check_repr(AdaptiveLIFRate(tau_n=0.1))
    check_repr(AdaptiveLIFRate(inc_n=0.5))
    check_repr(AdaptiveLIFRate(tau_rc=0.1))
    check_repr(AdaptiveLIFRate(tau_ref=0.1))
    check_repr(AdaptiveLIFRate(amplitude=2))
    check_repr(
        AdaptiveLIFRate(tau_n=0.1,
                        inc_n=0.5,
                        tau_rc=0.05,
                        tau_ref=0.02,
                        amplitude=2))

    check_init_args(
        AdaptiveLIF,
        ["tau_n", "inc_n", "tau_rc", "tau_ref", "min_voltage", "amplitude"])
    check_repr(AdaptiveLIF())
    check_repr(AdaptiveLIF(tau_n=0.1))
    check_repr(AdaptiveLIF(inc_n=0.5))
    check_repr(AdaptiveLIF(tau_rc=0.1))
    check_repr(AdaptiveLIF(tau_ref=0.1))
    check_repr(AdaptiveLIF(min_voltage=-0.5))
    check_repr(
        AdaptiveLIF(
            tau_n=0.1,
            inc_n=0.5,
            tau_rc=0.05,
            tau_ref=0.02,
            min_voltage=-0.5,
            amplitude=2,
        ))

    check_init_args(
        Izhikevich,
        ["tau_recovery", "coupling", "reset_voltage", "reset_recovery"])
    check_repr(Izhikevich())
    check_repr(Izhikevich(tau_recovery=0.1))
    check_repr(Izhikevich(coupling=0.3))
    check_repr(Izhikevich(reset_voltage=-1))
    check_repr(Izhikevich(reset_recovery=5))
    check_repr(
        Izhikevich(tau_recovery=0.1,
                   coupling=0.3,
                   reset_voltage=-1,
                   reset_recovery=5))
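What check_repr verifies, spelled out for one case: the repr of a neuron type is valid Python that rebuilds an equal object (and, per test_frozen above, an equally hashed one). The exact repr text can vary between Nengo versions:

from nengo.neurons import LIF

neuron = LIF(tau_rc=0.05, tau_ref=0.02, amplitude=2)
rebuilt = eval(repr(neuron))   # repr looks like "LIF(tau_rc=0.05, tau_ref=0.02, amplitude=2)"
assert rebuilt == neuron
assert hash(rebuilt) == hash(neuron)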
Example #16
def test_mergeable():
    # anything is mergeable with an empty list
    assert mergeable(None, [])

    # ops with different numbers of sets/incs/reads/updates are not mergeable
    assert not mergeable(dummies.Op(sets=[dummies.Signal()]), [dummies.Op()])
    assert not mergeable(dummies.Op(incs=[dummies.Signal()]), [dummies.Op()])
    assert not mergeable(dummies.Op(reads=[dummies.Signal()]), [dummies.Op()])
    assert not mergeable(dummies.Op(updates=[dummies.Signal()]), [dummies.Op()])
    assert mergeable(dummies.Op(sets=[dummies.Signal()]),
                     [dummies.Op(sets=[dummies.Signal()])])

    # check matching dtypes
    assert not mergeable(dummies.Op(sets=[dummies.Signal(dtype=np.float32)]),
                         [dummies.Op(sets=[dummies.Signal(dtype=np.float64)])])

    # shape mismatch
    assert not mergeable(dummies.Op(sets=[dummies.Signal(shape=(1, 2))]),
                         [dummies.Op(sets=[dummies.Signal(shape=(1, 3))])])

    # display shape mismatch
    assert not mergeable(
        dummies.Op(sets=[dummies.Signal(base_shape=(2, 2), shape=(4, 1))]),
        [dummies.Op(sets=[dummies.Signal(base_shape=(2, 2), shape=(1, 4))])])

    # first dimension mismatch
    assert mergeable(dummies.Op(sets=[dummies.Signal(shape=(3, 2))]),
                     [dummies.Op(sets=[dummies.Signal(shape=(4, 2))])])

    # Copy (inc must match)
    assert mergeable(Copy(dummies.Signal(), dummies.Signal(), inc=True),
                     [Copy(dummies.Signal(), dummies.Signal(), inc=True)])
    assert not mergeable(Copy(dummies.Signal(), dummies.Signal(), inc=True),
                         [Copy(dummies.Signal(), dummies.Signal(), inc=False)])

    # elementwise (first dimension must match)
    assert mergeable(
        ElementwiseInc(dummies.Signal(), dummies.Signal(), dummies.Signal()),
        [ElementwiseInc(dummies.Signal(), dummies.Signal(), dummies.Signal())])
    assert mergeable(
        ElementwiseInc(dummies.Signal(shape=(1,)), dummies.Signal(), dummies.Signal()),
        [ElementwiseInc(dummies.Signal(shape=()), dummies.Signal(), dummies.Signal())])
    assert not mergeable(
        ElementwiseInc(dummies.Signal(shape=(3,)), dummies.Signal(), dummies.Signal()),
        [ElementwiseInc(dummies.Signal(shape=(2,)), dummies.Signal(),
                        dummies.Signal())])

    # simpyfunc (t input must match)
    time = dummies.Signal()
    assert mergeable(SimPyFunc(None, None, time, None),
                     [SimPyFunc(None, None, time, None)])
    assert mergeable(SimPyFunc(None, None, None, dummies.Signal()),
                     [SimPyFunc(None, None, None, dummies.Signal())])
    assert not mergeable(SimPyFunc(None, None, dummies.Signal(), None),
                         [SimPyFunc(None, None, None, dummies.Signal())])

    # simneurons
    # check matching TF_NEURON_IMPL
    assert mergeable(SimNeurons(LIF(), dummies.Signal(), dummies.Signal()),
                     [SimNeurons(LIF(), dummies.Signal(), dummies.Signal())])
    assert not mergeable(SimNeurons(LIF(), dummies.Signal(), dummies.Signal()),
                         [SimNeurons(LIFRate(), dummies.Signal(), dummies.Signal())])

    # check custom with non-custom implementation
    assert not mergeable(SimNeurons(LIF(), dummies.Signal(), dummies.Signal()),
                         [SimNeurons(Izhikevich(), dummies.Signal(),
                                     dummies.Signal())])

    # check non-custom matching
    assert not mergeable(
        SimNeurons(Izhikevich(), dummies.Signal(), dummies.Signal()),
        [SimNeurons(AdaptiveLIF(), dummies.Signal(), dummies.Signal())])
    assert not mergeable(
        SimNeurons(Izhikevich(), dummies.Signal(), dummies.Signal(),
                   states=[dummies.Signal(dtype=np.float32)]),
        [SimNeurons(Izhikevich(), dummies.Signal(), dummies.Signal(),
                    states=[dummies.Signal(dtype=np.int32)])])
    assert mergeable(
        SimNeurons(Izhikevich(), dummies.Signal(), dummies.Signal(),
                   states=[dummies.Signal(shape=(3,))]),
        [SimNeurons(Izhikevich(), dummies.Signal(), dummies.Signal(),
                    states=[dummies.Signal(shape=(2,))])])
    assert not mergeable(
        SimNeurons(Izhikevich(), dummies.Signal(), dummies.Signal(),
                   states=[dummies.Signal(shape=(2, 1))]),
        [SimNeurons(Izhikevich(), dummies.Signal(), dummies.Signal(),
                    states=[dummies.Signal(shape=(2, 2))])])

    # simprocess
    # mode must match
    assert not mergeable(
        SimProcess(Lowpass(0), None, dummies.Signal(), dummies.Signal(),
                   mode="inc"),
        [SimProcess(Lowpass(0), None, dummies.Signal(), dummies.Signal(),
                    mode="set")])

    # check that lowpass match
    assert mergeable(SimProcess(Lowpass(0), None, None, dummies.Signal()),
                     [SimProcess(Lowpass(0), None, None, dummies.Signal())])

    # check that lowpass and linear don't match
    assert not mergeable(SimProcess(Lowpass(0), None, None, dummies.Signal()),
                         [SimProcess(Alpha(0), None, None, dummies.Signal())])

    # check that two linear do match
    assert mergeable(
        SimProcess(Alpha(0.1), dummies.Signal(), None, dummies.Signal()),
        [SimProcess(LinearFilter([1], [1, 1, 1]), dummies.Signal(), None,
                    dummies.Signal())])

    # check custom and non-custom don't match
    assert not mergeable(SimProcess(Triangle(0), None, None, dummies.Signal()),
                         [SimProcess(Alpha(0), None, None, dummies.Signal())])

    # check non-custom matching
    assert mergeable(SimProcess(Triangle(0), None, None, dummies.Signal()),
                     [SimProcess(Triangle(0), None, None, dummies.Signal())])

    # simtensornode
    a = SimTensorNode(None, dummies.Signal(), None, dummies.Signal())
    assert not mergeable(a, [a])

    # learning rules
    a = SimBCM(dummies.Signal((4,)), dummies.Signal(), dummies.Signal(), dummies.Signal(),
               dummies.Signal())
    b = SimBCM(dummies.Signal((5,)), dummies.Signal(), dummies.Signal(), dummies.Signal(),
               dummies.Signal())
    assert not mergeable(a, [b])
Example #17
def test_lif_step(upsample, n_elements):
    """Test the lif nonlinearity, comparing one step with the Numpy version."""
    rng = np.random

    dt = 1e-3
    n_neurons = [12345, 23456, 34567]
    J = RA([rng.normal(scale=1.2, size=n) for n in n_neurons])
    V = RA([rng.uniform(low=0, high=1, size=n) for n in n_neurons])
    W = RA([rng.uniform(low=-5 * dt, high=5 * dt, size=n) for n in n_neurons])
    OS = RA([np.zeros(n) for n in n_neurons])

    ref = 2e-3
    taus = list(rng.uniform(low=15e-3, high=80e-3, size=len(n_neurons)))

    queue = cl.CommandQueue(ctx)
    clJ = CLRA(queue, J)
    clV = CLRA(queue, V)
    clW = CLRA(queue, W)
    clOS = CLRA(queue, OS)
    clTau = CLRA(queue, RA(taus))

    # simulate host
    nls = [LIF(tau_ref=ref, tau_rc=taus[i]) for i, n in enumerate(n_neurons)]
    for i, nl in enumerate(nls):
        if upsample <= 1:
            nl.step_math(dt, J[i], OS[i], V[i], W[i])
        else:
            s = np.zeros_like(OS[i])
            for j in range(upsample):
                nl.step_math(dt / upsample, J[i], s, V[i], W[i])
                OS[i] = (1. / dt) * ((OS[i] > 0) | (s > 0))

    # simulate device
    plan = plan_lif(queue,
                    clJ,
                    clV,
                    clW,
                    clV,
                    clW,
                    clOS,
                    ref,
                    clTau,
                    dt,
                    n_elements=n_elements,
                    upsample=upsample)
    plan()

    if 1:
        a, b = V, clV
        for i in range(len(a)):
            nc, _ = not_close(a[i], b[i]).nonzero()
            if len(nc) > 0:
                j = nc[0]
                print("i", i, "j", j)
                print("J", J[i][j], clJ[i][j])
                print("V", V[i][j], clV[i][j])
                print("W", W[i][j], clW[i][j])
                print("...", len(nc) - 1, "more")

    n_spikes = np.sum([np.sum(os) for os in OS])
    if n_spikes < 1.0:
        logger.warning("LIF spiking mechanism was not tested!")
    assert ra.allclose(J, clJ.to_host())
    assert ra.allclose(V, clV.to_host())
    assert ra.allclose(W, clW.to_host())
    assert ra.allclose(OS, clOS.to_host())
Example #18
#!/usr/bin/env python

import nengo
from nengo.neurons import LIF

import matplotlib.pyplot as plt

model = nengo.Network(label="Scalar Representation")

with model:
    s = nengo.Ensemble(100, dimensions=1, radius=1, neuron_type=LIF())

    i = nengo.Node(output=0.6)

    nengo.Connection(i, s)

    i_probe = nengo.Probe(i, synapse=0.01)
    s_probe = nengo.Probe(s, synapse=0.01)

sim = nengo.Simulator(model)
sim.run(10)

t = sim.trange()
plt.plot(t, sim.data[i_probe], label="Scalar Input")
plt.plot(t, sim.data[s_probe], label="Decoded output")
plt.legend()
plt.ylim(0, 3)
plt.show()
Example #19
def test_mergeable():
    # anything is mergeable with an empty list
    assert mergeable(None, [])

    # ops with different numbers of sets/incs/reads/updates are not mergeable
    assert not mergeable(DummyOp(sets=[DummySignal()]), [DummyOp()])
    assert not mergeable(DummyOp(incs=[DummySignal()]), [DummyOp()])
    assert not mergeable(DummyOp(reads=[DummySignal()]), [DummyOp()])
    assert not mergeable(DummyOp(updates=[DummySignal()]), [DummyOp()])
    assert mergeable(DummyOp(sets=[DummySignal()]),
                     [DummyOp(sets=[DummySignal()])])

    # check matching dtypes
    assert not mergeable(DummyOp(sets=[DummySignal(dtype=np.float32)]),
                         [DummyOp(sets=[DummySignal(dtype=np.float64)])])

    # shape mismatch
    assert not mergeable(DummyOp(sets=[DummySignal(shape=(1, 2))]),
                         [DummyOp(sets=[DummySignal(shape=(1, 3))])])

    # display shape mismatch
    assert not mergeable(
        DummyOp(sets=[DummySignal(base_shape=(2, 2), shape=(4, 1))]),
        [DummyOp(sets=[DummySignal(base_shape=(2, 2), shape=(1, 4))])])

    # first dimension mismatch
    assert mergeable(DummyOp(sets=[DummySignal(shape=(3, 2))]),
                     [DummyOp(sets=[DummySignal(shape=(4, 2))])])

    # Copy (inc must match)
    assert mergeable(Copy(DummySignal(), DummySignal(), inc=True),
                     [Copy(DummySignal(), DummySignal(), inc=True)])
    assert not mergeable(Copy(DummySignal(), DummySignal(), inc=True),
                         [Copy(DummySignal(), DummySignal(), inc=False)])

    # elementwise (first dimension must match)
    assert mergeable(
        ElementwiseInc(DummySignal(), DummySignal(), DummySignal()),
        [ElementwiseInc(DummySignal(), DummySignal(), DummySignal())])
    assert mergeable(
        ElementwiseInc(DummySignal(shape=(1,)), DummySignal(), DummySignal()),
        [ElementwiseInc(DummySignal(shape=()), DummySignal(), DummySignal())])
    assert not mergeable(
        ElementwiseInc(DummySignal(shape=(3,)), DummySignal(), DummySignal()),
        [ElementwiseInc(DummySignal(shape=(2,)), DummySignal(),
                        DummySignal())])

    # simpyfunc (t input must match)
    time = DummySignal()
    assert mergeable(SimPyFunc(None, None, time, None),
                     [SimPyFunc(None, None, time, None)])
    assert mergeable(SimPyFunc(None, None, None, DummySignal()),
                     [SimPyFunc(None, None, None, DummySignal())])
    assert not mergeable(SimPyFunc(None, None, DummySignal(), None),
                         [SimPyFunc(None, None, None, DummySignal())])

    # simneurons
    # check matching TF_NEURON_IMPL
    assert mergeable(SimNeurons(LIF(), DummySignal(), DummySignal()),
                     [SimNeurons(LIF(), DummySignal(), DummySignal())])
    assert not mergeable(SimNeurons(LIF(), DummySignal(), DummySignal()),
                         [SimNeurons(LIFRate(), DummySignal(), DummySignal())])

    # check custom with non-custom implementation
    assert not mergeable(SimNeurons(LIF(), DummySignal(), DummySignal()),
                         [SimNeurons(Izhikevich(), DummySignal(),
                                     DummySignal())])

    # check non-custom matching
    assert not mergeable(
        SimNeurons(Izhikevich(), DummySignal(), DummySignal()),
        [SimNeurons(AdaptiveLIF(), DummySignal(), DummySignal())])
    assert not mergeable(
        SimNeurons(Izhikevich(), DummySignal(), DummySignal(),
                   states=[DummySignal(dtype=np.float32)]),
        [SimNeurons(Izhikevich(), DummySignal(), DummySignal(),
                    states=[DummySignal(dtype=np.int32)])])
    assert mergeable(
        SimNeurons(Izhikevich(), DummySignal(), DummySignal(),
                   states=[DummySignal(shape=(3,))]),
        [SimNeurons(Izhikevich(), DummySignal(), DummySignal(),
                    states=[DummySignal(shape=(2,))])])
    assert not mergeable(
        SimNeurons(Izhikevich(), DummySignal(), DummySignal(),
                   states=[DummySignal(shape=(2, 1))]),
        [SimNeurons(Izhikevich(), DummySignal(), DummySignal(),
                    states=[DummySignal(shape=(2, 2))])])

    # simprocess
    # mode must match
    assert not mergeable(
        SimProcess(Lowpass(0), None, None, DummySignal(), mode="inc"),
        [SimProcess(Lowpass(0), None, None, DummySignal(), mode="set")])

    # check matching TF_PROCESS_IMPL
    # note: we only have one item in TF_PROCESS_IMPL at the moment, so no
    # such thing as a mismatch
    assert mergeable(SimProcess(Lowpass(0), None, None, DummySignal()),
                     [SimProcess(Lowpass(0), None, None, DummySignal())])

    # check custom vs non custom
    assert not mergeable(SimProcess(Lowpass(0), None, None, DummySignal()),
                         [SimProcess(Alpha(0), None, None, DummySignal())])

    # check non-custom matching
    assert mergeable(SimProcess(Triangle(0), None, None, DummySignal()),
                     [SimProcess(Alpha(0), None, None, DummySignal())])

    # simtensornode
    a = SimTensorNode(None, DummySignal(), None, DummySignal())
    assert not mergeable(a, [a])

    # learning rules
    a = SimBCM(DummySignal((4,)), DummySignal(), DummySignal(), DummySignal(),
               DummySignal())
    b = SimBCM(DummySignal((5,)), DummySignal(), DummySignal(), DummySignal(),
               DummySignal())
    assert not mergeable(a, [b])
Example #20
class Ensemble(NengoObject):
    """A group of neurons that collectively represent a vector.

    Parameters
    ----------
    n_neurons : int
        The number of neurons.
    dimensions : int
        The number of representational dimensions.

    radius : int, optional (Default: 1.0)
        The representational radius of the ensemble.
    encoders : Distribution or (n_neurons, dimensions) array_like, optional \
               (Default: UniformHypersphere(surface=True))
        The encoders used to transform from representational space
        to neuron space. Each row is a neuron's encoder; each column is a
        representational dimension.
    intercepts : Distribution or (n_neurons,) array_like, optional \
                 (Default: ``nengo.dists.Uniform(-1.0, 1.0)``)
        The point along each neuron's encoder where its activity is zero. If
        ``e`` is the neuron's encoder, then the activity will be zero when
        ``dot(x, e) <= c``, where ``c`` is the given intercept.
    max_rates : Distribution or (n_neurons,) array_like, optional \
                (Default: ``nengo.dists.Uniform(200, 400)``)
        The activity of each neuron when the input signal ``x`` is magnitude 1
        and aligned with that neuron's encoder ``e``;
        i.e., when ``dot(x, e) = 1``.
    eval_points : Distribution or (n_eval_points, dims) array_like, optional \
                  (Default: ``nengo.dists.UniformHypersphere()``)
        The evaluation points used for decoder solving, spanning the interval
        (-radius, radius) in each dimension, or a distribution from which
        to choose evaluation points.
    n_eval_points : int, optional (Default: None)
        The number of evaluation points to be drawn from the ``eval_points``
        distribution. If None, then a heuristic is used to determine
        the number of evaluation points.
    neuron_type : `~nengo.neurons.NeuronType`, optional \
                  (Default: ``nengo.LIF()``)
        The model that simulates all neurons in the ensemble
        (see `~nengo.neurons.NeuronType`).
    gain : Distribution or (n_neurons,) array_like (Default: None)
        The gains associated with each neuron in the ensemble. If None, then
        the gain will be solved for using ``max_rates`` and ``intercepts``.
    bias : Distribution or (n_neurons,) array_like (Default: None)
        The biases associated with each neuron in the ensemble. If None, then
        the bias will be solved for using ``max_rates`` and ``intercepts``.
    noise : Process, optional (Default: None)
        Random noise injected directly into each neuron in the ensemble
        as current. A sample is drawn for each individual neuron on
        every simulation step.
    normalize_encoders : bool, optional (Default: True)
        Indicates whether the encoders should be normalized.
    label : str, optional (Default: None)
        A name for the ensemble. Used for debugging and visualization.
    seed : int, optional (Default: None)
        The seed used for random number generation.

    Attributes
    ----------
    bias : Distribution or (n_neurons,) array_like or None
        The biases associated with each neuron in the ensemble.
    dimensions : int
        The number of representational dimensions.
    encoders : Distribution or (n_neurons, dimensions) array_like
        The encoders, used to transform from representational space
        to neuron space. Each row is a neuron's encoder, each column is a
        representational dimension.
    eval_points : Distribution or (n_eval_points, dims) array_like
        The evaluation points used for decoder solving, spanning the interval
        (-radius, radius) in each dimension, or a distribution from which
        to choose evaluation points.
    gain : Distribution or (n_neurons,) array_like or None
        The gains associated with each neuron in the ensemble.
    intercepts : Distribution or (n_neurons) array_like or None
        The point along each neuron's encoder where its activity is zero. If
        ``e`` is the neuron's encoder, then the activity will be zero when
        ``dot(x, e) <= c``, where ``c`` is the given intercept.
    label : str or None
        A name for the ensemble. Used for debugging and visualization.
    max_rates : Distribution or (n_neurons,) array_like or None
        The activity of each neuron when ``dot(x, e) = 1``,
        where ``e`` is the neuron's encoder.
    n_eval_points : int or None
        The number of evaluation points to be drawn from the ``eval_points``
        distribution. If None, then a heuristic is used to determine
        the number of evaluation points.
    n_neurons : int or None
        The number of neurons.
    neuron_type : NeuronType
        The model that simulates all neurons in the ensemble
        (see ``nengo.neurons``).
    noise : Process or None
        Random noise injected directly into each neuron in the ensemble
        as current. A sample is drawn for each individual neuron on
        every simulation step.
    radius : int
        The representational radius of the ensemble.
    seed : int or None
        The seed used for random number generation.
    """

    probeable = ('decoded_output', 'input', 'scaled_encoders')

    n_neurons = IntParam('n_neurons', low=1)
    dimensions = IntParam('dimensions', low=1)
    radius = NumberParam('radius', default=1.0, low=1e-10)
    encoders = DistOrArrayParam('encoders',
                                default=UniformHypersphere(surface=True),
                                sample_shape=('n_neurons', 'dimensions'))
    intercepts = DistOrArrayParam('intercepts',
                                  default=Uniform(-1.0, 1.0),
                                  optional=True,
                                  sample_shape=('n_neurons', ))
    max_rates = DistOrArrayParam('max_rates',
                                 default=Uniform(200, 400),
                                 optional=True,
                                 sample_shape=('n_neurons', ))
    eval_points = DistOrArrayParam('eval_points',
                                   default=UniformHypersphere(),
                                   sample_shape=('*', 'dimensions'))
    n_eval_points = IntParam('n_eval_points', default=None, optional=True)
    neuron_type = NeuronTypeParam('neuron_type', default=LIF())
    gain = DistOrArrayParam('gain',
                            default=None,
                            optional=True,
                            sample_shape=('n_neurons', ))
    bias = DistOrArrayParam('bias',
                            default=None,
                            optional=True,
                            sample_shape=('n_neurons', ))
    noise = ProcessParam('noise', default=None, optional=True)
    normalize_encoders = BoolParam('normalize_encoders',
                                   default=True,
                                   optional=True)

    def __init__(self,
                 n_neurons,
                 dimensions,
                 radius=Default,
                 encoders=Default,
                 intercepts=Default,
                 max_rates=Default,
                 eval_points=Default,
                 n_eval_points=Default,
                 neuron_type=Default,
                 gain=Default,
                 bias=Default,
                 noise=Default,
                 normalize_encoders=Default,
                 label=Default,
                 seed=Default):
        super(Ensemble, self).__init__(label=label, seed=seed)
        self.n_neurons = n_neurons
        self.dimensions = dimensions
        self.radius = radius
        self.encoders = encoders
        self.intercepts = intercepts
        self.max_rates = max_rates
        self.n_eval_points = n_eval_points
        self.eval_points = eval_points
        self.bias = bias
        self.gain = gain
        self.neuron_type = neuron_type
        self.noise = noise
        self.normalize_encoders = normalize_encoders

    def __getitem__(self, key):
        return ObjView(self, key)

    def __len__(self):
        return self.dimensions

    @property
    def neurons(self):
        """A direct interface to the neurons in the ensemble."""
        return Neurons(self)

    @neurons.setter
    def neurons(self, dummy):
        raise ReadonlyError(attr="neurons", obj=self)

    @property
    def size_in(self):
        """The dimensionality of the ensemble."""
        return self.dimensions

    @property
    def size_out(self):
        """The dimensionality of the ensemble."""
        return self.dimensions
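A short usage sketch for the class documented above; the specific numbers are illustrative only. It constructs an ensemble with explicit LIF parameters and distributions for max_rates and intercepts, and probes a neuron-level state through ens.neurons:

import nengo
from nengo.neurons import LIF

with nengo.Network(seed=0) as net:
    ens = nengo.Ensemble(
        n_neurons=50,
        dimensions=2,
        radius=1.5,
        neuron_type=LIF(tau_rc=0.02, tau_ref=0.002),
        max_rates=nengo.dists.Uniform(100, 200),
        intercepts=nengo.dists.Uniform(-0.5, 0.5),
        label="example",
    )
    voltage_probe = nengo.Probe(ens.neurons, "voltage")

print(len(ens))                   # == dimensions
print(ens.size_in, ens.size_out)  # both equal dimensions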