Example #1
def _basic_model():
    model = Model()

    block0 = LoihiBlock(1)
    block0.compartment.configure_lif()
    model.add_block(block0)

    block1 = LoihiBlock(1)
    block1.compartment.configure_lif()
    model.add_block(block1)

    axon1 = Axon(1)
    block0.add_axon(axon1)

    synapse1 = Synapse(1)
    synapse1.set_full_weights([[1]])
    axon1.target = synapse1
    block1.add_synapse(synapse1)

    axon0 = Axon(1)
    input = LoihiInput()
    input.add_axon(axon0)
    model.add_input(input)

    synapse0 = Synapse(1)
    synapse0.set_full_weights([[1]])
    axon0.target = synapse0
    block0.add_synapse(synapse0)

    discretize_model(model)

    return model
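
A minimal usage sketch for the factory above (an assumed workflow, mirroring the emulator runs in the later examples): the returned model is already discretized, so it can be run directly; a LoihiProbe would need to be added before discretize_model to read any output.

model = _basic_model()
with EmulatorInterface(model) as emu:
    emu.run_steps(10)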
Example #2
def test_multiple_get_probe_output():
    n_steps = 15
    n_axons = 3

    model = Model()

    # n_axons controls number of input spikes and thus amount of overflow
    input = SpikeInput(n_axons)
    for t in np.arange(1, n_steps + 1):
        input.add_spikes(t, np.arange(n_axons))  # send spikes to all axons
    model.add_input(input)

    block = LoihiBlock(1)
    block.compartment.configure_relu()
    block.compartment.configure_filter(0.1)
    model.add_block(block)

    synapse = Synapse(n_axons)
    synapse.set_weights(np.ones((n_axons, 1)))
    block.add_synapse(synapse)

    axon = Axon(n_axons)
    axon.target = synapse
    input.add_axon(axon)

    probe_u = LoihiProbe(target=block, key="current", synapse=Lowpass(0.005))
    model.add_probe(probe_u)
    probe_v = LoihiProbe(target=block, key="voltage", synapse=Lowpass(0.005))
    model.add_probe(probe_v)
    probe_s = LoihiProbe(target=block, key="spiked", synapse=Lowpass(0.005))
    model.add_probe(probe_s)

    discretize_model(model)

    # must set these after `discretize` to specify discretized values
    block.compartment.vmin = -(2**22) + 1
    block.compartment.vth[:] = VTH_MAX

    with EmulatorInterface(model) as emu:
        emu.run_steps(n_steps)
        first_u = emu.get_probe_output(probe_u)
        first_v = emu.get_probe_output(probe_v)
        first_s = emu.get_probe_output(probe_s)
        second_u = emu.get_probe_output(probe_u)
        second_v = emu.get_probe_output(probe_v)
        second_s = emu.get_probe_output(probe_s)

    assert np.all(first_u == second_u)
    assert np.all(first_v == second_v)
    assert np.all(first_s == second_s)
Example #3
def test_negative_base(request, seed):
    n_axons = 3

    model = Model()

    input = SpikeInput(n_axons)
    input.add_spikes(1, list(range(n_axons)), permanent=True)
    model.add_input(input)

    axon = Axon(n_axons)
    input.add_axon(axon)

    block = LoihiBlock(3)
    block.compartment.configure_relu()
    model.add_block(block)

    synapse = Synapse(n_axons)
    weights = [0.1, 0.1, 0.1]
    indices = [0, 1, 2]
    axon_to_weight_map = list(range(n_axons))
    bases = [0, 1, -1]
    synapse.set_population_weights(
        weights, indices, axon_to_weight_map, bases, pop_type=32
    )
    axon.target = synapse
    block.add_synapse(synapse)

    probe = LoihiProbe(target=block, key="voltage")
    model.add_probe(probe)

    discretize_model(model)

    n_steps = 2
    if request.config.getoption("--target") == "loihi":
        with HardwareInterface(model, use_snips=False, seed=seed) as sim:
            sim.run_steps(n_steps)
            y = sim.collect_probe_output(probe)
    else:
        with EmulatorInterface(model, seed=seed) as sim:
            sim.run_steps(n_steps)
            y = sim.collect_probe_output(probe)

    # Compartments 0 and 2 should change, due to axons 0 and 1.
    # Axon 2 should have no effect: it should not change compartment 1
    # (the sum of its base and index) or any other compartment
    # (e.g. compartment 2, if its negative base were ignored)
    assert np.allclose(y[1, 1], 0), "Third axon not ignored"
    assert np.allclose(y[1, 0], y[1, 2]), "Third axon targeting another"
    assert not np.allclose(y[1], y[0]), "Voltage not changing"
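
The base/index addressing this test exercises can be sketched in plain NumPy; the semantics below are inferred from the assertions above, not taken from the actual Synapse implementation.

import numpy as np

weights = np.array([0.1, 0.1, 0.1])
indices = np.array([0, 1, 2])
axon_to_weight_map = [0, 1, 2]
bases = [0, 1, -1]

v = np.zeros(3)
for axon in range(3):
    if bases[axon] < 0:  # negative base: the axon is ignored
        continue
    m = axon_to_weight_map[axon]
    v[bases[axon] + indices[m]] += weights[m]

# axons 0 and 1 hit compartments 0 and 2; axon 2 is dropped
assert v[1] == 0 and v[0] == v[2] > 0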
Example #4
def test_bad_bias_scaling_error(Simulator):
    block = LoihiBlock(10)
    block.compartment.configure_lif(tau_rc=5., vth=1e8)
    block.compartment.bias[:] = 1000.

    with pytest.raises(BuildError, match="[Cc]ould not find.*bias scaling"):
        discretize_block(block)
Example #5
def _basic_model(n_blocks=2):
    model = Model()

    blocks = []
    for _ in range(n_blocks):
        block = LoihiBlock(1)
        block.compartment.configure_lif()
        model.add_block(block)
        blocks.append(block)

    for i in range(n_blocks - 1):
        axon = Axon(1)
        blocks[i].add_axon(axon)

        synapse = Synapse(1)
        synapse.set_weights([[1]])
        axon.target = synapse
        blocks[i + 1].add_synapse(synapse)

    axon0 = Axon(1)
    input = LoihiInput()
    input.add_axon(axon0)
    model.add_input(input)

    synapse0 = Synapse(1)
    synapse0.set_weights([[1]])
    axon0.target = synapse0
    blocks[0].add_synapse(synapse0)

    discretize_model(model)

    return model
Example #6
    def get_block(self, weights, block_label=None, syn_label=None):
        gain = self.gain * self.dt
        bias = self.bias * self.dt

        d, n = weights.shape
        n_neurons = 2 * d * self.pairs_per_dim
        block = LoihiBlock(n_neurons, label=block_label)
        block.compartment.configure_relu(dt=self.dt)
        block.compartment.bias[:] = bias.repeat(d)

        syn = Synapse(n, label=syn_label)
        weights2 = []
        for ga, gb in gain.reshape(self.pairs_per_dim, 2):
            weights2.extend([ga * weights.T, -gb * weights.T])
        weights2 = np.hstack(weights2)
        syn.set_full_weights(weights2)
        block.add_synapse(syn)

        return block, syn
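
A standalone sketch of the positive/negative column stacking above, with shapes assumed from d, n = weights.shape: each gain pair contributes one positive and one negative (n, d) block, giving the n_neurons = 2 * d * pairs_per_dim output columns.

import numpy as np

d, n, pairs_per_dim = 2, 5, 3
weights = np.ones((d, n))
gain = np.linspace(0.5, 1.0, 2 * pairs_per_dim)

weights2 = []
for ga, gb in gain.reshape(pairs_per_dim, 2):
    weights2.extend([ga * weights.T, -gb * weights.T])
weights2 = np.hstack(weights2)

assert weights2.shape == (n, 2 * d * pairs_per_dim)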
Example #7
    def get_block(self, weights, block_label=None, syn_label=None):
        gain = self.gain * self.dt
        bias = self.bias * self.dt

        n, d = weights.shape
        n_neurons = 2 * d * self.pairs_per_dim
        block = LoihiBlock(n_neurons, label=block_label)
        block.compartment.configure_relu(dt=self.dt)
        block.compartment.bias[:] = bias.repeat(d)

        syn = Synapse(n, label=syn_label)
        weights2 = []
        for ga, gb in gain.reshape(self.pairs_per_dim, 2):
            weights2.extend([scale_matrix(weights, ga), scale_matrix(weights, -gb)])
        weights2 = stack_matrices(weights2, order="h")
        syn.set_weights(weights2)
        block.add_synapse(syn)

        return block, syn
Example #8
def test_utilization():
    comp_fracs = [0.9, 0.2, 0.35]

    model = Model()

    for comp_frac in comp_fracs:
        n_compartments = int(round(comp_frac * MAX_COMPARTMENTS))
        block = LoihiBlock(n_compartments)
        block.compartment.configure_relu()
        model.add_block(block)

        util = block.utilization()
        assert np.allclose(
            util["compartments"], (n_compartments, MAX_COMPARTMENTS), rtol=0, atol=0.001
        )

    lines = model.utilization_summary()
    assert len(lines) == len(comp_fracs) + 1
    assert lines[-1].startswith("Average")
Example #9
def test_dtype_errors():
    block = LoihiBlock(1)
    block_info = BlockInfo([block])
    block.compartment.vth = block.compartment.vth.astype(np.float64)

    with pytest.raises(ValueError, match="dtype.*not supported"):
        CompartmentState(block_info)
    with pytest.raises(ValueError, match="dtype.*not supported"):
        NoiseState(block_info)
    with pytest.raises(ValueError, match="dtype.*not supported"):
        SynapseState(block_info)
Example #10
def build_ensemble(model, ens):
    if isinstance(ens.neuron_type, nengo.Direct):
        raise NotImplementedError("Direct neurons not implemented")

    # Create random number generator
    rng = np.random.RandomState(model.seeds[ens])

    eval_points = gen_eval_points(ens, ens.eval_points, rng=rng)

    # Set up encoders
    if isinstance(ens.encoders, Distribution):
        encoders = get_samples(ens.encoders,
                               ens.n_neurons,
                               ens.dimensions,
                               rng=rng)
    else:
        encoders = npext.array(ens.encoders, min_dims=2, dtype=np.float64)

    if ens.normalize_encoders:
        encoders /= npext.norm(encoders, axis=1, keepdims=True)

    # Build the neurons
    gain, bias, max_rates, intercepts = get_gain_bias(ens, rng,
                                                      model.intercept_limit)

    block = LoihiBlock(ens.n_neurons, label='%s' % ens)
    block.compartment.bias[:] = bias
    model.build(ens.neuron_type, ens.neurons, block)

    # set default filter just in case no other filter gets set
    block.compartment.configure_default_filter(model.decode_tau, dt=model.dt)

    if ens.noise is not None:
        raise NotImplementedError("Ensemble noise not implemented")

    # Scale the encoders
    # we exclude the radius to keep scaling reasonable for decode neurons
    scaled_encoders = encoders * gain[:, np.newaxis]

    model.add_block(block)

    model.objs[ens]['in'] = block
    model.objs[ens]['out'] = block
    model.objs[ens.neurons]['in'] = block
    model.objs[ens.neurons]['out'] = block
    model.params[ens] = BuiltEnsemble(eval_points=eval_points,
                                      encoders=encoders,
                                      intercepts=intercepts,
                                      max_rates=max_rates,
                                      scaled_encoders=scaled_encoders,
                                      gain=gain,
                                      bias=bias)
Example #11
def test_strings():
    block = LoihiBlock(3, label="myBlock")
    assert str(block) == "LoihiBlock(myBlock)"
    assert str(block.compartment) == "Compartment()"

    synapse = Synapse(2, label="mySynapse")
    assert str(synapse) == "Synapse(mySynapse)"

    axon = Axon(2, label="myAxon")
    assert str(axon) == "Axon(myAxon)"

    spike = Axon.Spike(axon_id=7, atom=2)
    assert str(spike) == "Spike(axon_id=7, atom=2)"
Example #12
def test_strict_mode(strict, monkeypatch):
    # Tests should be run in strict mode
    assert EmulatorInterface.strict

    model = Model()
    model.add_block(LoihiBlock(1))

    monkeypatch.setattr(EmulatorInterface, "strict", strict)
    emu = EmulatorInterface(model)
    assert emu.strict == strict

    if strict:
        check = pytest.raises(SimulationError, match="Error in emulator")
    else:
        check = pytest.warns(UserWarning)

    with check:
        emu.compartment.error("Error in emulator")
Example #13
def test_compartment_errors():
    block = LoihiBlock(90)

    # set filter to a very large value so current scaling can't be applied
    with pytest.raises(BuildError, match="[Cc]urrent.*scaling"):
        block.compartment.configure_filter(1e6)

    # set to a value when previously configured to larger value
    block.compartment.configure_filter(0.006)
    with pytest.warns(UserWarning, match="tau_s.*larger"):
        block.compartment.configure_filter(0.004)

    # set to a value when previously configured to smaller value
    block.compartment.configure_filter(0.003)
    with pytest.warns(UserWarning, match="tau_s.*smaller"):
        block.compartment.configure_filter(0.007)

    # set tau_rc to a very large value so voltage scaling can't be applied
    with pytest.raises(BuildError, match="[Vv]oltage.*scaling"):
        block.compartment.configure_lif(tau_rc=1e6)
Example #14
def test_builder_poptype_errors():
    pytest.importorskip("nxsdk")

    # Test error in build_synapse
    model = Model()
    block = LoihiBlock(1)
    block.compartment.configure_lif()
    model.add_block(block)

    synapse = Synapse(1)
    synapse.set_weights([[1]])
    synapse.pop_type = 8
    block.add_synapse(synapse)

    discretize_model(model)

    allocator = Greedy()  # one core per ensemble
    board = allocator(model, n_chips=1)

    with pytest.raises(ValueError, match="[Ss]ynapse.*[Uu]nrec.*pop.*type"):
        build_board(board)

    # Test error in collect_axons
    model = Model()
    block0 = LoihiBlock(1)
    block0.compartment.configure_lif()
    model.add_block(block0)
    block1 = LoihiBlock(1)
    block1.compartment.configure_lif()
    model.add_block(block1)

    axon = Axon(1)
    block0.add_axon(axon)

    synapse = Synapse(1)
    synapse.set_weights([[1]])
    synapse.pop_type = 8
    axon.target = synapse
    block1.add_synapse(synapse)

    discretize_model(model)

    board = allocator(model, n_chips=1)

    with pytest.raises(ValueError, match="[Aa]xon.*[Uu]nrec.*pop.*type"):
        build_board(board)
Example #15
def test_one_to_one_allocator_big_block_error():
    model = Model()
    model.add_block(LoihiBlock(1050))

    with pytest.raises(ValidationError):
        OneToOne()(model)
Example #16
def test_uv_overflow(n_axons, plt, allclose, monkeypatch):
    # TODO: Currently this is not testing the V overflow, since it is higher
    #  and I haven't been able to figure out a way to make it overflow.
    nt = 15

    model = Model()

    # n_axons controls number of input spikes and thus amount of overflow
    input = SpikeInput(n_axons)
    for t in np.arange(1, nt + 1):
        # send spikes to all axons
        input.add_spikes(t, np.arange(n_axons), permanent=True)
    model.add_input(input)

    block = LoihiBlock(1)
    block.compartment.configure_relu()
    block.compartment.configure_filter(0.1)
    model.add_block(block)

    synapse = Synapse(n_axons)
    synapse.set_weights(np.ones((n_axons, 1)))
    block.add_synapse(synapse)

    axon = Axon(n_axons)
    axon.target = synapse
    input.add_axon(axon)

    probe_u = LoihiProbe(target=block, key="current")
    model.add_probe(probe_u)
    probe_v = LoihiProbe(target=block, key="voltage")
    model.add_probe(probe_v)
    probe_s = LoihiProbe(target=block, key="spiked")
    model.add_probe(probe_s)

    discretize_model(model)

    # must set these after `discretize` to specify discretized values
    block.compartment.vmin = -(2**22) + 1
    block.compartment.vth[:] = VTH_MAX

    assert EmulatorInterface.strict  # Tests should be run in strict mode
    monkeypatch.setattr(EmulatorInterface, "strict", False)
    overflow_var = "q0" if n_axons == 1000 else "current"
    with EmulatorInterface(model) as emu:
        with pytest.warns(UserWarning, match=f"Overflow in {overflow_var}"):
            emu.run_steps(nt)
        emu_u = emu.collect_probe_output(probe_u)
        emu_v = emu.collect_probe_output(probe_v)
        emu_s = emu.collect_probe_output(probe_s)

    with HardwareInterface(model, use_snips=False) as sim:
        sim.run_steps(nt)
        sim_u = sim.collect_probe_output(probe_u)
        sim_v = sim.collect_probe_output(probe_v)
        sim_s = sim.collect_probe_output(probe_s)
        sim_v[sim_s > 0] = 0  # since Loihi has placeholder voltage after spike

    plt.subplot(311)
    plt.plot(emu_u)
    plt.plot(sim_u)

    plt.subplot(312)
    plt.plot(emu_v)
    plt.plot(sim_v)

    plt.subplot(313)
    plt.plot(emu_s)
    plt.plot(sim_s)

    assert allclose(emu_u, sim_u)
    assert allclose(emu_v, sim_v)
Example #17
def build_connection(model, conn):
    if nengo_transforms is not None:
        if isinstance(conn.transform, nengo_transforms.Convolution):
            # TODO: integrate these into the same function
            conv.build_conv2d_connection(model, conn)
            return
        elif not isinstance(conn.transform, nengo_transforms.Dense):
            raise NotImplementedError(
                "nengo-loihi does not yet support %s transforms"
                % conn.transform)

    # Create random number generator
    rng = np.random.RandomState(model.seeds[conn])

    pre_cx = model.objs[conn.pre_obj]['out']
    post_cx = model.objs[conn.post_obj]['in']
    assert isinstance(pre_cx, (LoihiBlock, LoihiInput))
    assert isinstance(post_cx, (LoihiBlock, Probe))

    weights = None
    eval_points = None
    solver_info = None
    neuron_type = None
    post_slice = conn.post_slice

    # sample transform (if using a distribution)
    transform = sample_transform(conn, rng=rng)

    tau_s = 0.0  # `synapse is None` gets mapped to `tau_s = 0.0`
    if isinstance(conn.synapse, nengo.synapses.Lowpass):
        tau_s = conn.synapse.tau
    elif conn.synapse is not None:
        raise NotImplementedError("Cannot handle non-Lowpass synapses")

    needs_decode_neurons = False
    target_encoders = None
    if isinstance(conn.pre_obj, Node):
        assert conn.pre_slice == slice(None)

        if np.array_equal(transform, np.array(1.)):
            # TODO: this identity transform may be avoidable
            transform = np.eye(conn.pre.size_out)
        else:
            assert transform.ndim == 2, "transform shape not handled yet"
            assert transform.shape[1] == conn.pre.size_out

        assert transform.shape[1] == conn.pre.size_out
        if isinstance(conn.pre_obj, ChipReceiveNeurons):
            weights = transform / model.dt
            neuron_type = conn.pre_obj.neuron_type
        else:
            # input is on-off neuron encoded, so double/flip transform
            weights = np.column_stack([transform, -transform])
            target_encoders = 'node_encoders'
    elif (isinstance(conn.pre_obj, Ensemble)
          and isinstance(conn.pre_obj.neuron_type, nengo.Direct)):
        raise NotImplementedError()
    elif isinstance(conn.pre_obj, Ensemble):  # Normal decoded connection
        eval_points, decoders, solver_info = model.build(
            conn.solver, conn, rng, transform)

        if conn.solver.weights and not conn.solver.compositional:
            weights = decoders
        else:
            weights = multiply(transform, decoders)

        # the decoder solver assumes a spike height of 1/dt; that isn't the
        # case on loihi, so we need to undo that scaling
        weights = weights / model.dt

        neuron_type = conn.pre_obj.neuron_type

        if conn.solver.weights:
            # weight solvers only allowed on ensemble->ensemble connections
            assert isinstance(conn.post_obj, Ensemble)

            if conn.solver.compositional:
                encoders = model.params[conn.post_obj].scaled_encoders.T
                encoders = encoders[post_slice]
                weights = multiply(encoders.T, weights)

            # post slice already applied to encoders (either here or in
            # `build_decoders`), so don't apply later
            post_slice = None
        else:
            needs_decode_neurons = True
    elif isinstance(conn.pre_obj, Neurons):
        assert conn.pre_slice == slice(None)
        assert transform.ndim == 2, "transform shape not handled yet"
        weights = transform / model.dt
        neuron_type = conn.pre_obj.ensemble.neuron_type
    else:
        raise NotImplementedError("Connection from type %r" % (
            type(conn.pre_obj),))

    if neuron_type is not None and hasattr(neuron_type, 'amplitude'):
        weights = weights * neuron_type.amplitude

    mid_cx = pre_cx
    mid_axon_inds = None
    post_tau = tau_s
    if needs_decode_neurons and not isinstance(conn.post_obj, Neurons):
        # --- add decode neurons
        assert weights.ndim == 2
        d, n = weights.shape

        if isinstance(post_cx, Probe):
            # use non-spiking decode neurons for voltage probing
            assert post_cx.target is None
            assert post_slice == slice(None)

            # use the same scaling as the ensemble does, to get good
            #  decodes.  Note that this assumes that the decoded value
            #  is in the range -radius to radius, which is usually true.
            weights = weights / conn.pre_obj.radius

            gain = 1
            dec_cx = LoihiBlock(2 * d, label='%s' % conn)
            dec_cx.compartment.configure_nonspiking(
                dt=model.dt, vth=model.vth_nonspiking)
            dec_cx.compartment.bias[:] = 0
            model.add_block(dec_cx)
            model.objs[conn]['decoded'] = dec_cx

            dec_syn = Synapse(n, label="probe_decoders")
            weights2 = gain * np.vstack([weights, -weights]).T

            dec_syn.set_full_weights(weights2)
            dec_cx.add_synapse(dec_syn)
            model.objs[conn]['decoders'] = dec_syn
        else:
            # use spiking decode neurons for on-chip connection
            if isinstance(conn.post_obj, Ensemble):
                # loihi encoders don't include radius, so handle scaling here
                weights = weights / conn.post_obj.radius

            post_d = conn.post_obj.size_in
            post_inds = np.arange(post_d, dtype=np.int32)[post_slice]
            assert weights.shape[0] == len(post_inds) == conn.size_out == d
            mid_axon_inds = model.decode_neurons.get_post_inds(
                post_inds, post_d)

            target_encoders = 'decode_neuron_encoders'
            dec_cx, dec_syn = model.decode_neurons.get_block(
                weights, block_label="%s" % conn, syn_label="decoders")

            model.add_block(dec_cx)
            model.objs[conn]['decoded'] = dec_cx
            model.objs[conn]['decoders'] = dec_syn

        # use tau_s for filter into decode neurons, decode_tau for filter out
        dec_cx.compartment.configure_filter(tau_s, dt=model.dt)
        post_tau = model.decode_tau

        dec_ax0 = Axon(n, label="decoders")
        dec_ax0.target = dec_syn
        pre_cx.add_axon(dec_ax0)
        model.objs[conn]['decode_axon'] = dec_ax0

        if conn.learning_rule_type is not None:
            rule_type = conn.learning_rule_type
            if isinstance(rule_type, nengo.PES):
                if not isinstance(rule_type.pre_synapse,
                                  nengo.synapses.Lowpass):
                    raise ValidationError(
                        "Loihi only supports `Lowpass` pre-synapses for "
                        "learning rules", attr='pre_synapse', obj=rule_type)

                tracing_tau = rule_type.pre_synapse.tau / model.dt

                # Nengo builder scales PES learning rate by `dt / n_neurons`
                n_neurons = (conn.pre_obj.n_neurons
                             if isinstance(conn.pre_obj, Ensemble)
                             else conn.pre_obj.size_in)
                learning_rate = rule_type.learning_rate * model.dt / n_neurons

                # Account for scaling to put integer error in range [-127, 127]
                learning_rate /= model.pes_error_scale

                # Tracing mag set so that the magnitude of the pre trace
                # is independent of the pre tau. `dt` factor accounts for
                # Nengo's `dt` spike scaling. Where is the second `dt` from?
                # Maybe the fact that post decode neurons have `vth = 1/dt`?
                tracing_mag = -np.expm1(-1. / tracing_tau) / model.dt**2

                # learning weight exponent controls the maximum weight
                # magnitude/weight resolution
                wgt_exp = model.pes_wgt_exp

                dec_syn.set_learning(
                    learning_rate=learning_rate,
                    tracing_mag=tracing_mag,
                    tracing_tau=tracing_tau,
                    wgt_exp=wgt_exp,
                )
            else:
                raise NotImplementedError()

        mid_cx = dec_cx

    if isinstance(post_cx, Probe):
        assert post_cx.target is None
        assert post_slice == slice(None)
        post_cx.target = mid_cx
        mid_cx.add_probe(post_cx)
    elif isinstance(conn.post_obj, Neurons):
        assert isinstance(post_cx, LoihiBlock)
        assert post_slice == slice(None)
        if weights is None:
            raise NotImplementedError("Need weights for connection to neurons")
        else:
            assert weights.ndim == 2
            n2, n1 = weights.shape
            assert post_cx.n_neurons == n2

            syn = Synapse(n1, label="neuron_weights")
            gain = model.params[conn.post_obj.ensemble].gain
            syn.set_full_weights(weights.T * gain)
            post_cx.add_synapse(syn)
            model.objs[conn]['weights'] = syn

        ax = Axon(mid_cx.n_neurons, label="neuron_weights")
        ax.target = syn
        mid_cx.add_axon(ax)

        post_cx.compartment.configure_filter(post_tau, dt=model.dt)

        if conn.learning_rule_type is not None:
            raise NotImplementedError()
    elif isinstance(conn.post_obj, Ensemble) and conn.solver.weights:
        assert isinstance(post_cx, LoihiBlock)
        assert weights.ndim == 2
        n2, n1 = weights.shape
        assert post_cx.n_neurons == n2

        # loihi encoders don't include radius, so handle scaling here
        weights = weights / conn.post_obj.radius

        syn = Synapse(n1, label="%s::decoder_weights" % conn)
        syn.set_full_weights(weights.T)
        post_cx.add_synapse(syn)
        model.objs[conn]['weights'] = syn

        ax = Axon(n1, label="decoder_weights")
        ax.target = syn
        mid_cx.add_axon(ax)

        post_cx.compartment.configure_filter(post_tau, dt=model.dt)

        if conn.learning_rule_type is not None:
            raise NotImplementedError()
    elif isinstance(conn.post_obj, Ensemble):
        assert target_encoders is not None
        if target_encoders not in post_cx.named_synapses:
            build_decode_neuron_encoders(
                model, conn.post_obj, kind=target_encoders)

        mid_ax = Axon(mid_cx.n_neurons, label="encoders")
        mid_ax.target = post_cx.named_synapses[target_encoders]
        mid_ax.set_axon_map(mid_axon_inds)
        mid_cx.add_axon(mid_ax)
        model.objs[conn]['mid_axon'] = mid_ax

        post_cx.compartment.configure_filter(post_tau, dt=model.dt)
    else:
        # This includes Node, since nodes can't be targets on-chip
        raise NotImplementedError()

    model.params[conn] = BuiltConnection(
        eval_points=eval_points,
        solver_info=solver_info,
        transform=transform,
        weights=weights)
Example #18
def split_block(old_block, block_shapes, validate=ValidationLevel.MINIMAL):
    """Break a block apart into smaller blocks, each able to fit on one core"""
    n_compartments = old_block.compartment.n_compartments
    n_in_axons = sum(synapse.n_axons for synapse in old_block.synapses)
    n_out_axons = sum(axon.axon_slots() for axon in old_block.axons)
    synapse_bits = sum(synapse.bits() for synapse in old_block.synapses)

    if block_shapes.get(old_block, None) is None:
        # break block sequentially
        # TODO: account for compartments having different numbers of synapses/axons/etc.
        # Splitting into blocks where each block has the same number of compartments
        # could leave blocks that have more synapses or axons than allowed. But this
        # is rare, and users can work around it by specifying the split shape manually
        n_split = max((
            ceil_div(n_compartments, MAX_COMPARTMENTS),
            ceil_div(n_in_axons, MAX_IN_AXONS),
            ceil_div(n_out_axons, MAX_OUT_AXONS),
            ceil_div(synapse_bits, MAX_SYNAPSE_BITS),
        ))
        block_shapes[old_block] = BlockShape(
            (ceil_div(n_compartments, n_split), ), (n_compartments, ))
    old_block_shape = block_shapes[old_block]
    assert old_block_shape.ensemble_size == old_block.n_neurons

    # find compartment indices for each new block
    new_block_inds = []
    ranges = [range(0, n, i) for n, i in old_block_shape.zip_dimensions()]
    full_inds = np.arange(old_block_shape.ensemble_size).reshape(
        old_block_shape.ensemble_shape)
    for inds0 in itertools.product(*ranges):
        inds1 = np.minimum(inds0 + old_block_shape._shape,
                           old_block_shape._ens_shape)
        indslice = tuple(slice(i0, i1) for i0, i1 in zip(inds0, inds1))
        inds = full_inds[indslice]
        new_block_inds.append(IndicesList(inds.flat))

    assert len(new_block_inds) > 0
    if len(new_block_inds) == 1:
        # if block can fit on one core, just return the current block
        if validate >= ValidationLevel.MINIMAL:
            assert new_block_inds[0].set == set(range(n_compartments))
        new_blocks = [old_block]
        return OrderedDict(zip(new_blocks, new_block_inds))

    # break apart block
    new_blocks = []
    for k, inds in enumerate(new_block_inds):
        n_neurons = len(inds)
        new_block = LoihiBlock(n_neurons)
        if old_block.label is not None:
            ind_array = np.array(list(inds))
            d = np.diff(ind_array)
            if len(d) > 0 and np.all(d[0] == d):
                indstr = "%d:%d:%d" % (ind_array[0], ind_array[-1] + 1, d[0])
            elif len(ind_array) == 1:
                indstr = "%d:%d" % (ind_array[0], ind_array[0] + 1)
            else:
                indstr = str(k)
            new_block.label = "%s[%s]" % (old_block.label, indstr)

        for attr in (
                "decay_u",
                "decay_v",
                "refract_delay",
                "vth",
                "bias",
                "enable_noise",
        ):
            # copy whole array to ensure that we maintain dtype
            setattr(
                new_block.compartment,
                attr,
                getattr(old_block.compartment, attr)[list(inds)].copy(),
            )

        for attr in (
                "tau_s",
                "scale_u",
                "scale_v",
                "vmin",
                "vmax",
                "noise_offset",
                "noise_exp",
                "noise_at_membrane",
        ):
            setattr(new_block.compartment, attr,
                    getattr(old_block.compartment, attr))

        new_blocks.append(new_block)

    logger.info(
        "Split block (%d) into (%s)",
        n_compartments,
        ", ".join(
            str(new_block.compartment.n_compartments)
            for new_block in new_blocks),
    )
    return OrderedDict(zip(new_blocks, new_block_inds))
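
A hypothetical usage sketch (names taken from this file; a 1024-compartment-per-core limit is assumed, as in the validation tests): passing None as the block's shape lets split_block choose a sequential split on its own.

block = LoihiBlock(2048)
block.compartment.configure_relu()

new_blocks = split_block(block, block_shapes={block: None})

# keys are the new (smaller) blocks; values are the compartment indices
# that each new block takes from the original block
for new_block, inds in new_blocks.items():
    print(new_block.n_neurons, len(inds))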
Example #19
def test_conv2d_weights(channels_last, hw_opts, request, plt, seed, rng,
                        allclose):
    def loihi_rates_n(neuron_type, x, gain, bias, dt):
        """Compute Loihi rates on higher dimensional inputs"""
        y = x.reshape(-1, x.shape[-1])
        gain = np.asarray(gain)
        bias = np.asarray(bias)
        if gain.ndim == 0:
            gain = gain * np.ones(x.shape[-1])
        if bias.ndim == 0:
            bias = bias * np.ones(x.shape[-1])
        rates = loihi_rates(neuron_type, y, gain, bias, dt)
        return rates.reshape(*x.shape)

    if channels_last:
        plt.saveas = None
        pytest.xfail("Blocked by CxBase cannot be > 256 bug")

    target = request.config.getoption("--target")
    if target != 'loihi' and len(hw_opts) > 0:
        pytest.skip("Hardware options only available on hardware")

    pop_type = 32

    # load data
    with open(os.path.join(test_dir, 'mnist10.pkl'), 'rb') as f:
        test10 = pickle.load(f)

    test_x = test10[0][0].reshape(28, 28)
    test_x = test_x[3:24, 3:24]
    test_x = 1.999 * test_x - 0.999

    filters = Gabor(freq=Uniform(0.5, 1)).generate(8, (7, 7), rng=rng)
    sti, stj = 2, 2
    tau_rc = 0.02
    tau_ref = 0.002
    tau_s = 0.005
    dt = 0.001

    encode_type = nengo.SpikingRectifiedLinear()
    encode_gain = 1. / dt
    encode_bias = 0.
    neuron_type = nengo.LIF(tau_rc=tau_rc, tau_ref=tau_ref)
    neuron_gain = 1.
    neuron_bias = 1.

    pres_time = 0.2

    # --- compute ideal outputs
    def conv_pm(x, kernel):
        y0 = scipy.signal.correlate2d(x[0], kernel, mode='valid')[::sti, ::stj]
        y1 = scipy.signal.correlate2d(x[1], kernel, mode='valid')[::sti, ::stj]
        return [y0, -y1]

    ref_out = np.array([test_x, -test_x])
    ref_out = loihi_rates_n(encode_type, ref_out, encode_gain, encode_bias, dt)
    ref_out = ref_out / encode_gain
    ref_out = np.array([conv_pm(ref_out, kernel) for kernel in filters])
    ref_out = ref_out.sum(axis=1)  # sum positive and negative parts
    ref_out = loihi_rates_n(neuron_type, ref_out, neuron_gain, neuron_bias, dt)

    # --- compute nengo_loihi outputs
    inp_biases = np.stack([test_x, -test_x], axis=-1 if channels_last else 0)
    inp_shape = nengo_transforms.ChannelShape(inp_biases.shape,
                                              channels_last=channels_last)

    kernel = np.array([filters, -filters])  # two channels, pos and neg
    kernel = np.transpose(kernel, (2, 3, 0, 1))
    conv2d_transform = nengo_transforms.Convolution(
        8,
        inp_shape,
        strides=(sti, stj),
        channels_last=channels_last,
        kernel_size=(7, 7),
        init=kernel)

    out_size = ref_out.size
    nf, nyi, nyj = ref_out.shape
    assert out_size <= 1024

    model = Model()

    # input block
    inp = LoihiBlock(inp_shape.size, label='inp')
    assert inp.n_neurons <= 1024
    inp.compartment.configure_relu()
    inp.compartment.bias[:] = inp_biases.ravel()

    inp_ax = Axon(np.prod(inp_shape.spatial_shape), label='inp_ax')
    inp_ax.set_compartment_axon_map(target_axons=conv.pixel_idxs(inp_shape),
                                    atoms=conv.channel_idxs(inp_shape))
    inp.add_axon(inp_ax)

    model.add_block(inp)

    # conv block
    neurons = LoihiBlock(out_size, label='neurons')
    assert neurons.n_neurons <= 1024
    neurons.compartment.configure_lif(tau_rc=tau_rc, tau_ref=tau_ref, dt=dt)
    neurons.compartment.configure_filter(tau_s, dt=dt)
    neurons.compartment.bias[:] = neuron_bias

    synapse = Synapse(np.prod(inp_shape.spatial_shape), label='synapse')
    weights, indices, axon_to_weight_map, bases = conv.conv2d_loihi_weights(
        conv2d_transform)
    synapse.set_population_weights(weights,
                                   indices,
                                   axon_to_weight_map,
                                   bases,
                                   pop_type=pop_type)

    neurons.add_synapse(synapse)

    out_probe = Probe(target=neurons, key='spiked')
    neurons.add_probe(out_probe)

    inp_ax.target = synapse
    model.add_block(neurons)

    # simulation
    discretize_model(model)

    n_steps = int(pres_time / dt)
    if target == 'loihi':
        with HardwareInterface(model, use_snips=False, seed=seed,
                               **hw_opts) as sim:
            sim.run_steps(n_steps)
            sim_out = sim.get_probe_output(out_probe)
    else:
        with EmulatorInterface(model, seed=seed) as sim:
            sim.run_steps(n_steps)
            sim_out = sim.get_probe_output(out_probe)

    sim_out = np.sum(sim_out, axis=0) / pres_time
    if channels_last:
        sim_out.shape = (nyi, nyj, nf)
        sim_out = np.transpose(sim_out, (2, 0, 1))
    else:
        sim_out.shape = (nf, nyi, nyj)

    out_max = max(ref_out.max(), sim_out.max())

    # --- plot results
    rows = 2
    cols = 2

    ax = plt.subplot(rows, cols, 1)
    tile(filters, cols=8, ax=ax)

    ax = plt.subplot(rows, cols, 2)
    tile(ref_out, vmin=0, vmax=out_max, cols=8, ax=ax)

    ax = plt.subplot(rows, cols, 3)
    plt.hist(ref_out.ravel(), bins=31)
    plt.hist(sim_out.ravel(), bins=31)

    ax = plt.subplot(rows, cols, 4)
    # tile(sim_out, vmin=0, vmax=1, cols=8, ax=ax)
    tile(sim_out, vmin=0, vmax=out_max, cols=8, ax=ax)

    assert allclose(sim_out, ref_out, atol=10, rtol=1e-3)
Example #20
def test_pop_tiny(pop_type, channels_last, nc, request, plt, seed, allclose):
    tau_rc = 0.02
    tau_ref = 0.001
    tau_s = 0.0
    dt = 0.001

    neuron_bias = 1.

    pres_time = 0.4

    sti, stj = 1, 1

    if nc == 1:
        filters = np.array([[-0.5, 2., -0.25], [-0.75, 2., -1.0],
                            [-0.5, 3., -0.5], [-1.0, 6.,
                                               -0.25]]).reshape(1, 4, 1, 3)

        inp_biases = np.array([[1, 5, 1], [2, 1, 2]])
        inp_biases = inp_biases[:, :, None]
    elif nc == 2:
        filters = np.array([[[-0.5, 2., -0.2], [-0.7, 2., -1.0],
                             [-0.5, 3., -0.5], [-1.0, 6., -0.2]],
                            [[-1.0, 2., -1.0], [-0.5, 2., -0.5],
                             [-0.8, 3., -0.2], [-1.0, 4.,
                                                -0.2]]]).reshape(2, 4, 1, 3)

        inp_biases = np.array([[[1, 5, 1], [2, 1, 2]], [[0, 3, 1], [4, 2, 1]]])
        inp_biases = np.transpose(inp_biases, (1, 2, 0))

    # rearrange to (kernel_rows, kernel_cols, in_channels, out_channels)
    filters = np.transpose(filters, (2, 3, 0, 1))

    inp_biases = inp_biases / (inp_biases.max() + 0.001)

    # --- compute nengo_loihi outputs
    ni, nj, nk = inp_biases.shape
    si, sj, nc, nf = filters.shape
    nij = ni * nj
    nyi = 1 + (ni - si) // sti
    nyj = 1 + (nj - sj) // stj
    out_size = nyi * nyj * nf
    assert out_size <= 1024

    model = Model()

    # input block
    inp = LoihiBlock(ni * nj * nk, label='inp')
    assert inp.n_neurons <= 1024
    inp.compartment.configure_relu()
    inp.compartment.bias[:] = inp_biases.ravel()

    inp_ax = Axon(nij, label='inp_ax')

    # we always compute the pixel/channel idxs with channels_last=True
    # (not sure why?), and then set it to the correct value afterwards
    inp_shape = nengo_transforms.ChannelShape((ni, nj, nk), channels_last=True)
    inp_ax.set_compartment_axon_map(target_axons=conv.pixel_idxs(inp_shape),
                                    atoms=conv.channel_idxs(inp_shape))
    inp_shape.shape = (ni, nj, nk) if channels_last else (nk, ni, nj)
    inp_shape.channels_last = channels_last

    inp.add_axon(inp_ax)

    model.add_block(inp)

    # conv block
    neurons = LoihiBlock(out_size, label='neurons')
    assert neurons.n_neurons <= 1024
    neurons.compartment.configure_lif(tau_rc=tau_rc, tau_ref=tau_ref, dt=dt)
    neurons.compartment.configure_filter(tau_s, dt=dt)
    neurons.compartment.bias[:] = neuron_bias

    synapse = Synapse(np.prod(inp_shape.spatial_shape), label='synapse')
    conv2d_transform = nengo_transforms.Convolution(
        nf,
        inp_shape,
        strides=(sti, stj),
        channels_last=channels_last,
        init=filters,
        kernel_size=(1, 3))
    weights, indices, axon_to_weight_map, bases = conv.conv2d_loihi_weights(
        conv2d_transform)
    synapse.set_population_weights(weights,
                                   indices,
                                   axon_to_weight_map,
                                   bases,
                                   pop_type=pop_type)
    neurons.add_synapse(synapse)

    out_probe = Probe(target=neurons, key='spiked')
    neurons.add_probe(out_probe)

    inp_ax.target = synapse
    model.add_block(neurons)

    # simulation
    discretize_model(model)

    n_steps = int(pres_time / dt)
    target = request.config.getoption("--target")
    if target == 'loihi':
        with HardwareInterface(model, use_snips=False, seed=seed) as sim:
            sim.run_steps(n_steps)
            sim_out = sim.get_probe_output(out_probe)
    else:
        with EmulatorInterface(model, seed=seed) as sim:
            sim.run_steps(n_steps)
            sim_out = sim.get_probe_output(out_probe)

    sim_out = np.sum(sim_out, axis=0) * (dt / pres_time)
    if channels_last:
        sim_out.shape = (nyi, nyj, nf)
        sim_out = np.transpose(sim_out, (2, 0, 1))
    else:
        sim_out.shape = (nf, nyi, nyj)

    out_max = sim_out.max()

    # --- plot results
    rows = 1
    cols = 2

    ax = plt.subplot(rows, cols, 1)
    plt.hist(sim_out.ravel(), bins=11)

    ax = plt.subplot(rows, cols, 2)
    tile(sim_out, vmin=0, vmax=out_max, grid=True, ax=ax)

    # ref_out determined by emulator running code known to work
    if nc == 1:
        ref_out = np.array([[0.06, 0.02], [0.055, 0.], [0.0825, 0.0225],
                            [0.125, 0.04]])
    elif nc == 2:
        ref_out = np.array([[0.0975, 0.02], [0.0825, 0.02], [0.125, 0.055],
                            [0.2475, 0.0825]])
    assert allclose(sim_out[:, :, 0], ref_out, rtol=0, atol=1e-7)
Example #21
def test_population_input(request, allclose):
    target = request.config.getoption("--target")
    dt = 0.001

    n_inputs = 3
    n_axons = 1
    n_cx = 2

    steps = 6
    spike_times_inds = [(1, [0]), (3, [1]), (5, [2])]

    model = Model()

    input = SpikeInput(n_inputs)
    model.add_input(input)
    spikes = [(input, ti, inds) for ti, inds in spike_times_inds]

    input_axon = Axon(n_axons)
    axon_map = np.zeros(n_inputs, dtype=int)
    atoms = np.arange(n_inputs)
    input_axon.set_axon_map(axon_map, atoms)
    input.add_axon(input_axon)

    block = LoihiBlock(n_cx)
    block.compartment.configure_lif(tau_rc=0., tau_ref=0., dt=dt)
    block.compartment.configure_filter(0, dt=dt)
    model.add_block(block)

    synapse = Synapse(n_axons)
    weights = 0.1 * np.array([[[1, 2], [2, 3], [4, 5]]], dtype=float)
    indices = np.array([[[0, 1], [0, 1], [0, 1]]], dtype=int)
    axon_to_weight_map = np.zeros(n_axons, dtype=int)
    cx_bases = np.zeros(n_axons, dtype=int)
    synapse.set_population_weights(weights,
                                   indices,
                                   axon_to_weight_map,
                                   cx_bases,
                                   pop_type=32)
    block.add_synapse(synapse)
    input_axon.target = synapse

    probe = Probe(target=block, key='voltage')
    block.add_probe(probe)

    discretize_model(model)

    if target == 'loihi':
        with HardwareInterface(model, use_snips=True) as sim:
            sim.run_steps(steps, blocking=False)
            for ti in range(1, steps + 1):
                spikes_i = [spike for spike in spikes if spike[1] == ti]
                sim.host2chip(spikes=spikes_i, errors=[])
                sim.chip2host(probes_receivers={})

            y = sim.get_probe_output(probe)
    else:
        for inp, ti, inds in spikes:
            inp.add_spikes(ti, inds)

        with EmulatorInterface(model) as sim:
            sim.run_steps(steps)
            y = sim.get_probe_output(probe)

    vth = block.compartment.vth[0]
    assert (block.compartment.vth == vth).all()
    z = y / vth
    assert allclose(z[[1, 3, 5]], weights[0], atol=4e-2, rtol=0)
Example #22
def test_simulator_noise(exp, request, plt, seed, allclose):
    # TODO: test that the mean falls within a number of standard errors
    # of the expected mean, and that non-zero offsets work correctly.
    # Currently, there is an unexpected negative bias for small noise
    # exponents, apparently because there is a probability of generating
    # the shifted equivalent of -128, whereas with e.g. exp = 7 all the
    # generated numbers fall in [-127, 127].
    offset = 0

    target = request.config.getoption("--target")
    n_cx = 1000

    model = Model()
    block = LoihiBlock(n_cx)
    block.compartment.configure_relu()

    block.compartment.vmin = -1

    block.compartment.enableNoise[:] = 1
    block.compartment.noiseExp0 = exp
    block.compartment.noiseMantOffset0 = offset
    block.compartment.noiseAtDendOrVm = 1

    probe = Probe(target=block, key='voltage')
    block.add_probe(probe)
    model.add_block(block)

    discretize_model(model)
    exp2 = block.compartment.noiseExp0
    offset2 = block.compartment.noiseMantOffset0

    n_steps = 100
    if target == 'loihi':
        with HardwareInterface(model, use_snips=False, seed=seed) as sim:
            sim.run_steps(n_steps)
            y = sim.get_probe_output(probe)
    else:
        with EmulatorInterface(model, seed=seed) as sim:
            sim.run_steps(n_steps)
            y = sim.get_probe_output(probe)

    t = np.arange(1, n_steps + 1)
    bias = offset2 * 2.**(exp2 - 1)
    std = 2.**exp2 / np.sqrt(3)  # divide by sqrt(3) for std of uniform -1..1
    rmean = t * bias
    rstd = np.sqrt(t) * std
    rerr = rstd / np.sqrt(n_cx)
    ymean = y.mean(axis=1)
    ystd = y.std(axis=1)
    diffs = np.diff(np.vstack([np.zeros_like(y[0]), y]), axis=0)

    plt.subplot(311)
    plt.hist(diffs.ravel(), bins=256)

    plt.subplot(312)
    plt.plot(rmean, 'k')
    plt.plot(rmean + 3 * rerr, 'k--')
    plt.plot(rmean - 3 * rerr, 'k--')
    plt.plot(ymean)
    plt.title('mean')

    plt.subplot(313)
    plt.plot(rstd, 'k')
    plt.plot(ystd)
    plt.title('std')

    assert allclose(ystd, rstd, rtol=0.1, atol=1)
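
A quick standalone check of the uniform-noise statistics used above: a uniform distribution on [-a, a] has standard deviation a / sqrt(3), which is where the sqrt(3) in rstd comes from.

import numpy as np

exp = 7
a = 2.0**exp
samples = np.random.RandomState(0).uniform(-a, a, size=1_000_000)
assert np.isclose(samples.std(), a / np.sqrt(3), rtol=0.01)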
Example #23
def build_full_chip_connection(model, conn):  # noqa: C901
    """Build dense or sparse connections on-chip"""

    # Create random number generator
    rng = np.random.RandomState(model.seeds[conn])

    pre_obj = model.objs[conn.pre_obj]["out"]
    post_obj = model.objs[conn.post_obj]["in"]
    assert isinstance(pre_obj, (LoihiBlock, LoihiInput))
    assert isinstance(post_obj, (LoihiBlock, LoihiProbe))

    weights = None
    eval_points = None
    solver_info = None
    neuron_type = None
    pre_slice = conn.pre_slice
    post_slice = conn.post_slice

    # sample transform (if using a distribution), transform shape (out, in)
    transform = sample_transform(conn, rng=rng)

    tau_s = 0.0  # `synapse is None` gets mapped to `tau_s = 0.0`
    if isinstance(conn.synapse, nengo.synapses.Lowpass):
        tau_s = conn.synapse.tau
    elif conn.synapse is not None:
        raise NotImplementedError("Cannot handle non-Lowpass synapses")

    needs_decode_neurons = False
    target_encoders = None
    is_chip_process = isinstance(conn.pre_obj, Node) and isinstance(
        conn.pre_obj.output, ChipProcess
    )
    if isinstance(conn.pre_obj, Node) and not (
        isinstance(conn.pre_obj, ChipReceiveNeurons) or is_chip_process
    ):
        assert conn.pre_slice == slice(None)

        weights = expand_matrix(transform, shape=(conn.post.size_in, conn.pre.size_out))

        # input is on-off neuron encoded, so double/flip transform
        weights = stack_matrices([weights, scale_matrix(weights, -1)], order="h")
        target_encoders = "node_encoders"
    elif isinstance(conn.pre_obj, Ensemble) and isinstance(
        conn.pre_obj.neuron_type, nengo.Direct
    ):
        raise NotImplementedError()
    elif isinstance(conn.pre_obj, Ensemble):  # Normal decoded connection
        if isinstance(transform, scipy.sparse.spmatrix):
            raise BuildError(
                "Applying a sparse transform to a decoded connection is not supported"
            )

        eval_points, decoders, solver_info = model.build(
            conn.solver, conn, rng, transform
        )
        pre_slice = slice(None)  # taken care of in decoders

        if conn.solver.weights and not conn.solver.compositional:
            weights = decoders
        else:
            weights = multiply(transform, decoders)

        # the decoder solver assumes a spike height of 1/dt; that isn't the
        # case on loihi, so we need to undo that scaling
        weights = scale_matrix(weights, 1.0 / model.dt)

        neuron_type = conn.pre_obj.neuron_type

        if conn.solver.weights:
            # weight solvers only allowed on ensemble->ensemble connections
            assert isinstance(conn.post_obj, Ensemble)

            if conn.solver.compositional:
                encoders = model.params[conn.post_obj].scaled_encoders
                weights = multiply(encoders[:, post_slice], weights)

            # post slice already applied to encoders (either here or in
            # `build_decoders`), so don't apply later
            post_slice = slice(None)
        else:
            needs_decode_neurons = True
    elif isinstance(conn.pre_obj, (Neurons, ChipReceiveNeurons)) or is_chip_process:
        weights = expand_matrix(transform, shape=(conn.post.size_in, conn.pre.size_out))
        weights = scale_matrix(weights, 1.0 / model.dt)
        neuron_type = (
            None
            if is_chip_process
            else conn.pre_obj.neuron_type
            if isinstance(conn.pre_obj, ChipReceiveNeurons)
            else conn.pre_obj.ensemble.neuron_type
        )

        if isinstance(conn.post_obj, Ensemble):
            needs_decode_neurons = True
    else:
        raise NotImplementedError("Connection from type %r" % (type(conn.pre_obj),))

    if neuron_type is not None and hasattr(neuron_type, "amplitude"):
        weights = scale_matrix(weights, neuron_type.amplitude)

    # to proper dtype
    transform = transform.astype(nengo.rc.float_dtype)
    weights = weights.astype(nengo.rc.float_dtype)

    # loihi_weights has shape (in, out), to match the shape by block.Synapses
    loihi_weights = weights.T

    mid_obj = pre_obj
    mid_axon_inds = None
    post_tau = tau_s
    if needs_decode_neurons and not isinstance(conn.post_obj, Neurons):
        # --- add decode neurons
        assert weights.ndim == 2
        n, d = loihi_weights.shape

        if isinstance(post_obj, LoihiProbe):
            # use non-spiking decode neurons for voltage probing
            assert len(post_obj.target) == 0 or post_obj.target == [None]
            assert post_slice == slice(None)

            # use the same scaling as the ensemble does, to get good
            #  decodes.  Note that this assumes that the decoded value
            #  is in the range -radius to radius, which is usually true.
            gain = np.array(1.0 / conn.pre_obj.radius, dtype=nengo.rc.float_dtype)

            decoder_block = LoihiBlock(2 * d, label="%s" % conn)
            decoder_block.compartment.configure_nonspiking(
                dt=model.dt, vth=model.vth_nonspiking
            )
            decoder_block.compartment.bias[:] = 0

            dec_syn = Synapse(n, label="probe_decoders")
            weights2 = stack_matrices(
                [scale_matrix(loihi_weights, gain), scale_matrix(loihi_weights, -gain)],
                order="h",
            )

            dec_syn.set_weights(weights2)
            decoder_block.add_synapse(dec_syn)
        else:
            # use spiking decode neurons for on-chip connection
            if isinstance(conn.post_obj, Ensemble):
                # loihi encoders don't include radius, so handle scaling here
                gain = np.array(1.0 / conn.post_obj.radius, dtype=nengo.rc.float_dtype)
                loihi_weights = scale_matrix(loihi_weights, gain)

            post_d = conn.post_obj.size_in
            post_inds = np.arange(post_d, dtype=np.int32)[post_slice]
            assert loihi_weights.shape[1] == len(post_inds) == conn.size_out
            mid_axon_inds = model.decode_neurons.get_post_inds(post_inds, post_d)

            target_encoders = "decode_neuron_encoders"
            decoder_block, dec_syn = model.decode_neurons.get_block(
                loihi_weights, block_label="%s" % conn, syn_label="decoders"
            )

        model.add_block(decoder_block)
        model.objs[conn]["decoded"] = decoder_block
        model.objs[conn]["decoders"] = dec_syn
        model.connection_decode_neurons[conn] = decoder_block

        # use tau_s for filter into decode neurons, decode_tau for filter out
        decoder_block.compartment.configure_filter(tau_s, dt=model.dt)
        post_tau = model.decode_tau

        target_axons = -np.ones(pre_obj.n_neurons, dtype=np.int32)
        target_axons[pre_slice] = np.arange(target_axons[pre_slice].size)
        pre_slice = slice(None)

        dec_ax0 = Axon(n, label="decoders")
        dec_ax0.target = dec_syn
        dec_ax0.set_compartment_axon_map(target_axons)
        pre_obj.add_axon(dec_ax0)
        model.objs[conn]["decode_axon"] = dec_ax0

        loihi_weights = None  # weights have now been handled

        if conn.learning_rule_type is not None:
            rule_type = conn.learning_rule_type
            if isinstance(rule_type, nengo.PES):
                if not isinstance(rule_type.pre_synapse, nengo.synapses.Lowpass):
                    raise ValidationError(
                        "Loihi only supports `Lowpass` pre-synapses for learning rules",
                        attr="pre_synapse",
                        obj=rule_type,
                    )

                pre_tau = rule_type.pre_synapse.tau
                float_tracing_tau = pre_tau / model.dt
                tracing_tau = int(round(float_tracing_tau))
                if not np.allclose(float_tracing_tau, tracing_tau):
                    warnings.warn(
                        f"PES learning rule `pre_synapse.tau` ({pre_tau}) is not an "
                        f"integer multiple of `dt` ({model.dt}). Rounding."
                    )

                # Nengo builder scales PES learning rate by `dt / n_neurons`
                n_neurons = (
                    conn.pre_obj.n_neurons
                    if isinstance(conn.pre_obj, Ensemble)
                    else conn.pre_obj.size_in
                )
                learning_rate = rule_type.learning_rate * model.dt / n_neurons

                # Account for scaling to put integer error in range [-127, 127]
                learning_rate /= model.pes_error_scale

                # Tracing mag set so that the magnitude of the pre trace
                # is independent of the pre tau. `dt` factor accounts for
                # Nengo's `dt` spike scaling. Where is the second `dt` from?
                # Maybe the fact that post decode neurons have `vth = 1/dt`?
                tracing_mag = -np.expm1(-1.0 / tracing_tau) / model.dt**2

                # learning weight exponent controls the maximum weight
                # magnitude/weight resolution
                wgt_exp = model.pes_wgt_exp

                dec_syn.set_learning(
                    learning_rate=learning_rate,
                    tracing_mag=tracing_mag,
                    tracing_tau=tracing_tau,
                    wgt_exp=wgt_exp,
                )
            else:
                raise NotImplementedError()

        mid_obj = decoder_block

    if isinstance(post_obj, LoihiProbe):
        assert post_obj.target == [None]
        assert post_slice == slice(None)
        post_obj.target[0] = mid_obj
        model.add_probe(post_obj)
    elif isinstance(conn.post_obj, Neurons):
        assert isinstance(post_obj, LoihiBlock)
        assert post_slice == slice(None)
        if loihi_weights is None:
            raise NotImplementedError("Need weights for connection to neurons")

        assert loihi_weights.ndim == 2
        n1, n2 = loihi_weights.shape
        assert post_obj.n_neurons == n2

        syn = Synapse(n1, label="neuron_weights")
        gain = model.params[conn.post_obj.ensemble].gain
        loihi_weights = scale_matrix(loihi_weights, gain)
        syn.set_weights(loihi_weights)
        post_obj.add_synapse(syn)
        model.objs[conn]["weights"] = syn

        target_axons = -np.ones(mid_obj.n_neurons, dtype=np.int32)
        target_axons[pre_slice] = np.arange(target_axons[pre_slice].size)
        assert target_axons[pre_slice].size == n1

        ax = Axon(mid_obj.n_neurons, label="neuron_weights")
        ax.target = syn
        ax.set_compartment_axon_map(target_axons)
        mid_obj.add_axon(ax)

        post_obj.compartment.configure_filter(post_tau, dt=model.dt)

        if conn.learning_rule_type is not None:
            raise NotImplementedError()
    elif isinstance(conn.post_obj, Ensemble) and conn.solver.weights:
        assert isinstance(post_obj, LoihiBlock)
        assert pre_slice == slice(None), "Not implemented"
        assert post_slice == slice(None)
        assert loihi_weights.ndim == 2
        n1, n2 = loihi_weights.shape
        assert post_obj.n_neurons == n2

        # loihi encoders don't include radius, so handle scaling here
        scale = np.array(1.0 / conn.post_obj.radius, dtype=nengo.rc.float_dtype)
        loihi_weights = scale_matrix(loihi_weights, scale)

        syn = Synapse(n1, label="%s::decoder_weights" % conn)
        syn.set_weights(loihi_weights)
        post_obj.add_synapse(syn)
        model.objs[conn]["weights"] = syn

        ax = Axon(n1, label="decoder_weights")
        ax.target = syn
        mid_obj.add_axon(ax)

        post_obj.compartment.configure_filter(post_tau, dt=model.dt)

        if conn.learning_rule_type is not None:
            raise NotImplementedError()
    elif isinstance(conn.post_obj, Ensemble):
        assert isinstance(post_obj, LoihiBlock)
        assert pre_slice == slice(None), "Not implemented"
        assert post_slice == slice(None)
        assert target_encoders is not None
        if target_encoders not in post_obj.named_synapses:
            build_decode_neuron_encoders(model, conn.post_obj, kind=target_encoders)

        mid_ax = Axon(mid_obj.n_neurons, label="encoders")
        mid_ax.target = post_obj.named_synapses[target_encoders]
        mid_ax.set_compartment_axon_map(mid_axon_inds)
        mid_obj.add_axon(mid_ax)
        model.objs[conn]["mid_axon"] = mid_ax

        post_obj.compartment.configure_filter(post_tau, dt=model.dt)
    else:
        # This includes Node, since nodes can't be targets on-chip
        raise NotImplementedError()

    model.params[conn] = BuiltConnection(
        eval_points=eval_points,
        solver_info=solver_info,
        transform=transform,  # sampled transform
        weights=weights,  # scaled weights (including decoders)
    )
Example #24
def test_big_block_error():
    model = Model()
    model.add_block(LoihiBlock(1050))

    with pytest.raises(ValidationError, match="Segment does not fit"):
        Greedy()(model, n_chips=1)
Example #25
def build_ensemble(model, ens):
    if isinstance(ens.neuron_type, nengo.Direct):
        raise NotImplementedError("Direct neurons not implemented")

    # Create random number generator
    rng = np.random.RandomState(model.seeds[ens])

    eval_points = gen_eval_points(
        ens, ens.eval_points, rng=rng, dtype=nengo.rc.float_dtype
    )

    # Set up encoders
    if isinstance(ens.encoders, Distribution):
        encoders = get_samples(ens.encoders, ens.n_neurons, ens.dimensions, rng=rng)
        encoders = np.asarray(encoders, dtype=nengo.rc.float_dtype)
    else:
        encoders = npext.array(ens.encoders, min_dims=2, dtype=nengo.rc.float_dtype)

    if ens.normalize_encoders:
        encoders /= npext.norm(encoders, axis=1, keepdims=True)

    if np.any(np.isnan(encoders)):
        raise BuildError(
            f"{ens}: NaNs detected in encoders. This usually means that you have "
            "zero-length encoders; when normalized, these result in NaNs. Ensure all "
            "encoders have non-zero length, or set `normalize_encoders=False`."
        )

    # Build the neurons
    gain, bias, max_rates, intercepts = get_gain_bias(
        ens, rng, intercept_limit=model.intercept_limit, dtype=nengo.rc.float_dtype
    )

    block = LoihiBlock(ens.n_neurons, label="%s" % ens)
    block.compartment.bias[:] = bias

    # build the neuron_type (see builders below)
    model.build(ens.neuron_type, ens.neurons, block)

    # set default filter just in case no other filter gets set
    block.compartment.configure_default_filter(model.decode_tau, dt=model.dt)

    if ens.noise is not None:
        raise NotImplementedError("Ensemble noise not implemented")

    # Scale the encoders
    # we exclude the radius to keep scaling reasonable for decode neurons
    scaled_encoders = encoders * gain[:, np.newaxis]

    # add instructions for splitting
    model.block_shapes[block] = model.config[ens].block_shape

    model.add_block(block)

    model.objs[ens]["in"] = block
    model.objs[ens]["out"] = block
    model.objs[ens.neurons]["in"] = block
    model.objs[ens.neurons]["out"] = block
    model.params[ens] = BuiltEnsemble(
        eval_points=eval_points,
        encoders=encoders,
        intercepts=intercepts,
        max_rates=max_rates,
        scaled_encoders=scaled_encoders,
        gain=gain,
        bias=bias,
    )
Example #26
def test_validate_block():
    # too many compartments
    block = LoihiBlock(1200)
    assert block.compartment.n_compartments > 1024
    with pytest.raises(BuildError, match="Number of compartments"):
        validate_block(block)

    # too many input axons
    block = LoihiBlock(410)
    block.add_synapse(Synapse(5000))
    with pytest.raises(BuildError, match="Input axons"):
        validate_block(block)

    # too many output axons
    block = LoihiBlock(410)
    synapse = Synapse(2500)
    axon = Axon(5000)
    axon.target = synapse
    block.add_synapse(synapse)
    block.add_axon(axon)
    with pytest.raises(BuildError, match="Output axons"):
        validate_block(block)

    # too many synapse bits
    block = LoihiBlock(600)
    synapse = Synapse(500)
    synapse.set_full_weights(np.ones((500, 600)))
    axon = Axon(500)
    axon.target = synapse
    block.add_synapse(synapse)
    block.add_axon(axon)
    with pytest.raises(BuildError, match="synapse bits"):
        validate_block(block)
Example #27
def test_one_to_one_allocator_big_block_error():
    model = Model()
    model.add_block(LoihiBlock(1050))

    with pytest.raises(ValidationError, match="Segment does not fit"):
        OneToOne()(model)