# Excerpts from the nengo-loihi test suite and builder, with shared imports
# collected here. The remaining names used below (Model, LoihiBlock, Synapse,
# Axon, Probe, LoihiInput, SpikeInput, discretize_model, validate_block,
# VTH_MAX, EmulatorInterface, HardwareInterface, OneToOne, build_board,
# nengo_transforms, conv, sample_transform, multiply, BuiltConnection,
# ChipReceiveNeurons) come from nengo / nengo-loihi modules whose exact paths
# vary between versions, so their imports are omitted here.
import nengo
import numpy as np
import pytest
from nengo import Ensemble, Node
from nengo.ensemble import Neurons
from nengo.exceptions import BuildError, ValidationError


def test_validate_block():
    # too many compartments
    block = LoihiBlock(1200)
    assert block.compartment.n_compartments > 1024
    with pytest.raises(BuildError, match="Number of compartments"):
        validate_block(block)

    # too many input axons
    block = LoihiBlock(410)
    block.add_synapse(Synapse(5000))
    with pytest.raises(BuildError, match="Input axons"):
        validate_block(block)

    # too many output axons
    block = LoihiBlock(410)
    synapse = Synapse(2500)
    axon = Axon(5000)
    axon.target = synapse
    block.add_synapse(synapse)
    block.add_axon(axon)
    with pytest.raises(BuildError, match="Output axons"):
        validate_block(block)

    # too many synapse bits
    block = LoihiBlock(600)
    synapse = Synapse(500)
    synapse.set_full_weights(np.ones((500, 600)))
    axon = Axon(500)
    axon.target = synapse
    block.add_synapse(synapse)
    block.add_axon(axon)
    with pytest.raises(BuildError, match="synapse bits"):
        validate_block(block)
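
# The four cases above exercise nengo-loihi's per-core resource checks: each
# core has a fixed compartment budget (1024, per the first case) and separate
# budgets for input axons, output axons, and total synapse memory ("synapse
# bits"). The exact axon and bit limits are constants inside nengo_loihi, so
# the sizes above (e.g. Synapse(5000)) are chosen simply to exceed them.
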
def _basic_model():
    model = Model()

    block0 = LoihiBlock(1)
    block0.compartment.configure_lif()
    model.add_block(block0)

    block1 = LoihiBlock(1)
    block1.compartment.configure_lif()
    model.add_block(block1)

    axon1 = Axon(1)
    block0.add_axon(axon1)

    synapse1 = Synapse(1)
    synapse1.set_full_weights([[1]])
    axon1.target = synapse1
    block1.add_synapse(synapse1)

    axon0 = Axon(1)
    input = LoihiInput()
    input.add_axon(axon0)
    model.add_input(input)

    synapse0 = Synapse(1)
    synapse0.set_full_weights([[1]])
    axon0.target = synapse0
    block0.add_synapse(synapse0)

    discretize_model(model)

    return model
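
# A minimal usage sketch (not in the original source): the discretized model
# returned by `_basic_model` can be run on the emulator, mirroring the
# pattern in `test_uv_overflow` below.
def _run_basic_model(n_steps=10):
    model = _basic_model()
    with EmulatorInterface(model) as emu:
        emu.run_steps(n_steps)
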
# Helper excerpted from a larger learning test; `core` is assumed to be a
# LoihiBlock defined in the enclosing scope, to which each new synapse is
# attached.
def new_syn(tracing_mag=None):
    syn = Synapse(n_axons=1)
    syn.set_full_weights(np.array([[1]]))
    if tracing_mag is not None:
        syn.set_learning(tracing_mag=tracing_mag)
    core.add_synapse(syn)
    return syn


def build_decode_neuron_encoders(model, ens, kind='decode_neuron_encoders'):
    """Build encoders accepting decode neuron input."""
    block = model.objs[ens.neurons]['in']
    scaled_encoders = model.params[ens].scaled_encoders
    if kind == 'node_encoders':
        encoders = model.node_neurons.get_post_encoders(scaled_encoders)
    elif kind == 'decode_neuron_encoders':
        encoders = model.decode_neurons.get_post_encoders(scaled_encoders)

    synapse = Synapse(encoders.shape[0], label=kind)
    synapse.set_full_weights(encoders)
    block.add_synapse(synapse, name=kind)
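
# `build_decode_neuron_encoders` is invoked lazily from `build_connection`
# below, the first time a connection needs encoders of the given `kind` on
# the post ensemble.
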
def test_builder_poptype_errors():
    pytest.importorskip('nxsdk')

    # Test error in build_synapse
    model = Model()
    block = LoihiBlock(1)
    block.compartment.configure_lif()
    model.add_block(block)

    synapse = Synapse(1)
    synapse.set_full_weights([[1]])
    synapse.pop_type = 8
    block.add_synapse(synapse)

    discretize_model(model)

    allocator = OneToOne()  # one core per ensemble
    board = allocator(model)

    with pytest.raises(ValueError, match="[Ss]ynapse.*[Uu]nrec.*pop.*type"):
        build_board(board)

    # Test error in collect_axons
    model = Model()
    block0 = LoihiBlock(1)
    block0.compartment.configure_lif()
    model.add_block(block0)
    block1 = LoihiBlock(1)
    block1.compartment.configure_lif()
    model.add_block(block1)

    axon = Axon(1)
    block0.add_axon(axon)

    synapse = Synapse(1)
    synapse.set_full_weights([[1]])
    synapse.pop_type = 8
    axon.target = synapse
    block1.add_synapse(synapse)

    discretize_model(model)

    board = allocator(model)

    with pytest.raises(ValueError, match="[Aa]xon.*[Uu]nrec.*pop.*type"):
        build_board(board)


    # Method excerpt: `self` is the model's `decode_neurons` object, which
    # `build_connection` below calls via model.decode_neurons.get_block(...).
    def get_block(self, weights, block_label=None, syn_label=None):
        gain = self.gain * self.dt
        bias = self.bias * self.dt

        d, n = weights.shape
        n_neurons = 2 * d * self.pairs_per_dim
        block = LoihiBlock(n_neurons, label=block_label)
        block.compartment.configure_relu(dt=self.dt)
        block.compartment.bias[:] = bias.repeat(d)

        syn = Synapse(n, label=syn_label)
        weights2 = []
        for ga, gb in gain.reshape(self.pairs_per_dim, 2):
            weights2.extend([ga * weights.T, -gb * weights.T])
        weights2 = np.hstack(weights2)
        syn.set_full_weights(weights2)
        block.add_synapse(syn)

        return block, syn
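
    # Shape sketch (illustrative): with `weights` of shape (d, n), the block
    # has 2 * d * pairs_per_dim neurons and the synapse weight matrix
    # `weights2` has shape (n, 2 * d * pairs_per_dim): for each gain pair,
    # d columns scaled by +gain followed by d columns scaled by -gain.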


def build_connection(model, conn):
    if nengo_transforms is not None:
        if isinstance(conn.transform, nengo_transforms.Convolution):
            # TODO: integrate these into the same function
            conv.build_conv2d_connection(model, conn)
            return
        elif not isinstance(conn.transform, nengo_transforms.Dense):
            raise NotImplementedError(
                "nengo-loihi does not yet support %s transforms"
                % conn.transform)

    # Create random number generator
    rng = np.random.RandomState(model.seeds[conn])

    pre_cx = model.objs[conn.pre_obj]['out']
    post_cx = model.objs[conn.post_obj]['in']
    assert isinstance(pre_cx, (LoihiBlock, LoihiInput))
    assert isinstance(post_cx, (LoihiBlock, Probe))

    weights = None
    eval_points = None
    solver_info = None
    neuron_type = None
    post_slice = conn.post_slice

    # sample transform (if using a distribution)
    transform = sample_transform(conn, rng=rng)

    tau_s = 0.0  # `synapse is None` gets mapped to `tau_s = 0.0`
    if isinstance(conn.synapse, nengo.synapses.Lowpass):
        tau_s = conn.synapse.tau
    elif conn.synapse is not None:
        raise NotImplementedError("Cannot handle non-Lowpass synapses")

    needs_decode_neurons = False
    target_encoders = None
    if isinstance(conn.pre_obj, Node):
        assert conn.pre_slice == slice(None)

        if np.array_equal(transform, np.array(1.)):
            # TODO: this identity transform may be avoidable
            transform = np.eye(conn.pre.size_out)
        else:
            assert transform.ndim == 2, "transform shape not handled yet"
            assert transform.shape[1] == conn.pre.size_out

        assert transform.shape[1] == conn.pre.size_out
        if isinstance(conn.pre_obj, ChipReceiveNeurons):
            weights = transform / model.dt
            neuron_type = conn.pre_obj.neuron_type
        else:
            # input is on-off neuron encoded, so double/flip transform
            weights = np.column_stack([transform, -transform])
            target_encoders = 'node_encoders'
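            # Worked example (illustrative): a 1-D transform [[2.]] becomes
            # [[2., -2.]], one column for the "on" neuron and one for the
            # "off" neuron of each input dimension.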
    elif (isinstance(conn.pre_obj, Ensemble)
          and isinstance(conn.pre_obj.neuron_type, nengo.Direct)):
        raise NotImplementedError()
    elif isinstance(conn.pre_obj, Ensemble):  # Normal decoded connection
        eval_points, decoders, solver_info = model.build(
            conn.solver, conn, rng, transform)

        if conn.solver.weights and not conn.solver.compositional:
            weights = decoders
        else:
            weights = multiply(transform, decoders)

        # the decoder solver assumes a spike height of 1/dt; that isn't the
        # case on loihi, so we need to undo that scaling
        weights = weights / model.dt
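        # e.g. with dt = 0.001 this scales the solved decoders up by 1000,
        # undoing the solver's assumed 1/dt spike height.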

        neuron_type = conn.pre_obj.neuron_type

        if conn.solver.weights:
            # weight solvers only allowed on ensemble->ensemble connections
            assert isinstance(conn.post_obj, Ensemble)

            if conn.solver.compositional:
                encoders = model.params[conn.post_obj].scaled_encoders.T
                encoders = encoders[post_slice]
                weights = multiply(encoders.T, weights)

            # post slice already applied to encoders (either here or in
            # `build_decoders`), so don't apply later
            post_slice = None
        else:
            needs_decode_neurons = True
    elif isinstance(conn.pre_obj, Neurons):
        assert conn.pre_slice == slice(None)
        assert transform.ndim == 2, "transform shape not handled yet"
        weights = transform / model.dt
        neuron_type = conn.pre_obj.ensemble.neuron_type
    else:
        raise NotImplementedError("Connection from type %r" % (
            type(conn.pre_obj),))

    if neuron_type is not None and hasattr(neuron_type, 'amplitude'):
        weights = weights * neuron_type.amplitude

    mid_cx = pre_cx
    mid_axon_inds = None
    post_tau = tau_s
    if needs_decode_neurons and not isinstance(conn.post_obj, Neurons):
        # --- add decode neurons
        assert weights.ndim == 2
        d, n = weights.shape

        if isinstance(post_cx, Probe):
            # use non-spiking decode neurons for voltage probing
            assert post_cx.target is None
            assert post_slice == slice(None)

            # Use the same scaling as the ensemble does, to get good decodes.
            # Note that this assumes that the decoded value is in the range
            # -radius to radius, which is usually true.
            weights = weights / conn.pre_obj.radius

            gain = 1
            dec_cx = LoihiBlock(2 * d, label='%s' % conn)
            dec_cx.compartment.configure_nonspiking(
                dt=model.dt, vth=model.vth_nonspiking)
            dec_cx.compartment.bias[:] = 0
            model.add_block(dec_cx)
            model.objs[conn]['decoded'] = dec_cx

            dec_syn = Synapse(n, label="probe_decoders")
            weights2 = gain * np.vstack([weights, -weights]).T

            dec_syn.set_full_weights(weights2)
            dec_cx.add_synapse(dec_syn)
            model.objs[conn]['decoders'] = dec_syn
        else:
            # use spiking decode neurons for on-chip connection
            if isinstance(conn.post_obj, Ensemble):
                # loihi encoders don't include radius, so handle scaling here
                weights = weights / conn.post_obj.radius

            post_d = conn.post_obj.size_in
            post_inds = np.arange(post_d, dtype=np.int32)[post_slice]
            assert weights.shape[0] == len(post_inds) == conn.size_out == d
            mid_axon_inds = model.decode_neurons.get_post_inds(
                post_inds, post_d)

            target_encoders = 'decode_neuron_encoders'
            dec_cx, dec_syn = model.decode_neurons.get_block(
                weights, block_label="%s" % conn, syn_label="decoders")

            model.add_block(dec_cx)
            model.objs[conn]['decoded'] = dec_cx
            model.objs[conn]['decoders'] = dec_syn

        # use tau_s for filter into decode neurons, decode_tau for filter out
        dec_cx.compartment.configure_filter(tau_s, dt=model.dt)
        post_tau = model.decode_tau

        dec_ax0 = Axon(n, label="decoders")
        dec_ax0.target = dec_syn
        pre_cx.add_axon(dec_ax0)
        model.objs[conn]['decode_axon'] = dec_ax0

        if conn.learning_rule_type is not None:
            rule_type = conn.learning_rule_type
            if isinstance(rule_type, nengo.PES):
                if not isinstance(rule_type.pre_synapse,
                                  nengo.synapses.Lowpass):
                    raise ValidationError(
                        "Loihi only supports `Lowpass` pre-synapses for "
                        "learning rules", attr='pre_synapse', obj=rule_type)

                tracing_tau = rule_type.pre_synapse.tau / model.dt

                # Nengo builder scales PES learning rate by `dt / n_neurons`
                n_neurons = (conn.pre_obj.n_neurons
                             if isinstance(conn.pre_obj, Ensemble)
                             else conn.pre_obj.size_in)
                learning_rate = rule_type.learning_rate * model.dt / n_neurons

                # Account for scaling to put integer error in range [-127, 127]
                learning_rate /= model.pes_error_scale
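
                # Worked example (illustrative): Nengo's default PES
                # learning_rate = 1e-4 with dt = 0.001 and n_neurons = 100
                # gives 1e-4 * 0.001 / 100 = 1e-9 before the pes_error_scale
                # division above.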

                # Tracing mag set so that the magnitude of the pre trace
                # is independent of the pre tau. `dt` factor accounts for
                # Nengo's `dt` spike scaling. Where is the second `dt` from?
                # Maybe the fact that post decode neurons have `vth = 1/dt`?
                tracing_mag = -np.expm1(-1. / tracing_tau) / model.dt**2
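
                # Worked example (illustrative): pre_synapse.tau = 0.005 with
                # dt = 0.001 gives tracing_tau = 5 timesteps and
                # tracing_mag = -expm1(-1/5) / 0.001**2 ~= 0.1813 / 1e-6
                # ~= 1.8e5.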

                # learning weight exponent controls the maximum weight
                # magnitude/weight resolution
                wgt_exp = model.pes_wgt_exp

                dec_syn.set_learning(
                    learning_rate=learning_rate,
                    tracing_mag=tracing_mag,
                    tracing_tau=tracing_tau,
                    wgt_exp=wgt_exp,
                )
            else:
                raise NotImplementedError()

        mid_cx = dec_cx

    if isinstance(post_cx, Probe):
        assert post_cx.target is None
        assert post_slice == slice(None)
        post_cx.target = mid_cx
        mid_cx.add_probe(post_cx)
    elif isinstance(conn.post_obj, Neurons):
        assert isinstance(post_cx, LoihiBlock)
        assert post_slice == slice(None)
        if weights is None:
            raise NotImplementedError("Need weights for connection to neurons")
        else:
            assert weights.ndim == 2
            n2, n1 = weights.shape
            assert post_cx.n_neurons == n2

            syn = Synapse(n1, label="neuron_weights")
            gain = model.params[conn.post_obj.ensemble].gain
            syn.set_full_weights(weights.T * gain)
            post_cx.add_synapse(syn)
            model.objs[conn]['weights'] = syn

        ax = Axon(mid_cx.n_neurons, label="neuron_weights")
        ax.target = syn
        mid_cx.add_axon(ax)

        post_cx.compartment.configure_filter(post_tau, dt=model.dt)

        if conn.learning_rule_type is not None:
            raise NotImplementedError()
    elif isinstance(conn.post_obj, Ensemble) and conn.solver.weights:
        assert isinstance(post_cx, LoihiBlock)
        assert weights.ndim == 2
        n2, n1 = weights.shape
        assert post_cx.n_neurons == n2

        # loihi encoders don't include radius, so handle scaling here
        weights = weights / conn.post_obj.radius

        syn = Synapse(n1, label="%s::decoder_weights" % conn)
        syn.set_full_weights(weights.T)
        post_cx.add_synapse(syn)
        model.objs[conn]['weights'] = syn

        ax = Axon(n1, label="decoder_weights")
        ax.target = syn
        mid_cx.add_axon(ax)

        post_cx.compartment.configure_filter(post_tau, dt=model.dt)

        if conn.learning_rule_type is not None:
            raise NotImplementedError()
    elif isinstance(conn.post_obj, Ensemble):
        assert target_encoders is not None
        if target_encoders not in post_cx.named_synapses:
            build_decode_neuron_encoders(
                model, conn.post_obj, kind=target_encoders)

        mid_ax = Axon(mid_cx.n_neurons, label="encoders")
        mid_ax.target = post_cx.named_synapses[target_encoders]
        mid_ax.set_axon_map(mid_axon_inds)
        mid_cx.add_axon(mid_ax)
        model.objs[conn]['mid_axon'] = mid_ax

        post_cx.compartment.configure_filter(post_tau, dt=model.dt)
    else:
        # This includes Node, since nodes can't be targets on-chip
        raise NotImplementedError()

    model.params[conn] = BuiltConnection(
        eval_points=eval_points,
        solver_info=solver_info,
        transform=transform,
        weights=weights)


def test_uv_overflow(n_axons, plt, allclose, monkeypatch):
    # TODO: currently this is not testing the V overflow, since it is higher
    # and I haven't been able to figure out a way to make it overflow.
    nt = 15

    model = Model()

    # n_axons controls number of input spikes and thus amount of overflow
    input = SpikeInput(n_axons)
    for t in np.arange(1, nt + 1):
        input.add_spikes(t, np.arange(n_axons))  # send spikes to all axons
    model.add_input(input)

    block = LoihiBlock(1)
    block.compartment.configure_relu()
    block.compartment.configure_filter(0.1)

    synapse = Synapse(n_axons)
    synapse.set_full_weights(np.ones((n_axons, 1)))
    block.add_synapse(synapse)

    axon = Axon(n_axons)
    axon.target = synapse
    input.add_axon(axon)

    probe_u = Probe(target=block, key='current')
    block.add_probe(probe_u)
    probe_v = Probe(target=block, key='voltage')
    block.add_probe(probe_v)
    probe_s = Probe(target=block, key='spiked')
    block.add_probe(probe_s)

    model.add_block(block)
    discretize_model(model)

    # must set these after `discretize` to specify discretized values
    block.compartment.vmin = -2**22 + 1
    block.compartment.vth[:] = VTH_MAX

    assert EmulatorInterface.strict  # Tests should be run in strict mode
    monkeypatch.setattr(EmulatorInterface, "strict", False)
    with EmulatorInterface(model) as emu:
        with pytest.warns(UserWarning):
            emu.run_steps(nt)
        emu_u = emu.get_probe_output(probe_u)
        emu_v = emu.get_probe_output(probe_v)
        emu_s = emu.get_probe_output(probe_s)

    with HardwareInterface(model, use_snips=False) as sim:
        sim.run_steps(nt)
        sim_u = sim.get_probe_output(probe_u)
        sim_v = sim.get_probe_output(probe_v)
        sim_s = sim.get_probe_output(probe_s)
        sim_v[sim_s > 0] = 0  # since Loihi has placeholder voltage after spike

    plt.subplot(311)
    plt.plot(emu_u)
    plt.plot(sim_u)

    plt.subplot(312)
    plt.plot(emu_v)
    plt.plot(sim_v)

    plt.subplot(313)
    plt.plot(emu_s)
    plt.plot(sim_s)

    assert allclose(emu_u, sim_u)
    assert allclose(emu_v, sim_v)