Example #1
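These examples are reproduced without their imports. A plausible shared preamble, assuming they are excerpts from nengo-loihi's test suite (the exact module paths are best guesses and vary across versions), would be:

import os
import pickle

import nengo
import numpy as np
import pytest
from nengo.utils.numpy import rms

import nengo_loihi
from nengo_loihi.block import Axon, LoihiBlock, LoihiInput, Synapse
from nengo_loihi.builder import Model
from nengo_loihi.builder.discretize import discretize_model
# allocator names vary across versions: OneToOne appears in older releases,
# GreedyInterchip and PartitionInterchip in newer ones
from nengo_loihi.hardware.allocators import Greedy, RoundRobin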
def test_deterministic_network_allocation(Simulator, seed):
    # test that we get the same simulation results across allocators.
    # the determinism of the allocation itself is covered by other unit tests.
    n_neurons = 64
    n_ensembles = 8
    tau = 0.1
    sim_t = 1.0

    with nengo.Network(seed=seed) as model:
        prev = nengo.Node(output=1)

        p = []
        for i in range(n_ensembles):
            ens = nengo.Ensemble(n_neurons, 1)
            nengo.Connection(prev, ens, synapse=tau)
            p.append(nengo.Probe(ens, synapse=tau))
            prev = ens

    with nengo.Simulator(model) as sim_ref:
        sim_ref.run(sim_t)

    # one block per ensemble, connection, and probe, minus one because
    # the last ensemble has no outgoing connection
    n_blocks = n_ensembles * 3 - 1
    allocation = [
        (1, 1, RoundRobin()),
        (3, 3, RoundRobin()),
        (8, 8, RoundRobin()),
        (6, ceil_div(n_blocks, 4), Greedy(cores_per_chip=4)),
        (8, ceil_div(n_blocks, 5), Greedy(cores_per_chip=5)),
    ]

    sim_prev = None
    for n_chips, n_chips_used, allocator in allocation:
        with Simulator(
                model,
                precompute=True,
                hardware_options={
                    "n_chips": n_chips,
                    "allocator": allocator
                },
        ) as sim_loihi:
            sim_loihi.run(sim_t)

        assert len(sim_loihi.model.blocks) == n_blocks
        assert n_chips_used == sim_loihi.sims["loihi"].board.n_chips
        for p_i in p:
            assert rms(sim_loihi.data[p_i] - sim_ref.data[p_i]) < 0.05
            if sim_prev is not None:
                assert np.allclose(sim_prev.data[p_i], sim_loihi.data[p_i])
        sim_prev = sim_loihi
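Example #1 relies on a ceil_div helper that is never defined in these excerpts. A minimal sketch, assuming it is ordinary integer ceiling division:

def ceil_div(a, b):
    # smallest integer >= a / b, via floor division of the negation
    return -(-a // b)

With n_blocks = 23 this gives ceil_div(23, 4) == 6 and ceil_div(23, 5) == 5, matching the expected chip counts in the allocation list above.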
Example #2
def test_round_robin_allocator_under():
    model = _basic_model(n_blocks=3)

    board = RoundRobin()(model, n_chips=2)

    assert board.n_chips == 2
    assert board.n_cores_per_chip == [2, 1]
    assert board.n_synapses_per_core == [[1, 1], [1]]
    assert len(board.inputs) == 1

    chip = board.chips[0]
    assert chip.board is board
    assert chip.n_cores == 2

    for i in range(2):
        assert chip.cores[i].chip is chip
        assert len(chip.cores[i].synapses) == 1
        assert len(chip.cores[i].blocks) == 1

    chip = board.chips[1]
    assert chip.board is board
    assert chip.n_cores == 1

    assert chip.cores[0].chip is chip
    assert len(chip.cores[0].synapses) == 1
    assert len(chip.cores[0].blocks) == 1
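Several examples build a toy model through a _basic_model helper whose definition is not included; Examples #16 and #18 show its tail. A sketch consistent with the Example #18 fragment (an approximation, not the verbatim helper) follows. Note that the excerpts span different nengo-loihi versions: newer ones pass n_chips at call time, as in RoundRobin()(model, n_chips=2), while older ones pass it to the constructor, as in RoundRobin(n_chips=2)(model).

def _basic_model(n_blocks=3):
    # a chain of single-neuron blocks, each driving the next through a
    # one-to-one synapse, plus an external input into the first block
    model = Model()

    blocks = []
    for _ in range(n_blocks):
        block = LoihiBlock(1)
        block.compartment.configure_lif()
        model.add_block(block)
        blocks.append(block)

    for i in range(n_blocks - 1):
        axon = Axon(1)
        blocks[i].add_axon(axon)
        synapse = Synapse(1)
        synapse.set_weights([[1]])
        axon.target = synapse
        blocks[i + 1].add_synapse(synapse)

    # external input into the first block (names mirror Example #18)
    axon0 = Axon(1)
    input = LoihiInput()
    input.add_axon(axon0)
    model.add_input(input)

    synapse0 = Synapse(1)
    synapse0.set_weights([[1]])
    axon0.target = synapse0
    blocks[0].add_synapse(synapse0)

    discretize_model(model)

    return model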
Example #3
def test_multiple_pes(init_function, request, allclose, plt, seed, Simulator):
    n_errors = 5
    targets = np.linspace(-0.9, 0.9, n_errors)
    with nengo.Network(seed=seed) as model:
        pre_ea = nengo.networks.EnsembleArray(200, n_ensembles=n_errors)
        output = nengo.Node(size_in=n_errors)

        target = nengo.Node(targets)

        for i in range(n_errors):
            conn = nengo.Connection(
                pre_ea.ea_ensembles[i],
                output[i],
                function=init_function,
                learning_rule_type=nengo.PES(learning_rate=1e-2),
            )
            nengo.Connection(target[i], conn.learning_rule, transform=-1)
            nengo.Connection(output[i], conn.learning_rule)

        probe = nengo.Probe(output, synapse=0.1)

    simtime = 0.6
    with Simulator(model, hardware_options={"allocator": RoundRobin()}) as sim:
        sim.run(simtime)

    t = sim.trange()
    tmask = t > simtime * 0.85

    plt.plot(t, sim.data[probe])
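    # draw each target as a horizontal line, cycling matplotlib's default colors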
    for target, style in zip(targets, plt.rcParams["axes.prop_cycle"]):
        plt.axhline(target, **style)

    for i, target in enumerate(targets):
        assert allclose(sim.data[probe][tmask, i], target, atol=0.1,
                        rtol=0.1), ("Target %d not close" % i)
Example #4
def test_conv_round_robin_unsupported(Simulator, seed):
    k = 10
    d = 5
    with nengo.Network(seed=seed) as model:
        u = nengo.Node(output=np.linspace(-1, 1, k))

        a = nengo.Ensemble(n_neurons=k**2, dimensions=k)

        x = nengo.Ensemble(n_neurons=d,
                           dimensions=d,
                           gain=np.ones(d),
                           bias=np.ones(d))

        nengo.Connection(u, a)

        conv = nengo.Convolution(n_filters=d,
                                 input_shape=(k, k, 1),
                                 strides=(1, 1),
                                 kernel_size=(k, k))
        assert conv.size_in == k**2
        assert conv.size_out == d

        nengo.Connection(a.neurons, x.neurons, transform=conv)

    with pytest.raises(BuildError, match="multi-chip allocator"):
        with Simulator(model,
                       hardware_options={'allocator': RoundRobin(n_chips=8)},
                       precompute=True):
            pass
Example #5
def test_learning_seed(Simulator, seed):
    n_per_dim = 120
    dims = 1
    tau = 0.005
    simtime = 0.2
    model, probes = pes_network(
        n_per_dim,
        dims,
        seed,
        learn_synapse=tau,
        learning_rule_type=nengo.PES(learning_rate=1e-2),
        period=simtime / 2,
    )

    sim_args = dict(hardware_options={"allocator": RoundRobin()})

    with Simulator(model, seed=seed, **sim_args) as sim:
        sim.run(simtime)

    with Simulator(model, seed=seed, **sim_args) as sim1:
        sim1.run(simtime)

    with Simulator(model, seed=seed + 1, **sim_args) as sim2:
        sim2.run(simtime)

    assert np.allclose(sim1.data[probes["post"]], sim.data[probes["post"]])
    assert not np.allclose(sim2.data[probes["post"]], sim.data[probes["post"]])
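The learning tests (Examples #5, #6, #10, #13, and #17) build their networks with a pes_network helper that these excerpts never define. A minimal sketch inferred from the call sites (the signature, error convention, and probe names are assumptions; the real helper likely differs in detail):

def pes_network(
    n_per_dim,
    dims,
    seed,
    learn_synapse=0.005,
    learning_rule_type=nengo.PES(learning_rate=1e-3),
    input_scale=None,
    error_scale=1.0,
    period=1.0,
    probe_synapse=0.02,
):
    # sinusoidal stimulus on each dimension, optionally scaled per dimension
    if input_scale is None:
        input_scale = np.ones(dims)

    with nengo.Network(seed=seed) as model:
        stim = nengo.Node(
            lambda t: input_scale * np.sin((2 * np.pi / period) * t)
        )
        pre = nengo.Ensemble(n_per_dim * dims, dims)
        post = nengo.Node(size_in=dims)

        nengo.Connection(stim, pre, synapse=None)
        conn = nengo.Connection(
            pre,
            post,
            synapse=learn_synapse,
            function=lambda x: np.zeros(dims),  # start from zero decoders
            learning_rule_type=learning_rule_type,
        )

        # error = error_scale * (post - stim); scaling up tests error clipping
        error = nengo.Node(size_in=dims)
        nengo.Connection(post, error, transform=error_scale, synapse=None)
        nengo.Connection(stim, error, transform=-error_scale, synapse=None)
        nengo.Connection(error, conn.learning_rule)

        probes = dict(
            stim=nengo.Probe(stim, synapse=probe_synapse),
            pre=nengo.Probe(pre, synapse=probe_synapse),
            post=nengo.Probe(post, synapse=probe_synapse),
        )

    return model, probes

The error convention (post minus target) matches the explicit learning-rule wiring in Examples #3 and #14.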
Example #6
def test_learning_seed(Simulator, request, seed):
    def set_srun_options(options):
        # TODO: the SRUN_OPTIONS environment variable will be read in a future
        # version of NxSDK. Until that's released, this test is expected to fail.
        os.environ["SRUN_OPTIONS"] = options

    request.addfinalizer(lambda: set_srun_options(""))
    set_srun_options("-p loihi -x ncl-ext-ghrd-02")

    n_per_dim = 120
    dims = 1
    tau = 0.005
    simtime = 0.2
    model, probes = pes_network(
        n_per_dim,
        dims,
        seed,
        learn_synapse=tau,
        learning_rule_type=nengo.PES(learning_rate=1e-2),
        period=simtime / 2,
    )

    sim_args = dict(hardware_options={"allocator": RoundRobin()})

    with Simulator(model, seed=seed, **sim_args) as sim:
        sim.run(simtime)

    with Simulator(model, seed=seed, **sim_args) as sim1:
        sim1.run(simtime)

    with Simulator(model, seed=seed + 1, **sim_args) as sim2:
        sim2.run(simtime)

    assert np.allclose(sim1.data[probes["post"]], sim.data[probes["post"]])
    assert not np.allclose(sim2.data[probes["post"]], sim.data[probes["post"]])
Example #7
def test_round_robin_allocator_under():
    model = _basic_model()

    board = RoundRobin(n_chips=2)(model)

    assert board.n_chips == 2
    assert board.n_cores_per_chip == [2, 1]
    assert board.n_synapses_per_core == [[1, 0], [1]]

    chip0 = board.chips[0]
    assert chip0.board is board
    assert chip0.n_cores == 2

    assert chip0.cores[0].chip is chip0
    assert len(chip0.cores[0].synapses) == 1
    assert len(chip0.cores[0].blocks) == 1
    assert len(chip0.cores[0].inputs) == 0

    assert chip0.cores[1].chip is chip0
    assert len(chip0.cores[1].synapses) == 0
    assert len(chip0.cores[1].blocks) == 0
    assert len(chip0.cores[1].inputs) == 1

    chip1 = board.chips[1]
    assert chip1.board is board
    assert chip1.n_cores == 1

    assert chip1.cores[0].chip is chip1
    assert len(chip1.cores[0].synapses) == 1
    assert len(chip1.cores[0].blocks) == 1
    assert len(chip1.cores[0].inputs) == 0
Example #8
def test_round_robin_allocator_over():
    model = _basic_model()

    board = RoundRobin(n_chips=4)(model)

    assert board.n_chips == 3
    assert board.n_cores_per_chip == [1, 1, 1]
    assert board.n_synapses_per_core == [[1], [1], [0]]

    chip0 = board.chips[0]
    assert chip0.board is board
    assert chip0.n_cores == 1

    assert chip0.cores[0].chip is chip0
    assert len(chip0.cores[0].synapses) == 1
    assert len(chip0.cores[0].blocks) == 1
    assert len(chip0.cores[0].inputs) == 0

    chip1 = board.chips[1]
    assert chip1.board is board
    assert chip1.n_cores == 1

    assert chip1.cores[0].chip is chip1
    assert len(chip1.cores[0].synapses) == 1
    assert len(chip1.cores[0].blocks) == 1
    assert len(chip1.cores[0].inputs) == 0

    chip2 = board.chips[2]
    assert chip2.board is board
    assert chip2.n_cores == 1

    assert chip2.cores[0].chip is chip2
    assert len(chip2.cores[0].synapses) == 0
    assert len(chip2.cores[0].blocks) == 0
    assert len(chip2.cores[0].inputs) == 1
Example #9
def test_allocator_integration_consistency(Simulator, seed, allclose):
    # test that we get the same simulation results across allocators.
    # the determinism of the allocation itself is covered by other unit tests.
    n_neurons = 64
    n_ensembles = 8
    probe_tau = 0.01
    sim_t = 0.1

    with nengo.Network(seed=seed) as model:
        prev = nengo.Node(output=1)

        p = []
        for _ in range(n_ensembles):
            ens = nengo.Ensemble(n_neurons, 1)
            nengo.Connection(prev, ens)
            p.append(nengo.Probe(ens, synapse=probe_tau))
            prev = ens

    with Simulator(model, target="sim") as sim_ref:
        sim_ref.run(sim_t)

    # one block per ensemble, connection, and probe, minus one because
    # the last ensemble has no outgoing connection
    n_blocks = n_ensembles * 3 - 1
    allocation = [
        (1, 1, RoundRobin()),
        (7, 7, RoundRobin()),
        (6, ceil_div(n_blocks, 4), Greedy(cores_per_chip=4)),
        (8, ceil_div(n_blocks, 5), Greedy(cores_per_chip=5)),
        (6, ceil_div(n_blocks, 4), GreedyInterchip(cores_per_chip=4)),
    ]
    if HAS_NXMETIS:
        allocation.append((6, 6, PartitionInterchip()))

    for n_chips, n_chips_used, allocator in allocation:
        with Simulator(
            model,
            precompute=True,
            hardware_options={"n_chips": n_chips, "allocator": allocator},
        ) as sim_loihi:
            sim_loihi.run(sim_t)

        assert len(sim_loihi.model.blocks) == n_blocks
        assert n_chips_used == sim_loihi.sims["loihi"].board.n_chips
        for p_i in p:
            assert allclose(sim_loihi.data[p_i], sim_ref.data[p_i]), allocator
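Example #9 gates the PartitionInterchip case on a HAS_NXMETIS flag that is not defined here; presumably it is a simple import probe along these lines:

try:
    import nxmetis  # optional METIS-based graph partitioning backend
    HAS_NXMETIS = True
except ImportError:
    HAS_NXMETIS = False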
Example #10
def test_pes_overflow(plt, seed, Simulator):
    dims = 3
    n_per_dim = 300
    tau = 0.01
    simtime = 0.6
    model, probes = pes_network(
        n_per_dim,
        dims,
        seed,
        learn_synapse=tau,
        input_scale=np.linspace(1, 0.7, dims),
        learning_rule_type=nengo.PES(learning_rate=1e-2),
        period=simtime,
    )

    loihi_model = Model()
    # set pes_wgt_exp low to create overflow in the weight values
    loihi_model.pes_wgt_exp = -2

    with Simulator(
            model,
            model=loihi_model,
            hardware_options={"allocator": RoundRobin()},
    ) as loihi_sim:
        loihi_sim.run(simtime)

    t = loihi_sim.trange()
    post_tmask = t > simtime - 0.1

    dec_tau = loihi_sim.model.decode_tau
    y = loihi_sim.data[probes["stim"]]
    y_dpre = nengo.Lowpass(dec_tau).filt(y)
    y_dpost = nengo.Lowpass(tau).combine(nengo.Lowpass(dec_tau)).filt(y_dpre)
    y_loihi = loihi_sim.data[probes["post"]]

    plt.plot(t, y_dpost, "k", label="target")
    plt.plot(t, y_loihi, "g", label="loihi")

    # --- fit output to scaled version of target output
    z_ref0 = y_dpost[post_tmask][:, 0]
    z_loihi = y_loihi[post_tmask]
    scale = np.linspace(0, 1, 50)
    E = np.abs(z_loihi - scale[:, None, None] * z_ref0[:, None])
    errors = E.mean(axis=1)  # average over time (errors has shape (scales, dims))
    for j in range(dims):
        errors_j = errors[:, j]
        i = np.argmin(errors_j)
        assert errors_j[i] < 0.1, ("Learning output for dim %d did not match "
                                   "any scaled version of the target output" %
                                   j)
        assert scale[i] > 0.25, "Learning output for dim %d is too small" % j
        assert scale[i] < 0.9, (
            "Learning output for dim %d is too large "
            "(weights or traces not clipping as expected)" % j)
Example #11
def test_deterministic_network_allocation(Simulator, seed):
    # test that we get the same simulation results across allocators.
    # the determinism of the allocation itself is covered by other unit tests.
    n_neurons = 64
    n_ensembles = 8
    tau = 0.1
    sim_t = 1.0

    with nengo.Network(seed=seed) as model:
        prev = nengo.Node(output=1)

        p = []
        for i in range(n_ensembles):
            ens = nengo.Ensemble(n_neurons, 1)
            nengo.Connection(prev, ens, synapse=tau)
            p.append(nengo.Probe(ens, synapse=tau))
            prev = ens

    with nengo.Simulator(model) as sim_ref:
        sim_ref.run(sim_t)

    allocation = [
        (1, OneToOne()),
        (1, RoundRobin(n_chips=1)),
        (3, RoundRobin(n_chips=3)),
        (8, RoundRobin(n_chips=8)),
    ]

    sim_prev = None
    for n_chips_used, allocator in allocation:
        with Simulator(model,
                       precompute=True,
                       hardware_options={'allocator': allocator}) as sim_loihi:
            sim_loihi.run(sim_t)

        assert n_chips_used == sim_loihi.sims["loihi"].board.n_chips
        for p_i in p:
            assert rms(sim_loihi.data[p_i] - sim_ref.data[p_i]) < 0.05
            if sim_prev is not None:
                assert np.allclose(sim_prev.data[p_i], sim_loihi.data[p_i])
        sim_prev = sim_loihi
Example #12
def test_snips_round_robin_unsupported(Simulator):
    with nengo.Network() as model:
        # an input is required; otherwise precompute is automatically
        # overridden to True (and then no snips are used)
        u = nengo.Node(0)
        x = nengo.Ensemble(1, 1)
        nengo.Connection(u, x)

    with pytest.raises(SimulationError, match="snips are not supported"):
        with Simulator(model,
                       precompute=False,
                       hardware_options={'allocator': RoundRobin(n_chips=8)}):
            pass
Example #13
def test_pes_error_clip(plt, seed, Simulator):
    dims = 2
    n_per_dim = 120
    tau = 0.01
    error_scale = 5.0  # scale up error signal so it clips
    simtime = 0.3
    model, probes = pes_network(
        n_per_dim,
        dims,
        seed,
        learn_synapse=tau,
        learning_rule_type=nengo.PES(learning_rate=1e-2 / error_scale),
        input_scale=np.array([1.0, -1.0]),
        error_scale=error_scale,
        period=simtime,
    )

    with pytest.warns(UserWarning, match=r".*PES error.*pes_error_scale.*"):
        with Simulator(
            model, hardware_options={"allocator": RoundRobin()}
        ) as loihi_sim:
            loihi_sim.run(simtime)

    t = loihi_sim.trange()
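    # note: with simtime = 0.3, this mask is True over the entire run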
    post_tmask = t > simtime - 1.0

    dec_tau = loihi_sim.model.decode_tau
    y = loihi_sim.data[probes["stim"]]
    y_dpre = nengo.Lowpass(dec_tau).filt(y)
    y_dpost = nengo.Lowpass(tau).combine(nengo.Lowpass(dec_tau)).filt(y_dpre)
    y_loihi = loihi_sim.data[probes["post"]]

    plt.plot(t, y_dpost, "k", label="target")
    plt.plot(t, y_loihi, "g", label="loihi")

    # --- assert that we've learned something, but not everything
    error = rms(y_loihi[post_tmask] - y_dpost[post_tmask]) / rms(
        y_dpost[post_tmask])
    assert error < 0.5
    assert error > 0.05
Example #14
def test_pes_deterministic(Simulator, seed, allclose):
    """Ensure that learning output is the same between runs"""
    # Make a network with lots of objects, so dictionary order has an effect
    n_errors = 3
    targets = np.linspace(-0.8, 0.95, n_errors)
    with nengo.Network(seed=seed) as model:
        pre_ea = nengo.networks.EnsembleArray(100, n_ensembles=n_errors)
        output = nengo.Node(size_in=n_errors)

        target = nengo.Node(targets)

        for i in range(n_errors):
            conn = nengo.Connection(
                pre_ea.ea_ensembles[i],
                output[i],
                learning_rule_type=nengo.PES(learning_rate=1e-2),
            )
            nengo.Connection(target[i], conn.learning_rule, transform=-1)
            nengo.Connection(output[i], conn.learning_rule)

        probe = nengo.Probe(output, synapse=0.005)

    # some random aspects (e.g. dictionary order) have only a few possible
    # combinations, so running more sims makes it less likely that we get
    # the same order by chance, if things are truly non-deterministic
    n_sims = 3
    simtime = 0.1
    sims = []
    for _ in range(n_sims):
        with Simulator(
            model, hardware_options={"allocator": RoundRobin()}
        ) as sim:
            sim.run(simtime)
        sims.append(sim)

    sim0 = sims[0]
    for sim in sims[1:]:
        assert allclose(sim.data[probe], sim0.data[probe])
Example #15
def test_conv2d_weights(channels_last, request, plt, seed, rng, allclose):
    def loihi_rates_n(neuron_type, x, gain, bias, dt):
        """Compute Loihi rates on higher dimensional inputs"""
        y = x.reshape((-1, x.shape[-1]))
        gain = np.asarray(gain)
        bias = np.asarray(bias)
        if gain.ndim == 0:
            gain = gain * np.ones(x.shape[-1])
        if bias.ndim == 0:
            bias = bias * np.ones(x.shape[-1])
        rates = loihi_rates(neuron_type, y, gain, bias, dt)
        return rates.reshape(x.shape)

    if channels_last:
        plt.saveas = None
        pytest.xfail("Blocked by CxBase cannot be > 256 bug")

    target = request.config.getoption("--target")

    pop_type = 32

    # load data
    with open(os.path.join(test_dir, "mnist10.pkl"), "rb") as f:
        test10 = pickle.load(f)

    test_x = test10[0][0].reshape((28, 28))
    test_x = test_x[3:24, 3:24]
    test_x = 1.999 * test_x - 0.999

    filters = Gabor(freq=Uniform(0.5, 1)).generate(8, (7, 7), rng=rng)
    sti, stj = 2, 2
    tau_rc = 0.02
    tau_ref = 0.002
    tau_s = 0.005
    dt = 0.001

    encode_type = nengo.SpikingRectifiedLinear()
    encode_gain = 1.0 / dt
    encode_bias = 0.0
    neuron_type = nengo.LIF(tau_rc=tau_rc, tau_ref=tau_ref)
    neuron_gain = 1.0
    neuron_bias = 1.0

    pres_time = 0.2

    # --- compute ideal outputs
    def conv_pm(x, kernel):
        y0 = scipy.signal.correlate2d(x[0], kernel, mode="valid")[::sti, ::stj]
        y1 = scipy.signal.correlate2d(x[1], kernel, mode="valid")[::sti, ::stj]
        return [y0, -y1]

    ref_out = np.array([test_x, -test_x])
    ref_out = loihi_rates_n(encode_type, ref_out, encode_gain, encode_bias, dt)
    ref_out = ref_out / encode_gain
    ref_out = np.array([conv_pm(ref_out, kernel) for kernel in filters])
    ref_out = ref_out.sum(axis=1)  # sum positive and negative parts
    ref_out = loihi_rates_n(neuron_type, ref_out, neuron_gain, neuron_bias, dt)

    # --- compute nengo_loihi outputs
    inp_biases = np.stack([test_x, -test_x], axis=-1 if channels_last else 0)
    inp_shape = nengo_transforms.ChannelShape(inp_biases.shape,
                                              channels_last=channels_last)

    kernel = np.array([filters, -filters])  # two channels, pos and neg
    kernel = np.transpose(kernel, (2, 3, 0, 1))
    conv2d_transform = nengo_transforms.Convolution(
        8,
        inp_shape,
        strides=(sti, stj),
        channels_last=channels_last,
        kernel_size=(7, 7),
        init=kernel,
    )

    out_size = ref_out.size
    nf, nyi, nyj = ref_out.shape
    assert out_size <= 1024

    model = Model()

    # input block
    inp = LoihiBlock(inp_shape.size, label="inp")
    model.add_block(inp)

    assert inp.n_neurons <= 1024
    inp.compartment.configure_relu()
    inp.compartment.bias[:] = inp_biases.ravel()

    inp_ax = Axon(np.prod(inp_shape.spatial_shape), label="inp_ax")
    inp_ax.set_compartment_axon_map(target_axons=conv.pixel_idxs(inp_shape),
                                    atoms=conv.channel_idxs(inp_shape))
    inp.add_axon(inp_ax)

    # conv block
    neurons = LoihiBlock(out_size, label="neurons")
    model.add_block(neurons)

    assert neurons.n_neurons <= 1024
    neurons.compartment.configure_lif(tau_rc=tau_rc, tau_ref=tau_ref, dt=dt)
    neurons.compartment.configure_filter(tau_s, dt=dt)
    neurons.compartment.bias[:] = neuron_bias

    synapse = Synapse(np.prod(inp_shape.spatial_shape), label="synapse")
    weights, indices, axon_to_weight_map, bases = conv.conv2d_loihi_weights(
        conv2d_transform)
    synapse.set_population_weights(weights,
                                   indices,
                                   axon_to_weight_map,
                                   bases,
                                   pop_type=pop_type)

    neurons.add_synapse(synapse)

    out_probe = LoihiProbe(target=neurons, key="spiked")
    model.add_probe(out_probe)

    inp_ax.target = synapse

    # simulation
    discretize_model(model)

    n_steps = int(pres_time / dt)
    if target == "loihi":
        with HardwareInterface(
                model,
                use_snips=False,
                seed=seed,
                allocator=RoundRobin(),
        ) as sim:
            sim.run_steps(n_steps)
            sim_out = sim.get_probe_output(out_probe)
    else:
        with EmulatorInterface(model, seed=seed) as sim:
            sim.run_steps(n_steps)
            sim_out = sim.get_probe_output(out_probe)

    sim_out = np.sum(sim_out, axis=0) / pres_time
    sim_out.shape = make_shape((nyi, nyj), nf, channels_last)
    if channels_last:
        sim_out = np.transpose(sim_out, (2, 0, 1))

    out_max = max(ref_out.max(), sim_out.max())

    # --- plot results
    rows = 2
    cols = 2

    ax = plt.subplot(rows, cols, 1)
    tile(filters, cols=8, ax=ax)

    ax = plt.subplot(rows, cols, 2)
    tile(ref_out, vmin=0, vmax=out_max, cols=8, ax=ax)

    ax = plt.subplot(rows, cols, 3)
    plt.hist(ref_out.ravel(), bins=31)
    plt.hist(sim_out.ravel(), bins=31)

    ax = plt.subplot(rows, cols, 4)
    # tile(sim_out, vmin=0, vmax=1, cols=8, ax=ax)
    tile(sim_out, vmin=0, vmax=out_max, cols=8, ax=ax)

    assert allclose(sim_out, ref_out, atol=12, rtol=1e-3)
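Example #15 uses make_shape (and Example #20 also make_channel_shape) to order spatial dimensions and channels; neither helper is defined in these excerpts. Plausible sketches, assuming they merely arrange a (rows, cols) shape and a channel count (the nengo_transforms.ChannelShape wrapper is a guess based on its use in Example #15):

def make_shape(spatial_shape, n_channels, channels_last):
    # full shape with channels last (e.g. (nyi, nyj, nf)) or channels first
    return (
        tuple(spatial_shape) + (n_channels,)
        if channels_last
        else (n_channels,) + tuple(spatial_shape)
    )


def make_channel_shape(spatial_shape, n_channels, channels_last):
    return nengo_transforms.ChannelShape(
        make_shape(spatial_shape, n_channels, channels_last),
        channels_last=channels_last,
    )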
Example #16
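    # (fragment: tail of a _basic_model-style helper; cf. the sketch after Example #2)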
    axon0 = Axon(1)
    input = LoihiInput()
    input.add_axon(axon0)
    model.add_input(input)

    synapse0 = Synapse(1)
    synapse0.set_full_weights([[1]])
    axon0.target = synapse0
    block0.add_synapse(synapse0)

    discretize_model(model)

    return model


@pytest.mark.parametrize("allocator", [OneToOne(), RoundRobin(n_chips=1)])
def test_one_to_one_allocator(allocator):
    # RoundRobin(n_chips=1) is equivalent to OneToOne()
    model = _basic_model()
    board = allocator(model)

    assert board.n_chips == 1
    assert board.n_cores_per_chip == [3]
    assert board.n_synapses_per_core == [[1, 1, 0]]

    chip = board.chips[0]
    assert chip.board is board
    assert chip.n_cores == 3

    assert chip.cores[0].chip is chip
    assert len(chip.cores[0].synapses) == 1
Example #17
def test_pes_comm_channel(dims, allclose, plt, seed, Simulator):
    n_per_dim = 300
    tau = 0.01
    simtime = 1.5
    model, probes = pes_network(
        n_per_dim,
        dims,
        seed,
        learn_synapse=tau,
        learning_rule_type=nengo.PES(learning_rate=1e-2),
        period=simtime / 2,
    )

    with nengo.Simulator(model) as nengo_sim:
        nengo_sim.run(simtime)

    with Simulator(
        model, hardware_options={"allocator": RoundRobin()}
    ) as loihi_sim:
        loihi_sim.run(simtime)

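    # "simreal" runs the emulator with real-valued (non-discretized) weights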
    with Simulator(model, target="simreal") as real_sim:
        real_sim.run(simtime)

    t = nengo_sim.trange()
    pre_tmask = t > 0.1
    post_tmask = t > simtime / 2

    dec_tau = loihi_sim.model.decode_tau
    y = nengo_sim.data[probes["stim"]]
    y_dpre = nengo.Lowpass(dec_tau).filt(y)
    y_dpost = nengo.Lowpass(tau).combine(nengo.Lowpass(dec_tau)).filt(y_dpre)
    y_nengo = nengo_sim.data[probes["post"]]
    y_loihi = loihi_sim.data[probes["post"]]
    y_real = real_sim.data[probes["post"]]

    plt.subplot(211)
    plt.plot(t, y_dpost, "k", label="target")
    plt.plot(t, y_nengo, "b", label="nengo")
    plt.plot(t, y_loihi, "g", label="loihi")
    plt.plot(t, y_real, "r:", label="real")
    plt.legend()

    plt.subplot(212)
    plt.plot(t[post_tmask], y_loihi[post_tmask] - y_dpost[post_tmask], "k")
    plt.plot(t[post_tmask], y_loihi[post_tmask] - y_nengo[post_tmask], "b")

    x_loihi = loihi_sim.data[probes["pre"]]
    assert allclose(x_loihi[pre_tmask],
                    y_dpre[pre_tmask],
                    atol=0.18,
                    rtol=0.05)

    assert allclose(y_loihi[post_tmask],
                    y_dpost[post_tmask],
                    atol=0.18,
                    rtol=0.05)
    assert allclose(y_loihi, y_nengo, atol=0.2, rtol=0.2)

    assert allclose(y_real[post_tmask],
                    y_dpost[post_tmask],
                    atol=0.18,
                    rtol=0.05)
    assert allclose(y_real, y_nengo, atol=0.2, rtol=0.2)
Example #18
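    # (fragment: tail of a _basic_model-style helper; cf. the sketch after Example #2)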
    axon0 = Axon(1)
    input = LoihiInput()
    input.add_axon(axon0)
    model.add_input(input)

    synapse0 = Synapse(1)
    synapse0.set_weights([[1]])
    axon0.target = synapse0
    blocks[0].add_synapse(synapse0)

    discretize_model(model)

    return model


@pytest.mark.parametrize("allocator", [Greedy(), RoundRobin()])
def test_basic(allocator):
    # RoundRobin is equivalent to Greedy when n_chips == 1
    n_blocks = 3
    model = _basic_model(n_blocks=n_blocks)
    board = allocator(model, n_chips=1)

    assert board.n_chips == 1
    assert board.n_cores_per_chip == [n_blocks]
    assert board.n_synapses_per_core == [[1] * n_blocks]
    assert len(board.inputs) == 1

    chip = board.chips[0]
    assert chip.board is board
    assert chip.n_cores == n_blocks
Example #19
    # (fragment begins mid-test)
    # ref_out was determined by running code known to work on the emulator
    if nc == 1:
        ref_out = np.array([[0.06, 0.02], [0.055, 0.], [0.0825, 0.0225],
                            [0.125, 0.04]])
    elif nc == 2:
        ref_out = np.array([[0.0975, 0.02], [0.0825, 0.02], [0.125, 0.055],
                            [0.2475, 0.0825]])
    assert allclose(sim_out[:, :, 0], ref_out, rtol=0, atol=1e-7)


@pytest.mark.skipif(nengo_transforms is None,
                    reason="Requires new nengo.transforms")
@pytest.mark.parametrize("channels_last", (True, False))
@pytest.mark.parametrize('hw_opts', [
    dict(),
    dict(allocator=RoundRobin(n_chips=2)),
])
def test_conv2d_weights(channels_last, hw_opts, request, plt, seed, rng,
                        allclose):
    def loihi_rates_n(neuron_type, x, gain, bias, dt):
        """Compute Loihi rates on higher dimensional inputs"""
        y = x.reshape(-1, x.shape[-1])
        gain = np.asarray(gain)
        bias = np.asarray(bias)
        if gain.ndim == 0:
            gain = gain * np.ones(x.shape[-1])
        if bias.ndim == 0:
            bias = bias * np.ones(x.shape[-1])
        rates = loihi_rates(neuron_type, y, gain, bias, dt)
        return rates.reshape(*x.shape)
Example #20
def test_conv_deepnet(
    channels_last,
    pop_type,
    precompute,
    Simulator,
    request,
    rng,
    seed,
    plt,
    allclose,
):
    """Run a convolutional network with two layers on the chip.

    Checks that network with block splitting on the target matches one without
    on the emulator.
    """
    # TODO: This case fails in NxSDK 0.9.0 but will be fixed in the next version.
    # Remove this check once the next version is released.
    if pop_type == 32:
        pytest.skip("Pop32 multichip test requires latest NxSDK")

    def set_partition(partition):
        os.environ["PARTITION"] = partition

    request.addfinalizer(lambda: set_partition(""))
    # multichip pop_type = 16 currently works only on the nahuku32 board
    if pop_type == 16:
        set_partition("nahuku32")

    def conv_layer(x,
                   input_shape,
                   array_init=None,
                   label=None,
                   conn_args=None,
                   **conv_args):
        conn_args = {} if conn_args is None else conn_args

        if array_init is not None:
            assert all(a not in conv_args
                       for a in ("init", "kernel_size", "n_filters"))
            assert array_init.ndim == 4
            conv_args["init"] = array_init
            conv_args["kernel_size"] = array_init.shape[:2]
            assert array_init.shape[2] == input_shape.n_channels
            conv_args["n_filters"] = array_init.shape[3]

        conv = nengo.Convolution(input_shape=input_shape, **conv_args)

        # add an ensemble to implement the activation function
        layer = nengo.Ensemble(conv.output_shape.size, 1, label=label)

        # connect the input object to the new layer
        conn = nengo.Connection(x, layer.neurons, transform=conv)

        return layer, conv, conn

    channels = 1
    n_filters0 = 1
    n_filters1 = 4
    n_filters2 = 4

    # load data
    with open(os.path.join(test_dir, "mnist10.pkl"), "rb") as f:
        test10 = pickle.load(f)

    test_x = test10[0][0].reshape(28, 28)  # range (0, 1)
    input_shape = make_channel_shape(test_x.shape, channels, channels_last)

    filters0 = np.ones((1, 1, channels, n_filters0))

    # use Gabor filters for first layer
    filters1 = Gabor(freq=Uniform(0.5, 1),
                     sigma_x=Choice([0.9]),
                     sigma_y=Choice([0.9])).generate(n_filters1, (7, 7),
                                                     rng=rng)
    assert n_filters0 == 1
    filters1 = filters1[None, :, :, :]  # single channel
    filters1 = np.transpose(filters1,
                            (2, 3, 0, 1))  # rows, cols, in_chan, out_chan

    # use random combinations of first-layer channels in 1x1 convolution
    filters2 = rng.uniform(-0.2, 1,
                           size=(n_filters1, n_filters2)).clip(0, None)
    filters2 *= 2 / filters2.sum(axis=0,
                                 keepdims=True)  # each filter sums to 2
    filters2 = filters2[None, None, :, :]  # rows, cols, in_chan, out_chan

    tau_s = 0.001
    max_rate = 100
    amp = 1 / max_rate
    f_split = 2 if pop_type == 32 else 4

    # use Loihi neuron type so Nengo sim mimics Loihi neuron effects
    neuron_type = LoihiSpikingRectifiedLinear(amplitude=amp)

    pres_time = 0.2

    with nengo.Network(seed=seed) as net:
        nengo_loihi.add_params(net)

        net.config[nengo.Ensemble].neuron_type = neuron_type
        net.config[nengo.Ensemble].max_rates = Choice([max_rate])
        net.config[nengo.Ensemble].intercepts = Choice([0])
        net.config[nengo.Connection].synapse = tau_s

        u = nengo.Node(test_x.ravel(), label="u")

        layer0, conv0, conn0 = conv_layer(
            u,
            input_shape=input_shape,
            array_init=filters0,
            strides=(1, 1),
            channels_last=channels_last,
            label="layer0",
            conn_args=dict(synapse=None),
        )
        net.config[layer0].on_chip = False

        layer1, conv1, conn1 = conv_layer(
            layer0.neurons,
            input_shape=conv0.output_shape,
            array_init=filters1,
            strides=(2, 2),
            channels_last=channels_last,
            label="layer1",
        )
        net.config[layer1].block_shape = nengo_loihi.BlockShape(
            make_shape((4, 4), f_split, channels_last), conv1)
        net.config[conn1].pop_type = pop_type

        layer2, conv2, conn2 = conv_layer(
            layer1.neurons,
            input_shape=conv1.output_shape,
            array_init=filters2,
            strides=(1, 1),
            channels_last=channels_last,
            label="layer2",
        )
        net.config[layer2].block_shape = nengo_loihi.BlockShape(
            make_shape((4, 4), f_split, channels_last), conv2)
        net.config[conn2].pop_type = pop_type

        output_p = nengo.Probe(layer2.neurons)
        output_shape = conv2.output_shape

    with nengo.Simulator(net, optimize=False) as sim_nengo:
        sim_nengo.run(pres_time)
        ref_out = (sim_nengo.data[output_p] > 0).sum(axis=0).reshape(
            output_shape.shape)

    with Simulator(net, target="sim") as sim_emu:
        sim_emu.run(pres_time)
        emu_out = (sim_emu.data[output_p] > 0).sum(axis=0).reshape(
            output_shape.shape)

    # TODO: Remove the if condition when configurable timeout parameter
    # is available in nxsdk
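    # run on hardware only if pop_type is 32 or the nahuku32 partition has an idle node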
    if (pop_type == 32 or
            os.popen("sinfo -h --partition=nahuku32").read().find("idle") > 0):
        with Simulator(
                net,
                precompute=precompute,
                hardware_options={
                    "allocator": RoundRobin(),
                    "snip_max_spikes_per_step": 800,
                },
        ) as sim_loihi:
            sim_loihi.run(pres_time)
            sim_out = ((sim_loihi.data[output_p] > 0).sum(axis=0).reshape(
                output_shape.shape))
    elif nengo_loihi.version.dev is None:
        pytest.fail(
            "Pop16 multichip test failed since Nahuku32 is unavailable")
    else:
        pytest.skip(
            "Pop16 multichip test skipped since Nahuku32 is unavailable")

    out_max = ref_out.max()
    ref_out = ref_out / out_max
    emu_out = emu_out / out_max
    sim_out = sim_out / out_max

    if channels_last:
        # channels first, to display channels in separate plots
        ref_out = np.transpose(ref_out, (2, 0, 1))
        emu_out = np.transpose(emu_out, (2, 0, 1))
        sim_out = np.transpose(sim_out, (2, 0, 1))

    # --- plot results
    rows = 2
    cols = 3

    ax = plt.subplot(rows, cols, 1)
    imshow(test_x, vmin=0, vmax=1, ax=ax)

    ax = plt.subplot(rows, cols, 2)
    tile(np.transpose(filters1, (2, 3, 0, 1))[0],
         rows=2,
         cols=2,
         grid=True,
         ax=ax)

    ax = plt.subplot(rows, cols, 3)
    plt.hist((ref_out.ravel(), emu_out.ravel(), sim_out.ravel()), bins=21)

    ax = plt.subplot(rows, cols, 4)
    tile(ref_out, rows=2, cols=2, grid=True, ax=ax)

    ax = plt.subplot(rows, cols, 5)
    tile(emu_out, rows=2, cols=2, grid=True, ax=ax)

    ax = plt.subplot(rows, cols, 6)
    tile(sim_out, rows=2, cols=2, grid=True, ax=ax)

    assert allclose(sim_out, ref_out, atol=0.15, rtol=1e-3)
    assert allclose(sim_out, emu_out, atol=1e-3, rtol=1e-3)