def _basic_model():
    model = Model()

    block0 = LoihiBlock(1)
    block0.compartment.configure_lif()
    model.add_block(block0)

    block1 = LoihiBlock(1)
    block1.compartment.configure_lif()
    model.add_block(block1)

    axon1 = Axon(1)
    block0.add_axon(axon1)

    synapse1 = Synapse(1)
    synapse1.set_full_weights([[1]])
    axon1.target = synapse1
    block1.add_synapse(synapse1)

    axon0 = Axon(1)
    input = LoihiInput()
    input.add_axon(axon0)
    model.add_input(input)

    synapse0 = Synapse(1)
    synapse0.set_full_weights([[1]])
    axon0.target = synapse0
    block0.add_synapse(synapse0)

    discretize_model(model)

    return model
def _basic_model(n_blocks=2):
    model = Model()

    blocks = []
    for _ in range(n_blocks):
        block = LoihiBlock(1)
        block.compartment.configure_lif()
        model.add_block(block)
        blocks.append(block)

    for i in range(n_blocks - 1):
        axon = Axon(1)
        blocks[i].add_axon(axon)

        synapse = Synapse(1)
        synapse.set_weights([[1]])
        axon.target = synapse
        blocks[i + 1].add_synapse(synapse)

    axon0 = Axon(1)
    input = LoihiInput()
    input.add_axon(axon0)
    model.add_input(input)

    synapse0 = Synapse(1)
    synapse0.set_weights([[1]])
    axon0.target = synapse0
    blocks[0].add_synapse(synapse0)

    discretize_model(model)

    return model
def test_negative_cxbase(request, seed):
    n_axons = 3

    model = Model()

    input = SpikeInput(n_axons)
    input.add_spikes(1, list(range(n_axons)))
    model.add_input(input)

    axon = Axon(n_axons)
    input.add_axon(axon)

    block = LoihiBlock(3)
    block.compartment.configure_relu()
    model.add_block(block)

    synapse = Synapse(n_axons)
    weights = [0.1, 0.1, 0.1]
    indices = [0, 1, 2]
    axon_to_weight_map = list(range(n_axons))
    cx_bases = [0, 1, -1]
    synapse.set_population_weights(
        weights, indices, axon_to_weight_map, cx_bases, pop_type=32)
    axon.target = synapse
    block.add_synapse(synapse)

    probe = Probe(target=block, key='voltage')
    block.add_probe(probe)

    discretize_model(model)

    n_steps = 2
    if request.config.getoption("--target") == 'loihi':
        with HardwareInterface(model, use_snips=False, seed=seed) as sim:
            sim.run_steps(n_steps)
            y = sim.get_probe_output(probe)
    else:
        with EmulatorInterface(model, seed=seed) as sim:
            sim.run_steps(n_steps)
            y = sim.get_probe_output(probe)

    # Compartments 0 and 2 should change from axons 0 and 1.
    # Axon 2 should have no effect, and not change compartment 1 (the sum of
    # its cx_base and index), or other compartments (e.g. 2 if cx_base ignored)
    assert np.allclose(y[1, 1], 0), "Third axon not ignored"
    assert np.allclose(y[1, 0], y[1, 2]), "Third axon targeting another"
    assert not np.allclose(y[1], y[0]), "Voltage not changing"
def test_multiple_get_probe_output():
    n_steps = 15
    n_axons = 3

    model = Model()

    # n_axons controls number of input spikes and thus amount of overflow
    input = SpikeInput(n_axons)
    for t in np.arange(1, n_steps + 1):
        input.add_spikes(t, np.arange(n_axons))  # send spikes to all axons
    model.add_input(input)

    block = LoihiBlock(1)
    block.compartment.configure_relu()
    block.compartment.configure_filter(0.1)
    model.add_block(block)

    synapse = Synapse(n_axons)
    synapse.set_weights(np.ones((n_axons, 1)))
    block.add_synapse(synapse)

    axon = Axon(n_axons)
    axon.target = synapse
    input.add_axon(axon)

    probe_u = LoihiProbe(target=block, key="current", synapse=Lowpass(0.005))
    model.add_probe(probe_u)
    probe_v = LoihiProbe(target=block, key="voltage", synapse=Lowpass(0.005))
    model.add_probe(probe_v)
    probe_s = LoihiProbe(target=block, key="spiked", synapse=Lowpass(0.005))
    model.add_probe(probe_s)

    discretize_model(model)

    # must set these after `discretize` to specify discretized values
    block.compartment.vmin = -(2**22) + 1
    block.compartment.vth[:] = VTH_MAX

    with EmulatorInterface(model) as emu:
        emu.run_steps(n_steps)
        first_u = emu.get_probe_output(probe_u)
        first_v = emu.get_probe_output(probe_v)
        first_s = emu.get_probe_output(probe_s)
        second_u = emu.get_probe_output(probe_u)
        second_v = emu.get_probe_output(probe_v)
        second_s = emu.get_probe_output(probe_s)

    assert np.all(first_u == second_u)
    assert np.all(first_v == second_v)
    assert np.all(first_s == second_s)
def test_strict_mode(strict, monkeypatch):
    # Tests should be run in strict mode
    assert EmulatorInterface.strict

    model = Model()
    model.add_block(LoihiBlock(1))

    monkeypatch.setattr(EmulatorInterface, "strict", strict)
    emu = EmulatorInterface(model)
    assert emu.strict == strict

    if strict:
        check = pytest.raises(SimulationError, match="Error in emulator")
    else:
        check = pytest.warns(UserWarning)

    with check:
        emu.compartment.error("Error in emulator")
def test_utilization():
    comp_fracs = [0.9, 0.2, 0.35]

    model = Model()
    for comp_frac in comp_fracs:
        n_compartments = int(round(comp_frac * MAX_COMPARTMENTS))
        block = LoihiBlock(n_compartments)
        block.compartment.configure_relu()
        model.add_block(block)

        util = block.utilization()
        assert np.allclose(
            util["compartments"],
            (n_compartments, MAX_COMPARTMENTS),
            rtol=0,
            atol=0.001,
        )

    lines = model.utilization_summary()
    assert len(lines) == len(comp_fracs) + 1
    assert lines[-1].startswith("Average")
def test_builder_poptype_errors():
    pytest.importorskip("nxsdk")

    # Test error in build_synapse
    model = Model()
    block = LoihiBlock(1)
    block.compartment.configure_lif()
    model.add_block(block)

    synapse = Synapse(1)
    synapse.set_weights([[1]])
    synapse.pop_type = 8
    block.add_synapse(synapse)

    discretize_model(model)

    allocator = Greedy()  # one core per ensemble
    board = allocator(model, n_chips=1)

    with pytest.raises(ValueError, match="[Ss]ynapse.*[Uu]nrec.*pop.*type"):
        build_board(board)

    # Test error in collect_axons
    model = Model()
    block0 = LoihiBlock(1)
    block0.compartment.configure_lif()
    model.add_block(block0)

    block1 = LoihiBlock(1)
    block1.compartment.configure_lif()
    model.add_block(block1)

    axon = Axon(1)
    block0.add_axon(axon)

    synapse = Synapse(1)
    synapse.set_weights([[1]])
    synapse.pop_type = 8
    axon.target = synapse
    block1.add_synapse(synapse)

    discretize_model(model)

    board = allocator(model, n_chips=1)

    with pytest.raises(ValueError, match="[Aa]xon.*[Uu]nrec.*pop.*type"):
        build_board(board)
def test_uv_overflow(n_axons, plt, allclose, monkeypatch):
    # TODO: Currently this is not testing the V overflow, since it is higher
    # and I haven't been able to figure out a way to make it overflow.
    nt = 15

    model = Model()

    # n_axons controls number of input spikes and thus amount of overflow
    input = SpikeInput(n_axons)
    for t in np.arange(1, nt + 1):
        # send spikes to all axons
        input.add_spikes(t, np.arange(n_axons), permanent=True)
    model.add_input(input)

    block = LoihiBlock(1)
    block.compartment.configure_relu()
    block.compartment.configure_filter(0.1)
    model.add_block(block)

    synapse = Synapse(n_axons)
    synapse.set_weights(np.ones((n_axons, 1)))
    block.add_synapse(synapse)

    axon = Axon(n_axons)
    axon.target = synapse
    input.add_axon(axon)

    probe_u = LoihiProbe(target=block, key="current")
    model.add_probe(probe_u)
    probe_v = LoihiProbe(target=block, key="voltage")
    model.add_probe(probe_v)
    probe_s = LoihiProbe(target=block, key="spiked")
    model.add_probe(probe_s)

    discretize_model(model)

    # must set these after `discretize` to specify discretized values
    block.compartment.vmin = -(2**22) + 1
    block.compartment.vth[:] = VTH_MAX

    assert EmulatorInterface.strict  # Tests should be run in strict mode
    monkeypatch.setattr(EmulatorInterface, "strict", False)
    overflow_var = "q0" if n_axons == 1000 else "current"

    with EmulatorInterface(model) as emu:
        with pytest.warns(UserWarning, match=f"Overflow in {overflow_var}"):
            emu.run_steps(nt)
        emu_u = emu.collect_probe_output(probe_u)
        emu_v = emu.collect_probe_output(probe_v)
        emu_s = emu.collect_probe_output(probe_s)

    with HardwareInterface(model, use_snips=False) as sim:
        sim.run_steps(nt)
        sim_u = sim.collect_probe_output(probe_u)
        sim_v = sim.collect_probe_output(probe_v)
        sim_s = sim.collect_probe_output(probe_s)
        sim_v[sim_s > 0] = 0  # since Loihi has placeholder voltage after spike

    plt.subplot(311)
    plt.plot(emu_u)
    plt.plot(sim_u)

    plt.subplot(312)
    plt.plot(emu_v)
    plt.plot(sim_v)

    plt.subplot(313)
    plt.plot(emu_s)
    plt.plot(sim_s)

    assert allclose(emu_u, sim_u)
    assert allclose(emu_v, sim_v)
def test_pop_tiny(pop_type, channels_last, nc, request, plt, seed, allclose):
    tau_rc = 0.02
    tau_ref = 0.001
    tau_s = 0.0
    dt = 0.001

    neuron_bias = 1.

    pres_time = 0.4

    sti, stj = 1, 1

    if nc == 1:
        filters = np.array([[-0.5, 2., -0.25],
                            [-0.75, 2., -1.0],
                            [-0.5, 3., -0.5],
                            [-1.0, 6., -0.25]]).reshape(1, 4, 1, 3)

        inp_biases = np.array([[1, 5, 1], [2, 1, 2]])
        inp_biases = inp_biases[:, :, None]
    elif nc == 2:
        filters = np.array([[[-0.5, 2., -0.2],
                             [-0.7, 2., -1.0],
                             [-0.5, 3., -0.5],
                             [-1.0, 6., -0.2]],
                            [[-1.0, 2., -1.0],
                             [-0.5, 2., -0.5],
                             [-0.8, 3., -0.2],
                             [-1.0, 4., -0.2]]]).reshape(2, 4, 1, 3)

        inp_biases = np.array([[[1, 5, 1], [2, 1, 2]],
                               [[0, 3, 1], [4, 2, 1]]])
        inp_biases = np.transpose(inp_biases, (1, 2, 0))

    # rearrange to (kernel_rows, kernel_cols, in_channels, out_channels)
    filters = np.transpose(filters, (2, 3, 0, 1))

    inp_biases = inp_biases / (inp_biases.max() + 0.001)

    # --- compute nengo_loihi outputs
    ni, nj, nk = inp_biases.shape
    si, sj, nc, nf = filters.shape
    nij = ni * nj
    nyi = 1 + (ni - si) // sti
    nyj = 1 + (nj - sj) // stj
    out_size = nyi * nyj * nf
    assert out_size <= 1024

    model = Model()

    # input block
    inp = LoihiBlock(ni * nj * nk, label='inp')
    assert inp.n_neurons <= 1024
    inp.compartment.configure_relu()
    inp.compartment.bias[:] = inp_biases.ravel()

    inp_ax = Axon(nij, label='inp_ax')

    # we always compute the pixel/channel idxs with channels_last=True
    # (not sure why?), and then set it to the correct value afterwards
    inp_shape = nengo_transforms.ChannelShape((ni, nj, nk), channels_last=True)
    inp_ax.set_compartment_axon_map(target_axons=conv.pixel_idxs(inp_shape),
                                    atoms=conv.channel_idxs(inp_shape))
    inp_shape.shape = (ni, nj, nk) if channels_last else (nk, ni, nj)
    inp_shape.channels_last = channels_last

    inp.add_axon(inp_ax)

    model.add_block(inp)

    # conv block
    neurons = LoihiBlock(out_size, label='neurons')
    assert neurons.n_neurons <= 1024
    neurons.compartment.configure_lif(tau_rc=tau_rc, tau_ref=tau_ref, dt=dt)
    neurons.compartment.configure_filter(tau_s, dt=dt)
    neurons.compartment.bias[:] = neuron_bias

    synapse = Synapse(np.prod(inp_shape.spatial_shape), label='synapse')
    conv2d_transform = nengo_transforms.Convolution(
        nf, inp_shape, strides=(sti, stj), channels_last=channels_last,
        init=filters, kernel_size=(1, 3))
    weights, indices, axon_to_weight_map, bases = conv.conv2d_loihi_weights(
        conv2d_transform)
    synapse.set_population_weights(
        weights, indices, axon_to_weight_map, bases, pop_type=pop_type)
    neurons.add_synapse(synapse)

    out_probe = Probe(target=neurons, key='spiked')
    neurons.add_probe(out_probe)

    inp_ax.target = synapse
    model.add_block(neurons)

    # simulation
    discretize_model(model)

    n_steps = int(pres_time / dt)
    target = request.config.getoption("--target")
    if target == 'loihi':
        with HardwareInterface(model, use_snips=False, seed=seed) as sim:
            sim.run_steps(n_steps)
            sim_out = sim.get_probe_output(out_probe)
    else:
        with EmulatorInterface(model, seed=seed) as sim:
            sim.run_steps(n_steps)
            sim_out = sim.get_probe_output(out_probe)

    sim_out = np.sum(sim_out, axis=0) * (dt / pres_time)
    if channels_last:
        sim_out.shape = (nyi, nyj, nf)
        sim_out = np.transpose(sim_out, (2, 0, 1))
    else:
        sim_out.shape = (nf, nyi, nyj)

    out_max = sim_out.max()

    # --- plot results
    rows = 1
    cols = 2

    ax = plt.subplot(rows, cols, 1)
    plt.hist(sim_out.ravel(), bins=11)

    ax = plt.subplot(rows, cols, 2)
    tile(sim_out, vmin=0, vmax=out_max, grid=True, ax=ax)

    # ref_out determined by emulator running code known to work
    if nc == 1:
        ref_out = np.array([[0.06, 0.02],
                            [0.055, 0.],
                            [0.0825, 0.0225],
                            [0.125, 0.04]])
    elif nc == 2:
        ref_out = np.array([[0.0975, 0.02],
                            [0.0825, 0.02],
                            [0.125, 0.055],
                            [0.2475, 0.0825]])
    assert allclose(sim_out[:, :, 0], ref_out, rtol=0, atol=1e-7)
def test_conv2d_weights(channels_last, hw_opts, request, plt, seed, rng, allclose):
    def loihi_rates_n(neuron_type, x, gain, bias, dt):
        """Compute Loihi rates on higher dimensional inputs"""
        y = x.reshape(-1, x.shape[-1])
        gain = np.asarray(gain)
        bias = np.asarray(bias)
        if gain.ndim == 0:
            gain = gain * np.ones(x.shape[-1])
        if bias.ndim == 0:
            bias = bias * np.ones(x.shape[-1])
        rates = loihi_rates(neuron_type, y, gain, bias, dt)
        return rates.reshape(*x.shape)

    if channels_last:
        plt.saveas = None
        pytest.xfail("Blocked by CxBase cannot be > 256 bug")

    target = request.config.getoption("--target")
    if target != 'loihi' and len(hw_opts) > 0:
        pytest.skip("Hardware options only available on hardware")

    pop_type = 32

    # load data
    with open(os.path.join(test_dir, 'mnist10.pkl'), 'rb') as f:
        test10 = pickle.load(f)

    test_x = test10[0][0].reshape(28, 28)
    test_x = test_x[3:24, 3:24]
    test_x = 1.999 * test_x - 0.999

    filters = Gabor(freq=Uniform(0.5, 1)).generate(8, (7, 7), rng=rng)
    sti, stj = 2, 2
    tau_rc = 0.02
    tau_ref = 0.002
    tau_s = 0.005
    dt = 0.001

    encode_type = nengo.SpikingRectifiedLinear()
    encode_gain = 1. / dt
    encode_bias = 0.
    neuron_type = nengo.LIF(tau_rc=tau_rc, tau_ref=tau_ref)
    neuron_gain = 1.
    neuron_bias = 1.

    pres_time = 0.2

    # --- compute ideal outputs
    def conv_pm(x, kernel):
        y0 = scipy.signal.correlate2d(x[0], kernel, mode='valid')[::sti, ::stj]
        y1 = scipy.signal.correlate2d(x[1], kernel, mode='valid')[::sti, ::stj]
        return [y0, -y1]

    ref_out = np.array([test_x, -test_x])
    ref_out = loihi_rates_n(encode_type, ref_out, encode_gain, encode_bias, dt)
    ref_out = ref_out / encode_gain
    ref_out = np.array([conv_pm(ref_out, kernel) for kernel in filters])
    ref_out = ref_out.sum(axis=1)  # sum positive and negative parts
    ref_out = loihi_rates_n(neuron_type, ref_out, neuron_gain, neuron_bias, dt)

    # --- compute nengo_loihi outputs
    inp_biases = np.stack([test_x, -test_x], axis=-1 if channels_last else 0)
    inp_shape = nengo_transforms.ChannelShape(
        inp_biases.shape, channels_last=channels_last)

    kernel = np.array([filters, -filters])  # two channels, pos and neg
    kernel = np.transpose(kernel, (2, 3, 0, 1))
    conv2d_transform = nengo_transforms.Convolution(
        8, inp_shape, strides=(sti, stj), channels_last=channels_last,
        kernel_size=(7, 7), init=kernel)

    out_size = ref_out.size
    nf, nyi, nyj = ref_out.shape
    assert out_size <= 1024

    model = Model()

    # input block
    inp = LoihiBlock(inp_shape.size, label='inp')
    assert inp.n_neurons <= 1024
    inp.compartment.configure_relu()
    inp.compartment.bias[:] = inp_biases.ravel()

    inp_ax = Axon(np.prod(inp_shape.spatial_shape), label='inp_ax')
    inp_ax.set_compartment_axon_map(target_axons=conv.pixel_idxs(inp_shape),
                                    atoms=conv.channel_idxs(inp_shape))
    inp.add_axon(inp_ax)

    model.add_block(inp)

    # conv block
    neurons = LoihiBlock(out_size, label='neurons')
    assert neurons.n_neurons <= 1024
    neurons.compartment.configure_lif(tau_rc=tau_rc, tau_ref=tau_ref, dt=dt)
    neurons.compartment.configure_filter(tau_s, dt=dt)
    neurons.compartment.bias[:] = neuron_bias

    synapse = Synapse(np.prod(inp_shape.spatial_shape), label='synapse')
    weights, indices, axon_to_weight_map, bases = conv.conv2d_loihi_weights(
        conv2d_transform)
    synapse.set_population_weights(
        weights, indices, axon_to_weight_map, bases, pop_type=pop_type)
    neurons.add_synapse(synapse)

    out_probe = Probe(target=neurons, key='spiked')
    neurons.add_probe(out_probe)

    inp_ax.target = synapse
    model.add_block(neurons)

    # simulation
    discretize_model(model)

    n_steps = int(pres_time / dt)
    if target == 'loihi':
        with HardwareInterface(model, use_snips=False, seed=seed,
                               **hw_opts) as sim:
            sim.run_steps(n_steps)
            sim_out = sim.get_probe_output(out_probe)
    else:
        with EmulatorInterface(model, seed=seed) as sim:
            sim.run_steps(n_steps)
            sim_out = sim.get_probe_output(out_probe)

    sim_out = np.sum(sim_out, axis=0) / pres_time
    if channels_last:
        sim_out.shape = (nyi, nyj, nf)
        sim_out = np.transpose(sim_out, (2, 0, 1))
    else:
        sim_out.shape = (nf, nyi, nyj)

    out_max = max(ref_out.max(), sim_out.max())

    # --- plot results
    rows = 2
    cols = 2

    ax = plt.subplot(rows, cols, 1)
    tile(filters, cols=8, ax=ax)

    ax = plt.subplot(rows, cols, 2)
    tile(ref_out, vmin=0, vmax=out_max, cols=8, ax=ax)

    ax = plt.subplot(rows, cols, 3)
    plt.hist(ref_out.ravel(), bins=31)
    plt.hist(sim_out.ravel(), bins=31)

    ax = plt.subplot(rows, cols, 4)
    # tile(sim_out, vmin=0, vmax=1, cols=8, ax=ax)
    tile(sim_out, vmin=0, vmax=out_max, cols=8, ax=ax)

    assert allclose(sim_out, ref_out, atol=10, rtol=1e-3)
def test_population_input(request, allclose):
    target = request.config.getoption("--target")
    dt = 0.001

    n_inputs = 3
    n_axons = 1
    n_cx = 2

    steps = 6
    spike_times_inds = [(1, [0]), (3, [1]), (5, [2])]

    model = Model()

    input = SpikeInput(n_inputs)
    model.add_input(input)
    spikes = [(input, ti, inds) for ti, inds in spike_times_inds]

    input_axon = Axon(n_axons)
    axon_map = np.zeros(n_inputs, dtype=int)
    atoms = np.arange(n_inputs)
    input_axon.set_axon_map(axon_map, atoms)
    input.add_axon(input_axon)

    block = LoihiBlock(n_cx)
    block.compartment.configure_lif(tau_rc=0., tau_ref=0., dt=dt)
    block.compartment.configure_filter(0, dt=dt)
    model.add_block(block)

    synapse = Synapse(n_axons)
    weights = 0.1 * np.array([[[1, 2], [2, 3], [4, 5]]], dtype=float)
    indices = np.array([[[0, 1], [0, 1], [0, 1]]], dtype=int)
    axon_to_weight_map = np.zeros(n_axons, dtype=int)
    cx_bases = np.zeros(n_axons, dtype=int)
    synapse.set_population_weights(
        weights, indices, axon_to_weight_map, cx_bases, pop_type=32)
    block.add_synapse(synapse)
    input_axon.target = synapse

    probe = Probe(target=block, key='voltage')
    block.add_probe(probe)

    discretize_model(model)

    if target == 'loihi':
        with HardwareInterface(model, use_snips=True) as sim:
            sim.run_steps(steps, blocking=False)
            for ti in range(1, steps + 1):
                spikes_i = [spike for spike in spikes if spike[1] == ti]
                sim.host2chip(spikes=spikes_i, errors=[])
                sim.chip2host(probes_receivers={})

            y = sim.get_probe_output(probe)
    else:
        for inp, ti, inds in spikes:
            inp.add_spikes(ti, inds)

        with EmulatorInterface(model) as sim:
            sim.run_steps(steps)
            y = sim.get_probe_output(probe)

    vth = block.compartment.vth[0]
    assert (block.compartment.vth == vth).all()
    z = y / vth
    assert allclose(z[[1, 3, 5]], weights[0], atol=4e-2, rtol=0)
def test_simulator_noise(exp, request, plt, seed, allclose):
    # TODO: test that the mean falls within a number of standard errors
    # of the expected mean, and that non-zero offsets work correctly.
    # Currently, there is an unexpected negative bias for small noise
    # exponents, apparently because there is a probability of generating
    # the shifted equivalent of -128, whereas with e.g. exp = 7 all the
    # generated numbers fall in [-127, 127].
    offset = 0

    target = request.config.getoption("--target")
    n_cx = 1000

    model = Model()
    block = LoihiBlock(n_cx)
    block.compartment.configure_relu()
    block.compartment.vmin = -1

    block.compartment.enableNoise[:] = 1
    block.compartment.noiseExp0 = exp
    block.compartment.noiseMantOffset0 = offset
    block.compartment.noiseAtDendOrVm = 1

    probe = Probe(target=block, key='voltage')
    block.add_probe(probe)
    model.add_block(block)

    discretize_model(model)

    exp2 = block.compartment.noiseExp0
    offset2 = block.compartment.noiseMantOffset0

    n_steps = 100
    if target == 'loihi':
        with HardwareInterface(model, use_snips=False, seed=seed) as sim:
            sim.run_steps(n_steps)
            y = sim.get_probe_output(probe)
    else:
        with EmulatorInterface(model, seed=seed) as sim:
            sim.run_steps(n_steps)
            y = sim.get_probe_output(probe)

    t = np.arange(1, n_steps + 1)
    bias = offset2 * 2.**(exp2 - 1)
    std = 2.**exp2 / np.sqrt(3)  # divide by sqrt(3) for std of uniform -1..1
    rmean = t * bias
    rstd = np.sqrt(t) * std
    rerr = rstd / np.sqrt(n_cx)
    ymean = y.mean(axis=1)
    ystd = y.std(axis=1)
    diffs = np.diff(np.vstack([np.zeros_like(y[0]), y]), axis=0)

    plt.subplot(311)
    plt.hist(diffs.ravel(), bins=256)

    plt.subplot(312)
    plt.plot(rmean, 'k')
    plt.plot(rmean + 3 * rerr, 'k--')
    plt.plot(rmean - 3 * rerr, 'k--')
    plt.plot(ymean)
    plt.title('mean')

    plt.subplot(313)
    plt.plot(rstd, 'k')
    plt.plot(ystd)
    plt.title('std')

    assert allclose(ystd, rstd, rtol=0.1, atol=1)
def test_one_to_one_allocator_big_block_error():
    model = Model()
    model.add_block(LoihiBlock(1050))

    with pytest.raises(ValidationError, match="Segment does not fit"):
        OneToOne()(model)
def test_big_block_error():
    model = Model()
    model.add_block(LoihiBlock(1050))

    with pytest.raises(ValidationError, match="Segment does not fit"):
        Greedy()(model, n_chips=1)