def test_validate_block():
    # too many compartments
    block = LoihiBlock(1200)
    assert block.compartment.n_compartments > 1024
    with pytest.raises(BuildError, match="Number of compartments"):
        validate_block(block)

    # too many input axons
    block = LoihiBlock(410)
    block.add_synapse(Synapse(5000))
    with pytest.raises(BuildError, match="Input axons"):
        validate_block(block)

    # too many output axons
    block = LoihiBlock(410)
    synapse = Synapse(2500)
    axon = Axon(5000)
    axon.target = synapse
    block.add_synapse(synapse)
    block.add_axon(axon)
    with pytest.raises(BuildError, match="Output axons"):
        validate_block(block)

    # too many synapse bits
    block = LoihiBlock(600)
    synapse = Synapse(500)
    synapse.set_weights(np.ones((500, 600)))
    axon = Axon(500)
    axon.target = synapse
    block.add_synapse(synapse)
    block.add_axon(axon)
    with pytest.raises(BuildError, match="synapse bits"):
        validate_block(block)
def _basic_model(n_blocks=2):
    model = Model()

    blocks = []
    for _ in range(n_blocks):
        block = LoihiBlock(1)
        block.compartment.configure_lif()
        model.add_block(block)
        blocks.append(block)

    for i in range(n_blocks - 1):
        axon = Axon(1)
        blocks[i].add_axon(axon)

        synapse = Synapse(1)
        synapse.set_weights([[1]])
        axon.target = synapse
        blocks[i + 1].add_synapse(synapse)

    axon0 = Axon(1)
    input = LoihiInput()
    input.add_axon(axon0)
    model.add_input(input)

    synapse0 = Synapse(1)
    synapse0.set_weights([[1]])
    axon0.target = synapse0
    blocks[0].add_synapse(synapse0)

    discretize_model(model)

    return model
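# Illustrative usage sketch (hypothetical helper, not part of the original
# suite): `_basic_model` returns an already-discretized model, so it can be
# run on the emulator directly. Assumes `EmulatorInterface` is imported as
# in the tests below.
def _example_run_basic_model():
    model = _basic_model(n_blocks=3)
    with EmulatorInterface(model) as emu:
        emu.run_steps(10)  # simulate 10 timesteps on the emulator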
def new_syn(tracing_mag=None):
    syn = Synapse(n_axons=1)
    syn.set_weights(np.array([[1]]))
    if tracing_mag is not None:
        syn.set_learning(tracing_mag=tracing_mag)
    core.add_synapse(syn)  # `core` is provided by the enclosing test scope
    return syn
def build_decode_neuron_encoders(model, ens, kind="decode_neuron_encoders"):
    """Build encoders accepting decode neuron input."""
    block = model.objs[ens.neurons]["in"]
    scaled_encoders = model.params[ens].scaled_encoders

    if kind == "node_encoders":
        encoders = model.node_neurons.get_post_encoders(scaled_encoders)
    elif kind == "decode_neuron_encoders":
        encoders = model.decode_neurons.get_post_encoders(scaled_encoders)

    synapse = Synapse(encoders.shape[0], label=kind)
    synapse.set_weights(encoders)
    block.add_synapse(synapse, name=kind)
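# Sketch (hypothetical helper) of how the synapse registered above is found
# again later: `add_synapse(..., name=kind)` stores it in `named_synapses`,
# which is the lookup `build_full_chip_connection` performs when wiring an
# axon to these encoders.
def _example_lookup_encoders(model, ens, kind="decode_neuron_encoders"):
    block = model.objs[ens.neurons]["in"]
    return block.named_synapses[kind]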
def test_multiple_get_probe_output():
    n_steps = 15
    n_axons = 3

    model = Model()

    input = SpikeInput(n_axons)
    for t in np.arange(1, n_steps + 1):
        input.add_spikes(t, np.arange(n_axons))  # send spikes to all axons
    model.add_input(input)

    block = LoihiBlock(1)
    block.compartment.configure_relu()
    block.compartment.configure_filter(0.1)
    model.add_block(block)

    synapse = Synapse(n_axons)
    synapse.set_weights(np.ones((n_axons, 1)))
    block.add_synapse(synapse)

    axon = Axon(n_axons)
    axon.target = synapse
    input.add_axon(axon)

    probe_u = LoihiProbe(target=block, key="current", synapse=Lowpass(0.005))
    model.add_probe(probe_u)
    probe_v = LoihiProbe(target=block, key="voltage", synapse=Lowpass(0.005))
    model.add_probe(probe_v)
    probe_s = LoihiProbe(target=block, key="spiked", synapse=Lowpass(0.005))
    model.add_probe(probe_s)

    discretize_model(model)

    # must set these after `discretize` to specify discretized values
    block.compartment.vmin = -(2**22) + 1
    block.compartment.vth[:] = VTH_MAX

    with EmulatorInterface(model) as emu:
        emu.run_steps(n_steps)
        first_u = emu.get_probe_output(probe_u)
        first_v = emu.get_probe_output(probe_v)
        first_s = emu.get_probe_output(probe_s)
        second_u = emu.get_probe_output(probe_u)
        second_v = emu.get_probe_output(probe_v)
        second_s = emu.get_probe_output(probe_s)

    assert np.all(first_u == second_u)
    assert np.all(first_v == second_v)
    assert np.all(first_s == second_s)
def test_builder_poptype_errors():
    pytest.importorskip("nxsdk")

    # Test error in build_synapse
    model = Model()
    block = LoihiBlock(1)
    block.compartment.configure_lif()
    model.add_block(block)

    synapse = Synapse(1)
    synapse.set_weights([[1]])
    synapse.pop_type = 8
    block.add_synapse(synapse)

    discretize_model(model)

    allocator = Greedy()  # one core per ensemble
    board = allocator(model, n_chips=1)

    with pytest.raises(ValueError, match="[Ss]ynapse.*[Uu]nrec.*pop.*type"):
        build_board(board)

    # Test error in collect_axons
    model = Model()
    block0 = LoihiBlock(1)
    block0.compartment.configure_lif()
    model.add_block(block0)

    block1 = LoihiBlock(1)
    block1.compartment.configure_lif()
    model.add_block(block1)

    axon = Axon(1)
    block0.add_axon(axon)

    synapse = Synapse(1)
    synapse.set_weights([[1]])
    synapse.pop_type = 8
    axon.target = synapse
    block1.add_synapse(synapse)

    discretize_model(model)

    board = allocator(model, n_chips=1)

    with pytest.raises(ValueError, match="[Aa]xon.*[Uu]nrec.*pop.*type"):
        build_board(board)
def get_block(self, weights, block_label=None, syn_label=None):
    gain = self.gain * self.dt
    bias = self.bias * self.dt

    n, d = weights.shape
    n_neurons = 2 * d * self.pairs_per_dim

    block = LoihiBlock(n_neurons, label=block_label)
    block.compartment.configure_relu(dt=self.dt)
    block.compartment.bias[:] = bias.repeat(d)

    syn = Synapse(n, label=syn_label)
    weights2 = []
    for ga, gb in gain.reshape(self.pairs_per_dim, 2):
        weights2.extend([scale_matrix(weights, ga), scale_matrix(weights, -gb)])
    weights2 = stack_matrices(weights2, order="h")
    syn.set_weights(weights2)
    block.add_synapse(syn)

    return block, syn
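# Pure-NumPy sketch (illustrative only) of the on/off weight interleaving in
# `get_block` for the dense, `pairs_per_dim == 1` case: each of the `d`
# output dimensions gets a positively scaled ("on") and a negatively scaled
# ("off") copy of the weights, giving `n_neurons = 2 * d * pairs_per_dim`
# columns. The gain values here are hypothetical.
def _example_on_off_weights():
    import numpy as np

    weights = np.ones((5, 2))  # shape (n, d)
    ga, gb = 100.0, 100.0  # one (on, off) gain pair
    weights2 = np.hstack([ga * weights, -gb * weights])
    assert weights2.shape == (5, 4)  # (n, 2 * d)
    return weights2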
def test_uv_overflow(n_axons, plt, allclose, monkeypatch):
    # TODO: Currently this is not testing the V overflow, since it is higher
    # and I haven't been able to figure out a way to make it overflow.
    nt = 15

    model = Model()

    # n_axons controls number of input spikes and thus amount of overflow
    input = SpikeInput(n_axons)
    for t in np.arange(1, nt + 1):
        # send spikes to all axons
        input.add_spikes(t, np.arange(n_axons), permanent=True)
    model.add_input(input)

    block = LoihiBlock(1)
    block.compartment.configure_relu()
    block.compartment.configure_filter(0.1)
    model.add_block(block)

    synapse = Synapse(n_axons)
    synapse.set_weights(np.ones((n_axons, 1)))
    block.add_synapse(synapse)

    axon = Axon(n_axons)
    axon.target = synapse
    input.add_axon(axon)

    probe_u = LoihiProbe(target=block, key="current")
    model.add_probe(probe_u)
    probe_v = LoihiProbe(target=block, key="voltage")
    model.add_probe(probe_v)
    probe_s = LoihiProbe(target=block, key="spiked")
    model.add_probe(probe_s)

    discretize_model(model)

    # must set these after `discretize` to specify discretized values
    block.compartment.vmin = -(2**22) + 1
    block.compartment.vth[:] = VTH_MAX

    assert EmulatorInterface.strict  # Tests should be run in strict mode
    monkeypatch.setattr(EmulatorInterface, "strict", False)
    overflow_var = "q0" if n_axons == 1000 else "current"
    with EmulatorInterface(model) as emu:
        with pytest.warns(UserWarning, match=f"Overflow in {overflow_var}"):
            emu.run_steps(nt)
        emu_u = emu.collect_probe_output(probe_u)
        emu_v = emu.collect_probe_output(probe_v)
        emu_s = emu.collect_probe_output(probe_s)

    with HardwareInterface(model, use_snips=False) as sim:
        sim.run_steps(nt)
        sim_u = sim.collect_probe_output(probe_u)
        sim_v = sim.collect_probe_output(probe_v)
        sim_s = sim.collect_probe_output(probe_s)
        sim_v[sim_s > 0] = 0  # since Loihi has placeholder voltage after spike

    plt.subplot(311)
    plt.plot(emu_u)
    plt.plot(sim_u)
    plt.subplot(312)
    plt.plot(emu_v)
    plt.plot(sim_v)
    plt.subplot(313)
    plt.plot(emu_s)
    plt.plot(sim_s)

    assert allclose(emu_u, sim_u)
    assert allclose(emu_v, sim_v)
def build_full_chip_connection(model, conn):  # noqa: C901
    """Build dense or sparse connections on-chip"""

    # Create random number generator
    rng = np.random.RandomState(model.seeds[conn])

    pre_obj = model.objs[conn.pre_obj]["out"]
    post_obj = model.objs[conn.post_obj]["in"]
    assert isinstance(pre_obj, (LoihiBlock, LoihiInput))
    assert isinstance(post_obj, (LoihiBlock, LoihiProbe))

    weights = None
    eval_points = None
    solver_info = None
    neuron_type = None
    pre_slice = conn.pre_slice
    post_slice = conn.post_slice

    # sample transform (if using a distribution), transform shape (out, in)
    transform = sample_transform(conn, rng=rng)

    tau_s = 0.0  # `synapse is None` gets mapped to `tau_s = 0.0`
    if isinstance(conn.synapse, nengo.synapses.Lowpass):
        tau_s = conn.synapse.tau
    elif conn.synapse is not None:
        raise NotImplementedError("Cannot handle non-Lowpass synapses")

    needs_decode_neurons = False
    target_encoders = None
    is_chip_process = isinstance(conn.pre_obj, Node) and isinstance(
        conn.pre_obj.output, ChipProcess
    )
    if isinstance(conn.pre_obj, Node) and not (
        isinstance(conn.pre_obj, ChipReceiveNeurons) or is_chip_process
    ):
        assert conn.pre_slice == slice(None)

        weights = expand_matrix(transform, shape=(conn.post.size_in, conn.pre.size_out))

        # input is on-off neuron encoded, so double/flip transform
        weights = stack_matrices([weights, scale_matrix(weights, -1)], order="h")
        target_encoders = "node_encoders"
    elif isinstance(conn.pre_obj, Ensemble) and isinstance(
        conn.pre_obj.neuron_type, nengo.Direct
    ):
        raise NotImplementedError()
    elif isinstance(conn.pre_obj, Ensemble):  # Normal decoded connection
        if isinstance(transform, scipy.sparse.spmatrix):
            raise BuildError(
                "Applying a sparse transform to a decoded connection is not supported"
            )

        eval_points, decoders, solver_info = model.build(
            conn.solver, conn, rng, transform
        )
        pre_slice = slice(None)  # taken care of in decoders

        if conn.solver.weights and not conn.solver.compositional:
            weights = decoders
        else:
            weights = multiply(transform, decoders)

        # the decoder solver assumes a spike height of 1/dt; that isn't the
        # case on loihi, so we need to undo that scaling
        weights = scale_matrix(weights, 1.0 / model.dt)

        neuron_type = conn.pre_obj.neuron_type

        if conn.solver.weights:
            # weight solvers only allowed on ensemble->ensemble connections
            assert isinstance(conn.post_obj, Ensemble)

            if conn.solver.compositional:
                encoders = model.params[conn.post_obj].scaled_encoders
                weights = multiply(encoders[:, post_slice], weights)

            # post slice already applied to encoders (either here or in
            # `build_decoders`), so don't apply later
            post_slice = slice(None)
        else:
            needs_decode_neurons = True
    elif isinstance(conn.pre_obj, (Neurons, ChipReceiveNeurons)) or is_chip_process:
        weights = expand_matrix(transform, shape=(conn.post.size_in, conn.pre.size_out))
        weights = scale_matrix(weights, 1.0 / model.dt)
        neuron_type = (
            None
            if is_chip_process
            else conn.pre_obj.neuron_type
            if isinstance(conn.pre_obj, ChipReceiveNeurons)
            else conn.pre_obj.ensemble.neuron_type
        )

        if isinstance(conn.post_obj, Ensemble):
            needs_decode_neurons = True
    else:
        raise NotImplementedError("Connection from type %r" % (type(conn.pre_obj),))

    if neuron_type is not None and hasattr(neuron_type, "amplitude"):
        weights = scale_matrix(weights, neuron_type.amplitude)

    # to proper dtype
    transform = transform.astype(nengo.rc.float_dtype)
    weights = weights.astype(nengo.rc.float_dtype)

    # loihi_weights has shape (in, out), to match the shape expected by
    # `Synapse.set_weights`
    loihi_weights = weights.T

    mid_obj = pre_obj
    mid_axon_inds = None
    post_tau = tau_s
    if needs_decode_neurons and not isinstance(conn.post_obj, Neurons):
        # --- add decode neurons
        assert weights.ndim == 2
        n, d = loihi_weights.shape

        if isinstance(post_obj, LoihiProbe):
            # use non-spiking decode neurons for voltage probing
            assert len(post_obj.target) == 0 or post_obj.target == [None]
            assert post_slice == slice(None)

            # use the same scaling as the ensemble does, to get good
            # decodes. Note that this assumes that the decoded value
            # is in the range -radius to radius, which is usually true.
            gain = np.array(1.0 / conn.pre_obj.radius, dtype=nengo.rc.float_dtype)

            decoder_block = LoihiBlock(2 * d, label="%s" % conn)
            decoder_block.compartment.configure_nonspiking(
                dt=model.dt, vth=model.vth_nonspiking
            )
            decoder_block.compartment.bias[:] = 0

            dec_syn = Synapse(n, label="probe_decoders")
            weights2 = stack_matrices(
                [scale_matrix(loihi_weights, gain), scale_matrix(loihi_weights, -gain)],
                order="h",
            )
            dec_syn.set_weights(weights2)
            decoder_block.add_synapse(dec_syn)
        else:
            # use spiking decode neurons for on-chip connection
            if isinstance(conn.post_obj, Ensemble):
                # loihi encoders don't include radius, so handle scaling here
                gain = np.array(1.0 / conn.post_obj.radius, dtype=nengo.rc.float_dtype)
                loihi_weights = scale_matrix(loihi_weights, gain)

            post_d = conn.post_obj.size_in
            post_inds = np.arange(post_d, dtype=np.int32)[post_slice]
            assert loihi_weights.shape[1] == len(post_inds) == conn.size_out
            mid_axon_inds = model.decode_neurons.get_post_inds(post_inds, post_d)

            target_encoders = "decode_neuron_encoders"
            decoder_block, dec_syn = model.decode_neurons.get_block(
                loihi_weights, block_label="%s" % conn, syn_label="decoders"
            )

        model.add_block(decoder_block)
        model.objs[conn]["decoded"] = decoder_block
        model.objs[conn]["decoders"] = dec_syn
        model.connection_decode_neurons[conn] = decoder_block

        # use tau_s for filter into decode neurons, decode_tau for filter out
        decoder_block.compartment.configure_filter(tau_s, dt=model.dt)
        post_tau = model.decode_tau

        target_axons = -np.ones(pre_obj.n_neurons, dtype=np.int32)
        target_axons[pre_slice] = np.arange(target_axons[pre_slice].size)
        pre_slice = slice(None)

        dec_ax0 = Axon(n, label="decoders")
        dec_ax0.target = dec_syn
        dec_ax0.set_compartment_axon_map(target_axons)
        pre_obj.add_axon(dec_ax0)
        model.objs[conn]["decode_axon"] = dec_ax0

        loihi_weights = None  # weights have now been handled

        if conn.learning_rule_type is not None:
            rule_type = conn.learning_rule_type
            if isinstance(rule_type, nengo.PES):
                if not isinstance(rule_type.pre_synapse, nengo.synapses.Lowpass):
                    raise ValidationError(
                        "Loihi only supports `Lowpass` pre-synapses for learning rules",
                        attr="pre_synapse",
                        obj=rule_type,
                    )

                pre_tau = rule_type.pre_synapse.tau
                float_tracing_tau = pre_tau / model.dt
                tracing_tau = int(round(float_tracing_tau))
                if not np.allclose(float_tracing_tau, tracing_tau):
                    warnings.warn(
                        f"PES learning rule `pre_synapse.tau` ({pre_tau}) is not an "
                        f"integer multiple of `dt` ({model.dt}). Rounding."
                    )

                # Nengo builder scales PES learning rate by `dt / n_neurons`
                n_neurons = (
                    conn.pre_obj.n_neurons
                    if isinstance(conn.pre_obj, Ensemble)
                    else conn.pre_obj.size_in
                )
                learning_rate = rule_type.learning_rate * model.dt / n_neurons

                # Account for scaling to put integer error in range [-127, 127]
                learning_rate /= model.pes_error_scale

                # Tracing mag set so that the magnitude of the pre trace
                # is independent of the pre tau. `dt` factor accounts for
                # Nengo's `dt` spike scaling. Where is the second `dt` from?
                # Maybe the fact that post decode neurons have `vth = 1/dt`?
                tracing_mag = -np.expm1(-1.0 / tracing_tau) / model.dt**2

                # learning weight exponent controls the maximum weight
                # magnitude/weight resolution
                wgt_exp = model.pes_wgt_exp

                dec_syn.set_learning(
                    learning_rate=learning_rate,
                    tracing_mag=tracing_mag,
                    tracing_tau=tracing_tau,
                    wgt_exp=wgt_exp,
                )
            else:
                raise NotImplementedError()

        mid_obj = decoder_block

    if isinstance(post_obj, LoihiProbe):
        assert post_obj.target == [None]
        assert post_slice == slice(None)
        post_obj.target[0] = mid_obj
        model.add_probe(post_obj)
    elif isinstance(conn.post_obj, Neurons):
        assert isinstance(post_obj, LoihiBlock)
        assert post_slice == slice(None)
        if loihi_weights is None:
            raise NotImplementedError("Need weights for connection to neurons")

        assert loihi_weights.ndim == 2
        n1, n2 = loihi_weights.shape
        assert post_obj.n_neurons == n2

        syn = Synapse(n1, label="neuron_weights")
        gain = model.params[conn.post_obj.ensemble].gain
        loihi_weights = scale_matrix(loihi_weights, gain)
        syn.set_weights(loihi_weights)
        post_obj.add_synapse(syn)
        model.objs[conn]["weights"] = syn

        target_axons = -np.ones(mid_obj.n_neurons, dtype=np.int32)
        target_axons[pre_slice] = np.arange(target_axons[pre_slice].size)
        assert target_axons[pre_slice].size == n1

        ax = Axon(mid_obj.n_neurons, label="neuron_weights")
        ax.target = syn
        ax.set_compartment_axon_map(target_axons)
        mid_obj.add_axon(ax)

        post_obj.compartment.configure_filter(post_tau, dt=model.dt)

        if conn.learning_rule_type is not None:
            raise NotImplementedError()
    elif isinstance(conn.post_obj, Ensemble) and conn.solver.weights:
        assert isinstance(post_obj, LoihiBlock)
        assert pre_slice == slice(None), "Not implemented"
        assert post_slice == slice(None)
        assert loihi_weights.ndim == 2
        n1, n2 = loihi_weights.shape
        assert post_obj.n_neurons == n2

        # loihi encoders don't include radius, so handle scaling here
        scale = np.array(1.0 / conn.post_obj.radius, dtype=nengo.rc.float_dtype)
        loihi_weights = scale_matrix(loihi_weights, scale)

        syn = Synapse(n1, label="%s::decoder_weights" % conn)
        syn.set_weights(loihi_weights)
        post_obj.add_synapse(syn)
        model.objs[conn]["weights"] = syn

        ax = Axon(n1, label="decoder_weights")
        ax.target = syn
        mid_obj.add_axon(ax)

        post_obj.compartment.configure_filter(post_tau, dt=model.dt)

        if conn.learning_rule_type is not None:
            raise NotImplementedError()
    elif isinstance(conn.post_obj, Ensemble):
        assert isinstance(post_obj, LoihiBlock)
        assert pre_slice == slice(None), "Not implemented"
        assert post_slice == slice(None)
        assert target_encoders is not None
        if target_encoders not in post_obj.named_synapses:
            build_decode_neuron_encoders(model, conn.post_obj, kind=target_encoders)

        mid_ax = Axon(mid_obj.n_neurons, label="encoders")
        mid_ax.target = post_obj.named_synapses[target_encoders]
        mid_ax.set_compartment_axon_map(mid_axon_inds)
        mid_obj.add_axon(mid_ax)
        model.objs[conn]["mid_axon"] = mid_ax

        post_obj.compartment.configure_filter(post_tau, dt=model.dt)
    else:
        # This includes Node, since nodes can't be targets on-chip
        raise NotImplementedError()

    model.params[conn] = BuiltConnection(
        eval_points=eval_points,
        solver_info=solver_info,
        transform=transform,  # sampled transform
        weights=weights,  # scaled weights (including decoders)
    )
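# Worked example (illustrative, values assumed) of the PES trace
# discretization in `build_full_chip_connection` above, for
# `pre_synapse.tau = 0.005` and `dt = 0.001`:
#
#     tracing_tau = round(0.005 / 0.001) = 5             # in timesteps
#     tracing_mag = -expm1(-1 / 5) / 0.001**2
#                 ≈ 0.181269 / 1e-6 ≈ 1.81e5
#
# The `-expm1(-1 / tracing_tau)` factor makes the peak trace magnitude
# independent of the chosen `pre_synapse.tau`, as noted in the comment above.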