# The snippets below are drawn from nengo-loihi builder and test code; they
# assume the usual module-level imports (numpy as np, nengo, pytest, scipy,
# and the nengo_loihi block/builder classes referenced by name).


def build_decode_neuron_encoders(model, ens, kind='decode_neuron_encoders'):
    """Build encoders accepting decode neuron input."""
    block = model.objs[ens.neurons]['in']
    scaled_encoders = model.params[ens].scaled_encoders

    if kind == 'node_encoders':
        encoders = model.node_neurons.get_post_encoders(scaled_encoders)
    elif kind == 'decode_neuron_encoders':
        encoders = model.decode_neurons.get_post_encoders(scaled_encoders)

    synapse = Synapse(encoders.shape[0], label=kind)
    synapse.set_full_weights(encoders)
    block.add_synapse(synapse, name=kind)
def _basic_model(n_blocks=2):
    model = Model()

    blocks = []
    for _ in range(n_blocks):
        block = LoihiBlock(1)
        block.compartment.configure_lif()
        model.add_block(block)
        blocks.append(block)

    for i in range(n_blocks - 1):
        axon = Axon(1)
        blocks[i].add_axon(axon)

        synapse = Synapse(1)
        synapse.set_weights([[1]])
        axon.target = synapse
        blocks[i + 1].add_synapse(synapse)

    axon0 = Axon(1)
    input = LoihiInput()
    input.add_axon(axon0)
    model.add_input(input)

    synapse0 = Synapse(1)
    synapse0.set_weights([[1]])
    axon0.target = synapse0
    blocks[0].add_synapse(synapse0)

    discretize_model(model)

    return model
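def _example_run_basic_model():
    # Hedged usage sketch for the `_basic_model` variant above: run the block
    # chain on the emulator for a few steps, reusing the `EmulatorInterface`
    # pattern from the tests in this file. No probes are attached, so this
    # only checks that the model builds and steps without error.
    model = _basic_model(n_blocks=3)
    with EmulatorInterface(model) as emu:
        emu.run_steps(10)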
def test_negative_cxbase(request, seed):
    n_axons = 3

    model = Model()

    input = SpikeInput(n_axons)
    input.add_spikes(1, list(range(n_axons)))
    model.add_input(input)

    axon = Axon(n_axons)
    input.add_axon(axon)

    block = LoihiBlock(3)
    block.compartment.configure_relu()
    model.add_block(block)

    synapse = Synapse(n_axons)
    weights = [0.1, 0.1, 0.1]
    indices = [0, 1, 2]
    axon_to_weight_map = list(range(n_axons))
    cx_bases = [0, 1, -1]
    synapse.set_population_weights(
        weights, indices, axon_to_weight_map, cx_bases, pop_type=32)
    axon.target = synapse
    block.add_synapse(synapse)

    probe = Probe(target=block, key='voltage')
    block.add_probe(probe)

    discretize_model(model)

    n_steps = 2
    if request.config.getoption("--target") == 'loihi':
        with HardwareInterface(model, use_snips=False, seed=seed) as sim:
            sim.run_steps(n_steps)
            y = sim.get_probe_output(probe)
    else:
        with EmulatorInterface(model, seed=seed) as sim:
            sim.run_steps(n_steps)
            y = sim.get_probe_output(probe)

    # Compartments 0 and 2 should change from axons 0 and 1.
    # Axon 2 should have no effect, and not change compartment 1 (the sum of
    # its cx_base and index), or other compartments (e.g. 2 if cx_base ignored)
    assert np.allclose(y[1, 1], 0), "Third axon not ignored"
    assert np.allclose(y[1, 0], y[1, 2]), "Third axon targeting another"
    assert not np.allclose(y[1], y[0]), "Voltage not changing"
def _basic_model():
    model = Model()

    block0 = LoihiBlock(1)
    block0.compartment.configure_lif()
    model.add_block(block0)

    block1 = LoihiBlock(1)
    block1.compartment.configure_lif()
    model.add_block(block1)

    axon1 = Axon(1)
    block0.add_axon(axon1)

    synapse1 = Synapse(1)
    synapse1.set_full_weights([[1]])
    axon1.target = synapse1
    block1.add_synapse(synapse1)

    axon0 = Axon(1)
    input = LoihiInput()
    input.add_axon(axon0)
    model.add_input(input)

    synapse0 = Synapse(1)
    synapse0.set_full_weights([[1]])
    axon0.target = synapse0
    block0.add_synapse(synapse0)

    discretize_model(model)

    return model
def test_multiple_get_probe_output():
    n_steps = 15
    n_axons = 3

    model = Model()

    # n_axons controls number of input spikes and thus amount of overflow
    input = SpikeInput(n_axons)
    for t in np.arange(1, n_steps + 1):
        input.add_spikes(t, np.arange(n_axons))  # send spikes to all axons
    model.add_input(input)

    block = LoihiBlock(1)
    block.compartment.configure_relu()
    block.compartment.configure_filter(0.1)
    model.add_block(block)

    synapse = Synapse(n_axons)
    synapse.set_weights(np.ones((n_axons, 1)))
    block.add_synapse(synapse)

    axon = Axon(n_axons)
    axon.target = synapse
    input.add_axon(axon)

    probe_u = LoihiProbe(target=block, key="current", synapse=Lowpass(0.005))
    model.add_probe(probe_u)
    probe_v = LoihiProbe(target=block, key="voltage", synapse=Lowpass(0.005))
    model.add_probe(probe_v)
    probe_s = LoihiProbe(target=block, key="spiked", synapse=Lowpass(0.005))
    model.add_probe(probe_s)

    discretize_model(model)

    # must set these after `discretize` to specify discretized values
    block.compartment.vmin = -(2**22) + 1
    block.compartment.vth[:] = VTH_MAX

    with EmulatorInterface(model) as emu:
        emu.run_steps(n_steps)
        first_u = emu.get_probe_output(probe_u)
        first_v = emu.get_probe_output(probe_v)
        first_s = emu.get_probe_output(probe_s)
        second_u = emu.get_probe_output(probe_u)
        second_v = emu.get_probe_output(probe_v)
        second_s = emu.get_probe_output(probe_s)

    assert np.all(first_u == second_u)
    assert np.all(first_v == second_v)
    assert np.all(first_s == second_s)
def test_builder_poptype_errors():
    pytest.importorskip("nxsdk")

    # Test error in build_synapse
    model = Model()

    block = LoihiBlock(1)
    block.compartment.configure_lif()
    model.add_block(block)

    synapse = Synapse(1)
    synapse.set_weights([[1]])
    synapse.pop_type = 8
    block.add_synapse(synapse)

    discretize_model(model)

    allocator = Greedy()  # one core per ensemble
    board = allocator(model, n_chips=1)

    with pytest.raises(ValueError, match="[Ss]ynapse.*[Uu]nrec.*pop.*type"):
        build_board(board)

    # Test error in collect_axons
    model = Model()

    block0 = LoihiBlock(1)
    block0.compartment.configure_lif()
    model.add_block(block0)

    block1 = LoihiBlock(1)
    block1.compartment.configure_lif()
    model.add_block(block1)

    axon = Axon(1)
    block0.add_axon(axon)

    synapse = Synapse(1)
    synapse.set_weights([[1]])
    synapse.pop_type = 8
    axon.target = synapse
    block1.add_synapse(synapse)

    discretize_model(model)

    board = allocator(model, n_chips=1)

    with pytest.raises(ValueError, match="[Aa]xon.*[Uu]nrec.*pop.*type"):
        build_board(board)
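# For reference (hedged; based on the values nengo-loihi accepts elsewhere in
# these snippets): synapses use pop_type 0 for discrete axons and 16 or 32 for
# population axons, so the `pop_type = 8` above is deliberately unrecognized
# and should trip the matching errors in both build_synapse and collect_axons.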
def test_validate_block():
    # too many compartments
    block = LoihiBlock(1200)
    assert block.compartment.n_compartments > 1024
    with pytest.raises(BuildError, match="Number of compartments"):
        validate_block(block)

    # too many input axons
    block = LoihiBlock(410)
    block.add_synapse(Synapse(5000))
    with pytest.raises(BuildError, match="Input axons"):
        validate_block(block)

    # too many output axons
    block = LoihiBlock(410)
    synapse = Synapse(2500)
    axon = Axon(5000)
    axon.target = synapse
    block.add_synapse(synapse)
    block.add_axon(axon)
    with pytest.raises(BuildError, match="Output axons"):
        validate_block(block)

    # too many synapse bits
    block = LoihiBlock(600)
    synapse = Synapse(500)
    synapse.set_full_weights(np.ones((500, 600)))
    axon = Axon(500)
    axon.target = synapse
    block.add_synapse(synapse)
    block.add_axon(axon)
    with pytest.raises(BuildError, match="synapse bits"):
        validate_block(block)
def get_block(self, weights, block_label=None, syn_label=None):
    gain = self.gain * self.dt
    bias = self.bias * self.dt

    n, d = weights.shape
    n_neurons = 2 * d * self.pairs_per_dim

    block = LoihiBlock(n_neurons, label=block_label)
    block.compartment.configure_relu(dt=self.dt)
    block.compartment.bias[:] = bias.repeat(d)

    syn = Synapse(n, label=syn_label)
    weights2 = []
    for ga, gb in gain.reshape(self.pairs_per_dim, 2):
        weights2.extend([scale_matrix(weights, ga), scale_matrix(weights, -gb)])
    weights2 = stack_matrices(weights2, order="h")
    syn.set_weights(weights2)
    block.add_synapse(syn)

    return block, syn
def get_block(self, weights, block_label=None, syn_label=None):
    gain = self.gain * self.dt
    bias = self.bias * self.dt

    d, n = weights.shape
    n_neurons = 2 * d * self.pairs_per_dim

    block = LoihiBlock(n_neurons, label=block_label)
    block.compartment.configure_relu(dt=self.dt)
    block.compartment.bias[:] = bias.repeat(d)

    syn = Synapse(n, label=syn_label)
    weights2 = []
    for ga, gb in gain.reshape(self.pairs_per_dim, 2):
        weights2.extend([ga * weights.T, -gb * weights.T])
    weights2 = np.hstack(weights2)
    syn.set_full_weights(weights2)
    block.add_synapse(syn)

    return block, syn
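def _example_onoff_weights():
    # Sketch (numpy only, illustrative shapes) of the weight layout built by
    # `get_block` above: each gain pair (ga, gb) contributes a positively and
    # a negatively scaled copy of the decoders, so each represented dimension
    # gets an on/off pair of decode neurons.
    weights = np.array([[1.0, 0.5, -0.5]])  # (d, n) = (1, 3) decoders
    ga, gb = 0.5, 0.5  # a single gain pair (pairs_per_dim = 1)
    weights2 = np.hstack([ga * weights.T, -gb * weights.T])
    assert weights2.shape == (3, 2)  # (n, 2 * d * pairs_per_dim)
    return weights2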
def new_syn(tracing_mag=None):
    syn = Synapse(n_axons=1)
    syn.set_full_weights(np.array([[1]]))
    if tracing_mag is not None:
        syn.set_learning(tracing_mag=tracing_mag)
    core.add_synapse(syn)
    return syn
def test_strings():
    block = LoihiBlock(3, label="myBlock")
    assert str(block) == "LoihiBlock(myBlock)"
    assert str(block.compartment) == "Compartment()"

    synapse = Synapse(2, label="mySynapse")
    assert str(synapse) == "Synapse(mySynapse)"

    axon = Axon(2, label="myAxon")
    assert str(axon) == "Axon(myAxon)"

    spike = Axon.Spike(axon_id=7, atom=2)
    assert str(spike) == "Spike(axon_id=7, atom=2)"
def test_population_input(request, allclose):
    target = request.config.getoption("--target")
    dt = 0.001

    n_inputs = 3
    n_axons = 1
    n_cx = 2

    steps = 6
    spike_times_inds = [(1, [0]), (3, [1]), (5, [2])]

    model = Model()

    input = SpikeInput(n_inputs)
    model.add_input(input)
    spikes = [(input, ti, inds) for ti, inds in spike_times_inds]

    input_axon = Axon(n_axons)
    axon_map = np.zeros(n_inputs, dtype=int)
    atoms = np.arange(n_inputs)
    input_axon.set_axon_map(axon_map, atoms)
    input.add_axon(input_axon)

    block = LoihiBlock(n_cx)
    block.compartment.configure_lif(tau_rc=0., tau_ref=0., dt=dt)
    block.compartment.configure_filter(0, dt=dt)
    model.add_block(block)

    synapse = Synapse(n_axons)
    weights = 0.1 * np.array([[[1, 2], [2, 3], [4, 5]]], dtype=float)
    indices = np.array([[[0, 1], [0, 1], [0, 1]]], dtype=int)
    axon_to_weight_map = np.zeros(n_axons, dtype=int)
    cx_bases = np.zeros(n_axons, dtype=int)
    synapse.set_population_weights(
        weights, indices, axon_to_weight_map, cx_bases, pop_type=32)
    block.add_synapse(synapse)
    input_axon.target = synapse

    probe = Probe(target=block, key='voltage')
    block.add_probe(probe)

    discretize_model(model)

    if target == 'loihi':
        with HardwareInterface(model, use_snips=True) as sim:
            sim.run_steps(steps, blocking=False)
            for ti in range(1, steps + 1):
                spikes_i = [spike for spike in spikes if spike[1] == ti]
                sim.host2chip(spikes=spikes_i, errors=[])
                sim.chip2host(probes_receivers={})
            y = sim.get_probe_output(probe)
    else:
        for inp, ti, inds in spikes:
            inp.add_spikes(ti, inds)
        with EmulatorInterface(model) as sim:
            sim.run_steps(steps)
            y = sim.get_probe_output(probe)

    vth = block.compartment.vth[0]
    assert (block.compartment.vth == vth).all()
    z = y / vth
    assert allclose(z[[1, 3, 5]], weights[0], atol=4e-2, rtol=0)
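# Reading the population weights above (hedged summary of the semantics the
# surrounding tests rely on): an axon assigned weight-set `w` via
# `axon_to_weight_map`, firing with atom `a`, delivers weights[w][a][k] to
# compartment cx_bases[axon] + indices[w][a][k]. Here a single axon and
# weight set serve all three atoms, so each input spike drives both
# compartments with the weight row selected by its atom, which is what the
# final `z[[1, 3, 5]] == weights[0]` check verifies.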
def test_uv_overflow(n_axons, plt, allclose, monkeypatch):
    # TODO: Currently this is not testing the V overflow, since it is higher
    # and I haven't been able to figure out a way to make it overflow.
    nt = 15

    model = Model()

    # n_axons controls number of input spikes and thus amount of overflow
    input = SpikeInput(n_axons)
    for t in np.arange(1, nt + 1):
        # send spikes to all axons
        input.add_spikes(t, np.arange(n_axons), permanent=True)
    model.add_input(input)

    block = LoihiBlock(1)
    block.compartment.configure_relu()
    block.compartment.configure_filter(0.1)
    model.add_block(block)

    synapse = Synapse(n_axons)
    synapse.set_weights(np.ones((n_axons, 1)))
    block.add_synapse(synapse)

    axon = Axon(n_axons)
    axon.target = synapse
    input.add_axon(axon)

    probe_u = LoihiProbe(target=block, key="current")
    model.add_probe(probe_u)
    probe_v = LoihiProbe(target=block, key="voltage")
    model.add_probe(probe_v)
    probe_s = LoihiProbe(target=block, key="spiked")
    model.add_probe(probe_s)

    discretize_model(model)

    # must set these after `discretize` to specify discretized values
    block.compartment.vmin = -(2**22) + 1
    block.compartment.vth[:] = VTH_MAX

    assert EmulatorInterface.strict  # Tests should be run in strict mode
    monkeypatch.setattr(EmulatorInterface, "strict", False)
    overflow_var = "q0" if n_axons == 1000 else "current"
    with EmulatorInterface(model) as emu:
        with pytest.warns(UserWarning, match=f"Overflow in {overflow_var}"):
            emu.run_steps(nt)
        emu_u = emu.collect_probe_output(probe_u)
        emu_v = emu.collect_probe_output(probe_v)
        emu_s = emu.collect_probe_output(probe_s)

    with HardwareInterface(model, use_snips=False) as sim:
        sim.run_steps(nt)
        sim_u = sim.collect_probe_output(probe_u)
        sim_v = sim.collect_probe_output(probe_v)
        sim_s = sim.collect_probe_output(probe_s)
        sim_v[sim_s > 0] = 0  # since Loihi has placeholder voltage after spike

    plt.subplot(311)
    plt.plot(emu_u)
    plt.plot(sim_u)
    plt.subplot(312)
    plt.plot(emu_v)
    plt.plot(sim_v)
    plt.subplot(313)
    plt.plot(emu_s)
    plt.plot(sim_s)

    assert allclose(emu_u, sim_u)
    assert allclose(emu_v, sim_v)
def build_connection(model, conn):
    if nengo_transforms is not None:
        if isinstance(conn.transform, nengo_transforms.Convolution):
            # TODO: integrate these into the same function
            conv.build_conv2d_connection(model, conn)
            return
        elif not isinstance(conn.transform, nengo_transforms.Dense):
            raise NotImplementedError(
                "nengo-loihi does not yet support %s transforms"
                % conn.transform)

    # Create random number generator
    rng = np.random.RandomState(model.seeds[conn])

    pre_cx = model.objs[conn.pre_obj]['out']
    post_cx = model.objs[conn.post_obj]['in']
    assert isinstance(pre_cx, (LoihiBlock, LoihiInput))
    assert isinstance(post_cx, (LoihiBlock, Probe))

    weights = None
    eval_points = None
    solver_info = None
    neuron_type = None
    post_slice = conn.post_slice

    # sample transform (if using a distribution)
    transform = sample_transform(conn, rng=rng)

    tau_s = 0.0  # `synapse is None` gets mapped to `tau_s = 0.0`
    if isinstance(conn.synapse, nengo.synapses.Lowpass):
        tau_s = conn.synapse.tau
    elif conn.synapse is not None:
        raise NotImplementedError("Cannot handle non-Lowpass synapses")

    needs_decode_neurons = False
    target_encoders = None
    if isinstance(conn.pre_obj, Node):
        assert conn.pre_slice == slice(None)

        if np.array_equal(transform, np.array(1.)):
            # TODO: this identity transform may be avoidable
            transform = np.eye(conn.pre.size_out)
        else:
            assert transform.ndim == 2, "transform shape not handled yet"
            assert transform.shape[1] == conn.pre.size_out

        assert transform.shape[1] == conn.pre.size_out
        if isinstance(conn.pre_obj, ChipReceiveNeurons):
            weights = transform / model.dt
            neuron_type = conn.pre_obj.neuron_type
        else:
            # input is on-off neuron encoded, so double/flip transform
            weights = np.column_stack([transform, -transform])
            target_encoders = 'node_encoders'
    elif (isinstance(conn.pre_obj, Ensemble)
          and isinstance(conn.pre_obj.neuron_type, nengo.Direct)):
        raise NotImplementedError()
    elif isinstance(conn.pre_obj, Ensemble):  # Normal decoded connection
        eval_points, decoders, solver_info = model.build(
            conn.solver, conn, rng, transform)

        if conn.solver.weights and not conn.solver.compositional:
            weights = decoders
        else:
            weights = multiply(transform, decoders)

        # the decoder solver assumes a spike height of 1/dt; that isn't the
        # case on loihi, so we need to undo that scaling
        weights = weights / model.dt

        neuron_type = conn.pre_obj.neuron_type

        if conn.solver.weights:
            # weight solvers only allowed on ensemble->ensemble connections
            assert isinstance(conn.post_obj, Ensemble)

            if conn.solver.compositional:
                encoders = model.params[conn.post_obj].scaled_encoders.T
                encoders = encoders[post_slice]
                weights = multiply(encoders.T, weights)

            # post slice already applied to encoders (either here or in
            # `build_decoders`), so don't apply later
            post_slice = None
        else:
            needs_decode_neurons = True
    elif isinstance(conn.pre_obj, Neurons):
        assert conn.pre_slice == slice(None)
        assert transform.ndim == 2, "transform shape not handled yet"
        weights = transform / model.dt
        neuron_type = conn.pre_obj.ensemble.neuron_type
    else:
        raise NotImplementedError("Connection from type %r" % (
            type(conn.pre_obj),))

    if neuron_type is not None and hasattr(neuron_type, 'amplitude'):
        weights = weights * neuron_type.amplitude

    mid_cx = pre_cx
    mid_axon_inds = None
    post_tau = tau_s
    if needs_decode_neurons and not isinstance(conn.post_obj, Neurons):
        # --- add decode neurons
        assert weights.ndim == 2
        d, n = weights.shape

        if isinstance(post_cx, Probe):
            # use non-spiking decode neurons for voltage probing
            assert post_cx.target is None
            assert post_slice == slice(None)

            # use the same scaling as the ensemble does, to get good
            # decodes. Note that this assumes that the decoded value
            # is in the range -radius to radius, which is usually true.
            weights = weights / conn.pre_obj.radius

            gain = 1
            dec_cx = LoihiBlock(2 * d, label='%s' % conn)
            dec_cx.compartment.configure_nonspiking(
                dt=model.dt, vth=model.vth_nonspiking)
            dec_cx.compartment.bias[:] = 0
            model.add_block(dec_cx)
            model.objs[conn]['decoded'] = dec_cx

            dec_syn = Synapse(n, label="probe_decoders")
            weights2 = gain * np.vstack([weights, -weights]).T

            dec_syn.set_full_weights(weights2)
            dec_cx.add_synapse(dec_syn)
            model.objs[conn]['decoders'] = dec_syn
        else:
            # use spiking decode neurons for on-chip connection
            if isinstance(conn.post_obj, Ensemble):
                # loihi encoders don't include radius, so handle scaling here
                weights = weights / conn.post_obj.radius

            post_d = conn.post_obj.size_in
            post_inds = np.arange(post_d, dtype=np.int32)[post_slice]
            assert weights.shape[0] == len(post_inds) == conn.size_out == d
            mid_axon_inds = model.decode_neurons.get_post_inds(
                post_inds, post_d)

            target_encoders = 'decode_neuron_encoders'
            dec_cx, dec_syn = model.decode_neurons.get_block(
                weights, block_label="%s" % conn, syn_label="decoders")

            model.add_block(dec_cx)
            model.objs[conn]['decoded'] = dec_cx
            model.objs[conn]['decoders'] = dec_syn

        # use tau_s for filter into decode neurons, decode_tau for filter out
        dec_cx.compartment.configure_filter(tau_s, dt=model.dt)
        post_tau = model.decode_tau

        dec_ax0 = Axon(n, label="decoders")
        dec_ax0.target = dec_syn
        pre_cx.add_axon(dec_ax0)
        model.objs[conn]['decode_axon'] = dec_ax0

        if conn.learning_rule_type is not None:
            rule_type = conn.learning_rule_type
            if isinstance(rule_type, nengo.PES):
                if not isinstance(rule_type.pre_synapse,
                                  nengo.synapses.Lowpass):
                    raise ValidationError(
                        "Loihi only supports `Lowpass` pre-synapses for "
                        "learning rules", attr='pre_synapse', obj=rule_type)

                tracing_tau = rule_type.pre_synapse.tau / model.dt

                # Nengo builder scales PES learning rate by `dt / n_neurons`
                n_neurons = (conn.pre_obj.n_neurons
                             if isinstance(conn.pre_obj, Ensemble)
                             else conn.pre_obj.size_in)
                learning_rate = rule_type.learning_rate * model.dt / n_neurons

                # Account for scaling to put integer error in range [-127, 127]
                learning_rate /= model.pes_error_scale

                # Tracing mag set so that the magnitude of the pre trace
                # is independent of the pre tau. `dt` factor accounts for
                # Nengo's `dt` spike scaling. Where is the second `dt` from?
                # Maybe the fact that post decode neurons have `vth = 1/dt`?
                tracing_mag = -np.expm1(-1. / tracing_tau) / model.dt**2

                # learning weight exponent controls the maximum weight
                # magnitude/weight resolution
                wgt_exp = model.pes_wgt_exp

                dec_syn.set_learning(
                    learning_rate=learning_rate,
                    tracing_mag=tracing_mag,
                    tracing_tau=tracing_tau,
                    wgt_exp=wgt_exp,
                )
            else:
                raise NotImplementedError()

        mid_cx = dec_cx

    if isinstance(post_cx, Probe):
        assert post_cx.target is None
        assert post_slice == slice(None)
        post_cx.target = mid_cx
        mid_cx.add_probe(post_cx)
    elif isinstance(conn.post_obj, Neurons):
        assert isinstance(post_cx, LoihiBlock)
        assert post_slice == slice(None)
        if weights is None:
            raise NotImplementedError("Need weights for connection to neurons")
        else:
            assert weights.ndim == 2
            n2, n1 = weights.shape
            assert post_cx.n_neurons == n2

            syn = Synapse(n1, label="neuron_weights")
            gain = model.params[conn.post_obj.ensemble].gain
            syn.set_full_weights(weights.T * gain)
            post_cx.add_synapse(syn)
            model.objs[conn]['weights'] = syn

        ax = Axon(mid_cx.n_neurons, label="neuron_weights")
        ax.target = syn
        mid_cx.add_axon(ax)

        post_cx.compartment.configure_filter(post_tau, dt=model.dt)

        if conn.learning_rule_type is not None:
            raise NotImplementedError()
    elif isinstance(conn.post_obj, Ensemble) and conn.solver.weights:
        assert isinstance(post_cx, LoihiBlock)
        assert weights.ndim == 2
        n2, n1 = weights.shape
        assert post_cx.n_neurons == n2

        # loihi encoders don't include radius, so handle scaling here
        weights = weights / conn.post_obj.radius

        syn = Synapse(n1, label="%s::decoder_weights" % conn)
        syn.set_full_weights(weights.T)
        post_cx.add_synapse(syn)
        model.objs[conn]['weights'] = syn

        ax = Axon(n1, label="decoder_weights")
        ax.target = syn
        mid_cx.add_axon(ax)

        post_cx.compartment.configure_filter(post_tau, dt=model.dt)

        if conn.learning_rule_type is not None:
            raise NotImplementedError()
    elif isinstance(conn.post_obj, Ensemble):
        assert target_encoders is not None
        if target_encoders not in post_cx.named_synapses:
            build_decode_neuron_encoders(
                model, conn.post_obj, kind=target_encoders)

        mid_ax = Axon(mid_cx.n_neurons, label="encoders")
        mid_ax.target = post_cx.named_synapses[target_encoders]
        mid_ax.set_axon_map(mid_axon_inds)
        mid_cx.add_axon(mid_ax)
        model.objs[conn]['mid_axon'] = mid_ax

        post_cx.compartment.configure_filter(post_tau, dt=model.dt)
    else:
        # This includes Node, since nodes can't be targets on-chip
        raise NotImplementedError()

    model.params[conn] = BuiltConnection(
        eval_points=eval_points,
        solver_info=solver_info,
        transform=transform,
        weights=weights)
def build_conv2d_connection(model, conn):
    if nengo_transforms is None:
        # It should not be possible to reach this, because this function is
        # only called for a Convolution transform, which can exist only if
        # nengo_transforms exists.
        raise NotImplementedError("Convolution requires newer Nengo")

    if conn.transform.dimensions != 2:
        raise NotImplementedError("nengo-loihi only supports 2D convolution")
    if conn.transform.padding != "valid":
        raise NotImplementedError(
            "nengo-loihi only supports convolution with 'valid' padding")

    # Create random number generator
    rng = np.random.RandomState(model.seeds[conn])

    pre_cx = model.objs[conn.pre_obj]['out']
    post_cx = model.objs[conn.post_obj]['in']
    assert isinstance(pre_cx, (LoihiInput, LoihiBlock))
    assert isinstance(post_cx, LoihiBlock)

    tau_s = 0.0
    if isinstance(conn.synapse, nengo.synapses.Lowpass):
        tau_s = conn.synapse.tau
    elif conn.synapse is not None:
        raise NotImplementedError("Cannot handle non-Lowpass synapses")

    # --- pre
    assert isinstance(conn.pre_obj, (Neurons, ChipReceiveNeurons))
    assert conn.pre_slice == slice(None)

    assert isinstance(conn.transform, nengo_transforms.Convolution)

    weights = conn.transform.sample(rng=rng)
    input_shape = conn.transform.input_shape

    # Account for nengo spike height of 1/dt
    weights = weights / model.dt

    if isinstance(conn.pre_obj, ChipReceiveNeurons):
        neuron_type = conn.pre_obj.neuron_type
    elif isinstance(conn.pre_obj, Neurons):
        neuron_type = conn.pre_obj.ensemble.neuron_type

    if neuron_type is not None and hasattr(neuron_type, 'amplitude'):
        weights = weights * neuron_type.amplitude

    # --- post
    assert isinstance(conn.post_obj, Neurons)
    assert conn.post_slice == slice(None)

    gain = model.params[conn.post_obj.ensemble].gain
    if not np.all(gain == gain[0]):
        # TODO: support this?
        raise ValidationError(
            "All neurons targeted by a Convolution connection must "
            "have the same gain", "gain", obj=conn.post_obj.ensemble)
    weights = weights * gain[0]

    pop_type = 32  # TODO: pick this

    new_transform = copy.copy(conn.transform)
    type(new_transform).init.data[new_transform] = weights
    weights, indices, axon_to_weight_map, cx_bases = conv2d_loihi_weights(
        new_transform)

    synapse = Synapse(np.prod(input_shape.spatial_shape),
                      label="conv2d_weights")
    synapse.set_population_weights(
        weights, indices, axon_to_weight_map, cx_bases, pop_type=pop_type)
    post_cx.add_synapse(synapse)
    model.objs[conn]['weights'] = synapse

    ax = Axon(np.prod(input_shape.spatial_shape), label="conv2d_weights")
    ax.target = synapse
    ax.cx_to_axon_map = pixel_idxs(input_shape)
    ax.cx_atoms = channel_idxs(input_shape)
    pre_cx.add_axon(ax)

    post_cx.compartment.configure_filter(tau_s, dt=model.dt)

    model.params[conn] = BuiltConnection(
        eval_points=None,
        solver_info=None,
        transform=None,
        weights=weights)
def test_pop_tiny(pop_type, channels_last, nc, request, plt, seed, allclose):
    tau_rc = 0.02
    tau_ref = 0.001
    tau_s = 0.0
    dt = 0.001

    neuron_bias = 1.

    pres_time = 0.4

    sti, stj = 1, 1

    if nc == 1:
        filters = np.array([[-0.5, 2., -0.25],
                            [-0.75, 2., -1.0],
                            [-0.5, 3., -0.5],
                            [-1.0, 6., -0.25]]).reshape(1, 4, 1, 3)

        inp_biases = np.array([[1, 5, 1], [2, 1, 2]])
        inp_biases = inp_biases[:, :, None]
    elif nc == 2:
        filters = np.array([[[-0.5, 2., -0.2],
                             [-0.7, 2., -1.0],
                             [-0.5, 3., -0.5],
                             [-1.0, 6., -0.2]],
                            [[-1.0, 2., -1.0],
                             [-0.5, 2., -0.5],
                             [-0.8, 3., -0.2],
                             [-1.0, 4., -0.2]]]).reshape(2, 4, 1, 3)

        inp_biases = np.array([[[1, 5, 1], [2, 1, 2]],
                               [[0, 3, 1], [4, 2, 1]]])
        inp_biases = np.transpose(inp_biases, (1, 2, 0))

    # rearrange to (kernel_rows, kernel_cols, in_channels, out_channels)
    filters = np.transpose(filters, (2, 3, 0, 1))

    inp_biases = inp_biases / (inp_biases.max() + 0.001)

    # --- compute nengo_loihi outputs
    ni, nj, nk = inp_biases.shape
    si, sj, nc, nf = filters.shape
    nij = ni * nj
    nyi = 1 + (ni - si) // sti
    nyj = 1 + (nj - sj) // stj
    out_size = nyi * nyj * nf
    assert out_size <= 1024

    model = Model()

    # input block
    inp = LoihiBlock(ni * nj * nk, label='inp')
    assert inp.n_neurons <= 1024
    inp.compartment.configure_relu()
    inp.compartment.bias[:] = inp_biases.ravel()

    inp_ax = Axon(nij, label='inp_ax')

    # we always compute the pixel/channel idxs with channels_last=True
    # (not sure why?), and then set it to the correct value afterwards
    inp_shape = nengo_transforms.ChannelShape((ni, nj, nk),
                                              channels_last=True)
    inp_ax.set_compartment_axon_map(target_axons=conv.pixel_idxs(inp_shape),
                                    atoms=conv.channel_idxs(inp_shape))
    inp_shape.shape = (ni, nj, nk) if channels_last else (nk, ni, nj)
    inp_shape.channels_last = channels_last

    inp.add_axon(inp_ax)

    model.add_block(inp)

    # conv block
    neurons = LoihiBlock(out_size, label='neurons')
    assert neurons.n_neurons <= 1024
    neurons.compartment.configure_lif(tau_rc=tau_rc, tau_ref=tau_ref, dt=dt)
    neurons.compartment.configure_filter(tau_s, dt=dt)
    neurons.compartment.bias[:] = neuron_bias

    synapse = Synapse(np.prod(inp_shape.spatial_shape), label='synapse')
    conv2d_transform = nengo_transforms.Convolution(
        nf, inp_shape, strides=(sti, stj), channels_last=channels_last,
        init=filters, kernel_size=(1, 3))
    weights, indices, axon_to_weight_map, bases = conv.conv2d_loihi_weights(
        conv2d_transform)
    synapse.set_population_weights(
        weights, indices, axon_to_weight_map, bases, pop_type=pop_type)
    neurons.add_synapse(synapse)

    out_probe = Probe(target=neurons, key='spiked')
    neurons.add_probe(out_probe)

    inp_ax.target = synapse
    model.add_block(neurons)

    # simulation
    discretize_model(model)

    n_steps = int(pres_time / dt)
    target = request.config.getoption("--target")
    if target == 'loihi':
        with HardwareInterface(model, use_snips=False, seed=seed) as sim:
            sim.run_steps(n_steps)
            sim_out = sim.get_probe_output(out_probe)
    else:
        with EmulatorInterface(model, seed=seed) as sim:
            sim.run_steps(n_steps)
            sim_out = sim.get_probe_output(out_probe)

    sim_out = np.sum(sim_out, axis=0) * (dt / pres_time)
    if channels_last:
        sim_out.shape = (nyi, nyj, nf)
        sim_out = np.transpose(sim_out, (2, 0, 1))
    else:
        sim_out.shape = (nf, nyi, nyj)

    out_max = sim_out.max()

    # --- plot results
    rows = 1
    cols = 2

    ax = plt.subplot(rows, cols, 1)
    plt.hist(sim_out.ravel(), bins=11)

    ax = plt.subplot(rows, cols, 2)
    tile(sim_out, vmin=0, vmax=out_max, grid=True, ax=ax)

    # ref_out determined by emulator running code known to work
    if nc == 1:
        ref_out = np.array([[0.06, 0.02],
                            [0.055, 0.],
                            [0.0825, 0.0225],
                            [0.125, 0.04]])
    elif nc == 2:
        ref_out = np.array([[0.0975, 0.02],
                            [0.0825, 0.02],
                            [0.125, 0.055],
                            [0.2475, 0.0825]])
    assert allclose(sim_out[:, :, 0], ref_out, rtol=0, atol=1e-7)
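def _example_valid_conv_shape():
    # Quick check of the 'valid'-convolution output-size arithmetic used in
    # `test_pop_tiny` above (values match its nc == 1 case: a 2x3 input, a
    # 1x3 kernel, and unit strides).
    ni, nj = 2, 3  # input rows, cols
    si, sj = 1, 3  # kernel rows, cols
    sti, stj = 1, 1  # strides
    nyi = 1 + (ni - si) // sti  # -> 2 output rows
    nyj = 1 + (nj - sj) // stj  # -> 1 output column
    assert (nyi, nyj) == (2, 1)
    return nyi, nyj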
def test_conv2d_weights(channels_last, hw_opts, request, plt, seed, rng,
                        allclose):
    def loihi_rates_n(neuron_type, x, gain, bias, dt):
        """Compute Loihi rates on higher dimensional inputs"""
        y = x.reshape(-1, x.shape[-1])
        gain = np.asarray(gain)
        bias = np.asarray(bias)
        if gain.ndim == 0:
            gain = gain * np.ones(x.shape[-1])
        if bias.ndim == 0:
            bias = bias * np.ones(x.shape[-1])
        rates = loihi_rates(neuron_type, y, gain, bias, dt)
        return rates.reshape(*x.shape)

    if channels_last:
        plt.saveas = None
        pytest.xfail("Blocked by CxBase cannot be > 256 bug")

    target = request.config.getoption("--target")
    if target != 'loihi' and len(hw_opts) > 0:
        pytest.skip("Hardware options only available on hardware")

    pop_type = 32

    # load data
    with open(os.path.join(test_dir, 'mnist10.pkl'), 'rb') as f:
        test10 = pickle.load(f)

    test_x = test10[0][0].reshape(28, 28)
    test_x = test_x[3:24, 3:24]
    test_x = 1.999 * test_x - 0.999

    filters = Gabor(freq=Uniform(0.5, 1)).generate(8, (7, 7), rng=rng)
    sti, stj = 2, 2
    tau_rc = 0.02
    tau_ref = 0.002
    tau_s = 0.005
    dt = 0.001

    encode_type = nengo.SpikingRectifiedLinear()
    encode_gain = 1. / dt
    encode_bias = 0.
    neuron_type = nengo.LIF(tau_rc=tau_rc, tau_ref=tau_ref)
    neuron_gain = 1.
    neuron_bias = 1.

    pres_time = 0.2

    # --- compute ideal outputs
    def conv_pm(x, kernel):
        y0 = scipy.signal.correlate2d(x[0], kernel, mode='valid')[::sti, ::stj]
        y1 = scipy.signal.correlate2d(x[1], kernel, mode='valid')[::sti, ::stj]
        return [y0, -y1]

    ref_out = np.array([test_x, -test_x])
    ref_out = loihi_rates_n(encode_type, ref_out, encode_gain, encode_bias,
                            dt)
    ref_out = ref_out / encode_gain
    ref_out = np.array([conv_pm(ref_out, kernel) for kernel in filters])
    ref_out = ref_out.sum(axis=1)  # sum positive and negative parts
    ref_out = loihi_rates_n(neuron_type, ref_out, neuron_gain, neuron_bias,
                            dt)

    # --- compute nengo_loihi outputs
    inp_biases = np.stack([test_x, -test_x], axis=-1 if channels_last else 0)
    inp_shape = nengo_transforms.ChannelShape(
        inp_biases.shape, channels_last=channels_last)

    kernel = np.array([filters, -filters])  # two channels, pos and neg
    kernel = np.transpose(kernel, (2, 3, 0, 1))
    conv2d_transform = nengo_transforms.Convolution(
        8, inp_shape, strides=(sti, stj), channels_last=channels_last,
        kernel_size=(7, 7), init=kernel)

    out_size = ref_out.size
    nf, nyi, nyj = ref_out.shape
    assert out_size <= 1024

    model = Model()

    # input block
    inp = LoihiBlock(inp_shape.size, label='inp')
    assert inp.n_neurons <= 1024
    inp.compartment.configure_relu()
    inp.compartment.bias[:] = inp_biases.ravel()

    inp_ax = Axon(np.prod(inp_shape.spatial_shape), label='inp_ax')
    inp_ax.set_compartment_axon_map(target_axons=conv.pixel_idxs(inp_shape),
                                    atoms=conv.channel_idxs(inp_shape))
    inp.add_axon(inp_ax)

    model.add_block(inp)

    # conv block
    neurons = LoihiBlock(out_size, label='neurons')
    assert neurons.n_neurons <= 1024
    neurons.compartment.configure_lif(tau_rc=tau_rc, tau_ref=tau_ref, dt=dt)
    neurons.compartment.configure_filter(tau_s, dt=dt)
    neurons.compartment.bias[:] = neuron_bias

    synapse = Synapse(np.prod(inp_shape.spatial_shape), label='synapse')
    weights, indices, axon_to_weight_map, bases = conv.conv2d_loihi_weights(
        conv2d_transform)
    synapse.set_population_weights(
        weights, indices, axon_to_weight_map, bases, pop_type=pop_type)

    neurons.add_synapse(synapse)

    out_probe = Probe(target=neurons, key='spiked')
    neurons.add_probe(out_probe)

    inp_ax.target = synapse
    model.add_block(neurons)

    # simulation
    discretize_model(model)

    n_steps = int(pres_time / dt)
    if target == 'loihi':
        with HardwareInterface(model, use_snips=False, seed=seed,
                               **hw_opts) as sim:
            sim.run_steps(n_steps)
            sim_out = sim.get_probe_output(out_probe)
    else:
        with EmulatorInterface(model, seed=seed) as sim:
            sim.run_steps(n_steps)
            sim_out = sim.get_probe_output(out_probe)

    sim_out = np.sum(sim_out, axis=0) / pres_time
    if channels_last:
        sim_out.shape = (nyi, nyj, nf)
        sim_out = np.transpose(sim_out, (2, 0, 1))
    else:
        sim_out.shape = (nf, nyi, nyj)

    out_max = max(ref_out.max(), sim_out.max())

    # --- plot results
    rows = 2
    cols = 2

    ax = plt.subplot(rows, cols, 1)
    tile(filters, cols=8, ax=ax)

    ax = plt.subplot(rows, cols, 2)
    tile(ref_out, vmin=0, vmax=out_max, cols=8, ax=ax)

    ax = plt.subplot(rows, cols, 3)
    plt.hist(ref_out.ravel(), bins=31)
    plt.hist(sim_out.ravel(), bins=31)

    ax = plt.subplot(rows, cols, 4)
    # tile(sim_out, vmin=0, vmax=1, cols=8, ax=ax)
    tile(sim_out, vmin=0, vmax=out_max, cols=8, ax=ax)

    assert allclose(sim_out, ref_out, atol=10, rtol=1e-3)
def split_synapse(old_block, old_synapse, new_blocks,
                  validate=ValidationLevel.MINIMAL):
    """Break a synapse apart to work with new blocks

    Parameters
    ----------
    old_block : LoihiBlock
        The old block that the old synapse belonged to.
    old_synapse : Synapse
        The old synapse to be split.
    new_blocks : OrderedDict(Block: list of int)
        A map from new blocks that ``old_block`` has been split into, to
        old block compartment indices that the new block now represents.

    Returns
    -------
    new_synapses : OrderedDict(Synapse: list of int)
        A map from new synapses that ``old_synapse`` has been split into, to
        the axon indices in the old synapse that the new synapse requires.
    """
    # Either synapse has discrete weights, in which case weight sharing and
    # compartment base offsets are not allowed, or it has population weights
    # and these are allowed. This function may work outside this dichotomy,
    # but has not been tested for that.
    assert (
        # discrete weights
        old_synapse.axon_to_weight_map is None
        and old_synapse.axon_compartment_bases is None
        and old_synapse.pop_type == 0
    ) or (
        # population weights
        old_synapse.axon_to_weight_map is not None
        and old_synapse.axon_compartment_bases is not None
        and old_synapse.pop_type != 0
    )
    assert all(
        isinstance(w, np.ndarray) for w in old_synapse.weights
    ), "Sparse weights not yet supported"

    # --- collect old input axon information
    old_input_axons = OrderedDict()
    for axon_idx in range(old_synapse.n_axons):
        weight_idx = old_synapse.axon_weight_idx(axon_idx)
        indices = old_synapse.indices[weight_idx]
        if validate >= ValidationLevel.MINIMAL:
            assert all(
                np.array_equal(i, indices[0]) for i in indices[1:]
            ), "All atoms must target same indices"
        indices = indices[0]

        base = old_synapse.axon_compartment_base(axon_idx)
        if base is None:
            continue  # this axon is not used

        axon_comp_ids = base + indices
        old_input_axons[axon_idx] = (
            weight_idx,
            base,
            axon_comp_ids,
            set(axon_comp_ids),
        )

    # --- create new synapses, one for each new block that `old_block` split into
    # `new_synapse_axons` maps each new synapse to the old axon ids it requires
    new_synapse_axons = OrderedDict()
    for k, (block, block_comp_ids) in enumerate(new_blocks.items()):
        # find which compartments in this new block each old axon inputs to
        axon_overlaps = {
            axon_id: block_comp_ids.set.intersection(axon_comp_ids_set)
            for axon_id, (_, _, _, axon_comp_ids_set) in old_input_axons.items()
        }

        # select only the axons that input to at least one compartment in
        # this block
        axon_ids = [
            axon_id
            for axon_id in old_input_axons
            if len(axon_overlaps[axon_id]) > 0
        ]
        if len(axon_ids) == 0:
            # Can only happen if this synapse inputted to part of the old
            # block only, and none of the compartments it connected to are in
            # this new block. We currently don't allow connections to subsets
            # of neurons, so don't cover this. But just skipping with
            # `continue` _should_ work.
            continue  # pragma: no cover

        # --- make the new synapse
        new_synapse = Synapse(len(axon_ids))
        if old_synapse.label is not None:
            new_synapse.label = "%s[%d]" % (old_synapse.label, k)
        block.add_synapse(new_synapse)
        new_synapse_axons[new_synapse] = axon_ids

        set_new_synapse_weights(
            old_synapse,
            old_input_axons,
            new_synapse,
            block_comp_ids,
            axon_overlaps,
            axon_ids,
            validate=validate,
        )

    logger.info(
        "Split synapse (%d) into (%s)",
        old_synapse.n_axons,
        ", ".join(str(synapse.n_axons) for synapse in new_synapse_axons),
    )

    assert all(
        w.dtype == old_synapse.weights[0].dtype for w in old_synapse.weights)
    for new_synapse in new_synapse_axons:
        assert all(
            w.dtype == old_synapse.weights[0].dtype
            for w in new_synapse.weights)

    return new_synapse_axons
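def _example_axon_overlap():
    # Sketch (numpy only, illustrative values) of the bookkeeping
    # `split_synapse` relies on: each old axon reaches compartments
    # `base + indices`, and a new block keeps the axon iff that set overlaps
    # the old-block compartments assigned to it.
    base = 4
    indices = np.array([0, 1, 2])
    axon_comp_ids = set(base + indices)  # {4, 5, 6}
    new_block_comp_ids = set(range(4, 8))  # new block owns compartments 4..7
    assert axon_comp_ids & new_block_comp_ids  # axon is kept for this block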
def build_conv2d_connection(model, transform, conn):
    assert is_transform_type(transform, ("Convolution", "ConvolutionTranspose"))

    if transform.dimensions != 2:
        raise NotImplementedError("nengo-loihi only supports 2D convolution")

    # Create random number generator
    rng = np.random.RandomState(model.seeds[conn])

    pre_obj = model.objs[conn.pre_obj]["out"]
    post_obj = model.objs[conn.post_obj]["in"]
    assert isinstance(pre_obj, (LoihiInput, LoihiBlock))
    assert isinstance(post_obj, LoihiBlock)

    tau_s = 0.0
    if isinstance(conn.synapse, nengo.synapses.Lowpass):
        tau_s = conn.synapse.tau
    elif conn.synapse is not None:
        raise NotImplementedError("Cannot handle non-Lowpass synapses")

    # --- pre
    assert isinstance(conn.pre_obj, (Neurons, ChipReceiveNeurons))

    kernel = transform.sample(rng=rng)
    input_shape = transform.input_shape

    # Account for nengo spike height of 1/dt
    kernel = kernel / model.dt

    if isinstance(conn.pre_obj, ChipReceiveNeurons):
        neuron_type = conn.pre_obj.neuron_type
    elif isinstance(conn.pre_obj, Neurons):
        neuron_type = conn.pre_obj.ensemble.neuron_type

    if neuron_type is not None and hasattr(neuron_type, "amplitude"):
        kernel = kernel * neuron_type.amplitude

    # --- post
    assert isinstance(conn.post_obj, Neurons)
    assert conn.post_slice == slice(None)

    gain = model.params[conn.post_obj.ensemble].gain
    if not np.all(gain == gain[0]):
        # Cannot fold gains into kernel, result would not be convolutional.
        # Therefore, Loihi does not support this if we want to share weights.
        raise ValidationError(
            "All neurons targeted by a Convolution connection must "
            "have the same gain",
            "gain",
            obj=conn.post_obj.ensemble,
        )
    kernel = kernel * gain[0]
    kernel = kernel.astype(nengo.rc.float_dtype)

    pop_type = model.config[conn].pop_type
    new_transform = copy.copy(transform)
    type(new_transform).init.data[new_transform] = kernel
    weights, indices, axon_to_weight_map, offsets = conv2d_loihi_weights(
        new_transform)

    synapse = Synapse(np.prod(input_shape.spatial_shape),
                      label="conv2d_weights")
    synapse.set_population_weights(
        weights, indices, axon_to_weight_map, offsets, pop_type=pop_type
    )
    post_obj.add_synapse(synapse)
    model.objs[conn]["weights"] = synapse
    if synapse.atom_bits_extra() > 0:
        warnings.warn(
            "Using more than 32 'populations' (e.g. convolutional filters) "
            "with `pop_type=16` axons has not yet been implemented in NxSDK. "
            "This feature is therefore emulator-only."
        )

    target_axons = -np.ones(pre_obj.n_neurons, dtype=np.int32)
    target_axons[conn.pre_slice] = pixel_idxs(input_shape)
    atoms = np.zeros(pre_obj.n_neurons, dtype=np.int32)
    atoms[conn.pre_slice] = channel_idxs(input_shape)

    ax = Axon(np.prod(input_shape.spatial_shape), label="conv2d_weights")
    ax.target = synapse
    ax.set_compartment_axon_map(target_axons, atoms=atoms)
    pre_obj.add_axon(ax)

    post_obj.compartment.configure_filter(tau_s, dt=model.dt)

    model.params[conn] = BuiltConnection(
        eval_points=None, solver_info=None, transform=None, weights=kernel
    )
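def _example_pixel_channel_idxs():
    # Hedged sketch of the `pixel_idxs`/`channel_idxs` maps used above, under
    # the assumption that for a channels-last image, neuron i sits at spatial
    # pixel i // n_channels and carries channel (the conv "atom")
    # i % n_channels. One axon is created per pixel; the atom selects the
    # weight row within that axon.
    ni, nj, nc = 2, 2, 3  # rows, cols, channels (channels_last layout)
    idxs = np.arange(ni * nj * nc)
    pixels = idxs // nc  # axon each neuron spikes on (one per pixel)
    atoms = idxs % nc  # atom carried by that spike
    assert pixels.max() == ni * nj - 1 and atoms.max() == nc - 1
    return pixels, atoms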
def build_full_chip_connection(model, conn):  # noqa: C901
    """Build dense or sparse connections on-chip"""

    # Create random number generator
    rng = np.random.RandomState(model.seeds[conn])

    pre_obj = model.objs[conn.pre_obj]["out"]
    post_obj = model.objs[conn.post_obj]["in"]
    assert isinstance(pre_obj, (LoihiBlock, LoihiInput))
    assert isinstance(post_obj, (LoihiBlock, LoihiProbe))

    weights = None
    eval_points = None
    solver_info = None
    neuron_type = None
    pre_slice = conn.pre_slice
    post_slice = conn.post_slice

    # sample transform (if using a distribution), transform shape (out, in)
    transform = sample_transform(conn, rng=rng)

    tau_s = 0.0  # `synapse is None` gets mapped to `tau_s = 0.0`
    if isinstance(conn.synapse, nengo.synapses.Lowpass):
        tau_s = conn.synapse.tau
    elif conn.synapse is not None:
        raise NotImplementedError("Cannot handle non-Lowpass synapses")

    needs_decode_neurons = False
    target_encoders = None
    is_chip_process = isinstance(conn.pre_obj, Node) and isinstance(
        conn.pre_obj.output, ChipProcess
    )
    if isinstance(conn.pre_obj, Node) and not (
        isinstance(conn.pre_obj, ChipReceiveNeurons) or is_chip_process
    ):
        assert conn.pre_slice == slice(None)

        weights = expand_matrix(transform,
                                shape=(conn.post.size_in, conn.pre.size_out))

        # input is on-off neuron encoded, so double/flip transform
        weights = stack_matrices([weights, scale_matrix(weights, -1)],
                                 order="h")
        target_encoders = "node_encoders"
    elif isinstance(conn.pre_obj, Ensemble) and isinstance(
        conn.pre_obj.neuron_type, nengo.Direct
    ):
        raise NotImplementedError()
    elif isinstance(conn.pre_obj, Ensemble):  # Normal decoded connection
        if isinstance(transform, scipy.sparse.spmatrix):
            raise BuildError(
                "Applying a sparse transform to a decoded connection is not "
                "supported"
            )

        eval_points, decoders, solver_info = model.build(
            conn.solver, conn, rng, transform
        )
        pre_slice = slice(None)  # taken care of in decoders

        if conn.solver.weights and not conn.solver.compositional:
            weights = decoders
        else:
            weights = multiply(transform, decoders)

        # the decoder solver assumes a spike height of 1/dt; that isn't the
        # case on loihi, so we need to undo that scaling
        weights = scale_matrix(weights, 1.0 / model.dt)

        neuron_type = conn.pre_obj.neuron_type

        if conn.solver.weights:
            # weight solvers only allowed on ensemble->ensemble connections
            assert isinstance(conn.post_obj, Ensemble)

            if conn.solver.compositional:
                encoders = model.params[conn.post_obj].scaled_encoders
                weights = multiply(encoders[:, post_slice], weights)

            # post slice already applied to encoders (either here or in
            # `build_decoders`), so don't apply later
            post_slice = slice(None)
        else:
            needs_decode_neurons = True
    elif isinstance(conn.pre_obj, (Neurons, ChipReceiveNeurons)) or is_chip_process:
        weights = expand_matrix(transform,
                                shape=(conn.post.size_in, conn.pre.size_out))
        weights = scale_matrix(weights, 1.0 / model.dt)
        neuron_type = (
            None
            if is_chip_process
            else conn.pre_obj.neuron_type
            if isinstance(conn.pre_obj, ChipReceiveNeurons)
            else conn.pre_obj.ensemble.neuron_type
        )

        if isinstance(conn.post_obj, Ensemble):
            needs_decode_neurons = True
    else:
        raise NotImplementedError("Connection from type %r"
                                  % (type(conn.pre_obj),))

    if neuron_type is not None and hasattr(neuron_type, "amplitude"):
        weights = scale_matrix(weights, neuron_type.amplitude)

    # to proper dtype
    transform = transform.astype(nengo.rc.float_dtype)
    weights = weights.astype(nengo.rc.float_dtype)

    # loihi_weights has shape (in, out), to match the shape used by
    # block.Synapses
    loihi_weights = weights.T

    mid_obj = pre_obj
    mid_axon_inds = None
    post_tau = tau_s
    if needs_decode_neurons and not isinstance(conn.post_obj, Neurons):
        # --- add decode neurons
        assert weights.ndim == 2
        n, d = loihi_weights.shape

        if isinstance(post_obj, LoihiProbe):
            # use non-spiking decode neurons for voltage probing
            assert len(post_obj.target) == 0 or post_obj.target == [None]
            assert post_slice == slice(None)

            # use the same scaling as the ensemble does, to get good
            # decodes. Note that this assumes that the decoded value
            # is in the range -radius to radius, which is usually true.
            gain = np.array(1.0 / conn.pre_obj.radius,
                            dtype=nengo.rc.float_dtype)

            decoder_block = LoihiBlock(2 * d, label="%s" % conn)
            decoder_block.compartment.configure_nonspiking(
                dt=model.dt, vth=model.vth_nonspiking
            )
            decoder_block.compartment.bias[:] = 0

            dec_syn = Synapse(n, label="probe_decoders")
            weights2 = stack_matrices(
                [scale_matrix(loihi_weights, gain),
                 scale_matrix(loihi_weights, -gain)],
                order="h",
            )

            dec_syn.set_weights(weights2)
            decoder_block.add_synapse(dec_syn)
        else:
            # use spiking decode neurons for on-chip connection
            if isinstance(conn.post_obj, Ensemble):
                # loihi encoders don't include radius, so handle scaling here
                gain = np.array(1.0 / conn.post_obj.radius,
                                dtype=nengo.rc.float_dtype)
                loihi_weights = scale_matrix(loihi_weights, gain)

            post_d = conn.post_obj.size_in
            post_inds = np.arange(post_d, dtype=np.int32)[post_slice]
            assert loihi_weights.shape[1] == len(post_inds) == conn.size_out
            mid_axon_inds = model.decode_neurons.get_post_inds(
                post_inds, post_d)

            target_encoders = "decode_neuron_encoders"
            decoder_block, dec_syn = model.decode_neurons.get_block(
                loihi_weights, block_label="%s" % conn, syn_label="decoders"
            )

        model.add_block(decoder_block)
        model.objs[conn]["decoded"] = decoder_block
        model.objs[conn]["decoders"] = dec_syn
        model.connection_decode_neurons[conn] = decoder_block

        # use tau_s for filter into decode neurons, decode_tau for filter out
        decoder_block.compartment.configure_filter(tau_s, dt=model.dt)
        post_tau = model.decode_tau

        target_axons = -np.ones(pre_obj.n_neurons, dtype=np.int32)
        target_axons[pre_slice] = np.arange(target_axons[pre_slice].size)
        pre_slice = slice(None)

        dec_ax0 = Axon(n, label="decoders")
        dec_ax0.target = dec_syn
        dec_ax0.set_compartment_axon_map(target_axons)
        pre_obj.add_axon(dec_ax0)
        model.objs[conn]["decode_axon"] = dec_ax0

        loihi_weights = None  # weights have now been handled

        if conn.learning_rule_type is not None:
            rule_type = conn.learning_rule_type
            if isinstance(rule_type, nengo.PES):
                if not isinstance(rule_type.pre_synapse,
                                  nengo.synapses.Lowpass):
                    raise ValidationError(
                        "Loihi only supports `Lowpass` pre-synapses for "
                        "learning rules",
                        attr="pre_synapse",
                        obj=rule_type,
                    )

                pre_tau = rule_type.pre_synapse.tau
                float_tracing_tau = pre_tau / model.dt
                tracing_tau = int(round(float_tracing_tau))
                if not np.allclose(float_tracing_tau, tracing_tau):
                    warnings.warn(
                        f"PES learning rule `pre_synapse.tau` ({pre_tau}) is "
                        f"not an integer multiple of `dt` ({model.dt}). "
                        "Rounding."
                    )

                # Nengo builder scales PES learning rate by `dt / n_neurons`
                n_neurons = (
                    conn.pre_obj.n_neurons
                    if isinstance(conn.pre_obj, Ensemble)
                    else conn.pre_obj.size_in
                )
                learning_rate = rule_type.learning_rate * model.dt / n_neurons

                # Account for scaling to put integer error in range [-127, 127]
                learning_rate /= model.pes_error_scale

                # Tracing mag set so that the magnitude of the pre trace
                # is independent of the pre tau. `dt` factor accounts for
                # Nengo's `dt` spike scaling. Where is the second `dt` from?
                # Maybe the fact that post decode neurons have `vth = 1/dt`?
                tracing_mag = -np.expm1(-1.0 / tracing_tau) / model.dt**2

                # learning weight exponent controls the maximum weight
                # magnitude/weight resolution
                wgt_exp = model.pes_wgt_exp

                dec_syn.set_learning(
                    learning_rate=learning_rate,
                    tracing_mag=tracing_mag,
                    tracing_tau=tracing_tau,
                    wgt_exp=wgt_exp,
                )
            else:
                raise NotImplementedError()

        mid_obj = decoder_block

    if isinstance(post_obj, LoihiProbe):
        assert post_obj.target == [None]
        assert post_slice == slice(None)
        post_obj.target[0] = mid_obj
        model.add_probe(post_obj)
    elif isinstance(conn.post_obj, Neurons):
        assert isinstance(post_obj, LoihiBlock)
        assert post_slice == slice(None)
        if loihi_weights is None:
            raise NotImplementedError("Need weights for connection to neurons")

        assert loihi_weights.ndim == 2
        n1, n2 = loihi_weights.shape
        assert post_obj.n_neurons == n2

        syn = Synapse(n1, label="neuron_weights")
        gain = model.params[conn.post_obj.ensemble].gain
        loihi_weights = scale_matrix(loihi_weights, gain)
        syn.set_weights(loihi_weights)
        post_obj.add_synapse(syn)
        model.objs[conn]["weights"] = syn

        target_axons = -np.ones(mid_obj.n_neurons, dtype=np.int32)
        target_axons[pre_slice] = np.arange(target_axons[pre_slice].size)
        assert target_axons[pre_slice].size == n1

        ax = Axon(mid_obj.n_neurons, label="neuron_weights")
        ax.target = syn
        ax.set_compartment_axon_map(target_axons)
        mid_obj.add_axon(ax)

        post_obj.compartment.configure_filter(post_tau, dt=model.dt)

        if conn.learning_rule_type is not None:
            raise NotImplementedError()
    elif isinstance(conn.post_obj, Ensemble) and conn.solver.weights:
        assert isinstance(post_obj, LoihiBlock)
        assert pre_slice == slice(None), "Not implemented"
        assert post_slice == slice(None)
        assert loihi_weights.ndim == 2
        n1, n2 = loihi_weights.shape
        assert post_obj.n_neurons == n2

        # loihi encoders don't include radius, so handle scaling here
        scale = np.array(1.0 / conn.post_obj.radius,
                         dtype=nengo.rc.float_dtype)
        loihi_weights = scale_matrix(loihi_weights, scale)

        syn = Synapse(n1, label="%s::decoder_weights" % conn)
        syn.set_weights(loihi_weights)
        post_obj.add_synapse(syn)
        model.objs[conn]["weights"] = syn

        ax = Axon(n1, label="decoder_weights")
        ax.target = syn
        mid_obj.add_axon(ax)

        post_obj.compartment.configure_filter(post_tau, dt=model.dt)

        if conn.learning_rule_type is not None:
            raise NotImplementedError()
    elif isinstance(conn.post_obj, Ensemble):
        assert isinstance(post_obj, LoihiBlock)
        assert pre_slice == slice(None), "Not implemented"
        assert post_slice == slice(None)
        assert target_encoders is not None
        if target_encoders not in post_obj.named_synapses:
            build_decode_neuron_encoders(model, conn.post_obj,
                                         kind=target_encoders)

        mid_ax = Axon(mid_obj.n_neurons, label="encoders")
        mid_ax.target = post_obj.named_synapses[target_encoders]
        mid_ax.set_compartment_axon_map(mid_axon_inds)
        mid_obj.add_axon(mid_ax)
        model.objs[conn]["mid_axon"] = mid_ax

        post_obj.compartment.configure_filter(post_tau, dt=model.dt)
    else:
        # This includes Node, since nodes can't be targets on-chip
        raise NotImplementedError()

    model.params[conn] = BuiltConnection(
        eval_points=eval_points,
        solver_info=solver_info,
        transform=transform,  # sampled transform
        weights=weights,  # scaled weights (including decoders)
    )
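def _example_tracing_mag_fixed_point():
    # Numeric sanity check of the `tracing_mag` comment above (numpy only;
    # the dt value is illustrative): a trace that decays by exp(-1/tau) each
    # timestep and is bumped by `mag` on every spike converges to
    # mag / -expm1(-1/tau) = 1 / dt**2, independent of tau. This is the sense
    # in which the pre-trace magnitude does not depend on the pre tau.
    dt = 0.001
    for tracing_tau in [2.0, 5.0, 20.0]:
        mag = -np.expm1(-1.0 / tracing_tau) / dt**2
        trace = 0.0
        for _ in range(1000):  # one spike per timestep
            trace = trace * np.exp(-1.0 / tracing_tau) + mag
        assert np.isclose(trace, 1.0 / dt**2, rtol=1e-3)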