def build_chip_to_host(model, conn):
    """Build a connection whose ``pre`` is on-chip and whose ``post`` is on the host.

    The chip side is realized as a nengo ``Probe`` on ``conn.pre`` (the
    simulator reads probed values off the chip); the host side is a
    ``HostReceiveNode`` connected on to ``conn.post``. The two halves are
    linked through ``model.chip2host_receivers``.

    Parameters
    ----------
    model : Model
        The nengo-loihi builder model (holds seeds and builder registries).
    conn : nengo.Connection
        The chip-to-host connection being built.

    Raises
    ------
    BuildError
        If the transform is not a Dense or NoTransform type.
    NotImplementedError
        If a learning rule is present but ``conn.pre_obj`` is not an Ensemble.
    """
    if not is_transform_type(conn.transform, ("Dense", "NoTransform")):
        raise BuildError(
            f"{conn}: nengo-loihi does not yet support "
            f"'{type(conn.transform).__name__}' transforms on chip to host connections"
        )

    rng = np.random.RandomState(model.seeds[conn])
    dim = conn.size_out
    # host-side network that contains `conn.post`
    host = model.host_model(base_obj(conn.post))

    logger.debug("Creating HostReceiveNode for %s", conn)
    receive = HostReceiveNode(
        dim,
        label=None if conn.label is None else "%s_receive" % conn.label,
        add_to_container=False,
    )
    host.build(receive)

    # forward the received chip values on to the original post object
    receive2post = Connection(
        receive,
        conn.post,
        synapse=conn.synapse,
        label=None if conn.label is None else "%s_host" % conn.label,
        add_to_container=False,
    )
    _inherit_seed(host, receive2post, model, conn)
    host.build(receive2post)

    logger.debug("Creating Probe for %s", conn)
    # sample the transform now (if it is a distribution) so the probe
    # carries concrete weights
    transform = sample_transform(conn, rng=rng)

    probe = NengoProbe(
        conn.pre, synapse=None, solver=conn.solver, add_to_container=False
    )
    # record the connection parameters the simulator applies when turning
    # probed chip data into host-side values
    model.chip2host_params[probe] = dict(
        learning_rule_type=conn.learning_rule_type,
        function=conn.function,
        eval_points=conn.eval_points,
        scale_eval_points=conn.scale_eval_points,
        transform=transform,
        label=None if conn.label is None else "%s_probe" % conn.label,
    )
    model.chip2host_receivers[probe] = receive
    _inherit_seed(model, probe, model, conn)
    model.builder.build(model, probe)

    if conn.learning_rule_type is not None:
        # learning needs an error signal sent back toward the chip, so the
        # probe becomes the modulatory target for this rule
        if not isinstance(conn.pre_obj, Ensemble):
            raise NotImplementedError(
                "Learning rule presynaptic object must be an Ensemble "
                "(got %r)" % type(conn.pre_obj).__name__
            )
        model.needs_sender[conn.learning_rule] = PESModulatoryTarget(probe)
def split_chip_to_host(networks, conn):
    """Replace a chip->host connection with a chip Probe plus a host-side
    HostReceiveNode wired on to the original post object.

    The original connection is removed from the network; the probe/receiver
    pair is registered in ``networks.chip2host_receivers`` so the simulator
    can move data across the boundary.
    """
    out_size = conn.size_out

    def tagged(suffix):
        # Derive a child label from the connection label (None stays None).
        return None if conn.label is None else "%s_%s" % (conn.label, suffix)

    # Host side: node that receives the probed chip values.
    logger.debug("Creating HostReceiveNode for %s", conn)
    receiver = HostReceiveNode(
        out_size, label=tagged("receive"), add_to_container=False)
    networks.add(receiver, "host")

    # Forward received values on to the original post object.
    bridge = Connection(
        receiver, conn.post,
        synapse=conn.synapse,
        label=tagged("host"),
        add_to_container=False)
    networks.add(bridge, "host")

    # Chip side: probe the pre object; the probe carries the connection's
    # function/transform parameters for the simulator to apply.
    logger.debug("Creating Probe for %s", conn)
    seed = networks.original.seed if conn.seed is None else conn.seed
    transform = sample_transform(conn, rng=np.random.RandomState(seed=seed))

    probe = Probe(conn.pre, synapse=None, solver=conn.solver,
                  add_to_container=False)
    networks.chip2host_params[probe] = dict(
        learning_rule_type=conn.learning_rule_type,
        function=conn.function,
        eval_points=conn.eval_points,
        scale_eval_points=conn.scale_eval_points,
        transform=transform,
        label=tagged("probe"),
    )
    networks.add(probe, "chip")
    networks.chip2host_receivers[probe] = receiver

    if conn.learning_rule_type is not None:
        # Learning requires an error signal routed back via the probe.
        if not isinstance(conn.pre_obj, Ensemble):
            raise NotImplementedError(
                "Learning rule presynaptic object must be an Ensemble "
                "(got %r)" % type(conn.pre_obj).__name__)
        networks.needs_sender[conn.learning_rule] = PESModulatoryTarget(probe)

    networks.remove(conn)
def build_connection(model, conn):
    """Build a nengo Connection into Loihi blocks, synapses, axons, and probes.

    Dispatches on the pre/post object types: Node inputs become on/off
    encoded weights, Ensemble pres get decoders solved for them (optionally
    routed through decode neurons), and Neurons pres use the raw transform.
    Built objects are registered in ``model.objs[conn]`` and the final
    ``BuiltConnection`` in ``model.params[conn]``.

    Fix: the Node branch previously asserted
    ``transform.shape[1] == conn.pre.size_out`` twice in a row; the single
    assert after the if/else covers both branches (``np.eye`` satisfies it
    trivially), so the duplicate is removed.

    Parameters
    ----------
    model : Model
        The nengo-loihi builder model.
    conn : nengo.Connection
        The on-chip connection being built.
    """
    if nengo_transforms is not None:
        if isinstance(conn.transform, nengo_transforms.Convolution):
            # TODO: integrate these into the same function
            conv.build_conv2d_connection(model, conn)
            return
        elif not isinstance(conn.transform, nengo_transforms.Dense):
            raise NotImplementedError(
                "nengo-loihi does not yet support %s transforms"
                % conn.transform)

    # Create random number generator
    rng = np.random.RandomState(model.seeds[conn])

    pre_cx = model.objs[conn.pre_obj]['out']
    post_cx = model.objs[conn.post_obj]['in']
    assert isinstance(pre_cx, (LoihiBlock, LoihiInput))
    assert isinstance(post_cx, (LoihiBlock, Probe))

    weights = None
    eval_points = None
    solver_info = None
    neuron_type = None
    post_slice = conn.post_slice

    # sample transform (if using a distribution)
    transform = sample_transform(conn, rng=rng)

    tau_s = 0.0  # `synapse is None` gets mapped to `tau_s = 0.0`
    if isinstance(conn.synapse, nengo.synapses.Lowpass):
        tau_s = conn.synapse.tau
    elif conn.synapse is not None:
        raise NotImplementedError("Cannot handle non-Lowpass synapses")

    needs_decode_neurons = False
    target_encoders = None
    if isinstance(conn.pre_obj, Node):
        assert conn.pre_slice == slice(None)

        if np.array_equal(transform, np.array(1.)):
            # TODO: this identity transform may be avoidable
            transform = np.eye(conn.pre.size_out)
        else:
            assert transform.ndim == 2, "transform shape not handled yet"

        # holds for both branches above (`np.eye` satisfies it trivially)
        assert transform.shape[1] == conn.pre.size_out

        if isinstance(conn.pre_obj, ChipReceiveNeurons):
            weights = transform / model.dt
            neuron_type = conn.pre_obj.neuron_type
        else:
            # input is on-off neuron encoded, so double/flip transform
            weights = np.column_stack([transform, -transform])
            target_encoders = 'node_encoders'
    elif (isinstance(conn.pre_obj, Ensemble)
          and isinstance(conn.pre_obj.neuron_type, nengo.Direct)):
        raise NotImplementedError()
    elif isinstance(conn.pre_obj, Ensemble):  # Normal decoded connection
        eval_points, decoders, solver_info = model.build(
            conn.solver, conn, rng, transform)

        if conn.solver.weights and not conn.solver.compositional:
            weights = decoders
        else:
            weights = multiply(transform, decoders)

        # the decoder solver assumes a spike height of 1/dt; that isn't the
        # case on loihi, so we need to undo that scaling
        weights = weights / model.dt

        neuron_type = conn.pre_obj.neuron_type

        if conn.solver.weights:
            # weight solvers only allowed on ensemble->ensemble connections
            assert isinstance(conn.post_obj, Ensemble)

            if conn.solver.compositional:
                encoders = model.params[conn.post_obj].scaled_encoders.T
                encoders = encoders[post_slice]
                weights = multiply(encoders.T, weights)

            # post slice already applied to encoders (either here or in
            # `build_decoders`), so don't apply later
            post_slice = None
        else:
            needs_decode_neurons = True
    elif isinstance(conn.pre_obj, Neurons):
        assert conn.pre_slice == slice(None)
        assert transform.ndim == 2, "transform shape not handled yet"
        weights = transform / model.dt
        neuron_type = conn.pre_obj.ensemble.neuron_type
    else:
        raise NotImplementedError("Connection from type %r" % (
            type(conn.pre_obj),))

    if neuron_type is not None and hasattr(neuron_type, 'amplitude'):
        weights = weights * neuron_type.amplitude

    mid_cx = pre_cx
    mid_axon_inds = None
    post_tau = tau_s
    if needs_decode_neurons and not isinstance(conn.post_obj, Neurons):
        # --- add decode neurons
        assert weights.ndim == 2
        d, n = weights.shape

        if isinstance(post_cx, Probe):
            # use non-spiking decode neurons for voltage probing
            assert post_cx.target is None
            assert post_slice == slice(None)

            # use the same scaling as the ensemble does, to get good
            # decodes. Note that this assumes that the decoded value
            # is in the range -radius to radius, which is usually true.
            weights = weights / conn.pre_obj.radius

            gain = 1
            dec_cx = LoihiBlock(2 * d, label='%s' % conn)
            dec_cx.compartment.configure_nonspiking(
                dt=model.dt, vth=model.vth_nonspiking)
            dec_cx.compartment.bias[:] = 0
            model.add_block(dec_cx)
            model.objs[conn]['decoded'] = dec_cx

            dec_syn = Synapse(n, label="probe_decoders")
            # double/flip weights so paired compartments track +x and -x
            weights2 = gain * np.vstack([weights, -weights]).T

            dec_syn.set_full_weights(weights2)
            dec_cx.add_synapse(dec_syn)
            model.objs[conn]['decoders'] = dec_syn
        else:
            # use spiking decode neurons for on-chip connection
            if isinstance(conn.post_obj, Ensemble):
                # loihi encoders don't include radius, so handle scaling here
                weights = weights / conn.post_obj.radius

            post_d = conn.post_obj.size_in
            post_inds = np.arange(post_d, dtype=np.int32)[post_slice]
            assert weights.shape[0] == len(post_inds) == conn.size_out == d
            mid_axon_inds = model.decode_neurons.get_post_inds(
                post_inds, post_d)

            target_encoders = 'decode_neuron_encoders'
            dec_cx, dec_syn = model.decode_neurons.get_block(
                weights, block_label="%s" % conn, syn_label="decoders")

            model.add_block(dec_cx)
            model.objs[conn]['decoded'] = dec_cx
            model.objs[conn]['decoders'] = dec_syn

        # use tau_s for filter into decode neurons, decode_tau for filter out
        dec_cx.compartment.configure_filter(tau_s, dt=model.dt)
        post_tau = model.decode_tau

        dec_ax0 = Axon(n, label="decoders")
        dec_ax0.target = dec_syn
        pre_cx.add_axon(dec_ax0)
        model.objs[conn]['decode_axon'] = dec_ax0

        if conn.learning_rule_type is not None:
            rule_type = conn.learning_rule_type
            if isinstance(rule_type, nengo.PES):
                if not isinstance(rule_type.pre_synapse,
                                  nengo.synapses.Lowpass):
                    raise ValidationError(
                        "Loihi only supports `Lowpass` pre-synapses for "
                        "learning rules", attr='pre_synapse', obj=rule_type)

                tracing_tau = rule_type.pre_synapse.tau / model.dt

                # Nengo builder scales PES learning rate by `dt / n_neurons`
                n_neurons = (conn.pre_obj.n_neurons
                             if isinstance(conn.pre_obj, Ensemble)
                             else conn.pre_obj.size_in)
                learning_rate = rule_type.learning_rate * model.dt / n_neurons

                # Account for scaling to put integer error in range [-127, 127]
                learning_rate /= model.pes_error_scale

                # Tracing mag set so that the magnitude of the pre trace
                # is independent of the pre tau. `dt` factor accounts for
                # Nengo's `dt` spike scaling. Where is the second `dt` from?
                # Maybe the fact that post decode neurons have `vth = 1/dt`?
                tracing_mag = -np.expm1(-1. / tracing_tau) / model.dt**2

                # learning weight exponent controls the maximum weight
                # magnitude/weight resolution
                wgt_exp = model.pes_wgt_exp

                dec_syn.set_learning(
                    learning_rate=learning_rate,
                    tracing_mag=tracing_mag,
                    tracing_tau=tracing_tau,
                    wgt_exp=wgt_exp,
                )
            else:
                raise NotImplementedError()

        mid_cx = dec_cx

    if isinstance(post_cx, Probe):
        assert post_cx.target is None
        assert post_slice == slice(None)
        post_cx.target = mid_cx
        mid_cx.add_probe(post_cx)
    elif isinstance(conn.post_obj, Neurons):
        assert isinstance(post_cx, LoihiBlock)
        assert post_slice == slice(None)
        if weights is None:
            raise NotImplementedError("Need weights for connection to neurons")
        else:
            assert weights.ndim == 2
            n2, n1 = weights.shape
            assert post_cx.n_neurons == n2

            syn = Synapse(n1, label="neuron_weights")
            gain = model.params[conn.post_obj.ensemble].gain
            syn.set_full_weights(weights.T * gain)
            post_cx.add_synapse(syn)
            model.objs[conn]['weights'] = syn

        ax = Axon(mid_cx.n_neurons, label="neuron_weights")
        ax.target = syn
        mid_cx.add_axon(ax)

        post_cx.compartment.configure_filter(post_tau, dt=model.dt)

        if conn.learning_rule_type is not None:
            raise NotImplementedError()
    elif isinstance(conn.post_obj, Ensemble) and conn.solver.weights:
        assert isinstance(post_cx, LoihiBlock)
        assert weights.ndim == 2
        n2, n1 = weights.shape
        assert post_cx.n_neurons == n2

        # loihi encoders don't include radius, so handle scaling here
        weights = weights / conn.post_obj.radius

        syn = Synapse(n1, label="%s::decoder_weights" % conn)
        syn.set_full_weights(weights.T)
        post_cx.add_synapse(syn)
        model.objs[conn]['weights'] = syn

        ax = Axon(n1, label="decoder_weights")
        ax.target = syn
        mid_cx.add_axon(ax)

        post_cx.compartment.configure_filter(post_tau, dt=model.dt)

        if conn.learning_rule_type is not None:
            raise NotImplementedError()
    elif isinstance(conn.post_obj, Ensemble):
        assert target_encoders is not None
        if target_encoders not in post_cx.named_synapses:
            build_decode_neuron_encoders(
                model, conn.post_obj, kind=target_encoders)

        mid_ax = Axon(mid_cx.n_neurons, label="encoders")
        mid_ax.target = post_cx.named_synapses[target_encoders]
        mid_ax.set_axon_map(mid_axon_inds)
        mid_cx.add_axon(mid_ax)
        model.objs[conn]['mid_axon'] = mid_ax

        post_cx.compartment.configure_filter(post_tau, dt=model.dt)
    else:
        # This includes Node, since nodes can't be targets on-chip
        raise NotImplementedError()

    model.params[conn] = BuiltConnection(
        eval_points=eval_points,
        solver_info=solver_info,
        transform=transform,
        weights=weights)
def build_full_chip_connection(model, conn):  # noqa: C901
    """Build dense or sparse connections on-chip.

    Dispatches on the pre object (input Node, ChipReceiveNeurons,
    ChipProcess node, Ensemble, or Neurons) and the post object (Ensemble,
    Neurons, or LoihiProbe), creating decode neurons where needed and the
    synapses/axons that realize the weights. Built objects are registered
    in ``model.objs[conn]`` and the result in ``model.params[conn]``.
    """

    # Create random number generator
    rng = np.random.RandomState(model.seeds[conn])

    pre_obj = model.objs[conn.pre_obj]["out"]
    post_obj = model.objs[conn.post_obj]["in"]
    assert isinstance(pre_obj, (LoihiBlock, LoihiInput))
    assert isinstance(post_obj, (LoihiBlock, LoihiProbe))

    weights = None
    eval_points = None
    solver_info = None
    neuron_type = None
    pre_slice = conn.pre_slice
    post_slice = conn.post_slice

    # sample transform (if using a distribution), transform shape (out, in)
    transform = sample_transform(conn, rng=rng)

    tau_s = 0.0  # `synapse is None` gets mapped to `tau_s = 0.0`
    if isinstance(conn.synapse, nengo.synapses.Lowpass):
        tau_s = conn.synapse.tau
    elif conn.synapse is not None:
        raise NotImplementedError("Cannot handle non-Lowpass synapses")

    needs_decode_neurons = False
    target_encoders = None
    # a Node whose output is a ChipProcess runs on-chip even though it is a Node
    is_chip_process = isinstance(conn.pre_obj, Node) and isinstance(
        conn.pre_obj.output, ChipProcess
    )
    if isinstance(conn.pre_obj, Node) and not (
        isinstance(conn.pre_obj, ChipReceiveNeurons) or is_chip_process
    ):
        assert conn.pre_slice == slice(None)

        weights = expand_matrix(transform, shape=(conn.post.size_in, conn.pre.size_out))

        # input is on-off neuron encoded, so double/flip transform
        weights = stack_matrices([weights, scale_matrix(weights, -1)], order="h")
        target_encoders = "node_encoders"
    elif isinstance(conn.pre_obj, Ensemble) and isinstance(
        conn.pre_obj.neuron_type, nengo.Direct
    ):
        raise NotImplementedError()
    elif isinstance(conn.pre_obj, Ensemble):  # Normal decoded connection
        if isinstance(transform, scipy.sparse.spmatrix):
            raise BuildError(
                "Applying a sparse transform to a decoded connection is not supported"
            )

        eval_points, decoders, solver_info = model.build(
            conn.solver, conn, rng, transform
        )
        pre_slice = slice(None)  # taken care of in decoders

        if conn.solver.weights and not conn.solver.compositional:
            weights = decoders
        else:
            weights = multiply(transform, decoders)

        # the decoder solver assumes a spike height of 1/dt; that isn't the
        # case on loihi, so we need to undo that scaling
        weights = scale_matrix(weights, 1.0 / model.dt)

        neuron_type = conn.pre_obj.neuron_type

        if conn.solver.weights:
            # weight solvers only allowed on ensemble->ensemble connections
            assert isinstance(conn.post_obj, Ensemble)

            if conn.solver.compositional:
                encoders = model.params[conn.post_obj].scaled_encoders
                weights = multiply(encoders[:, post_slice], weights)

            # post slice already applied to encoders (either here or in
            # `build_decoders`), so don't apply later
            post_slice = slice(None)
        else:
            needs_decode_neurons = True
    elif isinstance(conn.pre_obj, (Neurons, ChipReceiveNeurons)) or is_chip_process:
        weights = expand_matrix(transform, shape=(conn.post.size_in, conn.pre.size_out))
        weights = scale_matrix(weights, 1.0 / model.dt)
        neuron_type = (
            None
            if is_chip_process
            else conn.pre_obj.neuron_type
            if isinstance(conn.pre_obj, ChipReceiveNeurons)
            else conn.pre_obj.ensemble.neuron_type
        )

        if isinstance(conn.post_obj, Ensemble):
            needs_decode_neurons = True
    else:
        raise NotImplementedError("Connection from type %r" % (type(conn.pre_obj),))

    if neuron_type is not None and hasattr(neuron_type, "amplitude"):
        weights = scale_matrix(weights, neuron_type.amplitude)

    # to proper dtype
    transform = transform.astype(nengo.rc.float_dtype)
    weights = weights.astype(nengo.rc.float_dtype)

    # loihi_weights has shape (in, out), to match the shape by block.Synapses
    loihi_weights = weights.T

    mid_obj = pre_obj
    mid_axon_inds = None
    post_tau = tau_s
    if needs_decode_neurons and not isinstance(conn.post_obj, Neurons):
        # --- add decode neurons
        assert weights.ndim == 2
        n, d = loihi_weights.shape

        if isinstance(post_obj, LoihiProbe):
            # use non-spiking decode neurons for voltage probing
            assert len(post_obj.target) == 0 or post_obj.target == [None]
            assert post_slice == slice(None)

            # use the same scaling as the ensemble does, to get good
            # decodes. Note that this assumes that the decoded value
            # is in the range -radius to radius, which is usually true.
            gain = np.array(1.0 / conn.pre_obj.radius, dtype=nengo.rc.float_dtype)

            decoder_block = LoihiBlock(2 * d, label="%s" % conn)
            decoder_block.compartment.configure_nonspiking(
                dt=model.dt, vth=model.vth_nonspiking
            )
            decoder_block.compartment.bias[:] = 0

            dec_syn = Synapse(n, label="probe_decoders")
            # double/flip weights so paired compartments track +x and -x
            weights2 = stack_matrices(
                [scale_matrix(loihi_weights, gain), scale_matrix(loihi_weights, -gain)],
                order="h",
            )

            dec_syn.set_weights(weights2)
            decoder_block.add_synapse(dec_syn)
        else:
            # use spiking decode neurons for on-chip connection
            if isinstance(conn.post_obj, Ensemble):
                # loihi encoders don't include radius, so handle scaling here
                gain = np.array(1.0 / conn.post_obj.radius, dtype=nengo.rc.float_dtype)
                loihi_weights = scale_matrix(loihi_weights, gain)

            post_d = conn.post_obj.size_in
            post_inds = np.arange(post_d, dtype=np.int32)[post_slice]
            assert loihi_weights.shape[1] == len(post_inds) == conn.size_out
            mid_axon_inds = model.decode_neurons.get_post_inds(post_inds, post_d)

            target_encoders = "decode_neuron_encoders"
            decoder_block, dec_syn = model.decode_neurons.get_block(
                loihi_weights, block_label="%s" % conn, syn_label="decoders"
            )

        model.add_block(decoder_block)
        model.objs[conn]["decoded"] = decoder_block
        model.objs[conn]["decoders"] = dec_syn
        model.connection_decode_neurons[conn] = decoder_block

        # use tau_s for filter into decode neurons, decode_tau for filter out
        decoder_block.compartment.configure_filter(tau_s, dt=model.dt)
        post_tau = model.decode_tau

        # map only the pre-sliced compartments onto axons (-1 = no axon)
        target_axons = -np.ones(pre_obj.n_neurons, dtype=np.int32)
        target_axons[pre_slice] = np.arange(target_axons[pre_slice].size)
        pre_slice = slice(None)

        dec_ax0 = Axon(n, label="decoders")
        dec_ax0.target = dec_syn
        dec_ax0.set_compartment_axon_map(target_axons)
        pre_obj.add_axon(dec_ax0)
        model.objs[conn]["decode_axon"] = dec_ax0

        loihi_weights = None  # weights have now been handled

        if conn.learning_rule_type is not None:
            rule_type = conn.learning_rule_type
            if isinstance(rule_type, nengo.PES):
                if not isinstance(rule_type.pre_synapse, nengo.synapses.Lowpass):
                    raise ValidationError(
                        "Loihi only supports `Lowpass` pre-synapses for learning rules",
                        attr="pre_synapse",
                        obj=rule_type,
                    )

                # tracing tau is in units of timesteps; round and warn if inexact
                pre_tau = rule_type.pre_synapse.tau
                float_tracing_tau = pre_tau / model.dt
                tracing_tau = int(round(float_tracing_tau))
                if not np.allclose(float_tracing_tau, tracing_tau):
                    warnings.warn(
                        f"PES learning rule `pre_synapse.tau` ({pre_tau}) is not an "
                        f"integer multiple of `dt` ({model.dt}). Rounding."
                    )

                # Nengo builder scales PES learning rate by `dt / n_neurons`
                n_neurons = (
                    conn.pre_obj.n_neurons
                    if isinstance(conn.pre_obj, Ensemble)
                    else conn.pre_obj.size_in
                )
                learning_rate = rule_type.learning_rate * model.dt / n_neurons

                # Account for scaling to put integer error in range [-127, 127]
                learning_rate /= model.pes_error_scale

                # Tracing mag set so that the magnitude of the pre trace
                # is independent of the pre tau. `dt` factor accounts for
                # Nengo's `dt` spike scaling. Where is the second `dt` from?
                # Maybe the fact that post decode neurons have `vth = 1/dt`?
                tracing_mag = -np.expm1(-1.0 / tracing_tau) / model.dt**2

                # learning weight exponent controls the maximum weight
                # magnitude/weight resolution
                wgt_exp = model.pes_wgt_exp

                dec_syn.set_learning(
                    learning_rate=learning_rate,
                    tracing_mag=tracing_mag,
                    tracing_tau=tracing_tau,
                    wgt_exp=wgt_exp,
                )
            else:
                raise NotImplementedError()

        mid_obj = decoder_block

    if isinstance(post_obj, LoihiProbe):
        assert post_obj.target == [None]
        assert post_slice == slice(None)
        post_obj.target[0] = mid_obj
        model.add_probe(post_obj)
    elif isinstance(conn.post_obj, Neurons):
        assert isinstance(post_obj, LoihiBlock)
        assert post_slice == slice(None)
        if loihi_weights is None:
            raise NotImplementedError("Need weights for connection to neurons")

        assert loihi_weights.ndim == 2
        n1, n2 = loihi_weights.shape
        assert post_obj.n_neurons == n2

        syn = Synapse(n1, label="neuron_weights")
        gain = model.params[conn.post_obj.ensemble].gain
        loihi_weights = scale_matrix(loihi_weights, gain)
        syn.set_weights(loihi_weights)
        post_obj.add_synapse(syn)
        model.objs[conn]["weights"] = syn

        # map only the pre-sliced compartments onto axons (-1 = no axon)
        target_axons = -np.ones(mid_obj.n_neurons, dtype=np.int32)
        target_axons[pre_slice] = np.arange(target_axons[pre_slice].size)
        assert target_axons[pre_slice].size == n1

        ax = Axon(mid_obj.n_neurons, label="neuron_weights")
        ax.target = syn
        ax.set_compartment_axon_map(target_axons)
        mid_obj.add_axon(ax)

        post_obj.compartment.configure_filter(post_tau, dt=model.dt)

        if conn.learning_rule_type is not None:
            raise NotImplementedError()
    elif isinstance(conn.post_obj, Ensemble) and conn.solver.weights:
        assert isinstance(post_obj, LoihiBlock)
        assert pre_slice == slice(None), "Not implemented"
        assert post_slice == slice(None)
        assert loihi_weights.ndim == 2
        n1, n2 = loihi_weights.shape
        assert post_obj.n_neurons == n2

        # loihi encoders don't include radius, so handle scaling here
        scale = np.array(1.0 / conn.post_obj.radius, dtype=nengo.rc.float_dtype)
        loihi_weights = scale_matrix(loihi_weights, scale)

        syn = Synapse(n1, label="%s::decoder_weights" % conn)
        syn.set_weights(loihi_weights)
        post_obj.add_synapse(syn)
        model.objs[conn]["weights"] = syn

        ax = Axon(n1, label="decoder_weights")
        ax.target = syn
        mid_obj.add_axon(ax)

        post_obj.compartment.configure_filter(post_tau, dt=model.dt)

        if conn.learning_rule_type is not None:
            raise NotImplementedError()
    elif isinstance(conn.post_obj, Ensemble):
        assert isinstance(post_obj, LoihiBlock)
        assert pre_slice == slice(None), "Not implemented"
        assert post_slice == slice(None)
        assert target_encoders is not None
        if target_encoders not in post_obj.named_synapses:
            build_decode_neuron_encoders(model, conn.post_obj, kind=target_encoders)

        mid_ax = Axon(mid_obj.n_neurons, label="encoders")
        mid_ax.target = post_obj.named_synapses[target_encoders]
        mid_ax.set_compartment_axon_map(mid_axon_inds)
        mid_obj.add_axon(mid_ax)
        model.objs[conn]["mid_axon"] = mid_ax

        post_obj.compartment.configure_filter(post_tau, dt=model.dt)
    else:
        # This includes Node, since nodes can't be targets on-chip
        raise NotImplementedError()

    model.params[conn] = BuiltConnection(
        eval_points=eval_points,
        solver_info=solver_info,
        transform=transform,  # sampled transform
        weights=weights,  # scaled weights (including decoders)
    )
def build_host_to_chip(model, conn):
    """Build a connection whose ``pre`` is on the host and whose ``post`` is on-chip.

    Host side: a decode-neuron ensemble encodes ``conn.pre`` into spikes
    that a ``HostSendNode`` ships to the chip. Chip side: a
    ``ChipReceiveNode`` feeds those spikes into ``conn.post``. The send/
    receive pair is registered in ``model.host2chip_senders``.

    Parameters
    ----------
    model : Model
        The nengo-loihi builder model.
    conn : nengo.Connection
        The host-to-chip connection being built.

    Raises
    ------
    BuildError
        If the transform is a Convolution type, or otherwise not
        Dense/NoTransform.
    """
    rng = np.random.RandomState(model.seeds[conn])
    # host-side network that contains `conn.pre`
    host = model.host_model(base_obj(conn.pre))

    if is_transform_type(conn.transform, ("Convolution", "ConvolutionTranspose")):
        raise BuildError(
            f"{conn}: Conv2D transforms not supported for off-chip to "
            "on-chip connections where `pre` is not a Neurons object."
        )
    elif not is_transform_type(conn.transform, ("Dense", "NoTransform")):
        raise BuildError(
            f"{conn}: nengo-loihi does not yet support "
            f"'{type(conn.transform).__name__}' transforms on host to chip connections"
        )

    # Scale the input spikes based on the radius of the target ensemble
    weights = sample_transform(conn, rng=rng)

    if isinstance(conn.post_obj, Ensemble):
        weights = weights / conn.post_obj.radius

    if is_transform_type(conn.transform, "NoTransform"):
        transform = weights  # weights are 1 / (post ensemble radius), if applicable
    else:
        # copy the Transform information, setting `init` to the sampled weights
        transform = copy.copy(conn.transform)
        type(transform).init.data[transform] = weights

    if isinstance(conn.post_obj, Neurons):
        # we don't have encoders, and the transform could have large output,
        # so do it on the chip
        host_transform = 1.0
        chip_transform = transform
        dim = conn.size_mid
    else:
        # we have encoders on the chip, so do the transform off-chip
        host_transform = transform
        chip_transform = 1.0
        dim = conn.size_out

    logger.debug("Creating ChipReceiveNode for %s", conn)
    # `dim * 2` inputs / `dim` outputs matches the on/off doubled encoding
    # used for host->chip spikes
    receive = ChipReceiveNode(
        dim * 2,
        size_out=dim,
        label=None if conn.label is None else "%s_node" % conn.label,
        add_to_container=False,
    )
    model.builder.build(model, receive)

    receive2post = Connection(
        receive,
        conn.post,
        transform=chip_transform,
        synapse=model.decode_tau,
        label=None if conn.label is None else "%s_chip" % conn.label,
        add_to_container=False,
    )
    _inherit_seed(model, receive2post, model, conn)
    _inherit_config(model, receive2post, model, conn)
    build_chip_connection(model, receive2post)

    logger.debug("Creating DecodeNeuron ensemble for %s", conn)
    ens = model.node_neurons.get_ensemble(dim, add_to_container=False)
    ens.label = None if conn.label is None else "%s_ens" % conn.label
    _inherit_seed(host, ens, model, conn)
    host.build(ens)
    model.connection_decode_neurons[conn] = ens

    # encode the host signal into the decode-neuron ensemble, carrying over
    # the original connection's function/solver/eval_points
    pre2ens = Connection(
        conn.pre,
        ens,
        function=conn.function,
        solver=conn.solver,
        eval_points=conn.eval_points,
        scale_eval_points=conn.scale_eval_points,
        synapse=conn.synapse,
        transform=host_transform,
        label=None if conn.label is None else "%s_enc" % conn.label,
        add_to_container=False,
    )
    _inherit_seed(host, pre2ens, model, conn)
    host.build(pre2ens)

    logger.debug("Creating HostSendNode for %s", conn)
    send = HostSendNode(
        dim * 2,
        label=None if conn.label is None else "%s_send" % conn.label,
        add_to_container=False,
    )
    host.build(send)

    # ship the ensemble's neuron spikes to the chip-side receiver
    ensneurons2send = Connection(
        ens.neurons,
        send,
        synapse=None,
        label=None if conn.label is None else "%s_host" % conn.label,
        add_to_container=False,
    )
    _inherit_seed(host, ensneurons2send, model, conn)
    model.host2chip_senders[send] = receive
    host.build(ensneurons2send)
def split_host_to_chip(networks, conn):
    """Split a host->chip connection into host encoders plus a chip receiver.

    Host side: a decode-neuron ensemble encodes ``conn.pre`` into spikes that
    a HostSendNode ships across the boundary. Chip side: a ChipReceiveNode
    feeds those spikes into ``conn.post``. The original connection is removed
    and the send/receive pair recorded in ``networks.host2chip_senders``.
    """
    out_size = conn.size_out

    def tagged(suffix):
        # Build a derived label, or None when the connection is unlabeled.
        return None if conn.label is None else "%s_%s" % (conn.label, suffix)

    # Chip side: `2 * out_size` inputs, matching the doubled on/off spike
    # encoding used elsewhere for node inputs.
    logger.debug("Creating ChipReceiveNode for %s", conn)
    receiver = ChipReceiveNode(
        out_size * 2, size_out=out_size,
        label=tagged("node"), add_to_container=False)
    networks.add(receiver, "chip")

    into_post = Connection(
        receiver, conn.post,
        synapse=networks.node_tau,
        label=tagged("chip"),
        add_to_container=False)
    networks.add(into_post, "chip")

    # Host side: decode neurons that turn the host signal into spikes.
    logger.debug("Creating DecodeNeuron ensemble for %s", conn)
    if networks.node_neurons is None:
        raise BuildError(
            "DecodeNeurons must be specified for host->chip connection.")
    ens = networks.node_neurons.get_ensemble(out_size)
    ens.label = tagged("ens")
    networks.add(ens, "host")

    if nengo_transforms is not None and isinstance(
            conn.transform, nengo_transforms.Convolution):
        raise BuildError(
            "Conv2D transforms not supported for off-chip to "
            "on-chip connections where `pre` is not a Neurons object.")

    # Scale the input spikes based on the radius of the target ensemble
    seed = networks.original.seed if conn.seed is None else conn.seed
    weights = sample_transform(conn, rng=np.random.RandomState(seed=seed))
    if isinstance(conn.post_obj, Ensemble):
        weights = weights / conn.post_obj.radius

    if nengo_transforms is None:
        transform = weights
    else:
        # copy the Transform information, setting `init` to the sampled weights
        transform = copy.copy(conn.transform)
        type(transform).init.data[transform] = weights

    # Encode into the ensemble, carrying over the original connection's
    # function/solver/eval_points settings.
    into_ens = Connection(
        conn.pre, ens,
        function=conn.function,
        solver=conn.solver,
        eval_points=conn.eval_points,
        scale_eval_points=conn.scale_eval_points,
        synapse=conn.synapse,
        transform=transform,
        label=tagged("enc"),
        add_to_container=False)
    networks.add(into_ens, "host")

    # Ship the ensemble's neuron spikes to the chip-side receiver.
    logger.debug("Creating HostSendNode for %s", conn)
    sender = HostSendNode(
        out_size * 2, label=tagged("send"), add_to_container=False)
    networks.add(sender, "host")

    spikes_out = Connection(
        ens.neurons, sender,
        synapse=None,
        label=tagged("host"),
        add_to_container=False)
    networks.add(spikes_out, "host")

    networks.remove(conn)
    networks.host2chip_senders[sender] = receiver