Example #1
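A unit test for `stack_matrices`: it stacks a pair of matrices horizontally (`order="h"`) and vertically (`order="v"`), checking that the result keeps the input matrix type and matches NumPy's `hstack`/`vstack` applied to the dense equivalents.
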
def test_stack_matrices(matrix_type):
    # horizontal
    xd1, xs1 = make_matrix(matrix_type, (5, 4))
    xd2, xs2 = make_matrix(matrix_type, (5, 3))
    y = stack_matrices([xs1, xs2], order="h")
    assert isinstance(y, matrix_type)
    assert np.allclose(toarray(y), np.hstack([xd1, xd2]))

    # vertical
    xd1, xs1 = make_matrix(matrix_type, (5, 4))
    xd2, xs2 = make_matrix(matrix_type, (3, 4))
    y = stack_matrices([xs1, xs2], order="v")
    assert isinstance(y, matrix_type)
    assert np.allclose(toarray(y), np.vstack([xd1, xd2]))
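The helpers `make_matrix`, `stack_matrices`, and `toarray` come from the project's own utilities, so their definitions are not shown here. As a rough illustration only, here is a minimal sketch of the behavior the test expects, assuming `stack_matrices` dispatches on sparsity (the name `stack_matrices_sketch` is hypothetical):

import numpy as np
import scipy.sparse

def stack_matrices_sketch(matrices, order="v"):
    # "h" stacks along columns, "v" along rows, mirroring np.hstack/np.vstack;
    # sparse inputs go through the scipy.sparse equivalents instead.
    assert order in ("h", "v")
    if any(scipy.sparse.issparse(m) for m in matrices):
        stack = scipy.sparse.hstack if order == "h" else scipy.sparse.vstack
    else:
        stack = np.hstack if order == "h" else np.vstack
    return stack(matrices)

Note that `scipy.sparse.hstack`/`vstack` return COO matrices by default, so the real helper presumably also converts back to the input format: the test asserts `isinstance(y, matrix_type)`.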
Example #2
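A `get_block` method, apparently from an on/off decode-neuron class: it builds a `LoihiBlock` of `2 * d * pairs_per_dim` ReLU compartments plus a `Synapse` whose weight matrix interleaves positively and negatively scaled copies of the decoders, so that each on/off compartment pair receives oppositely signed input.
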
    def get_block(self, weights, block_label=None, syn_label=None):
        gain = self.gain * self.dt
        bias = self.bias * self.dt

        n, d = weights.shape
        n_neurons = 2 * d * self.pairs_per_dim
        block = LoihiBlock(n_neurons, label=block_label)
        block.compartment.configure_relu(dt=self.dt)
        block.compartment.bias[:] = bias.repeat(d)

        syn = Synapse(n, label=syn_label)
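        # build one +gain and one -gain scaled copy of the weights per
        # on/off pair, so paired compartments receive oppositely signed input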
        weights2 = []
        for ga, gb in gain.reshape(self.pairs_per_dim, 2):
            weights2.extend([scale_matrix(weights, ga), scale_matrix(weights, -gb)])
        weights2 = stack_matrices(weights2, order="h")
        syn.set_weights(weights2)
        block.add_synapse(syn)

        return block, syn
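
For context, this is how the builder in Example #3 below invokes this method:

decoder_block, dec_syn = model.decode_neurons.get_block(
    loihi_weights, block_label="%s" % conn, syn_label="decoders"
)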
Example #3
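The full on-chip connection builder (Nengo Loihi style): it samples the connection's transform, solves for decoders on decoded connections, inserts decode neurons where needed (spiking on-chip, non-spiking for voltage probes), optionally configures PES learning on the decoder synapse, and finally wires axons and synapses into the post object.
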
def build_full_chip_connection(model, conn):  # noqa: C901
    """Build dense or sparse connections on-chip"""

    # Create random number generator
    rng = np.random.RandomState(model.seeds[conn])

    pre_obj = model.objs[conn.pre_obj]["out"]
    post_obj = model.objs[conn.post_obj]["in"]
    assert isinstance(pre_obj, (LoihiBlock, LoihiInput))
    assert isinstance(post_obj, (LoihiBlock, LoihiProbe))

    weights = None
    eval_points = None
    solver_info = None
    neuron_type = None
    pre_slice = conn.pre_slice
    post_slice = conn.post_slice

    # sample transform (if using a distribution), transform shape (out, in)
    transform = sample_transform(conn, rng=rng)

    tau_s = 0.0  # `synapse is None` gets mapped to `tau_s = 0.0`
    if isinstance(conn.synapse, nengo.synapses.Lowpass):
        tau_s = conn.synapse.tau
    elif conn.synapse is not None:
        raise NotImplementedError("Cannot handle non-Lowpass synapses")

    needs_decode_neurons = False
    target_encoders = None
    is_chip_process = isinstance(conn.pre_obj, Node) and isinstance(
        conn.pre_obj.output, ChipProcess
    )
    if isinstance(conn.pre_obj, Node) and not (
        isinstance(conn.pre_obj, ChipReceiveNeurons) or is_chip_process
    ):
        assert conn.pre_slice == slice(None)

        weights = expand_matrix(transform, shape=(conn.post.size_in, conn.pre.size_out))

        # input is on-off neuron encoded, so double/flip transform
        weights = stack_matrices([weights, scale_matrix(weights, -1)], order="h")
        target_encoders = "node_encoders"
    elif isinstance(conn.pre_obj, Ensemble) and isinstance(
        conn.pre_obj.neuron_type, nengo.Direct
    ):
        raise NotImplementedError()
    elif isinstance(conn.pre_obj, Ensemble):  # Normal decoded connection
        if isinstance(transform, scipy.sparse.spmatrix):
            raise BuildError(
                "Applying a sparse transform to a decoded connection is not supported"
            )

        eval_points, decoders, solver_info = model.build(
            conn.solver, conn, rng, transform
        )
        pre_slice = slice(None)  # taken care of in decoders

        if conn.solver.weights and not conn.solver.compositional:
            weights = decoders
        else:
            weights = multiply(transform, decoders)

        # the decoder solver assumes a spike height of 1/dt; that isn't the
        # case on loihi, so we need to undo that scaling
        weights = scale_matrix(weights, 1.0 / model.dt)

        neuron_type = conn.pre_obj.neuron_type

        if conn.solver.weights:
            # weight solvers only allowed on ensemble->ensemble connections
            assert isinstance(conn.post_obj, Ensemble)

            if conn.solver.compositional:
                encoders = model.params[conn.post_obj].scaled_encoders
                weights = multiply(encoders[:, post_slice], weights)

            # post slice already applied to encoders (either here or in
            # `build_decoders`), so don't apply later
            post_slice = slice(None)
        else:
            needs_decode_neurons = True
    elif isinstance(conn.pre_obj, (Neurons, ChipReceiveNeurons)) or is_chip_process:
        weights = expand_matrix(transform, shape=(conn.post.size_in, conn.pre.size_out))
        weights = scale_matrix(weights, 1.0 / model.dt)
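        # determine the pre neuron type; ChipProcess inputs have none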
        neuron_type = (
            None
            if is_chip_process
            else conn.pre_obj.neuron_type
            if isinstance(conn.pre_obj, ChipReceiveNeurons)
            else conn.pre_obj.ensemble.neuron_type
        )

        if isinstance(conn.post_obj, Ensemble):
            needs_decode_neurons = True
    else:
        raise NotImplementedError("Connection from type %r" % (type(conn.pre_obj),))

    if neuron_type is not None and hasattr(neuron_type, "amplitude"):
        weights = scale_matrix(weights, neuron_type.amplitude)

    # to proper dtype
    transform = transform.astype(nengo.rc.float_dtype)
    weights = weights.astype(nengo.rc.float_dtype)

    # loihi_weights has shape (in, out), to match the shape expected by block.Synapse
    loihi_weights = weights.T

    mid_obj = pre_obj
    mid_axon_inds = None
    post_tau = tau_s
    if needs_decode_neurons and not isinstance(conn.post_obj, Neurons):
        # --- add decode neurons
        assert weights.ndim == 2
        n, d = loihi_weights.shape

        if isinstance(post_obj, LoihiProbe):
            # use non-spiking decode neurons for voltage probing
            assert len(post_obj.target) == 0 or post_obj.target == [None]
            assert post_slice == slice(None)

            # use the same scaling as the ensemble does, to get good decodes.
            # Note that this assumes that the decoded value is in the range
            # -radius to radius, which is usually true.
            gain = np.array(1.0 / conn.pre_obj.radius, dtype=nengo.rc.float_dtype)

            decoder_block = LoihiBlock(2 * d, label="%s" % conn)
            decoder_block.compartment.configure_nonspiking(
                dt=model.dt, vth=model.vth_nonspiking
            )
            decoder_block.compartment.bias[:] = 0

            dec_syn = Synapse(n, label="probe_decoders")
            weights2 = stack_matrices(
                [scale_matrix(loihi_weights, gain), scale_matrix(loihi_weights, -gain)],
                order="h",
            )

            dec_syn.set_weights(weights2)
            decoder_block.add_synapse(dec_syn)
        else:
            # use spiking decode neurons for on-chip connection
            if isinstance(conn.post_obj, Ensemble):
                # loihi encoders don't include radius, so handle scaling here
                gain = np.array(1.0 / conn.post_obj.radius, dtype=nengo.rc.float_dtype)
                loihi_weights = scale_matrix(loihi_weights, gain)

            post_d = conn.post_obj.size_in
            post_inds = np.arange(post_d, dtype=np.int32)[post_slice]
            assert loihi_weights.shape[1] == len(post_inds) == conn.size_out
            mid_axon_inds = model.decode_neurons.get_post_inds(post_inds, post_d)

            target_encoders = "decode_neuron_encoders"
            decoder_block, dec_syn = model.decode_neurons.get_block(
                loihi_weights, block_label="%s" % conn, syn_label="decoders"
            )

        model.add_block(decoder_block)
        model.objs[conn]["decoded"] = decoder_block
        model.objs[conn]["decoders"] = dec_syn
        model.connection_decode_neurons[conn] = decoder_block

        # use tau_s for filter into decode neurons, decode_tau for filter out
        decoder_block.compartment.configure_filter(tau_s, dt=model.dt)
        post_tau = model.decode_tau

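        # map pre compartments to axons; -1 marks compartments without an axon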
        target_axons = -np.ones(pre_obj.n_neurons, dtype=np.int32)
        target_axons[pre_slice] = np.arange(target_axons[pre_slice].size)
        pre_slice = slice(None)

        dec_ax0 = Axon(n, label="decoders")
        dec_ax0.target = dec_syn
        dec_ax0.set_compartment_axon_map(target_axons)
        pre_obj.add_axon(dec_ax0)
        model.objs[conn]["decode_axon"] = dec_ax0

        loihi_weights = None  # weights have now been handled

        if conn.learning_rule_type is not None:
            rule_type = conn.learning_rule_type
            if isinstance(rule_type, nengo.PES):
                if not isinstance(rule_type.pre_synapse, nengo.synapses.Lowpass):
                    raise ValidationError(
                        "Loihi only supports `Lowpass` pre-synapses for learning rules",
                        attr="pre_synapse",
                        obj=rule_type,
                    )

                pre_tau = rule_type.pre_synapse.tau
                float_tracing_tau = pre_tau / model.dt
                tracing_tau = int(round(float_tracing_tau))
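                # e.g. pre_synapse.tau = 0.005 with dt = 0.001 gives tracing_tau = 5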
                if not np.allclose(float_tracing_tau, tracing_tau):
                    warnings.warn(
                        f"PES learning rule `pre_synapse.tau` ({pre_tau}) is not an "
                        f"integer multiple of `dt` ({model.dt}). Rounding."
                    )

                # Nengo builder scales PES learning rate by `dt / n_neurons`
                n_neurons = (
                    conn.pre_obj.n_neurons
                    if isinstance(conn.pre_obj, Ensemble)
                    else conn.pre_obj.size_in
                )
                learning_rate = rule_type.learning_rate * model.dt / n_neurons

                # Account for scaling to put integer error in range [-127, 127]
                learning_rate /= model.pes_error_scale

                # Tracing mag set so that the magnitude of the pre trace
                # is independent of the pre tau. `dt` factor accounts for
                # Nengo's `dt` spike scaling. Where is the second `dt` from?
                # Maybe the fact that post decode neurons have `vth = 1/dt`?
                tracing_mag = -np.expm1(-1.0 / tracing_tau) / model.dt**2

                # learning weight exponent controls the maximum weight
                # magnitude/weight resolution
                wgt_exp = model.pes_wgt_exp

                dec_syn.set_learning(
                    learning_rate=learning_rate,
                    tracing_mag=tracing_mag,
                    tracing_tau=tracing_tau,
                    wgt_exp=wgt_exp,
                )
            else:
                raise NotImplementedError()

        mid_obj = decoder_block

    if isinstance(post_obj, LoihiProbe):
        assert post_obj.target == [None]
        assert post_slice == slice(None)
        post_obj.target[0] = mid_obj
        model.add_probe(post_obj)
    elif isinstance(conn.post_obj, Neurons):
        assert isinstance(post_obj, LoihiBlock)
        assert post_slice == slice(None)
        if loihi_weights is None:
            raise NotImplementedError("Need weights for connection to neurons")

        assert loihi_weights.ndim == 2
        n1, n2 = loihi_weights.shape
        assert post_obj.n_neurons == n2

        syn = Synapse(n1, label="neuron_weights")
        gain = model.params[conn.post_obj.ensemble].gain
        loihi_weights = scale_matrix(loihi_weights, gain)
        syn.set_weights(loihi_weights)
        post_obj.add_synapse(syn)
        model.objs[conn]["weights"] = syn

        target_axons = -np.ones(mid_obj.n_neurons, dtype=np.int32)
        target_axons[pre_slice] = np.arange(target_axons[pre_slice].size)
        assert target_axons[pre_slice].size == n1

        ax = Axon(mid_obj.n_neurons, label="neuron_weights")
        ax.target = syn
        ax.set_compartment_axon_map(target_axons)
        mid_obj.add_axon(ax)

        post_obj.compartment.configure_filter(post_tau, dt=model.dt)

        if conn.learning_rule_type is not None:
            raise NotImplementedError()
    elif isinstance(conn.post_obj, Ensemble) and conn.solver.weights:
        assert isinstance(post_obj, LoihiBlock)
        assert pre_slice == slice(None), "Not implemented"
        assert post_slice == slice(None)
        assert loihi_weights.ndim == 2
        n1, n2 = loihi_weights.shape
        assert post_obj.n_neurons == n2

        # loihi encoders don't include radius, so handle scaling here
        scale = np.array(1.0 / conn.post_obj.radius, dtype=nengo.rc.float_dtype)
        loihi_weights = scale_matrix(loihi_weights, scale)

        syn = Synapse(n1, label="%s::decoder_weights" % conn)
        syn.set_weights(loihi_weights)
        post_obj.add_synapse(syn)
        model.objs[conn]["weights"] = syn

        ax = Axon(n1, label="decoder_weights")
        ax.target = syn
        mid_obj.add_axon(ax)

        post_obj.compartment.configure_filter(post_tau, dt=model.dt)

        if conn.learning_rule_type is not None:
            raise NotImplementedError()
    elif isinstance(conn.post_obj, Ensemble):
        assert isinstance(post_obj, LoihiBlock)
        assert pre_slice == slice(None), "Not implemented"
        assert post_slice == slice(None)
        assert target_encoders is not None
        if target_encoders not in post_obj.named_synapses:
            build_decode_neuron_encoders(model, conn.post_obj, kind=target_encoders)

        mid_ax = Axon(mid_obj.n_neurons, label="encoders")
        mid_ax.target = post_obj.named_synapses[target_encoders]
        mid_ax.set_compartment_axon_map(mid_axon_inds)
        mid_obj.add_axon(mid_ax)
        model.objs[conn]["mid_axon"] = mid_ax

        post_obj.compartment.configure_filter(post_tau, dt=model.dt)
    else:
        # This includes Node, since nodes can't be targets on-chip
        raise NotImplementedError()

    model.params[conn] = BuiltConnection(
        eval_points=eval_points,
        solver_info=solver_info,
        transform=transform,  # sampled transform
        weights=weights,  # scaled weights (including decoders)
    )