Code example #1
def format_transform(size, transform):
    # `node` in the BuildError below is not a parameter: in the nengo-loihi
    # source this helper is nested inside the passthrough-removal code, and
    # `node` names the passthrough node from that enclosing scope.
    if is_transform_type(transform, "NoTransform"):
        transform = np.array(1.0)
    elif is_transform_type(transform, "Dense"):
        transform = transform.init
    else:
        raise NotImplementedError(
            "Mergeable transforms must be Dense; set remove_passthrough=False"
        )

    if not isinstance(transform, np.ndarray):
        raise NotImplementedError(
            "Mergeable transforms must be specified as Numpy arrays, "
            "not distributions. Set `remove_passthrough=False`."
        )

    if transform.ndim == 0:  # scalar
        transform = np.eye(size) * transform
    elif transform.ndim != 2:
        raise BuildError(f"{node}: Unhandled transform shape: {transform.shape}")

    return transform
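
For intuition, a minimal sketch of the normalization this helper performs, using plain numpy only; the `size = 3` shapes and values are illustrative assumptions, not nengo-loihi code.

import numpy as np

size = 3

# scalar transform (ndim == 0): broadcast to a scaled identity matrix,
# mirroring the `np.eye(size) * transform` branch above
scalar = np.array(2.0)
dense = np.eye(size) * scalar
assert np.array_equal(dense, 2.0 * np.eye(size))

# a 2D transform passes through unchanged; any other ndim would raise
# a BuildError in the helper above
matrix = np.arange(9.0).reshape(size, size)
assert matrix.ndim == 2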
Code example #2
def build_host_to_learning_rule(model, conn):
    if not is_transform_type(conn.transform, ("Dense", "NoTransform")):
        # TODO: What needs to be done to support this? It looks like it should just work
        raise BuildError(
            f"{conn}: nengo-loihi does not yet support "
            f"'{type(conn.transform).__name__}' transforms on host to chip "
            "learning rule connections"
        )

    dim = conn.size_out
    host = model.host_model(base_obj(conn.pre))

    logger.debug("Creating HostSendNode for %s", conn)
    send = HostSendNode(
        dim,
        label=None if conn.label is None else "%s_send" % conn.label,
        add_to_container=False,
    )
    host.build(send)

    pre2send = Connection(
        conn.pre,
        send,
        function=conn.function,
        solver=conn.solver,
        eval_points=conn.eval_points,
        scale_eval_points=conn.scale_eval_points,
        synapse=conn.synapse,
        transform=conn.transform,
        label=conn.label,
        add_to_container=False,
    )
    model.host2chip_pes_senders[send] = model.needs_sender[conn.post_obj]
    _inherit_seed(host, pre2send, model, conn)
    host.build(pre2send)
Code example #3
def build_chip_to_host(model, conn):
    if not is_transform_type(conn.transform, ("Dense", "NoTransform")):
        raise BuildError(
            f"{conn}: nengo-loihi does not yet support "
            f"'{type(conn.transform).__name__}' transforms on chip to host connections"
        )

    rng = np.random.RandomState(model.seeds[conn])
    dim = conn.size_out
    host = model.host_model(base_obj(conn.post))

    logger.debug("Creating HostReceiveNode for %s", conn)
    receive = HostReceiveNode(
        dim,
        label=None if conn.label is None else "%s_receive" % conn.label,
        add_to_container=False,
    )
    host.build(receive)

    receive2post = Connection(
        receive,
        conn.post,
        synapse=conn.synapse,
        label=None if conn.label is None else "%s_host" % conn.label,
        add_to_container=False,
    )
    _inherit_seed(host, receive2post, model, conn)
    host.build(receive2post)

    logger.debug("Creating Probe for %s", conn)
    transform = sample_transform(conn, rng=rng)

    probe = NengoProbe(
        conn.pre, synapse=None, solver=conn.solver, add_to_container=False
    )
    model.chip2host_params[probe] = dict(
        learning_rule_type=conn.learning_rule_type,
        function=conn.function,
        eval_points=conn.eval_points,
        scale_eval_points=conn.scale_eval_points,
        transform=transform,
        label=None if conn.label is None else "%s_probe" % conn.label,
    )
    model.chip2host_receivers[probe] = receive
    _inherit_seed(model, probe, model, conn)
    model.builder.build(model, probe)

    if conn.learning_rule_type is not None:
        if not isinstance(conn.pre_obj, Ensemble):
            raise NotImplementedError(
                "Learning rule presynaptic object must be an Ensemble "
                "(got %r)" % type(conn.pre_obj).__name__
            )
        model.needs_sender[conn.learning_rule] = PESModulatoryTarget(probe)
Code example #4
def build_decoders(model, conn, rng, sampled_transform):
    # Copied from Nengo, except where noted below

    encoders = model.params[conn.pre_obj].encoders
    gain = model.params[conn.pre_obj].gain
    bias = model.params[conn.pre_obj].bias

    eval_points = get_eval_points(model, conn, rng)
    targets = get_targets(conn, eval_points)

    if conn.solver.weights and not conn.solver.compositional:
        # solver is solving for the whole weight matrix, so apply
        # transform/encoders to targets

        # CHANGE: backwards compatibility with nengo<=2.8.0
        # if not isinstance(conn.transform, Dense):
        #     raise BuildError(
        #         "Non-compositional solvers only work with Dense transforms")
        # transform = conn.transform.sample(rng=rng)
        # targets = np.dot(targets, transform.T)
        if not is_transform_type(conn.transform, "Dense"):  # pragma: no cover
            raise BuildError(
                f"{conn}: Non-compositional solvers only work with Dense transforms"
            )
        targets = np.dot(targets, sampled_transform.T)

        # weight solvers only allowed on ensemble->ensemble connections
        assert isinstance(conn.post_obj, Ensemble)
        post_enc = model.params[conn.post_obj].scaled_encoders
        targets = np.dot(targets, post_enc.T[conn.post_slice])

    x = np.dot(eval_points, encoders.T / conn.pre_obj.radius)

    # CHANGE: we pass `dt` to `solve_for_decoders`,
    # and do not support the decoder cache.
    # wrapped_solver = (model.decoder_cache.wrap_solver(solve_for_decoders)
    #                   if model.seeded[conn] else solve_for_decoders)
    # decoders, solver_info = wrapped_solver(
    #     conn, gain, bias, x, targets, rng=rng)
    decoders, solver_info = solve_for_decoders(
        conn, gain, bias, x, targets, rng=rng, dt=model.dt
    )

    return eval_points, decoders.T, solver_info
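
To make the linear algebra above concrete, here is a hedged, self-contained sketch: a hypothetical rectified-linear rate model stands in for the neuron model inside `solve_for_decoders`, and unregularized `np.linalg.lstsq` stands in for nengo's regularized solvers. None of the constants below come from nengo or nengo-loihi.

import numpy as np

rng = np.random.RandomState(0)
n_neurons, n_eval, dims = 50, 200, 1

eval_points = rng.uniform(-1, 1, size=(n_eval, dims))
encoders = rng.choice([-1.0, 1.0], size=(n_neurons, dims))
gain = rng.uniform(1.0, 2.0, size=n_neurons)
bias = rng.uniform(0.0, 1.0, size=n_neurons)

# project eval points onto each neuron's preferred direction, as in
# `x = np.dot(eval_points, encoders.T / conn.pre_obj.radius)` with radius 1
x = np.dot(eval_points, encoders.T)

# hypothetical rate response standing in for the real neuron model
activities = np.maximum(gain * x + bias, 0.0)

# least-squares decoders for the identity function (targets = eval_points);
# nengo's solvers add regularization on top of this
decoders, *_ = np.linalg.lstsq(activities, eval_points, rcond=None)
assert decoders.shape == (n_neurons, dims)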
Code example #5
def build_conv2d_connection(model, transform, conn):
    assert is_transform_type(transform, ("Convolution", "ConvolutionTranspose"))

    if transform.dimensions != 2:
        raise NotImplementedError("nengo-loihi only supports 2D convolution")

    # Create random number generator
    rng = np.random.RandomState(model.seeds[conn])

    pre_obj = model.objs[conn.pre_obj]["out"]
    post_obj = model.objs[conn.post_obj]["in"]
    assert isinstance(pre_obj, (LoihiInput, LoihiBlock))
    assert isinstance(post_obj, LoihiBlock)

    tau_s = 0.0
    if isinstance(conn.synapse, nengo.synapses.Lowpass):
        tau_s = conn.synapse.tau
    elif conn.synapse is not None:
        raise NotImplementedError("Cannot handle non-Lowpass synapses")

    # --- pre
    assert isinstance(conn.pre_obj, (Neurons, ChipReceiveNeurons))

    kernel = transform.sample(rng=rng)
    input_shape = transform.input_shape

    # Account for nengo spike height of 1/dt
    kernel = kernel / model.dt

    if isinstance(conn.pre_obj, ChipReceiveNeurons):
        neuron_type = conn.pre_obj.neuron_type
    elif isinstance(conn.pre_obj, Neurons):
        neuron_type = conn.pre_obj.ensemble.neuron_type

    if neuron_type is not None and hasattr(neuron_type, "amplitude"):
        kernel = kernel * neuron_type.amplitude

    # --- post
    assert isinstance(conn.post_obj, Neurons)
    assert conn.post_slice == slice(None)

    gain = model.params[conn.post_obj.ensemble].gain
    if not np.all(gain == gain[0]):
        # Cannot fold gains into kernel, result would not be convolutional.
        # Therefore, Loihi does not support this if we want to share weights.
        raise ValidationError(
            "All neurons targeted by a Convolution connection must "
            "have the same gain",
            "gain",
            obj=conn.post_obj.ensemble,
        )
    kernel = kernel * gain[0]
    kernel = kernel.astype(nengo.rc.float_dtype)

    pop_type = model.config[conn].pop_type
    new_transform = copy.copy(transform)
    # write the scaled kernel into the copy through the Parameter descriptor,
    # since `init` is read-only on the transform instance itself
    type(new_transform).init.data[new_transform] = kernel
    weights, indices, axon_to_weight_map, offsets = conv2d_loihi_weights(new_transform)

    synapse = Synapse(np.prod(input_shape.spatial_shape), label="conv2d_weights")
    synapse.set_population_weights(
        weights, indices, axon_to_weight_map, offsets, pop_type=pop_type
    )
    post_obj.add_synapse(synapse)
    model.objs[conn]["weights"] = synapse
    if synapse.atom_bits_extra() > 0:
        warnings.warn(
            "Using more than 32 'populations' (e.g. convolutional filters) with "
            "`pop_type=16` axons has not yet been implemented in NxSDK. This feature "
            "is therefore emulator-only."
        )

    target_axons = -np.ones(pre_obj.n_neurons, dtype=np.int32)
    target_axons[conn.pre_slice] = pixel_idxs(input_shape)
    atoms = np.zeros(pre_obj.n_neurons, dtype=np.int32)
    atoms[conn.pre_slice] = channel_idxs(input_shape)

    ax = Axon(np.prod(input_shape.spatial_shape), label="conv2d_weights")
    ax.target = synapse
    ax.set_compartment_axon_map(target_axons, atoms=atoms)
    pre_obj.add_axon(ax)

    post_obj.compartment.configure_filter(tau_s, dt=model.dt)

    model.params[conn] = BuiltConnection(
        eval_points=None, solver_info=None, transform=None, weights=kernel
    )
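
The uniform-gain requirement enforced above can be seen in a toy example: one gain shared by every post neuron folds into the kernel, while per-neuron gains would scale each row of the weight matrix differently and break weight sharing. A hedged numpy illustration, not nengo-loihi code; the 1D shapes are assumptions.

import numpy as np

rng = np.random.RandomState(0)

# toy 1D convolution written as an explicit weight matrix: every output
# neuron applies the same 3-tap kernel at a shifted input position
x = rng.randn(8)
kernel = rng.randn(3)
n_out = len(x) - len(kernel) + 1
W = np.zeros((n_out, len(x)))
for i in range(n_out):
    W[i, i:i + len(kernel)] = kernel

g = 1.7  # one gain shared by the whole post population

# folding the shared gain into the kernel (W built from kernel * g) equals
# applying the gain after the convolution, so the weights stay shared
assert np.allclose((W * g) @ x, g * (W @ x))

# with per-neuron gains, diag(gains) @ W scales each row differently, so the
# rows no longer share one kernel -- the case the ValidationError rejects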
Code example #6
def build_chip_connection(model, conn):
    if is_transform_type(conn.transform, ("NoTransform", "Dense", "Sparse")):
        return build_full_chip_connection(model, conn)
    else:
        model.build(conn.transform, conn)
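
A plausible reading of the `is_transform_type` check that this dispatch relies on: comparing class names rather than classes keeps the test working across nengo versions in which some transform classes do not exist. The sketch below is an assumption about its behavior, not the actual nengo-loihi implementation.

def is_transform_type(transform, types):
    # accept a single type name or a tuple of names
    types = (types,) if isinstance(types, str) else types
    return type(transform).__name__ in types

class Dense:  # stand-in for nengo.transforms.Dense
    pass

assert is_transform_type(Dense(), ("NoTransform", "Dense", "Sparse"))
assert not is_transform_type(Dense(), "Sparse")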
Code example #7
def build_host_to_chip(model, conn):
    rng = np.random.RandomState(model.seeds[conn])
    host = model.host_model(base_obj(conn.pre))

    if is_transform_type(conn.transform, ("Convolution", "ConvolutionTranspose")):
        raise BuildError(
            f"{conn}: Conv2D transforms not supported for off-chip to "
            "on-chip connections where `pre` is not a Neurons object."
        )
    elif not is_transform_type(conn.transform, ("Dense", "NoTransform")):
        raise BuildError(
            f"{conn}: nengo-loihi does not yet support "
            f"'{type(conn.transform).__name__}' transforms on host to chip connections"
        )

    # Scale the input spikes based on the radius of the target ensemble
    weights = sample_transform(conn, rng=rng)

    if isinstance(conn.post_obj, Ensemble):
        weights = weights / conn.post_obj.radius

    if is_transform_type(conn.transform, "NoTransform"):
        transform = weights  # weights are 1 / (post ensemble radius), if applicable
    else:
        # copy the Transform information, setting `init` to the sampled weights
        transform = copy.copy(conn.transform)
        type(transform).init.data[transform] = weights

    if isinstance(conn.post_obj, Neurons):
        # we don't have encoders, and the transform could have large output,
        # so do it on the chip
        host_transform = 1.0
        chip_transform = transform
        dim = conn.size_mid
    else:
        # we have encoders on the chip, so do the transform off-chip
        host_transform = transform
        chip_transform = 1.0
        dim = conn.size_out

    logger.debug("Creating ChipReceiveNode for %s", conn)
    receive = ChipReceiveNode(
        dim * 2,  # two decode neurons (an on/off pair) per dimension
        size_out=dim,
        label=None if conn.label is None else "%s_node" % conn.label,
        add_to_container=False,
    )
    model.builder.build(model, receive)

    receive2post = Connection(
        receive,
        conn.post,
        transform=chip_transform,
        synapse=model.decode_tau,
        label=None if conn.label is None else "%s_chip" % conn.label,
        add_to_container=False,
    )
    _inherit_seed(model, receive2post, model, conn)
    _inherit_config(model, receive2post, model, conn)
    build_chip_connection(model, receive2post)

    logger.debug("Creating DecodeNeuron ensemble for %s", conn)
    ens = model.node_neurons.get_ensemble(dim, add_to_container=False)
    ens.label = None if conn.label is None else "%s_ens" % conn.label
    _inherit_seed(host, ens, model, conn)
    host.build(ens)
    model.connection_decode_neurons[conn] = ens

    pre2ens = Connection(
        conn.pre,
        ens,
        function=conn.function,
        solver=conn.solver,
        eval_points=conn.eval_points,
        scale_eval_points=conn.scale_eval_points,
        synapse=conn.synapse,
        transform=host_transform,
        label=None if conn.label is None else "%s_enc" % conn.label,
        add_to_container=False,
    )
    _inherit_seed(host, pre2ens, model, conn)
    host.build(pre2ens)

    logger.debug("Creating HostSendNode for %s", conn)
    send = HostSendNode(
        dim * 2,
        label=None if conn.label is None else "%s_send" % conn.label,
        add_to_container=False,
    )
    host.build(send)

    ensneurons2send = Connection(
        ens.neurons,
        send,
        synapse=None,
        label=None if conn.label is None else "%s_host" % conn.label,
        add_to_container=False,
    )
    _inherit_seed(host, ensneurons2send, model, conn)
    model.host2chip_senders[send] = receive
    host.build(ensneurons2send)
Code example #8
def conv2d_loihi_weights(transform):  # noqa: C901
    assert (
        transform.channels_last == transform.input_shape.channels_last
    ), "Transforms that switch the channel position not yet implemented"

    transpose = is_transform_type(transform, "ConvolutionTranspose")

    input_rows, input_cols = transform.input_shape.spatial_shape
    n_channels = transform.input_shape.n_channels
    output_rows, output_cols = transform.output_shape.spatial_shape
    n_filters = transform.n_filters
    n_compartments = output_rows * output_cols * n_filters
    kernel_rows, kernel_cols = transform.kernel_size
    row_stride, col_stride = transform.strides

    kernel = transform.init
    assert isinstance(kernel, np.ndarray), "Should already have been sampled"
    assert kernel.shape == (kernel_rows, kernel_cols, n_channels, n_filters)

    # transpose kernel to (in_channels, rows, cols, out_channels)
    kernel = np.transpose(kernel, (2, 0, 1, 3))

    if not transpose:
        # flip weights to do correlation
        kernel = kernel[:, ::-1, ::-1, :]

    # compute number of used input pixels
    if not transpose:
        ri_max = (output_rows - 1) * row_stride + 1
        rj_max = (output_cols - 1) * col_stride + 1
    else:
        # compute number of used output pixels
        ri_max, rj_max = transform.output_shape.spatial_shape

    # --- determine padding
    pad_i, pad_j = 0, 0
    if transform.padding == "same":
        if transpose:
            # these paddings are based on the method used in
            # `nengo._vendor.npconv2d`, to ensure we perform the same
            # computation
            output_rows_min = (input_rows - 1) * row_stride + 1
            output_cols_min = (input_cols - 1) * col_stride + 1
            pad_i = min(
                max(output_rows + kernel_rows - 1 - output_rows_min, 0),
                (kernel_rows - 1) * 2,
            )
            pad_j = min(
                max(output_cols + kernel_cols - 1 - output_cols_min, 0),
                (kernel_cols - 1) * 2,
            )
            # use floor instead of the `ceil` used by `npconv2d.conv2d_gradx`, since
            # this padding is applied to the output where the kernel is flipped
            pad_i, pad_j = pad_i // 2, pad_j // 2
            pad_i, pad_j = -pad_i, -pad_j
        else:
            # these paddings are based on the method used in
            # `nengo._vendor.npconv2d`, to ensure we perform the same
            # computation
            pad_i = max(
                (output_rows - 1) * row_stride + kernel_rows - input_rows, 0)
            pad_j = max(
                (output_cols - 1) * col_stride + kernel_cols - input_cols, 0)
            pad_i, pad_j = pad_i // 2, pad_j // 2

    # --- determine weights and indices
    weights = []
    indices = []
    # compartment offset (aka. compartment base) for each axon
    offsets = np.zeros(input_rows * input_cols, dtype=np.int32)
    axon_to_weight_map = np.zeros(input_rows * input_cols, dtype=np.int32)
    weights_map = {}
    for i, j in itertools.product(range(input_rows), range(input_cols)):
        ij = i * input_cols + j

        if transpose:
            # compartment indices that this input axon would map to if mode == 'valid'
            ri0 = i * row_stride + pad_i
            rj0 = j * col_stride + pad_j
        else:
            # unstrided compartment indices that this input axon would map to
            # if strides == 1 and mode == 'full'
            ri0 = i + pad_i + 1 - kernel_rows
            rj0 = j + pad_j + 1 - kernel_cols

        ri = np.arange(ri0, ri0 + kernel_rows)
        rj = np.arange(rj0, rj0 + kernel_cols)

        wmask_i = (ri >= 0) & (ri < ri_max)
        wmask_j = (rj >= 0) & (rj < rj_max)
        if transpose:
            assert wmask_i.sum() > 0 and wmask_j.sum() > 0
        else:
            wmask_i &= ri % row_stride == 0
            wmask_j &= rj % col_stride == 0

        if wmask_i.sum() == 0 or wmask_j.sum() == 0:
            # this axon is not needed, so indicate this in offsets and skip
            offsets[ij] = -1
            continue

        yi0, yj0 = ri[wmask_i][0], rj[wmask_j][0]
        if not transpose:
            yi0 = yi0 // row_stride
            yj0 = yj0 // col_stride

        yij0 = yi0 * output_cols + yj0
        offset = yij0 * n_filters if transform.channels_last else yij0

        # There is currently an upper limit on the axon compartment offset of 256.
        # To work around this, we split the offset into two parts, and make extra sets
        # of redundant weights with part of the offset in the indices, as needed.
        axon_offset = offset % 256
        index_offset = offset - axon_offset
        offsets[ij] = axon_offset

        weight_key = (tuple(wmask_i), tuple(wmask_j), index_offset)
        if weight_key not in weights_map:
            w = kernel[:, wmask_i[:, None] * wmask_j, :]
            assert w.shape == (n_channels, wmask_i.sum() * wmask_j.sum(),
                               n_filters)

            # --- determine indices
            # channel inds are zero, since we use same indices for each channel
            channel_inds = np.zeros(n_channels, dtype=np.int32)
            row_inds = np.arange(wmask_i.sum(), dtype=np.int32)
            col_inds = np.arange(wmask_j.sum(), dtype=np.int32)
            filter_inds = np.arange(n_filters, dtype=np.int32)

            order = [channel_inds, row_inds, col_inds, filter_inds]
            shape = [n_channels, output_rows, output_cols, n_filters]
            if not transform.channels_last:
                # move filters (aka. output channels) before rows/cols
                w = np.transpose(w, (0, 2, 1))
                order = [order[i] for i in (0, 3, 1, 2)]
                shape = [shape[i] for i in (0, 3, 1, 2)]

            n = len(shape)
            strides = [
                np.prod(shape[i + 1:], dtype=np.int32) for i in range(n)
            ]

            # inds[i_0,...,i_{n-1}] = sum_{k=0}^{n-1} strides[k] * order[k][i_k]
            strided_inds = [
                stride * ind.reshape([-1] + [1] * (n - 1 - k))
                for k, (ind, stride) in enumerate(zip(order, strides))
            ]
            inds = sum([index_offset] + strided_inds)

            weights_map[weight_key] = len(weights)
            weights.append(w.reshape(n_channels, -1))
            indices.append(inds.reshape(n_channels, -1))

        axon_to_weight_map[ij] = weights_map[weight_key]

        # check that offset (compartment base) plus index points to a valid compartment
        inds = indices[axon_to_weight_map[ij]]
        assert (offsets[ij] + inds < n_compartments).all()

    return weights, indices, axon_to_weight_map, offsets
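
The index construction above (`inds[i_0,...,i_{n-1}] = sum_k strides[k] * order[k][i_k]`, before `index_offset` is added) is row-major flattening. A small self-contained check, using a made-up shape:

import numpy as np

# made-up shape standing in for (channels, rows, cols, filters)
shape = [2, 4, 5, 3]
n = len(shape)
strides = [np.prod(shape[k + 1:], dtype=np.int32) for k in range(n)]

# one index array per axis, reshaped so the arrays broadcast together
order = [np.arange(s, dtype=np.int32) for s in shape]
strided = [
    stride * ind.reshape([-1] + [1] * (n - 1 - k))
    for k, (ind, stride) in enumerate(zip(order, strides))
]
inds = sum(strided)

# row-major flat indices: exactly what the broadcast sum reproduces
expected = np.arange(np.prod(shape)).reshape(shape)
assert np.array_equal(inds, expected)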