Example 1
 def make_middle_layer(self, n_features, n_parallel,
                       n_local, kernel_stride, kernel_size):
     with self.net:
         prev_layer = self.layers[-1]
         prev_output_shape = self.output_shapes[-1]
         layer = []
         for prev_row in prev_layer:
             row = []
             for prev_col in prev_row:
                 col = []
                 
                 index = 0
                 for k in range(n_parallel):
                     conv = nengo.Convolution(n_features, prev_output_shape,
                                              channels_last=False,
                                              kernel_size=kernel_size,
                                              strides=kernel_stride)
                     ens = nengo.Ensemble(conv.output_shape.size, dimensions=1,
                                          label='%s' % conv.output_shape)
                     for kk in range(n_local):
                         prev_k = prev_col[index%len(prev_col)]
                         conv = nengo.Convolution(n_features, prev_output_shape,
                                                  channels_last=False,
                                                  kernel_size=kernel_size,
                                                  strides=kernel_stride)
                         nengo.Connection(prev_k, ens.neurons, transform=conv)
                         index += 1
                     col.append(ens.neurons)
                 row.append(col)
             layer.append(row)
         self.layers.append(layer)
         self.output_shapes.append(conv.output_shape)
    def make_middle_layer(self,
                          n_features,
                          n_parallel,
                          n_local,
                          kernel_stride,
                          kernel_size,
                          padding='valid',
                          use_neurons=True,
                          init=nengo.dists.Uniform(-1, 1)):
        with self.net:
            prev_layer = self.layers[-1]
            prev_output_shape = self.output_shapes[-1]
            layer = []
            for prev_row in prev_layer:
                row = []
                for prev_col in prev_row:
                    col = []

                    index = 0
                    for k in range(n_parallel):
                        conv = nengo.Convolution(n_features,
                                                 prev_output_shape,
                                                 channels_last=False,
                                                 kernel_size=kernel_size,
                                                 padding=padding,
                                                 strides=kernel_stride,
                                                 init=init)
                        if use_neurons:
                            ens = nengo.Ensemble(conv.output_shape.size,
                                                 dimensions=1,
                                                 label='%s' %
                                                 conv.output_shape)
                            ens_neurons = ens.neurons
                        else:
                            ens = nengo.Node(None,
                                             size_in=conv.output_shape.size,
                                             label='%s' % conv.output_shape)
                            ens_neurons = ens
                        for kk in range(n_local):
                            prev_k = prev_col[index % len(prev_col)]
                            conv = nengo.Convolution(n_features,
                                                     prev_output_shape,
                                                     channels_last=False,
                                                     kernel_size=kernel_size,
                                                     padding=padding,
                                                     strides=kernel_stride,
                                                     init=init)
                            nengo.Connection(prev_k,
                                             ens_neurons,
                                             transform=conv)
                            index += 1
                        col.append(ens_neurons)
                    row.append(col)
                layer.append(row)
            self.layers.append(layer)
            self.output_shapes.append(conv.output_shape)
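Both versions of make_middle_layer above follow the same core Nengo pattern: construct a nengo.Convolution transform, size the post population from conv.output_shape.size, and connect with transform=conv. Below is a minimal standalone sketch of that pattern; the 28x28 single-channel input and the filter count are illustrative assumptions, not values taken from these examples.

import numpy as np
import nengo

with nengo.Network() as net:
    # flattened 28x28, single-channel image as a constant input
    inp = nengo.Node(np.zeros(28 * 28))

    conv = nengo.Convolution(n_filters=4,
                             input_shape=(28, 28, 1),
                             kernel_size=(3, 3),
                             strides=(1, 1),
                             padding='valid')

    # one neuron per element of the convolution output
    layer = nengo.Ensemble(conv.output_shape.size, dimensions=1)
    nengo.Connection(inp, layer.neurons, transform=conv, synapse=None)
    probe = nengo.Probe(layer.neurons)

with nengo.Simulator(net) as sim:
    sim.step()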
def test_merge_conv(Simulator, channels_last, seed, pytestconfig):
    from nengo.builder.transforms import (  # pylint: disable=import-outside-toplevel
        ConvInc, )

    with nengo.Network(seed=seed) as net:
        a = nengo.Node(np.ones(32))
        b = nengo.Node(size_in=12)
        c = nengo.Node(size_in=12)
        nengo.Connection(
            a,
            b,
            synapse=None,
            transform=nengo.Convolution(
                3,
                (4, 4, 2) if channels_last else (2, 4, 4),
                channels_last=channels_last,
            ),
        )
        nengo.Connection(
            a,
            c,
            synapse=None,
            transform=nengo.Convolution(
                3,
                (4, 4, 2) if channels_last else (2, 4, 4),
                channels_last=channels_last,
            ),
        )
        p_b = nengo.Probe(b)
        p_c = nengo.Probe(c)

    with pytest.warns(None) as recwarns:
        with Simulator(net) as sim:
            assert (len([
                ops for ops in sim.tensor_graph.plan
                if isinstance(ops[0], ConvInc)
            ]) == 1)

            sim.step()

    # check for warning about force_last
    # note: this also assures us that we are testing on the GPU in native
    # channels_first when possible
    recwarns = [w for w in recwarns if "channels_last=False" in str(w.message)]
    if channels_last or (tf_gpu_installed
                         and pytestconfig.getoption("--device") != "/cpu:0"):
        assert len(recwarns) == 0
    else:
        assert len(recwarns) > 0

    with nengo.Simulator(net) as canonical:
        canonical.step()

    assert np.allclose(sim.data[p_b], canonical.data[p_b], atol=5e-6)
    assert np.allclose(sim.data[p_c], canonical.data[p_c], atol=5e-6)
Example 4
def test_argreprs():
    """Test repr() for each transform type."""
    assert repr(nengo.Dense((1, 2), init=[[1, 1]])) == "Dense(shape=(1, 2))"

    assert (repr(nengo.Convolution(3, (1, 2, 3))) ==
            "Convolution(n_filters=3, input_shape=(1, 2, 3))")
    assert (repr(nengo.Convolution(3, (1, 2, 3), kernel_size=(3, 2))) ==
            "Convolution(n_filters=3, input_shape=(1, 2, 3), "
            "kernel_size=(3, 2))")
    assert (repr(nengo.Convolution(3, (1, 2, 3), channels_last=False)) ==
            "Convolution(n_filters=3, input_shape=(1, 2, 3), "
            "channels_last=False)")
Example 5
    def conv_layer(x,
                   input_shape,
                   array_init=None,
                   label=None,
                   conn_args=None,
                   **conv_args):
        conn_args = {} if conn_args is None else conn_args

        if array_init is not None:
            assert all(a not in conv_args
                       for a in ("init", "kernel_size", "n_filters"))
            assert array_init.ndim == 4
            conv_args["init"] = array_init
            conv_args["kernel_size"] = array_init.shape[:2]
            assert array_init.shape[2] == input_shape.n_channels
            conv_args["n_filters"] = array_init.shape[3]

        conv = nengo.Convolution(input_shape=input_shape, **conv_args)

        # add an ensemble to implement the activation function
        layer = nengo.Ensemble(conv.output_shape.size, 1, label=label)

        # connect up the input object to the new layer
        conn = nengo.Connection(x, layer.neurons, transform=conv, **conn_args)

        return layer, conv, conn
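A hypothetical call to conv_layer is sketched below. The 28x28 single-channel input, the filter count, and the label are assumptions for illustration; n_filters and kernel_size are simply forwarded to nengo.Convolution through **conv_args, and nengo plus numpy (as np) are assumed to be imported as in the other examples.

with nengo.Network():
    inp = nengo.Node(np.zeros(28 * 28))  # flattened 28x28, one channel
    layer, conv, conn = conv_layer(
        inp,
        input_shape=nengo.transforms.ChannelShape((28, 28, 1)),
        n_filters=4,
        kernel_size=(3, 3),
        label='conv0',
    )
    print(conv.output_shape.shape)  # (26, 26, 4) with the default 'valid' padding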
Example 6
def test_conv_round_robin_unsupported(Simulator, seed):
    k = 10
    d = 5
    with nengo.Network(seed=seed) as model:
        u = nengo.Node(output=np.linspace(-1, 1, k))

        a = nengo.Ensemble(n_neurons=k**2, dimensions=k)

        x = nengo.Ensemble(n_neurons=d,
                           dimensions=d,
                           gain=np.ones(d),
                           bias=np.ones(d))

        nengo.Connection(u, a)

        conv = nengo.Convolution(n_filters=d,
                                 input_shape=(k, k, 1),
                                 strides=(1, 1),
                                 kernel_size=(k, k))
        assert conv.size_in == k**2
        assert conv.size_out == d

        nengo.Connection(a.neurons, x.neurons, transform=conv)

    with pytest.raises(BuildError, match="multi-chip allocator"):
        with Simulator(model,
                       hardware_options={'allocator': RoundRobin(n_chips=8)},
                       precompute=True):
            pass
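The size_in/size_out asserts above hold because a kernel that spans the whole (k, k) input under 'valid' padding collapses the spatial dimensions to 1x1, leaving only the d filter outputs. A small sketch with the same shapes (assuming nengo is imported):

conv = nengo.Convolution(n_filters=5,
                         input_shape=(10, 10, 1),
                         kernel_size=(10, 10),
                         strides=(1, 1))
assert conv.output_shape.shape == (1, 1, 5)
assert conv.size_in == 10 * 10 * 1 and conv.size_out == 5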
Example 7
def test_conv_non_lowpass(Simulator):
    k = 10
    d = 5
    with nengo.Network() as model:
        a = nengo.Ensemble(n_neurons=k**2, dimensions=k)

        x = nengo.Ensemble(n_neurons=d,
                           dimensions=d,
                           gain=np.ones(d),
                           bias=np.ones(d))

        conv = nengo.Convolution(n_filters=d,
                                 input_shape=(k, k, 1),
                                 strides=(1, 1),
                                 kernel_size=(k, k))
        assert conv.size_in == k**2
        assert conv.size_out == d

        nengo.Connection(a.neurons,
                         x.neurons,
                         transform=conv,
                         synapse=nengo.Alpha(0.005))

    with pytest.raises(NotImplementedError, match="non-Lowpass synapses"):
        with Simulator(model):
            pass
Example 8
def _test_convolution_shape(padding, stride, k_size, x_mul, groups,
                            out_channels, rng, allclose):
    tf = pytest.importorskip("tensorflow")

    in_channels = 2

    for i in range(2 * k_size):
        x_size = k_size + stride * (x_mul - 1) + i
        x_shape = (x_size, x_size, in_channels)
        k_shape = (k_size, k_size, in_channels // groups, out_channels)

        x = rng.uniform(-1, 1, size=x_shape)
        kernel = rng.uniform(-1, 1, size=k_shape)
        y_tf = tf.nn.conv2d(x[None, ...],
                            kernel,
                            stride,
                            padding=padding.upper()).numpy()[0]

        y_np = conv2d_groups(x[None, ...],
                             kernel,
                             pad=padding.upper(),
                             stride=(stride, stride))[0]

        transform = nengo.Convolution(
            out_channels,
            x_shape,
            kernel_size=(k_size, k_size),
            strides=(stride, stride),
            padding=padding,
            groups=groups,
        )

        assert transform.output_shape.shape == y_tf.shape
        assert y_np.shape == y_tf.shape
        assert allclose(y_np, y_tf)
Example 9
def test_copy_convolution():
    x = nengo.Convolution(1, (2, 3, 4), channels_last=False)
    y = copy(x)

    assert x.n_filters == y.n_filters
    assert x.input_shape == y.input_shape
    assert x.channels_last == y.channels_last
Example 10
def test_split_conv2d_transform_error(Simulator):
    with nengo.Network() as net:
        node_offchip = nengo.Node([1])
        ens_onchip = nengo.Ensemble(10, 1)
        conv2d = nengo.Convolution(n_filters=1,
                                   input_shape=(1, 1, 1),
                                   kernel_size=(1, 1))
        nengo.Connection(node_offchip, ens_onchip, transform=conv2d)

    with pytest.raises(BuildError, match="Conv2D"):
        with Simulator(net):
            pass
Example 11
def test_non_compositional_solver_transform_error(Simulator):
    pytest.importorskip("scipy")

    with nengo.Network() as net:
        a = nengo.Ensemble(10, 1)
        b = nengo.Ensemble(10, 1)
        nengo.Connection(a, b, solver=Nnls(weights=True),
                         transform=nengo.Convolution(
                             1, (1, 1), kernel_size=(1,), strides=(1,)))

    # build error for non-compositional solver with non-dense transform
    with pytest.raises(BuildError, match="Non-compositional solvers"):
        with Simulator(net):
            pass
Example 12
def test_split_conv2d_transform_error():
    with nengo.Network() as net:
        add_params(net)
        node_offchip = nengo.Node([1])
        ens_onchip = nengo.Ensemble(10, 1)
        conv2d = nengo.Convolution(n_filters=1,
                                   input_shape=(1, 1, 1),
                                   kernel_size=(1, 1))
        nengo.Connection(node_offchip, ens_onchip, transform=conv2d)

    with pytest.raises(BuildError, match="Conv2D"):
        split(net,
              precompute=False,
              node_neurons=default_node_neurons,
              node_tau=0.005)
Example 13
def test_conv_error(Simulator, d):
    with nengo.Network() as net:
        a = nengo.Node([0])
        b = nengo.Node(size_in=1)
        nengo.Connection(a,
                         b,
                         transform=nengo.Convolution(1, [1] * (d + 1),
                                                     kernel_size=[1] * d,
                                                     strides=[1] * d))

    try:
        with Simulator(net):
            pass
    except NotImplementedError:
        assert d == 4
    else:
        assert d == 3
Example 14
def conv_layer(x, *args, activation=True, **kwargs):
    # create a Conv2D transform with the given arguments
    conv = nengo.Convolution(*args, channels_last=False, **kwargs)

    if activation:
        # add an ensemble to implement the activation function
        layer = nengo.Ensemble(conv.output_shape.size, 1).neurons
    else:
        # no nonlinearity, so we just use a node
        layer = nengo.Node(size_in=conv.output_shape.size)

    # connect up the input object to the new layer
    nengo.Connection(x, layer, transform=conv)

    # print out the shape information for our new layer
    print("LAYER")
    print(conv.input_shape.shape, "->", conv.output_shape.shape)

    return layer, conv
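A hypothetical two-layer stack built with this helper is sketched below. The input size, filter counts, and strides are illustrative only; since channels_last=False is hard-coded, the shapes are channels-first and the flattened input must be ordered accordingly. The second layer reuses conv1.output_shape as its input shape, which is the same chaining used in the larger examples here.

with nengo.Network():
    inp = nengo.Node(np.zeros(1 * 28 * 28))  # channels-first (1, 28, 28), flattened
    layer1, conv1 = conv_layer(inp, 4, (1, 28, 28),
                               kernel_size=(3, 3), strides=(2, 2))
    # the previous output shape becomes the next layer's input shape
    layer2, conv2 = conv_layer(layer1, 8, conv1.output_shape,
                               kernel_size=(3, 3), strides=(2, 2),
                               activation=False)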
Example 15
def test_convolution():
    shape = (3, 3, 2, 4)
    tr = nengo.Convolution(
        n_filters=4,
        input_shape=(5, 5, 2),
        init=np.arange(np.prod(shape)).reshape(shape),
    )
    inp = np.arange(tr.size_in)

    stim = stimulus(inp)
    out = stim.transform(tr).run(1, 1)

    with nengo.Network() as model:
        stim = nengo.Node(output=inp)
        x = nengo.Node(size_in=tr.size_out)
        nengo.Connection(stim, x, transform=tr, synapse=None)
        p = nengo.Probe(x)

    with nengo.Simulator(model) as sim:
        sim.step()

    assert np.allclose(sim.data[p], out)
Example 16
def test_convolution_validation_errors():
    # conflicting channels_last
    input_shape = nengo.transforms.ChannelShape((2, 3, 4), channels_last=True)
    with pytest.raises(ValidationError, match="transform has channels_l.*input shape"):
        nengo.Convolution(4, input_shape, channels_last=False)

    # kernel_size does not match dimensions (2)
    with pytest.raises(ValidationError, match=r"Kernel dimensions \(3\) does not mat"):
        nengo.Convolution(4, input_shape, kernel_size=(3, 3, 3))

    # strides does not match dimensions (2)
    with pytest.raises(ValidationError, match=r"Stride dimensions \(3\) does not mat"):
        nengo.Convolution(4, input_shape, strides=(1, 1, 1))

    # init shape does not match kernel shape
    nengo.Convolution(4, input_shape, init=np.ones((3, 3, 4, 4)))  # this works
    with pytest.raises(ValidationError, match=r"Kernel shape \(9, 9, 4, 4\).*not mat"):
        nengo.Convolution(4, input_shape, init=np.ones((9, 9, 4, 4)))
    with pytest.raises(ValidationError, match=r"Kernel shape \(3, 3, 7, 4\).*not mat"):
        nengo.Convolution(4, input_shape, init=np.ones((3, 3, 7, 4)))
    with pytest.raises(ValidationError, match=r"Kernel shape \(3, 3, 4, 5\).*not mat"):
        nengo.Convolution(4, input_shape, init=np.ones((3, 3, 4, 5)))
Example 17
    def evaluate(self, p, plt):
        files = []
        sets = []
        for f in os.listdir(p.dataset_dir):
            if f.endswith('events'):
                files.append(os.path.join(p.dataset_dir, f))

        if p.test_set == 'one':
            test_file = random.sample(files, 1)[0]
            files.remove(test_file)
        
        if p.n_data != -1:
            files = random.sample(files, p.n_data)
            
        inputs = []
        targets = []
        for f in files:
            times, imgs, targs = davis_tracking.load_data(
                f,
                dt=p.dt,
                decay_time=p.decay_time,
                separate_channels=p.separate_channels,
                saturation=p.saturation,
                merge=p.merge)
            inputs.append(imgs)
            targets.append(targs[:, :2])
                                
        inputs_all = np.vstack(inputs)
        targets_all = np.vstack(targets)
        
        if p.test_set == 'odd':
            inputs_train = inputs_all[::2]
            inputs_test = inputs_all[1::2]
            targets_train = targets_all[::2]
            targets_test = targets_all[1::2]
            dt_test = p.dt*2
        elif p.test_set == 'one':
            times, imgs, targs = davis_tracking.load_data(
                test_file,
                dt=p.dt_test,
                decay_time=p.decay_time,
                separate_channels=p.separate_channels,
                saturation=p.saturation,
                merge=p.merge)
            inputs_test = imgs
            targets_test = targs[:, :2]
            inputs_train = inputs_all
            targets_train = targets_all
            dt_test = p.dt_test
            
        if p.augment:
            inputs_train, targets_train = davis_tracking.augment(
                inputs_train,
                targets_train,
                separate_channels=p.separate_channels)
                      
        if p.separate_channels:
            shape = (2, 180//p.merge, 240//p.merge)
        else:
            shape = (1, 180//p.merge, 240//p.merge)
        
        dimensions = shape[0] * shape[1] * shape[2]

        if p.normalize:
            magnitude = np.linalg.norm(inputs_train.reshape(-1, dimensions),
                                       axis=1)
            inputs_train = inputs_train * (1.0 / magnitude[:, None, None])

            magnitude = np.linalg.norm(inputs_test.reshape(-1, dimensions),
                                       axis=1)
            inputs_test = inputs_test * (1.0 / magnitude[:, None, None])

        max_rate = 100
        amp = 1 / max_rate

        model = nengo.Network()
        with model:
            model.config[nengo.Ensemble].neuron_type = nengo.RectifiedLinear(amplitude=amp)
            model.config[nengo.Ensemble].max_rates = nengo.dists.Choice([max_rate])
            model.config[nengo.Ensemble].intercepts = nengo.dists.Choice([0])
            model.config[nengo.Connection].synapse = None

            inp = nengo.Node(
                nengo.processes.PresentInput(
                    inputs_test.reshape(-1, dimensions), dt_test),
                size_out=dimensions,
            )

            out = nengo.Node(None, size_in=2)
            
            if not p.split_spatial:
                # do a standard convnet
                conv1 = nengo.Convolution(p.n_features_1,
                                          shape,
                                          channels_last=False,
                                          strides=(p.stride_1, p.stride_1),
                                          kernel_size=(p.kernel_size_1,
                                                       p.kernel_size_1))
                layer1 = nengo.Ensemble(conv1.output_shape.size, dimensions=1)
                nengo.Connection(inp, layer1.neurons, transform=conv1)

                conv2 = nengo.Convolution(p.n_features_2,
                                          conv1.output_shape,
                                          channels_last=False,
                                          strides=(p.stride_2, p.stride_2),
                                          kernel_size=(p.kernel_size_2,
                                                       p.kernel_size_2))
                layer2 = nengo.Ensemble(conv2.output_shape.size, dimensions=1)
                nengo.Connection(layer1.neurons,
                                 layer2.neurons,
                                 transform=conv2)

                nengo.Connection(layer2.neurons,
                                 out,
                                 transform=nengo_dl.dists.Glorot())
            else:
                # do the weird spatially split convnet
                convnet = davis_tracking.ConvNet(nengo.Network())
                convnet.make_input_layer(
                    shape,
                    spatial_stride=(p.spatial_stride, p.spatial_stride),
                    spatial_size=(p.spatial_size, p.spatial_size))
                nengo.Connection(inp, convnet.input)
                convnet.make_middle_layer(n_features=p.n_features_1,
                                          n_parallel=p.n_parallel,
                                          n_local=1,
                                          kernel_stride=(p.stride_1,
                                                         p.stride_1),
                                          kernel_size=(p.kernel_size_1,
                                                       p.kernel_size_1))
                convnet.make_middle_layer(n_features=p.n_features_2,
                                          n_parallel=p.n_parallel,
                                          n_local=1,
                                          kernel_stride=(p.stride_2,
                                                         p.stride_2),
                                          kernel_size=(p.kernel_size_2,
                                                       p.kernel_size_2))
                convnet.make_output_layer(2)
                nengo.Connection(convnet.output, out)
                         

            p_out = nengo.Probe(out)


        N = len(inputs_train)
        n_steps = int(np.ceil(N / p.minibatch_size))
        dl_train_data = {
            inp: np.resize(inputs_train,
                           (p.minibatch_size, n_steps, dimensions)),
            p_out: np.resize(targets_train, (p.minibatch_size, n_steps, 2))
        }
        N = len(inputs_test)
        n_steps = int(np.ceil(N / p.minibatch_size))
        dl_test_data = {
            inp: np.resize(inputs_test,
                           (p.minibatch_size, n_steps, dimensions)),
            p_out: np.resize(targets_test, (p.minibatch_size, n_steps, 2))
        }
        with nengo_dl.Simulator(model, minibatch_size=p.minibatch_size) as sim:
            #loss_pre = sim.loss(dl_test_data)

            if p.n_epochs > 0:
                sim.train(dl_train_data, tf.train.RMSPropOptimizer(learning_rate=p.learning_rate),
                          n_epochs=p.n_epochs)

            loss_post = sim.loss(dl_test_data)

            sim.run_steps(n_steps, data=dl_test_data)

        data = sim.data[p_out].reshape(-1, 2)[:len(targets_test)]

        rmse_test = np.sqrt(np.mean(
            (targets_test - data)**2, axis=0)) * p.merge
        if plt:
            plt.plot(data * p.merge)
            plt.plot(targets_test * p.merge, ls='--')

        return dict(
            rmse_test=rmse_test,
            max_n_neurons=max([ens.n_neurons for ens in model.all_ensembles]),
            test_targets=targets_test,
            test_output=data,
            test_loss=loss_post,
        )
Example 18
def test_convolution(dimensions, padding, channels_last, fixed_kernel,
                     Simulator, allclose, rng, seed):
    input_d = 4
    input_channels = 2
    output_channels = 5
    kernel_d = 3
    kernel_size = (kernel_d, ) if dimensions == 1 else (kernel_d, kernel_d)
    output_d = input_d - kernel_d // 2 * 2 if padding == "valid" else input_d

    input_shape = (input_d, input_channels)
    kernel_shape = (kernel_d, input_channels, output_channels)
    output_shape = (output_d, output_channels)

    if dimensions == 2:
        input_shape = (input_d, ) + input_shape
        kernel_shape = (kernel_d, ) + kernel_shape
        output_shape = (output_d, ) + output_shape

    if not channels_last:
        input_shape = tuple(np.roll(input_shape, 1))
        output_shape = tuple(np.roll(output_shape, 1))

    with nengo.Network(seed=seed) as net:
        x = rng.randn(*input_shape)
        w = rng.randn(
            *kernel_shape) if fixed_kernel else nengo.dists.Uniform(-0.1, 0.1)

        a = nengo.Node(np.ravel(x))
        b = nengo.Node(size_in=np.prod(output_shape))
        conn = nengo.Connection(
            a,
            b,
            synapse=None,
            transform=nengo.Convolution(
                output_channels,
                input_shape,
                init=w,
                padding=padding,
                kernel_size=kernel_size,
                strides=(1, ) if dimensions == 1 else (1, 1),
                channels_last=channels_last,
            ),
        )
        p = nengo.Probe(b)

        # check error handling
        bad_in = nengo.Node([0])
        bad_out = nengo.Node(size_in=5)
        with pytest.raises(ValidationError):
            nengo.Connection(bad_in, b, transform=conn.transform)
        with pytest.raises(ValidationError):
            nengo.Connection(a, bad_out, transform=conn.transform)

    assert conn.transform.output_shape.shape == output_shape
    assert conn.transform.kernel_shape == kernel_shape

    with Simulator(net) as sim:
        sim.step()

    weights = sim.data[conn].weights
    if not channels_last:
        x = np.moveaxis(x, 0, -1)
    if dimensions == 1:
        x = x[:, None, :]
        weights = weights[:, None, :, :]
    truth = conv2d.conv2d(x[None, ...], weights, pad=padding.upper())[0]
    if not channels_last:
        truth = np.moveaxis(truth, -1, 0)

    assert allclose(sim.data[p][0], np.ravel(truth))
Example 19
    def evaluate(self, p, plt):
        files = []
        sets = []
        for f in os.listdir(p.dataset_dir):
            if f.endswith('events'):
                files.append(os.path.join(p.dataset_dir, f))

        if p.test_set == 'one':
            test_file = random.sample(files, 1)[0]
            files.remove(test_file)

        if p.n_data != -1:
            files = random.sample(files, p.n_data)

        if len(p.load_params_from) > 0:
            params = np.load(p.load_params_from, allow_pickle=True)
        else:
            params = None

        strip_edges = 3  #  the number of edge pixels to remove due to convolution

        inputs = []
        targets = []
        targets_raw = []
        for f in files:
            times, imgs, targs = davis_tracking.load_data(
                f,
                dt=p.dt,
                decay_time=p.decay_time,
                separate_channels=p.separate_channels,
                saturation=p.saturation,
                merge=p.merge)
            inputs.append(imgs)
            targets_raw.append(targs[:, :2])
            targets.append(
                davis_tracking.make_heatmap(targs,
                                            merge=p.merge,
                                            strip_edges=strip_edges).reshape(
                                                len(targs), -1))

        inputs_all = np.vstack(inputs)
        targets_all = np.vstack(targets)
        targets_all_raw = np.vstack(targets_raw)

        if p.test_set == 'odd':
            inputs_train = inputs_all[::2]
            inputs_test = inputs_all[1::2]
            targets_train = targets_all[::2]
            targets_test = targets_all[1::2]
            targets_test_raw = targets_all_raw[1::2]
            dt_test = p.dt * 2
        elif p.test_set == 'one':
            times, imgs, targs = davis_tracking.load_data(
                test_file,
                dt=p.dt_test,
                decay_time=p.decay_time,
                separate_channels=p.separate_channels,
                saturation=p.saturation,
                merge=p.merge)
            inputs_test = imgs

            targets_test_raw = targs
            targets_test = davis_tracking.make_heatmap(
                targs, merge=p.merge,
                strip_edges=strip_edges).reshape(len(targs), -1)
            inputs_train = inputs_all
            targets_train = targets_all
            dt_test = p.dt_test

        if p.separate_channels:
            shape = (2, 180 // p.merge, 240 // p.merge)
        else:
            shape = (1, 180 // p.merge, 240 // p.merge)
        output_shape = shape[1] - strip_edges * 2, shape[2] - strip_edges * 2

        dimensions = shape[0] * shape[1] * shape[2]

        if p.normalize:
            magnitude = np.linalg.norm(inputs_train.reshape(-1, dimensions),
                                       axis=1)
            inputs_train = inputs_train * (1.0 / magnitude[:, None, None])

            magnitude = np.linalg.norm(inputs_test.reshape(-1, dimensions),
                                       axis=1)
            inputs_test = inputs_test * (1.0 / magnitude[:, None, None])

        max_rate = 100
        amp = 1 / max_rate

        model = nengo.Network()
        with model:
            model.config[nengo.Ensemble].neuron_type = nengo.RectifiedLinear(
                amplitude=amp)
            model.config[nengo.Ensemble].max_rates = nengo.dists.Choice(
                [max_rate])
            model.config[nengo.Ensemble].intercepts = nengo.dists.Choice([0])
            model.config[nengo.Connection].synapse = None

            inp = nengo.Node(
                nengo.processes.PresentInput(
                    inputs_test.reshape(-1, dimensions), dt_test),
                size_out=dimensions,
            )

            out = nengo.Node(None, size_in=targets_train.shape[-1])

            if not p.split_spatial:
                # do a standard convnet
                init = (params[2]['transform'].init if params is not None
                        else nengo.dists.Uniform(-1, 1))
                conv1 = nengo.Convolution(p.n_features_1,
                                          shape,
                                          channels_last=False,
                                          strides=(1, 1),
                                          padding='valid',
                                          kernel_size=(3, 3),
                                          init=init)
                layer1 = nengo.Ensemble(conv1.output_shape.size, dimensions=1)
                if params is not None:
                    layer1.gain = params[0]['gain']
                    layer1.bias = params[0]['bias']
                nengo.Connection(inp, layer1.neurons, transform=conv1)

                init = (params[3]['transform'].init if params is not None
                        else nengo.dists.Uniform(-1, 1))
                conv2 = nengo.Convolution(p.n_features_2,
                                          conv1.output_shape,
                                          channels_last=False,
                                          strides=(1, 1),
                                          padding='valid',
                                          kernel_size=(3, 3),
                                          init=init)
                layer2 = nengo.Ensemble(conv2.output_shape.size, dimensions=1)
                if params is not None:
                    layer2.gain = params[1]['gain']
                    layer2.bias = params[1]['bias']
                nengo.Connection(layer1.neurons,
                                 layer2.neurons,
                                 transform=conv2)

                init = (params[4]['transform'].init if params is not None
                        else nengo.dists.Uniform(-1, 1))
                conv3 = nengo.Convolution(1,
                                          conv2.output_shape,
                                          channels_last=False,
                                          strides=(1, 1),
                                          padding='valid',
                                          kernel_size=(3, 3),
                                          init=init)

                nengo.Connection(layer2.neurons, out, transform=conv3)
            else:
                # do the weird spatially split convnet
                convnet = davis_tracking.ConvNet(nengo.Network())
                convnet.make_input_layer(shape,
                                         spatial_stride=(p.spatial_stride,
                                                         p.spatial_stride),
                                         spatial_size=(p.spatial_size,
                                                       p.spatial_size))
                nengo.Connection(inp, convnet.input)
                init = (params[2]['transform'].init if params is not None
                        else nengo.dists.Uniform(-1, 1))
                convnet.make_middle_layer(n_features=p.n_features_1,
                                          n_parallel=p.n_parallel,
                                          n_local=1,
                                          kernel_stride=(1, 1),
                                          kernel_size=(3, 3),
                                          init=init)
                init = (params[3]['transform'].init if params is not None
                        else nengo.dists.Uniform(-1, 1))
                convnet.make_middle_layer(n_features=p.n_features_2,
                                          n_parallel=p.n_parallel,
                                          n_local=1,
                                          kernel_stride=(1, 1),
                                          kernel_size=(3, 3),
                                          init=init)
                init = (params[4]['transform'].init if params is not None
                        else nengo.dists.Uniform(-1, 1))
                convnet.make_middle_layer(n_features=1,
                                          n_parallel=p.n_parallel,
                                          n_local=1,
                                          kernel_stride=(1, 1),
                                          kernel_size=(3, 3),
                                          init=init,
                                          use_neurons=False)
                convnet.make_merged_output(output_shape)
                nengo.Connection(convnet.output, out)

                if params is not None:
                    assert np.allclose(params[0]['gain'], 100, atol=1e-5)
                    assert np.allclose(params[1]['gain'], 100, atol=1e-5)
                    if np.max(np.abs(params[0]['bias'])) > 1e-8:
                        print(
                            'WARNING: biases are not yet being set on the neurons'
                        )
                    if np.max(np.abs(params[1]['bias'])) > 1e-8:
                        print(
                            'WARNING: biases are not yet being set on the neurons'
                        )
                    #assert np.allclose(params[0]['bias'], 0, atol=1e-4)
                    #assert np.allclose(params[1]['bias'], 0, atol=1e-4)
                    #TODO: actually do this!  Even though it involves annoying slicing

            p_out = nengo.Probe(out)

        N = len(inputs_train)
        n_steps = int(np.ceil(N / p.minibatch_size))
        dl_train_data = {
            inp:
            np.resize(inputs_train, (p.minibatch_size, n_steps, dimensions)),
            p_out:
            np.resize(targets_train,
                      (p.minibatch_size, n_steps, targets_train.shape[-1]))
        }
        N = len(inputs_test)
        n_steps = int(np.ceil(N / p.minibatch_size))
        dl_test_data = {
            inp:
            np.resize(inputs_test, (p.minibatch_size, n_steps, dimensions)),
            p_out:
            np.resize(targets_test,
                      (p.minibatch_size, n_steps, targets_train.shape[-1]))
        }
        with nengo_dl.Simulator(model, minibatch_size=p.minibatch_size) as sim:
            #loss_pre = sim.loss(dl_test_data)

            if p.n_epochs > 0:
                sim.train(
                    dl_train_data,
                    tf.train.RMSPropOptimizer(learning_rate=p.learning_rate),
                    n_epochs=p.n_epochs)

            loss_post = sim.loss(dl_test_data)

            sim.run_steps(n_steps, data=dl_test_data)

            if p.save_params:
                assert not p.split_spatial

                objects = list(model.all_ensembles) + list(
                    model.all_connections)
                params = sim.get_nengo_params(objects, as_dict=False)

                np.save(
                    os.path.join(p.data_dir, p.data_filename + '.params.npy'),
                    params)

        data = sim.data[p_out].reshape(
            -1, targets_train.shape[-1])[:len(targets_test)]

        data_peak = np.array(
            [davis_tracking.find_peak(d.reshape(output_shape)) for d in data])
        target_peak = np.array([
            davis_tracking.find_peak(d.reshape(output_shape))
            for d in targets_test
        ])

        rmse_test = np.sqrt(np.mean(
            (target_peak - data_peak)**2, axis=0)) * p.merge
        if plt:
            plt.plot(data_peak * p.merge)
            plt.plot(target_peak * p.merge, ls='--')
            plt.plot((targets_test_raw - strip_edges) * p.merge, ls=':')

        return dict(
            rmse_test=rmse_test,
            max_n_neurons=max([ens.n_neurons for ens in model.all_ensembles]),
            #test_targets = targets_test,
            test_targets_raw=targets_test_raw,
            #test_output = data,
            target_peak=target_peak,
            data_peak=data_peak,
            test_loss=loss_post,
        )
Example 20
def test_convolution_validation_errors():
    # conflicting channels_last
    input_shape = nengo.transforms.ChannelShape((2, 3, 4), channels_last=True)
    with pytest.raises(ValidationError,
                       match="transform has channels_l.*input shape"):
        nengo.Convolution(4, input_shape, channels_last=False)

    # kernel_size does not match dimensions (2)
    with pytest.raises(ValidationError,
                       match=r"Kernel dimensions \(3\) does not mat"):
        nengo.Convolution(4, input_shape, kernel_size=(3, 3, 3))

    # strides does not match dimensions (2)
    with pytest.raises(ValidationError,
                       match=r"Stride dimensions \(3\) does not mat"):
        nengo.Convolution(4, input_shape, strides=(1, 1, 1))

    # init shape does not match kernel shape
    input_shape = nengo.transforms.ChannelShape((5, 5, 4), channels_last=True)
    nengo.Convolution(4, input_shape, init=np.ones((3, 3, 4, 4)))  # this works
    with pytest.raises(ValidationError,
                       match=r"Kernel shape \(9, 9, 4, 4\).*not mat"):
        nengo.Convolution(4, input_shape, init=np.ones((9, 9, 4, 4)))
    with pytest.raises(ValidationError,
                       match=r"Kernel shape \(3, 3, 7, 4\).*not mat"):
        nengo.Convolution(4, input_shape, init=np.ones((3, 3, 7, 4)))
    with pytest.raises(ValidationError,
                       match=r"Kernel shape \(3, 3, 4, 5\).*not mat"):
        nengo.Convolution(4, input_shape, init=np.ones((3, 3, 4, 5)))

    # test empty output
    with pytest.raises(ValidationError, match="exceeds the spatial size"):
        nengo.transforms.Convolution(n_filters=2, input_shape=(3, 2, 1))

    # test invalid groups
    with pytest.raises(ValidationError,
                       match="Groups.*cannot be greater than"):
        nengo.transforms.Convolution(n_filters=3,
                                     input_shape=(3, 2, 1),
                                     groups=3)
    with pytest.raises(ValidationError, match="evenly divisible by.*groups"):
        nengo.transforms.Convolution(n_filters=3,
                                     input_shape=(3, 3, 5),
                                     groups=3)
    with pytest.raises(ValidationError, match="evenly divisible by.*groups"):
        nengo.transforms.Convolution(n_filters=4,
                                     input_shape=(3, 3, 3),
                                     groups=3)

    # valid output shape
    nengo.transforms.ConvolutionTranspose(n_filters=2,
                                          input_shape=(3, 2, 1),
                                          output_shape=(5, 4, 2))
    with pytest.raises(ValidationError, match="number of dimensions"):
        # too many dims in output shape
        nengo.transforms.ConvolutionTranspose(n_filters=2,
                                              input_shape=(3, 2, 1),
                                              output_shape=(5, 4, 2, 1))
    with pytest.raises(ValidationError, match="number of channels"):
        # too many channels in output shape
        nengo.transforms.ConvolutionTranspose(n_filters=2,
                                              input_shape=(3, 2, 1),
                                              output_shape=(5, 4, 3))
    with pytest.raises(ValidationError, match="not a valid output shape"):
        # too many rows in output shape
        nengo.transforms.ConvolutionTranspose(n_filters=2,
                                              input_shape=(3, 2, 1),
                                              output_shape=(6, 4, 2))
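For contrast with the failure cases above, constructions like the following pass validation (a minimal sketch with illustrative shapes). Under 'valid' padding the output spatial size is (input - kernel) / stride + 1 per dimension, and a grouped convolution's kernel has input_channels // groups channels per filter.

conv = nengo.Convolution(n_filters=4,
                         input_shape=(5, 5, 2),
                         kernel_size=(3, 3),
                         strides=(1, 1))
assert conv.output_shape.shape == (3, 3, 4)
assert conv.size_in == 5 * 5 * 2 and conv.size_out == 3 * 3 * 4

grouped = nengo.transforms.Convolution(n_filters=4,
                                       input_shape=(5, 5, 4),
                                       groups=2)
assert grouped.kernel_shape == (3, 3, 2, 4)  # channels // groups == 2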
Example 21
def _test_convolution(
    dimensions,
    padding,
    channels_last,
    fixed_kernel,
    transpose,
    groups,
    output_channels,
    Simulator,
    allclose,
    rng,
    seed,
):
    assert not (transpose and groups > 1
                ), "Transpose Convolutions Not Supported With Groups != 1"
    input_d = 4
    input_channels = 2
    kernel_d = 3
    output_d = (input_d if padding == "same" else input_d +
                (1 if transpose else -1) * (kernel_d // 2 * 2))

    kernel_size = (kernel_d, ) if dimensions == 1 else (kernel_d, kernel_d)
    input_shape = (input_d, input_channels)
    kernel_shape = (kernel_d, input_channels // groups, output_channels)
    output_shape = (output_d, output_channels)

    if dimensions == 2:
        input_shape = (input_d, ) + input_shape
        kernel_shape = (kernel_d, ) + kernel_shape
        output_shape = (output_d, ) + output_shape

    if not channels_last:
        input_shape = tuple(np.roll(input_shape, 1))
        output_shape = tuple(np.roll(output_shape, 1))

    x = rng.randn(*input_shape)
    w = rng.randn(
        *kernel_shape) if fixed_kernel else nengo.dists.Uniform(-0.1, 0.1)

    if transpose:
        transform = nengo.transforms.ConvolutionTranspose(
            output_channels,
            input_shape,
            init=w,
            padding=padding,
            kernel_size=kernel_size,
            strides=(1, ) if dimensions == 1 else (1, 1),
            channels_last=channels_last,
        )
    else:
        transform = nengo.Convolution(
            output_channels,
            input_shape,
            init=w,
            padding=padding,
            kernel_size=kernel_size,
            strides=(1, ) if dimensions == 1 else (1, 1),
            channels_last=channels_last,
            groups=groups,
        )

    assert transform.output_shape.shape == output_shape

    with nengo.Network(seed=seed) as net:
        a = nengo.Node(np.ravel(x))
        b = nengo.Node(size_in=np.prod(output_shape))
        conn = nengo.Connection(a, b, synapse=None, transform=transform)
        p = nengo.Probe(b)

        # check error handling
        bad_in = nengo.Node([0])
        bad_out = nengo.Node(size_in=5)
        with pytest.raises(ValidationError):
            nengo.Connection(bad_in, b, transform=conn.transform)
        with pytest.raises(ValidationError):
            nengo.Connection(a, bad_out, transform=conn.transform)

    assert conn.transform.output_shape.shape == output_shape
    assert conn.transform.kernel_shape == kernel_shape

    with Simulator(net) as sim:
        sim.step()

    weights = sim.data[conn].weights
    if not channels_last:
        x = np.moveaxis(x, 0, -1)

    if dimensions == 1:
        x = x[:, None, :]
        weights = weights[:, None, :, :]

    if transpose:
        outsize = (output_d, 1) if dimensions == 1 else (output_d, output_d)
        truth = conv2d_gradx(weights,
                             x[None, ...],
                             xsize=outsize,
                             pad=padding.upper())[0]
    else:
        truth = (conv2d if groups == 1 else conv2d_groups)(
            x[None, ...], weights, pad=padding.upper())[0]

    if not channels_last:
        truth = np.moveaxis(truth, -1, 0)

    assert allclose(sim.data[p][0], np.ravel(truth))
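As a companion shape check for the transpose path exercised above, the sketch below uses the (3, 2, 1) input that Example 20 lists as having (5, 4, 2) as a valid output shape; with 'valid' padding and unit strides, each spatial dimension grows by kernel - 1.

tconv = nengo.transforms.ConvolutionTranspose(n_filters=2,
                                              input_shape=(3, 2, 1),
                                              kernel_size=(3, 3))
assert tconv.output_shape.shape == (5, 4, 2)  # (3 + 3 - 1, 2 + 3 - 1, n_filters)
assert tconv.size_in == 3 * 2 * 1 and tconv.size_out == 5 * 4 * 2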
Example 22
    wrappedIndex = int(n % (duration - buffSize))
    return sound[wrappedIndex * buffSize:(wrappedIndex + 1) * buffSize]


with nengo.Network() as model:
    # Input node representing the waveform
    inProbe = nengo.Node(lambda t: bufferSound(t), size_out=240, label="sound")

    # Ensemble weights are precomputed by the Nengo library
    # when defined this way
    spikeGen = nengo.networks.EnsembleArray(n_neurons=240 * 3,
                                            n_ensembles=240,
                                            label="spike sound")
    nengo.Connection(inProbe, spikeGen.input)

    # Define convolution transform
    conv = nengo.Convolution(4,
                             input_shape=(1, 240),
                             kernel_size=(5, ),
                             strides=(1, ),
                             padding="same")
    # Define convolution layer
    convLayer = nengo.Ensemble(conv.output_shape.size, 4)
    nengo.Connection(spikeGen.output, convLayer.neurons, transform=conv)
    # note: in nengo, convolutions operate across ensemble dimensions,
    # not across time. In this case the 240 dimensions will refer to the
    # neurons in spikeGen

# Start Nengo GUI
import nengo_gui
nengo_gui.GUI(__file__).start()
Example 23
 def conv_layer(input=None, label=None, **kwargs):
     conv = nengo.Convolution(**kwargs)
     layer = nengo.Ensemble(conv.output_shape.size, 1, label=label)
     conn = (nengo.Connection(input, layer.neurons, transform=conv)
             if input is not None else None)
     return layer, conv, conn
Example 24
    def evaluate(self, p, plt):
        files = []
        sets = []
        for f in os.listdir(p.dataset_dir):
            if f.endswith('events'):
                files.append(os.path.join(p.dataset_dir, f))

        if p.test_set == 'one':
            test_file = random.sample(files, 1)[0]
            files.remove(test_file)

        if p.n_data != -1:
            files = random.sample(files, p.n_data)

        inputs = []
        targets = []
        for f in files:
            times, imgs, targs = davis_track.load_data(
                f,
                dt=p.dt,
                decay_time=p.decay_time,
                separate_channels=p.separate_channels,
                saturation=p.saturation,
                merge=p.merge)
            inputs.append(imgs)
            targets.append(targs[:, :2])

        inputs_all = np.vstack(inputs)
        targets_all = np.vstack(targets)

        if p.test_set == 'odd':
            inputs_train = inputs_all[::2]
            inputs_test = inputs_all[1::2]
            targets_train = targets_all[::2]
            targets_test = targets_all[1::2]
            dt_test = p.dt * 2
        elif p.test_set == 'one':
            times, imgs, targs = davis_track.load_data(
                test_file,
                dt=p.dt_test,
                decay_time=p.decay_time,
                separate_channels=p.separate_channels,
                saturation=p.saturation,
                merge=p.merge)
            inputs_test = imgs
            targets_test = targs[:, :2]
            inputs_train = inputs_all
            targets_train = targets_all
            dt_test = p.dt_test

        if p.augment:
            inputs_train, targets_train = davis_track.augment(
                inputs_train,
                targets_train,
                separate_channels=p.separate_channels)

        if p.separate_channels:
            shape = (2, 180 // p.merge, 240 // p.merge)
        else:
            shape = (1, 180 // p.merge, 240 // p.merge)

        dimensions = shape[0] * shape[1] * shape[2]
        eval_points_train = inputs_train.reshape(-1, dimensions)
        eval_points_test = inputs_test.reshape(-1, dimensions)

        max_rate = 100
        amp = 1 / max_rate

        model = nengo.Network()
        with model:
            model.config[nengo.Ensemble].neuron_type = nengo.RectifiedLinear(
                amplitude=amp)
            model.config[nengo.Ensemble].max_rates = nengo.dists.Choice(
                [max_rate])
            model.config[nengo.Ensemble].intercepts = nengo.dists.Choice([0])
            model.config[nengo.Connection].synapse = None

            inp = nengo.Node(
                nengo.processes.PresentInput(
                    inputs_test.reshape(-1, dimensions), dt_test),
                size_out=dimensions,
            )

            out = nengo.Node(None, size_in=2)

            if not p.split_spatial:
                # do a standard convnet
                conv1 = nengo.Convolution(p.n_features_1,
                                          shape,
                                          channels_last=False,
                                          strides=(p.stride_1, p.stride_1),
                                          kernel_size=(p.kernel_size_1,
                                                       p.kernel_size_1))
                layer1 = nengo.Ensemble(conv1.output_shape.size, dimensions=1)
                nengo.Connection(inp, layer1.neurons, transform=conv1)

                conv2 = nengo.Convolution(p.n_features_2,
                                          conv1.output_shape,
                                          channels_last=False,
                                          strides=(p.stride_2, p.stride_2),
                                          kernel_size=(p.kernel_size_2,
                                                       p.kernel_size_2))
                layer2 = nengo.Ensemble(conv2.output_shape.size, dimensions=1)
                nengo.Connection(layer1.neurons,
                                 layer2.neurons,
                                 transform=conv2)

                nengo.Connection(layer2.neurons,
                                 out,
                                 transform=nengo_dl.dists.Glorot())
            else:
                convnet = spatial_convnet.ConvNet(nengo.Network())
                convnet.make_input_layer(shape,
                                         spatial_stride=(p.spatial_stride,
                                                         p.spatial_stride),
                                         spatial_size=(p.spatial_size,
                                                       p.spatial_size))
                nengo.Connection(inp, convnet.input)
                convnet.make_middle_layer(n_features=p.n_features_1,
                                          n_parallel=p.n_parallel,
                                          n_local=1,
                                          n_remote=0,
                                          kernel_stride=(p.stride_1,
                                                         p.stride_1),
                                          kernel_size=(p.kernel_size_1,
                                                       p.kernel_size_1))
                convnet.make_middle_layer(n_features=p.n_features_2,
                                          n_parallel=p.n_parallel,
                                          n_local=1,
                                          n_remote=0,
                                          kernel_stride=(p.stride_2,
                                                         p.stride_2),
                                          kernel_size=(p.kernel_size_2,
                                                       p.kernel_size_2))
                convnet.make_output_layer(2)
                nengo.Connection(convnet.output, out)

            p_out = nengo.Probe(out)

        if p.gui:
            locals_dict = getattr(self, 'locals', dict(model=model))
            import nengo_gui
            import webbrowser

            if hasattr(nengo_gui, 'guibackend'):
                host = 'localhost'
                port = 8080
                server_settings = nengo_gui.guibackend.GuiServerSettings(
                    (host, port))
                model_context = nengo_gui.guibackend.ModelContext(
                    model=model,
                    locals=locals_dict,
                    filename=sys.argv[1],
                    writeable=False)
                page_settings = nengo_gui.page.PageSettings(
                    filename_cfg=sys.argv[1] + '.cfg',
                    backend='nengo',
                    editor_class=nengo_gui.components.editor.NoEditor)
                server = nengo_gui.gui.BaseGUI(model_context, server_settings,
                                               page_settings)
                if hasattr(server.server, 'gen_one_time_token'):
                    wb = webbrowser.get().open(
                        '%s://%s:%d/?token=%s' %
                        ('http', host, port,
                         server.server.gen_one_time_token()))
                else:
                    wb = webbrowser.get().open('%s://%s:%d/' %
                                               ('http', host, port))

                server.start()
            else:
                try:
                    nengo_gui.GUI(
                        model=model,
                        filename=sys.argv[1],
                        locals=locals_dict,
                        editor=False,
                    ).start()
                except TypeError:
                    # support nengo_gui v0.2.0 and previous
                    nengo_gui.GUI(
                        model=model,
                        filename=sys.argv[1],
                        locals=locals_dict,
                        interactive=False,
                        allow_file_change=False,
                    ).start()

        N = len(inputs_train)
        n_steps = int(np.ceil(N / p.minibatch_size))
        dl_train_data = {
            inp: np.resize(inputs_train,
                           (p.minibatch_size, n_steps, dimensions)),
            p_out: np.resize(targets_train, (p.minibatch_size, n_steps, 2))
        }
        N = len(inputs_test)
        n_steps = int(np.ceil(N / p.minibatch_size))
        dl_test_data = {
            inp: np.resize(inputs_test,
                           (p.minibatch_size, n_steps, dimensions)),
            p_out: np.resize(targets_test, (p.minibatch_size, n_steps, 2))
        }
        with nengo_dl.Simulator(model, minibatch_size=p.minibatch_size) as sim:
            #loss_pre = sim.loss(dl_test_data)

            if p.n_epochs > 0:
                sim.train(
                    dl_train_data,
                    tf.train.RMSPropOptimizer(learning_rate=p.learning_rate),
                    n_epochs=p.n_epochs)

            #loss_post = sim.loss(dl_test_data)

            sim.run_steps(n_steps, data=dl_test_data)

        data = sim.data[p_out].reshape(-1, 2)[:len(targets_test)]
        filt = nengo.synapses.Lowpass(p.output_filter)
        data = filt.filt(data, dt=dt_test)

        rmse_test = np.sqrt(np.mean(
            (targets_test - data)**2, axis=0)) * p.merge
        if plt:
            plt.plot(data)
            plt.plot(targets_test, ls='--')

        return dict(
            #loss_pre=loss_pre,
            #loss_post=loss_post
            rmse_test=rmse_test,
            max_n_neurons=max([ens.n_neurons for ens in model.all_ensembles]))
Example 25
    def make_middle_layer(self,
                          n_features,
                          n_parallel,
                          n_local,
                          kernel_stride,
                          kernel_size,
                          padding='valid',
                          use_neurons=True,
                          init=nengo.dists.Uniform(-1, 1)):
        with self.net:
            prev_layer = self.layers[-1]
            prev_output_shape = self.output_shapes[-1]
            layer = []
            for prev_row in prev_layer:
                row = []
                for prev_col in prev_row:
                    col = []
                    this_index = 0

                    index = 0
                    for k in range(n_parallel):
                        prev_index = 0
                        if isinstance(init, nengo.dists.Distribution):
                            this_inits = [init] * n_local
                        else:
                            this_inits = []
                            prev_size = init.shape[2] // n_local

                            for i in range(n_local):

                                this_init = init[:, :, prev_index:prev_index +
                                                 prev_size,
                                                 this_index:this_index +
                                                 n_features]
                                prev_index = (prev_index + prev_size)
                                this_inits.append(this_init)
                            this_index = (this_index + n_features)

                        conv = nengo.Convolution(n_features,
                                                 prev_output_shape,
                                                 channels_last=False,
                                                 kernel_size=kernel_size,
                                                 padding=padding,
                                                 strides=kernel_stride,
                                                 init=this_inits[0])
                        if use_neurons:
                            ens = nengo.Ensemble(conv.output_shape.size,
                                                 dimensions=1,
                                                 label='%s' %
                                                 conv.output_shape)
                            ens_neurons = ens.neurons
                        else:
                            ens = nengo.Node(None,
                                             size_in=conv.output_shape.size,
                                             label='%s' % conv.output_shape)
                            ens_neurons = ens
                        for kk in range(n_local):
                            prev_k = prev_col[index % len(prev_col)]
                            conv = nengo.Convolution(n_features,
                                                     prev_output_shape,
                                                     channels_last=False,
                                                     kernel_size=kernel_size,
                                                     padding=padding,
                                                     strides=kernel_stride,
                                                     init=this_inits[kk])
                            nengo.Connection(prev_k,
                                             ens_neurons,
                                             transform=conv)
                            index += 1
                        col.append(ens_neurons)
                    row.append(col)
                layer.append(row)
            self.layers.append(layer)
            self.output_shapes.append(conv.output_shape)