            updates.update(layer.updates(grads))  # accumulate each layer's parameter updates

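        # --- make train function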
        train = theano.function(
            [sx, sy], [cost, error], updates=updates)

        # --- make test function
        y_pred = tt.argmax(self._propup(sx, testsize, noise=False), axis=1)
        error = tt.mean(tt.neq(y_pred, sy))
        test = theano.function([sx, sy], error)

        return train, test

################################################################################
if __name__ == '__main__':

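    # --- load the DVS dataset and cast inputs/labels to the network dtypes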
    [train_x, train_y], [test_x, test_y] = dvs_loader.load_dataset()
    assert train_x.shape[1] == train_x.shape[2]

    train_x = train_x[:, None, :, :].astype(dtype)
    train_y = train_y.astype(np.int32)
    test_x = test_x[:, None, :, :].astype(dtype)
    test_y = test_y.astype(np.int32)

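    # --- trim the training set to a multiple of the batch size and reshape into minibatches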
    batch_size = 100
    n = floor_multiple(len(train_x), batch_size)
    train_x, train_y = train_x[:n], train_y[:n]
    batch_x = train_x.reshape(-1, batch_size, *train_x.shape[1:])
    batch_y = train_y.reshape(-1, batch_size)

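    # --- trim the test set to a multiple of the evaluation batch size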
    test_size = 2000
    n = floor_multiple(len(test_x), test_size)
    test_x, test_y = test_x[:n], test_y[:n]
    FullLayer(outputs=2, initW=0.01, initB=0.0, wc=0.01),
]

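# --- load pretrained convnet parameters from file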
convfile = "convnet_2015-07-15_20-56-16.npz"
convdata = np.load(convfile)

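# --- build the network and get its feedforward (propup) function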
net = Convnet((1,) + shape, layers)
propup = net.get_propup()

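# copy the saved weights and biases into the corresponding network layers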
for i, [w, b] in enumerate(zip(convdata["weights"], convdata["biases"])):
    net.layers[i].weights.set_value(w)
    net.layers[i].biases.set_value(b)

if 0:
    # sanity check: evaluate the loaded network on a small subset of the test data
    _, [test_x, test_y] = dvs_loader.load_dataset()
    test_x, test_y = test_x[:1000], test_y[:1000]

    test_x = test_x[:, None, :, :].astype(dtype)
    test_y = test_y.astype(np.int32)

    _, test = net.get_train()
    y = test(test_x, test_y)
    print(y.mean())

    y = propup(test_x)
    print((test_y != y).mean())

    sys.exit(0)

# load data