Example #1
0
def test_maxout():
    """Train a maxout-style conv net (Conv2d + FeaturePooling) on MNIST.

    Builds the network, then runs 5000 training epochs, printing the
    epoch index and the test-set error rate after each epoch.
    """
    network = N.Network()
    network.setInput(N.RawInput((1, 28, 28)))

    # Three conv/maxout stages followed by global pooling and a softmax
    # classifier; appended in order, exactly as a hand-written chain would be.
    layers = [
        N.Conv2d(filter_size=(3, 3), feature_map_multiplier=128),
        N.FeaturePooling(4),
        N.Pooling((2, 2)),
        N.Conv2d(filter_size=(3, 3), feature_map_multiplier=8),
        N.FeaturePooling(4),
        N.Pooling((2, 2)),
        N.Conv2d(filter_size=(3, 3), feature_map_multiplier=8),
        N.FeaturePooling(4),
        N.GlobalPooling(),
        N.FullConn(input_feature=128, output_feature=10),
        N.SoftMax(),
    ]
    for layer in layers:
        network.append(layer)

    network.build()

    trX, trY, teX, teY = l.load_mnist()

    for epoch in range(5000):
        print(epoch)
        network.train(trX, trY)
        predicted = np.argmax(network.predict(teX), axis=1)
        truth = np.argmax(teY, axis=1)
        print(1 - np.mean(truth == predicted))
Example #2
0
def test1():
    """Train a VGG-style conv net on MNIST loaded from an HDF5 file.

    Builds a 9-conv-layer network with explicit input/output feature
    counts, one-hot encodes the labels, then runs 5000 training epochs,
    printing the epoch index and test-set error rate after each epoch.
    """
    network = N.Network()
    network.debug = True
    network.setInput((28, 28))
    network.append(N.Conv2d(filter=(3, 3), input_feature=1, output_feature=32))
    network.append(N.Relu())
    network.append(N.Conv2d(filter=(3, 3), input_feature=32,
                            output_feature=32))
    network.append(N.Relu())
    network.append(N.Conv2d(filter=(3, 3), input_feature=32,
                            output_feature=32))
    network.append(N.Relu())
    network.append(N.Pooling((2, 2)))
    network.append(N.Conv2d(filter=(3, 3), input_feature=32,
                            output_feature=64))
    network.append(N.Relu())
    network.append(N.Conv2d(filter=(3, 3), input_feature=64,
                            output_feature=64))
    network.append(N.Relu())
    network.append(N.Conv2d(filter=(3, 3), input_feature=64,
                            output_feature=64))
    network.append(N.Relu())
    network.append(N.Pooling((2, 2)))
    network.append(
        N.Conv2d(filter=(3, 3), input_feature=64, output_feature=128))
    network.append(N.Relu())
    network.append(
        N.Conv2d(filter=(3, 3), input_feature=128, output_feature=128))
    network.append(N.Relu())
    network.append(
        N.Conv2d(filter=(3, 3), input_feature=128, output_feature=128))
    network.append(N.Relu())
    network.append(N.Pooling((2, 2)))
    network.append(N.Flatten())
    network.append(N.FullConn(input_feature=1152, output_feature=1152 * 2))
    network.append(N.Relu())
    network.append(N.FullConn(input_feature=1152 * 2, output_feature=10))
    network.append(N.SoftMax())
    #network.setCost(N.CategoryCrossEntropy)

    network.build()

    f = h5py.File('/hdd/home/yueguan/workspace/data/mnist/mnist.hdf5', 'r')

    trX = f['x_train'][:, :].reshape(-1, 1, 28, 28)
    teX = f['x_test'][:, :].reshape(-1, 1, 28, 28)

    # One-hot encode the integer label vectors.
    trY = np.zeros((f['t_train'].shape[0], 10))
    trY[np.arange(len(f['t_train'])), f['t_train']] = 1
    teY = np.zeros((f['t_test'].shape[0], 10))
    teY[np.arange(len(f['t_test'])), f['t_test']] = 1

    for i in range(5000):
        print(i)
        network.train(trX, trY)
        # BUGFIX: predict() returns the softmax output array; compare class
        # indices (argmax) on both sides, as the sibling examples do.
        # Previously the raw prediction array was compared directly against
        # argmax indices, which broadcasts and yields a meaningless error rate.
        print(1 - np.mean(
            np.argmax(teY, axis=1) == np.argmax(network.predict(teX), axis=1)))
 def test_pooling_forward(self):
     """Pooling.forward should halve both spatial dimensions of a 4D input.

     Feeds a (batch, channels, height, width) = (500, 20, 28, 28) tensor
     through a default N.Pooling layer and checks the output shape is
     (500, 20, 14, 14) — i.e. default pool size behaves like (2, 2).
     """
     # NOTE(review): `rng` is presumably a module-level RandomState — confirm.
     x = np.asarray(rng.uniform(low=-1, high=1, size=(500, 20, 28, 28)))
     # Wrap in a theano shared variable; borrow=True avoids copying the array.
     x = theano.shared(x, borrow=True)
     pooling = N.Pooling()
     # forward takes/returns lists of symbolic tensors; eval() to get the shape.
     y = pooling.forward([x])
     y_shape = y[0].eval().shape
     self.assertEqual(y_shape, (500, 20, 14, 14))
Example #4
0
def test():
    """Train a conv/deconv autoencoder on MNIST and print reconstruction SSE.

    Builds a 2-stage downsampling encoder (Conv2d + Pooling) mirrored by a
    2-stage UpConv2d decoder, trains the network to reproduce its input
    (train(trX, trX)), and prints the sum-of-squares reconstruction error on
    the test set each epoch.
    """
    network = N.Network()
    network.debug = True

    network.setInput(N.RawInput((1, 28, 28)))
    network.append(N.Conv2d(feature_map_multiplier=32))
    network.append(N.Relu())
    network.append(N.Pooling())
    network.append(N.Conv2d(feature_map_multiplier=2))
    network.append(N.Relu())
    network.append(N.Pooling())
    # NOTE(review): UpConv2d is referenced unqualified (not N.UpConv2d) —
    # presumably imported directly at module level; verify against the imports.
    network.append(UpConv2d(feature_map_multiplier=2))
    network.append(N.Relu())
    network.append(UpConv2d(feature_map_multiplier=32))
    network.append(N.Relu())
    #network.append(N.Flatten())
    #network.append(N.FullConn(input_feature=1152, output_feature=1152*2))
    #network.append(N.Relu())
    #network.append(N.FullConn(input_feature=1152*2, output_feature=10))
    #network.append(N.SoftMax())

    # Image-to-image setup: SSE cost with 4D tensors on both ends.
    network.costFunction = cost.ImageSSE
    network.inputOutputType = (T.tensor4(), T.tensor4(),)

    network.build()

    f = h5py.File('/hdd/home/yueguan/workspace/data/mnist/mnist.hdf5', 'r')

    trX = f['x_train'][:, :].reshape(-1, 1, 28, 28)
    teX = f['x_test'][:, :].reshape(-1, 1, 28, 28)

    # One-hot labels are built but unused here (autoencoder trains on trX).
    trY = np.zeros((f['t_train'].shape[0], 10))
    trY[np.arange(len(f['t_train'])), f['t_train']] = 1
    teY = np.zeros((f['t_test'].shape[0], 10))
    teY[np.arange(len(f['t_test'])), f['t_test']] = 1

    for i in range(5000):
        print(i)
        #network.train(trX, trY)
        #print(1 - np.mean(np.argmax(teY, axis=1) == np.argmax(network.predict(teX), axis=1)))
        network.train(trX, trX)
        # Hoist the prediction: the original ran the full forward pass twice
        # per epoch just to square the residual.
        residual = teX - network.predict(teX)
        print(np.sum(residual * residual))
Example #5
0
def test_binaryweight():
    """Train a conv net with binarized activations/weights on MNIST.

    Inserts Binarize layers after the first two pooling stages and uses a
    BinaryConv2d for the third conv stage, then runs 5000 training epochs,
    printing the epoch index and test-set error rate after each epoch.
    """
    network = N.Network()
    network.debug = True

    network.setInput(N.RawInput((1, 28, 28)))
    layers = (
        N.Conv2d(feature_map_multiplier=32),
        N.Relu(),
        N.Pooling((2, 2)),
        Binarize(),
        N.Conv2d(feature_map_multiplier=2),
        N.Relu(),
        N.Pooling((2, 2)),
        Binarize(),
        BinaryConv2d(feature_map_multiplier=2),
        N.Relu(),
        N.Pooling((2, 2)),
        N.Flatten(),
        N.FullConn(input_feature=1152, output_feature=1152 * 2),
        N.Relu(),
        N.FullConn(input_feature=1152 * 2, output_feature=10),
        N.SoftMax(),
    )
    for layer in layers:
        network.append(layer)

    network.build()

    f = h5py.File('/hdd/home/yueguan/workspace/data/mnist/mnist.hdf5', 'r')

    trX = f['x_train'][:, :].reshape(-1, 1, 28, 28)
    teX = f['x_test'][:, :].reshape(-1, 1, 28, 28)

    # One-hot encode the integer label vectors.
    trY = np.zeros((f['t_train'].shape[0], 10))
    trY[np.arange(len(f['t_train'])), f['t_train']] = 1
    teY = np.zeros((f['t_test'].shape[0], 10))
    teY[np.arange(len(f['t_test'])), f['t_test']] = 1

    for epoch in range(5000):
        print(epoch)
        network.train(trX, trY)
        error_rate = 1 - np.mean(
            np.argmax(teY, axis=1) == np.argmax(network.predict(teX), axis=1))
        print(error_rate)
 def test_pooling_forwardSize(self):
     """Pooling.forwardSize should halve the spatial dims in a shape list.

     forwardSize operates on shape tuples only (no data): a default
     N.Pooling maps [(100, 1, 28, 28)] to [(100, 1, 14, 14)], i.e. the
     default pool size behaves like (2, 2).
     """
     x = [(100, 1, 28, 28)]
     pool = N.Pooling()
     y = pool.forwardSize(x)
     self.assertEqual(y, [(100, 1, 14, 14)])