Example #1
0
    def forwardSize(self, inputsize):
        """Instantiate this residual unit's sub-layers for the incoming
        size and return the output size after conv->bn->relu applied twice.

        `inputsize` is a sequence of shapes; only the first entry's
        channel count (index 1) is used to size the convolutions.
        """
        in_shape = inputsize[0]
        channels = in_shape[1]

        if self.increaseDim:
            # Halve the spatial resolution while doubling the feature maps.
            self.conv1 = N.Conv2d(self.filterSize,
                                  input_feature=channels,
                                  output_feature=channels * 2,
                                  subsample=(2, 2))
            self.conv2 = N.Conv2d(self.filterSize,
                                  input_feature=channels * 2,
                                  output_feature=channels * 2)
        else:
            # Preserve both the spatial size and the channel count.
            self.conv1 = N.Conv2d(self.filterSize,
                                  input_feature=channels,
                                  output_feature=channels)
            self.conv2 = N.Conv2d(self.filterSize,
                                  input_feature=channels,
                                  output_feature=channels)

        self.bn1 = N.BatchNormalization()
        self.bn2 = N.BatchNormalization()
        self.relu1 = N.Relu()
        self.relu2 = N.Relu()

        # Thread the size through each sub-layer in execution order.
        size = inputsize
        for layer in (self.conv1, self.bn1, self.relu1,
                      self.conv2, self.bn2, self.relu2):
            size = layer.forwardSize(size)
        return size
Example #2
0
def test():
    """Build a small convolutional MNIST classifier, train it, and print
    the test-set error rate after every epoch.

    NOTE(review): the HDF5 path is machine-specific — confirm it exists
    before running.
    """
    network = N.Network()
    network.debug = True

    network.setInput(N.RawInput((28, 28)))

    # Three stages of (3x3 conv + relu) followed by a strided 2x2 conv
    # that halves the spatial resolution.
    for n_in, n_out in ((1, 32), (32, 64), (64, 128)):
        network.append(
            N.Conv2d(filter_size=(3, 3),
                     input_feature=n_in,
                     output_feature=n_out))
        network.append(N.Relu())
        network.append(
            N.Conv2d(filter_size=(2, 2),
                     input_feature=n_out,
                     output_feature=n_out,
                     subsample=(2, 2),
                     border='valid'))

    network.append(N.Flatten())
    network.append(N.FullConn(input_feature=1152, output_feature=1152 * 2))
    network.append(N.Relu())
    network.append(N.FullConn(input_feature=1152 * 2, output_feature=10))
    network.append(N.SoftMax())

    network.build()

    print(network)

    f = h5py.File('/hdd/home/yueguan/workspace/data/mnist/mnist.hdf5', 'r')

    trX = f['x_train'][:, :].reshape(-1, 1, 28, 28)
    teX = f['x_test'][:, :].reshape(-1, 1, 28, 28)

    # One-hot encode the integer class labels.
    trY = np.zeros((f['t_train'].shape[0], 10))
    trY[np.arange(len(f['t_train'])), f['t_train']] = 1
    teY = np.zeros((f['t_test'].shape[0], 10))
    teY[np.arange(len(f['t_test'])), f['t_test']] = 1

    for epoch in range(5000):
        print(epoch)
        network.train(trX, trY)
        # Misclassification rate on the held-out test set.
        print(1 - np.mean(
            np.argmax(teY, axis=1) == np.argmax(network.predict(teX),
                                                axis=1)))
Example #3
0
def test():
    """Train a small convolutional autoencoder on MNIST and print the
    test-set sum-of-squared-errors reconstruction loss each epoch.

    Fixes vs. the previous version:
    - ``network.predict(teX)`` was called twice per epoch to compute the
      SSE; the forward pass is now hoisted into a local so it runs once.
    - The unused one-hot label arrays (``trY``/``teY``) and the blocks of
      commented-out classifier code were removed — this function trains
      input->input only.

    NOTE(review): the HDF5 path is machine-specific — confirm it exists
    before running.
    """
    network = N.Network()
    network.debug = True

    # Encoder: two conv+pool stages; decoder: two up-convolutions that
    # mirror the channel multipliers of the encoder.
    network.setInput(N.RawInput((1, 28, 28)))
    network.append(N.Conv2d(feature_map_multiplier=32))
    network.append(N.Relu())
    network.append(N.Pooling())
    network.append(N.Conv2d(feature_map_multiplier=2))
    network.append(N.Relu())
    network.append(N.Pooling())
    network.append(UpConv2d(feature_map_multiplier=2))
    network.append(N.Relu())
    network.append(UpConv2d(feature_map_multiplier=32))
    network.append(N.Relu())

    # Reconstruction objective: both input and target are image tensors.
    network.costFunction = cost.ImageSSE
    network.inputOutputType = (T.tensor4(), T.tensor4(),)

    network.build()

    f = h5py.File('/hdd/home/yueguan/workspace/data/mnist/mnist.hdf5', 'r')

    trX = f['x_train'][:, :].reshape(-1, 1, 28, 28)
    teX = f['x_test'][:, :].reshape(-1, 1, 28, 28)

    for i in range(5000):
        print(i)
        # Autoencoder training: the input is its own target.
        network.train(trX, trX)
        # Run the forward pass once per epoch instead of twice.
        recon = network.predict(teX)
        print(np.sum((teX - recon) * (teX - recon)))
Example #4
0
def test_binaryweight():
    """Train an MNIST convnet whose later stages use binarized
    activations/weights, printing the test-set error rate each epoch.

    NOTE(review): the HDF5 path is machine-specific — confirm it exists
    before running.
    """
    network = N.Network()
    network.debug = True

    network.setInput(N.RawInput((1, 28, 28)))

    # Layer stack in execution order: two ordinary conv stages whose
    # outputs are binarized, then a binary-weight conv stage, then a
    # fully-connected classifier head.
    layers = [
        N.Conv2d(feature_map_multiplier=32),
        N.Relu(),
        N.Pooling((2, 2)),
        Binarize(),
        N.Conv2d(feature_map_multiplier=2),
        N.Relu(),
        N.Pooling((2, 2)),
        Binarize(),
        BinaryConv2d(feature_map_multiplier=2),
        N.Relu(),
        N.Pooling((2, 2)),
        N.Flatten(),
        N.FullConn(input_feature=1152, output_feature=1152 * 2),
        N.Relu(),
        N.FullConn(input_feature=1152 * 2, output_feature=10),
        N.SoftMax(),
    ]
    for layer in layers:
        network.append(layer)

    network.build()

    f = h5py.File('/hdd/home/yueguan/workspace/data/mnist/mnist.hdf5', 'r')

    trX = f['x_train'][:, :].reshape(-1, 1, 28, 28)
    teX = f['x_test'][:, :].reshape(-1, 1, 28, 28)

    # One-hot encode the integer class labels.
    trY = np.zeros((f['t_train'].shape[0], 10))
    trY[np.arange(len(f['t_train'])), f['t_train']] = 1
    teY = np.zeros((f['t_test'].shape[0], 10))
    teY[np.arange(len(f['t_test'])), f['t_test']] = 1

    for epoch in range(5000):
        print(epoch)
        network.train(trX, trY)
        # Test-set misclassification rate.
        err = 1 - np.mean(
            np.argmax(teY, axis=1) == np.argmax(network.predict(teX),
                                                axis=1))
        print(err)