def expanded_data():
    expanded_training_data, _, _ = network3.load_data_shared(
        "../data/mnist_expanded.pkl.gz")
    for j in range(3):
        print "Training with expanded data, run num %s" % j
        net = Network([
            ConvPoolLayer(image_shape=(mini_batch_size, 1, 28, 28),
                          filter_shape=(20, 1, 5, 5),
                          poolsize=(2, 2),
                          activation_fn=ReLU),
            ConvPoolLayer(image_shape=(mini_batch_size, 20, 12, 12),
                          filter_shape=(40, 20, 5, 5),
                          poolsize=(2, 2),
                          activation_fn=ReLU),
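            # With 'valid' convolutions: 12x12 inputs -> 8x8 after the 5x5
            # filters, then 4x4 after 2x2 pooling, hence n_in = 40 * 4 * 4.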
            FullyConnectedLayer(n_in=40 * 4 * 4, n_out=100,
                                activation_fn=ReLU),
            SoftmaxLayer(n_in=100, n_out=10)
        ], mini_batch_size)
        net.SGD(expanded_training_data,
                20,
                mini_batch_size,
                0.03,
                validation_data,
                test_data,
                lmbda=0.1)
Example #2
def dbl_conv_relu():
    for lmbda in [0.0, 0.00001, 0.0001, 0.001, 0.01, 0.1, 1.0, 10.0, 100.0]:
        for j in range(3):
            print("Conv + Conv + FC num %s, relu, with regularization %s") % (
                j, lmbda)
            net = Network([
                ConvPoolLayer(image_shape=(mini_batch_size, 1, 28, 28),
                              filter_shape=(20, 1, 5, 5),
                              poolsize=(2, 2),
                              activation_fn=ReLU),
                ConvPoolLayer(image_shape=(mini_batch_size, 20, 12, 12),
                              filter_shape=(40, 20, 5, 5),
                              poolsize=(2, 2),
                              activation_fn=ReLU),
                FullyConnectedLayer(
                    n_in=40 * 4 * 4, n_out=100, activation_fn=ReLU),
                SoftmaxLayer(n_in=100, n_out=10)
            ], mini_batch_size)
            net.SGD(training_data,
                    60,
                    mini_batch_size,
                    0.03,
                    validation_data,
                    test_data,
                    lmbda=lmbda)
Example #3
def expanded_data_double_fc(n=100):
    """n is the number of neurons in both fully-connected layers.  We'll
    try n=100, 300, and 1000.

    """
    expanded_training_data, _, _ = network3.load_data_shared(
        "../data/mnist_expanded.pkl.gz")
    for j in range(3):
        print(
            "Training with expanded data, %s neurons in two FC layers, run num %s"
            % (n, j))
        net = Network([
            ConvPoolLayer(image_shape=(mini_batch_size, 1, 28, 28),
                          filter_shape=(20, 1, 5, 5),
                          poolsize=(2, 2),
                          activation_fn=ReLU),
            ConvPoolLayer(image_shape=(mini_batch_size, 20, 12, 12),
                          filter_shape=(40, 20, 5, 5),
                          poolsize=(2, 2),
                          activation_fn=ReLU),
            FullyConnectedLayer(n_in=40 * 4 * 4, n_out=n, activation_fn=ReLU),
            FullyConnectedLayer(n_in=n, n_out=n, activation_fn=ReLU),
            SoftmaxLayer(n_in=n, n_out=10)
        ], mini_batch_size)
        net.SGD(expanded_training_data,
                60,
                mini_batch_size,
                0.03,
                validation_data,
                test_data,
                lmbda=0.1)
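
# Illustrative driver (a sketch, not in the original source) for the sweep
# mentioned in the docstring above:
for n in [100, 300, 1000]:
    expanded_data_double_fc(n)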
Example #4
def double_fc_dropout(p0, p1, p2, repetitions):
    expanded_training_data, _, _ = network3.load_data_shared(
        "../data/mnist_expanded.pkl.gz")
    nets = []
    for j in range(repetitions):
        print("\n\nTraining using a dropout network with parameters "
              ), p0, p1, p2
        print("Training with expanded data, run num %s") % j
        net = Network([
            ConvPoolLayer(image_shape=(mini_batch_size, 1, 28, 28),
                          filter_shape=(20, 1, 5, 5),
                          poolsize=(2, 2),
                          activation_fn=ReLU),
            ConvPoolLayer(image_shape=(mini_batch_size, 20, 12, 12),
                          filter_shape=(40, 20, 5, 5),
                          poolsize=(2, 2),
                          activation_fn=ReLU),
            FullyConnectedLayer(
                n_in=40 * 4 * 4, n_out=1000, activation_fn=ReLU, p_dropout=p0),
            FullyConnectedLayer(
                n_in=1000, n_out=1000, activation_fn=ReLU, p_dropout=p1),
            SoftmaxLayer(n_in=1000, n_out=10, p_dropout=p2)
        ], mini_batch_size)
        net.SGD(expanded_training_data, 40, mini_batch_size, 0.03,
                validation_data, test_data)
        nets.append(net)
    return nets
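
# Illustrative usage (a sketch; the dropout probabilities are example values,
# not from the source): train three networks with 50% dropout on each layer.
nets = double_fc_dropout(0.5, 0.5, 0.5, 3)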
Example #5
def elu():
    net = None
    for j in range(RUNS):
        print "num %s, leaky relu, with regularization %s" % (j, 0.0001)
        net = Network([
            ConvPoolLayer(image_shape=(MB_SIZE, 1, IMAGE_SIZE, IMAGE_SIZE),
                          filter_shape=(5, 1, 12, 12),
                          poolsize=(3, 3),
                          activation_fn=ELU),
            ConvPoolLayer(image_shape=(MB_SIZE, 5, 30, 30),
                          filter_shape=(10, 5, 3, 3),
                          poolsize=(2, 2),
                          activation_fn=ELU),
            FullyConnectedLayer(
                n_in=10 * 14 * 14, n_out=200, activation_fn=ELU),
            FullyConnectedLayer(n_in=200, n_out=200, activation_fn=ELU),
            FullyConnectedLayer(n_in=200, n_out=100, activation_fn=ELU),
            SoftmaxLayer(n_in=100, n_out=2)
        ], MB_SIZE)
        net.SGD("ELU",
                training_data,
                EPOCHS,
                MB_SIZE,
                ETA,
                validation_data,
                test_data,
                lmbda=0.0001)
    return net
Example #6
def test_4():
    """全连接 + 卷积混合层 + 卷积混合层 + 全连接 + softmax
       激活函数:修正线性单元
       代价函数:L2规范化
       测试准确率:99.18%
    """

    name = sys._getframe().f_code.co_name
    print(name + "\n")

    training_data, validation_data, test_data = network3.load_data_shared()
    mini_batch_size = 10

    net = Network([
        ConvPoolLayer(image_shape=(mini_batch_size, 1, 28, 28),
                      filter_shape=(20, 1, 5, 5),
                      poolsize=(2, 2),
                      activation_fn=ReLU),
        ConvPoolLayer(image_shape=(mini_batch_size, 20, 12, 12),
                      filter_shape=(40, 20, 5, 5),
                      poolsize=(2, 2),
                      activation_fn=ReLU),
        FullyConnectedLayer(n_in=40 * 4 * 4, n_out=100, activation_fn=ReLU),
        SoftmaxLayer(n_in=100, n_out=10)
    ], mini_batch_size)

    net.SGD(training_data,
            60,
            mini_batch_size,
            0.03,
            validation_data,
            test_data,
            lmbda=0.1)
def shallow():
    for j in range(3):
        print "A shallow net with 100 hidden neurons"
        net = Network([
            FullyConnectedLayer(n_in=784, n_out=100),
            SoftmaxLayer(n_in=100, n_out=10)
        ], mini_batch_size)
        net.SGD(training_data, 60, mini_batch_size, 0.1, validation_data,
                test_data)
Example #8
def omit_FC():
    for j in range(3):
        print "Conv only, no FC"
        net = Network([
            ConvPoolLayer(image_shape=(mini_batch_size, 1, 28, 28), 
                          filter_shape=(20, 1, 5, 5), 
                          poolsize=(2, 2)),
            SoftmaxLayer(n_in=20*12*12, n_out=10)], mini_batch_size)
        net.SGD(training_data, 60, mini_batch_size, 0.1, validation_data, test_data)
    return net 
Example #9
def test_drop(mini_batch_size):
    size = 100
    f_s = 7
    padding = 1
    conv_stride = 1
    pool_stride = 1
    pool_size = 4
    n_f1 = 20
    n_f2 = 40
    n_f3 = 80

    co1 = ((size - f_s + 2 * padding) // conv_stride) + 1
    po1 = ((co1 - pool_size) // pool_stride) + 1
    print(po1)
    co2 = ((po1 - f_s + 2 * padding) // conv_stride) + 1
    po2 = ((co2 - pool_size) // pool_stride) + 1
    print(po2)
    co3 = ((po2 - f_s + 2 * padding) // conv_stride) + 1
    po3 = ((co3 - pool_size) // pool_stride) + 1
    print(po3)
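    # For these settings the formulas give co1 = 96, po1 = 93, co2 = 89,
    # po2 = 86, co3 = 82, po3 = 79; the first FC layer thus sees
    # n_f3 * 79 * 79 inputs.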

    layer1 = ConvPoolLayer(input_shape=(mini_batch_size, 1, size, size),
                           filter_shape=(n_f1, 1, f_s, f_s),
                           poolsize=(4, 4),
                           activation_fn=ReLU)

    layer2 = ConvPoolLayer(input_shape=(mini_batch_size, n_f1, po1, po1),
                           filter_shape=(n_f2, n_f1, f_s, f_s),
                           poolsize=(4, 4),
                           activation_fn=ReLU)

    layer3 = ConvPoolLayer(input_shape=(mini_batch_size, n_f2, po2, po2),
                           filter_shape=(n_f3, n_f2, f_s, f_s),
                           poolsize=(4, 4),
                           activation_fn=ReLU)

    layer4 = FullyConnectedLayer(n_in=n_f3 * po3 * po3,
                                 n_out=1000,
                                 activation_fn=ReLU,
                                 p_dropout=0.0)
    layer5 = FullyConnectedLayer(n_in=1000,
                                 n_out=500,
                                 activation_fn=ReLU,
                                 p_dropout=0.0)
    layer6 = SoftmaxLayer(n_in=500, n_out=2, p_dropout=0.0)

    net = Network([layer1, layer2, layer3, layer4, layer5, layer6],
                  mini_batch_size)
    net.SGD(training_data,
            10,
            mini_batch_size,
            0.001,
            validation_data,
            test_data,
            lmbda=0.0)
Example #10
def basic_conv(n=3, epochs=60):
    for j in range(n):
        print "Conv + FC architecture"
        net = Network([
            ConvPoolLayer(image_shape=(mini_batch_size, 1, 28, 28), 
                          filter_shape=(20, 1, 5, 5), 
                          poolsize=(2, 2)),
            FullyConnectedLayer(n_in=20*12*12, n_out=100),
            SoftmaxLayer(n_in=100, n_out=10)], mini_batch_size)
        net.SGD(
            training_data, epochs, mini_batch_size, 0.1, validation_data, test_data)
    return net 
Example #11
def shallow(n=3, epochs=60):
    nets = []
    for j in range(n):
        print("A shallow net with 100 hidden neurons")
        net = Network([
            FullyConnectedLayer(n_in=784, n_out=100),
            SoftmaxLayer(n_in=100, n_out=10)
        ], mini_batch_size)
        net.SGD(training_data, epochs, mini_batch_size, 0.1, validation_data,
                test_data)
        nets.append(net)
    return nets
Example #12
def test_conv(mini_batch_size):
    nets = []
    net = Network(
        [
            # Layer 0
            # 1 input image of size   = 224 x 224
            # 64 filters of size      = 3 x 3
            # poolsize                = 2 x 2 (stride length 2)
            # the next layer expects 64 feature maps of size 112 x 112
            ConvPoolLayer(input_shape=(mini_batch_size, 1, 224, 224),
                          filter_shape=(64, 1, 3, 3),
                          poolsize=(2, 2),
                          activation_fn=ReLU),
            # Layer 1
            # 64 input feature maps of size = 112 x 112
            # 128 filters of size           = 3 x 3
            # poolsize                      = 2 x 2 (stride length 2)
            # the next layer expects 128 feature maps of size 56 x 56
            ConvPoolLayer(input_shape=(mini_batch_size, 64, 112, 112),
                          filter_shape=(128, 64, 3, 3),
                          poolsize=(2, 2),
                          activation_fn=ReLU),
            ConvPoolLayer(input_shape=(mini_batch_size, 128, 56, 56),
                          filter_shape=(256, 128, 3, 3),
                          poolsize=(2, 2),
                          activation_fn=ReLU),
            ConvPoolLayer(input_shape=(mini_batch_size, 256, 28, 28),
                          filter_shape=(512, 256, 3, 3),
                          poolsize=(2, 2),
                          activation_fn=ReLU),
            ConvPoolLayer(input_shape=(mini_batch_size, 512, 14, 14),
                          filter_shape=(512, 512, 3, 3),
                          poolsize=(2, 2),
                          activation_fn=ReLU),

            # Layer 2
            FullyConnectedLayer(n_in=512 * 7 * 7,
                                n_out=4096,
                                activation_fn=ReLU,
                                p_dropout=0.0),
            FullyConnectedLayer(
                n_in=4096, n_out=1000, activation_fn=ReLU, p_dropout=0.0),
            SoftmaxLayer(n_in=1000, n_out=2, p_dropout=0.0)
        ],
        mini_batch_size)
    # End of Network Architecture

    net.SGD(training_data, 10, mini_batch_size, 0.01, validation_data,
            test_data)
    nets.append(net)
    return nets
Example #13
def basic_softmax_NN():
    mini_batch_size = 10
    train_data, val_data, test_data = go_parser.parse_games(1000,
                                                            test_percent=0.2,
                                                            val_percent=0.2,
                                                            onehot=False)
    net = Network(
        [
            # FullyConnectedLayer(n_in=361, n_out=200),
            SoftmaxLayer(n_in=361, n_out=361)
        ],
        mini_batch_size)
    net.SGD(shared(train_data), 50, mini_batch_size, 0.1, shared(val_data),
            shared(test_data))
Example #14
def test_0():
    """全连接 + 全连接 + softmax, 测试准确率97:80%
    """
    name = sys._getframe().f_code.co_name
    print(name + "\n")

    training_data, validation_data, test_data = network3.load_data_shared()

    mini_batch_size = 10
    net = Network([
        FullyConnectedLayer(n_in=784, n_out=100),
        SoftmaxLayer(n_in=100, n_out=10)
    ], mini_batch_size)
    net.SGD(training_data, 60, mini_batch_size, 0.1, validation_data,
            test_data)
Example #15
def test_1():
    """全连接 + 卷积混合层 + softmax,测试准确率98.48%
    """
    name = sys._getframe().f_code.co_name
    print(name + "\n")

    training_data, validation_data, test_data = network3.load_data_shared()

    mini_batch_size = 10
    net = Network([
        ConvPoolLayer(image_shape=(mini_batch_size, 1, 28, 28),
                      filter_shape=(20, 1, 5, 5),
                      poolsize=(2, 2)),
        SoftmaxLayer(n_in=20 * 12 * 12, n_out=10)
    ], mini_batch_size)
    net.SGD(training_data, 60, mini_batch_size, 0.1, validation_data,
            test_data)
Example #16
def dbl_conv(activation_fn=sigmoid):
    for j in range(3):
        print "Conv + Conv + FC architecture"
        net = Network([
            ConvPoolLayer(image_shape=(mini_batch_size, 1, 28, 28), 
                          filter_shape=(20, 1, 5, 5), 
                          poolsize=(2, 2),
                          activation_fn=activation_fn),
            ConvPoolLayer(image_shape=(mini_batch_size, 20, 12, 12), 
                          filter_shape=(40, 20, 5, 5), 
                          poolsize=(2, 2),
                          activation_fn=activation_fn),
            FullyConnectedLayer(
                n_in=40*4*4, n_out=100, activation_fn=activation_fn),
            SoftmaxLayer(n_in=100, n_out=10)], mini_batch_size)
        net.SGD(training_data, 60, mini_batch_size, 0.1, validation_data, test_data)
    return net 
Example #17
def test_ding(mini_batch_size):

    size = 32
    f_s = 5
    padding = 1
    conv_stride = 1
    pool_stride = 1
    pool_size = 3
    n_f1 = 20
    n_f2 = 40

    co1 = ((size - f_s + 2 * padding) // conv_stride) + 1
    po1 = ((co1 - pool_size) // pool_stride) + 1
    #print(po1)
    co2 = ((po1 - f_s + 2 * padding) // conv_stride) + 1
    po2 = ((co2 - pool_size) // pool_stride) + 1
    #print(po2)
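    # For these settings the formulas give co1 = 30, po1 = 28 (and co2 = 26,
    # po2 = 24 for the commented-out second conv layer below).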

    layer1 = ConvPoolLayer(input_shape=(mini_batch_size, 1, size, size),
                           filter_shape=(n_f1, 1, f_s, f_s),
                           poolsize=(pool_size, pool_size),
                           activation_fn=ReLU)

    #layer2 = ConvPoolLayer(input_shape=(mini_batch_size, n_f1, po1 , po1),
    #              filter_shape=(n_f2, n_f1, f_s, f_s),
    #              poolsize=(pool_size, pool_size),
    #              activation_fn=ReLU)

    layer3 = FullyConnectedLayer(n_in=n_f1 * po1 * po1,
                                 n_out=500,
                                 activation_fn=ReLU,
                                 p_dropout=0.0)

    layer4 = SoftmaxLayer(n_in=500, n_out=2, p_dropout=0.0)

    net = Network([layer1, layer3, layer4], mini_batch_size)

    net.SGD(training_data,
            50,
            mini_batch_size,
            0.3,
            validation_data,
            training_data,  # note: the training set is passed again here in place of a test set
            lmbda=0.0)
Example #18
def test_small(mini_batch_size):

    size = 100
    f_s = 5
    padding = 1
    conv_stride = 1
    pool_stride = 1
    pool_size = 3
    n_f1 = 30
    n_f2 = 60

    co1 = ((size - f_s + 2 * padding) // conv_stride) + 1
    po1 = ((co1 - pool_size) // pool_stride) + 1
    co2 = ((po1 - f_s + 2 * padding) // conv_stride) + 1
    po2 = ((co2 - pool_size) // pool_stride) + 1
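    # For these settings the formulas give co1 = 98, po1 = 96, co2 = 94, po2 = 92.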

    net = Network([
        ConvPoolLayer(input_shape=(mini_batch_size, 1, size, size),
                      filter_shape=(n_f1, 1, f_s, f_s),
                      poolsize=(pool_size, pool_size),
                      activation_fn=ReLU),
        ConvPoolLayer(input_shape=(mini_batch_size, n_f1, po1, po1),
                      filter_shape=(n_f2, n_f1, f_s, f_s),
                      poolsize=(pool_size, pool_size),
                      activation_fn=ReLU),
        FullyConnectedLayer(n_in=n_f2 * po2 * po2,
                            n_out=100,
                            activation_fn=ReLU,
                            p_dropout=0.0),
        SoftmaxLayer(n_in=100, n_out=2, p_dropout=0.0)
    ], mini_batch_size)

    net.SGD(training_data,
            100,
            mini_batch_size,
            0.01,
            validation_data,
            test_data,
            lmbda=0.0)

    f = open('../data/network.cnn', 'wb')
    cPickle.dump(net, f, protocol=2)
    f.close()
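
# Hedged sketch (not in the original source): reload the network pickled by
# test_small above. Assumes network3's layer classes are importable when
# unpickling, since cPickle stores references to them.
def load_saved_network(path='../data/network.cnn'):
    f = open(path, 'rb')
    net = cPickle.load(f)
    f.close()
    return net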
Example #19
def test_6():
    """全连接 + 卷积混合层 + 卷积混合层 + 全连接 + softmax
       激活函数:修正线性单元
       代价函数:L2规范化
       训练数据:使用扩展数据集,将数据集多扩张8倍
       测试准确率:99.45% (60 epochs), 99.58% (600 epochs)
    """
    name = sys._getframe().f_code.co_name
    print(name + "\n")

    # expand the dataset eightfold
    src_path = '../../minst-data/data/mnist.pkl.gz'
    dst_path = '../../minst-data/data/mnist_expanded_8.pkl.gz'

    study_note.mnistTest().expand_mnist(src_path=src_path,
                                        dst_path=dst_path,
                                        expand_count=8)

    training_data, validation_data, test_data = \
        network3.load_data_shared(dst_path)
    mini_batch_size = 10

    net = Network([
        ConvPoolLayer(image_shape=(mini_batch_size, 1, 28, 28),
                      filter_shape=(20, 1, 5, 5),
                      poolsize=(2, 2),
                      activation_fn=ReLU),
        ConvPoolLayer(image_shape=(mini_batch_size, 20, 12, 12),
                      filter_shape=(40, 20, 5, 5),
                      poolsize=(2, 2),
                      activation_fn=ReLU),
        FullyConnectedLayer(n_in=40 * 4 * 4, n_out=100, activation_fn=ReLU),
        SoftmaxLayer(n_in=100, n_out=10)
    ], mini_batch_size)

    net.SGD(training_data,
            60,
            mini_batch_size,
            0.03,
            validation_data,
            test_data,
            lmbda=0.1)
Example #20
def test_digit(mini_batch_size):

    size = 100
    f_s = 10
    padding = 1
    conv_stride = 1
    pool_stride = 1
    pool_size = 5
    n_f1 = 25
    n_f2 = 50

    co1 = ((size - f_s + 2 * padding) // conv_stride) + 1
    po1 = ((co1 - pool_size) // pool_stride) + 1
    co2 = ((po1 - f_s + 2 * padding) // conv_stride) + 1
    po2 = ((co2 - pool_size) // pool_stride) + 1
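    # For these settings the formulas give co1 = 93, po1 = 89, co2 = 82, po2 = 78.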

    net = Network(
        [
            ConvPoolLayer(input_shape=(mini_batch_size, 1, size, size),
                          filter_shape=(n_f1, 1, f_s, f_s),
                          poolsize=(pool_size, pool_size),
                          activation_fn=ReLU),
            ConvPoolLayer(input_shape=(mini_batch_size, n_f1, po1, po1),
                          filter_shape=(n_f2, n_f1, f_s, f_s),
                          poolsize=(pool_size, pool_size),
                          activation_fn=ReLU),
            FullyConnectedLayer(n_in=n_f2 * po2 * po2,
                                n_out=500,
                                activation_fn=ReLU,
                                p_dropout=0.0),
            #FullyConnectedLayer(n_in=1000, n_out=1000, activation_fn=ReLU, p_dropout=0.0),
            SoftmaxLayer(n_in=500, n_out=2, p_dropout=0.0)
        ],
        mini_batch_size)

    net.SGD(training_data,
            100,
            mini_batch_size,
            0.01,
            validation_data,
            test_data,
            lmbda=0.0)
Example #21
def test_3():
    """全连接 + 卷积混合层 + 卷积混合层 + 全连接 + softmax,测试准确率99.09%
    """
    name = sys._getframe().f_code.co_name
    print(name + "\n")

    training_data, validation_data, test_data = network3.load_data_shared()
    mini_batch_size = 10

    net = Network([
        ConvPoolLayer(image_shape=(mini_batch_size, 1, 28, 28),
                      filter_shape=(20, 1, 5, 5),
                      poolsize=(2, 2)),
        ConvPoolLayer(image_shape=(mini_batch_size, 20, 12, 12),
                      filter_shape=(40, 20, 5, 5),
                      poolsize=(2, 2)),
        FullyConnectedLayer(n_in=40 * 4 * 4, n_out=100),
        SoftmaxLayer(n_in=100, n_out=10)
    ], mini_batch_size)
    net.SGD(training_data, 60, mini_batch_size, 0.1, validation_data,
            test_data)
Example #22
def test_7():
    """全连接 + 卷积混合层 + 卷积混合层 + 全连接 + 全连接 + softmax
       激活函数:修正线性单元
       代价函数:L2规范化
       训练数据:使用扩展数据集
       测试准确率:99.49%
    """
    name = sys._getframe().f_code.co_name
    print(name + "\n")

    # expand the dataset
    expand_mnist.expand_mnist_data()
    dst_path = "../../minst-data/data/mnist_expanded.pkl.gz"

    training_data, validation_data, test_data = \
        network3.load_data_shared(dst_path)
    mini_batch_size = 10

    net = Network([
        ConvPoolLayer(image_shape=(mini_batch_size, 1, 28, 28),
                      filter_shape=(20, 1, 5, 5),
                      poolsize=(2, 2),
                      activation_fn=ReLU),
        ConvPoolLayer(image_shape=(mini_batch_size, 20, 12, 12),
                      filter_shape=(40, 20, 5, 5),
                      poolsize=(2, 2),
                      activation_fn=ReLU),
        FullyConnectedLayer(n_in=40 * 4 * 4, n_out=100, activation_fn=ReLU),
        FullyConnectedLayer(n_in=100, n_out=100, activation_fn=ReLU),
        SoftmaxLayer(n_in=100, n_out=10)
    ], mini_batch_size)
    net.SGD(training_data,
            60,
            mini_batch_size,
            0.03,
            validation_data,
            test_data,
            lmbda=0.1)
Example #23
def test_basic(mini_batch_size):
    nets = []
    net = Network(
        [
            ConvPoolLayer(input_shape=(mini_batch_size, 1, 224, 224),
                          filter_shape=(64, 1, 3, 3),
                          poolsize=(2, 2),
                          activation_fn=ReLU),

            # Layer 2
            FullyConnectedLayer(
                n_in=64 * 112 * 112, n_out=40, activation_fn=ReLU),

            # Activation function
            SoftmaxLayer(n_in=40, n_out=2)
        ],
        mini_batch_size)
    # End of Network Architecture

    net.SGD(training_data, 10, mini_batch_size, 0.001, validation_data,
            test_data)
    nets.append(net)
    return nets
Example #24
import network3
from network3 import Network
from network3 import ConvPoolLayer, FullyConnectedLayer, SoftmaxLayer


training_data, validation_data, test_data = network3.load_data_shared()
mini_batch_size = 10
net = Network([FullyConnectedLayer(n_in=784, n_out=100), SoftmaxLayer(n_in=100, n_out=10)], mini_batch_size)
net.SGD(training_data, 60, mini_batch_size, 0.1, validation_data, test_data)
Example #25
import time

net = Network([
    ConvPoolLayer(input_shape=(mini_batch_size, 1, 28, 28),
                  filter_shape=(20, 1, 5, 5),
                  poolsize=(2, 2),
                  activation_fn=ReLU),
    ConvPoolLayer(input_shape=(mini_batch_size, 20, 12, 12),
                  filter_shape=(40, 20, 5, 5),
                  poolsize=(2, 2),
                  activation_fn=ReLU),
    FullyConnectedLayer(n_in=40 * 4 * 4, n_out=100, activation_fn=ReLU),
    SoftmaxLayer(n_in=100, n_out=10)
], mini_batch_size)

start_time = time.time()
net.SGD(training_data,
        120,
        mini_batch_size,
        0.03,
        validation_data,
        test_data,
        lmbda=0.1)
end_time = time.time()

print(f'Total time elapsed: {end_time - start_time} seconds')
"""RESULTS:

100% of data
------------
mini_batch_size = 10
net.SGD(training_data, 50, mini_batch_size, 0.03, validation_data, test_data, lmbda=0.1)
Best validation accuracy of 99.16% obtained at iteration 139999 (epoch 27)
Corresponding test accuracy of 99.19%
Total time elapsed: 3364.949262857437 seconds
"""
Example #26
    "../data/quickdraw_expanded.pkl.gz")
mini_batch_size = 10
net = Network([
    ConvPoolLayer(image_shape=(mini_batch_size, 1, 28, 28),
                  filter_shape=(20, 1, 5, 5),
                  poolsize=(2, 2)),
    ConvPoolLayer(image_shape=(mini_batch_size, 20, 12, 12),
                  filter_shape=(40, 20, 3, 3),
                  poolsize=(2, 2)),
    FullyConnectedLayer(
        n_in=40 * 5 * 5, n_out=500, activation_fn=sigmoid, p_dropout=0.1),
    FullyConnectedLayer(
        n_in=500, n_out=500, activation_fn=sigmoid, p_dropout=0.1),
    SoftmaxLayer(n_in=500, n_out=10, p_dropout=0.1)
], mini_batch_size)
net.SGD(expanded_training_data, 70, mini_batch_size, 0.01, validation_data,
        test_data)
####Plot the weights of first layer
weights0 = np.load("/home/syrine92/Syrine_Belakaria/weights0.npy")
i = -1
fig, ax = plt.subplots(nrows=4, ncols=5)
for row in ax:
    for col in row:
        col.axes.get_xaxis().set_visible(False)
        col.axes.get_yaxis().set_visible(False)
        i = i + 1
        col.imshow(weights0[i][0], cmap='Greys')
fig.savefig('/home/syrine92/Syrine_Belakaria/weights0.png')
####Plot the weights of second layer
weights1 = np.load("/home/syrine92/Syrine_Belakaria/weights1.npy")
i = -1
fig, ax = plt.subplots(nrows=5, ncols=8)
Example #27
'''
net = Network([
    ConvPoolLayer(image_shape=(mini_batch_size, 1, 28, 28),
                  filter_shape=(20, 1, 5, 5),
                  poolsize=(2, 2)),
    ConvPoolLayer(image_shape=(mini_batch_size, 20, 12, 12),
                  filter_shape=(40, 20, 5, 5),
                  poolsize=(2, 2)),
    FullyConnectedLayer(n_in=40*4*4, n_out=100),
    SoftmaxLayer(n_in=100, n_out=10)], mini_batch_size)
net.SGD(training_data, 60, mini_batch_size, 0.1, validation_data, test_data)
'''

# chapter 6 - rectified linear units and some L2 regularization (lmbda=0.1) => even better accuracy
from network3 import ReLU
net = Network([
    ConvPoolLayer(image_shape=(mini_batch_size, 1, 28, 28),
                  filter_shape=(20, 1, 5, 5),
                  poolsize=(2, 2),
                  activation_fn=ReLU),
    ConvPoolLayer(image_shape=(mini_batch_size, 20, 12, 12),
                  filter_shape=(40, 20, 5, 5),
                  poolsize=(2, 2),
                  activation_fn=ReLU),
    FullyConnectedLayer(n_in=40 * 4 * 4, n_out=100, activation_fn=ReLU),
    SoftmaxLayer(n_in=100, n_out=10)
], mini_batch_size)
net.SGD(training_data,
        60,
        mini_batch_size,
        0.03,
        validation_data,
        test_data,
        lmbda=0.1)
regularization_factor = 0.1
topology = [
    ConvPoolLayer(image_shape=(mini_batch_size, 1, 28, 28),
                  filter_shape=(20, 1, 5, 5),
                  poolsize=(2, 2),
                  activation_fn=ReLU),
    ConvPoolLayer(image_shape=(mini_batch_size, 20, 12, 12),
                  filter_shape=(40, 20, 5, 5),
                  poolsize=(2, 2),
                  activation_fn=ReLU),
    FullyConnectedLayer(n_in=40*4*4, n_out=100, activation_fn=ReLU),
    SoftmaxLayer(n_in=100, n_out=10)]


net = Network(topology, mini_batch_size)
result.append(net.SGD(training_data, epochs, mini_batch_size, learning_rate, validation_data, test_data, lmbda=regularization_factor))
dump_file(result, "result_pickle"+str(filter_size))




filter_size=1

mini_batch_size = 10
epochs = 30
learning_rate = 0.03
regularization_factor = 0.1
topology = [
    ConvPoolLayer(image_shape=(mini_batch_size, 1, 28, 28),
                  filter_shape=(20, 1, filter_size, filter_size),
                  poolsize=(2, 2),
                          filter_shape=(50, 6, 9, 9),
                          poolsize=(1, 1),
                          activation_fn=ReLU),
            FullyConnectedLayer(
                n_in=50 * 5 * 5, n_out=1000, activation_fn=ReLU,
                p_dropout=0.5),
            FullyConnectedLayer(
                n_in=1000, n_out=500, activation_fn=ReLU, p_dropout=0.5),
            SoftmaxLayer(n_in=500, n_out=10, p_dropout=0.5)
        ], mini_batch_size)

        print "=========Currently Calculating Voter number %s=========" % vote
        k, p = net.SGD(expanded_training_data,
                       120,
                       mini_batch_size,
                       0.001,
                       validation_data,
                       test_data,
                       lmbda=0)
        #vote_box = np.concatenate((vote_box, k))
        #vote_prob_box = np.concatenate((vote_prob_box, np.array(p).reshape(-1,)))
    ### Polishing
    k, p = net.SGD(training_data,
                   5,
                   mini_batch_size,
                   0.0005,
                   validation_data,
                   test_data,
                   lmbda=0)
    vote_box = np.concatenate((vote_box, k))
    vote_prob_box = np.concatenate((vote_prob_box, np.array(p).reshape(-1, )))
Example #30
import network3
from network3 import Network
from network3 import ConvPoolLayer, FullyConnectedLayer, SoftmaxLayer
from network3 import ReLU

training_data, validation_data, test_data = network3.load_data_shared()
expanded_training_data, _, _ = network3.load_data_shared("mnist_expanded.pkl.gz")
mini_batch_size = 10

net = Network([
        ConvPoolLayer(image_shape=(mini_batch_size, 1, 28, 28),
                      filter_shape=(20, 1, 5, 5),
                      poolsize=(2, 2),
                      activation_fn=ReLU),
        ConvPoolLayer(image_shape=(mini_batch_size, 20, 12, 12),
                      filter_shape=(40, 20, 5, 5),
                      poolsize=(2, 2),
                      activation_fn=ReLU),
        FullyConnectedLayer(n_in=40*4*4, n_out=100, activation_fn=ReLU),
        SoftmaxLayer(n_in=100, n_out=10)], mini_batch_size)
net.SGD(expanded_training_data, 30, mini_batch_size, 0.03,
            validation_data, test_data, lmbda=0.1)