Code Example #1
def test_maxout():
    # Maxout via feature pooling: each Conv2d over-produces feature maps and the
    # following FeaturePooling(4) keeps the maximum over every group of 4 channels.
    network = N.Network()

    network.setInput(RawInput((1, 28, 28)))
    network.append(conv.Conv2d(filter_size=(3, 3), feature_map_multiplier=128))
    network.append(pooling.FeaturePooling(4))
    network.append(pooling.Pooling((2, 2)))
    network.append(conv.Conv2d(filter_size=(3, 3), feature_map_multiplier=8))
    network.append(pooling.FeaturePooling(4))
    network.append(pooling.Pooling((2, 2)))
    network.append(conv.Conv2d(filter_size=(3, 3), feature_map_multiplier=8))
    network.append(pooling.FeaturePooling(4))
    network.append(pooling.GlobalPooling())
    network.append(fullconn.FullConn(input_feature=128, output_feature=10))
    network.append(output.SoftMax())

    network.build()

    trX, trY, teX, teY = l.load_mnist()

    for i in range(5000):
        print(i)
        network.train(trX, trY)
        # Print the test error rate: compare the predicted classes against the
        # argmax of the one-hot test labels.
        print(1 - np.mean(
            np.argmax(teY, axis=1) == np.argmax(network.predict(teX), axis=1)))
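
For reference, the FeaturePooling(4) steps above implement the maxout idea: each
Conv2d over-produces channels and the pooling keeps only the per-group maximum. A
minimal NumPy sketch of that operation, assuming an NCHW layout and a group size
of 4 (both inferred from the shapes used above, not taken from the library):

import numpy as np

def feature_pool_max(x, group_size=4):
    # x: (batch, channels, height, width); channels must divide evenly into groups.
    n, c, h, w = x.shape
    assert c % group_size == 0
    # Split the channel axis into groups and keep the per-group maximum.
    return x.reshape(n, c // group_size, group_size, h, w).max(axis=2)

x = np.random.randn(2, 128, 28, 28)
print(feature_pool_max(x).shape)  # (2, 32, 28, 28): 128 maps pooled down to 32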
Code Example #2
def test_seqlayer():
    network = N.Network()
    network.debug = True

    # ConvNN bundles Conv2d -> Relu -> Pooling into a single reusable layer via
    # the SeqLayer metaclass; only the Conv2d stage needs explicit construction.
    class ConvNN(layer.Layer, metaclass=compose.SeqLayer,
                 seq=[Conv2d, act.Relu, pooling.Pooling],
                 yaml_tag=u'!ConvNN',
                 type_name='ConvNN'):
        def __init__(self, feature_map_multiplier=1):
            super().__init__()
            self.bases[0] = Conv2d(feature_map_multiplier=feature_map_multiplier)

    network.setInput(RawInput((1, 28, 28)))
            
    network.append(ConvNN(feature_map_multiplier=32))
    network.append(ConvNN(feature_map_multiplier=2))
    network.append(ConvNN(feature_map_multiplier=2))
    
    network.append(reshape.Flatten())
    # 128 feature maps of 3x3 remain after three 2x2 poolings: 128*3*3 = 1152.
    network.append(fullconn.FullConn(input_feature=1152, output_feature=1152*2))
    network.append(act.Relu())
    network.append(fullconn.FullConn(input_feature=1152*2, output_feature=10))
    network.append(output.SoftMax())

    network.build()

    trX, trY, teX, teY = l.load_mnist()

    for i in range(5000):
        print(i)
        network.train(trX, trY)
        print(1 - np.mean(np.argmax(teY, axis=1) == np.argmax(network.predict(teX), axis=1)))
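
Conceptually, the SeqLayer composition above just chains the listed sub-layers so
that each one's output feeds the next. A library-independent sketch of that idea
(the SeqBlock class and its callable interface are illustrative assumptions, not
the library's actual API):

class SeqBlock:
    """Run a fixed sequence of sub-layers, feeding each output into the next."""
    def __init__(self, *sublayers):
        self.sublayers = list(sublayers)

    def __call__(self, x):
        for sublayer in self.sublayers:
            x = sublayer(x)  # output of one stage becomes input of the next
        return x

# Usage sketch: block = SeqBlock(conv, relu, pool); y = block(x)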
Code Example #3
def test2():
    network = N.Network()
    network.debug = True

    # The same network built with append() instead of explicit connect() calls:
    #network.setInput(RawInput((1, 28,28)))
    #network.append(conv.Conv2d(feature_map_multiplier=32))
    #network.append(act.Relu())
    #network.append(pooling.Pooling())
    #network.append(conv.Conv2d(feature_map_multiplier=2))
    #network.append(act.Relu())
    #network.append(pooling.Pooling())
    #network.append(conv.Conv2d(feature_map_multiplier=2))
    #network.append(act.Relu())
    #network.append(pooling.Pooling())
    #network.append(reshape.Flatten())
    #network.append(fullconn.FullConn(input_feature=1152, output_feature=1152*2))
    #network.append(act.Relu())
    #network.append(fullconn.FullConn(input_feature=1152*2, output_feature=10))
    #network.append(output.SoftMax())
    li = RawInput((1, 28, 28))
    network.setInput(li)

    lc1 = conv.Conv2d(feature_map_multiplier=32)
    la1 = act.Relu()
    lp1 = pooling.Pooling()
    lc2 = conv.Conv2d(feature_map_multiplier=2)
    la2 = act.Relu()
    lp2 = pooling.Pooling()
    lc3 = conv.Conv2d(feature_map_multiplier=2)
    la3 = act.Relu()
    lp3 = pooling.Pooling()
    lf = reshape.Flatten()
    lfc1 = fullconn.FullConn(input_feature=1152, output_feature=1152*2)
    la4 = act.Relu()
    lfc2 = fullconn.FullConn(input_feature=1152*2, output_feature=10)
    lsm = output.SoftMax()

    # Wire the layers into a chain explicitly with connect().
    network.connect(li, lc1)
    network.connect(lc1, la1)
    network.connect(la1, lp1)
    network.connect(lp1, lc2)
    network.connect(lc2, la2)
    network.connect(la2, lp2)
    network.connect(lp2, lc3)
    network.connect(lc3, la3)
    network.connect(la3, lp3)
    network.connect(lp3, lf)
    network.connect(lf, lfc1)
    network.connect(lfc1, la4)
    network.connect(la4, lfc2)
    network.connect(lfc2, lsm)

    network.build()

    trX, trY, teX, teY = l.load_mnist()

    for i in range(5000):
        print(i)
        network.train(trX, trY)
        print(1 - np.mean(np.argmax(teY, axis=1) == np.argmax(network.predict(teX), axis=1)))
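
The value printed inside every training loop above is the test error rate: both
the one-hot labels and the predicted probabilities are reduced to class indices
with argmax and compared. A standalone illustration with made-up numbers:

import numpy as np

teY = np.array([[0, 1, 0], [1, 0, 0], [0, 0, 1]])   # one-hot test labels
pred = np.array([[0.1, 0.8, 0.1],                   # predicted class 1, correct
                 [0.2, 0.3, 0.5],                   # predicted class 2, wrong
                 [0.1, 0.2, 0.7]])                  # predicted class 2, correct
error = 1 - np.mean(np.argmax(teY, axis=1) == np.argmax(pred, axis=1))
print(error)  # 0.333...: one of the three samples is misclassified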
Code Example #4
def test_mlp():
    n = N.Network()

    n.setInput(RawInput((1, 28, 28)))
    n.append(Flatten())
    # feature_map_multiplier=2 presumably doubles the 784 flattened inputs
    # to a 1568-unit hidden layer.
    n.append(FullConn(feature_map_multiplier=2))
    n.append(Elu())
    n.append(FullConn(output_feature=10))
    n.append(output.SoftMax())

    n.build()

    trX, trY, teX, teY = l.load_mnist()

    for i in range(100):
        print(i)
        n.train(trX, trY)
        print(1 - np.mean(np.argmax(teY, axis=1) == np.argmax(n.predict(teX), axis=1)))
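
As a shape check on the MLP above, here is a plain-NumPy forward pass with the
same layer sizes. The weights are random placeholders, and the 1568-unit hidden
width follows the doubling assumption noted in the comment above:

import numpy as np

def elu(x, alpha=1.0):
    return np.where(x > 0, x, alpha * (np.exp(x) - 1))

def softmax(z):
    e = np.exp(z - z.max(axis=1, keepdims=True))
    return e / e.sum(axis=1, keepdims=True)

batch = np.random.randn(16, 1, 28, 28)
x = batch.reshape(16, -1)                 # Flatten: (16, 784)
W1 = np.random.randn(784, 1568) * 0.01    # FullConn, assumed 2x width
W2 = np.random.randn(1568, 10) * 0.01     # FullConn down to 10 classes
probs = softmax(elu(x @ W1) @ W2)         # (16, 10); each row sums to 1
print(probs.shape, probs.sum(axis=1)[:3])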