Example #1
def train_with_Resnet():
    from tensorgraphx.trainobject import train as mytrain
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        X_train, y_train, X_test, y_test = Cifar10(contrast_normalize=False,
                                                   whiten=False)
        _, h, w, c = X_train.shape
        _, nclass = y_train.shape
        print('X max', np.max(X_train))
        print('X min', np.min(X_train))
        seq = tg.Sequential()
        id1 = IdentityBlock(input_channels=c,
                            input_shape=(h, w),
                            nlayers=4,
                            filters=[32, 64])
        seq.add(id1)
        trans1 = TransitionLayer(input_channels=id1.output_channels,
                                 input_shape=id1.output_shape)
        seq.add(trans1)

        id2 = IdentityBlock(input_channels=trans1.output_channels,
                            input_shape=trans1.output_shape,
                            nlayers=4,
                            filters=[64, 128])
        seq.add(id2)
        trans2 = TransitionLayer(input_channels=id2.output_channels,
                                 input_shape=id2.output_shape)
        seq.add(trans2)
        seq.add(Flatten())
        ldim = trans2.output_channels * np.prod(trans2.output_shape)
        seq.add(Linear(ldim, nclass))
        seq.add(Softmax())

        X_ph = tf.placeholder('float32', [None, h, w, c])
        y_ph = tf.placeholder('float32', [None, nclass])

        y_train_sb = seq.train_fprop(X_ph)
        y_test_sb = seq.test_fprop(X_ph)
        train_cost_sb = entropy(y_ph, y_train_sb)
        optimizer = tf.train.AdamOptimizer(0.001)
        test_accu_sb = accuracy(y_ph, y_test_sb)

        mytrain(session=sess,
                feed_dict={
                    X_ph: X_train,
                    y_ph: y_train
                },
                train_cost_sb=train_cost_sb,
                valid_cost_sb=-test_accu_sb,
                optimizer=optimizer,
                epoch_look_back=5,
                max_epoch=100,
                percent_decrease=0,
                train_valid_ratio=[5, 1],
                batchsize=64,
                randomize_split=False)
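For reference, entropy and accuracy above are cost helpers imported alongside the model code. A minimal sketch of what they presumably compute, assuming the standard categorical cross-entropy and argmax-accuracy definitions (the actual implementations live in tensorgraphx):

def entropy_sketch(y_true, y_pred):
    # mean categorical cross-entropy; the epsilon guards against log(0)
    return -tf.reduce_mean(
        tf.reduce_sum(y_true * tf.log(y_pred + 1e-8), axis=1))

def accuracy_sketch(y_true, y_pred):
    # fraction of samples whose argmax prediction matches the label
    correct = tf.equal(tf.argmax(y_true, axis=1), tf.argmax(y_pred, axis=1))
    return tf.reduce_mean(tf.cast(correct, 'float32'))

Note the valid_cost_sb=-test_accu_sb trick: the trainer treats the validation quantity as a cost to minimize when deciding on early stopping, so negating the accuracy makes it maximize accuracy instead.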
Example #2
def test_Atrous_Conv2d():

    seq = tg.Sequential()
    seq.add(
        Atrous_Conv2D(input_channels=5,
                      num_filters=2,
                      kernel_size=(3, 3),
                      rate=3,
                      padding='SAME'))

    h, w, c = 100, 300, 5
    X_ph = tf.placeholder('float32', [None, h, w, c])

    y_sb = seq.train_fprop(X_ph)
    with tf.Session() as sess:
        init = tf.global_variables_initializer()
        sess.run(init)
        out = sess.run(y_sb, feed_dict={X_ph: np.random.rand(32, h, w, c)})
        print(out.shape)
        assert out.shape[1] == h and out.shape[2] == w

    seq = tg.Sequential()
    r = 2
    k = 5
    seq.add(
        Atrous_Conv2D(input_channels=5,
                      num_filters=2,
                      kernel_size=(k, k),
                      rate=r,
                      padding='VALID'))

    h, w, c = 100, 300, 5
    X_ph = tf.placeholder('float32', [None, h, w, c])

    y_sb = seq.train_fprop(X_ph)
    with tf.Session() as sess:
        init = tf.global_variables_initializer()
        sess.run(init)
        out = sess.run(y_sb, feed_dict={X_ph: np.random.rand(32, h, w, c)})
        print(out.shape)
        # VALID padding trims (effective kernel - 1) pixels per spatial dim,
        # where a kernel of size k dilated at rate r has effective size
        # k + (k - 1) * (r - 1)
        eff_k = k + (k - 1) * (r - 1)
        assert out.shape[1] == h - (eff_k - 1)
        assert out.shape[2] == w - (eff_k - 1)
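The arithmetic behind the VALID assert above: a kernel of size k dilated at rate r has an effective size of k + (k - 1) * (r - 1), and a VALID convolution trims effective size minus 1 pixels from each spatial dimension. Checked with the numbers used here:

k, r = 5, 2
eff_k = k + (k - 1) * (r - 1)    # 5 + 4 * 1 = 9
print(100 - (eff_k - 1))         # output height: 92
print(300 - (eff_k - 1))         # output width: 292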
Example #3
def train_with_Densenet():
    from tensorgraphx.trainobject import train as mytrain
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        X_train, y_train, X_test, y_test = Cifar10(contrast_normalize=False,
                                                   whiten=False)
        _, h, w, c = X_train.shape
        _, nclass = y_train.shape
        print('X max', np.max(X_train))
        print('X min', np.min(X_train))
        seq = tg.Sequential()
        dense = DenseNet(input_channels=c,
                         input_shape=(h, w),
                         ndense=3,
                         growth_rate=4,
                         nlayer1blk=4)
        seq.add(dense)
        seq.add(Flatten())
        ldim = dense.output_channels
        seq.add(Linear(ldim, nclass))
        seq.add(Softmax())

        X_ph = tf.placeholder('float32', [None, h, w, c])
        y_ph = tf.placeholder('float32', [None, nclass])

        y_train_sb = seq.train_fprop(X_ph)
        y_test_sb = seq.test_fprop(X_ph)
        train_cost_sb = entropy(y_ph, y_train_sb)
        optimizer = tf.train.AdamOptimizer(0.001)
        test_accu_sb = accuracy(y_ph, y_test_sb)

        print(tf.global_variables())
        print('..total number of global variables: {}'.format(
            len(tf.global_variables())))
        count = 0
        for var in tf.global_variables():
            count += int(np.prod(var.get_shape()))
        print('..total number of global parameters: {}'.format(count))

        mytrain(session=sess,
                feed_dict={
                    X_ph: X_train,
                    y_ph: y_train
                },
                train_cost_sb=train_cost_sb,
                valid_cost_sb=-test_accu_sb,
                optimizer=optimizer,
                epoch_look_back=5,
                max_epoch=100,
                percent_decrease=0,
                train_valid_ratio=[5, 1],
                batchsize=64,
                randomize_split=False)
Example #4
def test_VGG19():
    # h, w, c, nclass and the train() helper are module-level fixtures
    # defined elsewhere in the original test file
    seq = tg.Sequential()
    vgg = VGG19(input_channels=c, input_shape=(h, w))
    print('output channels:', vgg.output_channels)
    print('output shape:', vgg.output_shape)
    out_dim = np.prod(vgg.output_shape) * vgg.output_channels
    seq.add(vgg)
    seq.add(Flatten())
    seq.add(Linear(int(out_dim), nclass))
    seq.add(Softmax())
    train(seq)
Example #5
def test_Dropout():
    X_ph = tf.placeholder('float32', [None, 32])

    seq = tg.Sequential()
    seq.add(Linear(32, 20))
    seq.add(Dropout(0.2, noise_shape=[-1, 20]))

    out = seq.train_fprop(X_ph)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        out = sess.run(out, feed_dict={X_ph: np.random.rand(1, 32)})
        print(out)
        print(out.shape)
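The noise_shape argument controls how the dropout mask broadcasts over the input; the -1 presumably stands in for the dynamic batch dimension. A minimal sketch of the idea with the raw tf.nn.dropout op, assuming tensorgraphx forwards noise_shape to it:

x = tf.ones([4, 20])
# noise_shape [1, 20] samples one 20-unit mask and broadcasts it over axis 0
shared = tf.nn.dropout(x, keep_prob=0.8, noise_shape=[1, 20])
with tf.Session() as sess:
    print(sess.run(shared))    # all 4 rows carry the identical mask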
Example #6
def test_UNet():
    # h, w, c, nclass and train() are module-level fixtures, as in test_VGG19
    seq = tg.Sequential()
    model = UNet(input_channels=c, input_shape=(h, w))
    print('output channels:', model.output_channels)
    print('output shape:', model.output_shape)
    out_dim = np.prod(model.output_shape) * model.output_channels
    seq.add(model)
    seq.add(
        MaxPooling(poolsize=tuple(model.output_shape),
                   stride=(1, 1),
                   padding='VALID'))
    seq.add(Flatten())
    seq.add(Linear(model.output_channels, nclass))
    seq.add(Softmax())
    train(seq)
Example #7
def model():
    with tf.name_scope('MnistCNN'):
        seq = tg.Sequential()
        seq.add(
            Conv2D(input_channels=1,
                   num_filters=32,
                   kernel_size=(3, 3),
                   stride=(1, 1),
                   padding='SAME'))
        h, w = same(in_height=28,
                    in_width=28,
                    stride=(1, 1),
                    kernel_size=(3, 3))
        seq.add(BatchNormalization(input_shape=[h, w, 32]))
        seq.add(RELU())

        seq.add(MaxPooling(poolsize=(2, 2), stride=(2, 2), padding='SAME'))
        h, w = same(in_height=h, in_width=w, stride=(2, 2), kernel_size=(2, 2))
        seq.add(LRN())

        seq.add(
            Conv2D(input_channels=32,
                   num_filters=64,
                   kernel_size=(3, 3),
                   stride=(1, 1),
                   padding='SAME'))
        h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(3, 3))
        seq.add(BatchNormalization(input_shape=[h, w, 64]))
        seq.add(RELU())

        seq.add(MaxPooling(poolsize=(2, 2), stride=(2, 2), padding='SAME'))
        h, w = same(in_height=h, in_width=w, stride=(2, 2), kernel_size=(2, 2))
        seq.add(LRN())
        seq.add(Flatten())
        seq.add(Linear(int(h * w * 64), 128))
        seq.add(BatchNormalization(input_shape=[128]))
        seq.add(Tanh())
        seq.add(Dropout(0.8))
        seq.add(Linear(128, 256))
        seq.add(BatchNormalization(input_shape=[256]))
        seq.add(Tanh())
        seq.add(Dropout(0.8))
        seq.add(Linear(256, 10))
        seq.add(Softmax())
    return seq
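The same helper propagates the spatial shape through SAME-padded layers. A minimal sketch, assuming it implements TensorFlow's standard SAME formula out = ceil(in / stride), which does not depend on the kernel size:

import math

def same_sketch(in_height, in_width, stride, kernel_size):
    # SAME padding: kernel_size does not affect the output size
    return (int(math.ceil(in_height / float(stride[0]))),
            int(math.ceil(in_width / float(stride[1]))))

print(same_sketch(28, 28, stride=(1, 1), kernel_size=(3, 3)))    # (28, 28)
print(same_sketch(28, 28, stride=(2, 2), kernel_size=(2, 2)))    # (14, 14)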
Example #8
def test_DenseNet():
    # h, w, c, nclass and train() are module-level fixtures, as in test_VGG19
    seq = tg.Sequential()
    model = DenseNet(input_channels=c,
                     input_shape=(h, w),
                     ndense=1,
                     growth_rate=1,
                     nlayer1blk=1)
    print('output channels:', model.output_channels)
    print('output shape:', model.output_shape)
    seq.add(model)
    seq.add(
        MaxPooling(poolsize=tuple(model.output_shape),
                   stride=(1, 1),
                   padding='VALID'))
    seq.add(Flatten())
    seq.add(Linear(model.output_channels, nclass))
    seq.add(Softmax())
    train(seq)
Example #9
def test_Depthwise_Conv2d():

    seq = tg.Sequential()
    seq.add(
        Depthwise_Conv2D(input_channels=5,
                         num_filters=2,
                         kernel_size=(3, 3),
                         stride=(1, 1),
                         padding='SAME'))

    X_ph = tf.placeholder('float32', [None, 100, 100, 5])

    y_sb = seq.train_fprop(X_ph)
    with tf.Session() as sess:
        init = tf.global_variables_initializer()
        sess.run(init)
        out = sess.run(y_sb, feed_dict={X_ph: np.random.rand(32, 100, 100, 5)})
        print(out.shape)
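A depthwise convolution filters each input channel separately, so assuming Depthwise_Conv2D wraps tf.nn.depthwise_conv2d with num_filters acting as the channel multiplier, the printed shape should be (32, 100, 100, 10): 5 input channels times 2 filters each. A quick check against the raw op:

x_ph = tf.placeholder('float32', [None, 100, 100, 5])
# filter layout: [kernel_h, kernel_w, in_channels, channel_multiplier]
fltr = tf.Variable(tf.random_normal([3, 3, 5, 2]))
y = tf.nn.depthwise_conv2d(x_ph, fltr, strides=[1, 1, 1, 1], padding='SAME')
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    out = sess.run(y, feed_dict={x_ph: np.random.rand(32, 100, 100, 5)})
    print(out.shape)    # (32, 100, 100, 10)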
Example #10
def test_OneHot():
    X1 = tf.placeholder('int32', [5, 6, 7])
    X2 = tf.placeholder('int32', [5, 6, 7, 8])
    seq = tg.Sequential()
    seq.add(OneHot(onehot_size=3))

    y1 = seq.train_fprop(X1)
    y2 = seq.train_fprop(X2)

    with tf.Session() as sess:
        print(
            sess.run(y1,
                     feed_dict={
                         X1: np.random.randint(0, 3, [5, 6, 7])
                     }).shape)
        print(
            sess.run(y2,
                     feed_dict={
                         X2: np.random.randint(0, 3, [5, 6, 7, 8])
                     }).shape)
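OneHot appends a trailing axis of size onehot_size, so the two prints above should show (5, 6, 7, 3) and (5, 6, 7, 8, 3). The layer presumably reduces to tf.one_hot:

idx = tf.placeholder('int32', [5, 6, 7])
onehot = tf.one_hot(idx, depth=3)    # appends an axis of size 3
with tf.Session() as sess:
    out = sess.run(onehot, feed_dict={idx: np.random.randint(0, 3, [5, 6, 7])})
    print(out.shape)    # (5, 6, 7, 3)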
Example #11
def train_with_VGG():
    from tensorgraphx.trainobject import train as mytrain
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        X_train, y_train, X_test, y_test = Cifar10(contrast_normalize=False,
                                                   whiten=False)
        _, h, w, c = X_train.shape
        _, nclass = y_train.shape
        print('X max', np.max(X_train))
        print('X min', np.min(X_train))
        from tensorgraphx.layers import VGG19
        seq = tg.Sequential()
        layer = VGG19(input_channels=c, input_shape=(h, w))
        seq.add(layer)
        seq.add(Flatten())
        seq.add(Linear(512, nclass))
        seq.add(Softmax())
        X_ph = tf.placeholder('float32', [None, h, w, c])
        y_ph = tf.placeholder('float32', [None, nclass])

        y_train_sb = seq.train_fprop(X_ph)
        y_test_sb = seq.test_fprop(X_ph)
        train_cost_sb = entropy(y_ph, y_train_sb)
        optimizer = tf.train.AdamOptimizer(0.001)
        test_accu_sb = accuracy(y_ph, y_test_sb)

        mytrain(session=sess,
                feed_dict={
                    X_ph: X_train,
                    y_ph: y_train
                },
                train_cost_sb=train_cost_sb,
                valid_cost_sb=-test_accu_sb,
                optimizer=optimizer,
                epoch_look_back=5,
                max_epoch=100,
                percent_decrease=0,
                train_valid_ratio=[5, 1],
                batchsize=64,
                randomize_split=False)
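The hard-coded Linear(512, nclass) works because CIFAR-10 images are 32x32 and VGG19 halves the spatial dimensions five times: 32 / 2**5 = 1, leaving a 1 x 1 x 512 feature map whose flattened size is 512. Deriving the size from the layer itself, as test_VGG19 in Example #4 does, is more robust:

seq.add(Flatten())
ldim = int(np.prod(layer.output_shape) * layer.output_channels)    # 1 * 1 * 512
seq.add(Linear(ldim, nclass))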
Example #12
def test_ResNetBase():
    # h, w, c, nclass and train() are module-level fixtures, as in test_VGG19
    seq = tg.Sequential()
    model = ResNetBase(input_channels=c,
                       input_shape=(h, w),
                       config=[1, 1, 1, 1])
    print('output channels:', model.output_channels)
    print('output shape:', model.output_shape)
    seq.add(model)
    seq.add(
        MaxPooling(poolsize=tuple(model.output_shape),
                   stride=(1, 1),
                   padding='VALID'))
    outshape = valid_nd(model.output_shape,
                        kernel_size=model.output_shape,
                        stride=(1, 1))
    print(outshape)
    out_dim = model.output_channels
    seq.add(Flatten())
    seq.add(Linear(int(out_dim), nclass))
    seq.add(Softmax())
    train(seq)
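valid_nd computes the spatial shape after a VALID-padded op; with the pooling window equal to the whole feature map, outshape comes to (1, 1), which is why out_dim is just the channel count. A minimal sketch, assuming the standard VALID formula out = floor((in - kernel) / stride) + 1:

def valid_nd_sketch(in_shape, kernel_size, stride):
    # VALID padding: out = floor((in - kernel) / stride) + 1 per dimension
    return tuple((i - k) // s + 1
                 for i, k, s in zip(in_shape, kernel_size, stride))

print(valid_nd_sketch((4, 4), kernel_size=(4, 4), stride=(1, 1)))    # (1, 1)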
Example #13
def model(nclass, h, w, c):
    with tf.name_scope('Cifar10AllCNN'):
        seq = tg.Sequential()
        seq.add(
            Conv2D(input_channels=c,
                   num_filters=96,
                   kernel_size=(3, 3),
                   stride=(1, 1),
                   padding='SAME'))
        seq.add(RELU())
        h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(3, 3))
        seq.add(BatchNormalization(input_shape=[h, w, 96]))

        seq.add(
            Conv2D(input_channels=96,
                   num_filters=96,
                   kernel_size=(3, 3),
                   stride=(1, 1),
                   padding='SAME'))
        seq.add(RELU())
        h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(3, 3))
        seq.add(Dropout(0.5))

        seq.add(
            Conv2D(input_channels=96,
                   num_filters=96,
                   kernel_size=(3, 3),
                   stride=(2, 2),
                   padding='SAME'))
        seq.add(RELU())
        h, w = same(in_height=h, in_width=w, stride=(2, 2), kernel_size=(3, 3))
        seq.add(BatchNormalization(input_shape=[h, w, 96]))

        seq.add(
            Conv2D(input_channels=96,
                   num_filters=192,
                   kernel_size=(3, 3),
                   stride=(1, 1),
                   padding='SAME'))
        seq.add(RELU())
        h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(3, 3))
        seq.add(Dropout(0.5))

        seq.add(
            Conv2D(input_channels=192,
                   num_filters=192,
                   kernel_size=(3, 3),
                   stride=(1, 1),
                   padding='SAME'))
        seq.add(RELU())
        h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(3, 3))
        seq.add(BatchNormalization(input_shape=[h, w, 192]))

        seq.add(
            Conv2D(input_channels=192,
                   num_filters=192,
                   kernel_size=(3, 3),
                   stride=(2, 2),
                   padding='SAME'))
        seq.add(RELU())
        h, w = same(in_height=h, in_width=w, stride=(2, 2), kernel_size=(3, 3))
        seq.add(Dropout(0.5))

        seq.add(
            Conv2D(input_channels=192,
                   num_filters=192,
                   kernel_size=(3, 3),
                   stride=(1, 1),
                   padding='SAME'))
        seq.add(RELU())
        h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(3, 3))
        seq.add(BatchNormalization(input_shape=[h, w, 192]))

        seq.add(
            Conv2D(input_channels=192,
                   num_filters=192,
                   kernel_size=(1, 1),
                   stride=(1, 1),
                   padding='SAME'))
        seq.add(RELU())
        h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(1, 1))
        seq.add(Dropout(0.5))

        seq.add(
            Conv2D(input_channels=192,
                   num_filters=nclass,
                   kernel_size=(1, 1),
                   stride=(1, 1),
                   padding='SAME'))
        seq.add(RELU())
        h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(1, 1))
        seq.add(BatchNormalization(input_shape=[h, w, nclass]))

        seq.add(AvgPooling(poolsize=(h, w), stride=(1, 1), padding='VALID'))
        seq.add(Flatten())
        seq.add(Softmax())
    return seq
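The final AvgPooling with poolsize=(h, w) is global average pooling: each of the nclass feature maps collapses to a single value, so Flatten yields a [batch, nclass] tensor that feeds Softmax directly, with no fully connected layer (the All-CNN design the name scope refers to). A minimal sketch of the equivalent reduction:

x = tf.placeholder('float32', [None, 8, 8, 10])    # [batch, h, w, nclass]
gap = tf.reduce_mean(x, axis=[1, 2])               # global average pool -> [batch, nclass]
probs = tf.nn.softmax(gap)
with tf.Session() as sess:
    print(sess.run(probs, feed_dict={x: np.random.rand(2, 8, 8, 10)}).shape)    # (2, 10)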