# Assumed imports for these snippets (TensorFlow 1.x graph API; layer classes live in tensorgraph.layers):
import numpy as np
import tensorflow as tf
import tensorgraph as tg
from tensorgraph.layers import Atrous_Conv2D

def test_Atrous_Conv2d():

    seq = tg.Sequential()
    seq.add(Atrous_Conv2D(input_channels=5, num_filters=2, kernel_size=(3, 3), rate=3, padding='SAME'))

    h, w, c = 100, 300, 5
    X_ph = tf.placeholder('float32', [None, h, w, c])

    y_sb = seq.train_fprop(X_ph)
    with tf.Session() as sess:
        init = tf.global_variables_initializer()
        sess.run(init)
        out = sess.run(y_sb, feed_dict={X_ph:np.random.rand(32, h, w, c)})
        print(out.shape)
        assert out.shape[1] == h and out.shape[2] == w


    seq = tg.Sequential()
    r = 2
    k = 5
    seq.add(Atrous_Conv2D(input_channels=5, num_filters=2, kernel_size=(k, k), rate=r, padding='VALID'))

    h, w, c = 100, 300, 5
    X_ph = tf.placeholder('float32', [None, h, w, c])

    y_sb = seq.train_fprop(X_ph)
    with tf.Session() as sess:
        init = tf.global_variables_initializer()
        sess.run(init)
        out = sess.run(y_sb, feed_dict={X_ph:np.random.rand(32, h, w, c)})
        print(out.shape)
        # effective kernel size after dilation: ke = k + (k-1)*(r-1)
        ke = k + (k - 1) * (r - 1)
        assert out.shape[1] == h - 2 * int(ke / 2) and out.shape[2] == w - 2 * int(ke / 2)
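
For reference, the expected output size of a dilated (atrous) convolution can be computed directly. A minimal pure-Python sketch (standard convolution arithmetic, not a tensorgraph API; the function name is chosen here for illustration):

def atrous_out_size(in_size, k, r, padding):
    """Expected output size along one spatial dimension, stride 1."""
    ke = k + (k - 1) * (r - 1)  # effective kernel size after dilation
    if padding == 'SAME':
        return in_size          # SAME with stride 1 preserves the size
    return in_size - ke + 1     # VALID drops ke - 1 border pixels

assert atrous_out_size(100, k=5, r=2, padding='VALID') == 92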
Example #2
def model():
    with tf.name_scope('MnistCNN'):
        seq = tg.Sequential()
        seq.add(Conv2D(num_filters=32, kernel_size=(3, 3), stride=(1, 1), padding='SAME'))
        seq.add(BatchNormalization())
        seq.add(RELU())

        seq.add(MaxPooling(poolsize=(2, 2), stride=(2,2), padding='SAME'))
        seq.add(LRN())

        seq.add(Conv2D(num_filters=64, kernel_size=(3, 3), stride=(1, 1), padding='SAME'))
        seq.add(BatchNormalization())
        seq.add(RELU())

        seq.add(MaxPooling(poolsize=(2, 2), stride=(2,2), padding='SAME'))
        seq.add(LRN())
        seq.add(Flatten())
        seq.add(Linear(128))
        seq.add(BatchNormalization())
        seq.add(Tanh())
        seq.add(Dropout(0.8))
        seq.add(Linear(256))
        seq.add(BatchNormalization())
        seq.add(Tanh())
        seq.add(Dropout(0.8))
        seq.add(Linear(10))
        seq.add(Softmax())
    return seq
Example #3
def ph2onehot(ph, charlen):
    seq = tg.Sequential()
    seq.add(OneHot(charlen))          # [?, seq_len, charlen]
    seq.add(Expand_Dims(-1))          # [?, seq_len, charlen, 1]
    seq.add(Transpose((0, 2, 1, 3)))  # [?, charlen, seq_len, 1]
    oh = seq.train_fprop(ph)
    return oh
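
The same transform can be written with raw TensorFlow ops. A minimal sketch, assuming ph holds integer indices of shape [?, seq_len] (ph2onehot_tf is a name chosen here for illustration):

def ph2onehot_tf(ph, onehot_size):
    oh = tf.one_hot(ph, depth=onehot_size)  # [?, seq_len, onehot_size]
    oh = tf.expand_dims(oh, -1)             # [?, seq_len, onehot_size, 1]
    return tf.transpose(oh, (0, 2, 1, 3))   # [?, onehot_size, seq_len, 1]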
Example #4
def test_VGG19():
    seq = tg.Sequential()
    seq.add(VGG19())
    seq.add(Flatten())
    seq.add(Linear(this_dim=nclass))
    seq.add(Softmax())
    train(seq)
Example #5
def test_ResNetBase():
    seq = tg.Sequential()
    seq.add(ResNetBase(config=[1,1,1,1]))
    seq.add(MaxPooling(poolsize=(1,1), stride=(1,1), padding='VALID'))
    seq.add(Flatten())
    seq.add(Linear(this_dim=nclass))
    seq.add(Softmax())
    train(seq)
Example #6
def test_UNet():
    seq = tg.Sequential()
    seq.add(UNet(input_shape=(h, w)))
    seq.add(MaxPooling(poolsize=(3,3), stride=(1,1), padding='VALID'))
    seq.add(Flatten())
    seq.add(Linear(this_dim=nclass))
    seq.add(Softmax())
    train(seq)
Example #7
def test_DenseNet():
    seq = tg.Sequential()
    seq.add(DenseNet(ndense=1, growth_rate=1, nlayer1blk=1))
    seq.add(MaxPooling(poolsize=(3,3), stride=(1,1), padding='VALID'))
    seq.add(Flatten())
    seq.add(Linear(this_dim=nclass))
    seq.add(Softmax())
    train(seq)
Example #8
def train_with_Resnet():
    from tensorgraph.trainobject import train as mytrain
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        X_train, y_train, X_test, y_test = Cifar10(contrast_normalize=False,
                                                   whiten=False)
        _, h, w, c = X_train.shape
        _, nclass = y_train.shape
        print('X max', np.max(X_train))
        print('X min', np.min(X_train))
        seq = tg.Sequential()
        id1 = IdentityBlock(input_channels=c,
                            input_shape=(h, w),
                            nlayers=4,
                            filters=[32, 64])
        seq.add(id1)
        trans1 = TransitionLayer(input_channels=id1.output_channels,
                                 input_shape=id1.output_shape)
        seq.add(trans1)

        id2 = IdentityBlock(input_channels=trans1.output_channels,
                            input_shape=trans1.output_shape,
                            nlayers=4,
                            filters=[64, 128])
        seq.add(id2)
        trans2 = TransitionLayer(input_channels=id2.output_channels,
                                 input_shape=id2.output_shape)
        seq.add(trans2)
        seq.add(Flatten())
        ldim = trans2.output_channels * np.prod(trans2.output_shape)
        seq.add(Linear(ldim, nclass))
        seq.add(Softmax())

        X_ph = tf.placeholder('float32', [None, h, w, c])
        y_ph = tf.placeholder('float32', [None, nclass])

        y_train_sb = seq.train_fprop(X_ph)
        y_test_sb = seq.test_fprop(X_ph)
        train_cost_sb = entropy(y_ph, y_train_sb)
        optimizer = tf.train.AdamOptimizer(0.001)
        test_accu_sb = accuracy(y_ph, y_test_sb)

        mytrain(session=sess,
                feed_dict={
                    X_ph: X_train,
                    y_ph: y_train
                },
                train_cost_sb=train_cost_sb,
                valid_cost_sb=-test_accu_sb,
                optimizer=optimizer,
                epoch_look_back=5,
                max_epoch=100,
                percent_decrease=0,
                train_valid_ratio=[5, 1],
                batchsize=64,
                randomize_split=False)
Example #9
def test_VGG19():
    seq = tg.Sequential()
    vgg = VGG19(input_channels=c, input_shape=(h, w))
    print('output channels:', vgg.output_channels)
    print('output shape:', vgg.output_shape)
    out_dim = np.prod(vgg.output_shape) * vgg.output_channels
    seq.add(vgg)
    seq.add(Flatten())
    seq.add(Linear(int(out_dim), nclass))
    seq.add(Softmax())
    train(seq)
Example #10
def test_Conv3D():
    seq = tg.Sequential()
    seq.add(Conv3D(num_filters=2, kernel_size=(3, 3, 3), stride=(1, 1, 1), padding='SAME'))
    X_ph = tf.placeholder('float32', [None, 10, 10, 10, 5])
    y_train_sb = seq.train_fprop(X_ph)
    y_test_sb = seq.test_fprop(X_ph)
    with tf.Session() as sess:
        init = tf.global_variables_initializer()
        sess.run(init)
        out = sess.run(y_train_sb, feed_dict={X_ph:np.random.rand(32,10,10,10,5)})
        print(out.shape)
Example #11
def test_DenseNet():
    seq = tg.Sequential()
    model = DenseNet(input_channels=c, input_shape=(h, w), ndense=1, growth_rate=1, nlayer1blk=1)
    print('output channels:', model.output_channels)
    print('output shape:', model.output_shape)
    seq.add(model)
    seq.add(MaxPooling(poolsize=tuple(model.output_shape), stride=(1,1), padding='VALID'))
    seq.add(Flatten())
    seq.add(Linear(model.output_channels, nclass))
    seq.add(Softmax())
    train(seq)
Example #12
def model3D(img=(84, 256, 256)):
    with tf.name_scope('WMH'):
        seq = tg.Sequential()
        convStride = (1, 1, 1)
        poolStride = (2, 2, 2)
        seq.add(
            Conv3D(input_channels=1,
                   num_filters=10,
                   kernel_size=(3, 3, 3),
                   stride=convStride,
                   padding='SAME'))
        seq.add(TFBatchNormalization(name='b1'))
        seq.add(
            MaxPool3D(poolsize=(2, 2, 2), stride=poolStride, padding='SAME'))
        layerSize1 = updateConvLayerSize(img, poolStride)
        #print("layer1: "+str(layerSize1))
        seq.add(RELU())
        seq.add(
            Conv3D(input_channels=10,
                   num_filters=20,
                   kernel_size=(3, 3, 3),
                   stride=convStride,
                   padding='SAME'))
        seq.add(TFBatchNormalization(name='b2'))
        #layerSize2 = updateConvLayerSize(layerSize1,convStride)
        #print("layer1: "+str(layerSize2))
        seq.add(RELU())
        seq.add(
            Conv3D_Tranpose1(input_channels=20,
                             num_filters=10,
                             output_shape=layerSize1,
                             kernel_size=(3, 3, 3),
                             stride=convStride,
                             padding='SAME'))
        seq.add(RELU())
        seq.add(
            Conv3D_Tranpose1(input_channels=10,
                             num_filters=3,
                             output_shape=img,
                             kernel_size=(3, 3, 3),
                             stride=(2, 2, 2),
                             padding='SAME'))
        ##
        seq.add(RELU())
        seq.add(
            Conv3D(input_channels=3,
                   num_filters=2,
                   kernel_size=(3, 3, 3),
                   stride=convStride,
                   padding='SAME'))
        ##
        seq.add(Softmax())
        #seq.add(Sigmoid())
    return seq
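
updateConvLayerSize is not defined in these snippets. Since the pooled layers here all use SAME padding, each dimension plausibly shrinks to ceil(dim / stride); a sketch under that assumption:

import math

def updateConvLayerSize(dims, stride):
    # assumed helper: per-dimension output size under SAME padding
    return tuple(int(math.ceil(float(d) / s)) for d, s in zip(dims, stride))

# updateConvLayerSize((84, 256, 256), (2, 2, 2)) -> (42, 128, 128)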
Example #13
def train_with_Densenet():
    from tensorgraph.trainobject import train as mytrain
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        X_train, y_train, X_test, y_test = Cifar10(contrast_normalize=False,
                                                   whiten=False)
        _, h, w, c = X_train.shape
        _, nclass = y_train.shape
        print('X max', np.max(X_train))
        print('X min', np.min(X_train))
        seq = tg.Sequential()
        dense = DenseNet(input_channels=c,
                         input_shape=(h, w),
                         ndense=3,
                         growth_rate=4,
                         nlayer1blk=4)
        seq.add(dense)
        seq.add(Flatten())
        ldim = dense.output_channels
        seq.add(Linear(ldim, nclass))
        seq.add(Softmax())

        X_ph = tf.placeholder('float32', [None, h, w, c])
        y_ph = tf.placeholder('float32', [None, nclass])

        y_train_sb = seq.train_fprop(X_ph)
        y_test_sb = seq.test_fprop(X_ph)
        train_cost_sb = entropy(y_ph, y_train_sb)
        optimizer = tf.train.AdamOptimizer(0.001)
        test_accu_sb = accuracy(y_ph, y_test_sb)

        print(tf.global_variables())
        print('..total number of global variables: {}'.format(
            len(tf.global_variables())))
        count = 0
        for var in tf.global_variables():
            count += int(np.prod(var.get_shape()))
        print('..total number of global parameters: {}'.format(count))

        mytrain(session=sess,
                feed_dict={
                    X_ph: X_train,
                    y_ph: y_train
                },
                train_cost_sb=train_cost_sb,
                valid_cost_sb=-test_accu_sb,
                optimizer=optimizer,
                epoch_look_back=5,
                max_epoch=100,
                percent_decrease=0,
                train_valid_ratio=[5, 1],
                batchsize=64,
                randomize_split=False)
Example #14
def fcn():
    model = tg.Sequential()
    model.add(ResNet(num_blocks=1))
    model.add(
        Conv2D(input_channels=3,
               num_filters=1,
               kernel_size=(5, 5),
               stride=(1, 1),
               padding='SAME'))
    model.add(Sigmoid())
    return model
Example #15
def test_UNet():
    seq = tg.Sequential()
    model = UNet(input_channels=c, input_shape=(h, w))
    print('output channels:', model.output_channels)
    print('output shape:', model.output_shape)
    out_dim = np.prod(model.output_shape) * model.output_channels
    seq.add(model)
    seq.add(MaxPooling(poolsize=tuple(model.output_shape), stride=(1,1), padding='VALID'))
    seq.add(Flatten())
    seq.add(Linear(model.output_channels, nclass))
    seq.add(Softmax())
    train(seq)
Example #16
def test_Dropout():
    X_ph = tf.placeholder('float32', [None, 32])
    seq = tg.Sequential()
    seq.add(Linear(20))
    seq.add(Dropout(0.2, noise_shape=[-1, 20]))

    out = seq.train_fprop(X_ph)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        out = sess.run(out, feed_dict={X_ph: np.random.rand(1, 32)})
        print(out)
        print(out.shape)
Example #17
def model(nclass, h, w, c):
    with tf.name_scope('Cifar10AllCNN'):
        seq = tg.Sequential()
        seq.add(Conv2D(input_channels=c, num_filters=96, kernel_size=(3, 3), stride=(1, 1), padding='SAME'))
        seq.add(RELU())
        seq.add(TFBatchNormalization(name='b1'))
        h, w = same(in_height=h, in_width=w, stride=(1,1), kernel_size=(3,3))

        seq.add(Conv2D(input_channels=96, num_filters=96, kernel_size=(3, 3), stride=(1, 1), padding='SAME'))
        seq.add(RELU())
        h, w = same(in_height=h, in_width=w, stride=(1,1), kernel_size=(3,3))
        seq.add(Dropout(0.5))

        seq.add(Conv2D(input_channels=96, num_filters=96, kernel_size=(3, 3), stride=(2, 2), padding='SAME'))
        seq.add(RELU())
        seq.add(TFBatchNormalization(name='b3'))
        h, w = same(in_height=h, in_width=w, stride=(2,2), kernel_size=(3,3))

        seq.add(Conv2D(input_channels=96, num_filters=192, kernel_size=(3, 3), stride=(1, 1), padding='SAME'))
        seq.add(RELU())
        h, w = same(in_height=h, in_width=w, stride=(1,1), kernel_size=(3,3))
        seq.add(Dropout(0.5))

        seq.add(Conv2D(input_channels=192, num_filters=192, kernel_size=(3, 3), stride=(1, 1), padding='SAME'))
        seq.add(RELU())
        seq.add(TFBatchNormalization(name='b5'))
        h, w = same(in_height=h, in_width=w, stride=(1,1), kernel_size=(3,3))

        seq.add(Conv2D(input_channels=192, num_filters=192, kernel_size=(3, 3), stride=(2, 2), padding='SAME'))
        seq.add(RELU())
        h, w = same(in_height=h, in_width=w, stride=(2,2), kernel_size=(3,3))
        seq.add(Dropout(0.5))

        seq.add(Conv2D(input_channels=192, num_filters=192, kernel_size=(3, 3), stride=(1, 1), padding='SAME'))
        seq.add(RELU())
        seq.add(TFBatchNormalization(name='b7'))
        h, w = same(in_height=h, in_width=w, stride=(1,1), kernel_size=(3,3))

        seq.add(Conv2D(input_channels=192, num_filters=192, kernel_size=(1, 1), stride=(1, 1), padding='SAME'))
        seq.add(RELU())
        h, w = same(in_height=h, in_width=w, stride=(1,1), kernel_size=(1,1))
        seq.add(Dropout(0.5))

        seq.add(Conv2D(input_channels=192, num_filters=nclass, kernel_size=(1, 1), stride=(1, 1), padding='SAME'))
        seq.add(RELU())
        seq.add(TFBatchNormalization(name='b9'))
        h, w = same(in_height=h, in_width=w, stride=(1,1), kernel_size=(1,1))

        seq.add(AvgPooling(poolsize=(h, w), stride=(1,1), padding='VALID'))
        seq.add(Flatten())
        seq.add(Softmax())
    return seq
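
The same() helper used above tracks spatial dimensions through SAME-padded layers. A minimal sketch, assuming it follows TensorFlow's SAME rule (output = ceil(input / stride), independent of the kernel size):

import math

def same(in_height, in_width, stride, kernel_size):
    # SAME padding: out = ceil(in / stride); kernel_size does not affect the size
    out_h = int(math.ceil(float(in_height) / stride[0]))
    out_w = int(math.ceil(float(in_width) / stride[1]))
    return out_h, out_w

Example #18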
def test_SparseLinear():
    seq = tg.Sequential()
    seq.add(SparseLinear(prev_dim=10, this_dim=300, batchsize=8))
    seq.add(Linear(this_dim=10))

    idx_ph = tf.placeholder('int32', [None, None])
    val_ph = tf.placeholder('float32', [None])
    y_sb = seq.train_fprop([idx_ph, val_ph])
    with tf.Session() as sess:
        init = tf.global_variables_initializer()
        sess.run(init)
        out = sess.run(y_sb, feed_dict={idx_ph:[[0, 0], [1, 2]], val_ph:[5,6]})
        print(out.shape)
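
SparseLinear consumes an index/value pair. In raw TF1 terms this corresponds to a tf.SparseTensor multiplied into a dense weight matrix; a sketch under that assumption, with shapes matching the test above:

idx_ph = tf.placeholder('int64', [None, 2])   # (row, col) coordinates
val_ph = tf.placeholder('float32', [None])
sp = tf.SparseTensor(indices=idx_ph, values=val_ph, dense_shape=[8, 10])  # batchsize x prev_dim
W = tf.Variable(tf.random_normal([10, 300]))  # prev_dim x this_dim
out = tf.sparse_tensor_dense_matmul(sp, W)    # [8, 300]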
Example #19
def test_ResNetBase():
    seq = tg.Sequential()
    model = ResNetBase(input_channels=c, input_shape=(h, w), config=[1,1,1,1])
    print('output channels:', model.output_channels)
    print('output shape:', model.output_shape)
    seq.add(model)
    seq.add(MaxPooling(poolsize=tuple(model.output_shape), stride=(1,1), padding='VALID'))
    outshape = valid_nd(model.output_shape, kernel_size=model.output_shape, stride=(1,1))
    print(outshape)
    out_dim = model.output_channels
    seq.add(Flatten())
    seq.add(Linear(int(out_dim), nclass))
    seq.add(Softmax())
    train(seq)
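Example #20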
def test_linear():
    seq = tg.Sequential()
    seq.add(Linear(this_dim=100))
    seq.add(LinearMasked(this_dim=200, mask=np.zeros(200)))
    seq.add(Linear(this_dim=10))


    X_ph = tf.placeholder('float32', [None, 100])

    y_sb = seq.train_fprop(X_ph)
    with tf.Session() as sess:
        init = tf.global_variables_initializer()
        sess.run(init)
        out = sess.run(y_sb, feed_dict={X_ph:np.random.rand(32,100)})
        print(out.shape)
Example #21
def model():
    with tf.name_scope('MnistCNN'):
        seq = tg.Sequential()
        seq.add(
            Conv2D(input_channels=1,
                   num_filters=32,
                   kernel_size=(3, 3),
                   stride=(1, 1),
                   padding='SAME'))
        h, w = same(in_height=28,
                    in_width=28,
                    stride=(1, 1),
                    kernel_size=(3, 3))
        seq.add(BatchNormalization(input_shape=[h, w, 32]))
        seq.add(RELU())

        seq.add(MaxPooling(poolsize=(2, 2), stride=(2, 2), padding='SAME'))
        h, w = same(in_height=h, in_width=w, stride=(2, 2), kernel_size=(2, 2))
        seq.add(LRN())

        seq.add(
            Conv2D(input_channels=32,
                   num_filters=64,
                   kernel_size=(3, 3),
                   stride=(1, 1),
                   padding='SAME'))
        h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(3, 3))
        seq.add(BatchNormalization(input_shape=[h, w, 64]))
        seq.add(RELU())

        seq.add(MaxPooling(poolsize=(2, 2), stride=(2, 2), padding='SAME'))
        h, w = same(in_height=h, in_width=w, stride=(2, 2), kernel_size=(2, 2))
        seq.add(LRN())
        seq.add(Flatten())
        seq.add(Linear(int(h * w * 64), 128))
        seq.add(BatchNormalization(input_shape=[128]))
        seq.add(Tanh())
        seq.add(Dropout(0.8))
        seq.add(Linear(128, 256))
        seq.add(BatchNormalization(input_shape=[256]))
        seq.add(Tanh())
        seq.add(Dropout(0.8))
        seq.add(Linear(256, 10))
        seq.add(Softmax())
    return seq
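Example #22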
def test_lstm(layer, seq_len, fea_dim):
    x_ph = tf.placeholder('float32', [None, seq_len, fea_dim])

    seq = tg.Sequential()
    seq.add(layer)
    print(seq.total_num_parameters())
    train_sb = seq.train_fprop(x_ph)
    test_sb = seq.test_fprop(x_ph)
    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        feed_dict = {x_ph: np.random.rand(100, seq_len, fea_dim)}
        outputs, last_states = sess.run(train_sb, feed_dict=feed_dict)
        C, h = last_states
        print('outputs: {}'.format(outputs.shape))
        print('last_states: {}, {}'.format(C.shape, h.shape))
        # import pdb; pdb.set_trace()
        sess.run(test_sb, feed_dict=feed_dict)
        print(layer.__class__.__name__ + ' test done!')
Example #23
def test_OneHot():
    X1 = tf.placeholder('int32', [5, 6, 7])
    X2 = tf.placeholder('int32', [5, 6, 7, 8])
    seq = tg.Sequential()
    seq.add(OneHot(onehot_size=3))

    y1 = seq.train_fprop(X1)
    y2 = seq.train_fprop(X2)

    with tf.Session() as sess:
        print(
            sess.run(y1,
                     feed_dict={
                         X1: np.random.randint(0, 3, [5, 6, 7])
                     }).shape)
        print(
            sess.run(y2,
                     feed_dict={
                         X2: np.random.randint(0, 3, [5, 6, 7, 8])
                     }).shape)
Example #24
def train_with_VGG():
    from tensorgraph.trainobject import train as mytrain
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        X_train, y_train, X_test, y_test = Cifar10(contrast_normalize=False,
                                                   whiten=False)
        _, h, w, c = X_train.shape
        _, nclass = y_train.shape
        print('X max', np.max(X_train))
        print('X min', np.min(X_train))
        from tensorgraph.layers import VGG19
        seq = tg.Sequential()
        layer = VGG19(input_channels=c, input_shape=(h, w))
        seq.add(layer)
        seq.add(Flatten())
        seq.add(Linear(512, nclass))
        seq.add(Softmax())
        X_ph = tf.placeholder('float32', [None, h, w, c])
        y_ph = tf.placeholder('float32', [None, nclass])

        y_train_sb = seq.train_fprop(X_ph)
        y_test_sb = seq.test_fprop(X_ph)
        train_cost_sb = entropy(y_ph, y_train_sb)
        optimizer = tf.train.AdamOptimizer(0.001)
        test_accu_sb = accuracy(y_ph, y_test_sb)

        mytrain(session=sess,
                feed_dict={
                    X_ph: X_train,
                    y_ph: y_train
                },
                train_cost_sb=train_cost_sb,
                valid_cost_sb=-test_accu_sb,
                optimizer=optimizer,
                epoch_look_back=5,
                max_epoch=100,
                percent_decrease=0,
                train_valid_ratio=[5, 1],
                batchsize=64,
                randomize_split=False)
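Example #25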
def test_dynamic_lstm(layer, seq_len, fea_dim):
    x_ph = tf.placeholder('float32', [None, seq_len, fea_dim])
    seq_ph = tf.placeholder('int64', [None])

    seq = tg.Sequential()
    seq.add(layer)

    train_sb = seq.train_fprop([x_ph, seq_ph])
    test_sb = seq.test_fprop([x_ph, seq_ph])

    print('... total number of parameters', seq.total_num_parameters())

    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        feed_dict = {
            x_ph: np.random.rand(100, seq_len, fea_dim),
            seq_ph: np.random.randint(1, 10, size=100)
        }
        sess.run(train_sb, feed_dict=feed_dict)
        sess.run(test_sb, feed_dict=feed_dict)
        sess.run(train_sb, feed_dict=feed_dict)
        print(layer.__class__.__name__ + ' test done!')
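
Dynamic LSTM layers of this kind typically wrap tf.nn.dynamic_rnn, where sequence_length masks out padded timesteps. A minimal raw-TF1 sketch (num_units chosen here for illustration):

cell = tf.nn.rnn_cell.LSTMCell(num_units=32)
outputs, last_states = tf.nn.dynamic_rnn(cell, x_ph, sequence_length=seq_ph, dtype=tf.float32)
# outputs: [batch, seq_len, 32], zeroed beyond each example's true length
# last_states: LSTMStateTuple(c, h), as unpacked in test_lstm above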
Example #26
def model3D_2(img=(84, 256, 256)):
    with tf.name_scope('WMH_2Chan_Input'):
        seq = tg.Sequential()
        convStride = (1, 1, 1)
        poolStride = (2, 2, 2)
        seq.add(
            Conv3D(input_channels=1,
                   num_filters=8,
                   kernel_size=(5, 5, 5),
                   stride=convStride,
                   padding='SAME'))
        seq.add(TFBatchNormalization(name='b1'))
        seq.add(
            MaxPool3D(poolsize=(2, 2, 2), stride=poolStride, padding='SAME'))
        layerSize1 = updateConvLayerSize(img, poolStride)
        seq.add(RELU())

        seq.add(
            Conv3D(input_channels=8,
                   num_filters=16,
                   kernel_size=(3, 3, 3),
                   stride=convStride,
                   padding='SAME'))
        seq.add(TFBatchNormalization(name='b2'))

        ## Extra MaxPool
        #seq.add(MaxPool3D(poolsize=(2,2,2), stride=poolStride, padding='SAME'))
        #layerSize2 = updateConvLayerSize(layerSize1,convStride)
        #seq.add(RELU())
        ## Extra Conv
        #seq.add(Conv3D(input_channels=16, num_filters=16, kernel_size=(3,3,3), stride=convStride, padding='SAME'))
        #seq.add(TFBatchNormalization(name='b3'))

        seq.add(
            Conv3D_Tranpose1(input_channels=16,
                             num_filters=8,
                             output_shape=layerSize1,
                             kernel_size=(3, 3, 3),
                             stride=convStride,
                             padding='SAME'))
        seq.add(TFBatchNormalization(name='b4'))
        seq.add(RELU())
        seq.add(
            Conv3D_Tranpose1(input_channels=8,
                             num_filters=2,
                             output_shape=img,
                             kernel_size=(3, 3, 3),
                             stride=poolStride,
                             padding='SAME'))
        #seq.add(TFBatchNormalization(name='b5'))
        #seq.add(RELU())

        #seq.add(Conv3D(input_channels=2, num_filters=2, kernel_size=(1,1,1), stride=convStride, padding='SAME'))
        ##
        ##
        #seq.add(RELU())
        #seq.add(Conv3D(input_channels=2, num_filters=2, kernel_size=(3,3,3), stride=convStride, padding='SAME'))
        #        seq.add(RELU())
        #        seq.add(Conv3D(input_channels=3, num_filters=3, kernel_size=(3,3,3), stride=convStride, padding='SAME'))
        #        seq.add(RELU())
        #        seq.add(Conv3D(input_channels=3, num_filters=3, kernel_size=(3,3,3), stride=convStride, padding='SAME'))
        #
        ##
        seq.add(Softmax())
        #seq.add(Sigmoid())
    return seq
Example #27
def train():

    batchsize = 64
    learning_rate = 0.001
    max_epoch = 100

    # batch x depth x height x width x channel
    X_train = np.random.rand(1000, 20, 32, 32, 1)
    M_train = np.random.rand(1000, 20, 32, 32, 1)

    X_valid = np.random.rand(1000, 20, 32, 32, 1)
    M_valid = np.random.rand(1000, 20, 32, 32, 1)

    X_ph = tf.placeholder('float32', [None, 20, 32, 32, 1])
    M_ph = tf.placeholder('float32', [None, 20, 32, 32, 1])

    h, w = 32, 32

    model = tg.Sequential()
    # iter_model = tg.Sequential()
    model.add(
        Conv3D(input_channels=1,
               num_filters=8,
               kernel_size=(5, 5, 5),
               stride=(1, 1, 1),
               padding='SAME'))
    model.add(RELU())
    model.add(
        Conv3D(input_channels=8,
               num_filters=1,
               kernel_size=(5, 5, 5),
               stride=(1, 1, 1),
               padding='SAME'))
    # iter_model.add(RELU())
    # model.add(Iterative(sequential=iter_model, num_iter=1))
    model.add(Sigmoid())

    M_train_s = model.train_fprop(X_ph)
    M_valid_s = model.test_fprop(X_ph)

    train_mse = tf.reduce_mean((M_ph - M_train_s)**2)
    valid_mse = tf.reduce_mean((M_ph - M_valid_s)**2)
    # train_mse = entropy(M_ph, M_train_s)
    # valid_mse = entropy(M_ph, M_valid_s)
    valid_f1 = image_f1(tf.to_int32(M_ph), tf.to_int32(M_valid_s >= 0.5))

    data_train = tg.SequentialIterator(X_train, M_train, batchsize=batchsize)
    data_valid = tg.SequentialIterator(X_valid, M_valid, batchsize=batchsize)

    optimizer = tf.train.AdamOptimizer(learning_rate).minimize(train_mse)

    with tf.Session() as sess:
        init = tf.global_variables_initializer()
        sess.run(init)
        for epoch in range(max_epoch):
            print('epoch:', epoch)
            print('..training')
            pbar = ProgressBar(len(data_train))
            n_exp = 0
            for X_batch, M_batch in data_train:
                pbar.update(n_exp)
                sess.run(optimizer, feed_dict={X_ph: X_batch, M_ph: M_batch})
                n_exp += len(X_batch)

            print('..validating')
            valid_f1_score, valid_mse_score = sess.run([valid_f1, valid_mse],
                                                       feed_dict={
                                                           X_ph: X_valid,
                                                           M_ph: M_valid
                                                       })
            print('valid mse score:', valid_mse_score)
            print('valid f1 score:', valid_f1_score)
Example #28
def ph2onehot(ph, char_embed_dim):
    seq = tg.Sequential()
    seq.add(OneHot(char_embed_dim))
    seq.add(Transpose((0,3,2,1)))
    oh = seq.train_fprop(ph)
    return oh
Example #29
def train():

    batchsize = 64
    learning_rate = 0.001
    max_epoch = 10

    X_train = np.random.rand(1000, 32, 32, 3)
    M_train = np.random.rand(1000, 32, 32, 1)

    X_valid = np.random.rand(1000, 32, 32, 3)
    M_valid = np.random.rand(1000, 32, 32, 1)

    X_ph = tf.placeholder('float32', [None, 32, 32, 3])
    M_ph = tf.placeholder('float32', [None, 32, 32, 1])

    h, w = 32, 32

    model = tg.Sequential()
    model.add(
        Conv2D(input_channels=3,
               num_filters=8,
               kernel_size=(5, 5),
               stride=(2, 2),
               padding='SAME'))
    h1, w1 = same(h, w, kernel_size=(5, 5), stride=(2, 2))
    model.add(RELU())
    model.add(
        Conv2D(input_channels=8,
               num_filters=16,
               kernel_size=(5, 5),
               stride=(2, 2),
               padding='SAME'))
    h2, w2 = same(h1, w1, kernel_size=(5, 5), stride=(2, 2))
    model.add(RELU())
    model.add(
        Conv2D_Transpose(input_channels=16,
                         num_filters=8,
                         output_shape=(h1, w1),
                         kernel_size=(5, 5),
                         stride=(2, 2),
                         padding='SAME'))
    model.add(RELU())
    model.add(
        Conv2D_Transpose(input_channels=8,
                         num_filters=1,
                         output_shape=(h, w),
                         kernel_size=(5, 5),
                         stride=(2, 2),
                         padding='SAME'))
    model.add(RELU())

    iter_model = tg.Sequential()
    iter_model.add(
        Conv2D(input_channels=1,
               num_filters=8,
               kernel_size=(5, 5),
               stride=(2, 2),
               padding='SAME'))
    iter_model.add(RELU())
    iter_model.add(
        Conv2D_Transpose(input_channels=8,
                         num_filters=1,
                         output_shape=(h, w),
                         kernel_size=(5, 5),
                         stride=(2, 2),
                         padding='SAME'))
    model.add(Iterative(sequential=iter_model, num_iter=10))

    M_train_s = model.train_fprop(X_ph)
    M_valid_s = model.test_fprop(X_ph)

    train_mse = tf.reduce_mean((M_ph - M_train_s)**2)
    valid_mse = tf.reduce_mean((M_ph - M_valid_s)**2)

    data_train = tg.SequentialIterator(X_train, M_train, batchsize=batchsize)
    data_valid = tg.SequentialIterator(X_valid, M_valid, batchsize=batchsize)

    optimizer = tf.train.AdamOptimizer(learning_rate).minimize(train_mse)

    with tf.Session() as sess:
        init = tf.global_variables_initializer()
        sess.run(init)
        for epoch in range(max_epoch):
            print('epoch:', epoch)
            print('..training')
            for X_batch, M_batch in data_train:
                sess.run(optimizer, feed_dict={X_ph: X_batch, M_ph: M_batch})

            print('..validating')
            valid_mse_score = sess.run(valid_mse,
                                       feed_dict={
                                           X_ph: X_valid,
                                           M_ph: M_valid
                                       })
            print('valid mse score:', valid_mse_score)
Example #30
def model():
    model = tg.Sequential()
    model.add(ResNet(num_blocks=1))
    model.add(Sigmoid())
    return model