Example no. 1
def test_VGG19():
    seq = tg.Sequential()
    seq.add(VGG19())
    seq.add(Flatten())
    seq.add(Linear(this_dim=nclass))
    seq.add(Softmax())
    train(seq)
Example no. 2
def model():
    with tf.name_scope('MnistCNN'):
        seq = tg.Sequential()
        seq.add(Conv2D(num_filters=32, kernel_size=(3, 3), stride=(1, 1), padding='SAME'))
        seq.add(BatchNormalization())
        seq.add(RELU())

        seq.add(MaxPooling(poolsize=(2, 2), stride=(2,2), padding='SAME'))
        seq.add(LRN())

        seq.add(Conv2D(num_filters=64, kernel_size=(3, 3), stride=(1, 1), padding='SAME'))
        seq.add(BatchNormalization())
        seq.add(RELU())

        seq.add(MaxPooling(poolsize=(2, 2), stride=(2,2), padding='SAME'))
        seq.add(LRN())
        seq.add(Flatten())
        seq.add(Linear(128))
        seq.add(BatchNormalization())
        seq.add(Tanh())
        seq.add(Dropout(0.8))
        seq.add(Linear(256))
        seq.add(BatchNormalization())
        seq.add(Tanh())
        seq.add(Dropout(0.8))
        seq.add(Linear(10))
        seq.add(Softmax())
    return seq
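
The function above only builds the Sequential container. Wiring it to placeholders follows the same train_fprop/test_fprop pattern used by the training examples further down; a minimal sketch (the 28x28x1 MNIST input shape is an assumption taken from the name scope, and entropy is the tensorgraph cost helper those examples use):

X_ph = tf.placeholder('float32', [None, 28, 28, 1])  # assumed MNIST input shape
y_ph = tf.placeholder('float32', [None, 10])
seq = model()
y_train_sb = seq.train_fprop(X_ph)  # training graph: dropout and BN in train mode
y_test_sb = seq.test_fprop(X_ph)    # inference graph sharing the same weights
train_cost_sb = entropy(y_ph, y_train_sb)
optimizer = tf.train.AdamOptimizer(0.001)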
Example no. 3
def test_ResNetBase():
    seq = tg.Sequential()
    seq.add(ResNetBase(config=[1,1,1,1]))
    seq.add(MaxPooling(poolsize=(1,1), stride=(1,1), padding='VALID'))
    seq.add(Flatten())
    seq.add(Linear(this_dim=nclass))
    seq.add(Softmax())
    train(seq)
Example no. 4
def test_DenseNet():
    seq = tg.Sequential()
    seq.add(DenseNet(ndense=1, growth_rate=1, nlayer1blk=1))
    seq.add(MaxPooling(poolsize=(3,3), stride=(1,1), padding='VALID'))
    seq.add(Flatten())
    seq.add(Linear(this_dim=nclass))
    seq.add(Softmax())
    train(seq)
Example no. 5
def test_UNet():
    seq = tg.Sequential()
    seq.add(UNet(input_shape=(h, w)))
    seq.add(MaxPooling(poolsize=(3,3), stride=(1,1), padding='VALID'))
    seq.add(Flatten())
    seq.add(Linear(this_dim=nclass))
    seq.add(Softmax())
    train(seq)
Example no. 6
    def __init__(self, nclass, h, w, c):
        layers = []
        layers.append(AllCNN(nclass, h, w, c))
        layers.append(Linear(nclass, nclass))
        layers.append(Softmax())
        self.startnode = tg.StartNode(input_vars=[None])
        model_hn = tg.HiddenNode(prev=[self.startnode], layers=layers)
        self.endnode = tg.EndNode(prev=[model_hn])
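
Because the constructor leaves input_vars as [None], a caller has to rebind the start node and wrap the pair in tg.Graph, as the graph-based examples further down do. A hedged sketch (WrapperModel stands in for whatever class owns this __init__, and the rebinding step is an assumption about how these node-based models are consumed):

net = WrapperModel(nclass=10, h=32, w=32, c=3)  # hypothetical class name and sizes
X_ph = tf.placeholder('float32', [None, 32, 32, 3])
net.startnode.input_vars = [X_ph]  # rebind the None placeholder (assumption)
graph = tg.Graph(start=[net.startnode], end=[net.endnode])
y_train_sb = graph.train_fprop()[0]
y_test_sb = graph.test_fprop()[0]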
Example no. 7
def train_with_Resnet():
    from tensorgraph.trainobject import train as mytrain
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        X_train, y_train, X_test, y_test = Cifar10(contrast_normalize=False,
                                                   whiten=False)
        _, h, w, c = X_train.shape
        _, nclass = y_train.shape
        print('X max', np.max(X_train))
        print('X min', np.min(X_train))
        seq = tg.Sequential()
        id1 = IdentityBlock(input_channels=c,
                            input_shape=(h, w),
                            nlayers=4,
                            filters=[32, 64])
        seq.add(id1)
        trans1 = TransitionLayer(input_channels=id1.output_channels,
                                 input_shape=id1.output_shape)
        seq.add(trans1)

        id2 = IdentityBlock(input_channels=trans1.output_channels,
                            input_shape=trans1.output_shape,
                            nlayers=4,
                            filters=[64, 128])
        seq.add(id2)
        trans2 = TransitionLayer(input_channels=id2.output_channels,
                                 input_shape=id2.output_shape)
        seq.add(trans2)
        seq.add(Flatten())
        ldim = int(trans2.output_channels * np.prod(trans2.output_shape))
        seq.add(Linear(ldim, nclass))
        seq.add(Softmax())

        X_ph = tf.placeholder('float32', [None, h, w, c])
        y_ph = tf.placeholder('float32', [None, nclass])

        y_train_sb = seq.train_fprop(X_ph)
        y_test_sb = seq.test_fprop(X_ph)
        train_cost_sb = entropy(y_ph, y_train_sb)
        optimizer = tf.train.AdamOptimizer(0.001)
        test_accu_sb = accuracy(y_ph, y_test_sb)

        mytrain(session=sess,
                feed_dict={
                    X_ph: X_train,
                    y_ph: y_train
                },
                train_cost_sb=train_cost_sb,
                valid_cost_sb=-test_accu_sb,
                optimizer=optimizer,
                epoch_look_back=5,
                max_epoch=100,
                percent_decrease=0,
                train_valid_ratio=[5, 1],
                batchsize=64,
                randomize_split=False)
Example no. 8
def model3D(img=(84, 256, 256)):
    with tf.name_scope('WMH'):
        seq = tg.Sequential()
        convStride = (1, 1, 1)
        poolStride = (2, 2, 2)
        seq.add(
            Conv3D(input_channels=1,
                   num_filters=10,
                   kernel_size=(3, 3, 3),
                   stride=convStride,
                   padding='SAME'))
        seq.add(TFBatchNormalization(name='b1'))
        seq.add(
            MaxPool3D(poolsize=(2, 2, 2), stride=poolStride, padding='SAME'))
        layerSize1 = updateConvLayerSize(img, poolStride)
        #print("layer1: "+str(layerSize1))
        seq.add(RELU())
        seq.add(
            Conv3D(input_channels=10,
                   num_filters=20,
                   kernel_size=(3, 3, 3),
                   stride=convStride,
                   padding='SAME'))
        seq.add(TFBatchNormalization(name='b2'))
        #layerSize2 = updateConvLayerSize(layerSize1,convStride)
        #print("layer1: "+str(layerSize2))
        seq.add(RELU())
        seq.add(
            Conv3D_Tranpose1(input_channels=20,
                             num_filters=10,
                             output_shape=layerSize1,
                             kernel_size=(3, 3, 3),
                             stride=convStride,
                             padding='SAME'))
        seq.add(RELU())
        seq.add(
            Conv3D_Tranpose1(input_channels=10,
                             num_filters=3,
                             output_shape=img,
                             kernel_size=(3, 3, 3),
                             stride=(2, 2, 2),
                             padding='SAME'))
        ##
        seq.add(RELU())
        seq.add(
            Conv3D(input_channels=3,
                   num_filters=2,
                   kernel_size=(3, 3, 3),
                   stride=convStride,
                   padding='SAME'))
        ##
        seq.add(Softmax())
        #seq.add(Sigmoid())
    return seq
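
Conv3D consumes rank-5 input, so driving this model needs a 5-D placeholder. A usage sketch (the NDHWC layout with a single input channel is an assumption inferred from input_channels=1 and the default img=(84, 256, 256)):

X_ph = tf.placeholder('float32', [None, 84, 256, 256, 1])  # assumed NDHWC layout
seq = model3D()
y_train_sb = seq.train_fprop(X_ph)  # voxelwise two-class softmax output
y_test_sb = seq.test_fprop(X_ph)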
Example no. 9
def test_VGG19():
    seq = tg.Sequential()
    vgg = VGG19(input_channels=c, input_shape=(h, w))
    print('output channels:', vgg.output_channels)
    print('output shape:', vgg.output_shape)
    out_dim = np.prod(vgg.output_shape) * vgg.output_channels
    seq.add(vgg)
    seq.add(Flatten())
    seq.add(Linear(int(out_dim), nclass))
    seq.add(Softmax())
    train(seq)
Example no. 10
def train_with_Densenet():
    from tensorgraph.trainobject import train as mytrain
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        X_train, y_train, X_test, y_test = Cifar10(contrast_normalize=False,
                                                   whiten=False)
        _, h, w, c = X_train.shape
        _, nclass = y_train.shape
        print('X max', np.max(X_train))
        print('X min', np.min(X_train))
        seq = tg.Sequential()
        dense = DenseNet(input_channels=c,
                         input_shape=(h, w),
                         ndense=3,
                         growth_rate=4,
                         nlayer1blk=4)
        seq.add(dense)
        seq.add(Flatten())
        ldim = dense.output_channels
        seq.add(Linear(ldim, nclass))
        seq.add(Softmax())

        X_ph = tf.placeholder('float32', [None, h, w, c])
        y_ph = tf.placeholder('float32', [None, nclass])

        y_train_sb = seq.train_fprop(X_ph)
        y_test_sb = seq.test_fprop(X_ph)
        train_cost_sb = entropy(y_ph, y_train_sb)
        optimizer = tf.train.AdamOptimizer(0.001)
        test_accu_sb = accuracy(y_ph, y_test_sb)

        print(tf.global_variables())
        print('..total number of global variables: {}'.format(
            len(tf.global_variables())))
        count = 0
        for var in tf.global_variables():
            count += int(np.prod(var.get_shape()))
        print('..total number of global parameters: {}'.format(count))

        mytrain(session=sess,
                feed_dict={
                    X_ph: X_train,
                    y_ph: y_train
                },
                train_cost_sb=train_cost_sb,
                valid_cost_sb=-test_accu_sb,
                optimizer=optimizer,
                epoch_look_back=5,
                max_epoch=100,
                percent_decrease=0,
                train_valid_ratio=[5, 1],
                batchsize=64,
                randomize_split=False)
Example no. 11
def test_DenseNet():
    seq = tg.Sequential()
    model = DenseNet(input_channels=c, input_shape=(h, w), ndense=1, growth_rate=1, nlayer1blk=1)
    print('output channels:', model.output_channels)
    print('output shape:', model.output_shape)
    seq.add(model)
    seq.add(MaxPooling(poolsize=tuple(model.output_shape), stride=(1,1), padding='VALID'))
    seq.add(Flatten())
    seq.add(Linear(model.output_channels, nclass))
    seq.add(Softmax())
    train(seq)
Example no. 12
def test_UNet():
    seq = tg.Sequential()
    model = UNet(input_channels=c, input_shape=(h, w))
    print('output channels:', model.output_channels)
    print('output shape:', model.output_shape)
    out_dim = np.prod(model.output_shape) * model.output_channels
    seq.add(model)
    seq.add(MaxPooling(poolsize=tuple(model.output_shape), stride=(1,1), padding='VALID'))
    seq.add(Flatten())
    seq.add(Linear(model.output_channels, nclass))
    seq.add(Softmax())
    train(seq)
Example no. 13
def model(word_len, sent_len, nclass):
    unicode_size = 1000
    ch_embed_dim = 20

    h, w = valid(ch_embed_dim,
                 word_len,
                 stride=(1, 1),
                 kernel_size=(ch_embed_dim, 5))
    h, w = valid(h, w, stride=(1, 1), kernel_size=(1, 5))
    h, w = valid(h, w, stride=(1, 2), kernel_size=(1, 5))
    conv_out_dim = int(h * w * 60)

    X_ph = tf.placeholder('int32', [None, sent_len, word_len])
    input_sn = tg.StartNode(input_vars=[X_ph])
    charcnn_hn = tg.HiddenNode(prev=[input_sn],
                               layers=[
                                   Reshape(shape=(-1, word_len)),
                                   Embedding(cat_dim=unicode_size,
                                             encode_dim=ch_embed_dim,
                                             zero_pad=True),
                                   Reshape(shape=(-1, ch_embed_dim, word_len,
                                                  1)),
                                   Conv2D(input_channels=1,
                                          num_filters=20,
                                          padding='VALID',
                                          kernel_size=(ch_embed_dim, 5),
                                          stride=(1, 1)),
                                   RELU(),
                                   Conv2D(input_channels=20,
                                          num_filters=40,
                                          padding='VALID',
                                          kernel_size=(1, 5),
                                          stride=(1, 1)),
                                   RELU(),
                                   Conv2D(input_channels=40,
                                          num_filters=60,
                                          padding='VALID',
                                          kernel_size=(1, 5),
                                          stride=(1, 2)),
                                   RELU(),
                                   Flatten(),
                                   Linear(conv_out_dim, nclass),
                                   Reshape((-1, sent_len, nclass)),
                                   ReduceSum(1),
                                   Softmax()
                               ])

    output_en = tg.EndNode(prev=[charcnn_hn])
    graph = tg.Graph(start=[input_sn], end=[output_en])
    y_train_sb = graph.train_fprop()[0]
    y_test_sb = graph.test_fprop()[0]

    return X_ph, y_train_sb, y_test_sb
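
The returned symbolic outputs drop straight into the cost/optimizer pattern shared by the training examples; a short sketch (the sizes passed to model are hypothetical placeholders, not values from the source):

X_ph, y_train_sb, y_test_sb = model(word_len=20, sent_len=50, nclass=10)  # hypothetical sizes
y_ph = tf.placeholder('float32', [None, 10])
train_cost_sb = entropy(y_ph, y_train_sb)
optimizer = tf.train.AdamOptimizer(0.001)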
Example no. 14
def model(nclass, h, w, c):
    with tf.name_scope('Cifar10AllCNN'):
        seq = tg.Sequential()
        seq.add(Conv2D(input_channels=c, num_filters=96, kernel_size=(3, 3), stride=(1, 1), padding='SAME'))
        seq.add(RELU())
        seq.add(TFBatchNormalization(name='b1'))
        h, w = same(in_height=h, in_width=w, stride=(1,1), kernel_size=(3,3))

        seq.add(Conv2D(input_channels=96, num_filters=96, kernel_size=(3, 3), stride=(1, 1), padding='SAME'))
        seq.add(RELU())
        h, w = same(in_height=h, in_width=w, stride=(1,1), kernel_size=(3,3))
        seq.add(Dropout(0.5))

        seq.add(Conv2D(input_channels=96, num_filters=96, kernel_size=(3, 3), stride=(2, 2), padding='SAME'))
        seq.add(RELU())
        seq.add(TFBatchNormalization(name='b3'))
        h, w = same(in_height=h, in_width=w, stride=(2,2), kernel_size=(3,3))

        seq.add(Conv2D(input_channels=96, num_filters=192, kernel_size=(3, 3), stride=(1, 1), padding='SAME'))
        seq.add(RELU())
        h, w = same(in_height=h, in_width=w, stride=(1,1), kernel_size=(3,3))
        seq.add(Dropout(0.5))

        seq.add(Conv2D(input_channels=192, num_filters=192, kernel_size=(3, 3), stride=(1, 1), padding='SAME'))
        seq.add(RELU())
        seq.add(TFBatchNormalization(name='b5'))
        h, w = same(in_height=h, in_width=w, stride=(1,1), kernel_size=(3,3))

        seq.add(Conv2D(input_channels=192, num_filters=192, kernel_size=(3, 3), stride=(2, 2), padding='SAME'))
        seq.add(RELU())
        h, w = same(in_height=h, in_width=w, stride=(2,2), kernel_size=(3,3))
        seq.add(Dropout(0.5))

        seq.add(Conv2D(input_channels=192, num_filters=192, kernel_size=(3, 3), stride=(1, 1), padding='SAME'))
        seq.add(RELU())
        seq.add(TFBatchNormalization(name='b7'))
        h, w = same(in_height=h, in_width=w, stride=(1,1), kernel_size=(3,3))

        seq.add(Conv2D(input_channels=192, num_filters=192, kernel_size=(1, 1), stride=(1, 1), padding='SAME'))
        seq.add(RELU())
        h, w = same(in_height=h, in_width=w, stride=(1,1), kernel_size=(1,1))
        seq.add(Dropout(0.5))

        seq.add(Conv2D(input_channels=192, num_filters=nclass, kernel_size=(1, 1), stride=(1, 1), padding='SAME'))
        seq.add(RELU())
        seq.add(TFBatchNormalization(name='b9'))
        h, w = same(in_height=h, in_width=w, stride=(1,1), kernel_size=(1,1))

        seq.add(AvgPooling(poolsize=(h, w), stride=(1,1), padding='VALID'))
        seq.add(Flatten())
        seq.add(Softmax())
    return seq
Example no. 15
    def __init__(self, nclass, h, w, c):
        layers = []
        template = TemplateModel(nclass, h, w, c)
        layers.append(template)
        layers.append(Flatten())
        layers.append(Linear(template.output_dim, 200))
        layers.append(RELU())
        layers.append(Linear(200, nclass))
        layers.append(Softmax())

        self.startnode = tg.StartNode(input_vars=[None])
        model_hn = tg.HiddenNode(prev=[self.startnode], layers=layers)
        self.endnode = tg.EndNode(prev=[model_hn])
Example no. 16
    def __init__(self, nclass, h, w, c):
        layers = []
        identityblk = IdentityBlock(input_channels=c,
                                    input_shape=[h, w],
                                    nlayers=10)
        layers.append(identityblk)

        layers.append(
            Conv2D(input_channels=c,
                   num_filters=16,
                   kernel_size=(3, 3),
                   stride=(1, 1),
                   padding='SAME'))
        layers.append(RELU())
        h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(3, 3))
        layers.append(BatchNormalization(input_shape=[h, w, 16]))

        denseblk = DenseBlock(input_channels=16,
                              input_shape=[h, w],
                              growth_rate=4,
                              nlayers=4)
        layers.append(denseblk)

        layers.append(
            Conv2D(input_channels=denseblk.output_channels,
                   num_filters=32,
                   kernel_size=(3, 3),
                   stride=(2, 2),
                   padding='SAME'))
        layers.append(RELU())
        h, w = same(in_height=h, in_width=w, stride=(2, 2), kernel_size=(3, 3))
        layers.append(Dropout(0.5))

        layers.append(
            Conv2D(input_channels=32,
                   num_filters=nclass,
                   kernel_size=(1, 1),
                   stride=(1, 1),
                   padding='SAME'))
        layers.append(RELU())
        h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(1, 1))
        layers.append(BatchNormalization(input_shape=[h, w, nclass]))

        layers.append(
            AvgPooling(poolsize=(h, w), stride=(1, 1), padding='VALID'))
        layers.append(Flatten())
        layers.append(Softmax())

        self.startnode = tg.StartNode(input_vars=[None])
        model_hn = tg.HiddenNode(prev=[self.startnode], layers=layers)
        self.endnode = tg.EndNode(prev=[model_hn])
Example no. 17
def test_ResNetBase():
    seq = tg.Sequential()
    model = ResNetBase(input_channels=c, input_shape=(h, w), config=[1,1,1,1])
    print('output channels:', model.output_channels)
    print('output shape:', model.output_shape)
    seq.add(model)
    seq.add(MaxPooling(poolsize=tuple(model.output_shape), stride=(1,1), padding='VALID'))
    outshape = valid_nd(model.output_shape, kernel_size=model.output_shape, stride=(1,1))
    print(outshape)
    out_dim = model.output_channels
    seq.add(Flatten())
    seq.add(Linear(int(out_dim), nclass))
    seq.add(Softmax())
    train(seq)
Example no. 18
    def __init__(self, nclass, h, w, c):
        layers = []
        model = UNet(input_channels=c, input_shape=(h, w))
        layers.append(model)
        layers.append(
            MaxPooling(poolsize=tuple(model.output_shape),
                       stride=(1, 1),
                       padding='VALID'))
        layers.append(Flatten())
        layers.append(Linear(model.output_channels, nclass))
        layers.append(Softmax())
        self.startnode = tg.StartNode(input_vars=[None])
        model_hn = tg.HiddenNode(prev=[self.startnode], layers=layers)
        self.endnode = tg.EndNode(prev=[model_hn])
Example no. 19
def model():
    with tf.name_scope('MnistCNN'):
        seq = tg.Sequential()
        seq.add(
            Conv2D(input_channels=1,
                   num_filters=32,
                   kernel_size=(3, 3),
                   stride=(1, 1),
                   padding='SAME'))
        h, w = same(in_height=28,
                    in_width=28,
                    stride=(1, 1),
                    kernel_size=(3, 3))
        seq.add(BatchNormalization(input_shape=[h, w, 32]))
        seq.add(RELU())

        seq.add(MaxPooling(poolsize=(2, 2), stride=(2, 2), padding='SAME'))
        h, w = same(in_height=h, in_width=w, stride=(2, 2), kernel_size=(2, 2))
        seq.add(LRN())

        seq.add(
            Conv2D(input_channels=32,
                   num_filters=64,
                   kernel_size=(3, 3),
                   stride=(1, 1),
                   padding='SAME'))
        h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(3, 3))
        seq.add(BatchNormalization(input_shape=[h, w, 64]))
        seq.add(RELU())

        seq.add(MaxPooling(poolsize=(2, 2), stride=(2, 2), padding='SAME'))
        h, w = same(in_height=h, in_width=w, stride=(2, 2), kernel_size=(2, 2))
        seq.add(LRN())
        seq.add(Flatten())
        seq.add(Linear(int(h * w * 64), 128))
        seq.add(BatchNormalization(input_shape=[128]))
        seq.add(Tanh())
        seq.add(Dropout(0.8))
        seq.add(Linear(128, 256))
        seq.add(BatchNormalization(input_shape=[256]))
        seq.add(Tanh())
        seq.add(Dropout(0.8))
        seq.add(Linear(256, 10))
        seq.add(Softmax())
    return seq
Example no. 20
def train_with_VGG():
    from tensorgraph.trainobject import train as mytrain
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        X_train, y_train, X_test, y_test = Cifar10(contrast_normalize=False,
                                                   whiten=False)
        _, h, w, c = X_train.shape
        _, nclass = y_train.shape
        print('X max', np.max(X_train))
        print('X min', np.min(X_train))
        from tensorgraph.layers import VGG19
        seq = tg.Sequential()
        layer = VGG19(input_channels=c, input_shape=(h, w))
        seq.add(layer)
        seq.add(Flatten())
        seq.add(Linear(512, nclass))
        seq.add(Softmax())
        X_ph = tf.placeholder('float32', [None, h, w, c])
        y_ph = tf.placeholder('float32', [None, nclass])

        y_train_sb = seq.train_fprop(X_ph)
        y_test_sb = seq.test_fprop(X_ph)
        train_cost_sb = entropy(y_ph, y_train_sb)
        optimizer = tf.train.AdamOptimizer(0.001)
        test_accu_sb = accuracy(y_ph, y_test_sb)

        mytrain(session=sess,
                feed_dict={
                    X_ph: X_train,
                    y_ph: y_train
                },
                train_cost_sb=train_cost_sb,
                valid_cost_sb=-test_accu_sb,
                optimizer=optimizer,
                epoch_look_back=5,
                max_epoch=100,
                percent_decrease=0,
                train_valid_ratio=[5, 1],
                batchsize=64,
                randomize_split=False)
Example no. 21
    def discriminator(self):
        if not self.generator_called:
            raise Exception(
                'self.generator() has to be called first before self.discriminator()'
            )
        scope = 'Discriminator'
        with self.tf_graph.as_default():
            with tf.name_scope(scope):
                h1, w1 = valid(self.h,
                               self.w,
                               kernel_size=(5, 5),
                               stride=(1, 1))
                h2, w2 = valid(h1, w1, kernel_size=(5, 5), stride=(2, 2))
                h3, w3 = valid(h2, w2, kernel_size=(5, 5), stride=(2, 2))
                flat_dim = int(h3 * w3 * 32)

                dis_real_sn = tg.StartNode(input_vars=[self.real_ph])

                # fake_ph = tf.placeholder('float32', [None, self.h, self.w, 1], name='fake')
                # fake_sn = tg.StartNode(input_vars=[fake_ph])

                disc_hn = tg.HiddenNode(
                    prev=[dis_real_sn, self.gen_hn],
                    layers=[
                        Conv2D(input_channels=self.c,
                               num_filters=32,
                               kernel_size=(5, 5),
                               stride=(1, 1),
                               padding='VALID'),
                        TFBatchNormalization(name=scope + '/d1'),
                        # BatchNormalization(layer_type='conv', dim=32, short_memory=0.01),
                        LeakyRELU(),
                        Conv2D(input_channels=32,
                               num_filters=32,
                               kernel_size=(5, 5),
                               stride=(2, 2),
                               padding='VALID'),
                        TFBatchNormalization(name=scope + '/d2'),
                        # BatchNormalization(layer_type='conv', dim=32, short_memory=0.01),
                        LeakyRELU(),
                        Conv2D(input_channels=32,
                               num_filters=32,
                               kernel_size=(5, 5),
                               stride=(2, 2),
                               padding='VALID'),
                        TFBatchNormalization(name=scope + '/d3'),
                        # BatchNormalization(layer_type='conv', dim=32, short_memory=0.01),
                        LeakyRELU(),
                        #    Conv2D(input_channels=32, num_filters=32, kernel_size=(5,5), stride=(2,2), padding='VALID'),
                        #    RELU(),
                        Flatten(),
                        Linear(flat_dim, self.bottleneck_dim),
                        # BatchNormalization(layer_type='fc', dim=self.bottleneck_dim, short_memory=0.01),
                        TFBatchNormalization(name=scope + '/d4'),
                        LeakyRELU(),
                        # Dropout(0.5),
                    ])

                class_hn = tg.HiddenNode(prev=[disc_hn],
                                         layers=[
                                             Linear(self.bottleneck_dim,
                                                    self.nclass),
                                             Softmax()
                                         ])

                judge_hn = tg.HiddenNode(
                    prev=[disc_hn],
                    layers=[Linear(self.bottleneck_dim, 1),
                            Sigmoid()])

                real_class_en = tg.EndNode(prev=[class_hn])
                real_judge_en = tg.EndNode(prev=[judge_hn])

                fake_class_en = tg.EndNode(prev=[class_hn])
                fake_judge_en = tg.EndNode(prev=[judge_hn])

                graph = tg.Graph(start=[dis_real_sn],
                                 end=[real_class_en, real_judge_en])
                real_train = graph.train_fprop()
                real_valid = graph.test_fprop()

                graph = tg.Graph(start=[self.noise_sn, self.gen_real_sn],
                                 end=[fake_class_en, fake_judge_en])
                fake_train = graph.train_fprop()
                fake_valid = graph.test_fprop()

                dis_var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                                 scope=scope)

        return self.real_ph, real_train, real_valid, fake_train, fake_valid, dis_var_list
Example no. 22
def model_Inception_Resnet(img=(84, 256, 256)):
    with tf.name_scope('WMH'):
        seq = tg.Sequential()
        convStride = (1, 1, 1)
        poolStride = (2, 2, 2)
        kernelSize = (3, 3, 3)

        seq.add(
            Conv3D(input_channels=1,
                   num_filters=8,
                   kernel_size=(5, 5, 5),
                   stride=convStride,
                   padding='SAME'))
        #seq.add(TFBatchNormalization(name='b1'))
        seq.add(
            MaxPool3D(poolsize=(2, 2, 2), stride=poolStride, padding='SAME'))
        layerSize1 = updateConvLayerSize(img, poolStride)
        seq.add(RELU())

        seq.add(InceptionResnet_3D(8, type='v2_out8'))

        seq.add(
            Conv3D(input_channels=8,
                   num_filters=16,
                   kernel_size=kernelSize,
                   stride=convStride,
                   padding='SAME'))
        #seq.add(TFBatchNormalization(name='b2'))
        seq.add(
            MaxPool3D(poolsize=(2, 2, 2), stride=poolStride, padding='SAME'))
        layerSize2 = updateConvLayerSize(layerSize1, poolStride)
        seq.add(RELU())

        seq.add(InceptionResnet_3D(16, type='v1_out16'))
        seq.add(
            Conv3D(input_channels=16,
                   num_filters=16,
                   kernel_size=kernelSize,
                   stride=convStride,
                   padding='SAME'))
        seq.add(RELU())

        seq.add(
            Conv3D(input_channels=16,
                   num_filters=32,
                   kernel_size=kernelSize,
                   stride=convStride,
                   padding='SAME'))
        #seq.add(TFBatchNormalization(name='b3'))
        seq.add(
            MaxPool3D(poolsize=(2, 2, 2), stride=poolStride, padding='SAME'))
        #layerSize3 = updateConvLayerSize(layerSize2,poolStride)
        seq.add(RELU())

        seq.add(InceptionResnet_3D(32, type='v1_out16'))
        seq.add(
            Conv3D_Tranpose1(input_channels=16,
                             num_filters=16,
                             output_shape=layerSize2,
                             kernel_size=kernelSize,
                             stride=poolStride,
                             padding='SAME'))
        seq.add(RELU())

        seq.add(InceptionResnet_3D(16, type='v1_out16'))
        seq.add(
            Conv3D(input_channels=16,
                   num_filters=16,
                   kernel_size=kernelSize,
                   stride=convStride,
                   padding='SAME'))
        seq.add(RELU())

        seq.add(
            Conv3D_Tranpose1(input_channels=16,
                             num_filters=8,
                             output_shape=layerSize1,
                             kernel_size=kernelSize,
                             stride=poolStride,
                             padding='SAME'))
        seq.add(RELU())

        seq.add(InceptionResnet_3D(8, type='v2_out8'))
        seq.add(
            Conv3D(input_channels=8,
                   num_filters=8,
                   kernel_size=kernelSize,
                   stride=convStride,
                   padding='SAME'))
        seq.add(RELU())

        # num_filter=3 --> Background, WhiteMatter, Others
        seq.add(
            Conv3D_Tranpose1(input_channels=8,
                             num_filters=3,
                             output_shape=img,
                             kernel_size=kernelSize,
                             stride=poolStride,
                             padding='SAME'))
        ##
        seq.add(RELU())
        seq.add(
            Conv3D(input_channels=3,
                   num_filters=3,
                   kernel_size=(1, 1, 1),
                   stride=convStride,
                   padding='SAME'))
        ##
        seq.add(Softmax())
    return seq
Example no. 23
def Residual_UNET(input_ph, img=(84, 256, 256)):
    with tf.name_scope('WMH'):
        convStride = (1, 1, 1)
        poolStride = (2, 2, 2)
        kSize3 = (3, 3, 3)
        #kSize5 = (5,5,5)

        #x_dim = 50
        #component_dim = 100
        #batchsize = 32
        #learning_rate = 0.01
        #x_ph = tf.placeholder('float32', [None, x_dim])
        #start = StartNode(input_vars=[x_ph])
        #h1 = HiddenNode(prev=[start], layers=[Linear(x_dim, component_dim), Softmax()])
        #e1 = EndNode(prev=[h1], input_merge_mode=Sum())
        #e3 = EndNode(prev=[h1, h2, h3], input_merge_mode=Sum())

        start = StartNode(input_vars=[input_ph])

        Layer01 = [
            Conv3D(input_channels=1,
                   num_filters=8,
                   kernel_size=kSize3,
                   stride=convStride,
                   padding='SAME')
        ]
        #Layer01.append(RELU())

        LayerPool = MaxPool3D(poolsize=(2, 2, 2),
                              stride=poolStride,
                              padding='SAME')

        Layer02 = [LayerPool]
        #Layer02.append(Testing(1))
        Layer02.append(
            Conv3D(input_channels=8,
                   num_filters=16,
                   kernel_size=kSize3,
                   stride=convStride,
                   padding='SAME'))
        #Layer02.append(RELU())
        #Layer02.append(Testing(2))
        Layer02.append(ResidualBlock3D(16, 'L02'))
        #Layer02.append(Testing(3))
        layerSize1 = updateConvLayerSize(img, poolStride)

        Layer03 = [LayerPool]
        Layer03.append(
            Conv3D(input_channels=16,
                   num_filters=32,
                   kernel_size=kSize3,
                   stride=convStride,
                   padding='SAME'))
        #Layer03.append(RELU())
        Layer03.append(ResidualBlock3D(32, 'L03'))
        #layerSize2 = updateConvLayerSize(layerSize1,poolStride)
        Layer03.append(
            Conv3D_Tranpose1(input_channels=32,
                             num_filters=16,
                             output_shape=layerSize1,
                             kernel_size=kSize3,
                             stride=poolStride,
                             padding='SAME'))
        #Layer03.append(RELU())

        #Layer04 = [Conv3D(input_channels=32, num_filters=64, kernel_size=kSize5, stride=convStride, padding='SAME')]
        #Layer04.append(ResidualBlock3D(64,'L03'))

        conv8 = HiddenNode(prev=[start], layers=Layer01)

        resBlock16 = HiddenNode(prev=[conv8], layers=Layer02)

        resBlock32_16 = HiddenNode(prev=[resBlock16], layers=Layer03)
        residualLong16 = HiddenNode(prev=[resBlock32_16, resBlock16],
                                    input_merge_mode=Sum())

        Layer04 = [ResidualBlock3D(16, 'L04')]
        Layer04.append(
            Conv3D_Tranpose1(input_channels=16,
                             num_filters=8,
                             output_shape=img,
                             kernel_size=kSize3,
                             stride=poolStride,
                             padding='SAME'))
        Layer04.append(RELU())

        resBlock16_8 = HiddenNode(prev=[residualLong16], layers=Layer04)
        residualLong8 = HiddenNode(prev=[resBlock16_8, conv8],
                                   input_merge_mode=Sum())

        Layer05 = [ResidualBlock3D(8, 'L05')]
        Layer05.append(
            Conv3D(input_channels=8,
                   num_filters=2,
                   kernel_size=kSize3,
                   stride=convStride,
                   padding='SAME'))
        Layer05.append(Softmax())

        resBlock8_2 = HiddenNode(prev=[residualLong8], layers=Layer05)

        endNode = EndNode(prev=[resBlock8_2], input_merge_mode=NoChange())

        graph = Graph(start=[start], end=[endNode])
        #o1, o2, o3 = graph.train_fprop()
        #o1_mse = tf.reduce_mean((y1_ph - o1)**2)
        #o2_mse = tf.reduce_mean((y2_ph - o2)**2)

    return graph
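
Unlike the Sequential builders, this function returns the tg.Graph itself, and the commented-out lines hint at how outputs were meant to be fetched. A minimal sketch (the rank-5 single-channel NDHWC input is an assumption, matching the other WMH models):

X_ph = tf.placeholder('float32', [None, 84, 256, 256, 1])  # assumed NDHWC input
graph = Residual_UNET(X_ph)
y_train_sb = graph.train_fprop()[0]  # single EndNode, so the first output is the prediction
y_test_sb = graph.test_fprop()[0]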
Example no. 24
    def __init__(self, nclass, h, w, c):
        layers = []
        layers.append(
            Conv2D(input_channels=c,
                   num_filters=96,
                   kernel_size=(3, 3),
                   stride=(1, 1),
                   padding='SAME'))
        layers.append(RELU())
        h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(3, 3))
        layers.append(BatchNormalization(input_shape=[h, w, 96]))

        layers.append(
            Conv2D(input_channels=96,
                   num_filters=96,
                   kernel_size=(3, 3),
                   stride=(1, 1),
                   padding='SAME'))
        layers.append(RELU())
        h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(3, 3))
        layers.append(Dropout(0.5))

        layers.append(
            Conv2D(input_channels=96,
                   num_filters=96,
                   kernel_size=(3, 3),
                   stride=(2, 2),
                   padding='SAME'))
        layers.append(RELU())
        h, w = same(in_height=h, in_width=w, stride=(2, 2), kernel_size=(3, 3))
        layers.append(BatchNormalization(input_shape=[h, w, 96]))

        layers.append(
            Conv2D(input_channels=96,
                   num_filters=192,
                   kernel_size=(3, 3),
                   stride=(1, 1),
                   padding='SAME'))
        layers.append(RELU())
        h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(3, 3))
        layers.append(Dropout(0.5))

        layers.append(
            Conv2D(input_channels=192,
                   num_filters=192,
                   kernel_size=(3, 3),
                   stride=(1, 1),
                   padding='SAME'))
        layers.append(RELU())
        h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(3, 3))
        layers.append(BatchNormalization(input_shape=[h, w, 192]))

        layers.append(
            Conv2D(input_channels=192,
                   num_filters=192,
                   kernel_size=(3, 3),
                   stride=(2, 2),
                   padding='SAME'))
        layers.append(RELU())
        h, w = same(in_height=h, in_width=w, stride=(2, 2), kernel_size=(3, 3))
        layers.append(Dropout(0.5))

        layers.append(
            Conv2D(input_channels=192,
                   num_filters=192,
                   kernel_size=(3, 3),
                   stride=(1, 1),
                   padding='SAME'))
        layers.append(RELU())
        h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(3, 3))
        layers.append(BatchNormalization(input_shape=[h, w, 192]))

        layers.append(
            Conv2D(input_channels=192,
                   num_filters=192,
                   kernel_size=(1, 1),
                   stride=(1, 1),
                   padding='SAME'))
        layers.append(RELU())
        h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(1, 1))
        layers.append(Dropout(0.5))

        layers.append(
            Conv2D(input_channels=192,
                   num_filters=nclass,
                   kernel_size=(1, 1),
                   stride=(1, 1),
                   padding='SAME'))
        layers.append(RELU())
        h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(1, 1))
        layers.append(BatchNormalization(input_shape=[h, w, nclass]))

        layers.append(
            AvgPooling(poolsize=(h, w), stride=(1, 1), padding='VALID'))
        layers.append(Flatten())
        layers.append(Softmax())
        self.startnode = tg.StartNode(input_vars=[None])
        model_hn = tg.HiddenNode(prev=[self.startnode], layers=layers)
        self.endnode = tg.EndNode(prev=[model_hn])
Example no. 25
def model3D_2(img=(84, 256, 256)):
    with tf.name_scope('WMH_2Chan_Input'):
        seq = tg.Sequential()
        convStride = (1, 1, 1)
        poolStride = (2, 2, 2)
        seq.add(
            Conv3D(input_channels=1,
                   num_filters=8,
                   kernel_size=(5, 5, 5),
                   stride=convStride,
                   padding='SAME'))
        seq.add(TFBatchNormalization(name='b1'))
        seq.add(
            MaxPool3D(poolsize=(2, 2, 2), stride=poolStride, padding='SAME'))
        layerSize1 = updateConvLayerSize(img, poolStride)
        seq.add(RELU())

        seq.add(
            Conv3D(input_channels=8,
                   num_filters=16,
                   kernel_size=(3, 3, 3),
                   stride=convStride,
                   padding='SAME'))
        seq.add(TFBatchNormalization(name='b2'))

        ## Extra MaxPool
        #seq.add(MaxPool3D(poolsize=(2,2,2), stride=poolStride, padding='SAME'))
        #layerSize2 = updateConvLayerSize(layerSize1,convStride)
        #seq.add(RELU())
        ## Extra Conv
        #seq.add(Conv3D(input_channels=16, num_filters=16, kernel_size=(3,3,3), stride=convStride, padding='SAME'))
        #seq.add(TFBatchNormalization(name='b3'))

        seq.add(
            Conv3D_Tranpose1(input_channels=16,
                             num_filters=8,
                             output_shape=layerSize1,
                             kernel_size=(3, 3, 3),
                             stride=convStride,
                             padding='SAME'))
        seq.add(TFBatchNormalization(name='b4'))
        seq.add(RELU())
        seq.add(
            Conv3D_Tranpose1(input_channels=8,
                             num_filters=2,
                             output_shape=img,
                             kernel_size=(3, 3, 3),
                             stride=poolStride,
                             padding='SAME'))
        #seq.add(TFBatchNormalization(name='b5'))
        #seq.add(RELU())

        #seq.add(Conv3D(input_channels=2, num_filters=2, kernel_size=(1,1,1), stride=convStride, padding='SAME'))
        ##
        ##
        #seq.add(RELU())
        #seq.add(Conv3D(input_channels=2, num_filters=2, kernel_size=(3,3,3), stride=convStride, padding='SAME'))
        #        seq.add(RELU())
        #        seq.add(Conv3D(input_channels=3, num_filters=3, kernel_size=(3,3,3), stride=convStride, padding='SAME'))
        #        seq.add(RELU())
        #        seq.add(Conv3D(input_channels=3, num_filters=3, kernel_size=(3,3,3), stride=convStride, padding='SAME'))
        #
        ##
        seq.add(Softmax())
        #seq.add(Sigmoid())
    return seq
Example no. 26
File: gan.py Project: Shirlly/GAN
    def discriminator(self):
        if not self.generator_called:
            raise Exception(
                'self.generator() has to be called first before self.discriminator()'
            )
        scope = 'Discriminator'
        with self.tf_graph.as_default():
            with tf.name_scope(scope):
                h1, w1 = valid(self.h,
                               self.w,
                               kernel_size=(5, 5),
                               stride=(1, 1))
                h2, w2 = valid(h1, w1, kernel_size=(5, 5), stride=(2, 2))
                h3, w3 = valid(h2, w2, kernel_size=(5, 5), stride=(2, 2))
                flat_dim = int(h3 * w3 * 32)
                real_ph = tf.placeholder('float32', [None, self.h, self.w, 1],
                                         name='real')
                real_sn = tg.StartNode(input_vars=[real_ph])

                # fake_ph = tf.placeholder('float32', [None, self.h, self.w, 1], name='fake')
                # fake_sn = tg.StartNode(input_vars=[fake_ph])

                disc_hn = tg.HiddenNode(
                    prev=[real_sn, self.gen_hn],
                    layers=[
                        Conv2D(input_channels=1,
                               num_filters=32,
                               kernel_size=(5, 5),
                               stride=(1, 1),
                               padding='VALID'),
                        TFBatchNormalization(name=scope + '/c1'),
                        LeakyRELU(),
                        Conv2D(input_channels=32,
                               num_filters=32,
                               kernel_size=(5, 5),
                               stride=(2, 2),
                               padding='VALID'),
                        TFBatchNormalization(name=scope + '/c2'),
                        LeakyRELU(),
                        Conv2D(input_channels=32,
                               num_filters=32,
                               kernel_size=(5, 5),
                               stride=(2, 2),
                               padding='VALID'),
                        TFBatchNormalization(name=scope + '/c3'),
                        LeakyRELU(),
                        #    Conv2D(input_channels=32, num_filters=32, kernel_size=(5,5), stride=(2,2), padding='VALID'),
                        #    RELU(),
                        Flatten(),
                        Linear(flat_dim, self.bottleneck_dim),
                        TFBatchNormalization(name=scope + '/l1'),
                        LeakyRELU(),
                        # Dropout(0.5),
                    ])

                class_hn = tg.HiddenNode(prev=[disc_hn],
                                         layers=[
                                             Linear(self.bottleneck_dim,
                                                    self.nclass),
                                             Softmax()
                                         ])

                judge_hn = tg.HiddenNode(
                    prev=[disc_hn],
                    layers=[
                        Linear(self.bottleneck_dim, 1),
                        #  Sigmoid()
                    ])

                real_class_en = tg.EndNode(prev=[class_hn])
                real_judge_en = tg.EndNode(prev=[judge_hn])

                fake_class_en = tg.EndNode(prev=[class_hn])
                fake_judge_en = tg.EndNode(prev=[judge_hn])

                graph = tg.Graph(start=[real_sn],
                                 end=[real_class_en, real_judge_en])

                real_train = graph.train_fprop()
                real_valid = graph.test_fprop()
                # dis_var_list = graph.variables
                # for var in dis_var_list:
                # print var.name

                graph = tg.Graph(start=[self.noise_sn, self.y_sn],
                                 end=[fake_class_en, fake_judge_en])
                fake_train = graph.train_fprop()
                fake_valid = graph.test_fprop()

                # print('========')
                # for var in graph.variables:
                # print var.name

                dis_var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                                 scope=scope)
                # for var in dis_var_list:
                #     print(var.name)
                #
                # print('=========')
                # for var in tf.global_variables():
                #     print(var.name)
                # import pdb; pdb.set_trace()
                # print()

            # graph = tg.Graph(start=[G_sn], end=[class_en, judge_en])
            # class_train_sb, judge_train_sb = graph.train_fprop() # symbolic outputs
            # class_test_sb, judge_test_sb = graph.test_fprop() # symbolic outputs

        return real_ph, real_train, real_valid, fake_train, fake_valid, dis_var_list
Example no. 27
File: wgan.py Project: Shirlly/GAN
    def discriminator(self):
        if not self.generator_called:
            raise Exception(
                'self.generator() has to be called first before self.discriminator()'
            )
        scope = 'Discriminator'
        with self.tf_graph.as_default():
            with tf.name_scope(scope):
                h1, w1 = valid(self.h,
                               self.w,
                               kernel_size=(5, 5),
                               stride=(1, 1))
                h2, w2 = valid(h1, w1, kernel_size=(5, 5), stride=(2, 2))
                h3, w3 = valid(h2, w2, kernel_size=(5, 5), stride=(2, 2))
                flat_dim = int(h3 * w3 * 32)

                h1, w1 = valid(self.char_embed_dim,
                               self.word_len,
                               kernel_size=(self.char_embed_dim, 3),
                               stride=(1, 1))
                print('h1:{}, w1:{}'.format(h1, w1))
                h2, w2 = valid(h1, w1, kernel_size=(1, 3), stride=(1, 1))
                print('h2:{}, w2:{}'.format(h2, w2))
                h3, w3 = valid(h2, w2, kernel_size=(1, 3), stride=(1, 1))
                print('h3:{}, w3:{}'.format(h3, w3))
                # h4, w4 = valid(h3, w3, kernel_size=(1,6), stride=(1,1))
                # print('h4:{}, w4:{}'.format(h4, w4))
                # hf, wf = h4, w4
                hf, wf = h3, w3
                n_filters = 100

                real_sn = tg.StartNode(input_vars=[self.real_ph])

                real_hn = tg.HiddenNode(prev=[real_sn],
                                        layers=[
                                            OneHot(self.char_embed_dim),
                                            Transpose(perm=[0, 3, 2, 1])
                                        ])

                disc_hn = tg.HiddenNode(
                    prev=[real_hn, self.gen_hn],
                    layers=[
                        Conv2D(input_channels=self.sent_len,
                               num_filters=100,
                               kernel_size=(self.char_embed_dim, 3),
                               stride=(1, 1),
                               padding='VALID'),
                        TFBatchNormalization(name=scope + '/d1'),
                        LeakyRELU(),
                        Conv2D(input_channels=100,
                               num_filters=100,
                               kernel_size=(1, 3),
                               stride=(1, 1),
                               padding='VALID'),
                        TFBatchNormalization(name=scope + '/d2'),
                        LeakyRELU(),
                        Conv2D(input_channels=100,
                               num_filters=100,
                               kernel_size=(1, 3),
                               stride=(1, 1),
                               padding='VALID'),
                        TFBatchNormalization(name=scope + '/d3'),
                        LeakyRELU(),
                        # Conv2D(input_channels=32, num_filters=128, kernel_size=(1,6), stride=(1,1), padding='VALID'),
                        # RELU(),
                        Flatten(),
                        Linear(int(hf * wf * n_filters), self.bottleneck_dim),
                        TFBatchNormalization(name=scope + '/d4'),
                        LeakyRELU(),
                    ])

                class_hn = tg.HiddenNode(prev=[disc_hn],
                                         layers=[
                                             Linear(self.bottleneck_dim,
                                                    self.nclass),
                                             Softmax()
                                         ])

                judge_hn = tg.HiddenNode(
                    prev=[disc_hn],
                    layers=[
                        Linear(self.bottleneck_dim, 1),
                        #  Sigmoid()
                    ])

                real_class_en = tg.EndNode(prev=[class_hn])
                real_judge_en = tg.EndNode(prev=[judge_hn])

                fake_class_en = tg.EndNode(prev=[class_hn])
                fake_judge_en = tg.EndNode(prev=[judge_hn])

                graph = tg.Graph(start=[real_sn],
                                 end=[real_class_en, real_judge_en])

                real_train = graph.train_fprop()
                real_valid = graph.test_fprop()

                graph = tg.Graph(start=[self.noise_sn],
                                 end=[fake_class_en, fake_judge_en])
                fake_train = graph.train_fprop()
                fake_valid = graph.test_fprop()

                dis_var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                                 scope=scope)

        return self.real_ph, real_train, real_valid, fake_train, fake_valid, dis_var_list
Example no. 28
    def generator(self):
        self.generator_called = True
        with self.tf_graph.as_default():
            scope = 'Generator'
            with tf.name_scope(scope):
                h1, w1 = valid(self.h,
                               self.w,
                               kernel_size=(5, 5),
                               stride=(1, 1))
                h2, w2 = valid(h1, w1, kernel_size=(5, 5), stride=(2, 2))
                h3, w3 = valid(h2, w2, kernel_size=(5, 5), stride=(2, 2))
                flat_dim = int(h3 * w3 * 32)
                print('h1:{}, w1:{}'.format(h1, w1))
                print('h2:{}, w2:{}'.format(h2, w2))
                print('h3:{}, w3:{}'.format(h3, w3))
                print('flat dim:{}'.format(flat_dim))

                self.gen_real_sn = tg.StartNode(input_vars=[self.real_ph])

                enc_hn = tg.HiddenNode(
                    prev=[self.gen_real_sn],
                    layers=[
                        Conv2D(input_channels=self.c,
                               num_filters=32,
                               kernel_size=(5, 5),
                               stride=(1, 1),
                               padding='VALID'),
                        TFBatchNormalization(name=scope + '/genc1'),
                        RELU(),
                        Conv2D(input_channels=32,
                               num_filters=32,
                               kernel_size=(5, 5),
                               stride=(2, 2),
                               padding='VALID'),
                        TFBatchNormalization(name=scope + '/genc2'),
                        RELU(),
                        Conv2D(input_channels=32,
                               num_filters=32,
                               kernel_size=(5, 5),
                               stride=(2, 2),
                               padding='VALID'),
                        TFBatchNormalization(name=scope + '/genc3'),
                        RELU(),
                        #    Conv2D(input_channels=32, num_filters=32, kernel_size=(5,5), stride=(2,2), padding='VALID'),
                        #    RELU(),
                        Flatten(),
                        Linear(flat_dim, 300),
                        TFBatchNormalization(name=scope + '/genc4'),
                        RELU(),
                        Linear(300, self.bottleneck_dim),
                        Tanh(),
                    ])

                embed_end = tg.EndNode(prev=[enc_hn])

                graph_embed = tg.Graph(start=[self.gen_real_sn],
                                       end=[embed_end])

                G_train_embed = graph_embed.train_fprop()[0]
                G_test_embed = graph_embed.test_fprop()[0]

                enc_clf = tg.HiddenNode(prev=[enc_hn],
                                        layers=[
                                            Linear(self.bottleneck_dim,
                                                   self.nclass),
                                            Softmax()
                                        ])

                end_node = tg.EndNode(prev=[enc_clf])

                graph_enc = tg.Graph(start=[self.gen_real_sn], end=[end_node])

                G_train_enc = graph_enc.train_fprop()[0]
                G_test_enc = graph_enc.test_fprop()[0]

                self.noise_sn = tg.StartNode(input_vars=[self.noise_ph])

                self.gen_hn = tg.HiddenNode(
                    prev=[self.noise_sn, enc_hn],
                    input_merge_mode=Sum(),
                    layers=[
                        Linear(self.bottleneck_dim, flat_dim),
                        RELU(),
                        Reshape((-1, 1, 1, flat_dim)),
                        #    Reshape((-1, h))
                        Conv2D_Transpose(input_channels=flat_dim,
                                         num_filters=200,
                                         output_shape=(h3, w3),
                                         kernel_size=(h3, w3),
                                         stride=(1, 1),
                                         padding='VALID'),
                        #    BatchNormalization(layer_type='conv', dim=200, short_memory=0.01),
                        TFBatchNormalization(name=scope + '/g1'),
                        RELU(),
                        Conv2D_Transpose(input_channels=200,
                                         num_filters=100,
                                         output_shape=(h2, w2),
                                         kernel_size=(5, 5),
                                         stride=(2, 2),
                                         padding='VALID'),
                        TFBatchNormalization(name=scope + '/g2'),
                        RELU(),
                        Conv2D_Transpose(input_channels=100,
                                         num_filters=50,
                                         output_shape=(h1, w1),
                                         kernel_size=(5, 5),
                                         stride=(2, 2),
                                         padding='VALID'),
                        #    BatchNormalization(layer_type='conv', dim=50, short_memory=0.01),
                        TFBatchNormalization(name=scope + '/g3'),
                        RELU(),
                        Conv2D_Transpose(input_channels=50,
                                         num_filters=self.c,
                                         output_shape=(self.h, self.w),
                                         kernel_size=(5, 5),
                                         stride=(1, 1),
                                         padding='VALID'),
                        SetShape((-1, self.h, self.w, self.c)),
                        Sigmoid()
                    ])

                h, w = valid(self.h, self.w, kernel_size=(5, 5), stride=(1, 1))
                h, w = valid(h, w, kernel_size=(5, 5), stride=(2, 2))
                h, w = valid(h, w, kernel_size=(5, 5), stride=(2, 2))
                h, w = valid(h, w, kernel_size=(h3, w3), stride=(1, 1))

                y_en = tg.EndNode(prev=[self.gen_hn])

                graph = tg.Graph(start=[self.noise_sn, self.gen_real_sn],
                                 end=[y_en])

                G_train_sb = graph.train_fprop()[0]
                G_test_sb = graph.test_fprop()[0]
                gen_var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                                 scope=scope)

        return (self.y_ph, self.noise_ph, G_train_sb, G_test_sb, gen_var_list,
                G_train_enc, G_test_enc, G_train_embed, G_test_embed)
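A note on the shape bookkeeping above: the valid() helper (and the same() helper used by the discriminator below) is assumed here to follow TensorFlow's standard output-size rules for 'VALID' and 'SAME' padding. A minimal sketch of those rules, with valid_size/same_size as illustrative names rather than the library's actual API:

import math

# assumed TensorFlow output-size rules; illustrative stand-ins for the
# valid()/same() helpers used in these examples
def valid_size(in_height, in_width, kernel_size, stride):
    # 'VALID' padding: out = ceil((in - kernel + 1) / stride)
    h = math.ceil((in_height - kernel_size[0] + 1) / stride[0])
    w = math.ceil((in_width - kernel_size[1] + 1) / stride[1])
    return h, w

def same_size(in_height, in_width, stride):
    # 'SAME' padding: out = ceil(in / stride), independent of kernel size
    return math.ceil(in_height / stride[0]), math.ceil(in_width / stride[1])

# a kernel covering the whole input collapses it to a single pixel, which is
# why the sanity-check chain in the generator above ends at (1, 1)
assert valid_size(7, 7, kernel_size=(7, 7), stride=(1, 1)) == (1, 1)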
Example no. 29
    def discriminator_allconv(self):
        if not self.generator_called:
            raise Exception(
                'self.generator() has to be called before self.discriminator()'
            )
        scope = 'Discriminator'
        with self.tf_graph.as_default():
            with tf.name_scope(scope):
                # h1, w1 = valid(self.h, self.w, kernel_size=(5,5), stride=(1,1))
                # h2, w2 = valid(h1, w1, kernel_size=(5,5), stride=(2,2))
                # h3, w3 = valid(h2, w2, kernel_size=(5,5), stride=(2,2))
                # flat_dim = int(h3*w3*32)

                dis_real_sn = tg.StartNode(input_vars=[self.real_ph])

                # fake_ph = tf.placeholder('float32', [None, self.h, self.w, 1], name='fake')
                # fake_sn = tg.StartNode(input_vars=[fake_ph])

                # trace the feature-map size through the SAME-padded conv stack
                # below: only the two stride-2 convolutions shrink it, and the
                # final (h, w) becomes the AvgPooling window for global pooling
                h, w = same(in_height=self.h,
                            in_width=self.w,
                            stride=(1, 1),
                            kernel_size=(3, 3))
                h, w = same(in_height=h,
                            in_width=w,
                            stride=(1, 1),
                            kernel_size=(3, 3))
                h, w = same(in_height=h,
                            in_width=w,
                            stride=(2, 2),
                            kernel_size=(3, 3))
                h, w = same(in_height=h,
                            in_width=w,
                            stride=(1, 1),
                            kernel_size=(3, 3))
                h, w = same(in_height=h,
                            in_width=w,
                            stride=(1, 1),
                            kernel_size=(3, 3))

                h, w = same(in_height=h,
                            in_width=w,
                            stride=(2, 2),
                            kernel_size=(3, 3))
                h, w = same(in_height=h,
                            in_width=w,
                            stride=(1, 1),
                            kernel_size=(1, 1))
                h, w = same(in_height=h,
                            in_width=w,
                            stride=(1, 1),
                            kernel_size=(3, 3))

                h, w = same(in_height=h,
                            in_width=w,
                            stride=(1, 1),
                            kernel_size=(1, 1))
                print('discriminator feature map before avg pooling: h, w =', h, w)
                # h, w = valid(in_height=h, in_width=w, stride=(1,1), kernel_size=(h,w))

                disc_hn = tg.HiddenNode(
                    prev=[dis_real_sn, self.gen_hn],
                    layers=[
                        Dropout(0.2),
                        # TFBatchNormalization(name='b0'),
                        Conv2D(input_channels=self.c,
                               num_filters=96,
                               kernel_size=(3, 3),
                               stride=(1, 1),
                               padding='SAME'),
                        LeakyRELU(),
                        TFBatchNormalization(name='b1'),
                        # Dropout(0.5),
                        Conv2D(input_channels=96,
                               num_filters=96,
                               kernel_size=(3, 3),
                               stride=(1, 1),
                               padding='SAME'),
                        LeakyRELU(),
                        # TFBatchNormalization(name='b2'),
                        Dropout(0.5),
                        Conv2D(input_channels=96,
                               num_filters=96,
                               kernel_size=(3, 3),
                               stride=(2, 2),
                               padding='SAME'),
                        LeakyRELU(),
                        TFBatchNormalization(name='b3'),
                        # Dropout(0.5),
                        Conv2D(input_channels=96,
                               num_filters=192,
                               kernel_size=(3, 3),
                               stride=(1, 1),
                               padding='SAME'),
                        LeakyRELU(),
                        # TFBatchNormalization(name='b4'),
                        Dropout(0.5),
                        Conv2D(input_channels=192,
                               num_filters=192,
                               kernel_size=(3, 3),
                               stride=(1, 1),
                               padding='SAME'),
                        LeakyRELU(),
                        TFBatchNormalization(name='b5'),
                        # Dropout(0.5),
                        Conv2D(input_channels=192,
                               num_filters=192,
                               kernel_size=(3, 3),
                               stride=(2, 2),
                               padding='SAME'),
                        LeakyRELU(),
                        # TFBatchNormalization(name='b6'),
                        Dropout(0.5),
                        Conv2D(input_channels=192,
                               num_filters=192,
                               kernel_size=(3, 3),
                               stride=(1, 1),
                               padding='SAME'),
                        LeakyRELU(),
                        TFBatchNormalization(name='b7'),
                        # Dropout(0.5),
                        Conv2D(input_channels=192,
                               num_filters=192,
                               kernel_size=(1, 1),
                               stride=(1, 1),
                               padding='SAME'),
                        LeakyRELU(),
                        # TFBatchNormalization(name='b8'),
                        Dropout(0.5),
                        Conv2D(input_channels=192,
                               num_filters=self.nclass,
                               kernel_size=(1, 1),
                               stride=(1, 1),
                               padding='SAME'),
                        LeakyRELU(),
                        TFBatchNormalization(name='b9'),
                        # Dropout(0.5),
                        AvgPooling(poolsize=(h, w),
                                   stride=(1, 1),
                                   padding='VALID'),
                        Flatten(),
                    ])

                class_hn = tg.HiddenNode(
                    prev=[disc_hn],
                    layers=[Linear(self.nclass, self.nclass),
                            Softmax()])

                judge_hn = tg.HiddenNode(
                    prev=[disc_hn],
                    layers=[
                        Linear(self.nclass, 1),
                        #  Sigmoid()
                    ])

                real_class_en = tg.EndNode(prev=[class_hn])
                real_judge_en = tg.EndNode(prev=[judge_hn])

                fake_class_en = tg.EndNode(prev=[class_hn])
                fake_judge_en = tg.EndNode(prev=[judge_hn])

                graph = tg.Graph(start=[dis_real_sn],
                                 end=[real_class_en, real_judge_en])
                real_train = graph.train_fprop()
                real_valid = graph.test_fprop()

                graph = tg.Graph(start=[self.noise_sn, self.gen_real_sn],
                                 end=[fake_class_en, fake_judge_en])
                fake_train = graph.train_fprop()
                fake_valid = graph.test_fprop()

                dis_var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                                 scope=scope)

        return self.real_ph, real_train, real_valid, fake_train, fake_valid, dis_var_list
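The tensors returned here pair naturally with the usual semi-supervised GAN objectives: a sigmoid cross-entropy on the judge head (whose Sigmoid layer is commented out above, so its output is a logit) and a cross-entropy on the class head. A minimal sketch of that wiring, assuming the [class, judge] ordering of the EndNodes above and taking y_ph and gen_var_list from the generator's return values; the Adam learning rate is purely illustrative:

real_class, real_judge = real_train   # [class, judge] from the real-image graph
fake_class, fake_judge = fake_train   # [class, judge] from the generated graph

# judge head: push real images towards 1 and generated images towards 0
judge_cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
                 labels=tf.ones_like(real_judge), logits=real_judge)) \
           + tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
                 labels=tf.zeros_like(fake_judge), logits=fake_judge))

# class head: cross-entropy against the labels for the (labelled) real batch
class_cost = tf.reduce_mean(-tf.reduce_sum(y_ph * tf.log(real_class + 1e-8), axis=1))

dis_train_op = tf.train.AdamOptimizer(1e-4).minimize(
    judge_cost + class_cost, var_list=dis_var_list)

# the generator instead tries to make the judge call its samples real
gen_cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
               labels=tf.ones_like(fake_judge), logits=fake_judge))
gen_train_op = tf.train.AdamOptimizer(1e-4).minimize(gen_cost, var_list=gen_var_list)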
Example no. 30
def model(nclass, h, w, c):
    with tf.name_scope('Cifar10AllCNN'):
        seq = tg.Sequential()
        seq.add(
            Conv2D(num_filters=96,
                   kernel_size=(3, 3),
                   stride=(1, 1),
                   padding='SAME'))
        seq.add(RELU())
        seq.add(BatchNormalization())

        seq.add(
            Conv2D(num_filters=96,
                   kernel_size=(3, 3),
                   stride=(1, 1),
                   padding='SAME'))
        seq.add(RELU())
        seq.add(Dropout(0.5))

        seq.add(
            Conv2D(num_filters=96,
                   kernel_size=(3, 3),
                   stride=(2, 2),
                   padding='SAME'))
        seq.add(RELU())
        seq.add(BatchNormalization())

        seq.add(
            Conv2D(num_filters=192,
                   kernel_size=(3, 3),
                   stride=(1, 1),
                   padding='SAME'))
        seq.add(RELU())
        seq.add(Dropout(0.5))

        seq.add(
            Conv2D(num_filters=192,
                   kernel_size=(3, 3),
                   stride=(1, 1),
                   padding='SAME'))
        seq.add(RELU())
        seq.add(BatchNormalization())

        seq.add(
            Conv2D(num_filters=192,
                   kernel_size=(3, 3),
                   stride=(2, 2),
                   padding='SAME'))
        seq.add(RELU())
        seq.add(Dropout(0.5))

        seq.add(
            Conv2D(num_filters=192,
                   kernel_size=(3, 3),
                   stride=(1, 1),
                   padding='SAME'))
        seq.add(RELU())
        seq.add(BatchNormalization())

        seq.add(
            Conv2D(num_filters=192,
                   kernel_size=(1, 1),
                   stride=(1, 1),
                   padding='SAME'))
        seq.add(RELU())
        seq.add(Dropout(0.5))

        seq.add(
            Conv2D(num_filters=nclass,
                   kernel_size=(1, 1),
                   stride=(1, 1),
                   padding='SAME'))
        seq.add(RELU())
        seq.add(BatchNormalization())

        seq.add(AvgPooling(poolsize=(8, 8), stride=(1, 1), padding='VALID'))
        seq.add(Flatten())
        seq.add(Softmax())
    return seq
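The (8, 8) pooling window encodes an assumed 32x32 CIFAR-10 input: with 'SAME' padding the feature map only shrinks at the two stride-2 convolutions (32 -> 16 -> 8), so the average pooling collapses the final nclass-channel map to 1x1 before the Flatten and Softmax. A quick self-contained check of that arithmetic using TensorFlow's 'SAME' rule:

import math

h = w = 32  # CIFAR-10 spatial size
for stride in (1, 1, 2, 1, 1, 2, 1, 1, 1):  # the nine conv strides above, in order
    h, w = math.ceil(h / stride), math.ceil(w / stride)
print(h, w)  # 8 8 -> matches AvgPooling(poolsize=(8, 8))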