Пример #1
0
def model():
    """Build the MNIST CNN classifier as a tensorgraph Sequential model.

    Returns:
        tg.Sequential: two conv/pool stages followed by a 3-layer MLP
        head ending in a 10-way softmax.
    """
    with tf.name_scope('MnistCNN'):
        seq = tg.Sequential()
        # conv -> BN -> ReLU -> pool -> LRN, twice, then flatten
        feature_layers = [
            Conv2D(num_filters=32, kernel_size=(3, 3), stride=(1, 1), padding='SAME'),
            BatchNormalization(),
            RELU(),
            MaxPooling(poolsize=(2, 2), stride=(2, 2), padding='SAME'),
            LRN(),
            Conv2D(num_filters=64, kernel_size=(3, 3), stride=(1, 1), padding='SAME'),
            BatchNormalization(),
            RELU(),
            MaxPooling(poolsize=(2, 2), stride=(2, 2), padding='SAME'),
            LRN(),
            Flatten(),
        ]
        # fully-connected head with dropout regularization
        head_layers = [
            Linear(128),
            BatchNormalization(),
            Tanh(),
            Dropout(0.8),
            Linear(256),
            BatchNormalization(),
            Tanh(),
            Dropout(0.8),
            Linear(10),
            Softmax(),
        ]
        for layer in feature_layers + head_layers:
            seq.add(layer)
    return seq
Пример #2
0
def model(word_len, sent_len, nclass):
    """Character-level CNN sentence classifier.

    Characters are embedded, convolved per word, the per-word class
    scores are reshaped back to sentences and summed, and a softmax
    yields the sentence-level class distribution.

    Args:
        word_len: number of characters per (padded) word.
        sent_len: number of words per (padded) sentence.
        nclass: number of output classes.

    Returns:
        (X_ph, y_train_sb, y_test_sb): int32 placeholder of shape
        [batch, sent_len, word_len] plus symbolic train/test outputs.
    """
    unicode_size = 1000  # size of the character vocabulary
    ch_embed_dim = 20    # character embedding dimension

    # Track the feature-map size through the three VALID convolutions so
    # the Linear layer below receives the correct flattened input dim.
    h, w = valid(ch_embed_dim,
                 word_len,
                 stride=(1, 1),
                 kernel_size=(ch_embed_dim, 5))
    h, w = valid(h, w, stride=(1, 1), kernel_size=(1, 5))
    h, w = valid(h, w, stride=(1, 2), kernel_size=(1, 5))
    conv_out_dim = int(h * w * 60)

    X_ph = tf.placeholder('int32', [None, sent_len, word_len])
    input_sn = tg.StartNode(input_vars=[X_ph])
    charcnn_hn = tg.HiddenNode(prev=[input_sn],
                               layers=[
                                   # fold words into the batch dim, then embed chars
                                   Reshape(shape=(-1, word_len)),
                                   Embedding(cat_dim=unicode_size,
                                             encode_dim=ch_embed_dim,
                                             zero_pad=True),
                                   Reshape(shape=(-1, ch_embed_dim, word_len,
                                                  1)),
                                   Conv2D(input_channels=1,
                                          num_filters=20,
                                          padding='VALID',
                                          kernel_size=(ch_embed_dim, 5),
                                          stride=(1, 1)),
                                   RELU(),
                                   Conv2D(input_channels=20,
                                          num_filters=40,
                                          padding='VALID',
                                          kernel_size=(1, 5),
                                          stride=(1, 1)),
                                   RELU(),
                                   Conv2D(input_channels=40,
                                          num_filters=60,
                                          padding='VALID',
                                          kernel_size=(1, 5),
                                          stride=(1, 2)),
                                   RELU(),
                                   Flatten(),
                                   Linear(conv_out_dim, nclass),
                                   # restore the sentence dim, then sum the
                                   # per-word scores over each sentence
                                   Reshape((-1, sent_len, nclass)),
                                   ReduceSum(1),
                                   Softmax()
                               ])

    output_en = tg.EndNode(prev=[charcnn_hn])
    graph = tg.Graph(start=[input_sn], end=[output_en])
    y_train_sb = graph.train_fprop()[0]
    y_test_sb = graph.test_fprop()[0]

    return X_ph, y_train_sb, y_test_sb
Пример #3
0
    def __init__(self, nclass, h, w, c):
        """Assemble an identity-block + dense-block CNN classifier graph.

        Args:
            nclass: number of output classes.
            h, w, c: input height, width and channel count.
        """
        layers = []
        # residual identity block: preserves spatial dims and channels
        identityblk = IdentityBlock(input_channels=c,
                                    input_shape=[h, w],
                                    nlayers=10)
        layers.append(identityblk)

        layers.append(
            Conv2D(input_channels=c,
                   num_filters=16,
                   kernel_size=(3, 3),
                   stride=(1, 1),
                   padding='SAME'))
        layers.append(RELU())
        # keep (h, w) in sync with the conv output so every
        # BatchNormalization below gets the correct input_shape
        h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(3, 3))
        layers.append(BatchNormalization(input_shape=[h, w, 16]))

        # densely-connected block grows the channel count by growth_rate
        # per layer; its final count is denseblk.output_channels
        denseblk = DenseBlock(input_channels=16,
                              input_shape=[h, w],
                              growth_rate=4,
                              nlayers=4)
        layers.append(denseblk)

        layers.append(
            Conv2D(input_channels=denseblk.output_channels,
                   num_filters=32,
                   kernel_size=(3, 3),
                   stride=(2, 2),
                   padding='SAME'))
        layers.append(RELU())
        h, w = same(in_height=h, in_width=w, stride=(2, 2), kernel_size=(3, 3))
        layers.append(Dropout(0.5))

        # 1x1 conv maps features to one map per class
        layers.append(
            Conv2D(input_channels=32,
                   num_filters=nclass,
                   kernel_size=(1, 1),
                   stride=(1, 1),
                   padding='SAME'))
        layers.append(RELU())
        h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(1, 1))
        layers.append(BatchNormalization(input_shape=[h, w, nclass]))

        # global average pooling over the remaining spatial extent
        layers.append(
            AvgPooling(poolsize=(h, w), stride=(1, 1), padding='VALID'))
        layers.append(Flatten())
        layers.append(Softmax())

        self.startnode = tg.StartNode(input_vars=[None])
        model_hn = tg.HiddenNode(prev=[self.startnode], layers=layers)
        self.endnode = tg.EndNode(prev=[model_hn])
Пример #4
0
def model(nclass, h, w, c):
    """Build the all-convolutional CIFAR10 classifier.

    Args:
        nclass: number of output classes.
        h, w, c: input height, width and channel count.

    Returns:
        tg.Sequential: nine SAME-padded conv stages (batch-norm or
        dropout alternating) followed by global average pooling and a
        softmax.
    """
    with tf.name_scope('Cifar10AllCNN'):
        seq = tg.Sequential()
        # (in_channels, out_channels, kernel, stride, batch-norm name or None);
        # stages without a batch-norm name get Dropout(0.5) instead
        conv_specs = [
            (c, 96, (3, 3), (1, 1), 'b1'),
            (96, 96, (3, 3), (1, 1), None),
            (96, 96, (3, 3), (2, 2), 'b3'),
            (96, 192, (3, 3), (1, 1), None),
            (192, 192, (3, 3), (1, 1), 'b5'),
            (192, 192, (3, 3), (2, 2), None),
            (192, 192, (3, 3), (1, 1), 'b7'),
            (192, 192, (1, 1), (1, 1), None),
            (192, nclass, (1, 1), (1, 1), 'b9'),
        ]
        for in_ch, out_ch, ksize, strd, bn_name in conv_specs:
            seq.add(Conv2D(input_channels=in_ch, num_filters=out_ch,
                           kernel_size=ksize, stride=strd, padding='SAME'))
            seq.add(RELU())
            if bn_name is not None:
                seq.add(TFBatchNormalization(name=bn_name))
            # track spatial dims so the final pooling covers the full map
            h, w = same(in_height=h, in_width=w, stride=strd, kernel_size=ksize)
            if bn_name is None:
                seq.add(Dropout(0.5))

        # global average pooling over the remaining spatial extent
        seq.add(AvgPooling(poolsize=(h, w), stride=(1, 1), padding='VALID'))
        seq.add(Flatten())
        seq.add(Softmax())
    return seq
Пример #5
0
    def __init__(self, h, w, c):
        """Two parallel conv->BN->ReLU branches merged by elementwise Sum.

        Args:
            h, w, c: input height, width and channel count.
        """

        def conv_branch():
            # one branch: 2x2 SAME conv down to a single feature map
            return [
                Conv2D(input_channels=c, num_filters=1, kernel_size=(2, 2),
                       stride=(1, 1), padding='SAME'),
                BatchNormalization(input_shape=[h, w, 1]),
                RELU(),
            ]

        self.startnode = tg.StartNode(input_vars=[None])
        branch_a = tg.HiddenNode(prev=[self.startnode], layers=conv_branch())
        branch_b = tg.HiddenNode(prev=[self.startnode], layers=conv_branch())
        merged = tg.HiddenNode(prev=[branch_a, branch_b], input_merge_mode=Sum())
        self.endnode = tg.EndNode(prev=[merged])
Пример #6
0
def model():
    """Build the MNIST CNN with explicitly tracked layer shapes.

    Returns:
        tg.Sequential: two conv/pool stages followed by a 3-layer MLP
        head ending in a 10-way softmax, for 28x28 single-channel input.
    """
    with tf.name_scope('MnistCNN'):
        seq = tg.Sequential()
        seq.add(
            Conv2D(input_channels=1,
                   num_filters=32,
                   kernel_size=(3, 3),
                   stride=(1, 1),
                   padding='SAME'))
        # (h, w) tracks the spatial output size of each layer so that
        # BatchNormalization and the first Linear get correct shapes
        h, w = same(in_height=28,
                    in_width=28,
                    stride=(1, 1),
                    kernel_size=(3, 3))
        seq.add(BatchNormalization(input_shape=[h, w, 32]))
        seq.add(RELU())

        seq.add(MaxPooling(poolsize=(2, 2), stride=(2, 2), padding='SAME'))
        h, w = same(in_height=h, in_width=w, stride=(2, 2), kernel_size=(2, 2))
        seq.add(LRN())

        seq.add(
            Conv2D(input_channels=32,
                   num_filters=64,
                   kernel_size=(3, 3),
                   stride=(1, 1),
                   padding='SAME'))
        h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(3, 3))
        seq.add(BatchNormalization(input_shape=[h, w, 64]))
        seq.add(RELU())

        seq.add(MaxPooling(poolsize=(2, 2), stride=(2, 2), padding='SAME'))
        h, w = same(in_height=h, in_width=w, stride=(2, 2), kernel_size=(2, 2))
        seq.add(LRN())
        seq.add(Flatten())
        # flattened conv output -> fully-connected head with dropout
        seq.add(Linear(int(h * w * 64), 128))
        seq.add(BatchNormalization(input_shape=[128]))
        seq.add(Tanh())
        seq.add(Dropout(0.8))
        seq.add(Linear(128, 256))
        seq.add(BatchNormalization(input_shape=[256]))
        seq.add(Tanh())
        seq.add(Dropout(0.8))
        seq.add(Linear(256, 10))
        seq.add(Softmax())
    return seq
Пример #7
0
 def __init__(self, num_iter):
     """Two-conv refinement stack, applied for `num_iter` iterations.

     Args:
         num_iter: number of refinement iterations to run.
     """
     self.num_iter = num_iter
     # 4-channel input -> 8 feature maps -> single-channel output
     self.layers = [
         Conv2D(input_channels=4, num_filters=8, kernel_size=(5, 5),
                stride=(1, 1), padding='SAME'),
         BatchNormalization(layer_type='conv', dim=8, short_memory=0.01),
         RELU(),
         Conv2D(input_channels=8, num_filters=1, kernel_size=(5, 5),
                stride=(1, 1), padding='SAME'),
         RELU(),
     ]
Пример #8
0
def fcn():
    """Fully-convolutional net: ResNet features + single-map sigmoid head.

    Returns:
        tg.Sequential producing a one-channel sigmoid output.
    """
    net = tg.Sequential()
    net.add(ResNet(num_blocks=1))
    head = Conv2D(input_channels=3,
                  num_filters=1,
                  kernel_size=(5, 5),
                  stride=(1, 1),
                  padding='SAME')
    net.add(head)
    net.add(Sigmoid())
    return net
Пример #9
0
def test_Conv2D():
    """Smoke test: run a SAME-padded Conv2D forward pass on random input."""
    seq = tg.Sequential()
    seq.add(Conv2D(num_filters=2, kernel_size=(3, 3), stride=(1, 1), padding='SAME'))

    X_ph = tf.placeholder('float32', [None, 100, 100, 5])
    y_train_sb = seq.train_fprop(X_ph)
    y_test_sb = seq.test_fprop(X_ph)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        batch = np.random.rand(32, 100, 100, 5)
        out = sess.run(y_train_sb, feed_dict={X_ph: batch})
        print(out.shape)
Пример #10
0
 def __init__(self, num_blocks):
     """Stack of `num_blocks` two-conv residual-style blocks.

     Each block maps 3 channels -> 8 -> back to 3 with 5x5 SAME convs,
     so its output shape matches its input shape.

     Args:
         num_blocks: how many blocks to build.
     """
     self.num_blocks = num_blocks
     self.blocks = []
     for _ in range(self.num_blocks):
         # fresh layer objects for every block (no weight sharing)
         block = [
             Conv2D(input_channels=3, num_filters=8, kernel_size=(5, 5),
                    stride=(1, 1), padding='SAME'),
             BatchNormalization(layer_type='conv', dim=8,
                                short_memory=0.01),
             RELU(),
             Conv2D(input_channels=8, num_filters=3, kernel_size=(5, 5),
                    stride=(1, 1), padding='SAME'),
             RELU(),
         ]
         self.blocks.append(block)
Пример #11
0
def resnet_crf_rnn(x_ph):
    """ResNet segmentation head refined by a CRF-RNN stage.

    Args:
        x_ph: input placeholder/tensor fed to the graph.

    Returns:
        (train_out, test_out): lists of symbolic train/test outputs.
    """
    start = tg.StartNode(input_vars=[x_ph])
    # coarse prediction: ResNet features -> 1-channel conv -> sigmoid
    coarse = tg.HiddenNode(prev=[start],
                           layers=[
                               ResNet(num_blocks=5),
                               Conv2D(input_channels=3,
                                      num_filters=1,
                                      kernel_size=(5, 5),
                                      stride=(1, 1),
                                      padding='SAME'),
                               Sigmoid()
                           ])
    # refinement sees both the raw input and the coarse prediction
    refined = tg.HiddenNode(prev=[start, coarse],
                            input_merge_mode=NoChange(),
                            layers=[CRF_RNN(num_iter=2)])

    end = tg.EndNode(prev=[refined])
    graph = tg.Graph(start=[start], end=[end])
    train_out = graph.train_fprop()
    test_out = graph.test_fprop()
    return train_out, test_out
Пример #12
0
    def discriminator(self):
        """Build the discriminator over real and generated text.

        A shared conv stack (disc_hn) feeds two heads: a class head
        (softmax over self.nclass) and a judge head (unbounded
        real/fake score).

        Returns:
            (real_ph, real_train, real_valid, fake_train, fake_valid,
            dis_var_list): the real-input placeholder, the [class, judge]
            symbolic outputs for real and fake paths (train/test), and
            the discriminator-scope global variables.

        Raises:
            Exception: if self.generator() has not been called first.
        """
        if not self.generator_called:
            raise Exception(
                'self.generator() has to be called first before self.discriminator()'
            )
        scope = 'Discriminator'
        with self.tf_graph.as_default():
            with tf.name_scope(scope):
                # Spatial dims through the three VALID convs below; the
                # final (hf, wf) fixes the flattened Linear input size.
                # (A previous dead recomputation of these dims with 5x5
                # kernels, and an unused flat_dim, has been removed.)
                h1, w1 = valid(self.char_embed_dim,
                               self.word_len,
                               kernel_size=(self.char_embed_dim, 3),
                               stride=(1, 1))
                print('h1:{}, w1:{}'.format(h1, w1))
                h2, w2 = valid(h1, w1, kernel_size=(1, 3), stride=(1, 1))
                print('h2:{}, w2:{}'.format(h2, w2))
                h3, w3 = valid(h2, w2, kernel_size=(1, 3), stride=(1, 1))
                print('h3:{}, w3:{}'.format(h3, w3))
                hf, wf = h3, w3
                n_filters = 100

                real_sn = tg.StartNode(input_vars=[self.real_ph])

                # one-hot the real characters and transpose so the real
                # path matches the generator output's layout
                real_hn = tg.HiddenNode(prev=[real_sn],
                                        layers=[
                                            OneHot(self.char_embed_dim),
                                            Transpose(perm=[0, 3, 2, 1])
                                        ])

                # conv stack shared between the real and generated inputs
                disc_hn = tg.HiddenNode(
                    prev=[real_hn, self.gen_hn],
                    layers=[
                        Conv2D(input_channels=self.sent_len,
                               num_filters=100,
                               kernel_size=(self.char_embed_dim, 3),
                               stride=(1, 1),
                               padding='VALID'),
                        TFBatchNormalization(name=scope + '/d1'),
                        LeakyRELU(),
                        Conv2D(input_channels=100,
                               num_filters=100,
                               kernel_size=(1, 3),
                               stride=(1, 1),
                               padding='VALID'),
                        TFBatchNormalization(name=scope + '/d2'),
                        LeakyRELU(),
                        Conv2D(input_channels=100,
                               num_filters=100,
                               kernel_size=(1, 3),
                               stride=(1, 1),
                               padding='VALID'),
                        TFBatchNormalization(name=scope + '/d3'),
                        LeakyRELU(),
                        Flatten(),
                        Linear(int(hf * wf * n_filters), self.bottleneck_dim),
                        TFBatchNormalization(name=scope + '/d4'),
                        LeakyRELU(),
                    ])

                class_hn = tg.HiddenNode(prev=[disc_hn],
                                         layers=[
                                             Linear(self.bottleneck_dim,
                                                    self.nclass),
                                             Softmax()
                                         ])

                # judge head is deliberately left without a Sigmoid
                judge_hn = tg.HiddenNode(
                    prev=[disc_hn],
                    layers=[
                        Linear(self.bottleneck_dim, 1),
                    ])

                real_class_en = tg.EndNode(prev=[class_hn])
                real_judge_en = tg.EndNode(prev=[judge_hn])

                fake_class_en = tg.EndNode(prev=[class_hn])
                fake_judge_en = tg.EndNode(prev=[judge_hn])

                graph = tg.Graph(start=[real_sn],
                                 end=[real_class_en, real_judge_en])

                real_train = graph.train_fprop()
                real_valid = graph.test_fprop()

                graph = tg.Graph(start=[self.noise_sn],
                                 end=[fake_class_en, fake_judge_en])
                fake_train = graph.train_fprop()
                fake_valid = graph.test_fprop()

                dis_var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                                 scope=scope)

        return self.real_ph, real_train, real_valid, fake_train, fake_valid, dis_var_list
Пример #13
0
    def __init__(self, h, w, c, z_dim=100, gf_dim=64, df_dim=64):
        """Build DCGAN-style generator and discriminator layer lists.

        Args:
            h, w, c: image height, width and channel count.
            z_dim: noise vector dimension fed to the generator.
            gf_dim: generator base filter count.
            df_dim: discriminator base filter count.
        """

        self.z_dim = z_dim

        # spatial sizes after 1..4 stride-2 SAME convs; the generator
        # starts at the smallest map (h16 x w16) and upsamples to (h, w)
        out_shape2 = same_nd([h, w], kernel_size=(5, 5), stride=(2, 2))
        out_shape4 = same_nd(out_shape2, kernel_size=(5, 5), stride=(2, 2))
        out_shape8 = same_nd(out_shape4, kernel_size=(5, 5), stride=(2, 2))
        out_shape16 = same_nd(out_shape8, kernel_size=(5, 5), stride=(2, 2))
        h16, w16 = out_shape16

        with tf.variable_scope('Generator'):
            # project noise to the smallest map, then four stride-2
            # transposed convs: 8*gf -> 4*gf -> 2*gf -> gf -> c channels
            self.g_layers = [
                Linear(z_dim, 8 * gf_dim * h16 * w16),
                Reshape([-1, h16, w16, 8 * gf_dim]),
                # TFBatchNormalization(name='gbn1'),
                BatchNormalization(input_shape=[h16, w16, 8 * gf_dim]),
                RELU(),
                Conv2D_Transpose(input_channels=8 * gf_dim,
                                 num_filters=4 * gf_dim,
                                 output_shape=out_shape8,
                                 kernel_size=(5, 5),
                                 stride=(2, 2),
                                 padding='SAME'),
                # TFBatchNormalization(name='gbn2'),
                BatchNormalization(input_shape=out_shape8 + [4 * gf_dim]),
                RELU(),
                Conv2D_Transpose(input_channels=4 * gf_dim,
                                 num_filters=2 * gf_dim,
                                 output_shape=out_shape4,
                                 kernel_size=(5, 5),
                                 stride=(2, 2),
                                 padding='SAME'),
                # TFBatchNormalization(name='gbn3'),
                BatchNormalization(input_shape=out_shape4 + [2 * gf_dim]),
                RELU(),
                Conv2D_Transpose(input_channels=2 * gf_dim,
                                 num_filters=gf_dim,
                                 output_shape=out_shape2,
                                 kernel_size=(5, 5),
                                 stride=(2, 2),
                                 padding='SAME'),
                # TFBatchNormalization(name='gbn4'),
                BatchNormalization(input_shape=out_shape2 + [gf_dim]),
                RELU(),
                Conv2D_Transpose(input_channels=gf_dim,
                                 num_filters=c,
                                 output_shape=(h, w),
                                 kernel_size=(5, 5),
                                 stride=(2, 2),
                                 padding='SAME'),
                # Sigmoid()
            ]

        # NOTE(review): identical to the shape computation above —
        # redundant recomputation, but harmless
        out_shape2 = same_nd([h, w], kernel_size=(5, 5), stride=(2, 2))
        out_shape4 = same_nd(out_shape2, kernel_size=(5, 5), stride=(2, 2))
        out_shape8 = same_nd(out_shape4, kernel_size=(5, 5), stride=(2, 2))
        out_shape16 = same_nd(out_shape8, kernel_size=(5, 5), stride=(2, 2))
        h16, w16 = out_shape16

        with tf.variable_scope('Discriminator'):
            # discriminator split into five stages so intermediate
            # activations can be used separately
            self.d1_layers = [
                Conv2D(input_channels=c,
                       num_filters=df_dim,
                       kernel_size=(5, 5),
                       stride=(2, 2),
                       padding='SAME'),
                LeakyRELU(),
                Conv2D(input_channels=df_dim,
                       num_filters=2 * df_dim,
                       kernel_size=(5, 5),
                       stride=(2, 2),
                       padding='SAME'),
            ]
            # TFBatchNormalization(name='dbn1'),
            self.d2_layers = [
                BatchNormalization(input_shape=out_shape4 + [2 * df_dim]),
                LeakyRELU(),
                Conv2D(input_channels=2 * df_dim,
                       num_filters=4 * df_dim,
                       kernel_size=(5, 5),
                       stride=(2, 2),
                       padding='SAME'),
            ]

            self.d3_layers = [

                # TFBatchNormalization(name='dbn2'),
                BatchNormalization(input_shape=out_shape8 + [4 * df_dim]),
                LeakyRELU(),
                Conv2D(input_channels=4 * df_dim,
                       num_filters=8 * df_dim,
                       kernel_size=(5, 5),
                       stride=(2, 2),
                       padding='SAME'),
            ]
            self.d4_layers = [
                # TFBatchNormalization(name='dbn3'),
                BatchNormalization(input_shape=out_shape16 + [8 * df_dim]),
                LeakyRELU(),
                # global max over the spatial dims
                ReduceMax(reduction_indices=[1, 2]),
            ]
            self.d5_layers = [
                Flatten(),
                Linear(8 * df_dim, 1),
                # LeakyRELU(),
                # Linear(1000, 1)
                #    Sigmoid()
            ]
            print('====:', 8 * df_dim)
Пример #14
0
def classifier(X_ph, X_gen_ph, h, w):
    """Two-stream classifier over a real image and a generated image.

    Both inputs pass through structurally identical (but separately
    parameterized) conv/pool stacks; the flattened features are
    concatenated and scored with a sigmoid.

    Args:
        X_ph: placeholder for the real single-channel image batch.
        X_gen_ph: placeholder for the generated image batch.
        h, w: input height and width.

    Returns:
        (y_train, y_test): symbolic train/test sigmoid outputs.
    """
    with tf.variable_scope('Classifier'):
        X_sn = tg.StartNode(input_vars=[X_ph])
        X_gen_sn = tg.StartNode(input_vars=[X_gen_ph])
        # track spatial dims through conv(3x3,s1) -> pool(2x2,s2) twice,
        # so BatchNormalization shapes and the Linear input dim are right
        h1, w1 = same(in_height=h,
                      in_width=w,
                      stride=(1, 1),
                      kernel_size=(3, 3))
        h2, w2 = same(in_height=h1,
                      in_width=w1,
                      stride=(2, 2),
                      kernel_size=(2, 2))
        h3, w3 = same(in_height=h2,
                      in_width=w2,
                      stride=(1, 1),
                      kernel_size=(3, 3))
        h4, w4 = same(in_height=h3,
                      in_width=w3,
                      stride=(2, 2),
                      kernel_size=(2, 2))

        print('---', h, w)
        # real-image stream
        X_hn = tg.HiddenNode(prev=[X_sn],
                             layers=[
                                 Conv2D(input_channels=1,
                                        num_filters=32,
                                        kernel_size=(3, 3),
                                        stride=(1, 1),
                                        padding='SAME'),
                                 BatchNormalization(input_shape=[h1, w1, 32]),
                                 RELU(),
                                 MaxPooling(poolsize=(2, 2),
                                            stride=(2, 2),
                                            padding='SAME'),
                                 LRN(),
                                 Conv2D(input_channels=32,
                                        num_filters=64,
                                        kernel_size=(3, 3),
                                        stride=(1, 1),
                                        padding='SAME'),
                                 BatchNormalization(input_shape=[h3, w3, 64]),
                                 RELU(),
                                 MaxPooling(poolsize=(2, 2),
                                            stride=(2, 2),
                                            padding='SAME'),
                                 Flatten(),
                             ])

        # generated-image stream (same architecture, separate weights)
        X_gen_hn = tg.HiddenNode(
            prev=[X_gen_sn],
            layers=[
                Conv2D(input_channels=1,
                       num_filters=32,
                       kernel_size=(3, 3),
                       stride=(1, 1),
                       padding='SAME'),
                BatchNormalization(input_shape=[h1, w1, 32]),
                RELU(),
                MaxPooling(poolsize=(2, 2), stride=(2, 2), padding='SAME'),
                LRN(),
                Conv2D(input_channels=32,
                       num_filters=64,
                       kernel_size=(3, 3),
                       stride=(1, 1),
                       padding='SAME'),
                BatchNormalization(input_shape=[h3, w3, 64]),
                RELU(),
                MaxPooling(poolsize=(2, 2), stride=(2, 2), padding='SAME'),
                Flatten(),
            ])

        print('===', h4 * w4 * 64 * 2)

        # concatenate both feature vectors and score with a sigmoid
        merge_hn = tg.HiddenNode(prev=[X_hn, X_gen_hn],
                                 input_merge_mode=Concat(),
                                 layers=[
                                     Linear(h4 * w4 * 64 * 2, 100),
                                     RELU(),
                                     BatchNormalization(input_shape=[100]),
                                     Linear(100, 1),
                                     Sigmoid()
                                 ])

        en = tg.EndNode(prev=[merge_hn])

        graph = tg.Graph(start=[X_sn, X_gen_sn], end=[en])
        y_train, = graph.train_fprop()
        y_test, = graph.test_fprop()
    return y_train, y_test
Пример #15
0
File: gan.py Project: Shirlly/GAN
    def discriminator(self):
        """Build the image discriminator shared by real and fake paths.

        A common conv stack (disc_hn) feeds a class head (softmax over
        self.nclass) and a judge head (unbounded real/fake score).

        Returns:
            (real_ph, real_train, real_valid, fake_train, fake_valid,
            dis_var_list): the real-input placeholder, [class, judge]
            symbolic outputs for real and fake paths (train/test), and
            the discriminator-scope global variables.

        Raises:
            Exception: if self.generator() has not been called first.
        """
        if not self.generator_called:
            raise Exception(
                'self.generator() has to be called first before self.discriminator()'
            )
        scope = 'Discriminator'
        with self.tf_graph.as_default():
            with tf.name_scope(scope):
                # spatial dims through the three VALID convs below;
                # flat_dim is the flattened input of the Linear layer
                h1, w1 = valid(self.h,
                               self.w,
                               kernel_size=(5, 5),
                               stride=(1, 1))
                h2, w2 = valid(h1, w1, kernel_size=(5, 5), stride=(2, 2))
                h3, w3 = valid(h2, w2, kernel_size=(5, 5), stride=(2, 2))
                flat_dim = int(h3 * w3 * 32)
                real_ph = tf.placeholder('float32', [None, self.h, self.w, 1],
                                         name='real')
                real_sn = tg.StartNode(input_vars=[real_ph])

                # fake_ph = tf.placeholder('float32', [None, self.h, self.w, 1], name='fake')
                # fake_sn = tg.StartNode(input_vars=[fake_ph])

                # conv stack shared between the real input and the
                # generator output (self.gen_hn)
                disc_hn = tg.HiddenNode(
                    prev=[real_sn, self.gen_hn],
                    layers=[
                        Conv2D(input_channels=1,
                               num_filters=32,
                               kernel_size=(5, 5),
                               stride=(1, 1),
                               padding='VALID'),
                        TFBatchNormalization(name=scope + '/c1'),
                        LeakyRELU(),
                        Conv2D(input_channels=32,
                               num_filters=32,
                               kernel_size=(5, 5),
                               stride=(2, 2),
                               padding='VALID'),
                        TFBatchNormalization(name=scope + '/c2'),
                        LeakyRELU(),
                        Conv2D(input_channels=32,
                               num_filters=32,
                               kernel_size=(5, 5),
                               stride=(2, 2),
                               padding='VALID'),
                        TFBatchNormalization(name=scope + '/c3'),
                        LeakyRELU(),
                        #    Conv2D(input_channels=32, num_filters=32, kernel_size=(5,5), stride=(2,2), padding='VALID'),
                        #    RELU(),
                        Flatten(),
                        Linear(flat_dim, self.bottleneck_dim),
                        TFBatchNormalization(name=scope + '/l1'),
                        LeakyRELU(),
                        # Dropout(0.5),
                    ])

                class_hn = tg.HiddenNode(prev=[disc_hn],
                                         layers=[
                                             Linear(self.bottleneck_dim,
                                                    self.nclass),
                                             Softmax()
                                         ])

                # judge head is deliberately left without a Sigmoid
                judge_hn = tg.HiddenNode(
                    prev=[disc_hn],
                    layers=[
                        Linear(self.bottleneck_dim, 1),
                        #  Sigmoid()
                    ])

                real_class_en = tg.EndNode(prev=[class_hn])
                real_judge_en = tg.EndNode(prev=[judge_hn])

                fake_class_en = tg.EndNode(prev=[class_hn])
                fake_judge_en = tg.EndNode(prev=[judge_hn])

                graph = tg.Graph(start=[real_sn],
                                 end=[real_class_en, real_judge_en])

                real_train = graph.train_fprop()
                real_valid = graph.test_fprop()
                # dis_var_list = graph.variables
                # for var in dis_var_list:
                # print var.name

                # fake path starts from the generator's noise/label inputs
                graph = tg.Graph(start=[self.noise_sn, self.y_sn],
                                 end=[fake_class_en, fake_judge_en])
                fake_train = graph.train_fprop()
                fake_valid = graph.test_fprop()

                # print('========')
                # for var in graph.variables:
                # print var.name

                dis_var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                                 scope=scope)
                # for var in dis_var_list:
                #     print(var.name)
                #
                # print('=========')
                # for var in tf.global_variables():
                #     print(var.name)
                # import pdb; pdb.set_trace()
                # print()

            # graph = tg.Graph(start=[G_sn], end=[class_en, judge_en])
            # class_train_sb, judge_train_sb = graph.train_fprop() # symbolic outputs
            # class_test_sb, judge_test_sb = graph.test_fprop() # symbolic outputs

        return real_ph, real_train, real_valid, fake_train, fake_valid, dis_var_list
Пример #16
0
    def generator(self):
        """Build the generator: a conv encoder down to a bottleneck vector,
        summed element-wise with the noise input, then a transposed-conv
        decoder back to image space.

        Returns:
            tuple: ``(y_ph, noise_ph, G_train_sb, G_test_sb, gen_var_list)``
            where ``G_train_sb`` / ``G_test_sb`` are the symbolic generated
            images in train/test mode and ``gen_var_list`` contains every
            global variable created under the ``Generator`` scope.

        Note:
            ``self.y_ph``, ``self.noise_ph`` and ``self.real_ph`` are assumed
            to be placeholders created elsewhere (presumably ``__init__``) —
            TODO confirm.  Side effects: sets ``self.generator_called``,
            ``self.gen_real_sn``, ``self.noise_sn`` and ``self.gen_hn``,
            which the discriminator builders reuse.
        """
        self.generator_called = True
        with self.tf_graph.as_default():
            scope = 'Generator'
            with tf.name_scope(scope):
                # Spatial sizes after each VALID conv of the encoder; the
                # decoder mirrors them in reverse order.
                h1, w1 = valid(self.h,
                               self.w,
                               kernel_size=(5, 5),
                               stride=(1, 1))
                h2, w2 = valid(h1, w1, kernel_size=(5, 5), stride=(2, 2))
                h3, w3 = valid(h2, w2, kernel_size=(5, 5), stride=(2, 2))
                flat_dim = int(h3 * w3 * 32)
                print('h1:{}, w1:{}'.format(h1, w1))
                print('h2:{}, w2:{}'.format(h2, w2))
                print('h3:{}, w3:{}'.format(h3, w3))
                print('flat dim:{}'.format(flat_dim))

                self.gen_real_sn = tg.StartNode(input_vars=[self.real_ph])

                # Encoder: image -> bottleneck_dim vector squashed to (-1, 1)
                # by the final Tanh so it matches the noise scale.
                enc_hn = tg.HiddenNode(
                    prev=[self.gen_real_sn],
                    layers=[
                        Conv2D(input_channels=self.c,
                               num_filters=32,
                               kernel_size=(5, 5),
                               stride=(1, 1),
                               padding='VALID'),
                        TFBatchNormalization(name=scope + '/genc1'),
                        RELU(),
                        Conv2D(input_channels=32,
                               num_filters=32,
                               kernel_size=(5, 5),
                               stride=(2, 2),
                               padding='VALID'),
                        TFBatchNormalization(name=scope + '/genc2'),
                        RELU(),
                        Conv2D(input_channels=32,
                               num_filters=32,
                               kernel_size=(5, 5),
                               stride=(2, 2),
                               padding='VALID'),
                        TFBatchNormalization(name=scope + '/genc3'),
                        RELU(),
                        Flatten(),
                        Linear(flat_dim, 300),
                        TFBatchNormalization(name=scope + '/genc4'),
                        RELU(),
                        Linear(300, self.bottleneck_dim),
                        Tanh(),
                    ])

                self.noise_sn = tg.StartNode(input_vars=[self.noise_ph])

                # Decoder: noise + encoding (element-wise Sum) -> image.
                self.gen_hn = tg.HiddenNode(
                    prev=[self.noise_sn, enc_hn],
                    input_merge_mode=Sum(),
                    layers=[
                        Linear(self.bottleneck_dim, flat_dim),
                        RELU(),
                        # Project to a 1x1 spatial map, then deconv with a
                        # kernel covering the whole (h3, w3) map in one step.
                        Reshape((-1, 1, 1, flat_dim)),
                        Conv2D_Transpose(input_channels=flat_dim,
                                         num_filters=200,
                                         output_shape=(h3, w3),
                                         kernel_size=(h3, w3),
                                         stride=(1, 1),
                                         padding='VALID'),
                        TFBatchNormalization(name=scope + '/g1'),
                        RELU(),
                        Conv2D_Transpose(input_channels=200,
                                         num_filters=100,
                                         output_shape=(h2, w2),
                                         kernel_size=(5, 5),
                                         stride=(2, 2),
                                         padding='VALID'),
                        TFBatchNormalization(name=scope + '/g2'),
                        RELU(),
                        Conv2D_Transpose(input_channels=100,
                                         num_filters=50,
                                         output_shape=(h1, w1),
                                         kernel_size=(5, 5),
                                         stride=(2, 2),
                                         padding='VALID'),
                        TFBatchNormalization(name=scope + '/g3'),
                        RELU(),
                        Conv2D_Transpose(input_channels=50,
                                         num_filters=self.c,
                                         output_shape=(self.h, self.w),
                                         kernel_size=(5, 5),
                                         stride=(1, 1),
                                         padding='VALID'),
                        SetShape((-1, self.h, self.w, self.c)),
                        # Sigmoid keeps generated pixels in [0, 1].
                        Sigmoid()
                    ])

                y_en = tg.EndNode(prev=[self.gen_hn])

                graph = tg.Graph(start=[self.noise_sn, self.gen_real_sn],
                                 end=[y_en])

                G_train_sb = graph.train_fprop()[0]
                G_test_sb = graph.test_fprop()[0]
                gen_var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                                 scope=scope)

        return self.y_ph, self.noise_ph, G_train_sb, G_test_sb, gen_var_list
Пример #17
0
    def discriminator(self):
        """Build the conv discriminator over both real and generated images.

        A shared conv trunk feeds two heads: a ``class_hn`` softmax over
        ``self.nclass`` classes and a ``judge_hn`` sigmoid real/fake score.
        The same trunk is forward-propagated twice: once from the real-image
        placeholder and once from the generator's output node
        (``self.gen_hn`` set by ``self.generator()``), so weights are shared
        between the real and fake paths.

        Returns:
            tuple: ``(real_ph, real_train, real_valid, fake_train,
            fake_valid, dis_var_list)`` where each ``*_train`` / ``*_valid``
            is the ``[class, judge]`` symbolic output pair and
            ``dis_var_list`` holds the global variables created under the
            ``Discriminator`` scope.

        Raises:
            Exception: if called before ``self.generator()``.
        """
        if not self.generator_called:
            raise Exception(
                'self.generator() has to be called first before self.discriminator()'
            )
        scope = 'Discriminator'
        with self.tf_graph.as_default():
            with tf.name_scope(scope):
                # Spatial sizes after the three VALID convs below; flat_dim
                # sizes the Linear after Flatten (32 = last conv's filters).
                h1, w1 = valid(self.h,
                               self.w,
                               kernel_size=(5, 5),
                               stride=(1, 1))
                h2, w2 = valid(h1, w1, kernel_size=(5, 5), stride=(2, 2))
                h3, w3 = valid(h2, w2, kernel_size=(5, 5), stride=(2, 2))
                flat_dim = int(h3 * w3 * 32)

                dis_real_sn = tg.StartNode(input_vars=[self.real_ph])

                # fake_ph = tf.placeholder('float32', [None, self.h, self.w, 1], name='fake')
                # fake_sn = tg.StartNode(input_vars=[fake_ph])

                # Shared trunk: two prev nodes mean the same layers (same
                # weights) process both the real and the generated images.
                disc_hn = tg.HiddenNode(
                    prev=[dis_real_sn, self.gen_hn],
                    layers=[
                        Conv2D(input_channels=self.c,
                               num_filters=32,
                               kernel_size=(5, 5),
                               stride=(1, 1),
                               padding='VALID'),
                        TFBatchNormalization(name=scope + '/d1'),
                        # BatchNormalization(layer_type='conv', dim=32, short_memory=0.01),
                        LeakyRELU(),
                        Conv2D(input_channels=32,
                               num_filters=32,
                               kernel_size=(5, 5),
                               stride=(2, 2),
                               padding='VALID'),
                        TFBatchNormalization(name=scope + '/d2'),
                        # BatchNormalization(layer_type='conv', dim=32, short_memory=0.01),
                        LeakyRELU(),
                        Conv2D(input_channels=32,
                               num_filters=32,
                               kernel_size=(5, 5),
                               stride=(2, 2),
                               padding='VALID'),
                        TFBatchNormalization(name=scope + '/d3'),
                        # BatchNormalization(layer_type='conv', dim=32, short_memory=0.01),
                        LeakyRELU(),
                        #    Conv2D(input_channels=32, num_filters=32, kernel_size=(5,5), stride=(2,2), padding='VALID'),
                        #    RELU(),
                        Flatten(),
                        Linear(flat_dim, self.bottleneck_dim),
                        # BatchNormalization(layer_type='fc', dim=self.bottleneck_dim, short_memory=0.01),
                        TFBatchNormalization(name=scope + '/d4'),
                        LeakyRELU(),
                        # Dropout(0.5),
                    ])

                # Classification head: per-class probabilities.
                class_hn = tg.HiddenNode(prev=[disc_hn],
                                         layers=[
                                             Linear(self.bottleneck_dim,
                                                    self.nclass),
                                             Softmax()
                                         ])

                # Real/fake head: single sigmoid score.
                judge_hn = tg.HiddenNode(
                    prev=[disc_hn],
                    layers=[Linear(self.bottleneck_dim, 1),
                            Sigmoid()])

                real_class_en = tg.EndNode(prev=[class_hn])
                real_judge_en = tg.EndNode(prev=[judge_hn])

                fake_class_en = tg.EndNode(prev=[class_hn])
                fake_judge_en = tg.EndNode(prev=[judge_hn])

                # Real path: from the real-image placeholder.
                graph = tg.Graph(start=[dis_real_sn],
                                 end=[real_class_en, real_judge_en])
                real_train = graph.train_fprop()
                real_valid = graph.test_fprop()

                # Fake path: from the generator's inputs through gen_hn.
                graph = tg.Graph(start=[self.noise_sn, self.gen_real_sn],
                                 end=[fake_class_en, fake_judge_en])
                fake_train = graph.train_fprop()
                fake_valid = graph.test_fprop()

                dis_var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                                 scope=scope)

        return self.real_ph, real_train, real_valid, fake_train, fake_valid, dis_var_list
Пример #18
0
def train():
    """Smoke-train a conv/deconv mask-prediction model on random data.

    Builds a small encoder-decoder (stride-2 convs down, transposed convs
    back up, followed by an ``Iterative`` refinement block), minimizes the
    MSE between predicted and target masks with Adam, and prints the
    validation MSE after each epoch.  Random arrays stand in for real
    32x32 RGB images and their single-channel masks.
    """
    batchsize = 64
    learning_rate = 0.001
    max_epoch = 10

    # Random stand-in data: 32x32 RGB inputs, 1-channel target masks.
    X_train = np.random.rand(1000, 32, 32, 3)
    M_train = np.random.rand(1000, 32, 32, 1)

    X_valid = np.random.rand(1000, 32, 32, 3)
    M_valid = np.random.rand(1000, 32, 32, 1)

    X_ph = tf.placeholder('float32', [None, 32, 32, 3])
    M_ph = tf.placeholder('float32', [None, 32, 32, 1])

    h, w = 32, 32

    # Encoder-decoder: two stride-2 convs down, two transposed convs back
    # to the input resolution, output has 1 channel (the mask).
    model = tg.Sequential()
    model.add(
        Conv2D(input_channels=3,
               num_filters=8,
               kernel_size=(5, 5),
               stride=(2, 2),
               padding='SAME'))
    h1, w1 = same(h, w, kernel_size=(5, 5), stride=(2, 2))
    model.add(RELU())
    model.add(
        Conv2D(input_channels=8,
               num_filters=16,
               kernel_size=(5, 5),
               stride=(2, 2),
               padding='SAME'))
    h2, w2 = same(h1, w1, kernel_size=(5, 5), stride=(2, 2))
    model.add(RELU())
    model.add(
        Conv2D_Transpose(input_channels=16,
                         num_filters=8,
                         output_shape=(h1, w1),
                         kernel_size=(5, 5),
                         stride=(2, 2),
                         padding='SAME'))
    model.add(RELU())
    model.add(
        Conv2D_Transpose(input_channels=8,
                         num_filters=1,
                         output_shape=(h, w),
                         kernel_size=(5, 5),
                         stride=(2, 2),
                         padding='SAME'))
    model.add(RELU())

    # Refinement block applied num_iter times on the 1-channel output.
    iter_model = tg.Sequential()
    iter_model.add(
        Conv2D(input_channels=1,
               num_filters=8,
               kernel_size=(5, 5),
               stride=(2, 2),
               padding='SAME'))
    iter_model.add(RELU())
    iter_model.add(
        Conv2D_Transpose(input_channels=8,
                         num_filters=1,
                         output_shape=(h, w),
                         kernel_size=(5, 5),
                         stride=(2, 2),
                         padding='SAME'))
    model.add(Iterative(sequential=iter_model, num_iter=10))

    M_train_s = model.train_fprop(X_ph)
    M_valid_s = model.test_fprop(X_ph)

    train_mse = tf.reduce_mean((M_ph - M_train_s)**2)
    valid_mse = tf.reduce_mean((M_ph - M_valid_s)**2)

    # Validation is evaluated on the full valid set in one feed below, so
    # only the training set needs a batch iterator.
    data_train = tg.SequentialIterator(X_train, M_train, batchsize=batchsize)

    optimizer = tf.train.AdamOptimizer(learning_rate).minimize(train_mse)

    with tf.Session() as sess:
        init = tf.global_variables_initializer()
        sess.run(init)
        for epoch in range(max_epoch):
            print('epoch:', epoch)
            print('..training')
            for X_batch, M_batch in data_train:
                sess.run(optimizer, feed_dict={X_ph: X_batch, M_ph: M_batch})

            print('..validating')
            valid_mse_score = sess.run(valid_mse,
                                       feed_dict={
                                           X_ph: X_valid,
                                           M_ph: M_valid
                                       })
            print('valid mse score:', valid_mse_score)
Пример #19
0
    def __init__(self, nclass, h, w, c):
        """Assemble an All-CNN style classifier graph.

        Nine SAME-padded conv blocks (each Conv2D + RELU followed by
        alternating BatchNormalization / Dropout(0.5)), a global average
        pool over the remaining spatial map, Flatten and Softmax.  Sets
        ``self.startnode`` and ``self.endnode`` for use with tg.Graph.

        Args:
            nclass: number of output classes (filters of the last conv).
            h, w: input spatial height and width.
            c: number of input channels.
        """
        # One row per conv block:
        # (in_channels, out_channels, kernel, stride, regularizer)
        # where 'bn' appends BatchNormalization and 'drop' appends Dropout,
        # alternating exactly as in the hand-rolled original.
        specs = [
            (c, 96, (3, 3), (1, 1), 'bn'),
            (96, 96, (3, 3), (1, 1), 'drop'),
            (96, 96, (3, 3), (2, 2), 'bn'),
            (96, 192, (3, 3), (1, 1), 'drop'),
            (192, 192, (3, 3), (1, 1), 'bn'),
            (192, 192, (3, 3), (2, 2), 'drop'),
            (192, 192, (3, 3), (1, 1), 'bn'),
            (192, 192, (1, 1), (1, 1), 'drop'),
            (192, nclass, (1, 1), (1, 1), 'bn'),
        ]

        layers = []
        for in_ch, out_ch, ksize, strd, reg in specs:
            layers.append(
                Conv2D(input_channels=in_ch,
                       num_filters=out_ch,
                       kernel_size=ksize,
                       stride=strd,
                       padding='SAME'))
            layers.append(RELU())
            # Track the spatial size so batch-norm shapes and the final
            # pooling window stay in sync with the conv strides.
            h, w = same(in_height=h, in_width=w, stride=strd, kernel_size=ksize)
            if reg == 'bn':
                layers.append(BatchNormalization(input_shape=[h, w, out_ch]))
            else:
                layers.append(Dropout(0.5))

        # Global average pool over whatever spatial map remains.
        layers.append(
            AvgPooling(poolsize=(h, w), stride=(1, 1), padding='VALID'))
        layers.append(Flatten())
        layers.append(Softmax())

        self.startnode = tg.StartNode(input_vars=[None])
        model_hn = tg.HiddenNode(prev=[self.startnode], layers=layers)
        self.endnode = tg.EndNode(prev=[model_hn])
Пример #20
0
def model(nclass, h, w, c):
    """Build the Cifar10 All-CNN sequential model.

    Args:
        nclass: number of output classes (filters of the final conv).
        h, w: input spatial height/width; used to size the final global
            average pool (previously hard-coded to (8, 8), which was only
            correct for 32x32 inputs).
        c: input channel count (unused here; Conv2D infers its input
            channels in this Sequential API).

    Returns:
        tg.Sequential: the assembled model.
    """
    with tf.name_scope('Cifar10AllCNN'):
        seq = tg.Sequential()
        seq.add(
            Conv2D(num_filters=96,
                   kernel_size=(3, 3),
                   stride=(1, 1),
                   padding='SAME'))
        seq.add(RELU())
        seq.add(BatchNormalization())

        seq.add(
            Conv2D(num_filters=96,
                   kernel_size=(3, 3),
                   stride=(1, 1),
                   padding='SAME'))
        seq.add(RELU())
        seq.add(Dropout(0.5))

        seq.add(
            Conv2D(num_filters=96,
                   kernel_size=(3, 3),
                   stride=(2, 2),
                   padding='SAME'))
        seq.add(RELU())
        seq.add(BatchNormalization())

        seq.add(
            Conv2D(num_filters=192,
                   kernel_size=(3, 3),
                   stride=(1, 1),
                   padding='SAME'))
        seq.add(RELU())
        seq.add(Dropout(0.5))

        seq.add(
            Conv2D(num_filters=192,
                   kernel_size=(3, 3),
                   stride=(1, 1),
                   padding='SAME'))
        seq.add(RELU())
        seq.add(BatchNormalization())

        seq.add(
            Conv2D(num_filters=192,
                   kernel_size=(3, 3),
                   stride=(2, 2),
                   padding='SAME'))
        seq.add(RELU())
        seq.add(Dropout(0.5))

        seq.add(
            Conv2D(num_filters=192,
                   kernel_size=(3, 3),
                   stride=(1, 1),
                   padding='SAME'))
        seq.add(RELU())
        seq.add(BatchNormalization())

        seq.add(
            Conv2D(num_filters=192,
                   kernel_size=(1, 1),
                   stride=(1, 1),
                   padding='SAME'))
        seq.add(RELU())
        seq.add(Dropout(0.5))

        seq.add(
            Conv2D(num_filters=nclass,
                   kernel_size=(1, 1),
                   stride=(1, 1),
                   padding='SAME'))
        seq.add(RELU())
        seq.add(BatchNormalization())

        # Only the two stride-2 convs shrink a SAME-padded map, so the
        # remaining spatial size is same(same(h, w)); a global average pool
        # over that window generalizes the old hard-coded (8, 8) — for
        # 32x32 inputs the result is identical.
        ph, pw = same(h, w, kernel_size=(3, 3), stride=(2, 2))
        ph, pw = same(ph, pw, kernel_size=(3, 3), stride=(2, 2))
        seq.add(AvgPooling(poolsize=(ph, pw), stride=(1, 1), padding='VALID'))
        seq.add(Flatten())
        seq.add(Softmax())
    return seq
Пример #21
0
    def discriminator_allconv(self):
        """Build an All-CNN style discriminator over real and generated images.

        Variant of ``discriminator()`` that replaces the conv+Linear trunk
        with an all-convolutional stack (SAME-padded convs, stride-2
        downsampling, 1x1 convs, then global average pooling).  The shared
        trunk feeds two heads: a ``class_hn`` linear over ``self.nclass``
        classes (no Softmax here) and a ``judge_hn`` linear real/fake score
        (no Sigmoid) — presumably the losses apply those nonlinearities;
        verify against the caller.

        Returns:
            tuple: ``(real_ph, real_train, real_valid, fake_train,
            fake_valid, dis_var_list)`` where each ``*_train`` / ``*_valid``
            is the ``[class, judge]`` symbolic output pair and
            ``dis_var_list`` holds the global variables created under the
            ``Discriminator`` scope.

        Raises:
            Exception: if called before ``self.generator()``.

        NOTE(review): the TFBatchNormalization names here ('b1'..'b9') are
        not prefixed with the scope, unlike ``discriminator()`` which uses
        ``scope + '/d1'`` — confirm this cannot collide with other
        batch-norm layers in the same graph.
        """
        if not self.generator_called:
            raise Exception(
                'self.generator() has to be called first before self.discriminator()'
            )
        scope = 'Discriminator'
        with self.tf_graph.as_default():
            with tf.name_scope(scope):
                # h1, w1 = valid(self.h, self.w, kernel_size=(5,5), stride=(1,1))
                # h2, w2 = valid(h1, w1, kernel_size=(5,5), stride=(2,2))
                # h3, w3 = valid(h2, w2, kernel_size=(5,5), stride=(2,2))
                # flat_dim = int(h3*w3*32)

                dis_real_sn = tg.StartNode(input_vars=[self.real_ph])

                # fake_ph = tf.placeholder('float32', [None, self.h, self.w, 1], name='fake')
                # fake_sn = tg.StartNode(input_vars=[fake_ph])

                # Track the spatial size through every SAME conv below (only
                # the stride-2 ones actually shrink it) so the AvgPooling at
                # the end can pool over the whole remaining map.
                h, w = same(in_height=self.h,
                            in_width=self.w,
                            stride=(1, 1),
                            kernel_size=(3, 3))
                h, w = same(in_height=h,
                            in_width=w,
                            stride=(1, 1),
                            kernel_size=(3, 3))
                h, w = same(in_height=h,
                            in_width=w,
                            stride=(2, 2),
                            kernel_size=(3, 3))
                h, w = same(in_height=h,
                            in_width=w,
                            stride=(1, 1),
                            kernel_size=(3, 3))
                h, w = same(in_height=h,
                            in_width=w,
                            stride=(1, 1),
                            kernel_size=(3, 3))

                h, w = same(in_height=h,
                            in_width=w,
                            stride=(2, 2),
                            kernel_size=(3, 3))
                h, w = same(in_height=h,
                            in_width=w,
                            stride=(1, 1),
                            kernel_size=(1, 1))
                h, w = same(in_height=h,
                            in_width=w,
                            stride=(1, 1),
                            kernel_size=(3, 3))

                h, w = same(in_height=h,
                            in_width=w,
                            stride=(1, 1),
                            kernel_size=(1, 1))
                print('h, w', h, w)
                print('===============')
                # h, w = valid(in_height=h, in_width=w, stride=(1,1), kernel_size=(h,w))

                # Shared trunk: two prev nodes mean the same layers (same
                # weights) process both the real and the generated images.
                disc_hn = tg.HiddenNode(
                    prev=[dis_real_sn, self.gen_hn],
                    layers=[
                        Dropout(0.2),
                        # TFBatchNormalization(name='b0'),
                        Conv2D(input_channels=self.c,
                               num_filters=96,
                               kernel_size=(3, 3),
                               stride=(1, 1),
                               padding='SAME'),
                        LeakyRELU(),
                        TFBatchNormalization(name='b1'),
                        # Dropout(0.5),
                        Conv2D(input_channels=96,
                               num_filters=96,
                               kernel_size=(3, 3),
                               stride=(1, 1),
                               padding='SAME'),
                        LeakyRELU(),
                        # TFBatchNormalization(name='b2'),
                        Dropout(0.5),
                        # Stride-2 conv replaces pooling (All-CNN style).
                        Conv2D(input_channels=96,
                               num_filters=96,
                               kernel_size=(3, 3),
                               stride=(2, 2),
                               padding='SAME'),
                        LeakyRELU(),
                        TFBatchNormalization(name='b3'),
                        # Dropout(0.5),
                        Conv2D(input_channels=96,
                               num_filters=192,
                               kernel_size=(3, 3),
                               stride=(1, 1),
                               padding='SAME'),
                        LeakyRELU(),
                        # TFBatchNormalization(name='b4'),
                        Dropout(0.5),
                        Conv2D(input_channels=192,
                               num_filters=192,
                               kernel_size=(3, 3),
                               stride=(1, 1),
                               padding='SAME'),
                        LeakyRELU(),
                        TFBatchNormalization(name='b5'),
                        # Dropout(0.5),
                        Conv2D(input_channels=192,
                               num_filters=192,
                               kernel_size=(3, 3),
                               stride=(2, 2),
                               padding='SAME'),
                        LeakyRELU(),
                        # TFBatchNormalization(name='b6'),
                        Dropout(0.5),
                        Conv2D(input_channels=192,
                               num_filters=192,
                               kernel_size=(3, 3),
                               stride=(1, 1),
                               padding='SAME'),
                        LeakyRELU(),
                        TFBatchNormalization(name='b7'),
                        # Dropout(0.5),
                        Conv2D(input_channels=192,
                               num_filters=192,
                               kernel_size=(1, 1),
                               stride=(1, 1),
                               padding='SAME'),
                        LeakyRELU(),
                        # TFBatchNormalization(name='b8'),
                        Dropout(0.5),
                        # 1x1 conv down to nclass feature maps.
                        Conv2D(input_channels=192,
                               num_filters=self.nclass,
                               kernel_size=(1, 1),
                               stride=(1, 1),
                               padding='SAME'),
                        LeakyRELU(),
                        TFBatchNormalization(name='b9'),
                        # Dropout(0.5),
                        # Global average pool over the remaining (h, w) map.
                        AvgPooling(poolsize=(h, w),
                                   stride=(1, 1),
                                   padding='VALID'),
                        Flatten(),
                    ])

                print('h,w', h, w)
                print('==============')
                # Classification head (raw logits; Softmax intentionally off).
                class_hn = tg.HiddenNode(
                    prev=[disc_hn],
                    layers=[
                        Linear(self.nclass, self.nclass),
                        # Softmax()
                    ])

                # Real/fake head (raw score; Sigmoid intentionally off).
                judge_hn = tg.HiddenNode(
                    prev=[disc_hn],
                    layers=[
                        Linear(self.nclass, 1),
                        #  Sigmoid()
                    ])

                real_class_en = tg.EndNode(prev=[class_hn])
                real_judge_en = tg.EndNode(prev=[judge_hn])

                fake_class_en = tg.EndNode(prev=[class_hn])
                fake_judge_en = tg.EndNode(prev=[judge_hn])

                # Real path: from the real-image placeholder.
                graph = tg.Graph(start=[dis_real_sn],
                                 end=[real_class_en, real_judge_en])
                real_train = graph.train_fprop()
                real_valid = graph.test_fprop()

                # Fake path: from the generator's inputs through gen_hn.
                graph = tg.Graph(start=[self.noise_sn, self.gen_real_sn],
                                 end=[fake_class_en, fake_judge_en])
                fake_train = graph.train_fprop()
                fake_valid = graph.test_fprop()

                dis_var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                                 scope=scope)

        return self.real_ph, real_train, real_valid, fake_train, fake_valid, dis_var_list