def test_SequenceMask():
    """Check that SequenceMask zeroes every timestep beyond each row's length."""
    data_ph = tf.placeholder('float32', [None, 5, 6, 7])
    len_ph = tf.placeholder('int32', [None])

    data_start = tg.StartNode(input_vars=[data_ph])
    len_start = tg.StartNode(input_vars=[len_ph])

    masked_hn = tg.HiddenNode(prev=[data_start, len_start],
                              input_merge_mode=SequenceMask(maxlen=5))

    end = tg.EndNode(prev=[masked_hn])

    graph = tg.Graph(start=[data_start, len_start], end=[end])

    train_sb = graph.train_fprop()
    test_sb = graph.test_fprop()

    lengths = [2, 3, 4]
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        feed = {data_ph: np.random.rand(3, 5, 6, 7), len_ph: lengths}
        y_train = sess.run(train_sb, feed_dict=feed)[0]
        y_test = sess.run(test_sb, feed_dict=feed)[0]
        # masking has no train/test mode, so both paths must agree
        assert y_train.sum() == y_test.sum()
        for row, length in enumerate(lengths):
            # values inside the sequence survive, the padded tail is zeroed
            assert y_train[row, :length].sum() > 0
            assert y_train[row, length:].sum() == 0
        print('test passed!')
def test_SelectedMaskSoftmax():
    """Softmax restricted to positions selected by a binary mask.

    Feeds a random batch through SelectedMaskSoftmax with a mask that keeps
    only positions 2, 3, 4 and checks that every output row is a valid
    probability distribution (sums to 1).
    """
    X_ph = tf.placeholder('float32', [None, 20])
    mask_ph = tf.placeholder('float32', [20])

    X_sn = tg.StartNode(input_vars=[X_ph])
    mask_sn = tg.StartNode(input_vars=[mask_ph])

    merge_hn = tg.HiddenNode(prev=[X_sn, mask_sn], input_merge_mode=SelectedMaskSoftmax())

    y_en = tg.EndNode(prev=[merge_hn])

    graph = tg.Graph(start=[X_sn, mask_sn], end=[y_en])
    y_sb, = graph.train_fprop()

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        mask_arr = np.zeros(20)
        mask_arr[[2, 3, 4]] = 1  # only positions 2, 3, 4 take part in the softmax
        feed_dict = {X_ph: np.random.rand(3, 20),
                     mask_ph: mask_arr}
        out = sess.run(y_sb, feed_dict=feed_dict)
        # BUGFIX: the original asserted `(out.sum(1) == 1).any()`, which passes
        # if a *single* row happens to sum to exactly 1.0 and relies on exact
        # float equality. Every row must sum to 1 within float tolerance.
        assert np.allclose(out.sum(1), 1)
        print(out)
        print('test passed!')
def test_MaskSoftmax():
    """MaskSoftmax: softmax over the first `seq` entries of each row, zero after."""
    X_ph = tf.placeholder('float32', [None, 20])
    seq_ph = tf.placeholder('int32', [None])

    X_sn = tg.StartNode(input_vars=[X_ph])
    seq_sn = tg.StartNode(input_vars=[seq_ph])

    merge_hn = tg.HiddenNode(prev=[X_sn, seq_sn], input_merge_mode=MaskSoftmax())

    y_en = tg.EndNode(prev=[merge_hn])

    graph = tg.Graph(start=[X_sn, seq_sn], end=[y_en])
    y_sb, = graph.train_fprop()

    lengths = [5, 8, 0]
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        out = sess.run(y_sb, feed_dict={X_ph: np.random.rand(3, 20),
                                        seq_ph: lengths})
        for row, length in enumerate(lengths):
            # everything beyond the sequence length is masked to zero
            assert (out[row][length:].sum() - 0)**2 < 1e-6
            if length:
                # the unmasked head forms a probability distribution
                assert (out[row][:length].sum() - 1)**2 < 1e-6
        print('test passed!')
Exemple #4
0
 def generator(self, z_fake):
     """Run the latent tensor `z_fake` through the generator layer stack.

     Returns the (train, test) symbolic outputs of the generator graph.
     """
     start = tg.StartNode(input_vars=[z_fake])
     hidden = tg.HiddenNode(prev=[start], layers=self.g_layers)
     end = tg.EndNode(prev=[hidden])
     graph = tg.Graph(start=[start], end=[end])
     X_fake_train_sb, = graph.train_fprop()
     X_fake_test_sb, = graph.test_fprop()
     return X_fake_train_sb, X_fake_test_sb
Exemple #5
0
 def classfication(self, X):
     """Pass `X` through an empty-layer graph (identity fprop).

     NOTE(review): the name looks like a typo of `classification`, but it
     is kept as-is because external callers may depend on it.
     """
     start = tg.StartNode(input_vars=[X])
     hidden = tg.HiddenNode(prev=[start], layers=[])
     end = tg.EndNode(prev=[hidden])
     graph = tg.Graph(start=[start], end=[end])
     y_train, = graph.train_fprop()
     y_test, = graph.test_fprop()
     return y_train, y_test
Exemple #6
0
def model(word_len, sent_len, nclass):
    """Character-level CNN sentence classifier.

    Characters are embedded, each word is convolved independently, the
    per-word class scores are summed over the sentence and normalized
    with a softmax.

    Args:
        word_len: number of characters per word.
        sent_len: number of words per sentence.
        nclass: number of output classes.

    Returns:
        (X_ph, y_train_sb, y_test_sb): the int32 input placeholder of
        shape [batch, sent_len, word_len] and the symbolic train/test
        output tensors.
    """
    # Hard-coded character vocabulary size and embedding width.
    unicode_size = 1000
    ch_embed_dim = 20

    # Track the spatial dims through the three VALID convolutions below
    # so the Linear layer after Flatten can be sized correctly.
    h, w = valid(ch_embed_dim,
                 word_len,
                 stride=(1, 1),
                 kernel_size=(ch_embed_dim, 5))
    h, w = valid(h, w, stride=(1, 1), kernel_size=(1, 5))
    h, w = valid(h, w, stride=(1, 2), kernel_size=(1, 5))
    conv_out_dim = int(h * w * 60)

    X_ph = tf.placeholder('int32', [None, sent_len, word_len])
    input_sn = tg.StartNode(input_vars=[X_ph])
    charcnn_hn = tg.HiddenNode(prev=[input_sn],
                               layers=[
                                   # merge batch and sentence dims so each
                                   # word is embedded/convolved independently
                                   Reshape(shape=(-1, word_len)),
                                   Embedding(cat_dim=unicode_size,
                                             encode_dim=ch_embed_dim,
                                             zero_pad=True),
                                   Reshape(shape=(-1, ch_embed_dim, word_len,
                                                  1)),
                                   Conv2D(input_channels=1,
                                          num_filters=20,
                                          padding='VALID',
                                          kernel_size=(ch_embed_dim, 5),
                                          stride=(1, 1)),
                                   RELU(),
                                   Conv2D(input_channels=20,
                                          num_filters=40,
                                          padding='VALID',
                                          kernel_size=(1, 5),
                                          stride=(1, 1)),
                                   RELU(),
                                   Conv2D(input_channels=40,
                                          num_filters=60,
                                          padding='VALID',
                                          kernel_size=(1, 5),
                                          stride=(1, 2)),
                                   RELU(),
                                   Flatten(),
                                   Linear(conv_out_dim, nclass),
                                   # restore the sentence dim, then sum the
                                   # per-word class scores over each sentence
                                   Reshape((-1, sent_len, nclass)),
                                   ReduceSum(1),
                                   Softmax()
                               ])

    output_en = tg.EndNode(prev=[charcnn_hn])
    graph = tg.Graph(start=[input_sn], end=[output_en])
    y_train_sb = graph.train_fprop()[0]
    y_test_sb = graph.test_fprop()[0]

    return X_ph, y_train_sb, y_test_sb
Exemple #7
0
 def discriminator(self, X):
     """Stack the five discriminator layer groups over `X`.

     Every stage also feeds its own EndNode so the intermediate
     activations are exposed. Returns the five train outputs followed by
     the five test outputs.
     """
     start = tg.StartNode(input_vars=[X])
     prev = start
     ends = []
     for layer_group in [self.d1_layers, self.d2_layers, self.d3_layers,
                         self.d4_layers, self.d5_layers]:
         prev = tg.HiddenNode(prev=[prev], layers=layer_group)
         ends.append(tg.EndNode(prev=[prev]))
     graph = tg.Graph(start=[start], end=ends)
     train_outs = graph.train_fprop()
     test_outs = graph.test_fprop()
     return tuple(train_outs) + tuple(test_outs)
Exemple #8
0
def resnet_crf_rnn(x_ph):
    """ResNet branch producing sigmoid unaries, refined by a CRF_RNN layer.

    The CRF_RNN node receives both the raw input and the sigmoid output of
    the ResNet branch (merged with NoChange). Returns the (train, test)
    symbolic output lists of the graph.
    """
    start_n = tg.StartNode(input_vars=[x_ph])
    unary_n = tg.HiddenNode(prev=[start_n],
                            layers=[
                                ResNet(num_blocks=5),
                                Conv2D(input_channels=3,
                                       num_filters=1,
                                       kernel_size=(5, 5),
                                       stride=(1, 1),
                                       padding='SAME'),
                                Sigmoid()
                            ])
    crf_n = tg.HiddenNode(prev=[start_n, unary_n],
                          input_merge_mode=NoChange(),
                          layers=[CRF_RNN(num_iter=2)])

    end_n = tg.EndNode(prev=[crf_n])
    graph = tg.Graph(start=[start_n], end=[end_n])
    train_out = graph.train_fprop()
    test_out = graph.test_fprop()
    return train_out, test_out
Exemple #9
0
import tensorgraph as tg
import numpy as np

if __name__ == '__main__':
    # Create red points centered at (-2, -2)
    np.random.seed(0)
    red_points = np.random.randn(50, 2) - 2 * np.ones((50, 2))

    # Create blue points centered at (2, 2)
    blue_points = np.random.randn(50, 2) + 2 * np.ones((50, 2))

    tg.Graph().as_default()

    X = tg.Placeholder()
    c = tg.Placeholder()

    W = tg.Variable([[1, -1], [1, -1]])
    b = tg.Variable([0, 0])

    p = tg.softmax(tg.add(tg.matmul(X, W), b))

    # Cross-entropy loss
    J = tg.negative(
        tg.reduce_sum(tg.reduce_sum(tg.multiply(c, tg.log(p)), axis=1)))

    session = tg.Session()
    print(
        session.run(
            J, {
                X: np.concatenate((blue_points, red_points)),
                c: [[1, 0]] * len(blue_points) + [[0, 1]] * len(red_points)
Exemple #10
0
def classifier(X_ph, X_gen_ph, h, w):
    """Two-branch CNN classifier over a real and a generated image.

    Each input is run through an identical (but separately parameterized)
    conv/pool stack, the flattened features are concatenated, and a small
    MLP produces a single sigmoid score.

    Args:
        X_ph: placeholder for the first image batch (the first Conv2D uses
            input_channels=1, so single-channel input is assumed — TODO
            confirm shape [batch, h, w, 1] against callers).
        X_gen_ph: placeholder for the generated image batch, same shape.
        h, w: spatial height and width of the input images.

    Returns:
        (y_train, y_test): symbolic train/test sigmoid outputs.
    """
    with tf.variable_scope('Classifier'):
        X_sn = tg.StartNode(input_vars=[X_ph])
        X_gen_sn = tg.StartNode(input_vars=[X_gen_ph])
        # Track spatial dims through the SAME-padded conv (stride 1) and
        # pool (stride 2) stages; h4 x w4 sizes the Linear after Flatten.
        h1, w1 = same(in_height=h,
                      in_width=w,
                      stride=(1, 1),
                      kernel_size=(3, 3))
        h2, w2 = same(in_height=h1,
                      in_width=w1,
                      stride=(2, 2),
                      kernel_size=(2, 2))
        h3, w3 = same(in_height=h2,
                      in_width=w2,
                      stride=(1, 1),
                      kernel_size=(3, 3))
        h4, w4 = same(in_height=h3,
                      in_width=w3,
                      stride=(2, 2),
                      kernel_size=(2, 2))

        print('---', h, w)
        # Branch 1: conv stack over X_ph.
        X_hn = tg.HiddenNode(prev=[X_sn],
                             layers=[
                                 Conv2D(input_channels=1,
                                        num_filters=32,
                                        kernel_size=(3, 3),
                                        stride=(1, 1),
                                        padding='SAME'),
                                 BatchNormalization(input_shape=[h1, w1, 32]),
                                 RELU(),
                                 MaxPooling(poolsize=(2, 2),
                                            stride=(2, 2),
                                            padding='SAME'),
                                 LRN(),
                                 Conv2D(input_channels=32,
                                        num_filters=64,
                                        kernel_size=(3, 3),
                                        stride=(1, 1),
                                        padding='SAME'),
                                 BatchNormalization(input_shape=[h3, w3, 64]),
                                 RELU(),
                                 MaxPooling(poolsize=(2, 2),
                                            stride=(2, 2),
                                            padding='SAME'),
                                 Flatten(),
                             ])

        # Branch 2: identical architecture (separate weights) over X_gen_ph.
        X_gen_hn = tg.HiddenNode(
            prev=[X_gen_sn],
            layers=[
                Conv2D(input_channels=1,
                       num_filters=32,
                       kernel_size=(3, 3),
                       stride=(1, 1),
                       padding='SAME'),
                BatchNormalization(input_shape=[h1, w1, 32]),
                RELU(),
                MaxPooling(poolsize=(2, 2), stride=(2, 2), padding='SAME'),
                LRN(),
                Conv2D(input_channels=32,
                       num_filters=64,
                       kernel_size=(3, 3),
                       stride=(1, 1),
                       padding='SAME'),
                BatchNormalization(input_shape=[h3, w3, 64]),
                RELU(),
                MaxPooling(poolsize=(2, 2), stride=(2, 2), padding='SAME'),
                Flatten(),
            ])

        print('===', h4 * w4 * 64 * 2)

        # Concatenate both flattened feature vectors and score with an MLP.
        merge_hn = tg.HiddenNode(prev=[X_hn, X_gen_hn],
                                 input_merge_mode=Concat(),
                                 layers=[
                                     Linear(h4 * w4 * 64 * 2, 100),
                                     RELU(),
                                     BatchNormalization(input_shape=[100]),
                                     Linear(100, 1),
                                     Sigmoid()
                                 ])

        en = tg.EndNode(prev=[merge_hn])

        graph = tg.Graph(start=[X_sn, X_gen_sn], end=[en])
        y_train, = graph.train_fprop()
        y_test, = graph.test_fprop()
    return y_train, y_test
Exemple #11
0
    def discriminator(self):
        """Build the discriminator graph over both the real and fake inputs.

        Must be called after self.generator(): the fake branch fprops from
        self.noise_sn / self.y_sn through self.gen_hn (the generator output
        node) into the same conv trunk. The trunk's bottleneck feeds two
        heads: a softmax classifier over self.nclass classes and a scalar
        judge score.

        Returns:
            (real_ph, real_train, real_valid, fake_train, fake_valid,
             dis_var_list)
        """
        if not self.generator_called:
            raise Exception(
                'self.generator() has to be called first before self.discriminator()'
            )
        scope = 'Discriminator'
        with self.tf_graph.as_default():
            with tf.name_scope(scope):
                # Spatial dims after the three VALID convolutions below,
                # used to size the Linear after Flatten.
                h1, w1 = valid(self.h,
                               self.w,
                               kernel_size=(5, 5),
                               stride=(1, 1))
                h2, w2 = valid(h1, w1, kernel_size=(5, 5), stride=(2, 2))
                h3, w3 = valid(h2, w2, kernel_size=(5, 5), stride=(2, 2))
                flat_dim = int(h3 * w3 * 32)
                real_ph = tf.placeholder('float32', [None, self.h, self.w, 1],
                                         name='real')
                real_sn = tg.StartNode(input_vars=[real_ph])

                # fake_ph = tf.placeholder('float32', [None, self.h, self.w, 1], name='fake')
                # fake_sn = tg.StartNode(input_vars=[fake_ph])

                # Shared conv trunk: listing both real_sn and self.gen_hn as
                # prev lets the same weights process real and generated data.
                disc_hn = tg.HiddenNode(
                    prev=[real_sn, self.gen_hn],
                    layers=[
                        Conv2D(input_channels=1,
                               num_filters=32,
                               kernel_size=(5, 5),
                               stride=(1, 1),
                               padding='VALID'),
                        TFBatchNormalization(name=scope + '/c1'),
                        LeakyRELU(),
                        Conv2D(input_channels=32,
                               num_filters=32,
                               kernel_size=(5, 5),
                               stride=(2, 2),
                               padding='VALID'),
                        TFBatchNormalization(name=scope + '/c2'),
                        LeakyRELU(),
                        Conv2D(input_channels=32,
                               num_filters=32,
                               kernel_size=(5, 5),
                               stride=(2, 2),
                               padding='VALID'),
                        TFBatchNormalization(name=scope + '/c3'),
                        LeakyRELU(),
                        #    Conv2D(input_channels=32, num_filters=32, kernel_size=(5,5), stride=(2,2), padding='VALID'),
                        #    RELU(),
                        Flatten(),
                        Linear(flat_dim, self.bottleneck_dim),
                        TFBatchNormalization(name=scope + '/l1'),
                        LeakyRELU(),
                        # Dropout(0.5),
                    ])

                # Classification head over self.nclass classes.
                class_hn = tg.HiddenNode(prev=[disc_hn],
                                         layers=[
                                             Linear(self.bottleneck_dim,
                                                    self.nclass),
                                             Softmax()
                                         ])

                # Judge head: raw scalar score with no Sigmoid — presumably
                # the loss applies it (e.g. sigmoid cross-entropy); confirm.
                judge_hn = tg.HiddenNode(
                    prev=[disc_hn],
                    layers=[
                        Linear(self.bottleneck_dim, 1),
                        #  Sigmoid()
                    ])

                real_class_en = tg.EndNode(prev=[class_hn])
                real_judge_en = tg.EndNode(prev=[judge_hn])

                fake_class_en = tg.EndNode(prev=[class_hn])
                fake_judge_en = tg.EndNode(prev=[judge_hn])

                # Real branch: fprop starts from the real placeholder.
                graph = tg.Graph(start=[real_sn],
                                 end=[real_class_en, real_judge_en])

                real_train = graph.train_fprop()
                real_valid = graph.test_fprop()
                # dis_var_list = graph.variables
                # for var in dis_var_list:
                # print var.name

                # Fake branch: fprop starts from the generator's inputs and
                # flows through self.gen_hn into the same heads.
                graph = tg.Graph(start=[self.noise_sn, self.y_sn],
                                 end=[fake_class_en, fake_judge_en])
                fake_train = graph.train_fprop()
                fake_valid = graph.test_fprop()

                # print('========')
                # for var in graph.variables:
                # print var.name

                # NOTE(review): this filters by *variable* scope name while
                # the graph above was built under tf.name_scope, which does
                # not prefix tf.get_variable names — verify this collection
                # is actually non-empty.
                dis_var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                                 scope=scope)
                # for var in dis_var_list:
                #     print(var.name)
                #
                # print('=========')
                # for var in tf.global_variables():
                #     print(var.name)
                # import pdb; pdb.set_trace()
                # print()

            # graph = tg.Graph(start=[G_sn], end=[class_en, judge_en])
            # class_train_sb, judge_train_sb = graph.train_fprop() # symbolic outputs
            # class_test_sb, judge_test_sb = graph.test_fprop() # symbolic outputs

        return real_ph, real_train, real_valid, fake_train, fake_valid, dis_var_list
Exemple #12
0
    def generator(self):
        """Build the conditional generator graph (noise + label -> image).

        Concatenates the noise vector with the label vector, then upsamples
        through a stack of transposed convolutions to an image of shape
        (self.h, self.w, 1) with sigmoid pixel values.

        Side effects: sets self.generator_called, self.noise_sn, self.y_sn
        and self.gen_hn (the latter three are reused by
        self.discriminator()).

        Returns:
            (y_ph, noise_ph, G_train_sb, G_test_sb, gen_var_list)
        """
        self.generator_called = True
        with self.tf_graph.as_default():
            scope = 'Generator'
            with tf.name_scope(scope):
                # X_ph = tf.placeholder('float32', [None, self.h, self.w, 1], name='X')
                # X_sn = tg.StartNode(input_vars=[X_ph])
                noise_ph = tf.placeholder('float32',
                                          [None, self.bottleneck_dim],
                                          name='noise')
                self.noise_sn = tg.StartNode(input_vars=[noise_ph])

                # Spatial dims the deconv stack has to reproduce in reverse.
                h1, w1 = valid(self.h,
                               self.w,
                               kernel_size=(5, 5),
                               stride=(1, 1))
                h2, w2 = valid(h1, w1, kernel_size=(5, 5), stride=(2, 2))
                h3, w3 = valid(h2, w2, kernel_size=(5, 5), stride=(2, 2))
                flat_dim = int(h3 * w3 * 32)
                print('h1:{}, w1:{}'.format(h1, w1))
                print('h2:{}, w2:{}'.format(h2, w2))
                print('h3:{}, w3:{}'.format(h3, w3))
                print('flat dim:{}'.format(flat_dim))

                # enc_hn = tg.HiddenNode(prev=[X_sn],
                #                        layers=[Conv2D(input_channels=1, num_filters=32, kernel_size=(5,5), stride=(1,1), padding='VALID'),
                #                                RELU(),
                #                                Conv2D(input_channels=32, num_filters=32, kernel_size=(5,5), stride=(2,2), padding='VALID'),
                #                                RELU(),
                #                                Conv2D(input_channels=32, num_filters=32, kernel_size=(5,5), stride=(2,2), padding='VALID'),
                #                                RELU(),
                #                             #    Conv2D(input_channels=32, num_filters=32, kernel_size=(5,5), stride=(2,2), padding='VALID'),
                #                             #    RELU(),
                #                                Flatten(),
                #                                Linear(flat_dim, 300),
                #                                RELU(),
                #                                # seq.add(Dropout(0.5))
                #                                Linear(300, self.bottleneck_dim),
                #                                Tanh(),
                #                                ])

                y_ph = tf.placeholder('float32', [None, self.nclass], name='y')
                self.y_sn = tg.StartNode(input_vars=[y_ph])

                # Condition the noise on the label by concatenation (axis 1).
                noise_hn = tg.HiddenNode(prev=[self.noise_sn, self.y_sn],
                                         input_merge_mode=Concat(1))

                self.gen_hn = tg.HiddenNode(
                    prev=[noise_hn],
                    layers=[
                        # NOTE(review): the "+ 10" hard-codes the label width
                        # — presumably self.nclass == 10; confirm.
                        Linear(self.bottleneck_dim + 10, flat_dim),
                        RELU(),

                        ######[ Method 0 ]######
                        #    Reshape((-1, h3, w3, 32)),
                        #    Conv2D_Transpose(input_channels=32, num_filters=100, output_shape=(h2,w2),
                        #                     kernel_size=(5,5), stride=(2,2), padding='VALID'),
                        ######[ End Method 0 ]######

                        ######[ Method 1 ]######
                        Reshape((-1, 1, 1, flat_dim)),
                        Conv2D_Transpose(input_channels=flat_dim,
                                         num_filters=200,
                                         output_shape=(2, 2),
                                         kernel_size=(2, 2),
                                         stride=(1, 1),
                                         padding='VALID'),
                        TFBatchNormalization(name=scope + '/dc1'),
                        RELU(),
                        Conv2D_Transpose(input_channels=200,
                                         num_filters=100,
                                         output_shape=(h2, w2),
                                         kernel_size=(9, 9),
                                         stride=(1, 1),
                                         padding='VALID'),
                        ######[ End Method 1 ]######
                        TFBatchNormalization(name=scope + '/dc2'),
                        RELU(),
                        Conv2D_Transpose(input_channels=100,
                                         num_filters=50,
                                         output_shape=(h1, w1),
                                         kernel_size=(5, 5),
                                         stride=(2, 2),
                                         padding='VALID'),
                        TFBatchNormalization(name=scope + '/dc3'),
                        RELU(),
                        Conv2D_Transpose(input_channels=50,
                                         num_filters=1,
                                         output_shape=(self.h, self.w),
                                         kernel_size=(5, 5),
                                         stride=(1, 1),
                                         padding='VALID'),
                        SetShape((-1, self.h, self.w, 1)),
                        Sigmoid()
                    ])

                y_en = tg.EndNode(prev=[self.gen_hn])
                graph = tg.Graph(start=[self.noise_sn, self.y_sn], end=[y_en])

                G_train_sb = graph.train_fprop()[0]
                G_test_sb = graph.test_fprop()[0]
                # import pdb; pdb.set_trace()
                # NOTE(review): filtered by variable scope while the graph
                # was built under tf.name_scope — verify it is non-empty.
                gen_var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                                 scope=scope)

        return y_ph, noise_ph, G_train_sb, G_test_sb, gen_var_list
Exemple #13
0
    def generator(self):
        """Build an encoder-decoder generator (autoencoder-style GAN generator).

        Encodes self.real_ph down to a bottleneck, sums it with the noise
        input (Sum merge — the encoder's Tanh output is bottleneck_dim wide,
        so self.noise_ph presumably has the same width; confirm), then
        decodes back to an image of shape (self.h, self.w, self.c) with
        sigmoid pixel values.

        Side effects: sets self.generator_called, self.gen_real_sn,
        self.noise_sn and self.gen_hn (reused by the discriminator).

        Returns:
            (self.y_ph, self.noise_ph, G_train_sb, G_test_sb, gen_var_list)
            — note self.y_ph and self.noise_ph are created elsewhere, not
            in this method.
        """
        self.generator_called = True
        with self.tf_graph.as_default():
            scope = 'Generator'
            with tf.name_scope(scope):
                # Spatial dims after the encoder's three VALID convolutions.
                h1, w1 = valid(self.h,
                               self.w,
                               kernel_size=(5, 5),
                               stride=(1, 1))
                h2, w2 = valid(h1, w1, kernel_size=(5, 5), stride=(2, 2))
                h3, w3 = valid(h2, w2, kernel_size=(5, 5), stride=(2, 2))
                flat_dim = int(h3 * w3 * 32)
                print('h1:{}, w1:{}'.format(h1, w1))
                print('h2:{}, w2:{}'.format(h2, w2))
                print('h3:{}, w3:{}'.format(h3, w3))
                print('flat dim:{}'.format(flat_dim))

                self.gen_real_sn = tg.StartNode(input_vars=[self.real_ph])

                # Encoder: conv stack down to a Tanh bottleneck.
                enc_hn = tg.HiddenNode(
                    prev=[self.gen_real_sn],
                    layers=[
                        Conv2D(input_channels=self.c,
                               num_filters=32,
                               kernel_size=(5, 5),
                               stride=(1, 1),
                               padding='VALID'),
                        TFBatchNormalization(name=scope + '/genc1'),
                        RELU(),
                        Conv2D(input_channels=32,
                               num_filters=32,
                               kernel_size=(5, 5),
                               stride=(2, 2),
                               padding='VALID'),
                        TFBatchNormalization(name=scope + '/genc2'),
                        RELU(),
                        Conv2D(input_channels=32,
                               num_filters=32,
                               kernel_size=(5, 5),
                               stride=(2, 2),
                               padding='VALID'),
                        TFBatchNormalization(name=scope + '/genc3'),
                        RELU(),
                        #    Conv2D(input_channels=32, num_filters=32, kernel_size=(5,5), stride=(2,2), padding='VALID'),
                        #    RELU(),
                        Flatten(),
                        Linear(flat_dim, 300),
                        TFBatchNormalization(name=scope + '/genc4'),
                        RELU(),
                        Linear(300, self.bottleneck_dim),
                        Tanh(),
                    ])

                self.noise_sn = tg.StartNode(input_vars=[self.noise_ph])

                # Decoder: noise is added (Sum) to the encoded bottleneck,
                # then upsampled back to the input resolution.
                self.gen_hn = tg.HiddenNode(
                    prev=[self.noise_sn, enc_hn],
                    input_merge_mode=Sum(),
                    layers=[
                        Linear(self.bottleneck_dim, flat_dim),
                        RELU(),

                        ######[ Method 0 ]######
                        #    Reshape((-1, h3, w3, 32)),
                        #    Conv2D_Transpose(input_channels=32, num_filters=100, output_shape=(h2,w2),
                        #                     kernel_size=(5,5), stride=(2,2), padding='VALID'),
                        ######[ End Method 0 ]######

                        ######[ Method 1 ]######
                        Reshape((-1, 1, 1, flat_dim)),
                        #    Reshape((-1, h))
                        Conv2D_Transpose(input_channels=flat_dim,
                                         num_filters=200,
                                         output_shape=(h3, w3),
                                         kernel_size=(h3, w3),
                                         stride=(1, 1),
                                         padding='VALID'),
                        #    BatchNormalization(layer_type='conv', dim=200, short_memory=0.01),
                        TFBatchNormalization(name=scope + '/g1'),
                        RELU(),
                        Conv2D_Transpose(input_channels=200,
                                         num_filters=100,
                                         output_shape=(h2, w2),
                                         kernel_size=(5, 5),
                                         stride=(2, 2),
                                         padding='VALID'),
                        #    BatchNormalization(layer_type='conv', dim=100, short_memory=0.01),
                        ######[ End Method 1 ]######
                        TFBatchNormalization(name=scope + '/g2'),
                        RELU(),
                        Conv2D_Transpose(input_channels=100,
                                         num_filters=50,
                                         output_shape=(h1, w1),
                                         kernel_size=(5, 5),
                                         stride=(2, 2),
                                         padding='VALID'),
                        #    BatchNormalization(layer_type='conv', dim=50, short_memory=0.01),
                        TFBatchNormalization(name=scope + '/g3'),
                        RELU(),
                        Conv2D_Transpose(input_channels=50,
                                         num_filters=self.c,
                                         output_shape=(self.h, self.w),
                                         kernel_size=(5, 5),
                                         stride=(1, 1),
                                         padding='VALID'),
                        SetShape((-1, self.h, self.w, self.c)),
                        Sigmoid()
                    ])

                # NOTE(review): these recomputed h, w values are never used
                # below — looks like dead code left from an earlier version.
                h, w = valid(self.h, self.w, kernel_size=(5, 5), stride=(1, 1))
                h, w = valid(h, w, kernel_size=(5, 5), stride=(2, 2))
                h, w = valid(h, w, kernel_size=(5, 5), stride=(2, 2))
                h, w = valid(h, w, kernel_size=(h3, w3), stride=(1, 1))

                y_en = tg.EndNode(prev=[self.gen_hn])

                graph = tg.Graph(start=[self.noise_sn, self.gen_real_sn],
                                 end=[y_en])

                G_train_sb = graph.train_fprop()[0]
                G_test_sb = graph.test_fprop()[0]
                # NOTE(review): filtered by variable scope while the graph
                # was built under tf.name_scope — verify it is non-empty.
                gen_var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                                 scope=scope)

        return self.y_ph, self.noise_ph, G_train_sb, G_test_sb, gen_var_list
Exemple #14
0
    def discriminator_allconv(self):
        if not self.generator_called:
            raise Exception(
                'self.generator() has to be called first before self.discriminator()'
            )
        scope = 'Discriminator'
        with self.tf_graph.as_default():
            with tf.name_scope(scope):
                # h1, w1 = valid(self.h, self.w, kernel_size=(5,5), stride=(1,1))
                # h2, w2 = valid(h1, w1, kernel_size=(5,5), stride=(2,2))
                # h3, w3 = valid(h2, w2, kernel_size=(5,5), stride=(2,2))
                # flat_dim = int(h3*w3*32)

                dis_real_sn = tg.StartNode(input_vars=[self.real_ph])

                # fake_ph = tf.placeholder('float32', [None, self.h, self.w, 1], name='fake')
                # fake_sn = tg.StartNode(input_vars=[fake_ph])

                h, w = same(in_height=self.h,
                            in_width=self.w,
                            stride=(1, 1),
                            kernel_size=(3, 3))
                h, w = same(in_height=h,
                            in_width=w,
                            stride=(1, 1),
                            kernel_size=(3, 3))
                h, w = same(in_height=h,
                            in_width=w,
                            stride=(2, 2),
                            kernel_size=(3, 3))
                h, w = same(in_height=h,
                            in_width=w,
                            stride=(1, 1),
                            kernel_size=(3, 3))
                h, w = same(in_height=h,
                            in_width=w,
                            stride=(1, 1),
                            kernel_size=(3, 3))

                h, w = same(in_height=h,
                            in_width=w,
                            stride=(2, 2),
                            kernel_size=(3, 3))
                h, w = same(in_height=h,
                            in_width=w,
                            stride=(1, 1),
                            kernel_size=(1, 1))
                h, w = same(in_height=h,
                            in_width=w,
                            stride=(1, 1),
                            kernel_size=(3, 3))

                h, w = same(in_height=h,
                            in_width=w,
                            stride=(1, 1),
                            kernel_size=(1, 1))
                print('h, w', h, w)
                print('===============')
                # h, w = valid(in_height=h, in_width=w, stride=(1,1), kernel_size=(h,w))

                disc_hn = tg.HiddenNode(
                    prev=[dis_real_sn, self.gen_hn],
                    layers=[
                        Dropout(0.2),
                        # TFBatchNormalization(name='b0'),
                        Conv2D(input_channels=self.c,
                               num_filters=96,
                               kernel_size=(3, 3),
                               stride=(1, 1),
                               padding='SAME'),
                        LeakyRELU(),
                        TFBatchNormalization(name='b1'),
                        # Dropout(0.5),
                        Conv2D(input_channels=96,
                               num_filters=96,
                               kernel_size=(3, 3),
                               stride=(1, 1),
                               padding='SAME'),
                        LeakyRELU(),
                        # TFBatchNormalization(name='b2'),
                        Dropout(0.5),
                        Conv2D(input_channels=96,
                               num_filters=96,
                               kernel_size=(3, 3),
                               stride=(2, 2),
                               padding='SAME'),
                        LeakyRELU(),
                        TFBatchNormalization(name='b3'),
                        # Dropout(0.5),
                        Conv2D(input_channels=96,
                               num_filters=192,
                               kernel_size=(3, 3),
                               stride=(1, 1),
                               padding='SAME'),
                        LeakyRELU(),
                        # TFBatchNormalization(name='b4'),
                        Dropout(0.5),
                        Conv2D(input_channels=192,
                               num_filters=192,
                               kernel_size=(3, 3),
                               stride=(1, 1),
                               padding='SAME'),
                        LeakyRELU(),
                        TFBatchNormalization(name='b5'),
                        # Dropout(0.5),
                        Conv2D(input_channels=192,
                               num_filters=192,
                               kernel_size=(3, 3),
                               stride=(2, 2),
                               padding='SAME'),
                        LeakyRELU(),
                        # TFBatchNormalization(name='b6'),
                        Dropout(0.5),
                        Conv2D(input_channels=192,
                               num_filters=192,
                               kernel_size=(3, 3),
                               stride=(1, 1),
                               padding='SAME'),
                        LeakyRELU(),
                        TFBatchNormalization(name='b7'),
                        # Dropout(0.5),
                        Conv2D(input_channels=192,
                               num_filters=192,
                               kernel_size=(1, 1),
                               stride=(1, 1),
                               padding='SAME'),
                        LeakyRELU(),
                        # TFBatchNormalization(name='b8'),
                        Dropout(0.5),
                        Conv2D(input_channels=192,
                               num_filters=self.nclass,
                               kernel_size=(1, 1),
                               stride=(1, 1),
                               padding='SAME'),
                        LeakyRELU(),
                        TFBatchNormalization(name='b9'),
                        # Dropout(0.5),
                        AvgPooling(poolsize=(h, w),
                                   stride=(1, 1),
                                   padding='VALID'),
                        Flatten(),
                    ])

                print('h,w', h, w)
                print('==============')
                class_hn = tg.HiddenNode(
                    prev=[disc_hn],
                    layers=[
                        Linear(self.nclass, self.nclass),
                        # Softmax()
                    ])

                judge_hn = tg.HiddenNode(
                    prev=[disc_hn],
                    layers=[
                        Linear(self.nclass, 1),
                        #  Sigmoid()
                    ])

                real_class_en = tg.EndNode(prev=[class_hn])
                real_judge_en = tg.EndNode(prev=[judge_hn])

                fake_class_en = tg.EndNode(prev=[class_hn])
                fake_judge_en = tg.EndNode(prev=[judge_hn])

                graph = tg.Graph(start=[dis_real_sn],
                                 end=[real_class_en, real_judge_en])
                real_train = graph.train_fprop()
                real_valid = graph.test_fprop()

                graph = tg.Graph(start=[self.noise_sn, self.gen_real_sn],
                                 end=[fake_class_en, fake_judge_en])
                fake_train = graph.train_fprop()
                fake_valid = graph.test_fprop()

                dis_var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                                 scope=scope)

        return self.real_ph, real_train, real_valid, fake_train, fake_valid, dis_var_list
# Example #15 (score: 0)
    def discriminator(self):
        """Build the image-GAN discriminator graph.

        Constructs a shared conv-net trunk (``disc_hn``) fed by both the real
        image placeholder and the generator output (``self.gen_hn``), with two
        heads on top: a class head (softmax over ``self.nclass``) and a
        real/fake judge head (sigmoid scalar).  Because the same hidden nodes
        are reused by the real and fake graphs, the two paths share weights.

        Must be called after ``self.generator()`` (which sets
        ``self.generator_called``, ``self.gen_hn``, ``self.noise_sn`` and
        ``self.gen_real_sn``).

        Returns:
            tuple: ``(real_ph, real_train, real_valid, fake_train, fake_valid,
            dis_var_list)`` where the ``*_train``/``*_valid`` entries are lists
            of ``[class_output, judge_output]`` tensors and ``dis_var_list``
            holds the variables collected under the discriminator scope.
        """
        if not self.generator_called:
            raise Exception(
                'self.generator() has to be called first before self.discriminator()'
            )
        scope = 'Discriminator'
        with self.tf_graph.as_default():
            with tf.name_scope(scope):
                # Track the spatial size through the three VALID-padded convs
                # below so the Flatten->Linear input dimension can be computed.
                # NOTE(review): assumes `valid` returns the output (h, w) of a
                # VALID-padded conv for the given kernel/stride — confirm.
                h1, w1 = valid(self.h,
                               self.w,
                               kernel_size=(5, 5),
                               stride=(1, 1))
                h2, w2 = valid(h1, w1, kernel_size=(5, 5), stride=(2, 2))
                h3, w3 = valid(h2, w2, kernel_size=(5, 5), stride=(2, 2))
                # 32 = num_filters of the last conv layer in the trunk below.
                flat_dim = int(h3 * w3 * 32)

                dis_real_sn = tg.StartNode(input_vars=[self.real_ph])

                # fake_ph = tf.placeholder('float32', [None, self.h, self.w, 1], name='fake')
                # fake_sn = tg.StartNode(input_vars=[fake_ph])

                # Shared trunk: both the real input and the generator output
                # flow through these layers, so discriminator weights are
                # shared between the real and fake branches.
                disc_hn = tg.HiddenNode(
                    prev=[dis_real_sn, self.gen_hn],
                    layers=[
                        Conv2D(input_channels=self.c,
                               num_filters=32,
                               kernel_size=(5, 5),
                               stride=(1, 1),
                               padding='VALID'),
                        TFBatchNormalization(name=scope + '/d1'),
                        # BatchNormalization(layer_type='conv', dim=32, short_memory=0.01),
                        LeakyRELU(),
                        Conv2D(input_channels=32,
                               num_filters=32,
                               kernel_size=(5, 5),
                               stride=(2, 2),
                               padding='VALID'),
                        TFBatchNormalization(name=scope + '/d2'),
                        # BatchNormalization(layer_type='conv', dim=32, short_memory=0.01),
                        LeakyRELU(),
                        Conv2D(input_channels=32,
                               num_filters=32,
                               kernel_size=(5, 5),
                               stride=(2, 2),
                               padding='VALID'),
                        TFBatchNormalization(name=scope + '/d3'),
                        # BatchNormalization(layer_type='conv', dim=32, short_memory=0.01),
                        LeakyRELU(),
                        #    Conv2D(input_channels=32, num_filters=32, kernel_size=(5,5), stride=(2,2), padding='VALID'),
                        #    RELU(),
                        Flatten(),
                        # flat_dim matches the conv-output size computed above.
                        Linear(flat_dim, self.bottleneck_dim),
                        # BatchNormalization(layer_type='fc', dim=self.bottleneck_dim, short_memory=0.01),
                        TFBatchNormalization(name=scope + '/d4'),
                        LeakyRELU(),
                        # Dropout(0.5),
                    ])

                # Class head: softmax over the dataset classes.
                class_hn = tg.HiddenNode(prev=[disc_hn],
                                         layers=[
                                             Linear(self.bottleneck_dim,
                                                    self.nclass),
                                             Softmax()
                                         ])

                # Judge head: scalar real/fake probability.
                judge_hn = tg.HiddenNode(
                    prev=[disc_hn],
                    layers=[Linear(self.bottleneck_dim, 1),
                            Sigmoid()])

                real_class_en = tg.EndNode(prev=[class_hn])
                real_judge_en = tg.EndNode(prev=[judge_hn])

                fake_class_en = tg.EndNode(prev=[class_hn])
                fake_judge_en = tg.EndNode(prev=[judge_hn])

                # Real branch: real placeholder -> shared trunk -> both heads.
                graph = tg.Graph(start=[dis_real_sn],
                                 end=[real_class_en, real_judge_en])
                real_train = graph.train_fprop()
                real_valid = graph.test_fprop()

                # Fake branch: noise + generator inputs -> generator ->
                # shared trunk -> both heads.
                graph = tg.Graph(start=[self.noise_sn, self.gen_real_sn],
                                 end=[fake_class_en, fake_judge_en])
                fake_train = graph.train_fprop()
                fake_valid = graph.test_fprop()

                # NOTE(review): tf.name_scope does not prefix variable names
                # created via tf.get_variable, so filtering GLOBAL_VARIABLES by
                # `scope` relies on the layers creating variables with
                # scope-prefixed names — verify this actually selects only the
                # discriminator variables.
                dis_var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                                 scope=scope)

        return self.real_ph, real_train, real_valid, fake_train, fake_valid, dis_var_list
# Example #16 (score: 0)
    def discriminator(self):
        """Build the character-CNN discriminator graph for the text GAN.

        Real text (integer character ids in ``self.real_ph``) is one-hot
        encoded and transposed to the conv layout, then both the real branch
        and the generator output (``self.gen_hn``) flow through a shared
        three-conv trunk (``disc_hn``) followed by two heads: a class head
        (softmax over ``self.nclass``) and a real/fake judge head (linear
        scalar; sigmoid commented out, presumably for a logits-based loss).
        Sharing the hidden nodes shares discriminator weights between the
        real and fake branches.

        Must be called after ``self.generator()`` (which sets
        ``self.generator_called``, ``self.gen_hn`` and ``self.noise_sn``).

        Returns:
            tuple: ``(real_ph, real_train, real_valid, fake_train, fake_valid,
            dis_var_list)`` where the ``*_train``/``*_valid`` entries are lists
            of ``[class_output, judge_output]`` tensors and ``dis_var_list``
            holds the variables collected under the discriminator scope.
        """
        if not self.generator_called:
            raise Exception(
                'self.generator() has to be called first before self.discriminator()'
            )
        scope = 'Discriminator'
        with self.tf_graph.as_default():
            with tf.name_scope(scope):
                # Track the spatial size through the three VALID-padded convs
                # below so the Flatten->Linear input dimension is exact.
                # (Removed a dead copy-pasted block from the image-GAN variant
                # that computed h1..h3/flat_dim with (5,5) kernels and was
                # immediately overwritten; `flat_dim` was never used.)
                # NOTE(review): assumes `valid` returns the output (h, w) of a
                # VALID-padded conv for the given kernel/stride — confirm.
                h1, w1 = valid(self.char_embed_dim,
                               self.word_len,
                               kernel_size=(self.char_embed_dim, 3),
                               stride=(1, 1))
                print('h1:{}, w1:{}'.format(h1, w1))
                h2, w2 = valid(h1, w1, kernel_size=(1, 3), stride=(1, 1))
                print('h2:{}, w2:{}'.format(h2, w2))
                h3, w3 = valid(h2, w2, kernel_size=(1, 3), stride=(1, 1))
                print('h3:{}, w3:{}'.format(h3, w3))
                # h4, w4 = valid(h3, w3, kernel_size=(1,6), stride=(1,1))
                # print('h4:{}, w4:{}'.format(h4, w4))
                # hf, wf = h4, w4
                hf, wf = h3, w3
                # Matches num_filters of every conv layer in the trunk below.
                n_filters = 100

                real_sn = tg.StartNode(input_vars=[self.real_ph])

                # Encode real character ids to one-hot and rearrange to the
                # (batch, embed, word_len, sent_len) layout the convs expect.
                real_hn = tg.HiddenNode(prev=[real_sn],
                                        layers=[
                                            OneHot(self.char_embed_dim),
                                            Transpose(perm=[0, 3, 2, 1])
                                        ])

                # Shared trunk: real branch and generator output both flow
                # through these layers, so weights are shared.
                disc_hn = tg.HiddenNode(
                    prev=[real_hn, self.gen_hn],
                    layers=[
                        Conv2D(input_channels=self.sent_len,
                               num_filters=100,
                               kernel_size=(self.char_embed_dim, 3),
                               stride=(1, 1),
                               padding='VALID'),
                        TFBatchNormalization(name=scope + '/d1'),
                        LeakyRELU(),
                        Conv2D(input_channels=100,
                               num_filters=100,
                               kernel_size=(1, 3),
                               stride=(1, 1),
                               padding='VALID'),
                        TFBatchNormalization(name=scope + '/d2'),
                        LeakyRELU(),
                        Conv2D(input_channels=100,
                               num_filters=100,
                               kernel_size=(1, 3),
                               stride=(1, 1),
                               padding='VALID'),
                        TFBatchNormalization(name=scope + '/d3'),
                        LeakyRELU(),
                        # Conv2D(input_channels=32, num_filters=128, kernel_size=(1,6), stride=(1,1), padding='VALID'),
                        # RELU(),
                        Flatten(),
                        # hf*wf*n_filters matches the conv output computed above.
                        Linear(int(hf * wf * n_filters), self.bottleneck_dim),
                        TFBatchNormalization(name=scope + '/d4'),
                        LeakyRELU(),
                    ])

                # Class head: softmax over the dataset classes.
                class_hn = tg.HiddenNode(prev=[disc_hn],
                                         layers=[
                                             Linear(self.bottleneck_dim,
                                                    self.nclass),
                                             Softmax()
                                         ])

                # Judge head: linear real/fake score (sigmoid left commented,
                # presumably the loss consumes logits).
                judge_hn = tg.HiddenNode(
                    prev=[disc_hn],
                    layers=[
                        Linear(self.bottleneck_dim, 1),
                        #  Sigmoid()
                    ])

                real_class_en = tg.EndNode(prev=[class_hn])
                real_judge_en = tg.EndNode(prev=[judge_hn])

                fake_class_en = tg.EndNode(prev=[class_hn])
                fake_judge_en = tg.EndNode(prev=[judge_hn])

                # Real branch: real placeholder -> one-hot -> trunk -> heads.
                graph = tg.Graph(start=[real_sn],
                                 end=[real_class_en, real_judge_en])

                real_train = graph.train_fprop()
                real_valid = graph.test_fprop()

                # Fake branch: noise -> generator -> trunk -> heads.
                graph = tg.Graph(start=[self.noise_sn],
                                 end=[fake_class_en, fake_judge_en])
                fake_train = graph.train_fprop()
                fake_valid = graph.test_fprop()

                # NOTE(review): tf.name_scope does not prefix tf.get_variable
                # names, so filtering GLOBAL_VARIABLES by `scope` relies on
                # the layers prefixing their variables accordingly — verify.
                dis_var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                                 scope=scope)

        return self.real_ph, real_train, real_valid, fake_train, fake_valid, dis_var_list